From d65b541bbf59e16cfc24ccaf3e311b5d106fb4fb Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Tue, 2 Mar 2021 19:18:13 +0800 Subject: [PATCH 001/178] support having --- src/client/inc/tsclient.h | 1 + src/client/src/tscSQLParser.c | 283 ++++++++++++++++++++++++++++++---- src/client/src/tscUtil.c | 4 + src/inc/ttokendef.h | 2 + src/query/inc/qSqlparser.h | 3 +- src/query/inc/sql.y | 4 +- src/query/src/qParserImpl.c | 6 +- src/query/src/sql.c | 4 +- 8 files changed, 267 insertions(+), 40 deletions(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 8f76f812ac..f3bdbe2e84 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -121,6 +121,7 @@ typedef struct SInternalField { bool visible; SExprInfo *pArithExprInfo; SSqlExpr *pSqlExpr; + SColumn *pFieldFilters; } SInternalField; typedef struct SFieldInfo { diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index b2c1a594a1..9656aca2a7 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -1031,6 +1031,41 @@ static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd) { return true; } +static void exchangeExpr(tSQLExpr* pExpr) { + tSQLExpr* pLeft = pExpr->pLeft; + tSQLExpr* pRight = pExpr->pRight; + + if ((pRight->nSQLOptr == TK_ID || (pRight->nSQLOptr >= TK_COUNT && pRight->nSQLOptr <= TK_AVG_IRATE)) && + (pLeft->nSQLOptr == TK_INTEGER || pLeft->nSQLOptr == TK_FLOAT || pLeft->nSQLOptr == TK_STRING || pLeft->nSQLOptr == TK_BOOL)) { + /* + * exchange value of the left handside and the value of the right-handside + * to make sure that the value of filter expression always locates in + * right-handside and + * the column-id/function is at the left handside. + */ + uint32_t optr = 0; + switch (pExpr->nSQLOptr) { + case TK_LE: + optr = TK_GE; + break; + case TK_LT: + optr = TK_GT; + break; + case TK_GT: + optr = TK_LT; + break; + case TK_GE: + optr = TK_LE; + break; + default: + optr = pExpr->nSQLOptr; + } + + pExpr->nSQLOptr = optr; + SWAP(pExpr->pLeft, pExpr->pRight, void*); + } +} + static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pCmd) { assert(pTagsList != NULL); @@ -3062,6 +3097,214 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd) return TSDB_CODE_SUCCESS; } +static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SColumnIndex* pIndex, tSQLExpr* pExpr, int32_t sqlOptr) { + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pIndex->tableIndex); + + STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; + SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, pIndex->columnIndex); + + const char* msg1 = "non binary column not support like operator"; + const char* msg2 = "binary column not support this operator"; + const char* msg3 = "bool column not support this operator"; + + SColumn* pColumn = tscColumnListInsert(pQueryInfo->colList, pIndex); + SColumnFilterInfo* pColFilter = NULL; + + /* + * in case of TK_AND filter condition, we first find the corresponding column and build the query condition together + * the already existed condition. 
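+   * (A TK_OR condition appends a brand-new filter entry instead, so each
+   * disjunct keeps its own independent filter range.)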
+ */ + if (sqlOptr == TK_AND) { + // this is a new filter condition on this column + if (pColumn->numOfFilters == 0) { + pColFilter = addColumnFilterInfo(pColumn); + } else { // update the existed column filter information, find the filter info here + pColFilter = &pColumn->filterInfo[0]; + } + + if (pColFilter == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + } else if (sqlOptr == TK_OR) { + // TODO fixme: failed to invalid the filter expression: "col1 = 1 OR col2 = 2" + pColFilter = addColumnFilterInfo(pColumn); + if (pColFilter == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + } else { // error; + return TSDB_CODE_TSC_INVALID_SQL; + } + + pColFilter->filterstr = + ((pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) ? 1 : 0); + + if (pColFilter->filterstr) { + if (pExpr->nSQLOptr != TK_EQ + && pExpr->nSQLOptr != TK_NE + && pExpr->nSQLOptr != TK_ISNULL + && pExpr->nSQLOptr != TK_NOTNULL + && pExpr->nSQLOptr != TK_LIKE + ) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + } else { + if (pExpr->nSQLOptr == TK_LIKE) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + if (pSchema->type == TSDB_DATA_TYPE_BOOL) { + if (pExpr->nSQLOptr != TK_EQ && pExpr->nSQLOptr != TK_NE) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + } + } + + pColumn->colIndex = *pIndex; + return doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, pIndex, pExpr); +} + +int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr** pExpr, int32_t parentOptr) { + if (pExpr == NULL || (*pExpr) == NULL) { + return TSDB_CODE_SUCCESS; + } + + const char* msg1 = "invalid having clause"; + + tSQLExpr* pLeft = (*pExpr)->pLeft; + tSQLExpr* pRight = (*pExpr)->pRight; + + if ((*pExpr)->nSQLOptr == TK_AND || (*pExpr)->nSQLOptr == TK_OR) { + int32_t ret = getHavingExpr(pCmd, pQueryInfo, &(*pExpr)->pLeft, (*pExpr)->nSQLOptr); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + + return getHavingExpr(pCmd, pQueryInfo, &(*pExpr)->pRight, (*pExpr)->nSQLOptr); + } + + if ((pLeft->nSQLOptr >= TK_COUNT && pLeft->nSQLOptr <= TK_AVG_IRATE) && + (pRight->nSQLOptr >= TK_COUNT && pRight->nSQLOptr <= TK_AVG_IRATE)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + if (pLeft->nSQLOptr >= TK_BOOL + && pLeft->nSQLOptr <= TK_BINARY + && pRight->nSQLOptr >= TK_BOOL + && pRight->nSQLOptr <= TK_BINARY) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + exchangeExpr(*pExpr); + + pLeft = (*pExpr)->pLeft; + pRight = (*pExpr)->pRight; + + if (!(pLeft->nSQLOptr >= TK_COUNT && pLeft->nSQLOptr <= TK_AVG_IRATE)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + if (!(pRight->nSQLOptr >= TK_BOOL && pRight->nSQLOptr <= TK_BINARY)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + if ((*pExpr)->nSQLOptr >= TK_BITAND) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + if (pLeft->pParam == NULL || pLeft->pParam->nExpr < 1) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + for (int32_t i = 0; i < pLeft->pParam->nExpr; i++) { + tSqlExprItem* pParamElem = &(pLeft->pParam->a[i]); + if (pParamElem->pNode->nSQLOptr != TK_ALL && pParamElem->pNode->nSQLOptr != TK_ID) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + SColumnIndex index = COLUMN_INDEX_INITIALIZER; + if ((getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS)) { + return 
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); + STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; + + if (index.columnIndex <= 0 || + index.columnIndex >= tscGetNumOfColumns(pTableMeta)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + } + + tSqlExprItem item = {.pNode = pLeft, .aliasName = NULL, .distinct = false}; + + int32_t outputIndex = (int32_t)tscSqlExprNumOfExprs(pQueryInfo); + + // ADD TRUE FOR TEST + if (addExprAndResultField(pCmd, pQueryInfo, outputIndex, &item, true) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_TSC_INVALID_SQL; + } + + int32_t slot = tscNumOfFields(pQueryInfo) - 1; + SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, slot); + + if (pInfo->pFieldFilters == NULL) { + SColumn* pFieldFilters = calloc(1, sizeof(SColumn)); + if (pFieldFilters == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + pInfo->pFieldFilters = pFieldFilters; + } + + return handleExprInHavingClause(pCmd, pQueryInfo, pInfo->pFieldFilters, pExpr, parentOptr); +} + + + +int32_t parseHavingClause(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SSqlCmd* pCmd) { + const char* msg1 = "having only works with group by"; + //const char* msg2 = "invalid column name in having clause"; + //const char* msg3 = "columns from one table allowed as having columns"; + //const char* msg4 = "no tag allowed in having clause"; + const char* msg5 = "invalid expression in having clause"; + +/* + const char* msg1 = "too many columns in group by clause"; + const char* msg4 = "join query does not support group by"; + const char* msg7 = "not support group by expression"; + const char* msg8 = "not allowed column type for group by"; + const char* msg9 = "tags not allowed for table query"; +*/ + + // todo : handle two tables situation + //STableMetaInfo* pTableMetaInfo = NULL; + + if (pExpr == NULL || (*pExpr) == NULL) { + return TSDB_CODE_SUCCESS; + } + + if (pQueryInfo->groupbyExpr.numOfGroupCols <= 0) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + if ((*pExpr)->pLeft == NULL || (*pExpr)->pRight == NULL) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); + } + + if (pQueryInfo->colList == NULL) { + pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES); + } + + int32_t ret = 0; + + if ((ret = getHavingExpr(pCmd, pQueryInfo, pExpr, TK_AND)) != TSDB_CODE_SUCCESS) { + return ret; + } + + return TSDB_CODE_SUCCESS; +} + + static SColumnFilterInfo* addColumnFilterInfo(SColumn* pColumn) { if (pColumn == NULL) { return NULL; @@ -3715,40 +3958,7 @@ static bool isValidExpr(tSQLExpr* pLeft, tSQLExpr* pRight, int32_t optr) { return true; } -static void exchangeExpr(tSQLExpr* pExpr) { - tSQLExpr* pLeft = pExpr->pLeft; - tSQLExpr* pRight = pExpr->pRight; - if (pRight->nSQLOptr == TK_ID && (pLeft->nSQLOptr == TK_INTEGER || pLeft->nSQLOptr == TK_FLOAT || - pLeft->nSQLOptr == TK_STRING || pLeft->nSQLOptr == TK_BOOL)) { - /* - * exchange value of the left handside and the value of the right-handside - * to make sure that the value of filter expression always locates in - * right-handside and - * the column-id is at the left handside. 
- */ - uint32_t optr = 0; - switch (pExpr->nSQLOptr) { - case TK_LE: - optr = TK_GE; - break; - case TK_LT: - optr = TK_GT; - break; - case TK_GT: - optr = TK_LT; - break; - case TK_GE: - optr = TK_LE; - break; - default: - optr = pExpr->nSQLOptr; - } - - pExpr->nSQLOptr = optr; - SWAP(pExpr->pLeft, pExpr->pRight, void*); - } -} static bool validateJoinExprNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr, SColumnIndex* pLeftIndex) { const char* msg1 = "illegal column name"; @@ -6729,7 +6939,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { */ if (pQuerySql->from == NULL) { assert(pQuerySql->fillType == NULL && pQuerySql->pGroupby == NULL && pQuerySql->pWhere == NULL && - pQuerySql->pSortOrder == NULL); + pQuerySql->pSortOrder == NULL && pQuerySql->pHaving == NULL); return doLocalQueryProcess(pCmd, pQueryInfo, pQuerySql); } @@ -6817,6 +7027,11 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { return TSDB_CODE_TSC_INVALID_SQL; } + // parse the having clause in the first place + if (parseHavingClause(pQueryInfo, &pQuerySql->pHaving, pCmd) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_TSC_INVALID_SQL; + } + // set where info STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index cfa73b969d..ef5e9170da 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1061,6 +1061,10 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo) { tfree(pInfo->pArithExprInfo); } + + if (pInfo->pFieldFilters != NULL) { + tscColumnDestroy(pInfo->pFieldFilters); + } } taosArrayDestroy(pFieldInfo->internalField); diff --git a/src/inc/ttokendef.h b/src/inc/ttokendef.h index 8bb9cde935..bf72a15ce2 100644 --- a/src/inc/ttokendef.h +++ b/src/inc/ttokendef.h @@ -233,6 +233,8 @@ + + #define TK_SPACE 300 #define TK_COMMENT 301 #define TK_ILLEGAL 302 diff --git a/src/query/inc/qSqlparser.h b/src/query/inc/qSqlparser.h index bcc876c953..8efaa4cb21 100644 --- a/src/query/inc/qSqlparser.h +++ b/src/query/inc/qSqlparser.h @@ -71,6 +71,7 @@ typedef struct SQuerySQL { SLimitVal slimit; // group limit offset [optional] SArray * fillType; // fill type[optional], SArray SStrToken selectToken; // sql string + struct tSQLExpr * pHaving; // having clause [optional] } SQuerySQL; typedef struct SCreatedTableInfo { @@ -242,7 +243,7 @@ void tSqlExprListDestroy(tSQLExprList *pList); SQuerySQL *tSetQuerySqlElems(SStrToken *pSelectToken, tSQLExprList *pSelection, SArray *pFrom, tSQLExpr *pWhere, SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, - SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pGLimit); + SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pGLimit, tSQLExpr *pHaving); SCreateTableSQL *tSetCreateSqlElems(SArray *pCols, SArray *pTags, SQuerySQL *pSelect, int32_t type); diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y index 8a01a736b7..7d81d4279b 100644 --- a/src/query/inc/sql.y +++ b/src/query/inc/sql.y @@ -437,7 +437,7 @@ tagitem(A) ::= PLUS(X) FLOAT(Y). { %type select {SQuerySQL*} %destructor select {doDestroyQuerySql($$);} select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) fill_opt(F) sliding_opt(S) groupby_opt(P) orderby_opt(Z) having_opt(N) slimit_opt(G) limit_opt(L). { - A = tSetQuerySqlElems(&T, W, X, Y, P, Z, &K, &S, F, &L, &G); + A = tSetQuerySqlElems(&T, W, X, Y, P, Z, &K, &S, F, &L, &G, N); } %type union {SSubclauseInfo*} @@ -455,7 +455,7 @@ cmd ::= union(X). 
{ setSqlInfo(pInfo, X, NULL, TSDB_SQL_SELECT); } // select server_version(), select client_version(), // select server_state(); select(A) ::= SELECT(T) selcollist(W). { - A = tSetQuerySqlElems(&T, W, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + A = tSetQuerySqlElems(&T, W, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); } // selcollist is a list of expressions that are to become the return diff --git a/src/query/src/qParserImpl.c b/src/query/src/qParserImpl.c index 2416250dce..658815adcf 100644 --- a/src/query/src/qParserImpl.c +++ b/src/query/src/qParserImpl.c @@ -531,7 +531,7 @@ void tSqlSetColumnType(TAOS_FIELD *pField, SStrToken *type) { */ SQuerySQL *tSetQuerySqlElems(SStrToken *pSelectToken, tSQLExprList *pSelection, SArray *pFrom, tSQLExpr *pWhere, SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, - SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pGLimit) { + SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pGLimit, tSQLExpr *pHaving) { assert(pSelection != NULL); SQuerySQL *pQuery = calloc(1, sizeof(SQuerySQL)); @@ -543,6 +543,7 @@ SQuerySQL *tSetQuerySqlElems(SStrToken *pSelectToken, tSQLExprList *pSelection, pQuery->pGroupby = pGroupby; pQuery->pSortOrder = pSortOrder; pQuery->pWhere = pWhere; + pQuery->pHaving = pHaving; if (pLimit != NULL) { pQuery->limit = *pLimit; @@ -589,6 +590,9 @@ void doDestroyQuerySql(SQuerySQL *pQuerySql) { tSqlExprDestroy(pQuerySql->pWhere); pQuerySql->pWhere = NULL; + + tSqlExprDestroy(pQuerySql->pHaving); + pQuerySql->pHaving = NULL; taosArrayDestroyEx(pQuerySql->pSortOrder, freeVariant); pQuerySql->pSortOrder = NULL; diff --git a/src/query/src/sql.c b/src/query/src/sql.c index 2b1109688d..15a642bed5 100644 --- a/src/query/src/sql.c +++ b/src/query/src/sql.c @@ -2864,7 +2864,7 @@ static YYACTIONTYPE yy_reduce( break; case 147: /* select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ { - yylhsminor.yy114 = tSetQuerySqlElems(&yymsp[-11].minor.yy0, yymsp[-10].minor.yy522, yymsp[-9].minor.yy247, yymsp[-8].minor.yy326, yymsp[-4].minor.yy247, yymsp[-3].minor.yy247, &yymsp[-7].minor.yy430, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy247, &yymsp[0].minor.yy204, &yymsp[-1].minor.yy204); + yylhsminor.yy114 = tSetQuerySqlElems(&yymsp[-11].minor.yy0, yymsp[-10].minor.yy522, yymsp[-9].minor.yy247, yymsp[-8].minor.yy326, yymsp[-4].minor.yy247, yymsp[-3].minor.yy247, &yymsp[-7].minor.yy430, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy247, &yymsp[0].minor.yy204, &yymsp[-1].minor.yy204, yymsp[-2].minor.yy326); } yymsp[-11].minor.yy114 = yylhsminor.yy114; break; @@ -2888,7 +2888,7 @@ static YYACTIONTYPE yy_reduce( break; case 153: /* select ::= SELECT selcollist */ { - yylhsminor.yy114 = tSetQuerySqlElems(&yymsp[-1].minor.yy0, yymsp[0].minor.yy522, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + yylhsminor.yy114 = tSetQuerySqlElems(&yymsp[-1].minor.yy0, yymsp[0].minor.yy522, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); } yymsp[-1].minor.yy114 = yylhsminor.yy114; break; From 984e037e672ed2f23a3f0734f819f0811c65f014 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Wed, 3 Mar 2021 16:54:41 +0800 Subject: [PATCH 002/178] support having --- src/client/inc/tsclient.h | 17 +- src/client/src/tscLocalMerge.c | 20 ++ src/client/src/tscSQLParser.c | 480 ++++++++++++++++++--------------- src/client/src/tscUtil.c | 32 ++- src/query/inc/qSqlparser.h | 2 + src/query/src/qParserImpl.c | 52 ++++ 6 
files changed, 369 insertions(+), 234 deletions(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index f3bdbe2e84..aa0f1c9cff 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -116,13 +116,6 @@ typedef struct SColumnIndex { int16_t columnIndex; } SColumnIndex; -typedef struct SInternalField { - TAOS_FIELD field; - bool visible; - SExprInfo *pArithExprInfo; - SSqlExpr *pSqlExpr; - SColumn *pFieldFilters; -} SInternalField; typedef struct SFieldInfo { int16_t numOfOutput; // number of column in result @@ -136,6 +129,15 @@ typedef struct SColumn { SColumnFilterInfo *filterInfo; } SColumn; +typedef struct SInternalField { + TAOS_FIELD field; + bool visible; + SExprInfo *pArithExprInfo; + SSqlExpr *pSqlExpr; + tSQLExpr *pExpr; //used for having parse + SColumn *pFieldFilters; //having filter info +} SInternalField; + typedef struct SCond { uint64_t uid; int32_t len; // length of tag query condition data @@ -228,6 +230,7 @@ typedef struct SQueryInfo { int32_t round; // 0/1/.... int32_t bufLen; char* buf; + int32_t havingFieldNum; } SQueryInfo; typedef struct { diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index 23fb0ab67c..61d90598b4 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -1229,6 +1229,24 @@ static bool saveGroupResultInfo(SSqlObj *pSql) { return false; } +int32_t doHavingFilter(SQueryInfo* pQueryInfo) { + if (pQueryInfo->havingFieldNum <= 0) { + return TSDB_CODE_SUCCESS; + } + + size_t numOfOutput = tscNumOfFields(pQueryInfo); + for(int32_t i = 0; i < numOfOutput; ++i) { + SColumn* pFieldFilters = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i)->pFieldFilters; + if (pFieldFilters != NULL) { + continue; + } + } + + return TSDB_CODE_SUCCESS; +} + + + /** * * @param pSql @@ -1269,6 +1287,8 @@ bool genFinalResults(SSqlObj *pSql, SLocalMerger *pLocalMerge, bool noMoreCurren doArithmeticCalculate(pQueryInfo, pResBuf, pModel->rowSize, pLocalMerge->finalModel->rowSize); } + doHavingFilter(pQueryInfo); + // no interval query, no fill operation if (pQueryInfo->interval.interval == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) { genFinalResWithoutFill(pRes, pLocalMerge, pQueryInfo); diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 9656aca2a7..588e9b18aa 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -3097,213 +3097,6 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd) return TSDB_CODE_SUCCESS; } -static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SColumnIndex* pIndex, tSQLExpr* pExpr, int32_t sqlOptr) { - STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pIndex->tableIndex); - - STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; - SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, pIndex->columnIndex); - - const char* msg1 = "non binary column not support like operator"; - const char* msg2 = "binary column not support this operator"; - const char* msg3 = "bool column not support this operator"; - - SColumn* pColumn = tscColumnListInsert(pQueryInfo->colList, pIndex); - SColumnFilterInfo* pColFilter = NULL; - - /* - * in case of TK_AND filter condition, we first find the corresponding column and build the query condition together - * the already existed condition. 
- */ - if (sqlOptr == TK_AND) { - // this is a new filter condition on this column - if (pColumn->numOfFilters == 0) { - pColFilter = addColumnFilterInfo(pColumn); - } else { // update the existed column filter information, find the filter info here - pColFilter = &pColumn->filterInfo[0]; - } - - if (pColFilter == NULL) { - return TSDB_CODE_TSC_OUT_OF_MEMORY; - } - } else if (sqlOptr == TK_OR) { - // TODO fixme: failed to invalid the filter expression: "col1 = 1 OR col2 = 2" - pColFilter = addColumnFilterInfo(pColumn); - if (pColFilter == NULL) { - return TSDB_CODE_TSC_OUT_OF_MEMORY; - } - } else { // error; - return TSDB_CODE_TSC_INVALID_SQL; - } - - pColFilter->filterstr = - ((pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) ? 1 : 0); - - if (pColFilter->filterstr) { - if (pExpr->nSQLOptr != TK_EQ - && pExpr->nSQLOptr != TK_NE - && pExpr->nSQLOptr != TK_ISNULL - && pExpr->nSQLOptr != TK_NOTNULL - && pExpr->nSQLOptr != TK_LIKE - ) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); - } - } else { - if (pExpr->nSQLOptr == TK_LIKE) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); - } - - if (pSchema->type == TSDB_DATA_TYPE_BOOL) { - if (pExpr->nSQLOptr != TK_EQ && pExpr->nSQLOptr != TK_NE) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); - } - } - } - - pColumn->colIndex = *pIndex; - return doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, pIndex, pExpr); -} - -int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr** pExpr, int32_t parentOptr) { - if (pExpr == NULL || (*pExpr) == NULL) { - return TSDB_CODE_SUCCESS; - } - - const char* msg1 = "invalid having clause"; - - tSQLExpr* pLeft = (*pExpr)->pLeft; - tSQLExpr* pRight = (*pExpr)->pRight; - - if ((*pExpr)->nSQLOptr == TK_AND || (*pExpr)->nSQLOptr == TK_OR) { - int32_t ret = getHavingExpr(pCmd, pQueryInfo, &(*pExpr)->pLeft, (*pExpr)->nSQLOptr); - if (ret != TSDB_CODE_SUCCESS) { - return ret; - } - - return getHavingExpr(pCmd, pQueryInfo, &(*pExpr)->pRight, (*pExpr)->nSQLOptr); - } - - if ((pLeft->nSQLOptr >= TK_COUNT && pLeft->nSQLOptr <= TK_AVG_IRATE) && - (pRight->nSQLOptr >= TK_COUNT && pRight->nSQLOptr <= TK_AVG_IRATE)) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); - } - - if (pLeft->nSQLOptr >= TK_BOOL - && pLeft->nSQLOptr <= TK_BINARY - && pRight->nSQLOptr >= TK_BOOL - && pRight->nSQLOptr <= TK_BINARY) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); - } - - exchangeExpr(*pExpr); - - pLeft = (*pExpr)->pLeft; - pRight = (*pExpr)->pRight; - - if (!(pLeft->nSQLOptr >= TK_COUNT && pLeft->nSQLOptr <= TK_AVG_IRATE)) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); - } - - if (!(pRight->nSQLOptr >= TK_BOOL && pRight->nSQLOptr <= TK_BINARY)) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); - } - - if ((*pExpr)->nSQLOptr >= TK_BITAND) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); - } - - if (pLeft->pParam == NULL || pLeft->pParam->nExpr < 1) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); - } - - for (int32_t i = 0; i < pLeft->pParam->nExpr; i++) { - tSqlExprItem* pParamElem = &(pLeft->pParam->a[i]); - if (pParamElem->pNode->nSQLOptr != TK_ALL && pParamElem->pNode->nSQLOptr != TK_ID) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); - } - - SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if ((getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS)) { - return 
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); - } - - STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); - STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; - - if (index.columnIndex <= 0 || - index.columnIndex >= tscGetNumOfColumns(pTableMeta)) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); - } - } - - tSqlExprItem item = {.pNode = pLeft, .aliasName = NULL, .distinct = false}; - - int32_t outputIndex = (int32_t)tscSqlExprNumOfExprs(pQueryInfo); - - // ADD TRUE FOR TEST - if (addExprAndResultField(pCmd, pQueryInfo, outputIndex, &item, true) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_TSC_INVALID_SQL; - } - - int32_t slot = tscNumOfFields(pQueryInfo) - 1; - SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, slot); - - if (pInfo->pFieldFilters == NULL) { - SColumn* pFieldFilters = calloc(1, sizeof(SColumn)); - if (pFieldFilters == NULL) { - return TSDB_CODE_TSC_OUT_OF_MEMORY; - } - - pInfo->pFieldFilters = pFieldFilters; - } - - return handleExprInHavingClause(pCmd, pQueryInfo, pInfo->pFieldFilters, pExpr, parentOptr); -} - - - -int32_t parseHavingClause(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SSqlCmd* pCmd) { - const char* msg1 = "having only works with group by"; - //const char* msg2 = "invalid column name in having clause"; - //const char* msg3 = "columns from one table allowed as having columns"; - //const char* msg4 = "no tag allowed in having clause"; - const char* msg5 = "invalid expression in having clause"; - -/* - const char* msg1 = "too many columns in group by clause"; - const char* msg4 = "join query does not support group by"; - const char* msg7 = "not support group by expression"; - const char* msg8 = "not allowed column type for group by"; - const char* msg9 = "tags not allowed for table query"; -*/ - - // todo : handle two tables situation - //STableMetaInfo* pTableMetaInfo = NULL; - - if (pExpr == NULL || (*pExpr) == NULL) { - return TSDB_CODE_SUCCESS; - } - - if (pQueryInfo->groupbyExpr.numOfGroupCols <= 0) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); - } - - if ((*pExpr)->pLeft == NULL || (*pExpr)->pRight == NULL) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); - } - - if (pQueryInfo->colList == NULL) { - pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES); - } - - int32_t ret = 0; - - if ((ret = getHavingExpr(pCmd, pQueryInfo, pExpr, TK_AND)) != TSDB_CODE_SUCCESS) { - return ret; - } - - return TSDB_CODE_SUCCESS; -} - static SColumnFilterInfo* addColumnFilterInfo(SColumn* pColumn) { if (pColumn == NULL) { @@ -3328,15 +3121,11 @@ static SColumnFilterInfo* addColumnFilterInfo(SColumn* pColumn) { } static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SColumnFilterInfo* pColumnFilter, - SColumnIndex* columnIndex, tSQLExpr* pExpr) { + int16_t colType, tSQLExpr* pExpr) { const char* msg = "not supported filter condition"; tSQLExpr* pRight = pExpr->pRight; - STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, columnIndex->tableIndex); - SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex->columnIndex); - - int16_t colType = pSchema->type; if (colType >= TSDB_DATA_TYPE_TINYINT && colType <= TSDB_DATA_TYPE_BIGINT) { colType = TSDB_DATA_TYPE_BIGINT; } else if (colType == TSDB_DATA_TYPE_FLOAT || colType == TSDB_DATA_TYPE_DOUBLE) { @@ -3649,7 +3438,10 @@ static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SC } pColumn->colIndex = *pIndex; - return 
doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, pIndex, pExpr); + + int16_t colType = pSchema->type; + + return doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, colType, pExpr); } static void relToString(tSQLExpr* pExpr, char** str) { @@ -6901,6 +6693,259 @@ static int32_t checkQueryRangeForFill(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { return TSDB_CODE_SUCCESS; } + + int32_t tscInsertExprFields(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr, SInternalField** interField) { + tSqlExprItem item = {.pNode = pExpr, .aliasName = NULL, .distinct = false}; + + int32_t outputIndex = (int32_t)tscSqlExprNumOfExprs(pQueryInfo); + + // ADD TRUE FOR TEST + if (addExprAndResultField(pCmd, pQueryInfo, outputIndex, &item, true) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_TSC_INVALID_SQL; + } + + ++pQueryInfo->havingFieldNum; + + int32_t slot = tscNumOfFields(pQueryInfo) - 1; + SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, slot); + pInfo->visible = false; + pInfo->pExpr = pExpr; + + if (pInfo->pFieldFilters == NULL) { + SColumn* pFieldFilters = calloc(1, sizeof(SColumn)); + if (pFieldFilters == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + pInfo->pFieldFilters = pFieldFilters; + } + + *interField = pInfo; + + return TSDB_CODE_SUCCESS; +} + +int32_t tscGetExprFilters(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr, SInternalField** pField) { + SInternalField* pInfo = NULL; + + for (int32_t i = pQueryInfo->havingFieldNum - 1; i >= 0; --i) { + pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i); + + if (0 == tSqlExprCompare(pInfo->pExpr, pExpr)) { + *pField = pInfo; + return TSDB_CODE_SUCCESS; + } + } + + int32_t ret = tscInsertExprFields(pCmd, pQueryInfo, pExpr, &pInfo); + if (ret) { + return ret; + } + + *pField = pInfo; + + return TSDB_CODE_SUCCESS; +} + + + +static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr, int32_t sqlOptr) { + const char* msg1 = "non binary column not support like operator"; + const char* msg2 = "invalid operator for binary column in having clause"; + const char* msg3 = "invalid operator for bool column in having clause"; + + SColumn* pColumn = NULL; + SColumnFilterInfo* pColFilter = NULL; + SInternalField* pInfo = NULL; + + /* + * in case of TK_AND filter condition, we first find the corresponding column and build the query condition together + * the already existed condition. + */ + if (sqlOptr == TK_AND) { + int32_t ret = tscGetExprFilters(pCmd, pQueryInfo, pExpr->pLeft, &pInfo); + if (ret) { + return ret; + } + + pColumn = pInfo->pFieldFilters; + + // this is a new filter condition on this column + if (pColumn->numOfFilters == 0) { + pColFilter = addColumnFilterInfo(pColumn); + } else { // update the existed column filter information, find the filter info here + pColFilter = &pColumn->filterInfo[0]; + } + + if (pColFilter == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + } else if (sqlOptr == TK_OR) { + int32_t ret = tscInsertExprFields(pCmd, pQueryInfo, pExpr->pLeft, &pInfo); + if (ret) { + return ret; + } + + // TODO fixme: failed to invalid the filter expression: "col1 = 1 OR col2 = 2" + pColFilter = addColumnFilterInfo(pColumn); + if (pColFilter == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + } else { // error; + return TSDB_CODE_TSC_INVALID_SQL; + } + + pColFilter->filterstr = + ((pInfo->field.type == TSDB_DATA_TYPE_BINARY || pInfo->field.type == TSDB_DATA_TYPE_NCHAR) ? 
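      /* 1: string filter (BINARY/NCHAR); 0: numeric range filter */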
1 : 0); + + if (pColFilter->filterstr) { + if (pExpr->nSQLOptr != TK_EQ + && pExpr->nSQLOptr != TK_NE + && pExpr->nSQLOptr != TK_ISNULL + && pExpr->nSQLOptr != TK_NOTNULL + && pExpr->nSQLOptr != TK_LIKE + ) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + } else { + if (pExpr->nSQLOptr == TK_LIKE) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + if (pInfo->field.type == TSDB_DATA_TYPE_BOOL) { + if (pExpr->nSQLOptr != TK_EQ && pExpr->nSQLOptr != TK_NE) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + } + } + + return doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, pInfo->field.type, pExpr); +} + +int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr, int32_t parentOptr) { + if (pExpr == NULL) { + return TSDB_CODE_SUCCESS; + } + + const char* msg1 = "invalid having clause"; + + tSQLExpr* pLeft = pExpr->pLeft; + tSQLExpr* pRight = pExpr->pRight; + + if (pExpr->nSQLOptr == TK_AND || pExpr->nSQLOptr == TK_OR) { + int32_t ret = getHavingExpr(pCmd, pQueryInfo, pExpr->pLeft, pExpr->nSQLOptr); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + + return getHavingExpr(pCmd, pQueryInfo, pExpr->pRight, pExpr->nSQLOptr); + } + + if ((pLeft->nSQLOptr >= TK_COUNT && pLeft->nSQLOptr <= TK_AVG_IRATE) && + (pRight->nSQLOptr >= TK_COUNT && pRight->nSQLOptr <= TK_AVG_IRATE)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + if (pLeft->nSQLOptr >= TK_BOOL + && pLeft->nSQLOptr <= TK_BINARY + && pRight->nSQLOptr >= TK_BOOL + && pRight->nSQLOptr <= TK_BINARY) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + exchangeExpr(pExpr); + + pLeft = pExpr->pLeft; + pRight = pExpr->pRight; + + if (!(pLeft->nSQLOptr >= TK_COUNT && pLeft->nSQLOptr <= TK_AVG_IRATE)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + if (!(pRight->nSQLOptr >= TK_BOOL && pRight->nSQLOptr <= TK_BINARY)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + if (pExpr->nSQLOptr >= TK_BITAND) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + if (pLeft->pParam == NULL || pLeft->pParam->nExpr < 1) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + for (int32_t i = 0; i < pLeft->pParam->nExpr; i++) { + tSqlExprItem* pParamElem = &(pLeft->pParam->a[i]); + if (pParamElem->pNode->nSQLOptr != TK_ALL && pParamElem->pNode->nSQLOptr != TK_ID) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + SColumnIndex index = COLUMN_INDEX_INITIALIZER; + if ((getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); + STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; + + if (index.columnIndex <= 0 || + index.columnIndex >= tscGetNumOfColumns(pTableMeta)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + } + + return handleExprInHavingClause(pCmd, pQueryInfo, pExpr, parentOptr); +} + + + +int32_t parseHavingClause(SQueryInfo* pQueryInfo, tSQLExpr* pExpr, SSqlCmd* pCmd) { + const char* msg1 = "having only works with group by"; + //const char* msg2 = "invalid column name in having clause"; + //const char* msg3 = "columns from one table allowed as having columns"; + //const char* msg4 = "no tag allowed in having clause"; + const char* msg5 = "invalid expression in having clause"; + +/* + const char* msg1 = "too many columns 
in group by clause"; + const char* msg4 = "join query does not support group by"; + const char* msg7 = "not support group by expression"; + const char* msg8 = "not allowed column type for group by"; + const char* msg9 = "tags not allowed for table query"; +*/ + + // todo : handle two tables situation + //STableMetaInfo* pTableMetaInfo = NULL; + + if (pExpr == NULL) { + return TSDB_CODE_SUCCESS; + } + + if (pQueryInfo->groupbyExpr.numOfGroupCols <= 0) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + if (pExpr->pLeft == NULL || pExpr->pRight == NULL) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); + } + + if (pQueryInfo->colList == NULL) { + pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES); + } + + int32_t ret = 0; + + if ((ret = getHavingExpr(pCmd, pQueryInfo, pExpr, TK_AND)) != TSDB_CODE_SUCCESS) { + return ret; + } + + return TSDB_CODE_SUCCESS; +} + + + + + int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { assert(pQuerySql != NULL && (pQuerySql->from == NULL || taosArrayGetSize(pQuerySql->from) > 0)); @@ -7028,7 +7073,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { } // parse the having clause in the first place - if (parseHavingClause(pQueryInfo, &pQuerySql->pHaving, pCmd) != TSDB_CODE_SUCCESS) { + if (parseHavingClause(pQueryInfo, pQuerySql->pHaving, pCmd) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } @@ -7261,3 +7306,10 @@ bool hasNormalColumnFilter(SQueryInfo* pQueryInfo) { return false; } + + + + + + + diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index ef5e9170da..e0b3e8a565 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -956,6 +956,7 @@ SInternalField* tscFieldInfoAppend(SFieldInfo* pFieldInfo, TAOS_FIELD* pField) { .pSqlExpr = NULL, .pArithExprInfo = NULL, .visible = true, + .pFieldFilters = NULL, }; info.field = *pField; @@ -968,6 +969,7 @@ SInternalField* tscFieldInfoInsert(SFieldInfo* pFieldInfo, int32_t index, TAOS_F .pSqlExpr = NULL, .pArithExprInfo = NULL, .visible = true, + .pFieldFilters = NULL, }; info.field = *field; @@ -1041,6 +1043,22 @@ int32_t tscGetResRowLength(SArray* pExprList) { return size; } +static void destroyFilterInfo(SColumnFilterInfo* pFilterInfo, int32_t numOfFilters) { + for(int32_t i = 0; i < numOfFilters; ++i) { + if (pFilterInfo[i].filterstr) { + tfree(pFilterInfo[i].pz); + } + } + + tfree(pFilterInfo); +} + +static void tscColumnDestroy(SColumn* pCol) { + destroyFilterInfo(pCol->filterInfo, pCol->numOfFilters); + free(pCol); +} + + void tscFieldInfoClear(SFieldInfo* pFieldInfo) { if (pFieldInfo == NULL) { return; @@ -1298,15 +1316,7 @@ SColumn* tscColumnListInsert(SArray* pColumnList, SColumnIndex* pColIndex) { return taosArrayGetP(pColumnList, i); } -static void destroyFilterInfo(SColumnFilterInfo* pFilterInfo, int32_t numOfFilters) { - for(int32_t i = 0; i < numOfFilters; ++i) { - if (pFilterInfo[i].filterstr) { - tfree(pFilterInfo[i].pz); - } - } - - tfree(pFilterInfo); -} + SColumn* tscColumnClone(const SColumn* src) { assert(src != NULL); @@ -1323,10 +1333,6 @@ SColumn* tscColumnClone(const SColumn* src) { return dst; } -static void tscColumnDestroy(SColumn* pCol) { - destroyFilterInfo(pCol->filterInfo, pCol->numOfFilters); - free(pCol); -} void tscColumnListCopy(SArray* dst, const SArray* src, int16_t tableIndex) { assert(src != NULL && dst != NULL); diff --git a/src/query/inc/qSqlparser.h b/src/query/inc/qSqlparser.h index 8efaa4cb21..08becde20a 100644 --- 
a/src/query/inc/qSqlparser.h +++ b/src/query/inc/qSqlparser.h @@ -233,6 +233,8 @@ SArray *tVariantListAppendToken(SArray *pList, SStrToken *pAliasToken, uint8_t s tSQLExpr *tSqlExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType); +int32_t tSqlExprCompare(tSQLExpr *left, tSQLExpr *right); + tSQLExpr *tSqlExprClone(tSQLExpr *pSrc); void tSqlExprDestroy(tSQLExpr *pExpr); diff --git a/src/query/src/qParserImpl.c b/src/query/src/qParserImpl.c index 658815adcf..029af0dcbb 100644 --- a/src/query/src/qParserImpl.c +++ b/src/query/src/qParserImpl.c @@ -290,6 +290,58 @@ tSQLExpr *tSqlExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType) { } +static FORCE_INLINE int32_t tStrTokenCompare(SStrToken* left, SStrToken* right) { + return (left->type == right->type && left->n == right->n && strncasecmp(left->z, right->z, left->n) == 0) ? 0 : 1; +} + + +int32_t tSqlExprCompare(tSQLExpr *left, tSQLExpr *right) { + if (left->nSQLOptr != right->nSQLOptr) { + return 1; + } + + if ((left->pLeft && right->pLeft == NULL) + || (left->pLeft == NULL && right->pLeft) + || (left->pRight && right->pRight == NULL) + || (left->pRight == NULL && right->pRight) + || (left->pParam && right->pParam == NULL) + || (left->pParam == NULL && right->pParam)) { + return 1; + } + + if (tVariantCompare(&left->val, &right->val)) { + return 1; + } + + if (tStrTokenCompare(&left->colInfo, &right->colInfo)) { + return 1; + } + + if (left->pParam && left->pParam->nExpr != right->pParam->nExpr) { + return 1; + } + + for (int32_t i = 0; i < right->pParam->nExpr; i++) { + tSQLExpr* pSubLeft = left->pParam->a[i].pNode; + tSQLExpr* pSubRight = right->pParam->a[i].pNode; + + if (tSqlExprCompare(pSubLeft, pSubRight)) { + return 1; + } + } + + if (left->pLeft && tSqlExprCompare(left->pLeft, right->pLeft)) { + return 1; + } + + if (left->pRight && tSqlExprCompare(left->pRight, right->pRight)) { + return 1; + } + + return 0; +} + + tSQLExpr *tSqlExprClone(tSQLExpr *pSrc) { tSQLExpr *pExpr = calloc(1, sizeof(tSQLExpr)); From a18b6723d3bd33ae591412b6db0b0fcfbda3d888 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Thu, 4 Mar 2021 10:03:41 +0800 Subject: [PATCH 003/178] support having --- src/query/src/qExecutor.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index f8119b0d4a..7d171dff9a 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -213,6 +213,10 @@ bool doFilterData(SQuery *pQuery, int32_t elemPos) { return true; } +bool doFilterDataOnce() { + +} + int64_t getNumOfResult(SQueryRuntimeEnv *pRuntimeEnv) { SQuery *pQuery = pRuntimeEnv->pQuery; bool hasMainFunction = hasMainOutput(pQuery); From a83cbb75d63d170e6012b635775161d19fe6ad28 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Thu, 4 Mar 2021 19:34:09 +0800 Subject: [PATCH 004/178] support having --- src/client/inc/tsclient.h | 9 +++- src/client/src/tscLocalMerge.c | 78 +++++++++++++++++++++++++++++++--- src/client/src/tscSQLParser.c | 53 ++++++++++++++++++++--- src/client/src/tscUtil.c | 3 +- src/query/src/qExecutor.c | 3 -- src/query/src/qParserImpl.c | 4 ++ 6 files changed, 133 insertions(+), 17 deletions(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index aa0f1c9cff..11b40326b9 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -129,13 +129,18 @@ typedef struct SColumn { SColumnFilterInfo *filterInfo; } SColumn; +typedef struct SExprFilter { + tSQLExpr *pExpr; //used for having parse + SArray *fp; + SColumn 
*pFilters; //having filter info +}SExprFilter; + typedef struct SInternalField { TAOS_FIELD field; bool visible; SExprInfo *pArithExprInfo; SSqlExpr *pSqlExpr; - tSQLExpr *pExpr; //used for having parse - SColumn *pFieldFilters; //having filter info + SExprFilter *pFieldFilters; } SInternalField; typedef struct SCond { diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index 61d90598b4..2b7a70de04 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -22,6 +22,7 @@ #include "tscUtil.h" #include "tschemautil.h" #include "tsclient.h" +#include "qutil.h" typedef struct SCompareParam { SLocalDataSource **pLocalData; @@ -1229,18 +1230,71 @@ static bool saveGroupResultInfo(SSqlObj *pSql) { return false; } -int32_t doHavingFilter(SQueryInfo* pQueryInfo) { + +bool doFilterFieldData(SQueryInfo* pQueryInfo, char *input, tFilePage* pOutput, SExprFilter* pFieldFilters, int16_t type, bool* notSkipped) { + bool qualified = false; + + for(int32_t k = 0; k < pFieldFilters->pFilters->numOfFilters; ++k) { + __filter_func_t fp = taosArrayGetP(pFieldFilters->fp, k); + SColumnFilterElem filterElem = {.filterInfo = pFieldFilters->pFilters->filterInfo[k]}; + + bool isnull = isNull(input, type); + if (isnull) { + if (fp == isNullOperator) { + qualified = true; + break; + } else { + continue; + } + } else { + if (fp == notNullOperator) { + qualified = true; + break; + } else if (fp == isNullOperator) { + continue; + } + } + + if (fp(&filterElem, input, input, type)) { + qualified = true; + break; + } + } + + *notSkipped = qualified; + + return TSDB_CODE_SUCCESS; +} + + +int32_t doHavingFilter(SQueryInfo* pQueryInfo, tFilePage* pOutput, bool* notSkipped) { + *notSkipped = true; + if (pQueryInfo->havingFieldNum <= 0) { return TSDB_CODE_SUCCESS; } + //int32_t exprNum = (int32_t) tscSqlExprNumOfExprs(pQueryInfo); + size_t numOfOutput = tscNumOfFields(pQueryInfo); for(int32_t i = 0; i < numOfOutput; ++i) { - SColumn* pFieldFilters = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i)->pFieldFilters; - if (pFieldFilters != NULL) { + SInternalField* pInterField = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i); + SExprFilter* pFieldFilters = pInterField->pFieldFilters; + + if (pFieldFilters == NULL) { continue; } - } + + int32_t type = pInterField->field.type; + + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); + char* pInput = pOutput->data + pOutput->num* pExpr->offset; + + doFilterFieldData(pQueryInfo, pInput, pOutput, pFieldFilters, type, notSkipped); + if (!notSkipped) { + return TSDB_CODE_SUCCESS; + } + } return TSDB_CODE_SUCCESS; } @@ -1287,7 +1341,21 @@ bool genFinalResults(SSqlObj *pSql, SLocalMerger *pLocalMerge, bool noMoreCurren doArithmeticCalculate(pQueryInfo, pResBuf, pModel->rowSize, pLocalMerge->finalModel->rowSize); } - doHavingFilter(pQueryInfo); + bool notSkipped = true; + + doHavingFilter(pQueryInfo, pResBuf, ¬Skipped); + + if (!notSkipped) { + pRes->numOfRows = 0; + pLocalMerge->discard = !noMoreCurrentGroupRes; + + if (pLocalMerge->discard) { + SColumnModel *pInternModel = pLocalMerge->pDesc->pColumnModel; + tColModelAppend(pInternModel, pLocalMerge->discardData, pLocalMerge->pTempBuffer->data, 0, 1, 1); + } + + return notSkipped; + } // no interval query, no fill operation if (pQueryInfo->interval.interval == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) { diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 588e9b18aa..0443c3cc5d 100644 --- a/src/client/src/tscSQLParser.c +++ 
b/src/client/src/tscSQLParser.c @@ -34,6 +34,7 @@ #include "tstoken.h" #include "tstrbuild.h" #include "ttokendef.h" +#include "qutil.h" #define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0" @@ -6709,17 +6710,26 @@ static int32_t checkQueryRangeForFill(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { int32_t slot = tscNumOfFields(pQueryInfo) - 1; SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, slot); pInfo->visible = false; - pInfo->pExpr = pExpr; if (pInfo->pFieldFilters == NULL) { - SColumn* pFieldFilters = calloc(1, sizeof(SColumn)); + SExprFilter* pFieldFilters = calloc(1, sizeof(SExprFilter)); if (pFieldFilters == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } - + + SColumn* pFilters = calloc(1, sizeof(SColumn)); + if (pFilters == NULL) { + tfree(pFieldFilters); + + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + pFieldFilters->pFilters = pFilters; pInfo->pFieldFilters = pFieldFilters; } + pInfo->pFieldFilters->pExpr = pExpr; + *interField = pInfo; return TSDB_CODE_SUCCESS; @@ -6731,7 +6741,7 @@ int32_t tscGetExprFilters(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr for (int32_t i = pQueryInfo->havingFieldNum - 1; i >= 0; --i) { pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i); - if (0 == tSqlExprCompare(pInfo->pExpr, pExpr)) { + if (pInfo->pFieldFilters && 0 == tSqlExprCompare(pInfo->pFieldFilters->pExpr, pExpr)) { *pField = pInfo; return TSDB_CODE_SUCCESS; } @@ -6747,7 +6757,33 @@ int32_t tscGetExprFilters(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr return TSDB_CODE_SUCCESS; } +static int32_t genExprFilter(SExprFilter * exprFilter) { + exprFilter->fp = taosArrayInit(4, sizeof(__filter_func_t)); + if (exprFilter->fp == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + for (int32_t i = 0; i < exprFilter->pFilters->numOfFilters; ++i) { + SColumnFilterInfo *filterInfo = &exprFilter->pFilters->filterInfo[i]; + + int32_t lower = filterInfo->lowerRelOptr; + int32_t upper = filterInfo->upperRelOptr; + if (lower == TSDB_RELATION_INVALID && upper == TSDB_RELATION_INVALID) { + tscError("invalid rel optr"); + return TSDB_CODE_TSC_APP_ERROR; + } + + __filter_func_t ffp = getFilterOperator(lower, upper); + if (ffp == NULL) { + tscError("invalid filter info"); + return TSDB_CODE_TSC_APP_ERROR; + } + + taosArrayPush(exprFilter->fp, &ffp); + } + + return TSDB_CODE_SUCCESS; +} static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr, int32_t sqlOptr) { const char* msg1 = "non binary column not support like operator"; @@ -6768,7 +6804,7 @@ static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t return ret; } - pColumn = pInfo->pFieldFilters; + pColumn = pInfo->pFieldFilters->pFilters; // this is a new filter condition on this column if (pColumn->numOfFilters == 0) { @@ -6819,7 +6855,12 @@ static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t } } - return doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, pInfo->field.type, pExpr); + int32_t ret = doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, pInfo->field.type, pExpr); + if (ret) { + return ret; + } + + return genExprFilter(pInfo->pFieldFilters); } int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr, int32_t parentOptr) { diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index e0b3e8a565..e725d0df1a 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1081,7 +1081,8 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo) { } if 
(pInfo->pFieldFilters != NULL) { - tscColumnDestroy(pInfo->pFieldFilters); + tscColumnDestroy(pInfo->pFieldFilters->pFilters); + tfree(pInfo->pFieldFilters); } } diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 7d171dff9a..05572acefd 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -213,9 +213,6 @@ bool doFilterData(SQuery *pQuery, int32_t elemPos) { return true; } -bool doFilterDataOnce() { - -} int64_t getNumOfResult(SQueryRuntimeEnv *pRuntimeEnv) { SQuery *pQuery = pRuntimeEnv->pQuery; diff --git a/src/query/src/qParserImpl.c b/src/query/src/qParserImpl.c index 029af0dcbb..551a55999b 100644 --- a/src/query/src/qParserImpl.c +++ b/src/query/src/qParserImpl.c @@ -296,6 +296,10 @@ static FORCE_INLINE int32_t tStrTokenCompare(SStrToken* left, SStrToken* right) int32_t tSqlExprCompare(tSQLExpr *left, tSQLExpr *right) { + if ((left == NULL && right) || (left && right == NULL)) { + return 1; + } + if (left->nSQLOptr != right->nSQLOptr) { return 1; } From e8eb77b85632171b552a3718772fb62a8e01eae0 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Fri, 5 Mar 2021 19:00:27 +0800 Subject: [PATCH 005/178] support having --- src/client/src/tscLocalMerge.c | 2 +- src/client/src/tscSQLParser.c | 90 ++++++++++++++++++---------------- 2 files changed, 49 insertions(+), 43 deletions(-) diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index 2b7a70de04..9d69e53566 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -1291,7 +1291,7 @@ int32_t doHavingFilter(SQueryInfo* pQueryInfo, tFilePage* pOutput, bool* notSkip char* pInput = pOutput->data + pOutput->num* pExpr->offset; doFilterFieldData(pQueryInfo, pInput, pOutput, pFieldFilters, type, notSkipped); - if (!notSkipped) { + if (*notSkipped == false) { return TSDB_CODE_SUCCESS; } } diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index df811721d9..57399a0590 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -6912,27 +6912,29 @@ int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr, in return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - if (pLeft->pParam == NULL || pLeft->pParam->nExpr < 1) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); - } + //if (pLeft->pParam == NULL || pLeft->pParam->nExpr < 1) { + // return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + //} - for (int32_t i = 0; i < pLeft->pParam->nExpr; i++) { - tSqlExprItem* pParamElem = &(pLeft->pParam->a[i]); - if (pParamElem->pNode->nSQLOptr != TK_ALL && pParamElem->pNode->nSQLOptr != TK_ID) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); - } - - SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if ((getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS)) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); - } - - STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); - STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; - - if (index.columnIndex <= 0 || - index.columnIndex >= tscGetNumOfColumns(pTableMeta)) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + if (pLeft->pParam) { + for (int32_t i = 0; i < pLeft->pParam->nExpr; i++) { + tSqlExprItem* pParamElem = &(pLeft->pParam->a[i]); + if (pParamElem->pNode->nSQLOptr != TK_ALL && pParamElem->pNode->nSQLOptr != TK_ID) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + SColumnIndex 
index = COLUMN_INDEX_INITIALIZER; + if ((getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); + STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; + + if (index.columnIndex <= 0 || + index.columnIndex >= tscGetNumOfColumns(pTableMeta)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } } } @@ -6941,23 +6943,10 @@ int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr, in -int32_t parseHavingClause(SQueryInfo* pQueryInfo, tSQLExpr* pExpr, SSqlCmd* pCmd) { +int32_t parseHavingClause(SQueryInfo* pQueryInfo, tSQLExpr* pExpr, SSqlCmd* pCmd, bool isSTable, int32_t joinQuery, int32_t intervalQuery) { const char* msg1 = "having only works with group by"; - //const char* msg2 = "invalid column name in having clause"; - //const char* msg3 = "columns from one table allowed as having columns"; - //const char* msg4 = "no tag allowed in having clause"; - const char* msg5 = "invalid expression in having clause"; - -/* - const char* msg1 = "too many columns in group by clause"; - const char* msg4 = "join query does not support group by"; - const char* msg7 = "not support group by expression"; - const char* msg8 = "not allowed column type for group by"; - const char* msg9 = "tags not allowed for table query"; -*/ - - // todo : handle two tables situation - //STableMetaInfo* pTableMetaInfo = NULL; + const char* msg2 = "functions can not be mixed up"; + const char* msg3 = "invalid expression in having clause"; if (pExpr == NULL) { return TSDB_CODE_SUCCESS; @@ -6968,7 +6957,7 @@ int32_t parseHavingClause(SQueryInfo* pQueryInfo, tSQLExpr* pExpr, SSqlCmd* pCmd } if (pExpr->pLeft == NULL || pExpr->pRight == NULL) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } if (pQueryInfo->colList == NULL) { @@ -6981,6 +6970,23 @@ int32_t parseHavingClause(SQueryInfo* pQueryInfo, tSQLExpr* pExpr, SSqlCmd* pCmd return ret; } + //REDO function check + if (!functionCompatibleCheck(pQueryInfo, joinQuery, intervalQuery)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + + /* + * transfer sql functions that need secondary merge into another format + * in dealing with super table queries such as: count/first/last + */ + if (isSTable) { + tscTansformFuncForSTableQuery(pQueryInfo); + + if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) { + return TSDB_CODE_TSC_INVALID_SQL; + } + } + return TSDB_CODE_SUCCESS; } @@ -7114,11 +7120,6 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { return TSDB_CODE_TSC_INVALID_SQL; } - // parse the having clause in the first place - if (parseHavingClause(pQueryInfo, pQuerySql->pHaving, pCmd) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_TSC_INVALID_SQL; - } - // set where info STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta); @@ -7161,6 +7162,11 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { } } + // parse the having clause in the first place + if (parseHavingClause(pQueryInfo, pQuerySql->pHaving, pCmd, isSTable, joinQuery, intervalQuery) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_TSC_INVALID_SQL; + } + // no result due to invalid query time range if (pQueryInfo->window.skey > pQueryInfo->window.ekey) { pQueryInfo->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; From 
4b824dde0b61af7490c3ac894d8949a80b458679 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Mon, 8 Mar 2021 15:25:07 +0800 Subject: [PATCH 006/178] fix bug --- src/client/inc/tsclient.h | 1 + src/client/src/tscLocalMerge.c | 3 +-- src/client/src/tscSQLParser.c | 10 ++++++++-- src/query/src/qParserImpl.c | 16 +++++++++------- 4 files changed, 19 insertions(+), 11 deletions(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 6ddb61477f..d36ecc54c5 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -131,6 +131,7 @@ typedef struct SColumn { typedef struct SExprFilter { tSQLExpr *pExpr; //used for having parse + SSqlExpr *pSqlExpr; SArray *fp; SColumn *pFilters; //having filter info }SExprFilter; diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index 9d69e53566..9870830d2e 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -1287,8 +1287,7 @@ int32_t doHavingFilter(SQueryInfo* pQueryInfo, tFilePage* pOutput, bool* notSkip int32_t type = pInterField->field.type; - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - char* pInput = pOutput->data + pOutput->num* pExpr->offset; + char* pInput = pOutput->data + pOutput->num* pFieldFilters->pSqlExpr->offset; doFilterFieldData(pQueryInfo, pInput, pOutput, pFieldFilters, type, notSkipped); if (*notSkipped == false) { diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 57399a0590..accfc01b07 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -6708,6 +6708,9 @@ static int32_t checkQueryRangeForFill(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { ++pQueryInfo->havingFieldNum; + size_t n = tscSqlExprNumOfExprs(pQueryInfo); + SSqlExpr* pSqlExpr = tscSqlExprGet(pQueryInfo, n - 1); + int32_t slot = tscNumOfFields(pQueryInfo) - 1; SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, slot); pInfo->visible = false; @@ -6726,6 +6729,7 @@ static int32_t checkQueryRangeForFill(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { } pFieldFilters->pFilters = pFilters; + pFieldFilters->pSqlExpr = pSqlExpr; pInfo->pFieldFilters = pFieldFilters; } @@ -6740,7 +6744,7 @@ int32_t tscGetExprFilters(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr SInternalField* pInfo = NULL; for (int32_t i = pQueryInfo->havingFieldNum - 1; i >= 0; --i) { - pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i); + pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, pQueryInfo->fieldsInfo.numOfOutput - 1 - i); if (pInfo->pFieldFilters && 0 == tSqlExprCompare(pInfo->pFieldFilters->pExpr, pExpr)) { *pField = pInfo; @@ -6818,10 +6822,12 @@ static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t return TSDB_CODE_TSC_OUT_OF_MEMORY; } } else if (sqlOptr == TK_OR) { - int32_t ret = tscInsertExprFields(pCmd, pQueryInfo, pExpr->pLeft, &pInfo); + int32_t ret = tscGetExprFilters(pCmd, pQueryInfo, pExpr->pLeft, &pInfo); if (ret) { return ret; } + + pColumn = pInfo->pFieldFilters->pFilters; // TODO fixme: failed to invalid the filter expression: "col1 = 1 OR col2 = 2" pColFilter = addColumnFilterInfo(pColumn); diff --git a/src/query/src/qParserImpl.c b/src/query/src/qParserImpl.c index 551a55999b..ccfe15a20b 100644 --- a/src/query/src/qParserImpl.c +++ b/src/query/src/qParserImpl.c @@ -324,13 +324,15 @@ int32_t tSqlExprCompare(tSQLExpr *left, tSQLExpr *right) { if (left->pParam && left->pParam->nExpr != right->pParam->nExpr) { return 1; } - - for (int32_t i = 0; i < 
right->pParam->nExpr; i++) { - tSQLExpr* pSubLeft = left->pParam->a[i].pNode; - tSQLExpr* pSubRight = right->pParam->a[i].pNode; - - if (tSqlExprCompare(pSubLeft, pSubRight)) { - return 1; + + if (right->pParam && left->pParam) { + for (int32_t i = 0; i < right->pParam->nExpr; i++) { + tSQLExpr* pSubLeft = left->pParam->a[i].pNode; + tSQLExpr* pSubRight = right->pParam->a[i].pNode; + + if (tSqlExprCompare(pSubLeft, pSubRight)) { + return 1; + } } } From 9a5b1debb4e87e7700708f19381449a1467c520b Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Wed, 10 Mar 2021 10:31:25 +0800 Subject: [PATCH 007/178] fix bug --- src/client/src/tscSQLParser.c | 40 ++++++++++++----------------------- src/query/src/qAggMain.c | 12 +++++------ 2 files changed, 20 insertions(+), 32 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index accfc01b07..d02598d85f 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -1644,18 +1644,6 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - /* - * transfer sql functions that need secondary merge into another format - * in dealing with super table queries such as: count/first/last - */ - if (isSTable) { - tscTansformFuncForSTableQuery(pQueryInfo); - - if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) { - return TSDB_CODE_TSC_INVALID_SQL; - } - } - return TSDB_CODE_SUCCESS; } @@ -5968,7 +5956,7 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo if (TSDB_COL_IS_TAG(pColIndex->flag)) { SColumnIndex index = {.tableIndex = pQueryInfo->groupbyExpr.tableIndex, .columnIndex = colIndex}; - SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG, &index, type, bytes, getNewResColId(pQueryInfo), bytes, true); + SSqlExpr* pExpr = tscSqlExprInsert(pQueryInfo, size - pQueryInfo->havingFieldNum, TSDB_FUNC_TAG, &index, type, bytes, getNewResColId(pQueryInfo), bytes, true); memset(pExpr->aliasName, 0, sizeof(pExpr->aliasName)); tstrncpy(pExpr->aliasName, name, sizeof(pExpr->aliasName)); @@ -5977,7 +5965,7 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo // NOTE: tag column does not add to source column list SColumnList ids = getColumnList(1, 0, pColIndex->colIndex); - insertResultField(pQueryInfo, (int32_t)size, &ids, bytes, (int8_t)type, name, pExpr); + insertResultField(pQueryInfo, (int32_t)size - pQueryInfo->havingFieldNum, &ids, bytes, (int8_t)type, name, pExpr); } else { // if this query is "group by" normal column, interval is not allowed if (pQueryInfo->interval.interval > 0) { @@ -6981,18 +6969,6 @@ int32_t parseHavingClause(SQueryInfo* pQueryInfo, tSQLExpr* pExpr, SSqlCmd* pCmd return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - /* - * transfer sql functions that need secondary merge into another format - * in dealing with super table queries such as: count/first/last - */ - if (isSTable) { - tscTansformFuncForSTableQuery(pQueryInfo); - - if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) { - return TSDB_CODE_TSC_INVALID_SQL; - } - } - return TSDB_CODE_SUCCESS; } @@ -7173,6 +7149,18 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { return TSDB_CODE_TSC_INVALID_SQL; } + /* + * transfer sql functions that need secondary merge into another format + * in dealing with super table queries such as: count/first/last + */ + if (isSTable) { + tscTansformFuncForSTableQuery(pQueryInfo); + + 
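/* note: with tscTansformFuncForSTableQuery() applied above, this check now also covers functions appended while parsing the HAVING clause */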
if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) { + return TSDB_CODE_TSC_INVALID_SQL; + } + } + // no result due to invalid query time range if (pQueryInfo->window.skey > pQueryInfo->window.ekey) { pQueryInfo->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index f99fe3f072..ef296ab0fb 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -4655,12 +4655,12 @@ static void sumrate_finalizer(SQLFunctionCtx *pCtx) { int32_t functionCompatList[] = { // count, sum, avg, min, max, stddev, percentile, apercentile, first, last 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - // last_row,top, bottom, spread, twa, leastsqr, ts, ts_dummy, tag_dummy, ts_z - 4, -1, -1, 1, 1, 1, 1, 1, 1, -1, - // tag, colprj, tagprj, arithmetic, diff, first_dist, last_dist, interp rate irate - 1, 1, 1, 1, -1, 1, 1, 5, 1, 1, - // sum_rate, sum_irate, avg_rate, avg_irate - 1, 1, 1, 1, + // last_row,top, bottom, spread, twa, leastsqr, ts, ts_dummy, tag_dummy, ts_z + 4, -1, -1, 1, 1, 1, 1, 1, 1, -1, + // tag, colprj, tagprj, arithmetic, diff, first_dist, last_dist, stddev_dst, interp rate + 1, 1, 1, 1, -1, 1, 1, 1, 5, 1, + // irate sum_rate, sum_irate, avg_rate, avg_irate + 1, 1, 1, 1, 1 }; SAggFunctionInfo aAggs[] = {{ From 0cfb65a89f8ce22eac9d66beb0a600842c77b067 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Thu, 11 Mar 2021 11:19:06 +0800 Subject: [PATCH 008/178] fix bug; add cases --- src/client/src/tscSQLParser.c | 59 ++++++++++++++++++++++++++--------- 1 file changed, 45 insertions(+), 14 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index d02598d85f..4e0d2a05b6 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -1576,7 +1576,7 @@ bool isValidDistinctSql(SQueryInfo* pQueryInfo) { int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable, bool joinQuery, bool intervalQuery) { assert(pSelection != NULL && pCmd != NULL); - const char* msg2 = "functions can not be mixed up"; + const char* msg2 = "functions or others can not be mixed up"; const char* msg3 = "not support query expression"; const char* msg5 = "invalid function name"; const char* msg6 = "only support distinct one tag"; @@ -2926,6 +2926,23 @@ bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) return false; } +static bool groupbyTagsOrNull(SQueryInfo* pQueryInfo) { + if (pQueryInfo->groupbyExpr.columnInfo == NULL || + taosArrayGetSize(pQueryInfo->groupbyExpr.columnInfo) == 0) { + return true; + } + + size_t s = taosArrayGetSize(pQueryInfo->groupbyExpr.columnInfo); + for (int32_t i = 0; i < s; i++) { + SColIndex* colIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, i); + if (colIndex->flag != TSDB_COL_TAG) { + return false; + } + } + + return true; +} + static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool intervalQuery) { int32_t startIdx = 0; @@ -2942,7 +2959,7 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool int32_t factor = functionCompatList[tscSqlExprGet(pQueryInfo, startIdx)->functionId]; - if (tscSqlExprGet(pQueryInfo, 0)->functionId == TSDB_FUNC_LAST_ROW && (joinQuery || intervalQuery)) { + if (tscSqlExprGet(pQueryInfo, 0)->functionId == TSDB_FUNC_LAST_ROW && (joinQuery || intervalQuery || !groupbyTagsOrNull(pQueryInfo))) { return false; } @@ -2970,7 +2987,7 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool } } - if 
(functionId == TSDB_FUNC_LAST_ROW && (joinQuery || intervalQuery)) { + if (functionId == TSDB_FUNC_LAST_ROW && (joinQuery || intervalQuery || !groupbyTagsOrNull(pQueryInfo))) { return false; } } @@ -6877,6 +6894,10 @@ int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr, in return getHavingExpr(pCmd, pQueryInfo, pExpr->pRight, pExpr->nSQLOptr); } + if (pLeft == NULL || pRight == NULL) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + if ((pLeft->nSQLOptr >= TK_COUNT && pLeft->nSQLOptr <= TK_AVG_IRATE) && (pRight->nSQLOptr >= TK_COUNT && pRight->nSQLOptr <= TK_AVG_IRATE)) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); @@ -6913,21 +6934,31 @@ int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr, in if (pLeft->pParam) { for (int32_t i = 0; i < pLeft->pParam->nExpr; i++) { tSqlExprItem* pParamElem = &(pLeft->pParam->a[i]); - if (pParamElem->pNode->nSQLOptr != TK_ALL && pParamElem->pNode->nSQLOptr != TK_ID) { + if (pParamElem->pNode->nSQLOptr != TK_ALL && + pParamElem->pNode->nSQLOptr != TK_ID && + pParamElem->pNode->nSQLOptr != TK_STRING && + pParamElem->pNode->nSQLOptr != TK_INTEGER && + pParamElem->pNode->nSQLOptr != TK_FLOAT) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if ((getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS)) { + if (pParamElem->pNode->nSQLOptr == TK_ID && (pParamElem->pNode->colInfo.z == NULL && pParamElem->pNode->colInfo.n == 0)) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - - STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); - STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; - - if (index.columnIndex <= 0 || - index.columnIndex >= tscGetNumOfColumns(pTableMeta)) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + + if (pParamElem->pNode->nSQLOptr == TK_ID) { + SColumnIndex index = COLUMN_INDEX_INITIALIZER; + if ((getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); + STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; + + if (index.columnIndex <= 0 || + index.columnIndex >= tscGetNumOfColumns(pTableMeta)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } } } } @@ -6939,7 +6970,7 @@ int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr, in int32_t parseHavingClause(SQueryInfo* pQueryInfo, tSQLExpr* pExpr, SSqlCmd* pCmd, bool isSTable, int32_t joinQuery, int32_t intervalQuery) { const char* msg1 = "having only works with group by"; - const char* msg2 = "functions can not be mixed up"; + const char* msg2 = "functions or others can not be mixed up"; const char* msg3 = "invalid expression in having clause"; if (pExpr == NULL) { From a19ed6e956c7d345d41941bf605c69a3fba5ae6b Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Thu, 11 Mar 2021 11:20:17 +0800 Subject: [PATCH 009/178] add case --- tests/script/general/parser/having.sim | 1841 ++++++++++++++++++++++++ 1 file changed, 1841 insertions(+) create mode 100644 tests/script/general/parser/having.sim diff --git a/tests/script/general/parser/having.sim b/tests/script/general/parser/having.sim new file mode 100644 index 0000000000..2e95664d31 --- /dev/null +++ b/tests/script/general/parser/having.sim @@ -0,0 +1,1841 
@@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2 +system sh/exec.sh -n dnode1 -s start + +sleep 100 +sql connect +print ======================== dnode1 start + +$db = testdb + +sql create database $db +sql use $db + +sql create stable st2 (ts timestamp, f1 int, f2 float, f3 double, f4 bigint, f5 smallint, f6 tinyint, f7 bool, f8 binary(10), f9 nchar(10)) tags (id1 int, id2 float, id3 nchar(10), id4 double, id5 smallint, id6 bigint, id7 binary(10)) + +sql create table tb1 using st2 tags (1,1.0,"1",1.0,1,1,"1"); +sql create table tb2 using st2 tags (2,2.0,"2",2.0,2,2,"2"); +sql create table tb3 using st2 tags (3,3.0,"3",3.0,3,3,"3"); +sql create table tb4 using st2 tags (4,4.0,"4",4.0,4,4,"4"); + +sql insert into tb1 values (now-200s,1,1.0,1.0,1,1,1,true ,"1","1") +sql insert into tb1 values (now-150s,1,1.0,1.0,1,1,1,false,"1","1") +sql insert into tb1 values (now-100s,2,2.0,2.0,2,2,2,true ,"2","2") +sql insert into tb1 values (now-50s ,2,2.0,2.0,2,2,2,false,"2","2") +sql insert into tb1 values (now ,3,3.0,3.0,3,3,3,true ,"3","3") +sql insert into tb1 values (now+50s ,3,3.0,3.0,3,3,3,false,"3","3") +sql insert into tb1 values (now+100s,4,4.0,4.0,4,4,4,true ,"4","4") +sql insert into tb1 values (now+150s,4,4.0,4.0,4,4,4,false,"4","4") + + +sql select count(*),f1 from st2 group by f1 having count(f1) > 0; +if $rows != 4 then + return -1 +endi +if $data00 != 2 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data10 != 2 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data21 != 3 then + return -1 +endi +if $data30 != 2 then + return -1 +endi +if $data31 != 4 then + return -1 +endi + + + + +sql select count(*),f1 from st2 group by f1 having count(*) > 0; +if $rows != 4 then + return -1 +endi +if $data00 != 2 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data10 != 2 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data21 != 3 then + return -1 +endi +if $data30 != 2 then + return -1 +endi +if $data31 != 4 then + return -1 +endi + + +sql select count(*),f1 from st2 group by f1 having count(f2) > 0; +if $rows != 4 then + return -1 +endi +if $data00 != 2 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data10 != 2 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data21 != 3 then + return -1 +endi +if $data30 != 2 then + return -1 +endi +if $data31 != 4 then + return -1 +endi + +sql_error select top(f1,2) from st2 group by f1 having count(f2) > 0; + +sql select last(f1) from st2 group by f1 having count(f2) > 0; +if $rows != 4 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != 2 then + return -1 +endi +if $data20 != 3 then + return -1 +endi +if $data30 != 4 then + return -1 +endi + +sql_error select top(f1,2) from st2 group by f1 having count(f2) > 0; + +sql_error select top(f1,2) from st2 group by f1 having count(f2) > 0; + +sql_error select top(f1,2) from st2 group by f1 having avg(f1) > 0; + +sql select avg(f1),count(f1) from st2 group by f1 having avg(f1) > 2; +if $rows != 2 then + return -1 +endi +if $data00 != 3.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi + + +sql select avg(f1),count(f1) from st2 group by 
f1 having avg(f1) > 2 and sum(f1) > 0; +if $rows != 2 then + return -1 +endi +if $data00 != 3.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi + +sql select avg(f1),count(f1),sum(f1) from st2 group by f1 having avg(f1) > 2 and sum(f1) > 0; +if $rows != 2 then + return -1 +endi +if $data00 != 3.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 6 then + return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 8 then + return -1 +endi + +sql select avg(f1),count(f1),sum(f1) from st2 group by f1 having avg(f1) > 2; +if $rows != 2 then + return -1 +endi +if $data00 != 3.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 6 then + return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 8 then + return -1 +endi + +sql select avg(f1),count(f1),sum(f1) from st2 group by f1 having sum(f1) > 0; +if $rows != 4 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 2 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 4 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 6 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi +if $data31 != 2 then + return -1 +endi +if $data32 != 8 then + return -1 +endi + +sql select avg(f1),count(f1),sum(f1) from st2 group by f1 having sum(f1) > 2 and sum(f1) < 6; +if $rows != 1 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 4 then + return -1 +endi + + +sql select avg(f1),count(f1),sum(f1) from st2 group by f1 having 1 <= sum(f1) and 5 >= sum(f1); +if $rows != 2 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 2 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 4 then + return -1 +endi + +sql select avg(f1),count(f1),sum(f1),twa(f1) from st2 group by tbname having twa(f1) > 0; +if $rows != 1 then + return -1 +endi +if $data00 != 2.500000000 then + return -1 +endi +if $data01 != 8 then + return -1 +endi +if $data02 != 20 then + return -1 +endi +if $data04 != tb1 then + return -1 +endi + +sql_error select avg(f1),count(f1),sum(f1),twa(f1) from st2 group by f1 having twa(f1) > 0; + +sql select avg(f1),count(f1),sum(f1),twa(f1) from st2 group by tbname having sum(f1) > 0; +if $rows != 1 then + return -1 +endi +if $data00 != 2.500000000 then + return -1 +endi +if $data01 != 8 then + return -1 +endi +if $data02 != 20 then + return -1 +endi +if $data04 != tb1 then + return -1 +endi + +sql_error select avg(f1),count(f1),sum(f1),twa(f1) from st2 group by f1 having sum(f1) > 0; + +sql select avg(f1),count(f1),sum(f1) from st2 group by f1 having sum(f1) > 0; +if $rows != 4 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 2 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 4 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 
+endi +if $data21 != 2 then + return -1 +endi +if $data22 != 6 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi +if $data31 != 2 then + return -1 +endi +if $data32 != 8 then + return -1 +endi + +sql select avg(f1),count(f1),sum(f1) from st2 group by f1 having sum(f1) > 3; +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 4 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 6 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 8 then + return -1 +endi + +###########and issue +sql select avg(f1),count(f1),sum(f1) from st2 group by f1 having sum(f1) > 3 and sum(f1) > 1; +if $rows != 4 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 2 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 4 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 6 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi +if $data31 != 2 then + return -1 +endi +if $data32 != 8 then + return -1 +endi + + +sql select avg(f1),count(f1),sum(f1) from st2 group by f1 having sum(f1) > 3 or sum(f1) > 1; +if $rows != 4 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 2 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 4 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 6 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi +if $data31 != 2 then + return -1 +endi +if $data32 != 8 then + return -1 +endi + +sql select avg(f1),count(f1),sum(f1) from st2 group by f1 having sum(f1) > 3 or sum(f1) > 4; +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 4 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 6 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 8 then + return -1 +endi + +############or issue +sql select avg(f1),count(f1),sum(f1) from st2 group by f1 having sum(f1) > 3 or avg(f1) > 4; +if $rows != 0 then + return -1 +endi + +sql select avg(f1),count(f1),sum(f1) from st2 group by f1 having (sum(f1) > 3); +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 4 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 6 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 8 then + return -1 +endi + +sql_error select avg(f1),count(f1),sum(f1) from st2 group by f1 having (sum(*) > 3); + +sql select avg(f1),count(f1),sum(f1) from st2 group by f1 having (sum(st2.f1) > 3); +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi 
+if $data02 != 4 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 6 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 8 then + return -1 +endi + +sql select avg(f1),count(st2.*),sum(f1) from st2 group by f1 having (sum(st2.f1) > 3); +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 4 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 6 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 8 then + return -1 +endi + +sql select avg(f1),count(st2.*),sum(f1),stddev(f1),stddev(f1) from st2 group by f1; +if $rows != 4 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 2 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data04 != 0.000000000 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 4 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data14 != 0.000000000 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 6 then + return -1 +endi +if $data23 != 0.000000000 then + return -1 +endi +if $data24 != 0.000000000 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi +if $data31 != 2 then + return -1 +endi +if $data32 != 8 then + return -1 +endi +if $data33 != 0.000000000 then + return -1 +endi +if $data34 != 0.000000000 then + return -1 +endi + +sql select avg(f1),count(st2.*),sum(f1),stddev(f1) from st2 group by f1 having (stddev(st2.f1) > 3); +if $rows != 0 then + return -1 +endi + +sql select avg(f1),count(st2.*),sum(f1),stddev(f1) from st2 group by f1 having (stddev(st2.f1) < 1); +if $rows != 4 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 2 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 4 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 6 then + return -1 +endi +if $data23 != 0.000000000 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi +if $data31 != 2 then + return -1 +endi +if $data32 != 8 then + return -1 +endi +if $data33 != 0.000000000 then + return -1 +endi + + +sql_error select avg(f1),count(st2.*),sum(f1),stddev(f1) from st2 group by f1 having (LEASTSQUARES(f1) < 1); + +sql_error select avg(f1),count(st2.*),sum(f1),stddev(f1) from st2 group by f1 having LEASTSQUARES(f1) < 1; + +sql_error select avg(f1),count(st2.*),sum(f1),stddev(f1) from st2 group by f1 having LEASTSQUARES(f1,1,1) < 1; + +sql_error select avg(f1),count(st2.*),sum(f1),stddev(f1) from st2 group by f1 having LEASTSQUARES(f1,1,1) > 2; + +sql_error select avg(f1),count(st2.*),sum(f1),stddev(f1),LEASTSQUARES(f1,1,1) from st2 group by f1 having LEASTSQUARES(f1,1,1) > 2; + +sql_error select avg(f1),count(st2.*),sum(f1),stddev(f1),LEASTSQUARES(f1,1,1) from st2 group by 
f1 having sum(f1) > 2; + +sql select avg(f1),count(st2.*),sum(f1),stddev(f1) from st2 group by f1 having min(f1) > 2; +if $rows != 2 then + return -1 +endi +if $data00 != 3.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 6 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 8 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi + +sql select avg(f1),count(st2.*),sum(f1),stddev(f1),min(f1) from st2 group by f1 having min(f1) > 2; +if $rows != 2 then + return -1 +endi +if $data00 != 3.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 6 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data04 != 3 then + return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 8 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data14 != 4 then + return -1 +endi + +sql select avg(f1),count(st2.*),sum(f1),stddev(f1),min(f1) from st2 group by f1 having max(f1) > 2; +if $rows != 2 then + return -1 +endi +if $data00 != 3.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 6 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data04 != 3 then + return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 8 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data14 != 4 then + return -1 +endi + +sql select avg(f1),count(st2.*),sum(f1),stddev(f1),min(f1),max(f1) from st2 group by f1 having max(f1) != 2; +if $rows != 3 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 2 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data04 != 1 then + return -1 +endi +if $data05 != 1 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 6 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data14 != 3 then + return -1 +endi +if $data15 != 3 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 8 then + return -1 +endi +if $data23 != 0.000000000 then + return -1 +endi +if $data24 != 4 then + return -1 +endi +if $data25 != 4 then + return -1 +endi + +sql select avg(f1),count(st2.*),sum(f1),stddev(f1),min(f1),max(f1) from st2 group by f1 having first(f1) != 2; +if $rows != 3 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 2 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data04 != 1 then + return -1 +endi +if $data05 != 1 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 6 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data14 != 3 then + return -1 +endi +if $data15 != 3 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 8 then + return -1 +endi +if $data23 != 0.000000000 then + return -1 +endi +if $data24 != 4 then + return -1 +endi +if $data25 != 4 then + return -1 +endi + + + +sql 
select avg(f1),count(st2.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1) from st2 group by f1 having first(f1) != 2; +if $rows != 3 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 2 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data04 != 1 then + return -1 +endi +if $data05 != 1 then + return -1 +endi +if $data06 != 1 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 6 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data14 != 3 then + return -1 +endi +if $data15 != 3 then + return -1 +endi +if $data16 != 3 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 8 then + return -1 +endi +if $data23 != 0.000000000 then + return -1 +endi +if $data24 != 4 then + return -1 +endi +if $data25 != 4 then + return -1 +endi +if $data26 != 4 then + return -1 +endi + + + +sql_error select avg(f1),count(st2.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1) from st2 group by f1 having top(f1,1); + +sql_error select avg(f1),count(st2.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1) from st2 group by f1 having top(f1,1) > 1; + +sql_error select avg(f1),count(st2.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1) from st2 group by f1 having bottom(f1,1) > 1; + +sql_error select avg(f1),count(st2.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1),top(f1,1),bottom(f1,1) from st2 group by f1 having bottom(f1,1) > 1; + +sql_error select avg(f1),count(st2.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1),top(f1,1),bottom(f1,1) from st2 group by f1 having sum(f1) > 1; + +sql_error select PERCENTILE(f1) from st2 group by f1 having sum(f1) > 1; + +sql_error select PERCENTILE(f1,20) from st2 group by f1 having sum(f1) > 1; + +sql select aPERCENTILE(f1,20) from st2 group by f1 having sum(f1) > 1; +if $rows != 4 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi + +sql select aPERCENTILE(f1,20) from st2 group by f1 having apercentile(f1,1) > 1; +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi + +sql select aPERCENTILE(f1,20) from st2 group by f1 having apercentile(f1,1) > 1 and apercentile(f1,1) < 50; +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi + +sql select aPERCENTILE(f1,20) from st2 group by f1 having apercentile(f1,1) > 1 and apercentile(f1,1) < 3; +if $rows != 1 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi + +sql select aPERCENTILE(f1,20) from st2 group by f1 having apercentile(f1,1) > 1 and apercentile(f1,3) < 3; +if $rows != 1 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi + +sql_error select aPERCENTILE(f1,20) from st2 group by f1 having apercentile(1) > 1; + +sql_error select aPERCENTILE(f1,20),LAST_ROW(f1) from st2 group by f1 having apercentile(1) > 1; + +sql_error select aPERCENTILE(f1,20),LAST_ROW(f1) from st2 group by f1 having apercentile(f1,1) > 1; + +sql_error select sum(f1) from 
st2 group by f1 having last_row(f1) > 1; + +sql_error select avg(f1) from st2 group by f1 having diff(f1) > 0; + +sql_error select avg(f1),diff(f1) from st2 group by f1 having avg(f1) > 0; + +sql_error select avg(f1),diff(f1) from st2 group by f1 having spread(f2) > 0; + +sql select avg(f1) from st2 group by f1 having spread(f2) > 0; +if $rows != 0 then + return -1 +endi + +sql select avg(f1) from st2 group by f1 having spread(f2) = 0; +if $rows != 4 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi + +sql select avg(f1),spread(f2) from st2 group by f1; +if $rows != 4 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data21 != 0.000000000 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi +if $data31 != 0.000000000 then + return -1 +endi + +sql select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) = 0; +if $rows != 4 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data12 != 0.000000000 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data21 != 0.000000000 then + return -1 +endi +if $data22 != 0.000000000 then + return -1 +endi +if $data23 != 0.000000000 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi +if $data31 != 0.000000000 then + return -1 +endi +if $data32 != 0.000000000 then + return -1 +endi +if $data33 != 0.000000000 then + return -1 +endi + +sql select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) != 0; +if $rows != 0 then + return -1 +endi + + +sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) + 1 > 0; + +sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) + 1; + +sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) + sum(f1); + +sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) + sum(f1) > 0; + +sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) - sum(f1) > 0; + +sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) * sum(f1) > 0; + +sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) / sum(f1) > 0; + +sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) > sum(f1); + +sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) and sum(f1); + +sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) 0 and sum(f1); + +sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) + 0 and sum(f1); + +sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) - f1 and sum(f1); + +sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) - id1 and sum(f1); + 
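+# a HAVING condition may not mix an aggregate with a plain column or tag
+# (f1, id1), nor use bare arithmetic between aggregates as the condition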
+sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) > id1 and sum(f1); + +sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) > id1 and sum(f1) > 1; + +sql select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) > 2 and sum(f1) > 1; +if $rows != 0 then + return -1 +endi + +sql select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) = 0 and sum(f1) > 1; +if $rows != 4 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data12 != 0.000000000 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data21 != 0.000000000 then + return -1 +endi +if $data22 != 0.000000000 then + return -1 +endi +if $data23 != 0.000000000 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi +if $data31 != 0.000000000 then + return -1 +endi +if $data32 != 0.000000000 then + return -1 +endi +if $data33 != 0.000000000 then + return -1 +endi + +sql select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) = 0 and avg(f1) > 1; +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data12 != 0.000000000 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 0.000000000 then + return -1 +endi +if $data22 != 0.000000000 then + return -1 +endi +if $data23 != 0.000000000 then + return -1 +endi + +sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by c1 having spread(f1) = 0 and avg(f1) > 1; + +sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by id1 having avg(id1) > 0; + +sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by id1 having avg(f1) > id1; + +sql_error select avg(f1),spread(f1,f2,st2.f1),avg(id1) from st2 group by id1 having avg(f1) > id1; + +sql select avg(f1),spread(f1,f2,st2.f1) from st2 group by id1 having avg(f1) > 0; +if $rows != 1 then + return -1 +endi +if $data00 != 2.500000000 then + return -1 +endi +if $data01 != 3.000000000 then + return -1 +endi +if $data02 != 3.000000000 then + return -1 +endi +if $data03 != 3.000000000 then + return -1 +endi +if $data04 != 1 then + return -1 +endi + +sql select avg(f1),spread(f1,f2,st2.f1) from st2 group by id1 having avg(f1) < 2; +if $rows != 0 then + return -1 +endi + +sql select avg(f1),spread(f1,f2,st2.f1) from st2 where f1 > 0 group by f1 having avg(f1) > 0; +if $rows != 4 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data12 != 0.000000000 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data21 != 0.000000000 then + return -1 
+endi +if $data22 != 0.000000000 then + return -1 +endi +if $data23 != 0.000000000 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi +if $data31 != 0.000000000 then + return -1 +endi +if $data32 != 0.000000000 then + return -1 +endi +if $data33 != 0.000000000 then + return -1 +endi + +sql select avg(f1),spread(f1,f2,st2.f1) from st2 where f1 > 2 group by f1 having avg(f1) > 0; +if $rows != 2 then + return -1 +endi +if $data00 != 3.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data12 != 0.000000000 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi + +sql select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 2 group by f1 having avg(f1) > 0; +if $rows != 2 then + return -1 +endi +if $data00 != 3.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data12 != 0.000000000 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi + +sql select avg(f1),spread(f1,f2,st2.f1) from st2 where f3 > 2 group by f1 having avg(f1) > 0; +if $rows != 2 then + return -1 +endi +if $data00 != 3.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data12 != 0.000000000 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi + +sql select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 3 group by f1 having avg(f1) > 0; +if $rows != 1 then + return -1 +endi +if $data00 != 4.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi + +sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 3 group by f1 having avg(ts) > 0; + +sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 3 group by f1 having avg(f7) > 0; + +sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 3 group by f1 having avg(f8) > 0; + +sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 3 group by f1 having avg(f9) > 0; + +sql select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 3 group by f1 having count(f9) > 0; +if $rows != 1 then + return -1 +endi +if $data00 != 4.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi + +sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 3 group by f1 having last(f9) > 0; + +sql select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 3 group by f1 having last(f2) > 0; +if $rows != 1 then + return -1 +endi +if $data00 != 4.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi + +sql select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 3 group by f1 having last(f3) > 0; +if $rows != 1 then + 
return -1 +endi +if $data00 != 4.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi + +sql select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 1 group by f1 having last(f3) > 0; +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data12 != 0.000000000 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 0.000000000 then + return -1 +endi +if $data22 != 0.000000000 then + return -1 +endi +if $data23 != 0.000000000 then + return -1 +endi + +sql select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 1 group by f1 having last(f4) > 0; +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data12 != 0.000000000 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 0.000000000 then + return -1 +endi +if $data22 != 0.000000000 then + return -1 +endi +if $data23 != 0.000000000 then + return -1 +endi + +sql select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 1 group by f1 having last(f5) > 0; +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data12 != 0.000000000 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 0.000000000 then + return -1 +endi +if $data22 != 0.000000000 then + return -1 +endi +if $data23 != 0.000000000 then + return -1 +endi + +sql select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 1 group by f1 having last(f6) > 0; +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data12 != 0.000000000 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 0.000000000 then + return -1 +endi +if $data22 != 0.000000000 then + return -1 +endi +if $data23 != 0.000000000 then + return -1 +endi + + +sql_error select avg(f1),spread(f1,f2,st2.f1),f1,f2 from st2 where f2 > 1 group by f1 having last(f6) > 0; + +sql_error select avg(f1),spread(f1,f2,st2.f1),f1,f6 from st2 where f2 > 1 group by f1 having last(f6) > 0; + +sql_error select avg(f1),spread(f1,f2,st2.f1),f1,f6 from st2 where f2 > 1 group by f1,f2 having last(f6) > 0; + +sql_error select avg(f1),spread(f1,f2,st2.f1),f1,f6 from 
st2 where f2 > 1 group by f1,id1 having last(f6) > 0; + +sql_error select avg(f1),spread(f1,f2,st2.f1),f1,f6 from st2 where f2 > 1 group by id1 having last(f6) > 0; + +sql select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 1 group by id1 having last(f6) > 0; +if $rows != 1 then + return -1 +endi +if $data00 != 3.000000000 then + return -1 +endi +if $data01 != 2.000000000 then + return -1 +endi +if $data02 != 2.000000000 then + return -1 +endi +if $data03 != 2.000000000 then + return -1 +endi +if $data04 != 1 then + return -1 +endi + +sql_error select top(f1,2) from tb1 group by f1 having count(f1) > 0; + +system sh/exec.sh -n dnode1 -s stop -x SIGINT From f0a32e5c3f0a6324cddcf9bd6dd78bc1157dd3b1 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Thu, 11 Mar 2021 16:49:57 +0800 Subject: [PATCH 010/178] pass expr filter to taosd --- src/client/inc/tsclient.h | 30 ++++++------- src/client/src/tscSQLParser.c | 1 + src/client/src/tscServer.c | 36 ++++++++++++++++ src/inc/taosmsg.h | 60 ++++++++++++++------------ src/query/src/qExecutor.c | 79 ++++++++++++++++++++++++++++++++++- 5 files changed, 164 insertions(+), 42 deletions(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index d36ecc54c5..2466262e7b 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -96,20 +96,6 @@ typedef struct STableMetaInfo { SArray *tagColList; // SArray, involved tag columns } STableMetaInfo; -/* the structure for sql function in select clause */ -typedef struct SSqlExpr { - char aliasName[TSDB_COL_NAME_LEN]; // as aliasName - SColIndex colInfo; - uint64_t uid; // refactor use the pointer - int16_t functionId; // function id in aAgg array - int16_t resType; // return value type - int16_t resBytes; // length of return value - int32_t interBytes; // inter result buffer size - int16_t numOfParams; // argument value of each function - tVariant param[3]; // parameters are not more than 3 - int32_t offset; // sub result column value of arithmetic expression. - int16_t resColId; // result column id -} SSqlExpr; typedef struct SColumnIndex { int16_t tableIndex; @@ -129,6 +115,22 @@ typedef struct SColumn { SColumnFilterInfo *filterInfo; } SColumn; +/* the structure for sql function in select clause */ +typedef struct SSqlExpr { + char aliasName[TSDB_COL_NAME_LEN]; // as aliasName + SColIndex colInfo; + uint64_t uid; // refactor use the pointer + int16_t functionId; // function id in aAgg array + int16_t resType; // return value type + int16_t resBytes; // length of return value + int32_t interBytes; // inter result buffer size + int16_t numOfParams; // argument value of each function + tVariant param[3]; // parameters are not more than 3 + int32_t offset; // sub result column value of arithmetic expression. 
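+                                     // (doHavingFilter() multiplies this offset by the page row count to locate the column's data block in the columnar output)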
+ int16_t resColId; // result column id + SColumn *pFilter; // expr filter +} SSqlExpr; + typedef struct SExprFilter { tSQLExpr *pExpr; //used for having parse SSqlExpr *pSqlExpr; diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 4e0d2a05b6..0ea053e8ea 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -6735,6 +6735,7 @@ static int32_t checkQueryRangeForFill(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { pFieldFilters->pFilters = pFilters; pFieldFilters->pSqlExpr = pSqlExpr; + pSqlExpr->pFilter = pFilters; pInfo->pFieldFilters = pFieldFilters; } diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index d005eaf75c..6f72f1a079 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -838,8 +838,44 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pSqlFuncExpr->functionId = htons(pExpr->functionId); pSqlFuncExpr->numOfParams = htons(pExpr->numOfParams); pSqlFuncExpr->resColId = htons(pExpr->resColId); + if (pExpr->pFilter && pExpr->pFilter->numOfFilters > 0) { + pSqlFuncExpr->filterNum = htonl(pExpr->pFilter->numOfFilters); + } else { + pSqlFuncExpr->filterNum = 0; + } + pMsg += sizeof(SSqlFuncMsg); + if (pSqlFuncExpr->filterNum) { + pMsg += sizeof(SColumnFilterInfo) * pExpr->pFilter->numOfFilters; + + // append the filter information after the basic column information + for (int32_t f = 0; f < pExpr->pFilter->numOfFilters; ++f) { + SColumnFilterInfo *pColFilter = &pExpr->pFilter->filterInfo[f]; + + SColumnFilterInfo *pFilterMsg = &pSqlFuncExpr->filterInfo[f]; + pFilterMsg->filterstr = htons(pColFilter->filterstr); + + if (pColFilter->filterstr) { + pFilterMsg->len = htobe64(pColFilter->len); + memcpy(pMsg, (void *)pColFilter->pz, (size_t)(pColFilter->len + 1)); + pMsg += (pColFilter->len + 1); // append the additional filter binary info + } else { + pFilterMsg->lowerBndi = htobe64(pColFilter->lowerBndi); + pFilterMsg->upperBndi = htobe64(pColFilter->upperBndi); + } + + pFilterMsg->lowerRelOptr = htons(pColFilter->lowerRelOptr); + pFilterMsg->upperRelOptr = htons(pColFilter->upperRelOptr); + + if (pColFilter->lowerRelOptr == TSDB_RELATION_INVALID && pColFilter->upperRelOptr == TSDB_RELATION_INVALID) { + tscError("invalid filter info"); + return TSDB_CODE_TSC_INVALID_SQL; + } + } + } + + for (int32_t j = 0; j < pExpr->numOfParams; ++j) { // todo add log pSqlFuncExpr->arg[j].argType = htons((uint16_t)pExpr->param[j].nType); diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index 721b9ca605..33d5421a28 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -390,33 +390,6 @@ typedef struct SColIndex { char name[TSDB_COL_NAME_LEN]; } SColIndex; -/* sql function msg, to describe the message to vnode about sql function - * operations in select clause */ -typedef struct SSqlFuncMsg { - int16_t functionId; - int16_t numOfParams; - int16_t resColId; // result column id, id of the current output column - - SColIndex colInfo; - struct ArgElem { - int16_t argType; - int16_t argBytes; - union { - double d; - int64_t i64; - char * pz; - } argValue; - } arg[3]; -} SSqlFuncMsg; - -typedef struct SExprInfo { - SSqlFuncMsg base; - struct tExprNode* pExpr; - int16_t bytes; - int16_t type; - int32_t interBytes; - int64_t uid; -} SExprInfo; typedef struct SColumnFilterInfo { int16_t lowerRelOptr; @@ -439,6 +412,39 @@ typedef struct SColumnFilterInfo { }; } SColumnFilterInfo; +/* sql function msg, to describe the message to vnode about sql function + * operations in select clause */ +typedef struct SSqlFuncMsg { 
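+  /* wire layout when filterNum > 0:
+   *   [fixed fields below][SColumnFilterInfo x filterNum][string filter payloads]
+   * built in tscBuildQueryMsg() and decoded back in convertQueryMsg() */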
+ int16_t functionId; + int16_t numOfParams; + int16_t resColId; // result column id, id of the current output column + + SColIndex colInfo; + struct ArgElem { + int16_t argType; + int16_t argBytes; + union { + double d; + int64_t i64; + char * pz; + } argValue; + } arg[3]; + + int32_t filterNum; + SColumnFilterInfo filterInfo[]; +} SSqlFuncMsg; + + +typedef struct SExprInfo { + SSqlFuncMsg base; + SColumnFilterInfo * pFilter; + struct tExprNode* pExpr; + int16_t bytes; + int16_t type; + int32_t interBytes; + int64_t uid; +} SExprInfo; + /* * for client side struct, we only need the column id, type, bytes are not necessary * But for data in vnode side, we need all the following information. diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 05572acefd..0f82f87082 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -6120,9 +6120,35 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { pExprMsg->functionId = htons(pExprMsg->functionId); pExprMsg->numOfParams = htons(pExprMsg->numOfParams); pExprMsg->resColId = htons(pExprMsg->resColId); + pExprMsg->filterNum = htonl(pExprMsg->filterNum); pMsg += sizeof(SSqlFuncMsg); + SColumnFilterInfo* pExprFilterInfo = pExprMsg->filterInfo; + + pMsg += sizeof(SColumnFilterInfo) * pExprMsg->filterNum; + + for (int32_t f = 0; f < pExprMsg->filterNum; ++f) { + SColumnFilterInfo *pFilterMsg = (SColumnFilterInfo *)pExprFilterInfo; + + pFilterMsg->filterstr = htons(pFilterMsg->filterstr); + + if (pFilterMsg->filterstr) { + pFilterMsg->len = htobe64(pFilterMsg->len); + + pFilterMsg->pz = (int64_t)pMsg; + pMsg += (pFilterMsg->len + 1); + } else { + pFilterMsg->lowerBndi = htobe64(pFilterMsg->lowerBndi); + pFilterMsg->upperBndi = htobe64(pFilterMsg->upperBndi); + } + + pFilterMsg->lowerRelOptr = htons(pFilterMsg->lowerRelOptr); + pFilterMsg->upperRelOptr = htons(pFilterMsg->upperRelOptr); + + pExprFilterInfo++; + } + for (int32_t j = 0; j < pExprMsg->numOfParams; ++j) { pExprMsg->arg[j].argType = htons(pExprMsg->arg[j].argType); pExprMsg->arg[j].argBytes = htons(pExprMsg->arg[j].argBytes); @@ -6304,6 +6330,42 @@ _cleanup: return code; } +int32_t cloneExprFilterInfo(SColumnFilterInfo **dst, SColumnFilterInfo* src, int32_t filterNum) { + if (filterNum <= 0) { + return TSDB_CODE_SUCCESS; + } + + *dst = calloc(filterNum, sizeof(*src)); + if (*dst == NULL) { + return TSDB_CODE_QRY_OUT_OF_MEMORY; + } + + memcpy(*dst, src, sizeof(*src) * filterNum); + + for (int32_t i = 0; i < filterNum; i++) { + if (dst[i]->filterstr && dst[i]->len > 0) { + void *pz = calloc(1, dst[i]->len + 1); + + if (pz == NULL) { + if (i == 0) { + free(*dst); + } else { + freeColumnFilterInfo(*dst, i); + } + + return TSDB_CODE_QRY_OUT_OF_MEMORY; + } + + memcpy(pz, (void *)src->pz, src->len + 1); + + dst[i]->pz = (int64_t)pz; + } + } + + return TSDB_CODE_SUCCESS; +} + + static int32_t buildArithmeticExprFromMsg(SExprInfo *pArithExprInfo, SQueryTableMsg *pQueryMsg) { qDebug("qmsg:%p create arithmetic expr from binary", pQueryMsg); @@ -6396,6 +6458,13 @@ int32_t createQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t numOfOutpu type = s->type; bytes = s->bytes; } + + if (pExprs[i].base.filterNum > 0) { + int32_t ret = cloneExprFilterInfo(&pExprs[i].pFilter, pExprMsg[i]->filterInfo, pExprMsg[i]->filterNum); + if (ret) { + return ret; + } + } } int32_t param = (int32_t)pExprs[i].base.arg[0].argValue.i64; @@ -6770,6 +6839,10 @@ _cleanup_query: tExprTreeDestroy(pExprInfo->pExpr, NULL); pExprInfo->pExpr = NULL; } + + if 
(pExprInfo->pFilter) { + freeColumnFilterInfo(pExprInfo->pFilter, pExprInfo->base.filterNum); + } } tfree(pExprs); @@ -6850,7 +6923,7 @@ void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters) { } for (int32_t i = 0; i < numOfFilters; i++) { - if (pFilter[i].filterstr) { + if (pFilter[i].filterstr && pFilter[i].pz) { free((void*)(pFilter[i].pz)); } } @@ -6892,6 +6965,10 @@ static void* destroyQueryFuncExpr(SExprInfo* pExprInfo, int32_t numOfExpr) { if (pExprInfo[i].pExpr != NULL) { tExprTreeDestroy(pExprInfo[i].pExpr, NULL); } + + if (pExprInfo[i].pFilter) { + freeColumnFilterInfo(pExprInfo[i].pFilter, pExprInfo[i].base.filterNum); + } } tfree(pExprInfo); From 53a128b2a3a469b06ae28ff013397b8d015433c8 Mon Sep 17 00:00:00 2001 From: wu champion Date: Mon, 22 Mar 2021 17:02:53 +0800 Subject: [PATCH 011/178] fix case,add delete file when processing over --- tests/pytest/query/query1970YearsAf.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/pytest/query/query1970YearsAf.py b/tests/pytest/query/query1970YearsAf.py index d2eead241f..441f835e89 100644 --- a/tests/pytest/query/query1970YearsAf.py +++ b/tests/pytest/query/query1970YearsAf.py @@ -18,7 +18,6 @@ import json import subprocess import datetime - from util.log import * from util.sql import * from util.cases import * @@ -71,7 +70,7 @@ class TDTestCase: "update": 0 } - # 设置创建的超级表格式 + # set stable schema stable1 = { "name": "stb2", "child_table_exists": "no", @@ -83,7 +82,7 @@ class TDTestCase: "insert_rows": 5000, "multi_thread_write_one_tbl": "no", "number_of_tbl_in_one_sql": 0, - "rows_per_tbl": 1000, + "rows_per_tbl": 1, "max_sql_len": 65480, "disorder_ratio": 0, "disorder_range": 1000, @@ -117,7 +116,7 @@ class TDTestCase: ] } - # 创建不同的超级表格式并添加至super_tables + # create different stables like stable1 and add to list super_tables super_tables = [] super_tables.append(stable1) database = { @@ -235,7 +234,7 @@ class TDTestCase: tdLog.info("==========step2:query join") self.sqlsquery() - # 进行数据落盘后检查 + # after wal and sync, check again tdSql.query("show dnodes") index = tdSql.getData(0, 0) tdDnodes.stop(index) @@ -246,8 +245,9 @@ class TDTestCase: def stop(self): tdSql.close() + rm_cmd = f"rm -f /tmp/insert* > /dev/null 2>&1" + _ = subprocess.check_output(rm_cmd, shell=True).decode("utf-8") tdLog.success(f"{__file__} successfully executed") - tdCases.addLinux(__file__, TDTestCase()) tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file From 7c749e24c8814f03eb147048158efa1ae7b7bfd9 Mon Sep 17 00:00:00 2001 From: wu champion Date: Fri, 19 Mar 2021 18:35:26 +0800 Subject: [PATCH 012/178] [TD-1380] fix the case of query1970YearsAf.py --- tests/pytest/query/query1970YearsAf.py | 253 +++++++++++++++++++++++++ 1 file changed, 253 insertions(+) create mode 100644 tests/pytest/query/query1970YearsAf.py diff --git a/tests/pytest/query/query1970YearsAf.py b/tests/pytest/query/query1970YearsAf.py new file mode 100644 index 0000000000..9902f6908b --- /dev/null +++ b/tests/pytest/query/query1970YearsAf.py @@ -0,0 +1,253 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################

+# -*- coding: utf-8 -*-

+import taos
+import sys
+import os
+import json
+import subprocess
+import datetime
+
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+from util.dnodes import TDDnode
+
+class TDTestCase:
+
+    def __init__(self):
+        self.path = ""
+
+    def init(self, conn, logSql):
+        tdLog.debug(f"start to execute {__file__}")
+        tdSql.init(conn.cursor())
+
+    def getcfgPath(self, path):
+        binPath = os.path.dirname(os.path.realpath(__file__))
+        binPath = binPath + "/../../../debug/"
+        tdLog.debug(f"binPath {binPath}")
+        binPath = os.path.realpath(binPath)
+        tdLog.debug(f"binPath real path {binPath}")
+        if path == "":
+            self.path = os.path.abspath(binPath + "../../")
+        else:
+            self.path = os.path.realpath(path)
+        return self.path
+
+    def getCfgDir(self):
+        self.getcfgPath(self.path)
+        self.cfgDir = f"{self.path}/sim/psim/cfg"
+        return self.cfgDir
+
+    def creatcfg(self):
+        dbinfo = {
+            "name": "db",
+            "drop": "yes",
+            "replica": 1,
+            "days": 10,
+            "cache": 16,
+            "blocks": 8,
+            "precision": "ms",
+            "keep": 36500,
+            "minRows": 100,
+            "maxRows": 4096,
+            "comp": 2,
+            "walLevel": 1,
+            "cachelast": 0,
+            "quorum": 1,
+            "fsync": 3000,
+            "update": 0
+        }
+
+        # 设置创建的超级表格式
+        stable1 = {
+            "name": "stb2",
+            "child_table_exists": "no",
+            "childtable_count": 10,
+            "childtable_prefix": "t",
+            "auto_create_table": "no",
+            "data_source": "rand",
+            "insert_mode": "taosc",
+            "insert_rows": 5000,
+            "multi_thread_write_one_tbl": "no",
+            "number_of_tbl_in_one_sql": 0,
+            "rows_per_tbl": 1000,
+            "max_sql_len": 65480,
+            "disorder_ratio": 0,
+            "disorder_range": 1000,
+            "timestamp_step": 20000,
+            "start_timestamp": "1969-12-31 00:00:00.000",
+            "sample_format": "csv",
+            "sample_file": "./sample.csv",
+            "tags_file": "",
+            "columns": [
+                {"type": "INT", "count": 2},
+                {"type": "DOUBLE", "count": 2},
+                {"type": "BIGINT", "count": 2},
+                {"type": "FLOAT", "count": 2},
+                {"type": "SMALLINT", "count": 2},
+                {"type": "TINYINT", "count": 2},
+                {"type": "BOOL", "count": 2},
+                {"type": "NCHAR", "len": 3, "count": 1},
+                {"type": "BINARY", "len": 8, "count": 1}
+
+            ],
+            "tags": [
+                {"type": "INT", "count": 2},
+                {"type": "DOUBLE", "count": 2},
+                {"type": "BIGINT", "count": 2},
+                {"type": "FLOAT", "count": 2},
+                {"type": "SMALLINT", "count": 2},
+                {"type": "TINYINT", "count": 2},
+                {"type": "BOOL", "count": 2},
+                {"type": "NCHAR", "len": 3, "count": 1},
+                {"type": "BINARY", "len": 8, "count": 1}
+            ]
+        }
+
+        # 创建不同的超级表格式并添加至super_tables
+        super_tables = []
+        super_tables.append(stable1)
+        database = {
+            "dbinfo": dbinfo,
+            "super_tables": super_tables
+        }
+
+        cfgdir = self.getCfgDir()
+        create_table = {
+            "filetype": "insert",
+            "cfgdir": cfgdir,
+            "host": "127.0.0.1",
+            "port": 6030,
+            "user": "root",
+            "password": "taosdata",
+            "thread_count": 4,
+            "thread_count_create_tbl": 4,
+            "result_file": "/tmp/insert_res.txt",
+            "confirm_parameter_prompt": "no",
+            "insert_interval": 0,
+            "num_of_records_per_req": 100,
+            "databases": [database]
+        }
+        return create_table
+
+    def createinsertfile(self):
+        create_table = self.creatcfg()
+        date = datetime.datetime.now().strftime("%Y%m%d%H%M")
+        file_create_table = f"/tmp/insert_{date}.json"
+
+        with open(file_create_table, 'w') as f:
+            json.dump(create_table, f)
+        return 
file_create_table + + def inserttable(self, filepath): + create_table_cmd = f"taosdemo -f {filepath} > /tmp/1.log 2>&1" + _ = subprocess.check_output(create_table_cmd, shell=True).decode("utf-8") + + def sqlsquery(self): + # stable query + tdSql.query( + "select * from stb2 where stb2.ts < '1970-01-01 00:00:00.000' " + ) + tdSql.checkRows(43200) + + tdSql.query( + "select * from stb2 where stb2.ts >= '1970-01-01 00:00:00.000' " + ) + tdSql.checkRows(6800) + + tdSql.query( + "select * from stb2 where stb2.ts > '1969-12-31 23:00:00.000' and stb2.ts <'1970-01-01 01:00:00.000' " + ) + tdSql.checkRows(3590) + + # child-table query + tdSql.query( + "select * from t0 where t0.ts < '1970-01-01 00:00:00.000' " + ) + tdSql.checkRows(4320) + + tdSql.query( + "select * from t1 where t1.ts >= '1970-01-01 00:00:00.000' " + ) + tdSql.checkRows(680) + + tdSql.query( + "select * from t9 where t9.ts > '1969-12-31 22:00:00.000' and t9.ts <'1970-01-01 02:00:00.000' " + ) + tdSql.checkRows(719) + + tdSql.query( + "select * from t0,t1 where t0.ts=t1.ts and t1.ts >= '1970-01-01 00:00:00.000' " + ) + tdSql.checkRows(680) + + tdSql.query( + "select diff(col1) from t0 where t0.ts >= '1970-01-01 00:00:00.000' " + ) + tdSql.checkRows(679) + + tdSql.query( + "select t0,col1 from stb2 where stb2.ts < '1970-01-01 00:00:00.000' order by ts" + ) + tdSql.checkRows(43200) + + # query with timestamp in 'where ...' + tdSql.query( + "select * from stb2 where stb2.ts > -28800000 " + ) + tdSql.checkRows(6790) + + tdSql.query( + "select * from stb2 where stb2.ts > -28800000 and stb2.ts < '1970-01-01 08:00:00.000' " + ) + tdSql.checkRows(6790) + + tdSql.query( + "select * from stb2 where stb2.ts < -28800000 and stb2.ts > '1969-12-31 22:00:00.000' " + ) + tdSql.checkRows(3590) + + def run(self): + s = 'reset query cache' + tdSql.execute(s) + s = 'create database if not exists db' + tdSql.execute(s) + s = 'use db' + tdSql.execute(s) + + tdLog.info("==========step1:create table stable and child table,then insert data automatically") + insertfile = self.createinsertfile() + self.inserttable(insertfile) + + tdLog.info("==========step2:query join") + self.sqlsquery() + + # 进行数据落盘后检查 + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + + tdLog.info("==========step3: query join again") + self.sqlsquery() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file From dff5c11f290453a349bdb885e00128586700bece Mon Sep 17 00:00:00 2001 From: wu champion Date: Sat, 20 Mar 2021 16:10:45 +0800 Subject: [PATCH 013/178] [test] add test case and bug case --- tests/pytest/fulltest.sh | 4 ++ tests/pytest/query/bug3351.py | 74 ++++++++++++++++++++++++++ tests/pytest/query/query1970YearsAf.py | 4 +- 3 files changed, 80 insertions(+), 2 deletions(-) create mode 100644 tests/pytest/query/bug3351.py diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 5037f2c399..aa8e679e81 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -196,6 +196,10 @@ python3 ./test.py -f query/bug2119.py python3 ./test.py -f query/isNullTest.py python3 ./test.py -f query/queryWithTaosdKilled.py python3 ./test.py -f query/floatCompare.py +python3 ./test.py -f query/query1970YearsAf.py +python3 ./test.py -f query/bug3351.py + + #stream python3 ./test.py -f stream/metric_1.py diff --git a/tests/pytest/query/bug3351.py 
b/tests/pytest/query/bug3351.py new file mode 100644 index 0000000000..288d071a69 --- /dev/null +++ b/tests/pytest/query/bug3351.py @@ -0,0 +1,74 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 36500") + tdSql.execute("use db") + tdLog.printNoPrefix("==========step1:create table && insert data") + + tdSql.execute( + "create table stb1 (ts timestamp, c1 int) TAGS(t1 int)" + ) + tdSql.execute("create table t0 using stb1 tags(1)") + tdSql.execute("insert into t0 values (-865000000, 1)") + tdSql.execute("insert into t0 values (-864000000, 2)") + tdSql.execute("insert into t0 values (-863000000, 3)") + tdSql.execute("insert into t0 values (-15230000, 4)") + tdSql.execute("insert into t0 values (-15220000, 5)") + tdSql.execute("insert into t0 values (-15210000, 6)") + + tdLog.printNoPrefix("==========step2:query") + # bug1:when ts > -864000000, return 0 rows; + # bug2:when ts = -15220000, return 0 rows. + tdSql.query('select * from t0 where ts < -864000000') + tdSql.checkRows(1) + tdSql.query('select * from t0 where ts <= -864000000') + tdSql.checkRows(2) + tdSql.query('select * from t0 where ts = -864000000') + tdSql.checkRows(1) + tdSql.query('select * from t0 where ts > -864000000') + tdSql.checkRows(4) + tdSql.query('select * from t0 where ts >= -864000000') + tdSql.checkRows(5) + tdSql.query('select * from t0 where ts < -15220000') + tdSql.checkRows(4) + tdSql.query('select * from t0 where ts <= -15220000') + tdSql.checkRows(5) + tdSql.query('select * from t0 where ts = -15220000') + tdSql.checkRows(1) + tdSql.query('select * from t0 where ts > -15220000') + tdSql.checkRows(1) + tdSql.query('select * from t0 where ts >= -15220000') + tdSql.checkRows(2) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/query/query1970YearsAf.py b/tests/pytest/query/query1970YearsAf.py index 9902f6908b..d2eead241f 100644 --- a/tests/pytest/query/query1970YearsAf.py +++ b/tests/pytest/query/query1970YearsAf.py @@ -88,7 +88,7 @@ class TDTestCase: "disorder_ratio": 0, "disorder_range": 1000, "timestamp_step": 20000, - "start_timestamp": "1969-12-31 00:00:00.000", + "start_timestamp": "1969-12-30 23:59:40.000", "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", @@ -153,7 +153,7 @@ class TDTestCase: return file_create_table def inserttable(self, filepath): - create_table_cmd = f"taosdemo -f {filepath} > /tmp/1.log 2>&1" + create_table_cmd = f"taosdemo -f {filepath} > /dev/null 2>&1" _ = subprocess.check_output(create_table_cmd, shell=True).decode("utf-8") def sqlsquery(self): 
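The raw integers used as query bounds in these two cases are millisecond offsets from the Unix epoch, which go negative for timestamps before 1970. A minimal standalone sketch (plain Python; the names EPOCH and to_ms are illustrative and not part of the test framework) shows where the constants come from, assuming the test server runs in UTC+8 as the expected row counts imply:

from datetime import datetime, timezone, timedelta

EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc)

def to_ms(dt):
    # milliseconds since the Unix epoch; negative for pre-1970 instants
    return int((dt - EPOCH).total_seconds() * 1000)

# one of the boundary values probed by bug3351.py: ten days before the epoch
print(to_ms(datetime(1969, 12, 22, tzinfo=timezone.utc)))   # -864000000

# local midnight 1970-01-01 in UTC+8, the bound used by query1970YearsAf.py
cst = timezone(timedelta(hours=8))
print(to_ms(datetime(1970, 1, 1, tzinfo=cst)))              # -28800000

The series later settles on a start_timestamp of 1969-12-31 00:00:00.000 local time (-115200000 ms); with the 20000 ms timestamp_step and 5000 rows per table, the last inserted row lands at -115200000 + 4999 * 20000 = -15220000 ms, which matches the -15220000 boundary probed in bug3351.py.
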
From ef248d09fd248775dfea4a88136354b42dd54f1e Mon Sep 17 00:00:00 2001 From: wu champion Date: Mon, 22 Mar 2021 17:02:53 +0800 Subject: [PATCH 014/178] fix case,add delete file when processing over --- tests/pytest/query/query1970YearsAf.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/pytest/query/query1970YearsAf.py b/tests/pytest/query/query1970YearsAf.py index d2eead241f..441f835e89 100644 --- a/tests/pytest/query/query1970YearsAf.py +++ b/tests/pytest/query/query1970YearsAf.py @@ -18,7 +18,6 @@ import json import subprocess import datetime - from util.log import * from util.sql import * from util.cases import * @@ -71,7 +70,7 @@ class TDTestCase: "update": 0 } - # 设置创建的超级表格式 + # set stable schema stable1 = { "name": "stb2", "child_table_exists": "no", @@ -83,7 +82,7 @@ class TDTestCase: "insert_rows": 5000, "multi_thread_write_one_tbl": "no", "number_of_tbl_in_one_sql": 0, - "rows_per_tbl": 1000, + "rows_per_tbl": 1, "max_sql_len": 65480, "disorder_ratio": 0, "disorder_range": 1000, @@ -117,7 +116,7 @@ class TDTestCase: ] } - # 创建不同的超级表格式并添加至super_tables + # create different stables like stable1 and add to list super_tables super_tables = [] super_tables.append(stable1) database = { @@ -235,7 +234,7 @@ class TDTestCase: tdLog.info("==========step2:query join") self.sqlsquery() - # 进行数据落盘后检查 + # after wal and sync, check again tdSql.query("show dnodes") index = tdSql.getData(0, 0) tdDnodes.stop(index) @@ -246,8 +245,9 @@ class TDTestCase: def stop(self): tdSql.close() + rm_cmd = f"rm -f /tmp/insert* > /dev/null 2>&1" + _ = subprocess.check_output(rm_cmd, shell=True).decode("utf-8") tdLog.success(f"{__file__} successfully executed") - tdCases.addLinux(__file__, TDTestCase()) tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file From 2de96282490234d89195ad401d001bc71ab71bb5 Mon Sep 17 00:00:00 2001 From: wu champion Date: Tue, 23 Mar 2021 13:40:29 +0800 Subject: [PATCH 015/178] fix the case --- tests/pytest/query/query1970YearsAf.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/pytest/query/query1970YearsAf.py b/tests/pytest/query/query1970YearsAf.py index 441f835e89..9cc8948c2f 100644 --- a/tests/pytest/query/query1970YearsAf.py +++ b/tests/pytest/query/query1970YearsAf.py @@ -243,10 +243,11 @@ class TDTestCase: tdLog.info("==========step3: query join again") self.sqlsquery() - def stop(self): - tdSql.close() rm_cmd = f"rm -f /tmp/insert* > /dev/null 2>&1" _ = subprocess.check_output(rm_cmd, shell=True).decode("utf-8") + + def stop(self): + tdSql.close() tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) From 965d553964aea78aeb2e59b3bad9811ea08a86f5 Mon Sep 17 00:00:00 2001 From: wu champion Date: Tue, 23 Mar 2021 13:59:29 +0800 Subject: [PATCH 016/178] fix the case:add delete tmp file step --- tests/pytest/query/query1970YearsAf.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/pytest/query/query1970YearsAf.py b/tests/pytest/query/query1970YearsAf.py index 9cc8948c2f..ea83d8669d 100644 --- a/tests/pytest/query/query1970YearsAf.py +++ b/tests/pytest/query/query1970YearsAf.py @@ -87,7 +87,7 @@ class TDTestCase: "disorder_ratio": 0, "disorder_range": 1000, "timestamp_step": 20000, - "start_timestamp": "1969-12-30 23:59:40.000", + "start_timestamp": "1969-12-31 00:00:00.000", "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", @@ -172,7 +172,7 @@ class TDTestCase: ) tdSql.checkRows(3590) - # child-table query + # 
child-tables query tdSql.query( "select * from t0 where t0.ts < '1970-01-01 00:00:00.000' " ) @@ -243,6 +243,7 @@ class TDTestCase: tdLog.info("==========step3: query join again") self.sqlsquery() + # delete temporary file rm_cmd = f"rm -f /tmp/insert* > /dev/null 2>&1" _ = subprocess.check_output(rm_cmd, shell=True).decode("utf-8") From 5f27c38a0b0753d1ff9bcab9fbf64dd378d923ab Mon Sep 17 00:00:00 2001 From: wu champion <72908628+wu-champion@users.noreply.github.com> Date: Tue, 23 Mar 2021 14:10:25 +0800 Subject: [PATCH 017/178] Update query1970YearsAf.py --- tests/pytest/query/query1970YearsAf.py | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/tests/pytest/query/query1970YearsAf.py b/tests/pytest/query/query1970YearsAf.py index 307180a29c..b3f3ff7a82 100644 --- a/tests/pytest/query/query1970YearsAf.py +++ b/tests/pytest/query/query1970YearsAf.py @@ -87,11 +87,7 @@ class TDTestCase: "disorder_ratio": 0, "disorder_range": 1000, "timestamp_step": 20000, -<<<<<<< HEAD "start_timestamp": "1969-12-31 00:00:00.000", -======= - "start_timestamp": "1969-12-30 23:59:40.000", ->>>>>>> 53a128b2a3a469b06ae28ff013397b8d015433c8 "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", @@ -176,11 +172,7 @@ class TDTestCase: ) tdSql.checkRows(3590) -<<<<<<< HEAD # child-tables query -======= - # child-table query ->>>>>>> 53a128b2a3a469b06ae28ff013397b8d015433c8 tdSql.query( "select * from t0 where t0.ts < '1970-01-01 00:00:00.000' " ) @@ -251,20 +243,13 @@ class TDTestCase: tdLog.info("==========step3: query join again") self.sqlsquery() -<<<<<<< HEAD # delete temporary file rm_cmd = f"rm -f /tmp/insert* > /dev/null 2>&1" _ = subprocess.check_output(rm_cmd, shell=True).decode("utf-8") def stop(self): tdSql.close() -======= - def stop(self): - tdSql.close() - rm_cmd = f"rm -f /tmp/insert* > /dev/null 2>&1" - _ = subprocess.check_output(rm_cmd, shell=True).decode("utf-8") ->>>>>>> 53a128b2a3a469b06ae28ff013397b8d015433c8 tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) From 2eb7174f762a09e6631a7a28d1d0906c6ac926ef Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Fri, 26 Mar 2021 19:29:42 +0800 Subject: [PATCH 018/178] refact code --- src/client/inc/tsclient.h | 2 +- src/client/src/tscSQLParser.c | 131 +++++++++++++--------------------- src/inc/ttokendef.h | 1 + src/query/inc/qSqlparser.h | 11 ++- src/query/inc/sql.y | 2 +- src/query/src/qSqlParser.c | 19 ++--- src/query/src/sql.c | 2 +- 7 files changed, 70 insertions(+), 98 deletions(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 446658e38b..8983365e58 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -132,7 +132,7 @@ typedef struct SSqlExpr { } SSqlExpr; typedef struct SExprFilter { - tSQLExpr *pExpr; //used for having parse + tSqlExpr *pExpr; //used for having parse SSqlExpr *pSqlExpr; SArray *fp; SColumn *pFilters; //having filter info diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index efd3c7bb93..14691decec 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -1098,40 +1098,6 @@ static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd) { return true; } -static void exchangeExpr(tSQLExpr* pExpr) { - tSQLExpr* pLeft = pExpr->pLeft; - tSQLExpr* pRight = pExpr->pRight; - - if ((pRight->nSQLOptr == TK_ID || 
(pRight->nSQLOptr >= TK_COUNT && pRight->nSQLOptr <= TK_AVG_IRATE)) && - (pLeft->nSQLOptr == TK_INTEGER || pLeft->nSQLOptr == TK_FLOAT || pLeft->nSQLOptr == TK_STRING || pLeft->nSQLOptr == TK_BOOL)) { - /* - * exchange value of the left handside and the value of the right-handside - * to make sure that the value of filter expression always locates in - * right-handside and - * the column-id/function is at the left handside. - */ - uint32_t optr = 0; - switch (pExpr->nSQLOptr) { - case TK_LE: - optr = TK_GE; - break; - case TK_LT: - optr = TK_GT; - break; - case TK_GT: - optr = TK_LT; - break; - case TK_GE: - optr = TK_LE; - break; - default: - optr = pExpr->nSQLOptr; - } - - pExpr->nSQLOptr = optr; - SWAP(pExpr->pLeft, pExpr->pRight, void*); - } -} static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pCmd) { assert(pTagsList != NULL); @@ -3118,6 +3084,7 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pRight = pExpr->pRight; STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, columnIndex->tableIndex); + SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex->columnIndex); int16_t colType = pSchema->type; @@ -3324,10 +3291,8 @@ static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SC } pColumn->colIndex = *pIndex; - - int16_t colType = pSchema->type; - return doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, colType, pExpr); + return doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, pIndex, pExpr); } static int32_t getTablenameCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pTableCond, SStringBuilder* sb) { @@ -6766,7 +6731,7 @@ static int32_t checkQueryRangeForFill(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { } - int32_t tscInsertExprFields(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr, SInternalField** interField) { + int32_t tscInsertExprFields(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SInternalField** interField) { tSqlExprItem item = {.pNode = pExpr, .aliasName = NULL, .distinct = false}; int32_t outputIndex = (int32_t)tscSqlExprNumOfExprs(pQueryInfo); @@ -6811,7 +6776,7 @@ static int32_t checkQueryRangeForFill(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { return TSDB_CODE_SUCCESS; } -int32_t tscGetExprFilters(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr, SInternalField** pField) { +int32_t tscGetExprFilters(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SInternalField** pField) { SInternalField* pInfo = NULL; for (int32_t i = pQueryInfo->havingFieldNum - 1; i >= 0; --i) { @@ -6861,7 +6826,7 @@ static int32_t genExprFilter(SExprFilter * exprFilter) { return TSDB_CODE_SUCCESS; } -static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr, int32_t sqlOptr) { +static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, int32_t sqlOptr) { const char* msg1 = "non binary column not support like operator"; const char* msg2 = "invalid operator for binary column in having clause"; const char* msg3 = "invalid operator for bool column in having clause"; @@ -6913,27 +6878,32 @@ static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t ((pInfo->field.type == TSDB_DATA_TYPE_BINARY || pInfo->field.type == TSDB_DATA_TYPE_NCHAR) ? 
1 : 0); if (pColFilter->filterstr) { - if (pExpr->nSQLOptr != TK_EQ - && pExpr->nSQLOptr != TK_NE - && pExpr->nSQLOptr != TK_ISNULL - && pExpr->nSQLOptr != TK_NOTNULL - && pExpr->nSQLOptr != TK_LIKE + if (pExpr->tokenId != TK_EQ + && pExpr->tokenId != TK_NE + && pExpr->tokenId != TK_ISNULL + && pExpr->tokenId != TK_NOTNULL + && pExpr->tokenId != TK_LIKE ) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } } else { - if (pExpr->nSQLOptr == TK_LIKE) { + if (pExpr->tokenId == TK_LIKE) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } if (pInfo->field.type == TSDB_DATA_TYPE_BOOL) { - if (pExpr->nSQLOptr != TK_EQ && pExpr->nSQLOptr != TK_NE) { + if (pExpr->tokenId != TK_EQ && pExpr->tokenId != TK_NE) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } } } - int32_t ret = doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, pInfo->field.type, pExpr); + SColumnIndex index = COLUMN_INDEX_INITIALIZER; + if (getColumnIndexByName(pCmd, &pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_TSC_INVALID_SQL; + } + + int32_t ret = doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, &index, pExpr); if (ret) { return ret; } @@ -6941,38 +6911,30 @@ static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t return genExprFilter(pInfo->pFieldFilters); } -int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr, int32_t parentOptr) { +int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, int32_t parentOptr) { if (pExpr == NULL) { return TSDB_CODE_SUCCESS; } const char* msg1 = "invalid having clause"; - tSQLExpr* pLeft = pExpr->pLeft; - tSQLExpr* pRight = pExpr->pRight; + tSqlExpr* pLeft = pExpr->pLeft; + tSqlExpr* pRight = pExpr->pRight; - if (pExpr->nSQLOptr == TK_AND || pExpr->nSQLOptr == TK_OR) { - int32_t ret = getHavingExpr(pCmd, pQueryInfo, pExpr->pLeft, pExpr->nSQLOptr); + if (pExpr->tokenId == TK_AND || pExpr->tokenId == TK_OR) { + int32_t ret = getHavingExpr(pCmd, pQueryInfo, pExpr->pLeft, pExpr->tokenId); if (ret != TSDB_CODE_SUCCESS) { return ret; } - return getHavingExpr(pCmd, pQueryInfo, pExpr->pRight, pExpr->nSQLOptr); + return getHavingExpr(pCmd, pQueryInfo, pExpr->pRight, pExpr->tokenId); } if (pLeft == NULL || pRight == NULL) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - if ((pLeft->nSQLOptr >= TK_COUNT && pLeft->nSQLOptr <= TK_AVG_IRATE) && - (pRight->nSQLOptr >= TK_COUNT && pRight->nSQLOptr <= TK_AVG_IRATE)) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); - } - - if (pLeft->nSQLOptr >= TK_BOOL - && pLeft->nSQLOptr <= TK_BINARY - && pRight->nSQLOptr >= TK_BOOL - && pRight->nSQLOptr <= TK_BINARY) { + if (pLeft->type == pRight->type) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } @@ -6981,15 +6943,16 @@ int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr, in pLeft = pExpr->pLeft; pRight = pExpr->pRight; - if (!(pLeft->nSQLOptr >= TK_COUNT && pLeft->nSQLOptr <= TK_AVG_IRATE)) { + + if (pLeft->type != SQL_NODE_SQLFUNCTION) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + if (pRight->type != SQL_NODE_VALUE) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - if (!(pRight->nSQLOptr >= TK_BOOL && pRight->nSQLOptr <= TK_BINARY)) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); - } - - if (pExpr->nSQLOptr >= TK_BITAND) { + if (pExpr->tokenId >= TK_BITAND) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } @@ -6998,21 
+6961,22 @@ int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr, in //} if (pLeft->pParam) { - for (int32_t i = 0; i < pLeft->pParam->nExpr; i++) { - tSqlExprItem* pParamElem = &(pLeft->pParam->a[i]); - if (pParamElem->pNode->nSQLOptr != TK_ALL && - pParamElem->pNode->nSQLOptr != TK_ID && - pParamElem->pNode->nSQLOptr != TK_STRING && - pParamElem->pNode->nSQLOptr != TK_INTEGER && - pParamElem->pNode->nSQLOptr != TK_FLOAT) { + size_t size = taosArrayGetSize(pLeft->pParam); + for (int32_t i = 0; i < size; i++) { + tSqlExprItem* pParamElem = taosArrayGet(pLeft->pParam, i); + if (pParamElem->pNode->tokenId != TK_ALL && + pParamElem->pNode->tokenId != TK_ID && + pParamElem->pNode->tokenId != TK_STRING && + pParamElem->pNode->tokenId != TK_INTEGER && + pParamElem->pNode->tokenId != TK_FLOAT) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - if (pParamElem->pNode->nSQLOptr == TK_ID && (pParamElem->pNode->colInfo.z == NULL && pParamElem->pNode->colInfo.n == 0)) { + if (pParamElem->pNode->tokenId == TK_ID && (pParamElem->pNode->colInfo.z == NULL && pParamElem->pNode->colInfo.n == 0)) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - if (pParamElem->pNode->nSQLOptr == TK_ID) { + if (pParamElem->pNode->tokenId == TK_ID) { SColumnIndex index = COLUMN_INDEX_INITIALIZER; if ((getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS)) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); @@ -7029,12 +6993,17 @@ int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr, in } } + pLeft->functionId = isValidFunction(pLeft->operand.z, pLeft->operand.n); + if (pLeft->functionId < 0) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + return handleExprInHavingClause(pCmd, pQueryInfo, pExpr, parentOptr); } -int32_t parseHavingClause(SQueryInfo* pQueryInfo, tSQLExpr* pExpr, SSqlCmd* pCmd, bool isSTable, int32_t joinQuery, int32_t intervalQuery) { +int32_t parseHavingClause(SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SSqlCmd* pCmd, bool isSTable, int32_t joinQuery, int32_t timeWindowQuery) { const char* msg1 = "having only works with group by"; const char* msg2 = "functions or others can not be mixed up"; const char* msg3 = "invalid expression in having clause"; @@ -7062,7 +7031,7 @@ int32_t parseHavingClause(SQueryInfo* pQueryInfo, tSQLExpr* pExpr, SSqlCmd* pCmd } //REDO function check - if (!functionCompatibleCheck(pQueryInfo, joinQuery, intervalQuery)) { + if (!functionCompatibleCheck(pQueryInfo, joinQuery, timeWindowQuery)) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } @@ -7239,7 +7208,7 @@ int32_t doValidateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t i } // parse the having clause in the first place - if (parseHavingClause(pQueryInfo, pQuerySql->pHaving, pCmd, isSTable, joinQuery, intervalQuery) != TSDB_CODE_SUCCESS) { + if (parseHavingClause(pQueryInfo, pQuerySqlNode->pHaving, pCmd, isSTable, joinQuery, timeWindowQuery) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } diff --git a/src/inc/ttokendef.h b/src/inc/ttokendef.h index 4039d63485..7acfb569f3 100644 --- a/src/inc/ttokendef.h +++ b/src/inc/ttokendef.h @@ -208,6 +208,7 @@ + #define TK_SPACE 300 #define TK_COMMENT 301 #define TK_ILLEGAL 302 diff --git a/src/query/inc/qSqlparser.h b/src/query/inc/qSqlparser.h index cb27bc8e2d..c5ee172c40 100644 --- a/src/query/inc/qSqlparser.h +++ b/src/query/inc/qSqlparser.h @@ -98,7 +98,7 @@ typedef struct SQuerySqlNode { SLimitVal 
limit; // limit offset [optional] SLimitVal slimit; // group limit offset [optional] SStrToken sqlstr; // sql string in select clause - struct tSQLExpr * pHaving; // having clause [optional] + struct tSqlExpr *pHaving; // having clause [optional] } SQuerySqlNode; typedef struct STableNamePair { @@ -118,7 +118,6 @@ typedef struct SFromInfo { SArray *tableList; // SArray }; } SFromInfo; ->>>>>>> develop typedef struct SCreatedTableInfo { SStrToken name; // table name token @@ -255,11 +254,11 @@ SArray *tVariantListAppend(SArray *pList, tVariant *pVar, uint8_t sortOrder); SArray *tVariantListInsert(SArray *pList, tVariant *pVar, uint8_t sortOrder, int32_t index); SArray *tVariantListAppendToken(SArray *pList, SStrToken *pAliasToken, uint8_t sortOrder); -tSQLExpr *tSqlExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType); +tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType); -int32_t tSqlExprCompare(tSQLExpr *left, tSQLExpr *right); +int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right); -tSQLExpr *tSqlExprClone(tSQLExpr *pSrc); +tSqlExpr *tSqlExprClone(tSqlExpr *pSrc); SFromInfo *setTableNameList(SFromInfo* pFromInfo, SStrToken *pName, SStrToken* pAlias); SFromInfo *setSubquery(SFromInfo* pFromInfo, SQuerySqlNode *pSqlNode); void *destroyFromInfo(SFromInfo* pFromInfo); @@ -279,7 +278,7 @@ void tSqlExprListDestroy(SArray *pList); SQuerySqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelectList, SFromInfo *pFrom, tSqlExpr *pWhere, SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, SSessionWindowVal *ps, - SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pgLimit, tSQLExpr *pHaving); + SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pgLimit, tSqlExpr *pHaving); SCreateTableSql *tSetCreateTableInfo(SArray *pCols, SArray *pTags, SQuerySqlNode *pSelect, int32_t type); diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y index 4b88427ba0..f76c534da1 100644 --- a/src/query/inc/sql.y +++ b/src/query/inc/sql.y @@ -471,7 +471,7 @@ cmd ::= union(X). { setSqlInfo(pInfo, X, NULL, TSDB_SQL_SELECT); } // select client_version() // select server_state() select(A) ::= SELECT(T) selcollist(W). 
{ - A = tSetQuerySqlNode(&T, W, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + A = tSetQuerySqlNode(&T, W, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); } // selcollist is a list of expressions that are to become the return diff --git a/src/query/src/qSqlParser.c b/src/query/src/qSqlParser.c index f82cfefc88..53c30565c0 100644 --- a/src/query/src/qSqlParser.c +++ b/src/query/src/qSqlParser.c @@ -310,12 +310,12 @@ static FORCE_INLINE int32_t tStrTokenCompare(SStrToken* left, SStrToken* right) } -int32_t tSqlExprCompare(tSQLExpr *left, tSQLExpr *right) { +int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right) { if ((left == NULL && right) || (left && right == NULL)) { return 1; } - if (left->nSQLOptr != right->nSQLOptr) { + if (left->type != right->type) { return 1; } @@ -328,7 +328,7 @@ int32_t tSqlExprCompare(tSQLExpr *left, tSQLExpr *right) { return 1; } - if (tVariantCompare(&left->val, &right->val)) { + if (tVariantCompare(&left->value, &right->value)) { return 1; } @@ -336,14 +336,17 @@ int32_t tSqlExprCompare(tSQLExpr *left, tSQLExpr *right) { return 1; } - if (left->pParam && left->pParam->nExpr != right->pParam->nExpr) { + size_t size = taosArrayGetSize(right->pParam); + if (left->pParam && taosArrayGetSize(left->pParam) != size) { return 1; } if (right->pParam && left->pParam) { - for (int32_t i = 0; i < right->pParam->nExpr; i++) { - tSQLExpr* pSubLeft = left->pParam->a[i].pNode; - tSQLExpr* pSubRight = right->pParam->a[i].pNode; + for (int32_t i = 0; i < size; i++) { + tSqlExprItem* pLeftElem = taosArrayGet(left->pParam, i); + tSqlExpr* pSubLeft = pLeftElem->pNode; + tSqlExprItem* pRightElem = taosArrayGet(left->pParam, i); + tSqlExpr* pSubRight = pRightElem->pNode; if (tSqlExprCompare(pSubLeft, pSubRight)) { return 1; @@ -690,7 +693,7 @@ void tSetColumnType(TAOS_FIELD *pField, SStrToken *type) { SQuerySqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelectList, SFromInfo *pFrom, tSqlExpr *pWhere, SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, SSessionWindowVal *pSession, SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, - SLimitVal *psLimit, tSQLExpr *pHaving) { + SLimitVal *psLimit, tSqlExpr *pHaving) { assert(pSelectList != NULL); SQuerySqlNode *pSqlNode = calloc(1, sizeof(SQuerySqlNode)); diff --git a/src/query/src/sql.c b/src/query/src/sql.c index 8992e960d0..0e1916bc8b 100644 --- a/src/query/src/sql.c +++ b/src/query/src/sql.c @@ -2905,7 +2905,7 @@ static YYACTIONTYPE yy_reduce( break; case 161: /* select ::= SELECT selcollist */ { - yylhsminor.yy254 = tSetQuerySqlNode(&yymsp[-1].minor.yy0, yymsp[0].minor.yy429, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + yylhsminor.yy254 = tSetQuerySqlNode(&yymsp[-1].minor.yy0, yymsp[0].minor.yy429, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); } yymsp[-1].minor.yy254 = yylhsminor.yy254; break; From 59233ea596fe8c443d0325f0c7c2225f835b9b9e Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Mon, 29 Mar 2021 16:17:04 +0800 Subject: [PATCH 019/178] fix bug --- src/query/src/qExecutor.c | 42 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 7ba629cfc2..234bb81e2b 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -200,6 +200,7 @@ static bool isPointInterpoQuery(SQuery *pQuery); static void setResultBufSize(SQuery* pQuery, SRspResultInfo* pResultInfo); static void setCtxTagForJoin(SQueryRuntimeEnv* 
pRuntimeEnv, SQLFunctionCtx* pCtx, SExprInfo* pExprInfo, void* pTable); static void setParamForStableStddev(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput, SExprInfo* pExpr); +static void setParamForStableStddevByColData(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput, SExprInfo* pExpr, char* val, int16_t bytes); static void doSetTableGroupOutputBuf(SQueryRuntimeEnv* pRuntimeEnv, SResultRowInfo* pResultRowInfo, SQLFunctionCtx* pCtx, int32_t* rowCellInfoOffset, int32_t numOfOutput, int32_t groupIndex); @@ -1330,6 +1331,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn SColumnInfoData* pColInfoData = taosArrayGet(pSDataBlock->pDataBlock, pInfo->colIndex); int16_t bytes = pColInfoData->info.bytes; int16_t type = pColInfoData->info.type; + SQuery *pQuery = pRuntimeEnv->pQuery; if (type == TSDB_DATA_TYPE_FLOAT || type == TSDB_DATA_TYPE_DOUBLE) { qError("QInfo:%"PRIu64" group by not supported on double/float columns, abort", GET_QID(pRuntimeEnv)); @@ -1350,6 +1352,10 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn memcpy(pInfo->prevData, val, bytes); + if (pQuery->stableQuery && pQuery->stabledev && (pRuntimeEnv->prevResult != NULL)) { + setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, val, bytes); + } + int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, pInfo, pOperator->numOfOutput, val, type, bytes, item->groupIndex); if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code @@ -3396,6 +3402,42 @@ void setParamForStableStddev(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx } +void setParamForStableStddevByColData(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput, SExprInfo* pExpr, char* val, int16_t bytes) { + SQuery* pQuery = pRuntimeEnv->pQuery; + + int32_t numOfExprs = pQuery->numOfOutput; + for(int32_t i = 0; i < numOfExprs; ++i) { + SExprInfo* pExprInfo = &(pExpr[i]); + if (pExprInfo->base.functionId != TSDB_FUNC_STDDEV_DST) { + continue; + } + + SSqlFuncMsg* pFuncMsg = &pExprInfo->base; + + pCtx[i].param[0].arr = NULL; + pCtx[i].param[0].nType = TSDB_DATA_TYPE_INT; // avoid freeing the memory by setting the type to be int + + // TODO use hash to speedup this loop + int32_t numOfGroup = (int32_t)taosArrayGetSize(pRuntimeEnv->prevResult); + for (int32_t j = 0; j < numOfGroup; ++j) { + SInterResult* p = taosArrayGet(pRuntimeEnv->prevResult, j); + if (bytes == 0 || memcmp(p->tags, val, bytes) == 0) { + int32_t numOfCols = (int32_t)taosArrayGetSize(p->pResult); + for (int32_t k = 0; k < numOfCols; ++k) { + SStddevInterResult* pres = taosArrayGet(p->pResult, k); + if (pres->colId == pFuncMsg->colInfo.colId) { + pCtx[i].param[0].arr = pres->pResult; + break; + } + } + } + } + } + +} + + + /* * There are two cases to handle: * From 22882196c36321ccdf155fdf6078117e7d94c152 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Mon, 29 Mar 2021 17:36:57 +0800 Subject: [PATCH 020/178] fix bug --- src/client/src/tscSQLParser.c | 17 +++++--------- src/client/src/tscServer.c | 2 +- src/query/src/qExecutor.c | 42 +++++++++++++++++++++++++++++++++++ src/query/src/qSqlParser.c | 17 ++++++++++---- 4 files changed, 61 insertions(+), 17 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 14691decec..3455395481 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -3079,14 +3079,10 @@ static SColumnFilterInfo* 
addColumnFilterInfo(SColumn* pColumn) { } static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SColumnFilterInfo* pColumnFilter, - SColumnIndex* columnIndex, tSqlExpr* pExpr) { + int16_t colType, tSqlExpr* pExpr) { const char* msg = "not supported filter condition"; tSqlExpr* pRight = pExpr->pRight; - STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, columnIndex->tableIndex); - SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex->columnIndex); - - int16_t colType = pSchema->type; if (colType >= TSDB_DATA_TYPE_TINYINT && colType <= TSDB_DATA_TYPE_BIGINT) { colType = TSDB_DATA_TYPE_BIGINT; @@ -3291,8 +3287,10 @@ static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SC } pColumn->colIndex = *pIndex; + + int16_t colType = pSchema->type; - return doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, pIndex, pExpr); + return doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, colType, pExpr); } static int32_t getTablenameCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pTableCond, SStringBuilder* sb) { @@ -6898,12 +6896,7 @@ static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t } } - SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByName(pCmd, &pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_TSC_INVALID_SQL; - } - - int32_t ret = doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, &index, pExpr); + int32_t ret = doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, pInfo->field.type, pExpr); if (ret) { return ret; } diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 29274b36ea..24d7e19c85 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -862,7 +862,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pSqlFuncExpr->functionId = htons(pExpr->functionId); pSqlFuncExpr->numOfParams = htons(pExpr->numOfParams); pSqlFuncExpr->resColId = htons(pExpr->resColId); - if (pExpr->pFilter && pExpr->pFilter->numOfFilters > 0) { + if (pTableMeta->tableType != TSDB_SUPER_TABLE && pExpr->pFilter && pExpr->pFilter->numOfFilters > 0) { pSqlFuncExpr->filterNum = htonl(pExpr->pFilter->numOfFilters); } else { pSqlFuncExpr->filterNum = 0; diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index e9361ba0cf..5a00a3ac45 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -200,6 +200,7 @@ static bool isPointInterpoQuery(SQuery *pQuery); static void setResultBufSize(SQuery* pQuery, SRspResultInfo* pResultInfo); static void setCtxTagForJoin(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, SExprInfo* pExprInfo, void* pTable); static void setParamForStableStddev(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput, SExprInfo* pExpr); +static void setParamForStableStddevByColData(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput, SExprInfo* pExpr, char* val, int16_t bytes); static void doSetTableGroupOutputBuf(SQueryRuntimeEnv* pRuntimeEnv, SResultRowInfo* pResultRowInfo, SQLFunctionCtx* pCtx, int32_t* rowCellInfoOffset, int32_t numOfOutput, int32_t groupIndex); @@ -1330,6 +1331,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn SColumnInfoData* pColInfoData = taosArrayGet(pSDataBlock->pDataBlock, pInfo->colIndex); int16_t bytes = pColInfoData->info.bytes; int16_t type = pColInfoData->info.type; + SQuery *pQuery = pRuntimeEnv->pQuery; if (type == 
TSDB_DATA_TYPE_FLOAT || type == TSDB_DATA_TYPE_DOUBLE) { qError("QInfo:%"PRIu64" group by not supported on double/float columns, abort", GET_QID(pRuntimeEnv)); @@ -1350,6 +1352,10 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn memcpy(pInfo->prevData, val, bytes); + if (pQuery->stableQuery && pQuery->stabledev && (pRuntimeEnv->prevResult != NULL)) { + setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, val, bytes); + } + int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, pInfo, pOperator->numOfOutput, val, type, bytes, item->groupIndex); if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code @@ -3396,6 +3402,42 @@ void setParamForStableStddev(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx } +void setParamForStableStddevByColData(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput, SExprInfo* pExpr, char* val, int16_t bytes) { + SQuery* pQuery = pRuntimeEnv->pQuery; + + int32_t numOfExprs = pQuery->numOfOutput; + for(int32_t i = 0; i < numOfExprs; ++i) { + SExprInfo* pExprInfo = &(pExpr[i]); + if (pExprInfo->base.functionId != TSDB_FUNC_STDDEV_DST) { + continue; + } + + SSqlFuncMsg* pFuncMsg = &pExprInfo->base; + + pCtx[i].param[0].arr = NULL; + pCtx[i].param[0].nType = TSDB_DATA_TYPE_INT; // avoid freeing the memory by setting the type to be int + + // TODO use hash to speedup this loop + int32_t numOfGroup = (int32_t)taosArrayGetSize(pRuntimeEnv->prevResult); + for (int32_t j = 0; j < numOfGroup; ++j) { + SInterResult* p = taosArrayGet(pRuntimeEnv->prevResult, j); + if (bytes == 0 || memcmp(p->tags, val, bytes) == 0) { + int32_t numOfCols = (int32_t)taosArrayGetSize(p->pResult); + for (int32_t k = 0; k < numOfCols; ++k) { + SStddevInterResult* pres = taosArrayGet(p->pResult, k); + if (pres->colId == pFuncMsg->colInfo.colId) { + pCtx[i].param[0].arr = pres->pResult; + break; + } + } + } + } + } + +} + + + /* * There are two cases to handle: * diff --git a/src/query/src/qSqlParser.c b/src/query/src/qSqlParser.c index 53c30565c0..25fa8465b1 100644 --- a/src/query/src/qSqlParser.c +++ b/src/query/src/qSqlParser.c @@ -319,6 +319,14 @@ int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right) { return 1; } + if (left->tokenId != right->tokenId) { + return 1; + } + + if (left->functionId != right->functionId) { + return 1; + } + if ((left->pLeft && right->pLeft == NULL) || (left->pLeft == NULL && right->pLeft) || (left->pRight && right->pRight == NULL) @@ -336,12 +344,13 @@ int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right) { return 1; } - size_t size = taosArrayGetSize(right->pParam); - if (left->pParam && taosArrayGetSize(left->pParam) != size) { - return 1; - } if (right->pParam && left->pParam) { + size_t size = taosArrayGetSize(right->pParam); + if (left->pParam && taosArrayGetSize(left->pParam) != size) { + return 1; + } + for (int32_t i = 0; i < size; i++) { tSqlExprItem* pLeftElem = taosArrayGet(left->pParam, i); tSqlExpr* pSubLeft = pLeftElem->pNode; From a5b4ae69451a727388174e67e42bb94e7c25333b Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Tue, 30 Mar 2021 19:39:48 +0800 Subject: [PATCH 021/178] support child table having query --- src/client/src/tscLocalMerge.c | 4 +- src/query/inc/qExecutor.h | 8 + src/query/src/qExecutor.c | 202 +- tests/script/general/parser/having_child.sim | 1839 ++++++++++++++++++ 4 files changed, 2048 insertions(+), 5 deletions(-) create mode 100644 tests/script/general/parser/having_child.sim diff --git 
a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index 65fa825a27..60eb0c9226 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -1235,7 +1235,7 @@ static bool saveGroupResultInfo(SSqlObj *pSql) { } -bool doFilterFieldData(SQueryInfo* pQueryInfo, char *input, tFilePage* pOutput, SExprFilter* pFieldFilters, int16_t type, bool* notSkipped) { +bool doFilterFieldData(char *input, SExprFilter* pFieldFilters, int16_t type, bool* notSkipped) { bool qualified = false; for(int32_t k = 0; k < pFieldFilters->pFilters->numOfFilters; ++k) { @@ -1293,7 +1293,7 @@ int32_t doHavingFilter(SQueryInfo* pQueryInfo, tFilePage* pOutput, bool* notSkip char* pInput = pOutput->data + pOutput->num* pFieldFilters->pSqlExpr->offset; - doFilterFieldData(pQueryInfo, pInput, pOutput, pFieldFilters, type, notSkipped); + doFilterFieldData(pInput, pFieldFilters, type, notSkipped); if (*notSkipped == false) { return TSDB_CODE_SUCCESS; } diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 5ff574ec67..d528362e95 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -189,6 +189,8 @@ typedef struct SQuery { bool stabledev; // super table stddev query int32_t interBufSize; // intermediate buffer sizse + int32_t havingNum; // having expr number + SOrderVal order; int16_t numOfCols; int16_t numOfTags; @@ -284,6 +286,7 @@ enum OPERATOR_TYPE_E { OP_Fill = 13, OP_MultiTableAggregate = 14, OP_MultiTableTimeInterval = 15, + OP_Having = 16, }; typedef struct SOperatorInfo { @@ -401,6 +404,11 @@ typedef struct SOffsetOperatorInfo { int64_t offset; } SOffsetOperatorInfo; +typedef struct SHavingOperatorInfo { + SArray* fp; +} SHavingOperatorInfo; + + typedef struct SFillOperatorInfo { SFillInfo *pFillInfo; SSDataBlock *pRes; diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 5a00a3ac45..71bc4913e0 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -181,6 +181,7 @@ static SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntime static SOperatorInfo* createMultiTableTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); static SOperatorInfo* createTagScanOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInfo* pExpr, int32_t numOfOutput); static SOperatorInfo* createTableBlockInfoScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv); +static SOperatorInfo* createHavingOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); static void destroyBasicOperatorInfo(void* param, int32_t numOfOutput); static void destroySFillOperatorInfo(void* param, int32_t numOfOutput); @@ -1819,6 +1820,10 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf } } + if (pQuery->havingNum > 0) { + pRuntimeEnv->proot = createHavingOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQuery->pExpr1, pQuery->numOfOutput); + } + if (pQuery->limit.offset > 0) { pRuntimeEnv->proot = createOffsetOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot); } @@ -4653,6 +4658,111 @@ static SSDataBlock* doOffset(void* param) { } } + +bool doFilterData(SColumnInfoData* p, int32_t rid, SColumnFilterElem *filterElem, __filter_func_t fp) { + char* input = p->pData + p->info.bytes * rid; + bool isnull = isNull(input, p->info.type); + if (isnull) { + return (fp == isNullOperator) ? 
true : false; + } else { + if (fp == notNullOperator) { + return true; + } else if (fp == isNullOperator) { + return false; + } + } + + if (fp(filterElem, input, input, p->info.type)) { + return true; + } + + return false; +} + + +void doHavingImpl(SOperatorInfo *pOperator, SSDataBlock *pBlock) { + SHavingOperatorInfo* pInfo = pOperator->info; + int32_t f = 0; + int32_t allQualified = 1; + int32_t exprQualified = 0; + + for (int32_t r = 0; r < pBlock->info.rows; ++r) { + allQualified = 1; + + for (int32_t i = 0; i < pOperator->numOfOutput; ++i) { + SExprInfo* pExprInfo = &(pOperator->pExpr[i]); + if (pExprInfo->pFilter == NULL) { + continue; + } + + SArray* es = taosArrayGetP(pInfo->fp, i); + assert(es); + + size_t fpNum = taosArrayGetSize(es); + + exprQualified = 0; + for (int32_t m = 0; m < fpNum; ++m) { + __filter_func_t fp = taosArrayGetP(es, m); + + assert(fp); + + //SColIndex* colIdx = &pExprInfo->base.colInfo; + SColumnInfoData* p = taosArrayGet(pBlock->pDataBlock, i); + + SColumnFilterElem filterElem = {.filterInfo = *pExprInfo->pFilter}; + + if (doFilterData(p, r, &filterElem, fp)) { + exprQualified = 1; + break; + } + } + + if (exprQualified == 0) { + allQualified = 0; + break; + } + } + + if (allQualified == 0) { + continue; + } + + for (int32_t i = 0; i < pBlock->info.numOfCols; ++i) { + SColumnInfoData *pColInfoData = taosArrayGet(pBlock->pDataBlock, i); + + int16_t bytes = pColInfoData->info.bytes; + memmove(pColInfoData->pData + f * bytes, pColInfoData->pData + bytes * r, bytes); + } + + ++f; + } + + pBlock->info.rows = f; +} + +static SSDataBlock* doHaving(void* param) { + SOperatorInfo *pOperator = (SOperatorInfo *)param; + if (pOperator->status == OP_EXEC_DONE) { + return NULL; + } + + SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv; + + while (1) { + SSDataBlock *pBlock = pOperator->upstream->exec(pOperator->upstream); + if (pBlock == NULL) { + setQueryStatus(pRuntimeEnv, QUERY_COMPLETED); + pOperator->status = OP_EXEC_DONE; + return NULL; + } + + doHavingImpl(pOperator, pBlock); + + return pBlock; + } +} + + static SSDataBlock* doIntervalAgg(void* param) { SOperatorInfo* pOperator = (SOperatorInfo*) param; if (pOperator->status == OP_EXEC_DONE) { @@ -5003,6 +5113,13 @@ static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) { pInfo->pRes = destroyOutputBuf(pInfo->pRes); } +static void destroyHavingOperatorInfo(void* param, int32_t numOfOutput) { + SHavingOperatorInfo* pInfo = (SHavingOperatorInfo*) param; + if (pInfo->fp) { + taosArrayDestroy(pInfo->fp); + } +} + SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { SAggOperatorInfo* pInfo = calloc(1, sizeof(SAggOperatorInfo)); @@ -5059,6 +5176,81 @@ SOperatorInfo* createArithOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI return pOperator; } + +int32_t initFilterFp(SExprInfo* pExpr, int32_t numOfOutput, SArray** fps) { + __filter_func_t fp = NULL; + + *fps = taosArrayInit(numOfOutput, sizeof(SArray*)); + if (*fps == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + for (int32_t i = 0; i < numOfOutput; ++i) { + SExprInfo* pExprInfo = &(pExpr[i]); + SColIndex* colIdx = &pExprInfo->base.colInfo; + + if (pExprInfo->pFilter == NULL || !TSDB_COL_IS_NORMAL_COL(colIdx->flag)) { + taosArrayPush(*fps, &fp); + + continue; + } + + int32_t filterNum = pExprInfo->base.filterNum; + SColumnFilterInfo *filterInfo = pExprInfo->pFilter; + + SArray* es = taosArrayInit(filterNum, sizeof(__filter_func_t)); + + for 
(int32_t j = 0; j < filterNum; ++j) { + int32_t lower = filterInfo->lowerRelOptr; + int32_t upper = filterInfo->upperRelOptr; + if (lower == TSDB_RELATION_INVALID && upper == TSDB_RELATION_INVALID) { + qError("invalid rel optr"); + return TSDB_CODE_QRY_APP_ERROR; + } + + __filter_func_t ffp = getFilterOperator(lower, upper); + if (ffp == NULL) { + qError("invalid filter info"); + return TSDB_CODE_QRY_APP_ERROR; + } + + taosArrayPush(es, &ffp); + + filterInfo += 1; + } + + taosArrayPush(*fps, &es); + } + + return TSDB_CODE_SUCCESS; +} + +SOperatorInfo* createHavingOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { + SHavingOperatorInfo* pInfo = calloc(1, sizeof(SHavingOperatorInfo)); + + initFilterFp(pExpr, numOfOutput, &pInfo->fp); + + assert(pInfo->fp); + + SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + + pOperator->name = "HavingOperator"; + pOperator->operatorType = OP_Having; + pOperator->blockingOptr = false; + pOperator->status = OP_IN_EXECUTING; + pOperator->numOfOutput = numOfOutput; + pOperator->pExpr = pExpr; + pOperator->upstream = upstream; + pOperator->exec = doHaving; + pOperator->info = pInfo; + pOperator->pRuntimeEnv = pRuntimeEnv; + pOperator->cleanup = destroyHavingOperatorInfo; + + return pOperator; +} + + + SOperatorInfo* createLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream) { SLimitOperatorInfo* pInfo = calloc(1, sizeof(SLimitOperatorInfo)); pInfo->limit = pRuntimeEnv->pQuery->limit.limit; @@ -5856,8 +6048,8 @@ int32_t cloneExprFilterInfo(SColumnFilterInfo **dst, SColumnFilterInfo* src, int memcpy(*dst, src, sizeof(*src) * filterNum); for (int32_t i = 0; i < filterNum; i++) { - if (dst[i]->filterstr && dst[i]->len > 0) { - void *pz = calloc(1, dst[i]->len + 1); + if ((*dst)[i].filterstr && dst[i]->len > 0) { + void *pz = calloc(1, (*dst)[i].len + 1); if (pz == NULL) { if (i == 0) { @@ -5871,7 +6063,7 @@ int32_t cloneExprFilterInfo(SColumnFilterInfo **dst, SColumnFilterInfo* src, int memcpy(pz, (void *)src->pz, src->len + 1); - dst[i]->pz = (int64_t)pz; + (*dst)[i].pz = (int64_t)pz; } } @@ -6288,6 +6480,10 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SSqlGroupbyExpr* pGroupbyExpr if (TSDB_COL_IS_TAG(pExprs[col].base.colInfo.flag)) { pQuery->tagLen += pExprs[col].bytes; } + + if (pExprs[col].pFilter) { + ++pQuery->havingNum; + } } doUpdateExprColumnIndex(pQuery); diff --git a/tests/script/general/parser/having_child.sim b/tests/script/general/parser/having_child.sim new file mode 100644 index 0000000000..f9c554aeab --- /dev/null +++ b/tests/script/general/parser/having_child.sim @@ -0,0 +1,1839 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2 +system sh/exec.sh -n dnode1 -s start + +sleep 100 +sql connect +print ======================== dnode1 start + +$db = testdb + +sql create database $db +sql use $db + +sql create stable st2 (ts timestamp, f1 int, f2 float, f3 double, f4 bigint, f5 smallint, f6 tinyint, f7 bool, f8 binary(10), f9 nchar(10)) tags (id1 int, id2 float, id3 nchar(10), id4 double, id5 smallint, id6 bigint, id7 binary(10)) + +sql create table tb1 using st2 tags (1,1.0,"1",1.0,1,1,"1"); +sql create table tb2 using st2 tags (2,2.0,"2",2.0,2,2,"2"); +sql create table tb3 using st2 tags (3,3.0,"3",3.0,3,3,"3"); +sql create table tb4 using st2 tags (4,4.0,"4",4.0,4,4,"4"); + +sql insert into tb1 values (now-200s,1,1.0,1.0,1,1,1,true 
,"1","1") +sql insert into tb1 values (now-150s,1,1.0,1.0,1,1,1,false,"1","1") +sql insert into tb1 values (now-100s,2,2.0,2.0,2,2,2,true ,"2","2") +sql insert into tb1 values (now-50s ,2,2.0,2.0,2,2,2,false,"2","2") +sql insert into tb1 values (now ,3,3.0,3.0,3,3,3,true ,"3","3") +sql insert into tb1 values (now+50s ,3,3.0,3.0,3,3,3,false,"3","3") +sql insert into tb1 values (now+100s,4,4.0,4.0,4,4,4,true ,"4","4") +sql insert into tb1 values (now+150s,4,4.0,4.0,4,4,4,false,"4","4") + + +sql select count(*),f1 from tb1 group by f1 having count(f1) > 0; +if $rows != 4 then + return -1 +endi +if $data00 != 2 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data10 != 2 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data21 != 3 then + return -1 +endi +if $data30 != 2 then + return -1 +endi +if $data31 != 4 then + return -1 +endi + + +sql select count(*),f1 from tb1 group by f1 having count(*) > 0; +if $rows != 4 then + return -1 +endi +if $data00 != 2 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data10 != 2 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data21 != 3 then + return -1 +endi +if $data30 != 2 then + return -1 +endi +if $data31 != 4 then + return -1 +endi + + +sql select count(*),f1 from tb1 group by f1 having count(f2) > 0; +if $rows != 4 then + return -1 +endi +if $data00 != 2 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data10 != 2 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data21 != 3 then + return -1 +endi +if $data30 != 2 then + return -1 +endi +if $data31 != 4 then + return -1 +endi + +sql_error select top(f1,2) from tb1 group by f1 having count(f2) > 0; + +sql select last(f1) from tb1 group by f1 having count(f2) > 0; +if $rows != 4 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != 2 then + return -1 +endi +if $data20 != 3 then + return -1 +endi +if $data30 != 4 then + return -1 +endi + +sql_error select top(f1,2) from tb1 group by f1 having count(f2) > 0; + +sql_error select top(f1,2) from tb1 group by f1 having count(f2) > 0; + +sql_error select top(f1,2) from tb1 group by f1 having avg(f1) > 0; + +sql select avg(f1),count(f1) from tb1 group by f1 having avg(f1) > 2; +if $rows != 2 then + return -1 +endi +if $data00 != 3.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi + + +sql select avg(f1),count(f1) from tb1 group by f1 having avg(f1) > 2 and sum(f1) > 0; +if $rows != 2 then + return -1 +endi +if $data00 != 3.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi + +sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having avg(f1) > 2 and sum(f1) > 0; +if $rows != 2 then + return -1 +endi +if $data00 != 3.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 6 then + return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 8 then + return -1 +endi + +sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having avg(f1) > 2; +if $rows != 2 then + return -1 +endi +if $data00 != 3.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 6 then 
+ return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 8 then + return -1 +endi + +sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 0; +if $rows != 4 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 2 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 4 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 6 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi +if $data31 != 2 then + return -1 +endi +if $data32 != 8 then + return -1 +endi + +sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 2 and sum(f1) < 6; +if $rows != 1 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 4 then + return -1 +endi + + +sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having 1 <= sum(f1) and 5 >= sum(f1); +if $rows != 2 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 2 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 4 then + return -1 +endi + +sql_error select avg(f1),count(f1),sum(f1),twa(f1) from tb1 group by tbname having twa(f1) > 0; + +sql select avg(f1),count(f1),sum(f1),twa(f1) from tb1 group by f1 having twa(f1) > 3; +if $rows != 1 then + return -1 +endi +if $data00 != 4.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 8 then + return -1 +endi +if $data03 != 4.000000000 then + return -1 +endi + +sql_error select avg(f1),count(f1),sum(f1),twa(f1) from tb1 group by tbname having sum(f1) > 0; + +sql select avg(f1),count(f1),sum(f1),twa(f1) from tb1 group by f1 having sum(f1) = 4; +if $rows != 1 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 4 then + return -1 +endi +if $data03 != 2.000000000 then + return -1 +endi + +sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 0; +if $rows != 4 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 2 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 4 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 6 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi +if $data31 != 2 then + return -1 +endi +if $data32 != 8 then + return -1 +endi + +sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 3; +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 4 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 6 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 8 then + return -1 +endi + +###########and issue +sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 3 and sum(f1) > 1; +if $rows != 4 then + 
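# editor's note, not part of the patch: four groups are expected here even though a literal reading of "sum(f1) > 3 and sum(f1) > 1" would pass only three; two HAVING filters on the same aggregate joined by AND appear to collapse into a single column filter, so the later bound (> 1) effectively wins, and the "and issue" marker above flags that known behavior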
return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 2 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 4 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 6 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi +if $data31 != 2 then + return -1 +endi +if $data32 != 8 then + return -1 +endi + + +sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 3 or sum(f1) > 1; +if $rows != 4 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 2 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 4 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 6 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi +if $data31 != 2 then + return -1 +endi +if $data32 != 8 then + return -1 +endi + +sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 3 or sum(f1) > 4; +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 4 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 6 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 8 then + return -1 +endi + +############or issue +sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 3 or avg(f1) > 4; +if $rows != 0 then + return -1 +endi + +sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having (sum(f1) > 3); +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 4 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 6 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 8 then + return -1 +endi + +sql_error select avg(f1),count(f1),sum(f1) from tb1 group by f1 having (sum(*) > 3); + +sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having (sum(tb1.f1) > 3); +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 4 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 6 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 8 then + return -1 +endi + +sql select avg(f1),count(tb1.*),sum(f1) from tb1 group by f1 having (sum(tb1.f1) > 3); +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 4 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 6 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 8 then + return -1 +endi + +sql select 
avg(f1),count(tb1.*),sum(f1),stddev(f1),stddev(f1) from tb1 group by f1; +if $rows != 4 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 2 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data04 != 0.000000000 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 4 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data14 != 0.000000000 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 6 then + return -1 +endi +if $data23 != 0.000000000 then + return -1 +endi +if $data24 != 0.000000000 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi +if $data31 != 2 then + return -1 +endi +if $data32 != 8 then + return -1 +endi +if $data33 != 0.000000000 then + return -1 +endi +if $data34 != 0.000000000 then + return -1 +endi + +sql select avg(f1),count(tb1.*),sum(f1),stddev(f1) from tb1 group by f1 having (stddev(tb1.f1) > 3); +if $rows != 0 then + return -1 +endi + +sql select avg(f1),count(tb1.*),sum(f1),stddev(f1) from tb1 group by f1 having (stddev(tb1.f1) < 1); +if $rows != 4 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 2 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 4 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 6 then + return -1 +endi +if $data23 != 0.000000000 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi +if $data31 != 2 then + return -1 +endi +if $data32 != 8 then + return -1 +endi +if $data33 != 0.000000000 then + return -1 +endi + + +sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1) from tb1 group by f1 having (LEASTSQUARES(f1) < 1); + +sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1) from tb1 group by f1 having LEASTSQUARES(f1) < 1; + +sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1) from tb1 group by f1 having LEASTSQUARES(f1,1,1) < 1; + +sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1) from tb1 group by f1 having LEASTSQUARES(f1,1,1) > 2; + +sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1),LEASTSQUARES(f1,1,1) from tb1 group by f1 having LEASTSQUARES(f1,1,1) > 2; + +sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1),LEASTSQUARES(f1,1,1) from tb1 group by f1 having sum(f1) > 2; + +sql select avg(f1),count(tb1.*),sum(f1),stddev(f1) from tb1 group by f1 having min(f1) > 2; +if $rows != 2 then + return -1 +endi +if $data00 != 3.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 6 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 8 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi + +sql select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1) from tb1 group by f1 having min(f1) > 2; +if $rows != 2 then + return -1 +endi +if $data00 != 3.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 6 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 
+endi +if $data04 != 3 then + return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 8 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data14 != 4 then + return -1 +endi + +sql select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1) from tb1 group by f1 having max(f1) > 2; +if $rows != 2 then + return -1 +endi +if $data00 != 3.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 6 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data04 != 3 then + return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 8 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data14 != 4 then + return -1 +endi + +sql select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1),max(f1) from tb1 group by f1 having max(f1) != 2; +if $rows != 3 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 2 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data04 != 1 then + return -1 +endi +if $data05 != 1 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 6 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data14 != 3 then + return -1 +endi +if $data15 != 3 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 8 then + return -1 +endi +if $data23 != 0.000000000 then + return -1 +endi +if $data24 != 4 then + return -1 +endi +if $data25 != 4 then + return -1 +endi + +sql select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1),max(f1) from tb1 group by f1 having first(f1) != 2; +if $rows != 3 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 2 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data04 != 1 then + return -1 +endi +if $data05 != 1 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 6 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data14 != 3 then + return -1 +endi +if $data15 != 3 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 8 then + return -1 +endi +if $data23 != 0.000000000 then + return -1 +endi +if $data24 != 4 then + return -1 +endi +if $data25 != 4 then + return -1 +endi + + + +sql select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1) from tb1 group by f1 having first(f1) != 2; +if $rows != 3 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 2 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data04 != 1 then + return -1 +endi +if $data05 != 1 then + return -1 +endi +if $data06 != 1 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 6 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data14 != 3 then + return -1 +endi +if $data15 != 3 then + return -1 +endi +if $data16 != 3 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 2 then + 
return -1 +endi +if $data22 != 8 then + return -1 +endi +if $data23 != 0.000000000 then + return -1 +endi +if $data24 != 4 then + return -1 +endi +if $data25 != 4 then + return -1 +endi +if $data26 != 4 then + return -1 +endi + + + +sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1) from tb1 group by f1 having top(f1,1); + +sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1) from tb1 group by f1 having top(f1,1) > 1; + +sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1) from tb1 group by f1 having bottom(f1,1) > 1; + +sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1),top(f1,1),bottom(f1,1) from tb1 group by f1 having bottom(f1,1) > 1; + +sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1),top(f1,1),bottom(f1,1) from tb1 group by f1 having sum(f1) > 1; + +sql_error select PERCENTILE(f1) from tb1 group by f1 having sum(f1) > 1; + +sql_error select PERCENTILE(f1,20) from tb1 group by f1 having sum(f1) > 1; + +sql select aPERCENTILE(f1,20) from tb1 group by f1 having sum(f1) > 1; +if $rows != 4 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi + +sql select aPERCENTILE(f1,20) from tb1 group by f1 having apercentile(f1,1) > 1; +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi + +sql select aPERCENTILE(f1,20) from tb1 group by f1 having apercentile(f1,1) > 1 and apercentile(f1,1) < 50; +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi + +sql select aPERCENTILE(f1,20) from tb1 group by f1 having apercentile(f1,1) > 1 and apercentile(f1,1) < 3; +if $rows != 1 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi + +sql select aPERCENTILE(f1,20) from tb1 group by f1 having apercentile(f1,1) > 1 and apercentile(f1,3) < 3; +if $rows != 1 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi + +sql_error select aPERCENTILE(f1,20) from tb1 group by f1 having apercentile(1) > 1; + +sql_error select aPERCENTILE(f1,20),LAST_ROW(f1) from tb1 group by f1 having apercentile(1) > 1; + +sql_error select aPERCENTILE(f1,20),LAST_ROW(f1) from tb1 group by f1 having apercentile(f1,1) > 1; + +sql_error select sum(f1) from tb1 group by f1 having last_row(f1) > 1; + +sql_error select avg(f1) from tb1 group by f1 having diff(f1) > 0; + +sql_error select avg(f1),diff(f1) from tb1 group by f1 having avg(f1) > 0; + +sql_error select avg(f1),diff(f1) from tb1 group by f1 having spread(f2) > 0; + +sql select avg(f1) from tb1 group by f1 having spread(f2) > 0; +if $rows != 0 then + return -1 +endi + +sql select avg(f1) from tb1 group by f1 having spread(f2) = 0; +if $rows != 4 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi + +sql select avg(f1),spread(f2) from tb1 group by f1; +if $rows != 4 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if 
$data01 != 0.000000000 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data21 != 0.000000000 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi +if $data31 != 0.000000000 then + return -1 +endi + +sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) = 0; +if $rows != 4 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data12 != 0.000000000 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data21 != 0.000000000 then + return -1 +endi +if $data22 != 0.000000000 then + return -1 +endi +if $data23 != 0.000000000 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi +if $data31 != 0.000000000 then + return -1 +endi +if $data32 != 0.000000000 then + return -1 +endi +if $data33 != 0.000000000 then + return -1 +endi + +sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) != 0; +if $rows != 0 then + return -1 +endi + + +sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) + 1 > 0; + +sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) + 1; + +sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) + sum(f1); + +sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) + sum(f1) > 0; + +sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) - sum(f1) > 0; + +sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) * sum(f1) > 0; + +sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) / sum(f1) > 0; + +sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) > sum(f1); + +sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) and sum(f1); + +sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) 0 and sum(f1); + +sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) + 0 and sum(f1); + +sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) - f1 and sum(f1); + +sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) - id1 and sum(f1); + +sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) > id1 and sum(f1); + +sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) > id1 and sum(f1) > 1; + +sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) > 2 and sum(f1) > 1; +if $rows != 0 then + return -1 +endi + +sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) = 0 and sum(f1) > 1; +if $rows != 4 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data12 != 0.000000000 then + 
return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data21 != 0.000000000 then + return -1 +endi +if $data22 != 0.000000000 then + return -1 +endi +if $data23 != 0.000000000 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi +if $data31 != 0.000000000 then + return -1 +endi +if $data32 != 0.000000000 then + return -1 +endi +if $data33 != 0.000000000 then + return -1 +endi + +sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) = 0 and avg(f1) > 1; +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data12 != 0.000000000 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 0.000000000 then + return -1 +endi +if $data22 != 0.000000000 then + return -1 +endi +if $data23 != 0.000000000 then + return -1 +endi + +sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by c1 having spread(f1) = 0 and avg(f1) > 1; + +sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by id1 having avg(id1) > 0; + +sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by id1 having avg(f1) > id1; + +sql_error select avg(f1),spread(f1,f2,tb1.f1),avg(id1) from tb1 group by id1 having avg(f1) > id1; + +sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by id1 having avg(f1) > 0; +if $rows != 1 then + return -1 +endi +if $data00 != 2.500000000 then + return -1 +endi +if $data01 != 3.000000000 then + return -1 +endi +if $data02 != 3.000000000 then + return -1 +endi +if $data03 != 3.000000000 then + return -1 +endi +if $data04 != 1 then + return -1 +endi + +sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by id1 having avg(f1) < 2; +if $rows != 0 then + return -1 +endi + +sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f1 > 0 group by f1 having avg(f1) > 0; +if $rows != 4 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data12 != 0.000000000 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data21 != 0.000000000 then + return -1 +endi +if $data22 != 0.000000000 then + return -1 +endi +if $data23 != 0.000000000 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi +if $data31 != 0.000000000 then + return -1 +endi +if $data32 != 0.000000000 then + return -1 +endi +if $data33 != 0.000000000 then + return -1 +endi + +sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f1 > 2 group by f1 having avg(f1) > 0; +if $rows != 2 then + return -1 +endi +if $data00 != 3.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data12 != 0.000000000 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 
+endi + +sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 2 group by f1 having avg(f1) > 0; +if $rows != 2 then + return -1 +endi +if $data00 != 3.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data12 != 0.000000000 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi + +sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f3 > 2 group by f1 having avg(f1) > 0; +if $rows != 2 then + return -1 +endi +if $data00 != 3.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data12 != 0.000000000 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi + +sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 3 group by f1 having avg(f1) > 0; +if $rows != 1 then + return -1 +endi +if $data00 != 4.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi + +sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 3 group by f1 having avg(ts) > 0; + +sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 3 group by f1 having avg(f7) > 0; + +sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 3 group by f1 having avg(f8) > 0; + +sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 3 group by f1 having avg(f9) > 0; + +sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 3 group by f1 having count(f9) > 0; +if $rows != 1 then + return -1 +endi +if $data00 != 4.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi + +sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 3 group by f1 having last(f9) > 0; + +sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 3 group by f1 having last(f2) > 0; +if $rows != 1 then + return -1 +endi +if $data00 != 4.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi + +sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 3 group by f1 having last(f3) > 0; +if $rows != 1 then + return -1 +endi +if $data00 != 4.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi + +sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 1 group by f1 having last(f3) > 0; +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data12 != 0.000000000 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 0.000000000 then 
+ return -1 +endi +if $data22 != 0.000000000 then + return -1 +endi +if $data23 != 0.000000000 then + return -1 +endi + +sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 1 group by f1 having last(f4) > 0; +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data12 != 0.000000000 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 0.000000000 then + return -1 +endi +if $data22 != 0.000000000 then + return -1 +endi +if $data23 != 0.000000000 then + return -1 +endi + +sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 1 group by f1 having last(f5) > 0; +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data12 != 0.000000000 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 0.000000000 then + return -1 +endi +if $data22 != 0.000000000 then + return -1 +endi +if $data23 != 0.000000000 then + return -1 +endi + +sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 1 group by f1 having last(f6) > 0; +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 0.000000000 then + return -1 +endi +if $data02 != 0.000000000 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data12 != 0.000000000 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 0.000000000 then + return -1 +endi +if $data22 != 0.000000000 then + return -1 +endi +if $data23 != 0.000000000 then + return -1 +endi + + +sql_error select avg(f1),spread(f1,f2,tb1.f1),f1,f2 from tb1 where f2 > 1 group by f1 having last(f6) > 0; + +sql_error select avg(f1),spread(f1,f2,tb1.f1),f1,f6 from tb1 where f2 > 1 group by f1 having last(f6) > 0; + +sql_error select avg(f1),spread(f1,f2,tb1.f1),f1,f6 from tb1 where f2 > 1 group by f1,f2 having last(f6) > 0; + +sql_error select avg(f1),spread(f1,f2,tb1.f1),f1,f6 from tb1 where f2 > 1 group by f1,id1 having last(f6) > 0; + +sql_error select avg(f1),spread(f1,f2,tb1.f1),f1,f6 from tb1 where f2 > 1 group by id1 having last(f6) > 0; + +sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 1 group by id1 having last(f6) > 0; +if $rows != 1 then + return -1 +endi +if $data00 != 3.000000000 then + return -1 +endi +if $data01 != 2.000000000 then + return -1 +endi +if $data02 != 2.000000000 then + return -1 +endi +if $data03 != 2.000000000 then + return -1 +endi +if $data04 != 1 then + return -1 +endi + +sql_error select top(f1,2) from tb1 group by f1 having count(f1) > 0; + +system sh/exec.sh -n dnode1 -s stop -x SIGINT From 22d1853903e0dee27aa981cae2449893a0a8a661 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Wed, 31 Mar 2021 10:52:13 +0800 Subject: [PATCH 022/178] 
fix mem leak issue --- src/query/inc/qExecutor.h | 3 ++- src/query/src/qExecutor.c | 9 +++++---- src/query/src/qUtil.c | 13 ++++--------- 3 files changed, 11 insertions(+), 14 deletions(-) diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 5ff574ec67..c62e78a3a9 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -86,7 +86,8 @@ typedef struct SResultRow { bool closed; // this result status: closed or opened uint32_t numOfRows; // number of rows of current time window SResultRowCellInfo* pCellInfo; // For each result column, there is a resultInfo - union {STimeWindow win; char* key;}; // start key of current result row + STimeWindow win; + char* key; // start key of current result row } SResultRow; typedef struct SGroupResInfo { diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 7ba629cfc2..1f8baeb6d8 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1870,14 +1870,15 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { taosHashCleanup(pRuntimeEnv->pResultRowHashTable); pRuntimeEnv->pResultRowHashTable = NULL; - pRuntimeEnv->pool = destroyResultRowPool(pRuntimeEnv->pool); - taosArrayDestroyEx(pRuntimeEnv->prevResult, freeInterResult); - pRuntimeEnv->prevResult = NULL; - taosHashCleanup(pRuntimeEnv->pTableRetrieveTsMap); pRuntimeEnv->pTableRetrieveTsMap = NULL; destroyOperatorInfo(pRuntimeEnv->proot); + + pRuntimeEnv->pool = destroyResultRowPool(pRuntimeEnv->pool); + taosArrayDestroyEx(pRuntimeEnv->prevResult, freeInterResult); + pRuntimeEnv->prevResult = NULL; + } static bool needBuildResAfterQueryComplete(SQInfo* pQInfo) { diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index da7dbcd501..3e125d73da 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -66,10 +66,8 @@ void cleanupResultRowInfo(SResultRowInfo *pResultRowInfo) { return; } - if (pResultRowInfo->type == TSDB_DATA_TYPE_BINARY || pResultRowInfo->type == TSDB_DATA_TYPE_NCHAR) { - for(int32_t i = 0; i < pResultRowInfo->size; ++i) { - tfree(pResultRowInfo->pResult[i]->key); - } + for(int32_t i = 0; i < pResultRowInfo->size; ++i) { + tfree(pResultRowInfo->pResult[i]->key); } tfree(pResultRowInfo->pResult); @@ -153,11 +151,8 @@ void clearResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResultRow, int16 pResultRow->offset = -1; pResultRow->closed = false; - if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { - tfree(pResultRow->key); - } else { - pResultRow->win = TSWINDOW_INITIALIZER; - } + tfree(pResultRow->key); + pResultRow->win = TSWINDOW_INITIALIZER; } // TODO refactor: use macro From d2a0fc5b931459022819c94c6942e72940df4f85 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 30 Mar 2021 16:36:16 +0800 Subject: [PATCH 023/178] [TD-3314]: new staging directory for deleting vnodes --- src/dnode/src/dnodeMain.c | 12 +++ src/vnode/inc/vnodeBackup.h | 32 +++++++ src/vnode/src/vnodeBackup.c | 172 ++++++++++++++++++++++++++++++++++++ src/vnode/src/vnodeMain.c | 15 ++-- src/vnode/src/vnodeMgmt.c | 2 + 5 files changed, 224 insertions(+), 9 deletions(-) create mode 100644 src/vnode/inc/vnodeBackup.h create mode 100644 src/vnode/src/vnodeBackup.c diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index 2cd5c637e5..ea0ef4655d 100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -237,6 +237,18 @@ static int32_t dnodeInitStorage() { return -1; } + TDIR *tdir = tfsOpendir("vnode_bak/.staging"); + if (tfsReaddir(tdir) != NULL) { + 
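  // editor's note, not part of the patch: a non-empty vnode_bak/.staging at
  // startup means an earlier backup/drop was interrupted mid-move, so the
  // dnode refuses to start rather than guess at half-migrated vnode state.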
dError("vnode_bak/.staging dir not empty, fix it first."); + tfsClosedir(tdir); + return -1; + } + + if (tfsMkdir("vnode_bak/.staging") < 0) { + dError("failed to create vnode_bak/.staging dir since %s", tstrerror(terrno)); + return -1; + } + dnodeCheckDataDirOpenned(tsDnodeDir); dInfo("dnode storage is initialized at %s", tsDnodeDir); diff --git a/src/vnode/inc/vnodeBackup.h b/src/vnode/inc/vnodeBackup.h new file mode 100644 index 0000000000..0a6b26546c --- /dev/null +++ b/src/vnode/inc/vnodeBackup.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_VNODE_BACKUP_H +#define TDENGINE_VNODE_BACKUP_H + +#ifdef __cplusplus +extern "C" { +#endif +#include "vnodeInt.h" + +int32_t vnodeInitBackup(); +void vnodeCleanupBackup(); +int32_t vnodeBackup(int32_t vgId); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/vnode/src/vnodeBackup.c b/src/vnode/src/vnodeBackup.c new file mode 100644 index 0000000000..a0a975be2b --- /dev/null +++ b/src/vnode/src/vnodeBackup.c @@ -0,0 +1,172 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#define _DEFAULT_SOURCE +#include "os.h" +#include "taoserror.h" +#include "taosmsg.h" +#include "tutil.h" +#include "tqueue.h" +#include "tglobal.h" +#include "tfs.h" +#include "vnodeBackup.h" +#include "vnodeMain.h" + +typedef struct { + int32_t vgId; +} SVBackupMsg; + +typedef struct { + pthread_t thread; + int32_t workerId; +} SVBackupWorker; + +typedef struct { + int32_t num; + SVBackupWorker *worker; +} SVBackupWorkerPool; + +static SVBackupWorkerPool tsVBackupPool; +static taos_qset tsVBackupQset; +static taos_queue tsVBackupQueue; + +static void vnodeProcessBackupMsg(SVBackupMsg *pMsg) { + int32_t vgId = pMsg->vgId; + char newDir[TSDB_FILENAME_LEN] = {0}; + char stagingDir[TSDB_FILENAME_LEN] = {0}; + + sprintf(newDir, "%s/vnode%d", "vnode_bak", vgId); + sprintf(stagingDir, "%s/.staging/vnode%d", "vnode_bak", vgId); + + if (tsEnableVnodeBak) { + tfsRmdir(newDir); + tfsRename(stagingDir, newDir); + } else { + vInfo("vgId:%d, vnode backup not enabled", vgId); + + tfsRmdir(stagingDir); + } +} + +static void *vnodeBackupFunc(void *param) { + while (1) { + SVBackupMsg *pMsg = NULL; + if (taosReadQitemFromQset(tsVBackupQset, NULL, (void **)&pMsg, NULL) == 0) { + vDebug("qset:%p, vbackup got no message from qset, exiting", tsVBackupQset); + break; + } + + vTrace("vgId:%d, will be processed in vbackup queue", pMsg->vgId); + vnodeProcessBackupMsg(pMsg); + + vTrace("vgId:%d, disposed in vbackup worker", pMsg->vgId); + taosFreeQitem(pMsg); + } + + return NULL; +} + +static int32_t vnodeStartBackup() { + tsVBackupQueue = taosOpenQueue(); + if (tsVBackupQueue == NULL) return TSDB_CODE_DND_OUT_OF_MEMORY; + + taosAddIntoQset(tsVBackupQset, tsVBackupQueue, NULL); + + for (int32_t i = 0; i < tsVBackupPool.num; ++i) { + SVBackupWorker *pWorker = tsVBackupPool.worker + i; + pWorker->workerId = i; + + pthread_attr_t thAttr; + pthread_attr_init(&thAttr); + pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE); + + if (pthread_create(&pWorker->thread, &thAttr, vnodeBackupFunc, pWorker) != 0) { + vError("failed to create thread to process vbackup queue, reason:%s", strerror(errno)); + } + + pthread_attr_destroy(&thAttr); + + vDebug("vbackup:%d is launched, total:%d", pWorker->workerId, tsVBackupPool.num); + } + + vDebug("vbackup queue:%p is allocated", tsVBackupQueue); + + return TSDB_CODE_SUCCESS; +} + +static int32_t vnodeWriteIntoBackupWorker(int32_t vgId) { + SVBackupMsg *pMsg = taosAllocateQitem(sizeof(SVBackupMsg)); + if (pMsg == NULL) return TSDB_CODE_VND_OUT_OF_MEMORY; + + pMsg->vgId = vgId; + + int32_t code = taosWriteQitem(tsVBackupQueue, TAOS_QTYPE_RPC, pMsg); + if (code == 0) code = TSDB_CODE_DND_ACTION_IN_PROGRESS; + + return code; +} + +int32_t vnodeBackup(int32_t vgId) { + vTrace("vgId:%d, will backup", vgId); + return vnodeWriteIntoBackupWorker(vgId); +} + +int32_t vnodeInitBackup() { + tsVBackupQset = taosOpenQset(); + + tsVBackupPool.num = 1; + tsVBackupPool.worker = calloc(sizeof(SVBackupWorker), tsVBackupPool.num); + + if (tsVBackupPool.worker == NULL) return -1; + for (int32_t i = 0; i < tsVBackupPool.num; ++i) { + SVBackupWorker *pWorker = tsVBackupPool.worker + i; + pWorker->workerId = i; + vDebug("vbackup:%d is created", i); + } + + vDebug("vbackup is initialized, num:%d qset:%p", tsVBackupPool.num, tsVBackupQset); + + return vnodeStartBackup(); +} + +void vnodeCleanupBackup() { + for (int32_t i = 0; i < tsVBackupPool.num; ++i) { + SVBackupWorker *pWorker = tsVBackupPool.worker + i; + if (taosCheckPthreadValid(pWorker->thread)) { + 
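      // editor's note, not part of the patch: resuming the qset wakes the
      // worker blocked in taosReadQitemFromQset(), which then sees the queue
      // drained and exits vnodeBackupFunc(); the join loop below reaps it.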
taosQsetThreadResume(tsVBackupQset); + } + vDebug("vbackup:%d is closed", i); + } + + for (int32_t i = 0; i < tsVBackupPool.num; ++i) { + SVBackupWorker *pWorker = tsVBackupPool.worker + i; + vDebug("vbackup:%d start to join", i); + if (taosCheckPthreadValid(pWorker->thread)) { + pthread_join(pWorker->thread, NULL); + } + vDebug("vbackup:%d join success", i); + } + + vDebug("vbackup is closed, qset:%p", tsVBackupQset); + + taosCloseQset(tsVBackupQset); + tsVBackupQset = NULL; + + tfree(tsVBackupPool.worker); + + vDebug("vbackup queue:%p is freed", tsVBackupQueue); + taosCloseQueue(tsVBackupQueue); + tsVBackupQueue = NULL; +} diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index 441e951250..7a14cc7f5b 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -27,6 +27,7 @@ #include "vnodeVersion.h" #include "vnodeMgmt.h" #include "vnodeWorker.h" +#include "vnodeBackup.h" #include "vnodeMain.h" static int32_t vnodeProcessTsdbStatus(void *arg, int32_t status, int32_t eno); @@ -448,18 +449,14 @@ void vnodeDestroy(SVnodeObj *pVnode) { if (pVnode->dropped) { char rootDir[TSDB_FILENAME_LEN] = {0}; - char newDir[TSDB_FILENAME_LEN] = {0}; + char stagingDir[TSDB_FILENAME_LEN] = {0}; sprintf(rootDir, "%s/vnode%d", "vnode", vgId); - sprintf(newDir, "%s/vnode%d", "vnode_bak", vgId); + sprintf(stagingDir, "%s/.staging/vnode%d", "vnode_bak", vgId); - if (0 == tsEnableVnodeBak) { - vInfo("vgId:%d, vnode backup not enabled", pVnode->vgId); - } else { - tfsRmdir(newDir); - tfsRename(rootDir, newDir); - } + tfsRename(rootDir, stagingDir); + + vnodeBackup(vgId); - tfsRmdir(rootDir); dnodeSendStatusMsgToMnode(); } diff --git a/src/vnode/src/vnodeMgmt.c b/src/vnode/src/vnodeMgmt.c index 71d9bc07f5..32f9532138 100644 --- a/src/vnode/src/vnodeMgmt.c +++ b/src/vnode/src/vnodeMgmt.c @@ -17,6 +17,7 @@ #include "os.h" #include "dnode.h" #include "vnodeStatus.h" +#include "vnodeBackup.h" #include "vnodeWorker.h" #include "vnodeRead.h" #include "vnodeWrite.h" @@ -29,6 +30,7 @@ static void vnodeCleanupHash(void); static void vnodeIncRef(void *ptNode); static SStep tsVnodeSteps[] = { + {"vnode-backup", vnodeInitBackup, vnodeCleanupBackup}, {"vnode-worker", vnodeInitMWorker, vnodeCleanupMWorker}, {"vnode-write", vnodeInitWrite, vnodeCleanupWrite}, {"vnode-read", vnodeInitRead, vnodeCleanupRead}, From 61d7d9ab41b52e1327c40987facff6c1f547e91d Mon Sep 17 00:00:00 2001 From: nianhongdong Date: Wed, 31 Mar 2021 13:46:01 +0800 Subject: [PATCH 024/178] Update JDBCDemo.java MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The database and table names in the example statements on lines 71 and 76 were not built dynamically, so changing the database or table name earlier in the test caused the demo to fail at runtime. --- .../JDBCDemo/src/main/java/com/taosdata/example/JDBCDemo.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JDBCDemo.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JDBCDemo.java index 07a21e79d1..da865b3ffd 100644 --- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JDBCDemo.java +++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JDBCDemo.java @@ -68,12 +68,12 @@ public class JDBCDemo { } private void insert() { - final String sql = "insert into test.weather (ts, temperature, humidity) values(now, 20.5, 34)"; + final String sql = "insert into " + dbName + "." 
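// editor's note, not part of the patch: assembling the statement from the
// dbName/tbName fields keeps insert() and select() in sync with whatever
// names are configured at the top of the demo class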
+ tbName + " (ts, temperature, humidity) values(now, 20.5, 34)"; exuete(sql); } private void select() { - final String sql = "select * from test.weather"; + final String sql = "select * from "+ dbName + "." + tbName; executeQuery(sql); } From 536a9e9f3fcb7792ee3bc5943bf342d6d03a8cb5 Mon Sep 17 00:00:00 2001 From: slguan Date: Wed, 31 Mar 2021 16:55:31 +0800 Subject: [PATCH 025/178] remove FResetVersion function --- src/inc/tsync.h | 4 ---- src/sync/inc/syncInt.h | 1 - src/sync/src/syncMain.c | 1 - src/sync/src/syncRestore.c | 1 - src/vnode/inc/vnodeSync.h | 1 - src/vnode/src/vnodeMain.c | 1 - src/vnode/src/vnodeSync.c | 19 ++----------------- 7 files changed, 2 insertions(+), 26 deletions(-) diff --git a/src/inc/tsync.h b/src/inc/tsync.h index 99dfd3a6a3..d1b68e3f5a 100644 --- a/src/inc/tsync.h +++ b/src/inc/tsync.h @@ -79,9 +79,6 @@ typedef void (*FStopSyncFile)(int32_t vgId, uint64_t fversion); // get file version typedef int32_t (*FGetVersion)(int32_t vgId, uint64_t *fver, uint64_t *vver); -// reset version -typedef int32_t (*FResetVersion)(int32_t vgId, uint64_t fver); - typedef int32_t (*FSendFile)(void *tsdb, SOCKET socketFd); typedef int32_t (*FRecvFile)(void *tsdb, SOCKET socketFd); @@ -99,7 +96,6 @@ typedef struct { FStartSyncFile startSyncFileFp; FStopSyncFile stopSyncFileFp; FGetVersion getVersionFp; - FResetVersion resetVersionFp; FSendFile sendFileFp; FRecvFile recvFileFp; } SSyncInfo; diff --git a/src/sync/inc/syncInt.h b/src/sync/inc/syncInt.h index 2b87938474..91613ae351 100644 --- a/src/sync/inc/syncInt.h +++ b/src/sync/inc/syncInt.h @@ -117,7 +117,6 @@ typedef struct SSyncNode { FStartSyncFile startSyncFileFp; FStopSyncFile stopSyncFileFp; FGetVersion getVersionFp; - FResetVersion resetVersionFp; FSendFile sendFileFp; FRecvFile recvFileFp; pthread_mutex_t mutex; diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index 76d4379c5f..d21743d40a 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -182,7 +182,6 @@ int64_t syncStart(const SSyncInfo *pInfo) { pNode->startSyncFileFp = pInfo->startSyncFileFp; pNode->stopSyncFileFp = pInfo->stopSyncFileFp; pNode->getVersionFp = pInfo->getVersionFp; - pNode->resetVersionFp = pInfo->resetVersionFp; pNode->sendFileFp = pInfo->sendFileFp; pNode->recvFileFp = pInfo->recvFileFp; diff --git a/src/sync/src/syncRestore.c b/src/sync/src/syncRestore.c index 22d0a27581..c0d66316cd 100644 --- a/src/sync/src/syncRestore.c +++ b/src/sync/src/syncRestore.c @@ -238,7 +238,6 @@ static int32_t syncRestoreDataStepByStep(SSyncPeer *pPeer) { (*pNode->stopSyncFileFp)(pNode->vgId, fversion); nodeVersion = fversion; - if (pNode->resetVersionFp) (*pNode->resetVersionFp)(pNode->vgId, fversion); sInfo("%s, start to restore wal, fver:%" PRIu64, pPeer->id, nodeVersion); uint64_t wver = 0; diff --git a/src/vnode/inc/vnodeSync.h b/src/vnode/inc/vnodeSync.h index 75d7ffbabd..28fb63dd6a 100644 --- a/src/vnode/inc/vnodeSync.h +++ b/src/vnode/inc/vnodeSync.h @@ -30,7 +30,6 @@ void vnodeStopSyncFile(int32_t vgId, uint64_t fversion); void vnodeConfirmForard(int32_t vgId, void *wparam, int32_t code); int32_t vnodeWriteToCache(int32_t vgId, void *wparam, int32_t qtype, void *rparam); int32_t vnodeGetVersion(int32_t vgId, uint64_t *fver, uint64_t *wver); -int32_t vnodeResetVersion(int32_t vgId, uint64_t fver); void vnodeConfirmForward(void *pVnode, uint64_t version, int32_t code, bool force); diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index 441e951250..96a9decf4b 100644 --- a/src/vnode/src/vnodeMain.c +++ 
b/src/vnode/src/vnodeMain.c @@ -364,7 +364,6 @@ int32_t vnodeOpen(int32_t vgId) { syncInfo.startSyncFileFp = vnodeStartSyncFile; syncInfo.stopSyncFileFp = vnodeStopSyncFile; syncInfo.getVersionFp = vnodeGetVersion; - syncInfo.resetVersionFp = vnodeResetVersion; syncInfo.sendFileFp = tsdbSyncSend; syncInfo.recvFileFp = tsdbSyncRecv; syncInfo.pTsdb = pVnode->tsdb; diff --git a/src/vnode/src/vnodeSync.c b/src/vnode/src/vnodeSync.c index 929dd15926..aa4cf0fc15 100644 --- a/src/vnode/src/vnodeSync.c +++ b/src/vnode/src/vnodeSync.c @@ -107,8 +107,9 @@ void vnodeStopSyncFile(int32_t vgId, uint64_t fversion) { pVnode->fversion = fversion; pVnode->version = fversion; vnodeSaveVersion(pVnode); + walResetVersion(pVnode->wal, fversion); - vDebug("vgId:%d, datafile is synced, fver:%" PRIu64 " vver:%" PRIu64, vgId, fversion, fversion); + vInfo("vgId:%d, datafile is synced, fver:%" PRIu64 " vver:%" PRIu64, vgId, fversion, fversion); vnodeSetReadyStatus(pVnode); vnodeRelease(pVnode); @@ -158,22 +159,6 @@ int32_t vnodeGetVersion(int32_t vgId, uint64_t *fver, uint64_t *wver) { return code; } -int32_t vnodeResetVersion(int32_t vgId, uint64_t fver) { - SVnodeObj *pVnode = vnodeAcquire(vgId); - if (pVnode == NULL) { - vError("vgId:%d, vnode not found while reset version", vgId); - return -1; - } - - pVnode->fversion = fver; - pVnode->version = fver; - walResetVersion(pVnode->wal, fver); - vInfo("vgId:%d, version reset to %" PRIu64, vgId, fver); - - vnodeRelease(pVnode); - return 0; -} - void vnodeConfirmForward(void *vparam, uint64_t version, int32_t code, bool force) { SVnodeObj *pVnode = vparam; syncConfirmForward(pVnode->sync, version, code, force); From 0bbfc5a532d94ed49a35f2164ee84c9cd7aeeaa1 Mon Sep 17 00:00:00 2001 From: slguan Date: Wed, 31 Mar 2021 18:32:56 +0800 Subject: [PATCH 026/178] Bxiao request changed from 100 to 1000 --- src/inc/taosdef.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index 252e37a5d9..024bc198df 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -259,7 +259,7 @@ do { \ #define TSDB_MIN_TABLES 4 #define TSDB_MAX_TABLES 10000000 #define TSDB_DEFAULT_TABLES 1000000 -#define TSDB_TABLES_STEP 100 +#define TSDB_TABLES_STEP 1000 #define TSDB_MIN_DAYS_PER_FILE 1 #define TSDB_MAX_DAYS_PER_FILE 3650 From 6cb5af8072ae1d4a196c7d666d459654146aa8e7 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 31 Mar 2021 21:40:40 +0800 Subject: [PATCH 027/178] Hotfix/sangshuduo/td 3607 taosdemo buffer overflow (#5636) * [TD-3607] : fix taosdemo buffer overflow. * [TD-3607] : taosdemo buffer overflow. add tmp buffer. * [TD-3607] : taosdemo buffer overflow. fix data generation. * [TD-3607] : taosdemo buffer overflow. fix normal table writting. * [TD-3607] : taosdemo buffer overflow. remove tail spaces. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table test case. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table case. 
Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 240 ++++++++++-------- tests/pytest/fulltest.sh | 24 +- ...taosdemoTest2.py => taosdemoTestTblAlt.py} | 50 +++- 3 files changed, 185 insertions(+), 129 deletions(-) rename tests/pytest/tools/{taosdemoTest2.py => taosdemoTestTblAlt.py} (63%) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 3725712339..9f367b41f8 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -124,7 +124,7 @@ typedef enum enum_INSERT_MODE { typedef enum enumQUERY_TYPE { NO_INSERT_TYPE, - INSERT_TYPE, + INSERT_TYPE, QUERY_TYPE_BUT } QUERY_TYPE; @@ -229,7 +229,7 @@ typedef struct SColumn_S { typedef struct SSuperTable_S { char sTblName[MAX_TB_NAME_SIZE+1]; int childTblCount; - bool childTblExists; // 0: no, 1: yes + bool childTblExists; // 0: no, 1: yes int batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql int8_t autoCreateTable; // 0: create sub table, 1: auto create sub table char childTblPrefix[MAX_TB_NAME_SIZE]; @@ -239,15 +239,15 @@ typedef struct SSuperTable_S { int childTblOffset; int multiThreadWriteOneTbl; // 0: no, 1: yes - int interlaceRows; // + int interlaceRows; // int disorderRatio; // 0: no disorder, >0: x% int disorderRange; // ms or us by database precision - int maxSqlLen; // + int maxSqlLen; // int insertInterval; // insert interval, will override global insert interval int64_t insertRows; // 0: no limit int timeStampStep; - char startTimestamp[MAX_TB_NAME_SIZE]; // + char startTimestamp[MAX_TB_NAME_SIZE]; // char sampleFormat[MAX_TB_NAME_SIZE]; // csv, json char sampleFile[MAX_FILE_NAME_LEN+1]; char tagsFile[MAX_FILE_NAME_LEN+1]; @@ -539,7 +539,7 @@ SArguments g_args = { true, // insert_only false, // debug_print false, // verbose_print - false, // performance statistic print + false, // performance statistic print false, // answer_yes; "./output.txt", // output_file 0, // mode : sync or async @@ -641,7 +641,7 @@ static void printHelp() { "The password to use when connecting to the server. Default is 'taosdata'."); printf("%s%s%s%s\n", indent, "-c", indent, "Configuration directory. Default is '/etc/taos/'."); -#endif +#endif printf("%s%s%s%s\n", indent, "-h", indent, "The host to connect to TDengine. Default is localhost."); printf("%s%s%s%s\n", indent, "-p", indent, @@ -684,7 +684,7 @@ static void printHelp() { "Print debug info."); printf("%s%s%s%s\n", indent, "-V, --version", indent, "Print version info."); -/* printf("%s%s%s%s\n", indent, "-D", indent, +/* printf("%s%s%s%s\n", indent, "-D", indent, "if elete database if exists. 
0: no, 1: yes, default is 1"); */ } @@ -749,7 +749,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { && strcasecmp(argv[i], "SMALLINT") && strcasecmp(argv[i], "BIGINT") && strcasecmp(argv[i], "DOUBLE") - && strcasecmp(argv[i], "BINARY") + && strcasecmp(argv[i], "BINARY") && strcasecmp(argv[i], "NCHAR")) { printHelp(); ERROR_EXIT( "Invalid data_type!\n"); @@ -1762,7 +1762,7 @@ static void printfQuerySystemInfo(TAOS * taos) { } for (int i = 0; i < dbCount; i++) { - // printf database info + // printf database info printfDbInfoForQueryToFile(filename, dbInfos[i], i); // show db.vgroups @@ -2098,7 +2098,7 @@ static int calcRowLen(SSuperTable* superTbls) { lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6; } else if (strcasecmp(dataType, "FLOAT") == 0) { lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 22; - } else if (strcasecmp(dataType, "DOUBLE") == 0) { + } else if (strcasecmp(dataType, "DOUBLE") == 0) { lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 42; } else { printf("get error tag type : %s\n", dataType); @@ -2262,7 +2262,7 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName, /* if (TBL_ALREADY_EXISTS == superTbls->childTblExists) { - //get all child table name use cmd: select tbname from superTblName; + //get all child table name use cmd: select tbname from superTblName; int childTblCount = 10000; superTbls->childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN); if (superTbls->childTblName == NULL) { @@ -2289,7 +2289,7 @@ static int createSuperTable(TAOS * taos, char* dbName, int lenOfOneRow = 0; for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) { char* dataType = superTbls->columns[colIndex].dataType; - + if (strcasecmp(dataType, "BINARY") == 0) { len += snprintf(cols + len, STRING_LEN - len, ", col%d %s(%d)", colIndex, "BINARY", @@ -2386,7 +2386,7 @@ static int createSuperTable(TAOS * taos, char* dbName, len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "FLOAT"); lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 22; - } else if (strcasecmp(dataType, "DOUBLE") == 0) { + } else if (strcasecmp(dataType, "DOUBLE") == 0) { len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "DOUBLE"); lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 42; @@ -2638,7 +2638,7 @@ static void* createTable(void *sarg) lastPrintTime = currentPrintTime; } } - + if (0 != len) { verbosePrint("%s() %d buffer: %s\n", __func__, __LINE__, buffer); if (0 != queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE, false)) { @@ -2703,7 +2703,7 @@ static int startMultiThreadCreateChildTable( t_info->minDelay = INT16_MAX; pthread_create(pids + i, NULL, createTable, t_info); } - + for (int i = 0; i < threads; i++) { pthread_join(pids[i], NULL); } @@ -2920,7 +2920,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( cJSON* stbInfo, SSuperTable* superTbls) { bool ret = false; - // columns + // columns cJSON *columns = cJSON_GetObjectItem(stbInfo, "columns"); if (columns && columns->type != cJSON_Array) { printf("ERROR: failed to read json, columns not found\n"); @@ -2958,7 +2958,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( count = 1; } - // column info + // column info memset(&columnCase, 0, sizeof(StrColumn)); cJSON *dataType = cJSON_GetObjectItem(column, "type"); if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) { @@ -2989,7 +2989,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( count = 1; index = 0; - // tags + // tags cJSON *tags = 
cJSON_GetObjectItem(stbInfo, "tags"); if (!tags || tags->type != cJSON_Array) { debugPrint("%s() LN%d, failed to read json, tags not found\n", __func__, __LINE__); @@ -3018,7 +3018,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( count = 1; } - // column info + // column info memset(&columnCase, 0, sizeof(StrColumn)); cJSON *dataType = cJSON_GetObjectItem(tag, "type"); if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) { @@ -3166,7 +3166,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { if (numRecPerReq && numRecPerReq->type == cJSON_Number) { g_args.num_of_RPR = numRecPerReq->valueint; } else if (!numRecPerReq) { - g_args.num_of_RPR = 100; + g_args.num_of_RPR = 0xffff; } else { errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n", __func__, __LINE__); goto PARSE_OVER; @@ -3209,7 +3209,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON* dbinfos = cJSON_GetArrayItem(dbs, i); if (dbinfos == NULL) continue; - // dbinfo + // dbinfo cJSON *dbinfo = cJSON_GetObjectItem(dbinfos, "dbinfo"); if (!dbinfo || dbinfo->type != cJSON_Object) { printf("ERROR: failed to read json, dbinfo not found\n"); @@ -3615,7 +3615,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } g_Dbs.db[i].superTbls[j].maxSqlLen = len; } else if (!maxSqlLen) { - g_Dbs.db[i].superTbls[j].maxSqlLen = TSDB_MAX_SQL_LEN; + g_Dbs.db[i].superTbls[j].maxSqlLen = g_args.max_sql_len; } else { printf("ERROR: failed to read json, maxSqlLen not found\n"); goto PARSE_OVER; @@ -3748,7 +3748,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* user = cJSON_GetObjectItem(root, "user"); if (user && user->type == cJSON_String && user->valuestring != NULL) { - tstrncpy(g_queryInfo.user, user->valuestring, MAX_USERNAME_SIZE); + tstrncpy(g_queryInfo.user, user->valuestring, MAX_USERNAME_SIZE); } else if (!user) { tstrncpy(g_queryInfo.user, "root", MAX_USERNAME_SIZE); ; } @@ -3805,7 +3805,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { goto PARSE_OVER; } - // super_table_query + // super_table_query cJSON *specifiedQuery = cJSON_GetObjectItem(root, "specified_table_query"); if (!specifiedQuery) { g_queryInfo.specifiedQueryInfo.concurrent = 0; @@ -3930,7 +3930,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } } - // sub_table_query + // sub_table_query cJSON *superQuery = cJSON_GetObjectItem(root, "super_table_query"); if (!superQuery) { g_queryInfo.superQueryInfo.threadCnt = 0; @@ -3996,7 +3996,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* subinterval = cJSON_GetObjectItem(superQuery, "interval"); if (subinterval && subinterval->type == cJSON_Number) { g_queryInfo.superQueryInfo.subscribeInterval = subinterval->valueint; - } else if (!subinterval) { + } else if (!subinterval) { //printf("failed to read json, subscribe interval no found\n"); //goto PARSE_OVER; g_queryInfo.superQueryInfo.subscribeInterval = 10000; @@ -4200,71 +4200,77 @@ static int getRowDataFromSample(char* dataBuf, int maxLen, int64_t timestamp, dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")"); (*sampleUsePos)++; - + return dataLen; } -static int generateRowData(char* dataBuf, int maxLen, int64_t timestamp, SSuperTable* stbInfo) { - int dataLen = 0; - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "(%" PRId64 ", ", timestamp); +static int generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stbInfo) { + int dataLen = 0; + char *pstr = recBuf; + int maxLen = MAX_DATA_SIZE; + + dataLen += snprintf(pstr + dataLen, 
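/*
 * A caveat on the dataLen accumulation used in generateRowData(): per C99,
 * snprintf() returns the length the output would have had, so once a row is
 * truncated dataLen can exceed maxLen and the next "maxLen - dataLen" goes
 * negative (and then wraps when converted to size_t). A clamped append
 * helper avoids that; a sketch assuming <stdarg.h>, not code from this
 * file:
 *
 *   static int appendf(char *buf, int *len, int cap, const char *fmt, ...) {
 *     va_list ap;
 *     va_start(ap, fmt);
 *     int n = vsnprintf(buf + *len, cap - *len, fmt, ap);
 *     va_end(ap);
 *     if (n < 0 || n >= cap - *len) return -1;  // would truncate: report it
 *     *len += n;                                // safe to commit the write
 *     return 0;
 *   }
 */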
maxLen - dataLen, "(%" PRId64 ", ", timestamp); + for (int i = 0; i < stbInfo->columnCount; i++) { if ((0 == strncasecmp(stbInfo->columns[i].dataType, "binary", 6)) || (0 == strncasecmp(stbInfo->columns[i].dataType, "nchar", 5))) { if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) { errorPrint( "binary or nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); - return (-1); + return -1; } char* buf = (char*)calloc(stbInfo->columns[i].dataLen+1, 1); if (NULL == buf) { errorPrint( "calloc failed! size:%d\n", stbInfo->columns[i].dataLen); - return (-1); + return -1; } rand_string(buf, stbInfo->columns[i].dataLen); - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "\'%s\', ", buf); + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\', ", buf); tmfree(buf); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "int", 3)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%d, ", rand_int()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "bigint", 6)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%"PRId64", ", rand_bigint()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "float", 5)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%f, ", rand_float()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "double", 6)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%f, ", rand_double()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "smallint", 8)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%d, ", rand_smallint()); + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%d, ", rand_smallint()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "tinyint", 7)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%d, ", rand_tinyint()); + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%d, ", rand_tinyint()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "bool", 4)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%d, ", rand_bool()); + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%d, ", rand_bool()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "timestamp", 9)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%"PRId64", ", rand_bigint()); + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%"PRId64", ", rand_bigint()); } else { errorPrint( "No support data type: %s\n", stbInfo->columns[i].dataType); - return (-1); + return -1; } } dataLen -= 2; - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")"); + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, ")"); - return dataLen; + verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf); + + return strlen(recBuf); } -static int32_t generateData(char *res, char **data_type, +static int32_t generateData(char *recBuf, char **data_type, int num_of_cols, int64_t timestamp, int lenOfBinary) { - memset(res, 0, MAX_DATA_SIZE); - char *pstr = res; + memset(recBuf, 0, MAX_DATA_SIZE); + char *pstr = recBuf; pstr += sprintf(pstr, "(%" PRId64, timestamp); int c = 0; @@ -4285,7 +4291,7 @@ static int32_t generateData(char *res, char **data_type, } else if (strcasecmp(data_type[i % c], "smallint") == 0) { pstr += sprintf(pstr, ", %d", rand_smallint()); } else if 
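/*
 * Worth noting for generateData() here: the row is built with plain
 * sprintf, and the strlen(recBuf) > MAX_DATA_SIZE check at the end of the
 * function runs only after the buffer has already been written, so it can
 * detect an overflow but not prevent one. A bounds-safe variant would
 * thread the remaining capacity through every append, along the lines of
 * the vsnprintf sketch above.
 */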
(strcasecmp(data_type[i % c], "int") == 0) { - pstr += sprintf(pstr, ", %d", rand_int()); + pstr += sprintf(pstr, ", %d", rand_int()); } else if (strcasecmp(data_type[i % c], "bigint") == 0) { pstr += sprintf(pstr, ", %" PRId64, rand_bigint()); } else if (strcasecmp(data_type[i % c], "float") == 0) { @@ -4308,7 +4314,7 @@ static int32_t generateData(char *res, char **data_type, free(s); } - if (pstr - res > MAX_DATA_SIZE) { + if (strlen(recBuf) > MAX_DATA_SIZE) { perror("column length too long, abort"); exit(-1); } @@ -4316,7 +4322,7 @@ static int32_t generateData(char *res, char **data_type, pstr += sprintf(pstr, ")"); - return (int32_t)(pstr - res); + return (int32_t)strlen(recBuf); } static int prepareSampleDataForSTable(SSuperTable *superTblInfo) { @@ -4325,9 +4331,9 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) { sampleDataBuf = calloc( superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1); if (sampleDataBuf == NULL) { - errorPrint("%s() LN%d, Failed to calloc %d Bytes, reason:%s\n", + errorPrint("%s() LN%d, Failed to calloc %d Bytes, reason:%s\n", __func__, __LINE__, - superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, + superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, strerror(errno)); return -1; } @@ -4396,7 +4402,7 @@ static void getTableName(char *pTblName, threadInfo* pThreadInfo, int tableSeq) static int generateDataTail(char *tableName, int32_t tableSeq, threadInfo* pThreadInfo, SSuperTable* superTblInfo, - int batch, char* buffer, int64_t insertRows, + int batch, char* buffer, int remainderBufLen, int64_t insertRows, int64_t startFrom, uint64_t startTime, int *pSamplePos, int *dataLen) { int len = 0; int ncols_per_record = 1; // count first col ts @@ -4413,18 +4419,19 @@ static int generateDataTail(char *tableName, int32_t tableSeq, int k = 0; for (k = 0; k < batch;) { - if (superTblInfo) { - int retLen = 0; + char data[MAX_DATA_SIZE]; + int retLen = 0; - if (0 == strncasecmp(superTblInfo->dataSource, + if (superTblInfo) { + if (0 == strncasecmp(superTblInfo->dataSource, "sample", strlen("sample"))) { retLen = getRowDataFromSample( - buffer + len, - superTblInfo->maxSqlLen - len, + data, + remainderBufLen, startTime + superTblInfo->timeStampStep * k, superTblInfo, pSamplePos); - } else if (0 == strncasecmp(superTblInfo->dataSource, + } else if (0 == strncasecmp(superTblInfo->dataSource, "rand", strlen("rand"))) { int rand_num = rand_tinyint() % 100; if (0 != superTblInfo->disorderRatio @@ -4433,60 +4440,56 @@ static int generateDataTail(char *tableName, int32_t tableSeq, + superTblInfo->timeStampStep * k - taosRandom() % superTblInfo->disorderRange; retLen = generateRowData( - buffer + len, - superTblInfo->maxSqlLen - len, + data, d, superTblInfo); } else { retLen = generateRowData( - buffer + len, - superTblInfo->maxSqlLen - len, + data, startTime + superTblInfo->timeStampStep * k, superTblInfo); - } - } + } + } - if (retLen < 0) { - return -1; - } + if (retLen > remainderBufLen) { + break; + } - len += retLen; - - if (len >= (superTblInfo->maxSqlLen - 256)) { // reserve for overwrite - k++; - break; - } + buffer += sprintf(buffer, " %s", data); + k++; + len += retLen; + remainderBufLen -= retLen; } else { int rand_num = taosRandom() % 100; - char data[MAX_DATA_SIZE]; - char **data_type = g_args.datatype; - int lenOfBinary = g_args.len_of_binary; + char **data_type = g_args.datatype; + int lenOfBinary = g_args.len_of_binary; if ((g_args.disorderRatio != 0) && (rand_num < g_args.disorderRange)) { - int64_t d = startTime + 
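/*
 * Both branches of generateDataTail() implement taosdemo's disorder
 * injection: with some probability a row's timestamp is pulled backwards by
 * a bounded random offset, so the server receives out-of-order writes.
 * (Worth flagging: the guard right here compares rand_num with
 * g_args.disorderRange rather than g_args.disorderRatio, unlike the
 * super-table branch above -- possibly unintended.) The core pattern, as a
 * sketch with illustrative names, assuming range > 0 whenever ratio > 0:
 *
 *   int64_t nextTs(int64_t base, int64_t step, int k, int ratio, int range) {
 *     int64_t ts = base + step * k;
 *     if (ratio > 0 && rand() % 100 < ratio) {
 *       ts -= rand() % range;            // pull this row back in time
 *     }
 *     return ts;
 *   }
 */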
DEFAULT_TIMESTAMP_STEP * k + int64_t d = startTime + DEFAULT_TIMESTAMP_STEP * k - taosRandom() % 1000000 + rand_num; - len = generateData(data, data_type, + retLen = generateData(data, data_type, ncols_per_record, d, lenOfBinary); } else { - len = generateData(data, data_type, + retLen = generateData(data, data_type, ncols_per_record, startTime + DEFAULT_TIMESTAMP_STEP * k, lenOfBinary); } + if (len > remainderBufLen) + break; + buffer += sprintf(buffer, " %s", data); - if (strlen(buffer) >= (g_args.max_sql_len - 256)) { // too long - k++; - break; - } + k++; + len += retLen; + remainderBufLen -= retLen; } verbosePrint("%s() LN%d len=%d k=%d \nbuffer=%s\n", __func__, __LINE__, len, k, buffer); - k++; startFrom ++; if (startFrom >= insertRows) { @@ -4570,20 +4573,25 @@ static int generateProgressiveDataBuffer(char *pTblName, assert(buffer != NULL); - memset(buffer, 0, superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len); + int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; + int remainderBufLen = maxSqlLen; + + memset(buffer, 0, maxSqlLen); char *pstr = buffer; int headLen = generateSQLHead(pTblName, tableSeq, pThreadInfo, superTblInfo, buffer); pstr += headLen; + remainderBufLen -= headLen; int k; int dataLen; k = generateDataTail(pTblName, tableSeq, pThreadInfo, superTblInfo, - g_args.num_of_RPR, pstr, insertRows, startFrom, + g_args.num_of_RPR, pstr, remainderBufLen, insertRows, startFrom, startTime, pSamplePos, &dataLen); + return k; } @@ -4656,13 +4664,18 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { int generatedRecPerTbl = 0; bool flagSleep = true; int sleepTimeTotal = 0; + + int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; + int remainderBufLen; + while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { if ((flagSleep) && (insert_interval)) { st = taosGetTimestampUs(); flagSleep = false; } // generate data - memset(buffer, 0, superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len); + memset(buffer, 0, maxSqlLen); + remainderBufLen = maxSqlLen; char *pstr = buffer; int recOfBatch = 0; @@ -4685,6 +4698,8 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pThreadInfo->threadID, __func__, __LINE__, i, buffer); pstr += headLen; + remainderBufLen -= headLen; + int dataLen = 0; verbosePrint("[%d] %s() LN%d i=%d batchPerTblTimes=%d batchPerTbl = %d\n", @@ -4698,13 +4713,20 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { } else { startTime = 1500000000000; } - generateDataTail( + int generated = generateDataTail( tableName, tableSeq, pThreadInfo, superTblInfo, - batchPerTbl, pstr, insertRows, 0, + batchPerTbl, pstr, remainderBufLen, insertRows, 0, startTime, &(pThreadInfo->samplePos), &dataLen); + if (generated < 0) { + debugPrint("[%d] %s() LN%d, generated data is %d\n", + pThreadInfo->threadID, __func__, __LINE__, generated); + goto free_and_statistics_interlace; + } pstr += dataLen; + remainderBufLen -= dataLen; + recOfBatch += batchPerTbl; startTime += batchPerTbl * superTblInfo->timeStampStep; pThreadInfo->totalInsertRows += batchPerTbl; @@ -4796,9 +4818,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { free_and_statistics_interlace: tmfree(buffer); - printf("====thread[%d] completed total inserted rows: %"PRId64 ", total affected rows: %"PRId64 "====\n", - pThreadInfo->threadID, - pThreadInfo->totalInsertRows, + printf("====thread[%d] completed total inserted rows: %"PRId64 ", total affected rows: %"PRId64 "====\n", + pThreadInfo->threadID, + 
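/*
 * The accounting pattern this patch introduces in generateDataTail() and
 * syncWriteInterlace() above: render each row into a scratch buffer first,
 * then append it to the SQL buffer only if it still fits, otherwise end the
 * batch. Reduced to a sketch (renderRow() is a hypothetical stand-in for
 * the row generators above):
 *
 *   int fillBatch(char *sqlBuf, int remainder, int batch) {
 *     char row[MAX_DATA_SIZE];
 *     int k = 0;
 *     while (k < batch) {
 *       int n = renderRow(row);             // one rendered row
 *       if (n < 0 || n > remainder) break;  // failed or would overflow
 *       sqlBuf += sprintf(sqlBuf, " %s", row);
 *       remainder -= n;                     // mirrors the patch's accounting
 *       k++;
 *     }
 *     return k;                             // rows actually emitted
 *   }
 */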
pThreadInfo->totalInsertRows, pThreadInfo->totalAffectedRows); return NULL; } @@ -4929,16 +4951,16 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { free_and_statistics_2: tmfree(buffer); - printf("====thread[%d] completed total inserted rows: %"PRId64 ", total affected rows: %"PRId64 "====\n", - pThreadInfo->threadID, - pThreadInfo->totalInsertRows, + printf("====thread[%d] completed total inserted rows: %"PRId64 ", total affected rows: %"PRId64 "====\n", + pThreadInfo->threadID, + pThreadInfo->totalInsertRows, pThreadInfo->totalAffectedRows); return NULL; } static void* syncWrite(void *sarg) { - threadInfo *winfo = (threadInfo *)sarg; + threadInfo *winfo = (threadInfo *)sarg; SSuperTable* superTblInfo = winfo->superTblInfo; int interlaceRows = superTblInfo?superTblInfo->interlaceRows:g_args.interlace_rows; @@ -4953,7 +4975,7 @@ static void* syncWrite(void *sarg) { } static void callBack(void *param, TAOS_RES *res, int code) { - threadInfo* winfo = (threadInfo*)param; + threadInfo* winfo = (threadInfo*)param; SSuperTable* superTblInfo = winfo->superTblInfo; int insert_interval = @@ -4966,7 +4988,7 @@ static void callBack(void *param, TAOS_RES *res, int code) { } char *buffer = calloc(1, winfo->superTblInfo->maxSqlLen); - char *data = calloc(1, MAX_DATA_SIZE); + char data[MAX_DATA_SIZE]; char *pstr = buffer; pstr += sprintf(pstr, "insert into %s.%s%d values", winfo->db_name, winfo->tb_prefix, winfo->start_table_from); @@ -4978,7 +5000,6 @@ static void callBack(void *param, TAOS_RES *res, int code) { if (winfo->start_table_from > winfo->end_table_to) { tsem_post(&winfo->lock_sem); free(buffer); - free(data); taos_free_result(res); return; } @@ -4988,11 +5009,9 @@ static void callBack(void *param, TAOS_RES *res, int code) { if (0 != winfo->superTblInfo->disorderRatio && rand_num < winfo->superTblInfo->disorderRatio) { int64_t d = winfo->lastTs - taosRandom() % 1000000 + rand_num; - //generateData(data, datatype, ncols_per_record, d, len_of_binary); - generateRowData(data, MAX_DATA_SIZE, d, winfo->superTblInfo); + generateRowData(data, d, winfo->superTblInfo); } else { - //generateData(data, datatype, ncols_per_record, start_time += 1000, len_of_binary); - generateRowData(data, MAX_DATA_SIZE, winfo->lastTs += 1000, winfo->superTblInfo); + generateRowData(data, winfo->lastTs += 1000, winfo->superTblInfo); } pstr += sprintf(pstr, "%s", data); winfo->counter++; @@ -5007,7 +5026,6 @@ static void callBack(void *param, TAOS_RES *res, int code) { } taos_query_a(winfo->taos, buffer, callBack, winfo); free(buffer); - free(data); taos_free_result(res); } @@ -5373,7 +5391,7 @@ static void *readTable(void *sarg) { } static void *readMetric(void *sarg) { -#if 1 +#if 1 threadInfo *rinfo = (threadInfo *)sarg; TAOS *taos = rinfo->taos; char command[BUFFER_SIZE] = "\0"; @@ -5524,7 +5542,7 @@ static int insertTestProcess() { //int64_t totalInsertRows = 0; //int64_t totalAffectedRows = 0; - //for (int i = 0; i < g_Dbs.dbCount; i++) { + //for (int i = 0; i < g_Dbs.dbCount; i++) { // for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { // totalInsertRows+= g_Dbs.db[i].superTbls[j].totalInsertRows; // totalAffectedRows += g_Dbs.db[i].superTbls[j].totalAffectedRows; @@ -5921,7 +5939,7 @@ static void *subSubscribeProcess(void *sarg) { sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[i], winfo->threadID); } - tsub[i] = subscribeImpl(winfo->taos, subSqlstr, topic, tmpFile); + tsub[i] = subscribeImpl(winfo->taos, subSqlstr, topic, tmpFile); if (NULL == tsub[i]) { taos_close(winfo->taos); 
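/*
 * Related change earlier in this patch: callBack() now uses a fixed stack
 * array (char data[MAX_DATA_SIZE]) instead of a calloc/free pair per
 * callback invocation. That removes a heap round trip for every async
 * batch and, with it, the leak risk on the early-return paths that had to
 * remember to free the buffer by hand. The cost is MAX_DATA_SIZE bytes of
 * stack per active callback, which is fine only while MAX_DATA_SIZE stays
 * small relative to the thread stack size (an assumption this patch does
 * not itself enforce).
 */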
return NULL; @@ -6109,7 +6127,7 @@ static int subscribeTestProcess() { && (g_queryInfo.superQueryInfo.threadCnt > 0)) { pidsOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt * sizeof(pthread_t)); - infosOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt * + infosOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt * sizeof(threadInfo)); if ((NULL == pidsOfSub) || (NULL == infosOfSub)) { errorPrint("%s() LN%d, malloc failed for create threads\n", @@ -6256,7 +6274,7 @@ static void setParaFromArg(){ g_Dbs.db[0].superTbls[0].timeStampStep = DEFAULT_TIMESTAMP_STEP; g_Dbs.db[0].superTbls[0].insertRows = g_args.num_of_DPT; - g_Dbs.db[0].superTbls[0].maxSqlLen = TSDB_PAYLOAD_SIZE; + g_Dbs.db[0].superTbls[0].maxSqlLen = g_args.max_sql_len; g_Dbs.db[0].superTbls[0].columnCount = 0; for (int i = 0; i < MAX_NUM_DATATYPE; i++) { diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index c3282ce6f7..8e15d1b5ec 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -139,6 +139,18 @@ python3 ./test.py -f import_merge/importInsertThenImport.py python3 ./test.py -f import_merge/importCSV.py #======================p1-end=============== #======================p2-start=============== +# tools +python3 test.py -f tools/taosdumpTest.py + +python3 test.py -f tools/taosdemoTest.py +python3 test.py -f tools/taosdemoTestWithoutMetric.py +python3 test.py -f tools/taosdemoTestWithJson.py +python3 test.py -f tools/taosdemoTestLimitOffset.py +python3 test.py -f tools/taosdemoTestTblAlt.py +python3 test.py -f tools/taosdemoTestSampleData.py +python3 test.py -f tools/taosdemoTestInterlace.py +python3 test.py -f tools/taosdemoTestQuery.py + # update python3 ./test.py -f update/allow_update.py python3 ./test.py -f update/allow_update-0.py @@ -247,18 +259,6 @@ python3 test.py -f subscribe/supertable.py #======================p3-end=============== #======================p4-start=============== -# tools -python3 test.py -f tools/taosdumpTest.py - -python3 test.py -f tools/taosdemoTest.py -python3 test.py -f tools/taosdemoTestWithoutMetric.py -python3 test.py -f tools/taosdemoTestWithJson.py -python3 test.py -f tools/taosdemoTestLimitOffset.py -python3 test.py -f tools/taosdemoTest2.py -python3 test.py -f tools/taosdemoTestSampleData.py -python3 test.py -f tools/taosdemoTestInterlace.py -python3 test.py -f tools/taosdemoTestQuery.py - python3 ./test.py -f update/merge_commit_data-0.py # wal python3 ./test.py -f wal/addOldWalTest.py diff --git a/tests/pytest/tools/taosdemoTest2.py b/tests/pytest/tools/taosdemoTestTblAlt.py similarity index 63% rename from tests/pytest/tools/taosdemoTest2.py rename to tests/pytest/tools/taosdemoTestTblAlt.py index 74b05faf8b..bb367817cf 100644 --- a/tests/pytest/tools/taosdemoTest2.py +++ b/tests/pytest/tools/taosdemoTestTblAlt.py @@ -29,10 +29,33 @@ class TDTestCase: self.numberOfTables = 10 self.numberOfRecords = 1000000 + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + def insertDataAndAlterTable(self, threadID): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % 
buildPath) + binPath = buildPath + "/build/bin/" + if(threadID == 0): - os.system("taosdemo -y -t %d -n %d" % - (self.numberOfTables, self.numberOfRecords)) + os.system("%staosdemo -y -t %d -n %d" % + (binPath, self.numberOfTables, self.numberOfRecords)) if(threadID == 1): time.sleep(2) print("use test") @@ -47,7 +70,13 @@ class TDTestCase: # check if all the tables have heen created while True: - tdSql.query("show tables") + try: + tdSql.query("show tables") + except Exception as e: + tdLog.info("show tables test failed") + time.sleep(1) + continue + rows = tdSql.queryRows print("number of tables: %d" % rows) if(rows == self.numberOfTables): @@ -56,16 +85,23 @@ class TDTestCase: # check if there are any records in the last created table while True: print("query started") - tdSql.query("select * from test.t9") + try: + tdSql.query("select * from test.t9") + except Exception as e: + tdLog.info("select * test failed") + time.sleep(2) + continue + rows = tdSql.queryRows print("number of records: %d" % rows) if(rows > 0): break time.sleep(1) + print("alter table test.meters add column col10 int") tdSql.execute("alter table test.meters add column col10 int") - print("insert into test.t0 values (now, 1, 2, 3, 4, 0.1, 0.01,'test', '测试', TRUE, 1610000000000, 0)") - tdSql.execute("insert into test.t0 values (now, 1, 2, 3, 4, 0.1, 0.01,'test', '测试', TRUE, 1610000000000, 0)") + print("insert into test.t9 values (now, 1, 2, 3, 4, 0.1, 0.01,'test', '测试', TRUE, 1610000000000, 0)") + tdSql.execute("insert into test.t9 values (now, 1, 2, 3, 4, 0.1, 0.01,'test', '测试', TRUE, 1610000000000, 0)") def run(self): tdSql.prepare() @@ -78,6 +114,8 @@ class TDTestCase: t1.join() t2.join() + time.sleep(3) + tdSql.query("select count(*) from test.meters") tdSql.checkData(0, 0, self.numberOfRecords * self.numberOfTables + 1) From 524b7080797a5cedb0a9f5572e31bb38e90350dd Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 1 Apr 2021 02:06:48 +0800 Subject: [PATCH 028/178] add connection timeout --- src/os/inc/osArm32.h | 1 + src/os/inc/osArm64.h | 1 + src/os/inc/osLinux32.h | 1 + src/os/inc/osLinux64.h | 1 + src/os/inc/osSocket.h | 2 ++ src/os/src/detail/osSocket.c | 3 +++ src/util/src/tsocket.c | 43 ++++++++++++++++++++++++++++++++++-- 7 files changed, 50 insertions(+), 2 deletions(-) diff --git a/src/os/inc/osArm32.h b/src/os/inc/osArm32.h index 24ff95522e..54835a1ca8 100644 --- a/src/os/inc/osArm32.h +++ b/src/os/inc/osArm32.h @@ -77,6 +77,7 @@ extern "C" { #include #include #include +#include #define TAOS_OS_FUNC_LZ4 #define BUILDIN_CLZL(val) __builtin_clzll(val) diff --git a/src/os/inc/osArm64.h b/src/os/inc/osArm64.h index 22f0000e96..76098f6846 100644 --- a/src/os/inc/osArm64.h +++ b/src/os/inc/osArm64.h @@ -78,6 +78,7 @@ extern "C" { #include #include #include +#include #ifdef __cplusplus } diff --git a/src/os/inc/osLinux32.h b/src/os/inc/osLinux32.h index cfef05368f..eee4999399 100644 --- a/src/os/inc/osLinux32.h +++ b/src/os/inc/osLinux32.h @@ -77,6 +77,7 @@ extern "C" { #include #include #include +#include #define TAOS_OS_FUNC_LZ4 #define BUILDIN_CLZL(val) __builtin_clzll(val) diff --git a/src/os/inc/osLinux64.h b/src/os/inc/osLinux64.h index a2febd51b7..0dfc819da3 100644 --- a/src/os/inc/osLinux64.h +++ b/src/os/inc/osLinux64.h @@ -75,6 +75,7 @@ extern "C" { #include #include #include +#include #ifndef _ALPINE #include #endif diff --git a/src/os/inc/osSocket.h b/src/os/inc/osSocket.h index 111fca712c..cf75b74a9a 100644 --- a/src/os/inc/osSocket.h +++ b/src/os/inc/osSocket.h @@ -68,6 +68,8 @@ void 
taosSetMaskSIGPIPE(); // TAOS_OS_FUNC_SOCKET_SETSOCKETOPT int32_t taosSetSockOpt(SOCKET socketfd, int32_t level, int32_t optname, void *optval, int32_t optlen); +int32_t taosGetSockOpt(SOCKET socketfd, int32_t level, int32_t optname, void *optval, int32_t* optlen); + // TAOS_OS_FUNC_SOCKET_INET uint32_t taosInetAddr(char *ipAddr); const char *taosInetNtoa(struct in_addr ipInt); diff --git a/src/os/src/detail/osSocket.c b/src/os/src/detail/osSocket.c index 1186a6dd0a..db976acccd 100644 --- a/src/os/src/detail/osSocket.c +++ b/src/os/src/detail/osSocket.c @@ -71,6 +71,9 @@ int32_t taosSetSockOpt(SOCKET socketfd, int32_t level, int32_t optname, void *op return setsockopt(socketfd, level, optname, optval, (socklen_t)optlen); } +int32_t taosGetSockOpt(SOCKET socketfd, int32_t level, int32_t optname, void *optval, int32_t* optlen) { + return getsockopt(socketfd, level, optname, optval, (socklen_t *)optlen); +} #endif #ifndef TAOS_OS_FUNC_SOCKET_INET diff --git a/src/util/src/tsocket.c b/src/util/src/tsocket.c index b33cdb8b80..ec4918e3e3 100644 --- a/src/util/src/tsocket.c +++ b/src/util/src/tsocket.c @@ -22,6 +22,8 @@ #define SIGPIPE EPIPE #endif +#define TCP_CONN_TIMEOUT 3 // conn timeout + int32_t taosGetFqdn(char *fqdn) { char hostname[1024]; hostname[1023] = '\0'; @@ -346,10 +348,47 @@ SOCKET taosOpenTcpClientSocket(uint32_t destIp, uint16_t destPort, uint32_t clie serverAddr.sin_addr.s_addr = destIp; serverAddr.sin_port = (uint16_t)htons((uint16_t)destPort); - ret = connect(sockFd, (struct sockaddr *)&serverAddr, sizeof(serverAddr)); +#ifdef _TD_LINUX + taosSetNonblocking(sockFd, 1); + ret = connect(sockFd, (struct sockaddr *)&serverAddr, sizeof(serverAddr)); + if (ret == -1) { + if (errno == EHOSTUNREACH) { + uError("failed to connect socket, ip:0x%x, port:%hu(%s)", destIp, destPort, strerror(errno)); + taosCloseSocket(sockFd); + return -1; + } else if (errno == EINPROGRESS || errno == EAGAIN || errno == EWOULDBLOCK) { + struct pollfd wfd[1]; + + wfd[0].fd = sockFd; + wfd[0].events = POLLOUT; + + int res = poll(wfd, 1, TCP_CONN_TIMEOUT); + if (res == -1 || res == 0) { + uError("failed to connect socket, ip:0x%x, port:%hu(poll error or conn timeout)", destIp, destPort); + taosCloseSocket(sockFd); // + return -1; + } + int optVal = -1, optLen = sizeof(int); + if ((0 != taosGetSockOpt(sockFd, SOL_SOCKET, SO_ERROR, &optVal, &optLen)) || (optVal != 0)) { + uError("failed to connect socket, ip:0x%x, port:%hu(connect host error)", destIp, destPort); + taosCloseSocket(sockFd); // + return -1; + } + ret = 0; + } else { // Other error + uError("failed to connect socket, ip:0x%x, port:%hu(target host cannot be reached)", destIp, destPort); + taosCloseSocket(sockFd); // + return -1; + } + } + taosSetNonblocking(sockFd, 0); + +#else + ret = connect(sockFd, (struct sockaddr *)&serverAddr, sizeof(serverAddr)); +#endif if (ret != 0) { - // uError("failed to connect socket, ip:0x%x, port:%hu(%s)", destIp, destPort, strerror(errno)); + uError("failed to connect socket, ip:0x%x, port:%hu(%s)", destIp, destPort, strerror(errno)); taosCloseSocket(sockFd); sockFd = -1; } else { From c001a86a089bd2822ad23de6ff0f9d420c23eda7 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 1 Apr 2021 05:32:09 +0800 Subject: [PATCH 029/178] change log --- src/util/src/tsocket.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/util/src/tsocket.c b/src/util/src/tsocket.c index ec4918e3e3..5c0b322d15 100644 --- a/src/util/src/tsocket.c +++ b/src/util/src/tsocket.c @@ -364,7 +364,7 @@ SOCKET 
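/*
 * The Linux-only branch below is the classic connect-with-timeout pattern:
 * put the socket in non-blocking mode, call connect(), and on EINPROGRESS
 * wait for writability with poll(); the real connect result is then read
 * back through getsockopt(SO_ERROR), which is what the new taosGetSockOpt()
 * wrapper above exists for. Note that poll() takes its timeout in
 * milliseconds, which is why a later commit in this series raises
 * TCP_CONN_TIMEOUT from 3 to 3000. A self-contained sketch of the same
 * pattern (plain POSIX; logging omitted, caller closes fd on failure):
 *
 *   static int connectWithTimeout(int fd, const struct sockaddr *addr,
 *                                 socklen_t alen, int timeoutMs) {
 *     int flags = fcntl(fd, F_GETFL, 0);
 *     fcntl(fd, F_SETFL, flags | O_NONBLOCK);
 *     int ret = connect(fd, addr, alen);
 *     if (ret != 0 && errno == EINPROGRESS) {
 *       struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *       if (poll(&pfd, 1, timeoutMs) <= 0) return -1;   // timeout or error
 *       int err = 0; socklen_t len = sizeof(err);
 *       if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) != 0 || err != 0)
 *         return -1;                                    // connect failed
 *       ret = 0;
 *     }
 *     fcntl(fd, F_SETFL, flags);                        // back to blocking
 *     return ret;
 *   }
 */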
taosOpenTcpClientSocket(uint32_t destIp, uint16_t destPort, uint32_t clie int res = poll(wfd, 1, TCP_CONN_TIMEOUT); if (res == -1 || res == 0) { - uError("failed to connect socket, ip:0x%x, port:%hu(poll error or conn timeout)", destIp, destPort); + uError("failed to connect socket, ip:0x%x, port:%hu(poll error/conn timeout)", destIp, destPort); taosCloseSocket(sockFd); // return -1; } From 63f9bb45dcaad29686a3ab99ac251edd1b4ebdd2 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 1 Apr 2021 06:23:36 +0800 Subject: [PATCH 030/178] change default timeout --- src/util/src/tsocket.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/util/src/tsocket.c b/src/util/src/tsocket.c index 5c0b322d15..77941cba82 100644 --- a/src/util/src/tsocket.c +++ b/src/util/src/tsocket.c @@ -22,7 +22,7 @@ #define SIGPIPE EPIPE #endif -#define TCP_CONN_TIMEOUT 3 // conn timeout +#define TCP_CONN_TIMEOUT 3000 // conn timeout int32_t taosGetFqdn(char *fqdn) { char hostname[1024]; From ac7baae88b67587184692a148fe97edac6121b4b Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Thu, 1 Apr 2021 10:14:25 +0800 Subject: [PATCH 031/178] fix travis output --- Jenkinsfile | 4 +++- tests/test-all.sh | 5 ----- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 433b46067a..bf2454c903 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -42,11 +42,12 @@ def pre_test(){ killall -9 taosd ||echo "no taosd running" killall -9 gdb || echo "no gdb running" cd ${WKC} - git checkout develop git reset --hard HEAD~10 >/dev/null + git checkout develop git pull >/dev/null git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD + git clean -dfx find ${WKC}/tests/pytest -name \'*\'.sql -exec rm -rf {} \\; cd ${WK} git reset --hard HEAD~10 @@ -55,6 +56,7 @@ def pre_test(){ cd ${WK} export TZ=Asia/Harbin date + git clean -dfx rm -rf ${WK}/debug mkdir debug cd debug diff --git a/tests/test-all.sh b/tests/test-all.sh index 883c1495e9..7bde698d95 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -25,9 +25,6 @@ function stopTaosd { function dohavecore(){ corefile=`find $corepath -mmin 1` core_file=`echo $corefile|cut -d " " -f2` - echo "corefile:$core_file" - echo "corepath:$corepath" - ls -l $corepath proc=`echo $corefile|cut -d "_" -f3` if [ -n "$corefile" ];then echo 'taosd or taos has generated core' @@ -82,7 +79,6 @@ function runSimCaseOneByOne { # fi end_time=`date +%s` echo execution time of $case was `expr $end_time - $start_time`s. 
| tee -a out.log - dohavecore 0 fi done < $1 } @@ -159,7 +155,6 @@ function runPyCaseOneByOne { else $line > /dev/null 2>&1 fi - dohavecore 0 fi done < $1 } From 58d76f3d26ec60a8ba6790a2f1fed4beb76bd8e5 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Thu, 1 Apr 2021 10:41:37 +0800 Subject: [PATCH 032/178] fix --- tests/test-all.sh | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/tests/test-all.sh b/tests/test-all.sh index 7bde698d95..3c8aed7d18 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -28,23 +28,21 @@ function dohavecore(){ proc=`echo $corefile|cut -d "_" -f3` if [ -n "$corefile" ];then echo 'taosd or taos has generated core' + rm case.log if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]] && [[ $1 == 1 ]]; then cd ../../../ tar -zcPf $corepath'taos_'`date "+%Y_%m_%d_%H_%M_%S"`.tar.gz debug/build/bin/taosd debug/build/bin/tsim debug/build/lib/libtaos*so* if [[ $2 == 1 ]];then cp -r sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S"` - rm -rf sim/case.log else cd community cp -r sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" ` - rm -rf sim/case.log fi else cd ../../ if [[ $1 == 1 ]];then tar -zcPf $corepath'taos_'`date "+%Y_%m_%d_%H_%M_%S"`.tar.gz debug/build/bin/taosd debug/build/bin/tsim debug/build/lib/libtaos*so* cp -r sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" ` - rm -rf sim/case.log fi fi if [[ $1 == 1 ]];then @@ -95,26 +93,25 @@ function runSimCaseOneByOnefq { date +%F\ %T | tee -a out.log if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]]; then echo -n $case - ./test.sh -f $case > ../../../sim/case.log 2>&1 && \ + ./test.sh -f $case > case.log 2>&1 && \ ( grep -q 'script.*'$case'.*failed.*, err.*lineNum' ../../../sim/tsim/log/taoslog0.0 && echo -e "${RED} failed${NC}" | tee -a out.log || echo -e "${GREEN} success${NC}" | tee -a out.log )|| \ ( grep -q 'script.*success.*m$' ../../../sim/tsim/log/taoslog0.0 && echo -e "${GREEN} success${NC}" | tee -a out.log ) || \ - ( echo -e "${RED} failed${NC}" | tee -a out.log && echo '=====================log=====================' && cat ../../../sim/case.log ) + ( echo -e "${RED} failed${NC}" | tee -a out.log && echo '=====================log=====================' && cat case.log ) else echo -n $case ./test.sh -f $case > ../../sim/case.log 2>&1 && \ ( grep -q 'script.*'$case'.*failed.*, err.*lineNum' ../../sim/tsim/log/taoslog0.0 && echo -e "${RED} failed${NC}" | tee -a out.log || echo -e "${GREEN} success${NC}" | tee -a out.log )|| \ ( grep -q 'script.*success.*m$' ../../sim/tsim/log/taoslog0.0 && echo -e "${GREEN} success${NC}" | tee -a out.log ) || \ - ( echo -e "${RED} failed${NC}" | tee -a out.log && echo '=====================log=====================' && cat ../../sim/case.log ) + ( echo -e "${RED} failed${NC}" | tee -a out.log && echo '=====================log=====================' && cat case.log ) fi out_log=`tail -1 out.log ` if [[ $out_log =~ 'failed' ]];then + rm case.log if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]]; then cp -r ../../../sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S"` - rm -rf ../../../sim/case.log else cp -r ../../sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" ` - rm -rf ../../sim/case.log fi dohavecore $2 1 if [[ $2 == 1 ]];then @@ -180,7 +177,7 @@ function runPyCaseOneByOnefq() { start_time=`date +%s` date +%F\ %T | tee -a pytest-out.log echo -n $case - $line > ../../sim/case.log 2>&1 && \ + $line > case.log 2>&1 && \ echo -e "${GREEN} success${NC}" | tee -a pytest-out.log || \ echo -e "${RED} failed${NC}" | tee -a pytest-out.log end_time=`date +%s` @@ -188,8 +185,8 @@ function runPyCaseOneByOnefq() { if [[ $out_log =~ 
'failed' ]];then cp -r ../../sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" ` echo '=====================log===================== ' - cat ../../sim/case.log - rm -rf ../../sim/case.log + cat case.log + rm -rf case.log dohavecore $2 2 if [[ $2 == 1 ]];then exit 8 From 73889873fc6786bf12dda6e3daf3837bf525c797 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Thu, 1 Apr 2021 10:54:46 +0800 Subject: [PATCH 033/178] git clean --- Jenkinsfile | 2 -- 1 file changed, 2 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index bf2454c903..bac0cce33b 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -48,7 +48,6 @@ def pre_test(){ git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD git clean -dfx - find ${WKC}/tests/pytest -name \'*\'.sql -exec rm -rf {} \\; cd ${WK} git reset --hard HEAD~10 git checkout develop @@ -57,7 +56,6 @@ def pre_test(){ export TZ=Asia/Harbin date git clean -dfx - rm -rf ${WK}/debug mkdir debug cd debug cmake .. > /dev/null From 3e073149888f54a264d20dae63476b256e1f9419 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Thu, 1 Apr 2021 14:17:44 +0800 Subject: [PATCH 034/178] [TD-2518] : timestamp add support for time before year 1970. --- documentation20/cn/12.taos-sql/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index b4fa2b160a..dfda8997c2 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -43,7 +43,7 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic | | 类型 | Bytes | 说明 | | ---- | :-------: | ------ | ------------------------------------------------------------ | -| 1 | TIMESTAMP | 8 | 时间戳。缺省精度毫秒,可支持微秒。从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始,计时不能早于该时间。 | +| 1 | TIMESTAMP | 8 | 时间戳。缺省精度毫秒,可支持微秒。从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始,计时不能早于该时间。(从 2.0.18 版本开始,已经去除了这一时间范围限制) | | 2 | INT | 4 | 整型,范围 [-2^31+1, 2^31-1], -2^31 用作 NULL | | 3 | BIGINT | 8 | 长整型,范围 [-2^63+1, 2^63-1], -2^63 用于 NULL | | 4 | FLOAT | 4 | 浮点型,有效位数 6-7,范围 [-3.4E38, 3.4E38] | From 4aebd5bdf6604fb692d43d61d54ca4a6ba702ed7 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 1 Apr 2021 15:13:54 +0800 Subject: [PATCH 035/178] [TD-3612]: fix vnode write msg double free issue --- src/dnode/src/dnodeVWrite.c | 2 +- src/vnode/src/vnodeWrite.c | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/dnode/src/dnodeVWrite.c b/src/dnode/src/dnodeVWrite.c index 87b31e4604..84fd260d91 100644 --- a/src/dnode/src/dnodeVWrite.c +++ b/src/dnode/src/dnodeVWrite.c @@ -205,7 +205,7 @@ static void *dnodeProcessVWriteQueue(void *wparam) { pWrite->rpcMsg.ahandle, taosMsg[pWrite->pHead.msgType], qtypeStr[qtype], pWrite->pHead.version); pWrite->code = vnodeProcessWrite(pVnode, &pWrite->pHead, qtype, pWrite); - if (pWrite->code <= 0) pWrite->processedCount = 1; + if (pWrite->code <= 0) atomic_add_fetch_32(&pWrite->processedCount, 1); if (pWrite->code > 0) pWrite->code = 0; if (pWrite->code == 0 && pWrite->pHead.msgType != TSDB_MSG_TYPE_SUBMIT) forceFsync = true; diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c index e318978a11..92e1ba804b 100644 --- a/src/vnode/src/vnodeWrite.c +++ b/src/vnode/src/vnodeWrite.c @@ -96,6 +96,7 @@ int32_t vnodeProcessWrite(void *vparam, void *wparam, int32_t qtype, void *rpara // write into WAL code = walWrite(pVnode->wal, pHead); if (code < 0) { + if (syncCode > 0) atomic_sub_fetch_32(&pWrite->processedCount, 1); vError("vgId:%d, hver:%" PRIu64 " vver:%" PRIu64 " code:0x%x", pVnode->vgId, 
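/*
 * The shape of the TD-3612 fix: processedCount is the counter that gates
 * when a vnode write message may be released, so it is now moved with
 * atomics (atomic_add_fetch_32 in dnodeProcessVWriteQueue above), and the
 * late-failure paths here give back the count the sync step already took
 * (atomic_sub_fetch_32 when syncCode > 0) before returning the error.
 * Keeping every increment paired with a decrement is what prevents the
 * message from being freed twice. As an invariant sketch:
 *
 *   atomic_add_fetch_32(&pWrite->processedCount, 1);  // take a reference
 *   ...
 *   atomic_sub_fetch_32(&pWrite->processedCount, 1);  // on failure, give it back
 *   // the message may be released only when the count returns to zero
 */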
pHead->version, pVnode->version, code); return code; } @@ -104,7 +105,10 @@ int32_t vnodeProcessWrite(void *vparam, void *wparam, int32_t qtype, void *rpara // write data locally code = (*vnodeProcessWriteMsgFp[pHead->msgType])(pVnode, pHead->cont, pRspRet); - if (code < 0) return code; + if (code < 0) { + if (syncCode > 0) atomic_sub_fetch_32(&pWrite->processedCount, 1); + return code; + } return syncCode; } From af996100a27410d42d047e8cbeb386ab0b68cdc8 Mon Sep 17 00:00:00 2001 From: zyyang <69311263+zyyang-taosdata@users.noreply.github.com> Date: Thu, 1 Apr 2021 15:15:50 +0800 Subject: [PATCH 036/178] [TD-3417]: remove.sh stop and delete the nginxd.service for enterprise edition (#5617) --- packaging/tools/remove.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index e63889aff1..9241f01efa 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -120,7 +120,7 @@ function clean_service_on_systemd() { if [ "$verMode" == "cluster" ]; then nginx_service_config="${service_config_dir}/${nginx_service_name}.service" - if [ -d ${bin_dir}/web ]; then + if [ -d ${install_nginxd_dir} ]; then if systemctl is-active --quiet ${nginx_service_name}; then echo "Nginx for TDengine is running, stopping it..." ${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null From 598b1ce6bffaa97a5abdb15d03b753f52bcaf2d6 Mon Sep 17 00:00:00 2001 From: zyyang <69311263+zyyang-taosdata@users.noreply.github.com> Date: Thu, 1 Apr 2021 15:16:36 +0800 Subject: [PATCH 037/178] Feature/td 3411 (#5648) * [TD-3410]feature: adapted to nutz * change * change * change * change * [TD-3447]: JDBC-RESTful and JDBC-JNI support setObject * change * change * change * change * change * change * change * change * change * change * change * change * [TD-3548]: support ParameterMetaData in JDBC-RESTful and JDBC-JNI * change * change * [TD-3446]: JDBC-JNI and JDBC-RESTful do not need invoke Class.forName any more * change jdbc version * change * change * change * change * change * change * change * change * change version --- cmake/install.inc | 2 +- src/connector/jdbc/CMakeLists.txt | 2 +- src/connector/jdbc/deploy-pom.xml | 2 +- src/connector/jdbc/pom.xml | 10 ++- .../jdbc/AbstractParameterMetaData.java | 2 +- .../com/taosdata/jdbc/TSDBJNIConnector.java | 46 ++----------- .../taosdata/jdbc/rs/RestfulResultSet.java | 4 +- .../java/com/taosdata/jdbc/utils/OSUtils.java | 17 +++++ .../jdbc/cases/DriverAutoloadTest.java | 1 - ...ullValueInResultSetForJdbcRestfulTest.java | 64 +++++++++++++++++++ .../com/taosdata/jdbc/utils/OSUtilsTest.java | 30 +++++++++ 11 files changed, 133 insertions(+), 47 deletions(-) create mode 100644 src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/OSUtils.java create mode 100644 src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetForJdbcRestfulTest.java create mode 100644 src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/OSUtilsTest.java diff --git a/cmake/install.inc b/cmake/install.inc index 01d3c8a4df..5823ef743e 100755 --- a/cmake/install.inc +++ b/cmake/install.inc @@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS) #INSTALL(TARGETS taos RUNTIME DESTINATION driver) #INSTALL(TARGETS shell RUNTIME DESTINATION .) 
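# The jar version in the INSTALL rule below is also hard-coded in
# src/connector/jdbc/CMakeLists.txt and in both pom files, so the release
# bump in this patch (2.0.24 -> 2.0.25) has to touch several files. One way
# to centralize it on the CMake side -- a sketch, not part of this patch:
#
#   SET(TD_JDBC_DRIVER_VER "2.0.25")
#   INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-${TD_JDBC_DRIVER_VER}-dist.jar
#           DESTINATION connector/jdbc)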
IF (TD_MVN_INSTALLED) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.24-dist.jar DESTINATION connector/jdbc) + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.25-dist.jar DESTINATION connector/jdbc) ENDIF () ELSEIF (TD_DARWIN) SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh") diff --git a/src/connector/jdbc/CMakeLists.txt b/src/connector/jdbc/CMakeLists.txt index 540dc8eb58..eb158b1f76 100644 --- a/src/connector/jdbc/CMakeLists.txt +++ b/src/connector/jdbc/CMakeLists.txt @@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED) ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME} POST_BUILD COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml - COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.24-dist.jar ${LIBRARY_OUTPUT_PATH} + COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.25-dist.jar ${LIBRARY_OUTPUT_PATH} COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml COMMENT "build jdbc driver") ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME}) diff --git a/src/connector/jdbc/deploy-pom.xml b/src/connector/jdbc/deploy-pom.xml index 935b3f7e4a..eb8c92575c 100755 --- a/src/connector/jdbc/deploy-pom.xml +++ b/src/connector/jdbc/deploy-pom.xml @@ -5,7 +5,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.24 + 2.0.25 jar JDBCDriver diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml index 5d7c89e2d2..1f75754b0c 100755 --- a/src/connector/jdbc/pom.xml +++ b/src/connector/jdbc/pom.xml @@ -3,7 +3,7 @@ 4.0.0 com.taosdata.jdbc taos-jdbcdriver - 2.0.24 + 2.0.25 jar JDBCDriver https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc @@ -62,6 +62,14 @@ + + + src/main/resources + + **/*.md + + + org.apache.maven.plugins diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractParameterMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractParameterMetaData.java index 999df06fc7..7df7252ae2 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractParameterMetaData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractParameterMetaData.java @@ -15,7 +15,7 @@ public abstract class AbstractParameterMetaData extends WrapperImpl implements P @Override public int getParameterCount() throws SQLException { - return parameters.length; + return parameters == null ? 
0 : parameters.length; } @Override diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java index 7d3741917c..5e3ffffa4f 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java @@ -29,45 +29,25 @@ public class TSDBJNIConnector { private static volatile Boolean isInitialized = false; private TaosInfo taosInfo = TaosInfo.getInstance(); + // Connection pointer used in C + private long taos = TSDBConstants.JNI_NULL_POINTER; + // result set status in current connection + private boolean isResultsetClosed = true; + private int affectedRows = -1; static { System.loadLibrary("taos"); System.out.println("java.library.path:" + System.getProperty("java.library.path")); } - /** - * Connection pointer used in C - */ - private long taos = TSDBConstants.JNI_NULL_POINTER; - - /** - * Result set pointer for the current connection - */ -// private long taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER; - - /** - * result set status in current connection - */ - private boolean isResultsetClosed = true; - private int affectedRows = -1; - - /** - * Whether the connection is closed - */ public boolean isClosed() { return this.taos == TSDBConstants.JNI_NULL_POINTER; } - /** - * Returns the status of last result set in current connection - */ public boolean isResultsetClosed() { return this.isResultsetClosed; } - /** - * Initialize static variables in JNI to optimize performance - */ public static void init(String configDir, String locale, String charset, String timezone) throws SQLWarning { synchronized (isInitialized) { if (!isInitialized) { @@ -93,11 +73,6 @@ public class TSDBJNIConnector { public static native String getTsCharset(); - /** - * Get connection pointer - * - * @throws SQLException - */ public boolean connect(String host, int port, String dbName, String user, String password) throws SQLException { if (this.taos != TSDBConstants.JNI_NULL_POINTER) { // this.closeConnectionImp(this.taos); @@ -185,13 +160,6 @@ public class TSDBJNIConnector { private native String getErrMsgImp(long pSql); - /** - * Get resultset pointer - * Each connection should have a single open result set at a time - */ -// public long getResultSet() { -// return taosResultSetPointer; -// } private native long getResultSetImp(long connection, long pSql); public boolean isUpdateQuery(long pSql) { @@ -231,6 +199,7 @@ public class TSDBJNIConnector { // } // return resCode; // } + private native int freeResultSetImp(long connection, long result); /** @@ -323,8 +292,7 @@ public class TSDBJNIConnector { * Validate if a create table sql statement is correct without actually creating that table */ public boolean validateCreateTableSql(String sql) { - long connection = taos; - int res = validateCreateTableSqlImp(connection, sql.getBytes()); + int res = validateCreateTableSqlImp(taos, sql.getBytes()); return res != 0 ? 
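/* Style note, not part of this patch: a ternary of the form
   "cond ? false : true" is just the negation of cond, so the return being
   completed here reduces to the plainer

       return res == 0;
*/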
false : true; } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java index 856f5257bf..5c2d4c45b0 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java @@ -84,9 +84,9 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: return new Timestamp(row.getDate(colIndex).getTime()); case TSDBConstants.TSDB_DATA_TYPE_BINARY: - return row.getString(colIndex).getBytes(); + return row.getString(colIndex) == null ? null : row.getString(colIndex).getBytes(); case TSDBConstants.TSDB_DATA_TYPE_NCHAR: - return row.getString(colIndex); + return row.getString(colIndex) == null ? null : row.getString(colIndex); default: return row.get(colIndex); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/OSUtils.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/OSUtils.java new file mode 100644 index 0000000000..a67b4763f9 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/OSUtils.java @@ -0,0 +1,17 @@ +package com.taosdata.jdbc.utils; + +public class OSUtils { + private static final String OS = System.getProperty("os.name").toLowerCase(); + + public static boolean isWindows() { + return OS.indexOf("win") >= 0; + } + + public static boolean isMac() { + return OS.indexOf("mac") >= 0; + } + + public static boolean isLinux() { + return OS.indexOf("nux") >= 0; + } +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DriverAutoloadTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DriverAutoloadTest.java index 580b2ac1b5..9826e6ed76 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DriverAutoloadTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DriverAutoloadTest.java @@ -17,7 +17,6 @@ public class DriverAutoloadTest { @Test public void testRestful() throws SQLException { -// Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"; Connection conn = DriverManager.getConnection(url, properties); Assert.assertNotNull(conn); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetForJdbcRestfulTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetForJdbcRestfulTest.java new file mode 100644 index 0000000000..f2ac94adc1 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetForJdbcRestfulTest.java @@ -0,0 +1,64 @@ +package com.taosdata.jdbc.cases; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.sql.*; + +public class NullValueInResultSetForJdbcRestfulTest { + + private static final String host = "127.0.0.1"; + Connection conn; + + @Test + public void test() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select * from weather"); + ResultSetMetaData meta = rs.getMetaData(); + while (rs.next()) { + for (int i = 1; i <= meta.getColumnCount(); i++) { + Object value = rs.getObject(i); + System.out.print(meta.getColumnLabel(i) + ": " + value + "\t"); + } + System.out.println(); + } + + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Before + public void before() throws SQLException { + final String url = 
"jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"; + conn = DriverManager.getConnection(url); + try (Statement stmt = conn.createStatement()) { + stmt.execute("drop database if exists test_null"); + stmt.execute("create database if not exists test_null"); + stmt.execute("use test_null"); + stmt.execute("create table weather(ts timestamp, f1 int, f2 bigint, f3 float, f4 double, f5 smallint, f6 tinyint, f7 bool, f8 binary(64), f9 nchar(64))"); + stmt.executeUpdate("insert into weather(ts, f1) values(now+1s, 1)"); + stmt.executeUpdate("insert into weather(ts, f2) values(now+2s, 2)"); + stmt.executeUpdate("insert into weather(ts, f3) values(now+3s, 3.0)"); + stmt.executeUpdate("insert into weather(ts, f4) values(now+4s, 4.0)"); + stmt.executeUpdate("insert into weather(ts, f5) values(now+5s, 5)"); + stmt.executeUpdate("insert into weather(ts, f6) values(now+6s, 6)"); + stmt.executeUpdate("insert into weather(ts, f7) values(now+7s, true)"); + stmt.executeUpdate("insert into weather(ts, f8) values(now+8s, 'hello')"); + stmt.executeUpdate("insert into weather(ts, f9) values(now+9s, '涛思数据')"); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @After + public void after() { + try { + if (conn != null) + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/OSUtilsTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/OSUtilsTest.java new file mode 100644 index 0000000000..fd6c83ad1c --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/OSUtilsTest.java @@ -0,0 +1,30 @@ +package com.taosdata.jdbc.utils; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +public class OSUtilsTest { + + private String OS; + + @Test + public void inWindows() { + Assert.assertEquals(OS.indexOf("win") >= 0, OSUtils.isWindows()); + } + + @Test + public void isMac() { + Assert.assertEquals(OS.indexOf("mac") >= 0, OSUtils.isMac()); + } + + @Test + public void isLinux() { + Assert.assertEquals(OS.indexOf("nux") >= 0, OSUtils.isLinux()); + } + + @Before + public void before() { + OS = System.getProperty("os.name").toLowerCase(); + } +} \ No newline at end of file From f75a675b0dcbd8517ce984fca1d6f70657bc2c52 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Thu, 1 Apr 2021 15:26:38 +0800 Subject: [PATCH 038/178] [TD-3588]: update test case --- tests/pytest/insert/metadataUpdate.py | 47 ++++++++++++--------------- 1 file changed, 20 insertions(+), 27 deletions(-) diff --git a/tests/pytest/insert/metadataUpdate.py b/tests/pytest/insert/metadataUpdate.py index 4c32b0e39a..1a960a20e6 100644 --- a/tests/pytest/insert/metadataUpdate.py +++ b/tests/pytest/insert/metadataUpdate.py @@ -11,13 +11,13 @@ # -*- coding: utf-8 -*- -import sys import taos from util.log import tdLog from util.cases import tdCases from util.sql import tdSql from util.dnodes import tdDnodes -from multiprocessing import Process +from multiprocessing import Process +import subprocess class TDTestCase: def init(self, conn, logSql): @@ -40,27 +40,22 @@ class TDTestCase: print("alter table done") def deleteTableAndRecreate(self): - self.host = "127.0.0.1" - self.user = "root" - self.password = "taosdata" self.config = tdDnodes.getSimCfgPath() - self.conn = taos.connect(host = self.host, user = self.user, password = self.password, config = self.config) - self.cursor = self.conn.cursor() - - self.cursor.execute("use test") - print("drop table stb") - self.cursor.execute("drop table stb") 
- - print("create table stb") - self.cursor.execute("create table if not exists stb (ts timestamp, col1 int) tags(areaid int, city nchar(20))") - print("insert data") + sqlCmds = "use test; drop table stb;" + sqlCmds += "create table if not exists stb (ts timestamp, col1 int) tags(areaid int, city nchar(20));" for i in range(self.tables): city = "beijing" if i % 2 == 0 else "shanghai" - self.cursor.execute("create table tb%d using stb tags(%d, '%s')" % (i, i, city)) - for j in range(self.rows): - self.cursor.execute("insert into tb%d values(%d, %d)" % (i, self.ts + j, j * 100000)) - + sqlCmds += "create table tb%d using stb tags(%d, '%s');" % (i, i, city) + for j in range(5): + sqlCmds += "insert into tb%d values(%d, %d);" % (i, self.ts + j, j * 100000) + command = ["taos", "-c", self.config, "-s", sqlCmds] + print("drop stb, recreate stb and insert data ") + result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8") + if result.returncode == 0: + print("success:", result) + else: + print("error:", result) def run(self): tdSql.prepare() @@ -100,19 +95,17 @@ class TDTestCase: tdSql.query("select count(*) from stb") tdSql.checkData(0, 0, 10000) - tdSql.query("select count(*) from tb1") + tdSql.query("select count(*) from tb0") tdSql.checkData(0, 0, 1000) - p = Process(target=self.deleteTableAndRecreate, args=()) - p.start() - p.join() - p.terminate() + # drop stable in subprocess + self.deleteTableAndRecreate() tdSql.query("select count(*) from stb") - tdSql.checkData(0, 0, 10000) + tdSql.checkData(0, 0, 5 * self.tables) - tdSql.query("select count(*) from tb1") - tdSql.checkData(0, 0, 1000) + tdSql.query("select count(*) from tb0") + tdSql.checkData(0, 0, 5) def stop(self): tdSql.close() From dc9c503f05fd0b5e3ce8dae2efcd2e497eaa5a4f Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Thu, 1 Apr 2021 15:31:31 +0800 Subject: [PATCH 039/178] free issue --- src/query/src/qUtil.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index 3e125d73da..9b0046fda0 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -67,7 +67,9 @@ void cleanupResultRowInfo(SResultRowInfo *pResultRowInfo) { } for(int32_t i = 0; i < pResultRowInfo->size; ++i) { - tfree(pResultRowInfo->pResult[i]->key); + if (pResultRowInfo->pResult[i]) { + tfree(pResultRowInfo->pResult[i]->key); + } } tfree(pResultRowInfo->pResult); From e2ed5b946037f0ccb22ef1ace72451a67ca37c8e Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 1 Apr 2021 16:03:38 +0800 Subject: [PATCH 040/178] [TD-3642]: fix staging directory resource leak --- src/dnode/src/dnodeMain.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index ea0ef4655d..410e6bb188 100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -238,9 +238,11 @@ static int32_t dnodeInitStorage() { } TDIR *tdir = tfsOpendir("vnode_bak/.staging"); - if (tfsReaddir(tdir) != NULL) { + bool stagingNotEmpty = tfsReaddir(tdir) != NULL; + tfsClosedir(tdir); + + if (stagingNotEmpty) { dError("vnode_bak/.staging dir not empty, fix it first."); - tfsClosedir(tdir); return -1; } From e39762deb1571554e603d26b6a404e8d90fa7ccb Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 1 Apr 2021 16:06:25 +0800 Subject: [PATCH 041/178] Hotfix/sangshuduo/td 3631 max stb count (#5645) * [TD-3631] : increase max super table count to 10000 * [TD-3631] : use smaller max stb count due to data 
structure limit. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 9f367b41f8..d76185ca43 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -85,7 +85,7 @@ enum TEST_MODE { #define MAX_NUM_DATATYPE 10 #define MAX_DB_COUNT 8 -#define MAX_SUPER_TABLE_COUNT 8 +#define MAX_SUPER_TABLE_COUNT 200 #define MAX_COLUMN_COUNT 1024 #define MAX_TAG_COUNT 128 @@ -4090,7 +4090,7 @@ static bool getInfoFromJsonFile(char* file) { } bool ret = false; - int maxLen = 64000; + int maxLen = 6400000; char *content = calloc(1, maxLen + 1); int len = fread(content, 1, maxLen, fp); if (len <= 0) { From ad1c6534a93602953c7baf53c548b5f1747e7938 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Thu, 1 Apr 2021 16:40:04 +0800 Subject: [PATCH 042/178] [TD-3645] : add description about dimension microsecond in timestamp. --- documentation20/cn/12.taos-sql/docs.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index dfda8997c2..58191e0bd8 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -29,17 +29,17 @@ taos> DESCRIBE meters; ## 支持的数据类型 -使用TDengine,最重要的是时间戳。创建并插入记录、查询历史记录的时候,均需要指定时间戳。时间戳有如下规则: +使用 TDengine,最重要的是时间戳。创建并插入记录、查询历史记录的时候,均需要指定时间戳。时间戳有如下规则: -- 时间格式为```YYYY-MM-DD HH:mm:ss.MS```, 默认时间分辨率为毫秒。比如:```2017-08-12 18:25:58.128``` -- 内部函数now是服务器的当前时间 -- 插入记录时,如果时间戳为now,插入数据时使用服务器当前时间 -- Epoch Time: 时间戳也可以是一个长整数,表示从1970-01-01 08:00:00.000开始的毫秒数 -- 时间可以加减,比如 now-2h,表明查询时刻向前推2个小时(最近2小时)。 数字后面的时间单位可以是 a(毫秒)、s(秒)、 m(分)、h(小时)、d(天)、w(周)。 比如select * from t1 where ts > now-2w and ts <= now-1w, 表示查询两周前整整一周的数据。 在指定降频操作(down sampling)的时间窗口(interval)时,时间单位还可以使用 n(自然月) 和 y(自然年)。 +- 时间格式为 ```YYYY-MM-DD HH:mm:ss.MS```,默认时间分辨率为毫秒。比如:```2017-08-12 18:25:58.128``` +- 内部函数 now 是客户端的当前时间 +- 插入记录时,如果时间戳为 now,插入数据时使用提交这条记录的客户端的当前时间 +- Epoch Time:时间戳也可以是一个长整数,表示从 1970-01-01 08:00:00.000 开始的毫秒数 +- 时间可以加减,比如 now-2h,表明查询时刻向前推 2 个小时(最近 2 小时)。数字后面的时间单位可以是 u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 比如 `select * from t1 where ts > now-2w and ts <= now-1w`,表示查询两周前整整一周的数据。在指定降频操作(down sampling)的时间窗口(interval)时,时间单位还可以使用 n(自然月) 和 y(自然年)。 -TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMicrosecond就可支持微秒。 +TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableMicrosecond 就可以支持微秒。 -在TDengine中,普通表的数据模型中可使用以下10种数据类型。 +在TDengine中,普通表的数据模型中可使用以下 10 种数据类型。 | | 类型 | Bytes | 说明 | | ---- | :-------: | ------ | ------------------------------------------------------------ | From 71badf8c85fe832ce578d6e835cb96225954a9a0 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Thu, 1 Apr 2021 16:41:21 +0800 Subject: [PATCH 043/178] fix filter error --- src/query/src/qExecutor.c | 4 +- tests/script/general/parser/having_child.sim | 94 +++++++++++++++----- 2 files changed, 76 insertions(+), 22 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 71bc4913e0..d6ce220548 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -4709,7 +4709,7 @@ void doHavingImpl(SOperatorInfo *pOperator, SSDataBlock *pBlock) { //SColIndex* colIdx = &pExprInfo->base.colInfo; SColumnInfoData* p = taosArrayGet(pBlock->pDataBlock, i); - SColumnFilterElem filterElem = {.filterInfo = *pExprInfo->pFilter}; + SColumnFilterElem filterElem = {.filterInfo = pExprInfo->pFilter[m]}; if (doFilterData(p, r, &filterElem, fp)) { exprQualified = 1; @@ 
-5205,12 +5205,14 @@ int32_t initFilterFp(SExprInfo* pExpr, int32_t numOfOutput, SArray** fps) { int32_t upper = filterInfo->upperRelOptr; if (lower == TSDB_RELATION_INVALID && upper == TSDB_RELATION_INVALID) { qError("invalid rel optr"); + taosArrayDestroy(es); return TSDB_CODE_QRY_APP_ERROR; } __filter_func_t ffp = getFilterOperator(lower, upper); if (ffp == NULL) { qError("invalid filter info"); + taosArrayDestroy(es); return TSDB_CODE_QRY_APP_ERROR; } diff --git a/tests/script/general/parser/having_child.sim b/tests/script/general/parser/having_child.sim index f9c554aeab..1f29a63b91 100644 --- a/tests/script/general/parser/having_child.sim +++ b/tests/script/general/parser/having_child.sim @@ -769,7 +769,46 @@ sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1) from tb1 group by f1 ha sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1),LEASTSQUARES(f1,1,1) from tb1 group by f1 having LEASTSQUARES(f1,1,1) > 2; -sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1),LEASTSQUARES(f1,1,1) from tb1 group by f1 having sum(f1) > 2; +sql select avg(f1),count(tb1.*),sum(f1),stddev(f1),LEASTSQUARES(f1,1,1) from tb1 group by f1 having sum(f1) > 2; +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 4 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 6 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 8 then + return -1 +endi +if $data23 != 0.000000000 then + return -1 +endi sql select avg(f1),count(tb1.*),sum(f1),stddev(f1) from tb1 group by f1 having min(f1) > 2; if $rows != 2 then @@ -1072,7 +1111,13 @@ sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1),max(f1),first(f sql_error select PERCENTILE(f1) from tb1 group by f1 having sum(f1) > 1; -sql_error select PERCENTILE(f1,20) from tb1 group by f1 having sum(f1) > 1; +sql select PERCENTILE(f1,20) from tb1 group by f1 having sum(f1) = 4; +if $rows != 1 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi sql select aPERCENTILE(f1,20) from tb1 group by f1 having sum(f1) > 1; if $rows != 4 then @@ -1396,30 +1441,28 @@ sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by id1 having avg(f sql_error select avg(f1),spread(f1,f2,tb1.f1),avg(id1) from tb1 group by id1 having avg(f1) > id1; -sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by id1 having avg(f1) > 0; +sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by id1 having avg(f1) > 0; + +sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having avg(f1) > 0 and avg(f1) = 3; if $rows != 1 then return -1 endi -if $data00 != 2.500000000 then +if $data00 != 3.000000000 then return -1 endi -if $data01 != 3.000000000 then +if $data01 != 0.000000000 then return -1 endi -if $data02 != 3.000000000 then +if $data02 != 0.000000000 then return -1 endi -if $data03 != 3.000000000 then - return -1 -endi -if $data04 != 1 then +if $data03 != 0.000000000 then return -1 endi -sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by id1 having avg(f1) < 2; -if $rows != 0 then - return -1 -endi +sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having avg(f1) < 0 and avg(f1) = 3; + +sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by id1 having avg(f1) < 2; sql 
select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f1 > 0 group by f1 having avg(f1) > 0; if $rows != 4 then @@ -1814,23 +1857,32 @@ sql_error select avg(f1),spread(f1,f2,tb1.f1),f1,f6 from tb1 where f2 > 1 group sql_error select avg(f1),spread(f1,f2,tb1.f1),f1,f6 from tb1 where f2 > 1 group by id1 having last(f6) > 0; -sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 1 group by id1 having last(f6) > 0; -if $rows != 1 then +sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 1 and f2 < 4 group by f1 having last(f6) > 0; +if $rows != 2 then return -1 endi -if $data00 != 3.000000000 then +if $data00 != 2.000000000 then return -1 endi -if $data01 != 2.000000000 then +if $data01 != 0.000000000 then return -1 endi -if $data02 != 2.000000000 then +if $data02 != 0.000000000 then return -1 endi -if $data03 != 2.000000000 then +if $data03 != 0.000000000 then return -1 endi -if $data04 != 1 then +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 0.000000000 then + return -1 +endi +if $data12 != 0.000000000 then + return -1 +endi +if $data13 != 0.000000000 then return -1 endi From af7b50c43966c43e49619292414d5f4ea2a80b44 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Thu, 1 Apr 2021 16:59:39 +0800 Subject: [PATCH 044/178] fix issue --- src/client/src/tscLocalMerge.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index 60eb0c9226..fb23c6e6dd 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -22,7 +22,7 @@ #include "tscUtil.h" #include "tschemautil.h" #include "tsclient.h" -#include "qutil.h" +#include "qUtil.h" typedef struct SCompareParam { SLocalDataSource **pLocalData; From c507d0c2e22bd56adf01d779f147bcf548492aca Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Thu, 1 Apr 2021 17:03:36 +0800 Subject: [PATCH 045/178] fix bug --- src/client/src/tscSQLParser.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 3455395481..0d3f9e1984 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -34,7 +34,7 @@ #include "tstoken.h" #include "tstrbuild.h" #include "ttokendef.h" -#include "qutil.h" +#include "qUtil.h" #define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0" From 4e9d4c2b58a4be9ba561dffd193beb25e5cf43d5 Mon Sep 17 00:00:00 2001 From: zyyang-taosdata Date: Thu, 1 Apr 2021 17:24:43 +0800 Subject: [PATCH 046/178] change version --- cmake/version.inc | 2 +- snap/snapcraft.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmake/version.inc b/cmake/version.inc index b1e09c9532..fe4c017c71 100755 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -4,7 +4,7 @@ PROJECT(TDengine) IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "2.0.18.0") + SET(TD_VER_NUMBER "2.0.19.0") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index 88628b4db6..65bf5447e2 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -1,6 +1,6 @@ name: tdengine base: core18 -version: '2.0.18.0' +version: '2.0.19.0' icon: snap/gui/t-dengine.svg summary: an open-source big data platform designed and optimized for IoT. 
description: | @@ -72,7 +72,7 @@ parts: - usr/bin/taosd - usr/bin/taos - usr/bin/taosdemo - - usr/lib/libtaos.so.2.0.18.0 + - usr/lib/libtaos.so.2.0.19.0 - usr/lib/libtaos.so.1 - usr/lib/libtaos.so From c91fccf29cded78a19e862aa811356631fead602 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 1 Apr 2021 17:49:19 +0800 Subject: [PATCH 047/178] Hotfix/sangshuduo/td 3633 taosdemo segfault (#5649) * [TD-3633] : fix taosdemo segfault. * [TD-3633] : fix taosdemo segfault. use snprintf instead of sprintf. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index d76185ca43..df1c7bee26 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -4455,7 +4455,7 @@ static int generateDataTail(char *tableName, int32_t tableSeq, break; } - buffer += sprintf(buffer, " %s", data); + buffer += snprintf(buffer, retLen + 1, "%s", data); k++; len += retLen; remainderBufLen -= retLen; From 91f78d4a4b5abf0e26f810b79524f0463661a4a5 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Fri, 2 Apr 2021 09:30:59 +0800 Subject: [PATCH 048/178] fix free issue --- src/query/src/qExecutor.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 1f8baeb6d8..5531e50f85 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -6422,6 +6422,9 @@ void freeQInfo(SQInfo *pQInfo) { SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; releaseQueryBuf(pRuntimeEnv->tableqinfoGroupInfo.numOfTables); + + doDestroyTableQueryInfo(&pRuntimeEnv->tableqinfoGroupInfo); + teardownQueryRuntimeEnv(&pQInfo->runtimeEnv); SQuery *pQuery = pQInfo->runtimeEnv.pQuery; @@ -6457,7 +6460,6 @@ void freeQInfo(SQInfo *pQInfo) { } } - doDestroyTableQueryInfo(&pRuntimeEnv->tableqinfoGroupInfo); tfree(pQInfo->pBuf); tfree(pQInfo->sql); From 16d7fcc2a275910b94e657b77320ad14e2670bc5 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 2 Apr 2021 09:35:02 +0800 Subject: [PATCH 049/178] [TD-3641] : fix interlace timestamp step issue. 
(#5654) Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index df1c7bee26..7eb55acda8 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -506,7 +506,7 @@ static int taosRandom() #endif -static int createDatabases(); +static int createDatabasesAndStables(); static void createChildTables(); static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet); @@ -2416,7 +2416,7 @@ static int createSuperTable(TAOS * taos, char* dbName, return 0; } -static int createDatabases() { +static int createDatabasesAndStables() { TAOS * taos = NULL; int ret = 0; taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, NULL, g_Dbs.port); @@ -4682,6 +4682,12 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { for (int i = 0; i < batchPerTblTimes; i ++) { getTableName(tableName, pThreadInfo, tableSeq); + if (0 == strlen(tableName)) { + errorPrint("[%d] %s() LN%d, getTableName return null\n", + pThreadInfo->threadID, __func__, __LINE__); + return NULL; + exit(-1); + } int headLen; if (i == 0) { @@ -4728,7 +4734,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { remainderBufLen -= dataLen; recOfBatch += batchPerTbl; - startTime += batchPerTbl * superTblInfo->timeStampStep; +// startTime += batchPerTbl * superTblInfo->timeStampStep; pThreadInfo->totalInsertRows += batchPerTbl; verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n", pThreadInfo->threadID, __func__, __LINE__, @@ -4738,9 +4744,12 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { if (insertMode == INTERLACE_INSERT_MODE) { if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) { // turn to first table - startTime += batchPerTbl * superTblInfo->timeStampStep; tableSeq = pThreadInfo->start_table_from; generatedRecPerTbl += batchPerTbl; + + startTime = pThreadInfo->start_time + + generatedRecPerTbl * superTblInfo->timeStampStep; + flagSleep = true; if (generatedRecPerTbl >= insertRows) break; @@ -5490,7 +5499,7 @@ static int insertTestProcess() { init_rand_data(); // create database and super tables - if(createDatabases() != 0) { + if(createDatabasesAndStables() != 0) { fclose(g_fpOfInsertResult); return -1; } From 2df1702dae2500f85f68bb644f717ba24378e18a Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Fri, 2 Apr 2021 10:12:35 +0800 Subject: [PATCH 050/178] fix compile error in arm64 --- src/inc/taosmsg.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index e590fdbcbb..d7ac7dd277 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -448,13 +448,13 @@ typedef struct SSqlFuncMsg { typedef struct SExprInfo { - SSqlFuncMsg base; SColumnFilterInfo * pFilter; struct tExprNode* pExpr; int16_t bytes; int16_t type; int32_t interBytes; int64_t uid; + SSqlFuncMsg base; } SExprInfo; /* From 0bc3fddad0dc8d43d7c3e7b70834aa119531c781 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E9=87=91=E5=AF=8C?= Date: Fri, 2 Apr 2021 10:48:44 +0800 Subject: [PATCH 051/178] =?UTF-8?q?=E5=8E=BB=E6=8E=89=E5=A4=9A=E4=BD=99?= =?UTF-8?q?=E7=9A=84=E6=8B=AC=E5=8F=B7=E5=92=8C=E5=88=86=E5=8F=B7=20(#5656?= =?UTF-8?q?)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update taosdemo.go 去掉多用的括号和分号 格式化代码 * Update taosdemo.go 地址改回127.0.0.1 --- tests/examples/go/taosdemo.go | 193 +++++++++++++++++----------------- 1 
file changed, 99 insertions(+), 94 deletions(-) diff --git a/tests/examples/go/taosdemo.go b/tests/examples/go/taosdemo.go index 2c3a7d09b6..003f5aeddc 100644 --- a/tests/examples/go/taosdemo.go +++ b/tests/examples/go/taosdemo.go @@ -16,45 +16,47 @@ package main import ( "database/sql" + "flag" "fmt" - _ "github.com/taosdata/driver-go/taosSql" + "math/rand" "os" - "sync" "runtime" "strconv" + "sync" "time" - "flag" - "math/rand" + + _ "github.com/taosdata/driver-go/taosSql" + //"golang.org/x/sys/unix" ) const ( - maxLocationSize = 32 - maxSqlBufSize = 65480 + maxLocationSize = 32 + //maxSqlBufSize = 65480 ) -var locations = [maxLocationSize]string { - "Beijing", "Shanghai", "Guangzhou", "Shenzhen", - "HangZhou", "Tianjin", "Wuhan", "Changsha", - "Nanjing", "Xian"} +var locations = [maxLocationSize]string{ + "Beijing", "Shanghai", "Guangzhou", "Shenzhen", + "HangZhou", "Tianjin", "Wuhan", "Changsha", + "Nanjing", "Xian"} type config struct { - hostName string - serverPort int - user string - password string - dbName string - supTblName string - tablePrefix string - numOftables int - numOfRecordsPerTable int - numOfRecordsPerReq int - numOfThreads int - startTimestamp string - startTs int64 + hostName string + serverPort int + user string + password string + dbName string + supTblName string + tablePrefix string + numOftables int + numOfRecordsPerTable int + numOfRecordsPerReq int + numOfThreads int + startTimestamp string + startTs int64 - keep int - days int + keep int + days int } var configPara config @@ -62,7 +64,7 @@ var taosDriverName = "taosSql" var url string func init() { - flag.StringVar(&configPara.hostName, "h", "127.0.0.1","The host to connect to TDengine server.") + flag.StringVar(&configPara.hostName, "h", "127.0.0.1", "The host to connect to TDengine server.") flag.IntVar(&configPara.serverPort, "p", 6030, "The TCP/IP port number to use for the connection to TDengine server.") flag.StringVar(&configPara.user, "u", "root", "The TDengine user name to use when connecting to the server.") flag.StringVar(&configPara.password, "P", "taosdata", "The password to use when connecting to the server.") @@ -80,14 +82,14 @@ func init() { configPara.supTblName = "meters" startTs, err := time.ParseInLocation("2006-01-02 15:04:05", configPara.startTimestamp, time.Local) - if err==nil { - configPara.startTs = startTs.UnixNano() / 1e6 + if err == nil { + configPara.startTs = startTs.UnixNano() / 1e6 } } func printAllArgs() { fmt.Printf("\n============= args parse result: =============\n") - fmt.Printf("hostName: %v\n", configPara.hostName) + fmt.Printf("hostName: %v\n", configPara.hostName) fmt.Printf("serverPort: %v\n", configPara.serverPort) fmt.Printf("usr: %v\n", configPara.user) fmt.Printf("password: %v\n", configPara.password) @@ -104,10 +106,10 @@ func printAllArgs() { func main() { printAllArgs() fmt.Printf("Please press enter key to continue....\n") - fmt.Scanln() + _, _ = fmt.Scanln() url = "root:taosdata@/tcp(" + configPara.hostName + ":" + strconv.Itoa(configPara.serverPort) + ")/" - //url = fmt.Sprintf("%s:%s@/tcp(%s:%d)/%s?interpolateParams=true", configPara.user, configPara.password, configPara.hostName, configPara.serverPort, configPara.dbName) + //url = fmt.Sprintf("%s:%s@/tcp(%s:%d)/%s?interpolateParams=true", configPara.user, configPara.password, configPara.hostName, configPara.serverPort, configPara.dbName) // open connect to taos server //db, err := sql.Open(taosDriverName, url) //if err != nil { @@ -115,7 +117,7 @@ func main() { // os.Exit(1) //} //defer db.Close() - 
rand.Seed(time.Now().Unix()) + rand.Seed(time.Now().Unix()) createDatabase(configPara.dbName, configPara.supTblName) fmt.Printf("======== create database success! ========\n\n") @@ -138,7 +140,7 @@ func main() { func createDatabase(dbName string, supTblName string) { db, err := sql.Open(taosDriverName, url) if err != nil { - fmt.Println("Open database error: %s\n", err) + fmt.Printf("Open database error: %s\n", err) os.Exit(1) } defer db.Close() @@ -165,27 +167,27 @@ func createDatabase(dbName string, supTblName string) { checkErr(err, sqlStr) } -func multiThreadCreateTable(threads int, ntables int, dbName string, tablePrefix string) { +func multiThreadCreateTable(threads int, nTables int, dbName string, tablePrefix string) { st := time.Now().UnixNano() - if (threads < 1) { - threads = 1; + if threads < 1 { + threads = 1 } - a := ntables / threads; - if (a < 1) { - threads = ntables; - a = 1; + a := nTables / threads + if a < 1 { + threads = nTables + a = 1 } - b := ntables % threads; + b := nTables % threads - last := 0; + last := 0 endTblId := 0 wg := sync.WaitGroup{} for i := 0; i < threads; i++ { startTblId := last - if (i < b ) { + if i < b { endTblId = last + a } else { endTblId = last + a - 1 @@ -206,42 +208,43 @@ func createTable(dbName string, childTblPrefix string, startTblId int, endTblId db, err := sql.Open(taosDriverName, url) if err != nil { - fmt.Println("Open database error: %s\n", err) + fmt.Printf("Open database error: %s\n", err) os.Exit(1) } defer db.Close() - for i := startTblId; i <= endTblId; i++ { - sqlStr := "create table if not exists " + dbName + "." + childTblPrefix + strconv.Itoa(i) + " using " + dbName + ".meters tags('" + locations[i%maxLocationSize] + "', " + strconv.Itoa(i) + ");" - //fmt.Printf("sqlStr: %v\n", sqlStr) - _, err = db.Exec(sqlStr) - checkErr(err, sqlStr) - } - wg.Done() - runtime.Goexit() + for i := startTblId; i <= endTblId; i++ { + sqlStr := "create table if not exists " + dbName + "." 
+ childTblPrefix + strconv.Itoa(i) + " using " + dbName + ".meters tags('" + locations[i%maxLocationSize] + "', " + strconv.Itoa(i) + ");" + //fmt.Printf("sqlStr: %v\n", sqlStr) + _, err = db.Exec(sqlStr) + checkErr(err, sqlStr) + } + wg.Done() + runtime.Goexit() } func generateRowData(ts int64) string { - voltage := rand.Int() % 1000 - current := 200 + rand.Float32() - phase := rand.Float32() - values := "( " + strconv.FormatInt(ts, 10) + ", " + strconv.FormatFloat(float64(current), 'f', 6, 64) + ", " + strconv.Itoa(voltage) + ", " + strconv.FormatFloat(float64(phase), 'f', 6, 64) + " ) " - return values + voltage := rand.Int() % 1000 + current := 200 + rand.Float32() + phase := rand.Float32() + values := "( " + strconv.FormatInt(ts, 10) + ", " + strconv.FormatFloat(float64(current), 'f', 6, 64) + ", " + strconv.Itoa(voltage) + ", " + strconv.FormatFloat(float64(phase), 'f', 6, 64) + " ) " + return values } + func insertData(dbName string, childTblPrefix string, startTblId int, endTblId int, wg *sync.WaitGroup) { //fmt.Printf("subThread[%d]: insert data to table from %d to %d \n", unix.Gettid(), startTblId, endTblId) // windows.GetCurrentThreadId() db, err := sql.Open(taosDriverName, url) if err != nil { - fmt.Println("Open database error: %s\n", err) + fmt.Printf("Open database error: %s\n", err) os.Exit(1) } defer db.Close() - tmpTs := configPara.startTs; + tmpTs := configPara.startTs //rand.New(rand.NewSource(time.Now().UnixNano())) - for tID := startTblId; tID <= endTblId; tID++{ + for tID := startTblId; tID <= endTblId; tID++ { totalNum := 0 for { sqlStr := "insert into " + dbName + "." + childTblPrefix + strconv.Itoa(tID) + " values " @@ -249,13 +252,13 @@ func insertData(dbName string, childTblPrefix string, startTblId int, endTblId i for { tmpTs += 1000 valuesOfRow := generateRowData(tmpTs) - currRowNum += 1 - totalNum += 1 + currRowNum += 1 + totalNum += 1 sqlStr = fmt.Sprintf("%s %s", sqlStr, valuesOfRow) - if (currRowNum >= configPara.numOfRecordsPerReq || totalNum >= configPara.numOfRecordsPerTable) { - break + if currRowNum >= configPara.numOfRecordsPerReq || totalNum >= configPara.numOfRecordsPerTable { + break } } @@ -265,12 +268,12 @@ func insertData(dbName string, childTblPrefix string, startTblId int, endTblId i count, err := res.RowsAffected() checkErr(err, "rows affected") - if (count != int64(currRowNum)) { - fmt.Printf("insert data, expect affected:%d, actual:%d\n", currRowNum, count) + if count != int64(currRowNum) { + fmt.Printf("insert data, expect affected:%d, actual:%d\n", currRowNum, count) os.Exit(1) } - if (totalNum >= configPara.numOfRecordsPerTable) { + if totalNum >= configPara.numOfRecordsPerTable { break } } @@ -279,44 +282,46 @@ func insertData(dbName string, childTblPrefix string, startTblId int, endTblId i wg.Done() runtime.Goexit() } -func multiThreadInsertData(threads int, ntables int, dbName string, tablePrefix string) { + +func multiThreadInsertData(threads int, nTables int, dbName string, tablePrefix string) { st := time.Now().UnixNano() - if (threads < 1) { - threads = 1; + if threads < 1 { + threads = 1 } - a := ntables / threads; - if (a < 1) { - threads = ntables; - a = 1; + a := nTables / threads + if a < 1 { + threads = nTables + a = 1 } - b := ntables % threads; + b := nTables % threads - last := 0; + last := 0 endTblId := 0 wg := sync.WaitGroup{} for i := 0; i < threads; i++ { startTblId := last - if (i < b ) { + if i < b { endTblId = last + a } else { endTblId = last + a - 1 } last = endTblId + 1 wg.Add(1) - go insertData(dbName, 
tablePrefix, startTblId , endTblId, &wg) + go insertData(dbName, tablePrefix, startTblId, endTblId, &wg) } wg.Wait() et := time.Now().UnixNano() fmt.Printf("insert data spent duration: %6.6fs\n", (float32(et-st))/1e9) } -func selectTest(dbName string, tbPrefix string, supTblName string){ + +func selectTest(dbName string, tbPrefix string, supTblName string) { db, err := sql.Open(taosDriverName, url) if err != nil { - fmt.Println("Open database error: %s\n", err) + fmt.Printf("Open database error: %s\n", err) os.Exit(1) } defer db.Close() @@ -332,12 +337,12 @@ func selectTest(dbName string, tbPrefix string, supTblName string){ fmt.Printf("query sql: %s\n", sqlStr) for rows.Next() { var ( - ts string - current float32 - voltage int - phase float32 - location string - groupid int + ts string + current float32 + voltage int + phase float32 + location string + groupid int ) err := rows.Scan(&ts, ¤t, &voltage, &phase, &location, &groupid) if err != nil { @@ -352,7 +357,7 @@ func selectTest(dbName string, tbPrefix string, supTblName string){ } // select sql 2 - sqlStr = "select avg(voltage), min(voltage), max(voltage) from " + dbName + "." + tbPrefix + strconv.Itoa( rand.Int() % configPara.numOftables) + sqlStr = "select avg(voltage), min(voltage), max(voltage) from " + dbName + "." + tbPrefix + strconv.Itoa(rand.Int()%configPara.numOftables) rows, err = db.Query(sqlStr) checkErr(err, sqlStr) @@ -360,9 +365,9 @@ func selectTest(dbName string, tbPrefix string, supTblName string){ fmt.Printf("\nquery sql: %s\n", sqlStr) for rows.Next() { var ( - voltageAvg float32 - voltageMin int - voltageMax int + voltageAvg float32 + voltageMin int + voltageMax int ) err := rows.Scan(&voltageAvg, &voltageMin, &voltageMax) if err != nil { @@ -385,10 +390,10 @@ func selectTest(dbName string, tbPrefix string, supTblName string){ fmt.Printf("\nquery sql: %s\n", sqlStr) for rows.Next() { var ( - lastTs string - lastCurrent float32 - lastVoltage int - lastPhase float32 + lastTs string + lastCurrent float32 + lastVoltage int + lastPhase float32 ) err := rows.Scan(&lastTs, &lastCurrent, &lastVoltage, &lastPhase) if err != nil { From 83fef54bc261d0fd58b353673b3401433762b5f8 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Fri, 2 Apr 2021 11:36:11 +0800 Subject: [PATCH 052/178] fix windows compile error --- src/client/src/tscSQLParser.c | 1 + src/query/src/qExecutor.c | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 4448f0379b..067533c678 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -7446,3 +7446,4 @@ bool hasNormalColumnFilter(SQueryInfo* pQueryInfo) { + \ No newline at end of file diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index d6ce220548..a75a22d016 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -6051,7 +6051,7 @@ int32_t cloneExprFilterInfo(SColumnFilterInfo **dst, SColumnFilterInfo* src, int for (int32_t i = 0; i < filterNum; i++) { if ((*dst)[i].filterstr && dst[i]->len > 0) { - void *pz = calloc(1, (*dst)[i].len + 1); + void *pz = calloc(1, (size_t)(*dst)[i].len + 1); if (pz == NULL) { if (i == 0) { @@ -6063,7 +6063,7 @@ int32_t cloneExprFilterInfo(SColumnFilterInfo **dst, SColumnFilterInfo* src, int return TSDB_CODE_QRY_OUT_OF_MEMORY; } - memcpy(pz, (void *)src->pz, src->len + 1); + memcpy(pz, (void *)src->pz, (size_t)src->len + 1); (*dst)[i].pz = (int64_t)pz; } From 855c5b0ec1a101485593e90ea61fba47b7a4f26a Mon Sep 17 
00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Fri, 2 Apr 2021 11:43:46 +0800 Subject: [PATCH 053/178] fix windows compile error --- src/client/src/tscSQLParser.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 067533c678..6de315d992 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -6020,7 +6020,7 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo if (TSDB_COL_IS_TAG(pColIndex->flag)) { SColumnIndex index = {.tableIndex = pQueryInfo->groupbyExpr.tableIndex, .columnIndex = colIndex}; - SSqlExpr* pExpr = tscSqlExprInsert(pQueryInfo, size - pQueryInfo->havingFieldNum, TSDB_FUNC_TAG, &index, type, bytes, getNewResColId(pQueryInfo), bytes, true); + SSqlExpr* pExpr = tscSqlExprInsert(pQueryInfo, (int32_t)size - pQueryInfo->havingFieldNum, TSDB_FUNC_TAG, &index, type, bytes, getNewResColId(pQueryInfo), bytes, true); memset(pExpr->aliasName, 0, sizeof(pExpr->aliasName)); tstrncpy(pExpr->aliasName, name, sizeof(pExpr->aliasName)); @@ -6773,7 +6773,7 @@ static int32_t checkQueryRangeForFill(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { ++pQueryInfo->havingFieldNum; size_t n = tscSqlExprNumOfExprs(pQueryInfo); - SSqlExpr* pSqlExpr = tscSqlExprGet(pQueryInfo, n - 1); + SSqlExpr* pSqlExpr = tscSqlExprGet(pQueryInfo, (int32_t)n - 1); int32_t slot = tscNumOfFields(pQueryInfo) - 1; SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, slot); @@ -7446,4 +7446,3 @@ bool hasNormalColumnFilter(SQueryInfo* pQueryInfo) { - \ No newline at end of file From 9478fafe9040627ce07a68ca335294875fd89462 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Fri, 2 Apr 2021 13:25:26 +0800 Subject: [PATCH 054/178] [TD-3649] : update compatibility about HuaWei EulerOS. 
--- documentation20/cn/02.getting-started/docs.md | 27 ++++++++++--------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/documentation20/cn/02.getting-started/docs.md b/documentation20/cn/02.getting-started/docs.md index f86e616c42..6b1b8592d7 100644 --- a/documentation20/cn/02.getting-started/docs.md +++ b/documentation20/cn/02.getting-started/docs.md @@ -179,19 +179,20 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s); ### TDengine服务器支持的平台列表 -| | **CentOS 6/7/8** | **Ubuntu 16/18/20** | **Other Linux** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** | -| -------------- | --------------------- | ------------------------ | --------------- | --------------- | ------------------------- | --------------------- | -| X64 | ● | ● | | ○ | ● | ● | -| 树莓派 ARM32 | | ● | ● | | | | -| 龙芯 MIPS64 | | | ● | | | | -| 鲲鹏 ARM64 | | ○ | ○ | | ● | | -| 申威 Alpha64 | | | ○ | ● | | | -| 飞腾 ARM64 | | ○ 优麒麟 | | | | | -| 海光 X64 | ● | ● | ● | ○ | ● | ● | -| 瑞芯微 ARM64/32 | | | ○ | | | | -| 全志 ARM64/32 | | | ○ | | | | -| 炬力 ARM64/32 | | | ○ | | | | -| TI ARM32 | | | ○ | | | | +| | **CentOS 6/7/8** | **Ubuntu 16/18/20** | **Other Linux** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** | **华为EulerOS** | +| -------------- | --------------------- | ------------------------ | --------------- | --------------- | ------------------------- | --------------------- | --------------------- | +| X64 | ● | ● | | ○ | ● | ● | ● | +| 树莓派 ARM32 | | ● | ● | | | | | +| 龙芯 MIPS64 | | | ● | | | | | +| 鲲鹏 ARM64 | | ○ | ○ | | ● | | | +| 申威 Alpha64 | | | ○ | ● | | | | +| 飞腾 ARM64 | | ○ 优麒麟 | | | | | | +| 海光 X64 | ● | ● | ● | ○ | ● | ● | | +| 瑞芯微 ARM64/32 | | | ○ | | | | | +| 全志 ARM64/32 | | | ○ | | | | | +| 炬力 ARM64/32 | | | ○ | | | | | +| TI ARM32 | | | ○ | | | | | +| 华为云 ARM64 | | | | | | | ● | 注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。 From 207e5aafd1d93e2b4127c4063e132985d9ad72fc Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Fri, 2 Apr 2021 13:42:48 +0800 Subject: [PATCH 055/178] [TD-3649] : fix minor typo. 
--- documentation20/cn/02.getting-started/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/cn/02.getting-started/docs.md b/documentation20/cn/02.getting-started/docs.md index 6b1b8592d7..43392e0325 100644 --- a/documentation20/cn/02.getting-started/docs.md +++ b/documentation20/cn/02.getting-started/docs.md @@ -179,7 +179,7 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s); ### TDengine服务器支持的平台列表 -| | **CentOS 6/7/8** | **Ubuntu 16/18/20** | **Other Linux** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** | **华为EulerOS** | +| | **CentOS 6/7/8** | **Ubuntu 16/18/20** | **Other Linux** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** | **华为 EulerOS** | | -------------- | --------------------- | ------------------------ | --------------- | --------------- | ------------------------- | --------------------- | --------------------- | | X64 | ● | ● | | ○ | ● | ● | ● | | 树莓派 ARM32 | | ● | ● | | | | | From 9870637abc2610a52aeddbcb9455843d8eab070d Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Fri, 2 Apr 2021 14:40:37 +0800 Subject: [PATCH 056/178] fix negative time expression --- src/client/src/tscParseInsert.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 2b962333d5..103cc067b1 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -117,7 +117,7 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1 if (sToken.type == TK_PLUS) { useconds += interval; } else { - useconds = (useconds >= interval) ? useconds - interval : 0; + useconds = useconds - interval; } *next = pTokenEnd; From 8904a1644f9d12ef7775d24345d14326ebe6694f Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Fri, 2 Apr 2021 15:25:29 +0800 Subject: [PATCH 057/178] remove failed case --- tests/pytest/fulltest.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index bda52ffe6c..7ff41b13a6 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -178,7 +178,7 @@ python3 ./test.py -f stable/query_after_reset.py # perfbenchmark python3 ./test.py -f perfbenchmark/bug3433.py -python3 ./test.py -f perfbenchmark/bug3589.py +#python3 ./test.py -f perfbenchmark/bug3589.py #query From 9a922281abcc60b956aabc836234da1153a464a9 Mon Sep 17 00:00:00 2001 From: root Date: Fri, 2 Apr 2021 08:06:48 +0000 Subject: [PATCH 058/178] fix bug --- src/cq/src/cqMain.c | 6 ++++-- src/cq/test/cqtest.c | 2 +- src/inc/tcq.h | 2 +- src/inc/tsdb.h | 2 +- src/tsdb/src/tsdbMain.c | 4 ++-- src/tsdb/src/tsdbMeta.c | 4 ++-- 6 files changed, 11 insertions(+), 9 deletions(-) diff --git a/src/cq/src/cqMain.c b/src/cq/src/cqMain.c index fb0c1508cb..d4a874c97a 100644 --- a/src/cq/src/cqMain.c +++ b/src/cq/src/cqMain.c @@ -294,7 +294,7 @@ void cqStop(void *handle) { pthread_mutex_unlock(&pContext->mutex); } -void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, char *sqlStr, STSchema *pSchema) { +void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, char *sqlStr, STSchema *pSchema, int start) { if (tsEnableStream == 0) { return NULL; } @@ -326,7 +326,9 @@ void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, ch pObj->rid = taosAddRef(cqObjRef, pObj); - cqCreateStream(pContext, pObj); + if(start) { + cqCreateStream(pContext, pObj); + } rid = pObj->rid; diff --git a/src/cq/test/cqtest.c b/src/cq/test/cqtest.c index 
f378835f0a..b1153397ba 100644 --- a/src/cq/test/cqtest.c +++ b/src/cq/test/cqtest.c @@ -70,7 +70,7 @@ int main(int argc, char *argv[]) { tdDestroyTSchemaBuilder(&schemaBuilder); for (int sid =1; sid<10; ++sid) { - cqCreate(pCq, sid, sid, NULL, "select avg(speed) from demo.t1 sliding(1s) interval(5s)", pSchema); + cqCreate(pCq, sid, sid, NULL, "select avg(speed) from demo.t1 sliding(1s) interval(5s)", pSchema, 1); } tdFreeSchema(pSchema); diff --git a/src/inc/tcq.h b/src/inc/tcq.h index 1941649d0a..552a40665a 100644 --- a/src/inc/tcq.h +++ b/src/inc/tcq.h @@ -42,7 +42,7 @@ void cqStart(void *handle); void cqStop(void *handle); // cqCreate is called by TSDB to start an instance of CQ -void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, char *sqlStr, STSchema *pSchema); +void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, char *sqlStr, STSchema *pSchema, int start); // cqDrop is called by TSDB to stop an instance of CQ, handle is the return value of cqCreate void cqDrop(void *handle); diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index 495bfa2384..85ee9f0443 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -51,7 +51,7 @@ typedef struct { void *cqH; int (*notifyStatus)(void *, int status, int eno); int (*eventCallBack)(void *); - void *(*cqCreateFunc)(void *handle, uint64_t uid, int32_t sid, const char *dstTable, char *sqlStr, STSchema *pSchema); + void *(*cqCreateFunc)(void *handle, uint64_t uid, int32_t sid, const char *dstTable, char *sqlStr, STSchema *pSchema, int start); void (*cqDropFunc)(void *handle); } STsdbAppH; diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index 8969f61596..99929f3542 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -526,7 +526,7 @@ static void tsdbStartStream(STsdbRepo *pRepo) { STable *pTable = pMeta->tables[i]; if (pTable && pTable->type == TSDB_STREAM_TABLE) { pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), TABLE_NAME(pTable)->data, pTable->sql, - tsdbGetTableSchemaImpl(pTable, false, false, -1)); + tsdbGetTableSchemaImpl(pTable, false, false, -1), 0); } } } @@ -619,4 +619,4 @@ int tsdbRestoreInfo(STsdbRepo *pRepo) { tsdbDestroyReadH(&readh); return 0; -} \ No newline at end of file +} diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index 824f69608e..3e6263b9d3 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -840,7 +840,7 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx, boo if (lock && tsdbUnlockRepoMeta(pRepo) < 0) return -1; if (TABLE_TYPE(pTable) == TSDB_STREAM_TABLE && addIdx) { pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), TABLE_NAME(pTable)->data, pTable->sql, - tsdbGetTableSchemaImpl(pTable, false, false, -1)); + tsdbGetTableSchemaImpl(pTable, false, false, -1), 1); } tsdbDebug("vgId:%d table %s tid %d uid %" PRIu64 " is added to meta", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), @@ -1322,4 +1322,4 @@ static int tsdbCheckTableTagVal(SKVRow *pKVRow, STSchema *pSchema) { } return 0; -} \ No newline at end of file +} From 945013bd1143577872ed71131e4895373e1c0d05 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 2 Apr 2021 17:09:13 +0800 Subject: [PATCH 059/178] [TD-3653] : taosdemo json parameter cleanup. 
(#5665) Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 7eb55acda8..13c8445e64 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -263,7 +263,6 @@ typedef struct SSuperTable_S { int lenOfTagOfOneRow; char* sampleDataBuf; - int sampleDataBufSize; //int sampleRowCount; //int sampleUsePos; @@ -3552,19 +3551,6 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { goto PARSE_OVER; } - cJSON* sampleDataBufSize = cJSON_GetObjectItem(stbInfo, "sample_buf_size"); - if (sampleDataBufSize && sampleDataBufSize->type == cJSON_Number) { - g_Dbs.db[i].superTbls[j].sampleDataBufSize = sampleDataBufSize->valueint; - if (g_Dbs.db[i].superTbls[j].sampleDataBufSize < 1024*1024) { - g_Dbs.db[i].superTbls[j].sampleDataBufSize = 1024*1024 + 1024; - } - } else if (!sampleDataBufSize) { - g_Dbs.db[i].superTbls[j].sampleDataBufSize = 1024*1024 + 1024; - } else { - printf("ERROR: failed to read json, sample_buf_size not found\n"); - goto PARSE_OVER; - } - cJSON *sampleFormat = cJSON_GetObjectItem(stbInfo, "sample_format"); if (sampleFormat && sampleFormat->type == cJSON_String && sampleFormat->valuestring != NULL) { From 2489d1f3c7953c6cb70b2521641c77d1146f007b Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 2 Apr 2021 09:31:56 +0000 Subject: [PATCH 060/178] [TD-3458]: Fix the bug of losing tables in multiple replicas --- src/dnode/src/dnodeVWrite.c | 2 +- src/sync/src/syncMain.c | 7 ++++++- src/vnode/src/vnodeWrite.c | 6 +++++- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/src/dnode/src/dnodeVWrite.c b/src/dnode/src/dnodeVWrite.c index 84fd260d91..26084a52eb 100644 --- a/src/dnode/src/dnodeVWrite.c +++ b/src/dnode/src/dnodeVWrite.c @@ -222,7 +222,7 @@ static void *dnodeProcessVWriteQueue(void *wparam) { dnodeSendRpcVWriteRsp(pVnode, pWrite, pWrite->code); } else { if (qtype == TAOS_QTYPE_FWD) { - vnodeConfirmForward(pVnode, pWrite->pHead.version, 0, pWrite->pHead.msgType != TSDB_MSG_TYPE_SUBMIT); + vnodeConfirmForward(pVnode, pWrite->pHead.version, pWrite->code, pWrite->pHead.msgType != TSDB_MSG_TYPE_SUBMIT); } if (pWrite->rspRet.rsp) { rpcFreeCont(pWrite->rspRet.rsp); diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index d21743d40a..42ff02b4c4 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -1459,7 +1459,12 @@ static int32_t syncForwardToPeerImpl(SSyncNode *pNode, void *data, void *mhandle if ((pNode->quorum > 1 || force) && code == 0) { code = syncSaveFwdInfo(pNode, pWalHead->version, mhandle); - if (code >= 0) code = 1; + if (code >= 0) { + code = 1; + } else { + pthread_mutex_unlock(&pNode->mutex); + return code; + } } int32_t retLen = taosWriteMsg(pPeer->peerFd, pSyncHead, fwdLen); diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c index 92e1ba804b..a0be52db7a 100644 --- a/src/vnode/src/vnodeWrite.c +++ b/src/vnode/src/vnodeWrite.c @@ -91,13 +91,17 @@ int32_t vnodeProcessWrite(void *vparam, void *wparam, int32_t qtype, void *rpara int32_t syncCode = 0; bool force = (pWrite == NULL ? 
false : pWrite->pHead.msgType != TSDB_MSG_TYPE_SUBMIT); syncCode = syncForwardToPeer(pVnode->sync, pHead, pWrite, qtype, force); - if (syncCode < 0) return syncCode; + if (syncCode < 0) { + pHead->version = 0; + return syncCode; + } // write into WAL code = walWrite(pVnode->wal, pHead); if (code < 0) { if (syncCode > 0) atomic_sub_fetch_32(&pWrite->processedCount, 1); vError("vgId:%d, hver:%" PRIu64 " vver:%" PRIu64 " code:0x%x", pVnode->vgId, pHead->version, pVnode->version, code); + pHead->version = 0; return code; } From 0efdcdfd4edcd754d736aaf8d3fb94983f41cd93 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 2 Apr 2021 09:52:34 +0000 Subject: [PATCH 061/178] change fwds info save size --- src/sync/inc/syncInt.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sync/inc/syncInt.h b/src/sync/inc/syncInt.h index 91613ae351..b4d9315a8e 100644 --- a/src/sync/inc/syncInt.h +++ b/src/sync/inc/syncInt.h @@ -35,7 +35,7 @@ extern "C" { #define SYNC_MAX_SIZE (TSDB_MAX_WAL_SIZE + sizeof(SWalHead) + sizeof(SSyncHead) + 16) #define SYNC_RECV_BUFFER_SIZE (5*1024*1024) -#define SYNC_MAX_FWDS 512 +#define SYNC_MAX_FWDS 1024 #define SYNC_FWD_TIMER 300 #define SYNC_ROLE_TIMER 15000 // ms #define SYNC_CHECK_INTERVAL 1000 // ms From a1c69e5a0728db3c9713625f373b3a727f2aa642 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Fri, 2 Apr 2021 18:21:19 +0800 Subject: [PATCH 062/178] fix taosd crash issue --- src/cq/src/cqMain.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/cq/src/cqMain.c b/src/cq/src/cqMain.c index d4a874c97a..f620eb4dd5 100644 --- a/src/cq/src/cqMain.c +++ b/src/cq/src/cqMain.c @@ -326,8 +326,10 @@ void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, ch pObj->rid = taosAddRef(cqObjRef, pObj); - if(start) { + if(start && pContext->master) { cqCreateStream(pContext, pObj); + } else { + pObj->pContext = pContext; } rid = pObj->rid; From 0ebd1a98552ed6381329dfd10878c232916fa2ce Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 2 Apr 2021 18:38:38 +0800 Subject: [PATCH 063/178] sync/monitor-node-role: fix incorrect self & peer log --- src/sync/src/syncMain.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index d21743d40a..ad8236b10d 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -1372,7 +1372,7 @@ static void syncMonitorNodeRole(void *param, void *tmrId) { if (/*pPeer->role > TAOS_SYNC_ROLE_UNSYNCED && */ nodeRole > TAOS_SYNC_ROLE_UNSYNCED) continue; if (/*pPeer->sstatus > TAOS_SYNC_STATUS_INIT || */ nodeSStatus > TAOS_SYNC_STATUS_INIT) continue; - sDebug("%s, check roles since self:%s sstatus:%s, peer:%s sstatus:%s", pPeer->id, syncRole[pPeer->role], + sDebug("%s, check roles since peer:%s sstatus:%s, self:%s sstatus:%s", pPeer->id, syncRole[pPeer->role], syncStatus[pPeer->sstatus], syncRole[nodeRole], syncStatus[nodeSStatus]); syncSendPeersStatusMsgToPeer(pPeer, 1, SYNC_STATUS_CHECK_ROLE, syncGenTranId()); break; From 65f01a454aa025de6559c89e895f3a8a4a724910 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 2 Apr 2021 18:39:15 +0800 Subject: [PATCH 064/178] [TD-3657]: sdbUpdateMnodeRoles before check mnodeAllOnline --- src/mnode/src/mnodeDnode.c | 5 +++++ src/mnode/src/mnodeMnode.c | 2 ++ 2 files changed, 7 insertions(+) diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c index db03da4fe1..80473ba5ae 100644 --- a/src/mnode/src/mnodeDnode.c +++ b/src/mnode/src/mnodeDnode.c @@ -628,6 
+628,11 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) { bnNotify(); } + if (!tsEnableBalance) { + int32_t numOfMnodes = mnodeGetMnodesNum(); + if (numOfMnodes < tsNumOfMnodes) bnNotify(); + } + if (openVnodes != pDnode->openVnodes) { mnodeCheckUnCreatedVgroup(pDnode, pStatus->load, openVnodes); } diff --git a/src/mnode/src/mnodeMnode.c b/src/mnode/src/mnodeMnode.c index 49473d3e06..ca6d6400ae 100644 --- a/src/mnode/src/mnodeMnode.c +++ b/src/mnode/src/mnodeMnode.c @@ -381,6 +381,8 @@ static bool mnodeAllOnline() { void *pIter = NULL; bool allOnline = true; + sdbUpdateMnodeRoles(); + while (1) { SMnodeObj *pMnode = NULL; pIter = mnodeGetNextMnode(pIter, &pMnode); From e1739b86a90bc773e01ed1be112d0719b216e7ea Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 2 Apr 2021 19:22:04 +0800 Subject: [PATCH 065/178] Hotfix/sangshuduo/td 3607 taosdemo buffer overflow (#5659) * [TD-3607] : fix taosdemo buffer overflow. * [TD-3607] : taosdemo buffer overflow. add tmp buffer. * [TD-3607] : taosdemo buffer overflow. fix data generation. * [TD-3607] : taosdemo buffer overflow. fix normal table writting. * [TD-3607] : taosdemo buffer overflow. remove tail spaces. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table test case. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table case. * [TD-3607] : taosdemo buffer overflow. adjust limit offset count warning. * [TD-3607] : taosdemo buffer overflow. add more logic for child tables exist. * [TD-3607] : taosdemo buffer overflow. create database if database be dropped only. * [TD-3607] : fix taosdemo buffer overflow. adjust limit and offset test cases. * [TD-3607] : taosdemo buffer overflow. adjust sample data test case. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 181 ++++++++++-------- .../insert-tblimit-tboffset-createdb.json | 57 ++++++ .../insert-tblimit-tboffset-insertrec.json | 59 ++++++ .../tools/insert-tblimit-tboffset0.json | 4 +- .../tools/insert-tblimit1-tboffset.json | 4 +- tests/pytest/tools/taosdemo-sampledata.json | 2 - tests/pytest/tools/taosdemoTestLimitOffset.py | 5 +- tests/pytest/tools/taosdemoTestSampleData.py | 2 +- 8 files changed, 224 insertions(+), 90 deletions(-) create mode 100644 tests/pytest/tools/insert-tblimit-tboffset-createdb.json create mode 100644 tests/pytest/tools/insert-tblimit-tboffset-insertrec.json diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 13c8445e64..58d4a1820c 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -2433,82 +2433,82 @@ static int createDatabasesAndStables() { taos_close(taos); return -1; } - } - int dataLen = 0; - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, "create database if not exists %s", g_Dbs.db[i].dbName); + int dataLen = 0; + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, "create database if not exists %s", g_Dbs.db[i].dbName); - if (g_Dbs.db[i].dbCfg.blocks > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " blocks %d", g_Dbs.db[i].dbCfg.blocks); - } - if (g_Dbs.db[i].dbCfg.cache > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " cache %d", g_Dbs.db[i].dbCfg.cache); - } - if (g_Dbs.db[i].dbCfg.days > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " days %d", g_Dbs.db[i].dbCfg.days); - } - if (g_Dbs.db[i].dbCfg.keep > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " keep %d", g_Dbs.db[i].dbCfg.keep); - } - if (g_Dbs.db[i].dbCfg.quorum > 1) { - 
dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " quorum %d", g_Dbs.db[i].dbCfg.quorum); - } - if (g_Dbs.db[i].dbCfg.replica > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " replica %d", g_Dbs.db[i].dbCfg.replica); - } - if (g_Dbs.db[i].dbCfg.update > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " update %d", g_Dbs.db[i].dbCfg.update); - } - //if (g_Dbs.db[i].dbCfg.maxtablesPerVnode > 0) { - // dataLen += snprintf(command + dataLen, - // BUFFER_SIZE - dataLen, "tables %d ", g_Dbs.db[i].dbCfg.maxtablesPerVnode); - //} - if (g_Dbs.db[i].dbCfg.minRows > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " minrows %d", g_Dbs.db[i].dbCfg.minRows); - } - if (g_Dbs.db[i].dbCfg.maxRows > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " maxrows %d", g_Dbs.db[i].dbCfg.maxRows); - } - if (g_Dbs.db[i].dbCfg.comp > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " comp %d", g_Dbs.db[i].dbCfg.comp); - } - if (g_Dbs.db[i].dbCfg.walLevel > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " wal %d", g_Dbs.db[i].dbCfg.walLevel); - } - if (g_Dbs.db[i].dbCfg.cacheLast > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " cachelast %d", g_Dbs.db[i].dbCfg.cacheLast); - } - if (g_Dbs.db[i].dbCfg.fsync > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " fsync %d", g_Dbs.db[i].dbCfg.fsync); - } - if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", strlen("ms"))) - || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, - "us", strlen("us")))) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " precision \'%s\';", g_Dbs.db[i].dbCfg.precision); - } + if (g_Dbs.db[i].dbCfg.blocks > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " blocks %d", g_Dbs.db[i].dbCfg.blocks); + } + if (g_Dbs.db[i].dbCfg.cache > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " cache %d", g_Dbs.db[i].dbCfg.cache); + } + if (g_Dbs.db[i].dbCfg.days > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " days %d", g_Dbs.db[i].dbCfg.days); + } + if (g_Dbs.db[i].dbCfg.keep > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " keep %d", g_Dbs.db[i].dbCfg.keep); + } + if (g_Dbs.db[i].dbCfg.quorum > 1) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " quorum %d", g_Dbs.db[i].dbCfg.quorum); + } + if (g_Dbs.db[i].dbCfg.replica > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " replica %d", g_Dbs.db[i].dbCfg.replica); + } + if (g_Dbs.db[i].dbCfg.update > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " update %d", g_Dbs.db[i].dbCfg.update); + } + //if (g_Dbs.db[i].dbCfg.maxtablesPerVnode > 0) { + // dataLen += snprintf(command + dataLen, + // BUFFER_SIZE - dataLen, "tables %d ", g_Dbs.db[i].dbCfg.maxtablesPerVnode); + //} + if (g_Dbs.db[i].dbCfg.minRows > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " minrows %d", g_Dbs.db[i].dbCfg.minRows); + } + if (g_Dbs.db[i].dbCfg.maxRows > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " maxrows %d", g_Dbs.db[i].dbCfg.maxRows); + } + if (g_Dbs.db[i].dbCfg.comp > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " comp %d", g_Dbs.db[i].dbCfg.comp); + } + if (g_Dbs.db[i].dbCfg.walLevel > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " wal %d", 
g_Dbs.db[i].dbCfg.walLevel); + } + if (g_Dbs.db[i].dbCfg.cacheLast > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " cachelast %d", g_Dbs.db[i].dbCfg.cacheLast); + } + if (g_Dbs.db[i].dbCfg.fsync > 0) { + dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, + " fsync %d", g_Dbs.db[i].dbCfg.fsync); + } + if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", strlen("ms"))) + || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, + "us", strlen("us")))) { + dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, + " precision \'%s\';", g_Dbs.db[i].dbCfg.precision); + } - debugPrint("%s() %d command: %s\n", __func__, __LINE__, command); - if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { - taos_close(taos); - errorPrint( "\ncreate database %s failed!\n\n", g_Dbs.db[i].dbName); - return -1; + debugPrint("%s() %d command: %s\n", __func__, __LINE__, command); + if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { + taos_close(taos); + errorPrint( "\ncreate database %s failed!\n\n", g_Dbs.db[i].dbName); + return -1; + } + printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName); } - printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName); debugPrint("%s() %d supertbl count:%d\n", __func__, __LINE__, g_Dbs.db[i].superTblCount); @@ -5132,16 +5132,14 @@ static void startMultiThreadInsertData(int threads, char* db_name, if (superTblInfo) { int limit, offset; - if (superTblInfo->childTblOffset >= superTblInfo->childTblCount) { - printf("WARNING: specified offset >= child table count! \n"); - if (!g_args.answer_yes) { - printf(" Press enter key to continue or Ctrl-C to stop\n\n"); - (void)getchar(); - } + if ((superTblInfo->childTblExists == TBL_NO_EXISTS) && + ((superTblInfo->childTblOffset != 0) || (superTblInfo->childTblLimit != 0))) { + printf("WARNING: offset and limit will not be used since the child tables are not exists!\n"); } - if (superTblInfo->childTblOffset >= 0) { - if (superTblInfo->childTblLimit <= 0) { + if ((superTblInfo->childTblExists == TBL_ALREADY_EXISTS) + && (superTblInfo->childTblOffset >= 0)) { + if (superTblInfo->childTblLimit < 0) { superTblInfo->childTblLimit = superTblInfo->childTblCount - superTblInfo->childTblOffset; } @@ -5149,13 +5147,32 @@ static void startMultiThreadInsertData(int threads, char* db_name, offset = superTblInfo->childTblOffset; limit = superTblInfo->childTblLimit; } else { - limit = superTblInfo->childTblCount; - offset = 0; + limit = superTblInfo->childTblCount; + offset = 0; } ntables = limit; startFrom = offset; + if ((superTblInfo->childTblExists != TBL_NO_EXISTS) + && ((superTblInfo->childTblOffset + superTblInfo->childTblLimit ) + > superTblInfo->childTblCount)) { + printf("WARNING: specified offset + limit > child table count!\n"); + if (!g_args.answer_yes) { + printf(" Press enter key to continue or Ctrl-C to stop\n\n"); + (void)getchar(); + } + } + + if ((superTblInfo->childTblExists != TBL_NO_EXISTS) + && (0 == superTblInfo->childTblLimit)) { + printf("WARNING: specified limit = 0, which cannot find table name to insert or query! 
\n"); + if (!g_args.answer_yes) { + printf(" Press enter key to continue or Ctrl-C to stop\n\n"); + (void)getchar(); + } + } + superTblInfo->childTblName = (char*)calloc(1, limit * TSDB_TABLE_NAME_LEN); if (superTblInfo->childTblName == NULL) { diff --git a/tests/pytest/tools/insert-tblimit-tboffset-createdb.json b/tests/pytest/tools/insert-tblimit-tboffset-createdb.json new file mode 100644 index 0000000000..8c3b39fb0b --- /dev/null +++ b/tests/pytest/tools/insert-tblimit-tboffset-createdb.json @@ -0,0 +1,57 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "num_of_records_per_req": 100, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb_", + "auto_create_table": "no", + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 0, + "multi_thread_write_one_tbl": "no", + "number_of_tbl_in_one_sql": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/insert-tblimit-tboffset-insertrec.json b/tests/pytest/tools/insert-tblimit-tboffset-insertrec.json new file mode 100644 index 0000000000..a9efa7c31c --- /dev/null +++ b/tests/pytest/tools/insert-tblimit-tboffset-insertrec.json @@ -0,0 +1,59 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "num_of_records_per_req": 100, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "no", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"yes", + "childtable_count": 100, + "childtable_prefix": "stb_", + "auto_create_table": "no", + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 1000, + "childtable_limit": 33, + "childtable_offset": 33, + "multi_thread_write_one_tbl": "no", + "number_of_tbl_in_one_sql": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, 
"count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/insert-tblimit-tboffset0.json b/tests/pytest/tools/insert-tblimit-tboffset0.json index 7dcb2e0527..6bf3783cb2 100644 --- a/tests/pytest/tools/insert-tblimit-tboffset0.json +++ b/tests/pytest/tools/insert-tblimit-tboffset0.json @@ -15,7 +15,7 @@ "databases": [{ "dbinfo": { "name": "db", - "drop": "yes", + "drop": "no", "replica": 1, "days": 10, "cache": 16, @@ -33,7 +33,7 @@ }, "super_tables": [{ "name": "stb", - "child_table_exists":"no", + "child_table_exists":"yes", "childtable_count": 100, "childtable_prefix": "stb_", "auto_create_table": "no", diff --git a/tests/pytest/tools/insert-tblimit1-tboffset.json b/tests/pytest/tools/insert-tblimit1-tboffset.json index a33dc22d5d..292902093d 100644 --- a/tests/pytest/tools/insert-tblimit1-tboffset.json +++ b/tests/pytest/tools/insert-tblimit1-tboffset.json @@ -15,7 +15,7 @@ "databases": [{ "dbinfo": { "name": "db", - "drop": "yes", + "drop": "no", "replica": 1, "days": 10, "cache": 16, @@ -33,7 +33,7 @@ }, "super_tables": [{ "name": "stb", - "child_table_exists":"no", + "child_table_exists":"yes", "childtable_count": 100, "childtable_prefix": "stb_", "auto_create_table": "no", diff --git a/tests/pytest/tools/taosdemo-sampledata.json b/tests/pytest/tools/taosdemo-sampledata.json index 473c977773..d543b7e086 100644 --- a/tests/pytest/tools/taosdemo-sampledata.json +++ b/tests/pytest/tools/taosdemo-sampledata.json @@ -16,8 +16,6 @@ "name": "stb", "child_table_exists":"no", "childtable_count": 20, - "childtable_limit": 10, - "childtable_offset": 0, "childtable_prefix": "t_", "auto_create_table": "no", "data_source": "sample", diff --git a/tests/pytest/tools/taosdemoTestLimitOffset.py b/tests/pytest/tools/taosdemoTestLimitOffset.py index bce41e1c75..dd8a1bee70 100644 --- a/tests/pytest/tools/taosdemoTestLimitOffset.py +++ b/tests/pytest/tools/taosdemoTestLimitOffset.py @@ -51,7 +51,8 @@ class TDTestCase: else: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath+ "/build/bin/" - os.system("%staosdemo -f tools/insert-tblimit-tboffset.json" % binPath) + os.system("%staosdemo -f tools/insert-tblimit-tboffset-createdb.json" % binPath) + os.system("%staosdemo -f tools/insert-tblimit-tboffset-insertrec.json" % binPath) tdSql.execute("use db") tdSql.query("select count(tbname) from db.stb") @@ -59,6 +60,7 @@ class TDTestCase: tdSql.query("select count(*) from db.stb") tdSql.checkData(0, 0, 33000) + os.system("%staosdemo -f tools/insert-tblimit-tboffset-createdb.json" % binPath) os.system("%staosdemo -f tools/insert-tblimit-tboffset0.json" % binPath) tdSql.execute("reset query cache") @@ -68,6 +70,7 @@ class TDTestCase: tdSql.query("select count(*) from db.stb") tdSql.checkData(0, 0, 20000) + os.system("%staosdemo -f tools/insert-tblimit-tboffset-createdb.json" % binPath) os.system("%staosdemo -f tools/insert-tblimit1-tboffset.json" % binPath) tdSql.execute("reset query cache") diff --git a/tests/pytest/tools/taosdemoTestSampleData.py b/tests/pytest/tools/taosdemoTestSampleData.py index 893c53984d..a8710a9df3 100644 --- a/tests/pytest/tools/taosdemoTestSampleData.py +++ b/tests/pytest/tools/taosdemoTestSampleData.py @@ -57,7 +57,7 @@ class TDTestCase: tdSql.query("select count(tbname) from db.stb") tdSql.checkData(0, 0, 20) tdSql.query("select count(*) from db.stb") - tdSql.checkData(0, 0, 200) + tdSql.checkData(0, 0, 400) def stop(self): tdSql.close() From 763a14c8767ea641b8f8a31ffc0d8cae09321c62 Mon Sep 
17 00:00:00 2001 From: Shuduo Sang Date: Sat, 3 Apr 2021 07:39:49 +0800 Subject: [PATCH 066/178] [TD-3660] : taosdemo gracefully exit if super table not exist. (#5670) Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 260 ++++++++++++++++++++---------------- 1 file changed, 142 insertions(+), 118 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 58d4a1820c..7cb5f1b76b 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -2278,7 +2278,7 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName, } static int createSuperTable(TAOS * taos, char* dbName, - SSuperTable* superTbls, bool use_metric) { + SSuperTable* superTbl) { char command[BUFFER_SIZE] = "\0"; char cols[STRING_LEN] = "\0"; @@ -2286,19 +2286,26 @@ static int createSuperTable(TAOS * taos, char* dbName, int len = 0; int lenOfOneRow = 0; - for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) { - char* dataType = superTbls->columns[colIndex].dataType; + + if (superTbl->columnCount == 0) { + errorPrint("%s() LN%d, super table column count is %d\n", + __func__, __LINE__, superTbl->columnCount); + return -1; + } + + for (colIndex = 0; colIndex < superTbl->columnCount; colIndex++) { + char* dataType = superTbl->columns[colIndex].dataType; if (strcasecmp(dataType, "BINARY") == 0) { len += snprintf(cols + len, STRING_LEN - len, ", col%d %s(%d)", colIndex, "BINARY", - superTbls->columns[colIndex].dataLen); - lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; + superTbl->columns[colIndex].dataLen); + lenOfOneRow += superTbl->columns[colIndex].dataLen + 3; } else if (strcasecmp(dataType, "NCHAR") == 0) { len += snprintf(cols + len, STRING_LEN - len, ", col%d %s(%d)", colIndex, "NCHAR", - superTbls->columns[colIndex].dataLen); - lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; + superTbl->columns[colIndex].dataLen); + lenOfOneRow += superTbl->columns[colIndex].dataLen + 3; } else if (strcasecmp(dataType, "INT") == 0) { len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "INT"); lenOfOneRow += 11; @@ -2330,88 +2337,95 @@ static int createSuperTable(TAOS * taos, char* dbName, } } - superTbls->lenOfOneRow = lenOfOneRow + 20; // timestamp - //printf("%s.%s column count:%d, column length:%d\n\n", g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName, g_Dbs.db[i].superTbls[j].columnCount, lenOfOneRow); + superTbl->lenOfOneRow = lenOfOneRow + 20; // timestamp + //printf("%s.%s column count:%d, column length:%d\n\n", g_Dbs.db[i].dbName, g_Dbs.db[i].superTbl[j].sTblName, g_Dbs.db[i].superTbl[j].columnCount, lenOfOneRow); // save for creating child table - superTbls->colsOfCreateChildTable = (char*)calloc(len+20, 1); - if (NULL == superTbls->colsOfCreateChildTable) { - printf("Failed when calloc, size:%d", len+1); + superTbl->colsOfCreateChildTable = (char*)calloc(len+20, 1); + if (NULL == superTbl->colsOfCreateChildTable) { + errorPrint("%s() LN%d, Failed when calloc, size:%d", + __func__, __LINE__, len+1); taos_close(taos); exit(-1); } - snprintf(superTbls->colsOfCreateChildTable, len+20, "(ts timestamp%s)", cols); - verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, superTbls->colsOfCreateChildTable); - if (use_metric) { - char tags[STRING_LEN] = "\0"; - int tagIndex; - len = 0; + snprintf(superTbl->colsOfCreateChildTable, len+20, "(ts timestamp%s)", cols); + verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, superTbl->colsOfCreateChildTable); - int lenOfTagOfOneRow = 0; - len += snprintf(tags + len, STRING_LEN - len, "("); 
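/* A minimal, self-contained sketch of the bounded snprintf-accumulation
 * pattern used above for the column list and below for the tag list. The
 * names buildTypeList, buf and cap are illustrative only, not taosdemo's;
 * like the original, the sketch assumes n > 0 and does not handle snprintf
 * truncation explicitly.
 *
 *     #include <stdio.h>
 *
 *     static void buildTypeList(char *buf, int cap, int n) {
 *         int len = snprintf(buf, cap, "(");
 *         for (int i = 0; i < n; i++) {
 *             // every call writes at the current offset and is bounded by
 *             // the space that is left, so a long schema truncates rather
 *             // than overflowing the buffer
 *             len += snprintf(buf + len, cap - len, "t%d INT, ", i);
 *         }
 *         len -= 2;                            // drop the trailing ", "
 *         snprintf(buf + len, cap - len, ")"); // close the list
 *     }
 */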
- for (tagIndex = 0; tagIndex < superTbls->tagCount; tagIndex++) { - char* dataType = superTbls->tags[tagIndex].dataType; - - if (strcasecmp(dataType, "BINARY") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex, - "BINARY", superTbls->tags[tagIndex].dataLen); - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3; - } else if (strcasecmp(dataType, "NCHAR") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex, - "NCHAR", superTbls->tags[tagIndex].dataLen); - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3; - } else if (strcasecmp(dataType, "INT") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "INT"); - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 11; - } else if (strcasecmp(dataType, "BIGINT") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "BIGINT"); - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 21; - } else if (strcasecmp(dataType, "SMALLINT") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "SMALLINT"); - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6; - } else if (strcasecmp(dataType, "TINYINT") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "TINYINT"); - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 4; - } else if (strcasecmp(dataType, "BOOL") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "BOOL"); - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6; - } else if (strcasecmp(dataType, "FLOAT") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "FLOAT"); - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 22; - } else if (strcasecmp(dataType, "DOUBLE") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "DOUBLE"); - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 42; - } else { - taos_close(taos); - printf("config error tag type : %s\n", dataType); - exit(-1); - } - } - len -= 2; - len += snprintf(tags + len, STRING_LEN - len, ")"); - - superTbls->lenOfTagOfOneRow = lenOfTagOfOneRow; - - snprintf(command, BUFFER_SIZE, - "create table if not exists %s.%s (ts timestamp%s) tags %s", - dbName, superTbls->sTblName, cols, tags); - verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, command); - - if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { - errorPrint( "create supertable %s failed!\n\n", - superTbls->sTblName); - return -1; - } - debugPrint("create supertable %s success!\n\n", superTbls->sTblName); + if (superTbl->tagCount == 0) { + errorPrint("%s() LN%d, super table tag count is %d\n", + __func__, __LINE__, superTbl->tagCount); + return -1; } + + char tags[STRING_LEN] = "\0"; + int tagIndex; + len = 0; + + int lenOfTagOfOneRow = 0; + len += snprintf(tags + len, STRING_LEN - len, "("); + for (tagIndex = 0; tagIndex < superTbl->tagCount; tagIndex++) { + char* dataType = superTbl->tags[tagIndex].dataType; + + if (strcasecmp(dataType, "BINARY") == 0) { + len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex, + "BINARY", superTbl->tags[tagIndex].dataLen); + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3; + } else if (strcasecmp(dataType, "NCHAR") == 0) { + len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex, + "NCHAR", superTbl->tags[tagIndex].dataLen); + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3; + } else if (strcasecmp(dataType, "INT") == 0) { + len += snprintf(tags 
+ len, STRING_LEN - len, "t%d %s, ", tagIndex, + "INT"); + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 11; + } else if (strcasecmp(dataType, "BIGINT") == 0) { + len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, + "BIGINT"); + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 21; + } else if (strcasecmp(dataType, "SMALLINT") == 0) { + len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, + "SMALLINT"); + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 6; + } else if (strcasecmp(dataType, "TINYINT") == 0) { + len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, + "TINYINT"); + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 4; + } else if (strcasecmp(dataType, "BOOL") == 0) { + len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, + "BOOL"); + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 6; + } else if (strcasecmp(dataType, "FLOAT") == 0) { + len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, + "FLOAT"); + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 22; + } else if (strcasecmp(dataType, "DOUBLE") == 0) { + len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, + "DOUBLE"); + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 42; + } else { + taos_close(taos); + printf("config error tag type : %s\n", dataType); + exit(-1); + } + } + + len -= 2; + len += snprintf(tags + len, STRING_LEN - len, ")"); + + superTbl->lenOfTagOfOneRow = lenOfTagOfOneRow; + + snprintf(command, BUFFER_SIZE, + "create table if not exists %s.%s (ts timestamp%s) tags %s", + dbName, superTbl->sTblName, cols, tags); + verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, command); + + if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { + errorPrint( "create supertable %s failed!\n\n", + superTbl->sTblName); + return -1; + } + debugPrint("create supertable %s success!\n\n", superTbl->sTblName); return 0; } @@ -2512,6 +2526,9 @@ static int createDatabasesAndStables() { debugPrint("%s() %d supertbl count:%d\n", __func__, __LINE__, g_Dbs.db[i].superTblCount); + + int validStbCount = 0; + for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName); @@ -2521,12 +2538,11 @@ static int createDatabasesAndStables() { if ((ret != 0) || (g_Dbs.db[i].drop)) { ret = createSuperTable(taos, g_Dbs.db[i].dbName, - &g_Dbs.db[i].superTbls[j], g_Dbs.use_metric); + &g_Dbs.db[i].superTbls[j]); if (0 != ret) { - errorPrint("\ncreate super table %d failed!\n\n", j); - taos_close(taos); - return -1; + errorPrint("create super table %d failed!\n\n", j); + continue; } } @@ -2535,10 +2551,13 @@ static int createDatabasesAndStables() { if (0 != ret) { errorPrint("\nget super table %s.%s info failed!\n\n", g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName); - taos_close(taos); - return -1; + continue; } + + validStbCount ++; } + + g_Dbs.db[i].superTblCount = validStbCount; } taos_close(taos); @@ -2723,27 +2742,29 @@ static void createChildTables() { int len; for (int i = 0; i < g_Dbs.dbCount; i++) { - if (g_Dbs.db[i].superTblCount > 0) { - // with super table - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - if ((AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) - || (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists)) { - continue; + if (g_Dbs.use_metric) { + if (g_Dbs.db[i].superTblCount > 0) { + // with super table + for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { + if 
((AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) + || (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists)) { + continue; + } + + verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, + g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); + int startFrom = 0; + g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount; + + verbosePrint("%s() LN%d: create %d child tables from %d\n", + __func__, __LINE__, g_totalChildTables, startFrom); + startMultiThreadCreateChildTable( + g_Dbs.db[i].superTbls[j].colsOfCreateChildTable, + g_Dbs.threadCountByCreateTbl, + startFrom, + g_Dbs.db[i].superTbls[j].childTblCount, + g_Dbs.db[i].dbName, &(g_Dbs.db[i].superTbls[j])); } - - verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, - g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); - int startFrom = 0; - g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount; - - verbosePrint("%s() LN%d: create %d child tables from %d\n", - __func__, __LINE__, g_totalChildTables, startFrom); - startMultiThreadCreateChildTable( - g_Dbs.db[i].superTbls[j].colsOfCreateChildTable, - g_Dbs.threadCountByCreateTbl, - startFrom, - g_Dbs.db[i].superTbls[j].childTblCount, - g_Dbs.db[i].dbName, &(g_Dbs.db[i].superTbls[j])); } } else { // normal table @@ -5530,18 +5551,21 @@ static int insertTestProcess() { // create sub threads for inserting data //start = getCurrentTime(); for (int i = 0; i < g_Dbs.dbCount; i++) { - if (g_Dbs.db[i].superTblCount > 0) { - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - SSuperTable* superTblInfo = &g_Dbs.db[i].superTbls[j]; - if (0 == g_Dbs.db[i].superTbls[j].insertRows) { - continue; - } - startMultiThreadInsertData( - g_Dbs.threadCount, - g_Dbs.db[i].dbName, - g_Dbs.db[i].dbCfg.precision, - superTblInfo); + if (g_Dbs.use_metric) { + if (g_Dbs.db[i].superTblCount > 0) { + for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { + + SSuperTable* superTblInfo = &g_Dbs.db[i].superTbls[j]; + + if (superTblInfo && (superTblInfo->insertRows > 0)) { + startMultiThreadInsertData( + g_Dbs.threadCount, + g_Dbs.db[i].dbName, + g_Dbs.db[i].dbCfg.precision, + superTblInfo); + } } + } } else { startMultiThreadInsertData( g_Dbs.threadCount, From 31d5d302d384a48e1ac013567f14711fb455e6a1 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 3 Apr 2021 13:49:26 +0800 Subject: [PATCH 067/178] [TD-3636] : taosdemo disorder rework. 
(#5671) Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 105 ++++++++++++++++++++---------------- 1 file changed, 60 insertions(+), 45 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 7cb5f1b76b..5a88f51514 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -211,8 +211,8 @@ typedef struct SArguments_S { int num_of_tables; int num_of_DPT; int abort; - int disorderRatio; - int disorderRange; + int disorderRatio; // 0: no disorder, >0: x% + int disorderRange; // ms or us by database precision int method_of_delete; char ** arg_list; int64_t totalInsertRows; @@ -229,25 +229,25 @@ typedef struct SColumn_S { typedef struct SSuperTable_S { char sTblName[MAX_TB_NAME_SIZE+1]; int childTblCount; - bool childTblExists; // 0: no, 1: yes - int batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql - int8_t autoCreateTable; // 0: create sub table, 1: auto create sub table + bool childTblExists; // 0: no, 1: yes + int batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql + int8_t autoCreateTable; // 0: create sub table, 1: auto create sub table char childTblPrefix[MAX_TB_NAME_SIZE]; char dataSource[MAX_TB_NAME_SIZE+1]; // rand_gen or sample - char insertMode[MAX_TB_NAME_SIZE]; // taosc, restful + char insertMode[MAX_TB_NAME_SIZE]; // taosc, restful int childTblLimit; int childTblOffset; - int multiThreadWriteOneTbl; // 0: no, 1: yes - int interlaceRows; // - int disorderRatio; // 0: no disorder, >0: x% - int disorderRange; // ms or us by database precision - int maxSqlLen; // + int multiThreadWriteOneTbl; // 0: no, 1: yes + int interlaceRows; // + int disorderRatio; // 0: no disorder, >0: x% + int disorderRange; // ms or us by database precision + int maxSqlLen; // int insertInterval; // insert interval, will override global insert interval - int64_t insertRows; // 0: no limit + int64_t insertRows; // 0: no limit int timeStampStep; - char startTimestamp[MAX_TB_NAME_SIZE]; // + char startTimestamp[MAX_TB_NAME_SIZE]; char sampleFormat[MAX_TB_NAME_SIZE]; // csv, json char sampleFile[MAX_FILE_NAME_LEN+1]; char tagsFile[MAX_FILE_NAME_LEN+1]; @@ -487,7 +487,7 @@ static int taosRandom() return number; } -#else +#else // Not windows static void setupForAnsiEscape(void) {} static void resetAfterAnsiEscape(void) { @@ -499,11 +499,15 @@ static void resetAfterAnsiEscape(void) { static int taosRandom() { - srand(time(NULL)); + struct timeval tv; + + gettimeofday(&tv, NULL); + srand(tv.tv_usec); + return rand(); } -#endif +#endif // ifdef Windows static int createDatabasesAndStables(); static void createChildTables(); @@ -676,7 +680,7 @@ static void printHelp() { printf("%s%s%s%s\n", indent, "-x", indent, "Not insert only flag."); printf("%s%s%s%s\n", indent, "-y", indent, "Default input yes for prompt."); printf("%s%s%s%s\n", indent, "-O", indent, - "Insert mode--0: In order, > 0: disorder ratio. Default is in order."); + "Insert mode--0: In order, 1 ~ 50: disorder ratio. 
Default is in order."); printf("%s%s%s%s\n", indent, "-R", indent, "Out of order data's range, ms, default is 1000."); printf("%s%s%s%s\n", indent, "-g", indent, @@ -800,20 +804,21 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } else if (strcmp(argv[i], "-c") == 0) { strcpy(configDir, argv[++i]); } else if (strcmp(argv[i], "-O") == 0) { + arguments->disorderRatio = atoi(argv[++i]); - if (arguments->disorderRatio > 1 - || arguments->disorderRatio < 0) { + + if (arguments->disorderRatio > 50) + arguments->disorderRatio = 50; + + if (arguments->disorderRatio < 0) arguments->disorderRatio = 0; - } else if (arguments->disorderRatio == 1) { - arguments->disorderRange = 10; - } + } else if (strcmp(argv[i], "-R") == 0) { + arguments->disorderRange = atoi(argv[++i]); - if (arguments->disorderRange == 1 - && (arguments->disorderRange > 50 - || arguments->disorderRange <= 0)) { - arguments->disorderRange = 10; - } + if (arguments->disorderRange < 0) + arguments->disorderRange = 1000; + } else if (strcmp(argv[i], "-a") == 0) { arguments->replica = atoi(argv[++i]); if (arguments->replica > 3 || arguments->replica < 1) { @@ -997,8 +1002,9 @@ static void selectAndGetResult(TAOS *taos, char *command, char* resultFileName) taos_free_result(res); } -static double getCurrentTime() { +static double getCurrentTimeUs() { struct timeval tv; + if (gettimeofday(&tv, NULL) != 0) { perror("Failed to get current time in ms"); return 0.0; @@ -3669,6 +3675,12 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON* disorderRatio = cJSON_GetObjectItem(stbInfo, "disorder_ratio"); if (disorderRatio && disorderRatio->type == cJSON_Number) { + if (disorderRatio->valueint > 50) + disorderRatio->valueint = 50; + + if (disorderRatio->valueint < 0) + disorderRatio->valueint = 0; + g_Dbs.db[i].superTbls[j].disorderRatio = disorderRatio->valueint; } else if (!disorderRatio) { g_Dbs.db[i].superTbls[j].disorderRatio = 0; @@ -4329,6 +4341,8 @@ static int32_t generateData(char *recBuf, char **data_type, pstr += sprintf(pstr, ")"); + verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf); + return (int32_t)strlen(recBuf); } @@ -4440,7 +4454,7 @@ static int generateDataTail(char *tableName, int32_t tableSeq, pSamplePos); } else if (0 == strncasecmp(superTblInfo->dataSource, "rand", strlen("rand"))) { - int rand_num = rand_tinyint() % 100; + int rand_num = taosRandom() % 100; if (0 != superTblInfo->disorderRatio && rand_num < superTblInfo->disorderRatio) { int64_t d = startTime @@ -4467,15 +4481,16 @@ static int generateDataTail(char *tableName, int32_t tableSeq, len += retLen; remainderBufLen -= retLen; } else { - int rand_num = taosRandom() % 100; char **data_type = g_args.datatype; int lenOfBinary = g_args.len_of_binary; + int rand_num = taosRandom() % 100; if ((g_args.disorderRatio != 0) - && (rand_num < g_args.disorderRange)) { + && (rand_num < g_args.disorderRatio)) { int64_t d = startTime + DEFAULT_TIMESTAMP_STEP * k - - taosRandom() % 1000000 + rand_num; + - taosRandom() % g_args.disorderRange; + retLen = generateData(data, data_type, ncols_per_record, d, lenOfBinary); } else { @@ -5024,7 +5039,7 @@ static void callBack(void *param, TAOS_RES *res, int code) { int rand_num = taosRandom() % 100; if (0 != winfo->superTblInfo->disorderRatio && rand_num < winfo->superTblInfo->disorderRatio) { - int64_t d = winfo->lastTs - taosRandom() % 1000000 + rand_num; + int64_t d = winfo->lastTs - taosRandom() % winfo->superTblInfo->disorderRange; generateRowData(data, d, winfo->superTblInfo); } 
else { generateRowData(data, winfo->lastTs += 1000, winfo->superTblInfo); @@ -5116,7 +5131,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, start_time = 1500000000000; } - double start = getCurrentTime(); + double start = getCurrentTimeUs(); // read sample data from file first if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource, @@ -5311,7 +5326,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, if (cntDelay == 0) cntDelay = 1; avgDelay = (double)totalDelay / cntDelay; - double end = getCurrentTime(); + double end = getCurrentTimeUs(); double t = end - start; if (superTblInfo) { @@ -5390,7 +5405,7 @@ static void *readTable(void *sarg) { sprintf(command, "select %s from %s%d where ts>= %" PRId64, aggreFunc[j], tb_prefix, i, sTime); - double t = getCurrentTime(); + double t = getCurrentTimeUs(); TAOS_RES *pSql = taos_query(taos, command); int32_t code = taos_errno(pSql); @@ -5406,7 +5421,7 @@ static void *readTable(void *sarg) { count++; } - t = getCurrentTime() - t; + t = getCurrentTimeUs() - t; totalT += t; taos_free_result(pSql); @@ -5465,7 +5480,7 @@ static void *readMetric(void *sarg) { printf("Where condition: %s\n", condition); fprintf(fp, "%s\n", command); - double t = getCurrentTime(); + double t = getCurrentTimeUs(); TAOS_RES *pSql = taos_query(taos, command); int32_t code = taos_errno(pSql); @@ -5481,7 +5496,7 @@ static void *readMetric(void *sarg) { while (taos_fetch_row(pSql) != NULL) { count++; } - t = getCurrentTime() - t; + t = getCurrentTimeUs() - t; fprintf(fp, "| Speed: %12.2f(per s) | Latency: %.4f(ms) |\n", num_of_tables * num_of_DPT / t, t * 1000); @@ -5535,9 +5550,9 @@ static int insertTestProcess() { double end; // create child tables - start = getCurrentTime(); + start = getCurrentTimeUs(); createChildTables(); - end = getCurrentTime(); + end = getCurrentTimeUs(); if (g_totalChildTables > 0) { printf("Spent %.4f seconds to create %d tables with %d thread(s)\n\n", @@ -5549,7 +5564,7 @@ static int insertTestProcess() { taosMsleep(1000); // create sub threads for inserting data - //start = getCurrentTime(); + //start = getCurrentTimeUs(); for (int i = 0; i < g_Dbs.dbCount; i++) { if (g_Dbs.use_metric) { if (g_Dbs.db[i].superTblCount > 0) { @@ -5574,7 +5589,7 @@ static int insertTestProcess() { NULL); } } - //end = getCurrentTime(); + //end = getCurrentTimeUs(); //int64_t totalInsertRows = 0; //int64_t totalAffectedRows = 0; @@ -6395,7 +6410,7 @@ static void querySqlFile(TAOS* taos, char* sqlFile) char * line = NULL; size_t line_len = 0; - double t = getCurrentTime(); + double t = getCurrentTimeUs(); while ((read_len = tgetline(&line, &line_len, fp)) != -1) { if (read_len >= MAX_SQL_SIZE) continue; @@ -6426,7 +6441,7 @@ static void querySqlFile(TAOS* taos, char* sqlFile) cmd_len = 0; } - t = getCurrentTime() - t; + t = getCurrentTimeUs() - t; printf("run %s took %.6f second(s)\n\n", sqlFile, t); tmfree(cmd); From 35cbca8e02e039818bb06ef0385b55e1693412e8 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Sat, 3 Apr 2021 17:41:58 +0800 Subject: [PATCH 068/178] [TD-2639] : fix minor typo. 
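For reference, the disorder logic settled on in the preceding taosdemo patch (#5671) reduces to a few lines. A hedged sketch, using standalone names (nextTimestamp, plain rand()) in place of taosdemo's taosRandom and per-table state:

#include <stdint.h>
#include <stdlib.h>

// Timestamp for row k: normally startTime + k * step, but with probability
// ratio/100 the row is pulled backwards by up to `range` time units, which
// yields out-of-order rows whose timestamps stay inside a bounded window.
static int64_t nextTimestamp(int64_t startTime, int64_t step, int k,
                             int ratio, int range) {
    int64_t ts = startTime + step * k;
    if (ratio > 0 && range > 0 && (rand() % 100) < ratio) {
        ts -= rand() % range;
    }
    return ts;
}

This mirrors the generateDataTail change above: the old code subtracted taosRandom() % 1000000 regardless of the configured range, while the reworked code honors disorderRange.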
--- documentation20/cn/02.getting-started/docs.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/documentation20/cn/02.getting-started/docs.md b/documentation20/cn/02.getting-started/docs.md index 43392e0325..db2885d302 100644 --- a/documentation20/cn/02.getting-started/docs.md +++ b/documentation20/cn/02.getting-started/docs.md @@ -101,7 +101,7 @@ $ taos -h 192.168.0.1 -s "use db; show tables;" ### 运行SQL命令脚本 -TDengine终端可以通过`source`命令来运行SQL命令脚本. +TDengine 终端可以通过 `source` 命令来运行 SQL 命令脚本. ```mysql taos> source ; @@ -109,10 +109,10 @@ taos> source ; ### Shell小技巧 -- 可以使用上下光标键查看已经历史输入的命令 -- 修改用户密码。在shell中使用alter user命令 +- 可以使用上下光标键查看历史输入的指令 +- 修改用户密码。在 shell 中使用 alter user 指令 - ctrl+c 中止正在进行中的查询 -- 执行`RESET QUERY CACHE`清空本地缓存的表的schema +- 执行 `RESET QUERY CACHE` 清空本地缓存的表 schema ## TDengine 极速体验 From a8d53d1f9098a2fa454051cbbb581e9bb490334b Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 3 Apr 2021 20:12:11 +0800 Subject: [PATCH 069/178] [TD-3580] : taosdump support human readable time format. (#5674) Co-authored-by: Shuduo Sang --- src/kit/taosdump/taosdump.c | 305 +++++++++++++++++++++--------------- 1 file changed, 180 insertions(+), 125 deletions(-) diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index 3cee5f1b1d..b645585ede 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -39,6 +39,22 @@ typedef struct { int8_t type; } SOColInfo; +#define debugPrint(fmt, ...) \ + do { if (g_args.debug_print || g_args.verbose_print) \ + fprintf(stderr, "DEBG: "fmt, __VA_ARGS__); } while(0) + +#define verbosePrint(fmt, ...) \ + do { if (g_args.verbose_print) \ + fprintf(stderr, "VERB: "fmt, __VA_ARGS__); } while(0) + +#define performancePrint(fmt, ...) \ + do { if (g_args.performance_print) \ + fprintf(stderr, "VERB: "fmt, __VA_ARGS__); } while(0) + +#define errorPrint(fmt, ...) \ + do { fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); } while(0) + + // -------------------------- SHOW DATABASE INTERFACE----------------------- enum _show_db_index { TSDB_SHOW_DB_NAME_INDEX, @@ -46,7 +62,7 @@ enum _show_db_index { TSDB_SHOW_DB_NTABLES_INDEX, TSDB_SHOW_DB_VGROUPS_INDEX, TSDB_SHOW_DB_REPLICA_INDEX, - TSDB_SHOW_DB_QUORUM_INDEX, + TSDB_SHOW_DB_QUORUM_INDEX, TSDB_SHOW_DB_DAYS_INDEX, TSDB_SHOW_DB_KEEP_INDEX, TSDB_SHOW_DB_CACHE_INDEX, @@ -101,10 +117,10 @@ typedef struct { char name[TSDB_DB_NAME_LEN + 1]; char create_time[32]; int32_t ntables; - int32_t vgroups; + int32_t vgroups; int16_t replica; int16_t quorum; - int16_t days; + int16_t days; char keeplist[32]; //int16_t daysToKeep; //int16_t daysToKeep1; @@ -172,48 +188,50 @@ static char args_doc[] = "dbname [tbname ...]\n--databases dbname ...\n--all-dat /* The options we understand. */ static struct argp_option options[] = { // connection option - {"host", 'h', "HOST", 0, "Server host dumping data from. Default is localhost.", 0}, - {"user", 'u', "USER", 0, "User name used to connect to server. Default is root.", 0}, + {"host", 'h', "HOST", 0, "Server host dumping data from. Default is localhost.", 0}, + {"user", 'u', "USER", 0, "User name used to connect to server. Default is root.", 0}, #ifdef _TD_POWER_ - {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is powerdb.", 0}, + {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is powerdb.", 0}, #else - {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is taosdata.", 0}, + {"password", 'p', "PASSWORD", 0, "User password to connect to server. 
Default is taosdata.", 0}, #endif - {"port", 'P', "PORT", 0, "Port to connect", 0}, - {"cversion", 'v', "CVERION", 0, "client version", 0}, - {"mysqlFlag", 'q', "MYSQLFLAG", 0, "mysqlFlag, Default is 0", 0}, + {"port", 'P', "PORT", 0, "Port to connect", 0}, + {"cversion", 'v', "CVERION", 0, "client version", 0}, + {"mysqlFlag", 'q', "MYSQLFLAG", 0, "mysqlFlag, Default is 0", 0}, // input/output file - {"outpath", 'o', "OUTPATH", 0, "Output file path.", 1}, - {"inpath", 'i', "INPATH", 0, "Input file path.", 1}, - {"resultFile", 'r', "RESULTFILE", 0, "DumpOut/In Result file path and name.", 1}, + {"outpath", 'o', "OUTPATH", 0, "Output file path.", 1}, + {"inpath", 'i', "INPATH", 0, "Input file path.", 1}, + {"resultFile", 'r', "RESULTFILE", 0, "DumpOut/In Result file path and name.", 1}, #ifdef _TD_POWER_ - {"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/power/taos.cfg.", 1}, + {"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/power/taos.cfg.", 1}, #else - {"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/taos/taos.cfg.", 1}, + {"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/taos/taos.cfg.", 1}, #endif - {"encode", 'e', "ENCODE", 0, "Input file encoding.", 1}, + {"encode", 'e', "ENCODE", 0, "Input file encoding.", 1}, // dump unit options - {"all-databases", 'A', 0, 0, "Dump all databases.", 2}, - {"databases", 'B', 0, 0, "Dump assigned databases", 2}, + {"all-databases", 'A', 0, 0, "Dump all databases.", 2}, + {"databases", 'B', 0, 0, "Dump assigned databases", 2}, // dump format options - {"schemaonly", 's', 0, 0, "Only dump schema.", 3}, - {"with-property", 'M', 0, 0, "Dump schema with properties.", 3}, - {"start-time", 'S', "START_TIME", 0, "Start time to dump.", 3}, - {"end-time", 'E', "END_TIME", 0, "End time to dump.", 3}, - {"data-batch", 'N', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3}, - {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3}, - {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3}, - {"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3}, - {"allow-sys", 'a', 0, 0, "Allow to dump sys database", 3}, + {"schemaonly", 's', 0, 0, "Only dump schema.", 3}, + {"with-property", 'M', 0, 0, "Dump schema with properties.", 3}, + {"start-time", 'S', "START_TIME", 0, "Start time to dump.", 3}, + {"end-time", 'E', "END_TIME", 0, "End time to dump. Epoch or ISO8601/RFC3339 format is acceptable. For example: 2017-10-01T18:00:00+0800", 3}, + {"data-batch", 'N', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3}, + {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3}, + {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3}, + {"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3}, + {"allow-sys", 'a', 0, 0, "Allow to dump sys database", 3}, + {"debug", 'g', 0, 0, "Print debug info.", 1}, + {"verbose", 'v', 0, 0, "Print verbose debug info.", 1}, {0}}; /* Used by main to communicate with parse_opt. 
*/ -struct arguments { +typedef struct arguments { // connection option char *host; char *user; char *password; - uint16_t port; + uint16_t port; char cversion[12]; uint16_t mysqlFlag; // output file @@ -238,9 +256,12 @@ struct arguments { int32_t thread_num; int abort; char **arg_list; - int arg_list_len; - bool isDumpIn; -}; + int arg_list_len; + bool isDumpIn; + bool debug_print; + bool verbose_print; + bool performance_print; +} SArguments; /* Parse a single option. */ static error_t parse_opt(int key, char *arg, struct argp_state *state) { @@ -286,6 +307,9 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { tstrncpy(arguments->outpath, full_path.we_wordv[0], TSDB_FILENAME_LEN); wordfree(&full_path); break; + case 'g': + arguments->debug_print = true; + break; case 'i': arguments->isDumpIn = true; if (wordexp(arg, &full_path, 0) != 0) { @@ -387,7 +411,7 @@ int taosCheckParam(struct arguments *arguments); void taosFreeDbInfos(); static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, int32_t numOfThread, char *dbName); -struct arguments tsArguments = { +struct arguments g_args = { // connection option NULL, "root", @@ -400,18 +424,18 @@ struct arguments tsArguments = { "", 0, // outpath and inpath - "", + "", "", "./dump_result.txt", NULL, // dump unit option - false, + false, false, // dump format option - false, - false, - 0, - INT64_MAX, + false, + false, + 0, + INT64_MAX, 1, TSDB_MAX_SQL_LEN, 1, @@ -419,11 +443,14 @@ struct arguments tsArguments = { // other options 5, 0, - NULL, - 0, - false + NULL, + 0, + false, + false, // debug_print + false, // verbose_print + false // performance_print }; - + static int queryDbImpl(TAOS *taos, char *command) { int i; TAOS_RES *res = NULL; @@ -434,7 +461,7 @@ static int queryDbImpl(TAOS *taos, char *command) { taos_free_result(res); res = NULL; } - + res = taos_query(taos, command); code = taos_errno(res); if (0 == code) { @@ -453,13 +480,40 @@ static int queryDbImpl(TAOS *taos, char *command) { return 0; } +static void parse_args(int argc, char *argv[], SArguments *arguments) { + for (int i = 1; i < argc; i++) { + if (strcmp(argv[i], "-E") == 0) { + char *tmp = argv[++i]; + int64_t tmpEpoch; + if (strchr(tmp, ':') && strchr(tmp, '-')) { + if (TSDB_CODE_SUCCESS != taosParseTime( + tmp, &tmpEpoch, strlen(tmp), TSDB_TIME_PRECISION_MILLI, 0)) { + fprintf(stderr, "Input end time error!\n"); + return; + } + } else { + tmpEpoch = atoll(tmp); + } + + sprintf(argv[i], "%"PRId64"", tmpEpoch); + debugPrint("%s() LN%d, tmp is: %s, argv[%d]: %s\n", + __func__, __LINE__, tmp, i, argv[i]); + + } else if (strcmp(argv[i], "-g") == 0) { + arguments->debug_print = true; + } + } +} + int main(int argc, char *argv[]) { /* Parse our arguments; every option seen by parse_opt will be reflected in arguments. 
*/ - argp_parse(&argp, argc, argv, 0, 0, &tsArguments); + parse_args(argc, argv, &g_args); - if (tsArguments.abort) { + argp_parse(&argp, argc, argv, 0, 0, &g_args); + + if (g_args.abort) { #ifndef _ALPINE error(10, 0, "ABORTED"); #else @@ -469,81 +523,82 @@ int main(int argc, char *argv[]) { printf("====== arguments config ======\n"); { - printf("host: %s\n", tsArguments.host); - printf("user: %s\n", tsArguments.user); - printf("password: %s\n", tsArguments.password); - printf("port: %u\n", tsArguments.port); - printf("cversion: %s\n", tsArguments.cversion); - printf("mysqlFlag: %d\n", tsArguments.mysqlFlag); - printf("outpath: %s\n", tsArguments.outpath); - printf("inpath: %s\n", tsArguments.inpath); - printf("resultFile: %s\n", tsArguments.resultFile); - printf("encode: %s\n", tsArguments.encode); - printf("all_databases: %d\n", tsArguments.all_databases); - printf("databases: %d\n", tsArguments.databases); - printf("schemaonly: %d\n", tsArguments.schemaonly); - printf("with_property: %d\n", tsArguments.with_property); - printf("start_time: %" PRId64 "\n", tsArguments.start_time); - printf("end_time: %" PRId64 "\n", tsArguments.end_time); - printf("data_batch: %d\n", tsArguments.data_batch); - printf("max_sql_len: %d\n", tsArguments.max_sql_len); - printf("table_batch: %d\n", tsArguments.table_batch); - printf("thread_num: %d\n", tsArguments.thread_num); - printf("allow_sys: %d\n", tsArguments.allow_sys); - printf("abort: %d\n", tsArguments.abort); - printf("isDumpIn: %d\n", tsArguments.isDumpIn); - printf("arg_list_len: %d\n", tsArguments.arg_list_len); + printf("host: %s\n", g_args.host); + printf("user: %s\n", g_args.user); + printf("password: %s\n", g_args.password); + printf("port: %u\n", g_args.port); + printf("cversion: %s\n", g_args.cversion); + printf("mysqlFlag: %d\n", g_args.mysqlFlag); + printf("outpath: %s\n", g_args.outpath); + printf("inpath: %s\n", g_args.inpath); + printf("resultFile: %s\n", g_args.resultFile); + printf("encode: %s\n", g_args.encode); + printf("all_databases: %d\n", g_args.all_databases); + printf("databases: %d\n", g_args.databases); + printf("schemaonly: %d\n", g_args.schemaonly); + printf("with_property: %d\n", g_args.with_property); + printf("start_time: %" PRId64 "\n", g_args.start_time); + printf("end_time: %" PRId64 "\n", g_args.end_time); + printf("data_batch: %d\n", g_args.data_batch); + printf("max_sql_len: %d\n", g_args.max_sql_len); + printf("table_batch: %d\n", g_args.table_batch); + printf("thread_num: %d\n", g_args.thread_num); + printf("allow_sys: %d\n", g_args.allow_sys); + printf("abort: %d\n", g_args.abort); + printf("isDumpIn: %d\n", g_args.isDumpIn); + printf("arg_list_len: %d\n", g_args.arg_list_len); + printf("debug_print: %d\n", g_args.debug_print); - for (int32_t i = 0; i < tsArguments.arg_list_len; i++) { - printf("arg_list[%d]: %s\n", i, tsArguments.arg_list[i]); + for (int32_t i = 0; i < g_args.arg_list_len; i++) { + printf("arg_list[%d]: %s\n", i, g_args.arg_list[i]); } - } + } printf("==============================\n"); - if (tsArguments.cversion[0] != 0){ - tstrncpy(version, tsArguments.cversion, 11); + if (g_args.cversion[0] != 0){ + tstrncpy(version, g_args.cversion, 11); } - if (taosCheckParam(&tsArguments) < 0) { + if (taosCheckParam(&g_args) < 0) { exit(EXIT_FAILURE); } - - g_fpOfResult = fopen(tsArguments.resultFile, "a"); + + g_fpOfResult = fopen(g_args.resultFile, "a"); if (NULL == g_fpOfResult) { - fprintf(stderr, "Failed to open %s for save result\n", tsArguments.resultFile); + fprintf(stderr, "Failed to open %s 
for save result\n", g_args.resultFile); return 1; }; fprintf(g_fpOfResult, "#############################################################################\n"); fprintf(g_fpOfResult, "============================== arguments config =============================\n"); { - fprintf(g_fpOfResult, "host: %s\n", tsArguments.host); - fprintf(g_fpOfResult, "user: %s\n", tsArguments.user); - fprintf(g_fpOfResult, "password: %s\n", tsArguments.password); - fprintf(g_fpOfResult, "port: %u\n", tsArguments.port); - fprintf(g_fpOfResult, "cversion: %s\n", tsArguments.cversion); - fprintf(g_fpOfResult, "mysqlFlag: %d\n", tsArguments.mysqlFlag); - fprintf(g_fpOfResult, "outpath: %s\n", tsArguments.outpath); - fprintf(g_fpOfResult, "inpath: %s\n", tsArguments.inpath); - fprintf(g_fpOfResult, "resultFile: %s\n", tsArguments.resultFile); - fprintf(g_fpOfResult, "encode: %s\n", tsArguments.encode); - fprintf(g_fpOfResult, "all_databases: %d\n", tsArguments.all_databases); - fprintf(g_fpOfResult, "databases: %d\n", tsArguments.databases); - fprintf(g_fpOfResult, "schemaonly: %d\n", tsArguments.schemaonly); - fprintf(g_fpOfResult, "with_property: %d\n", tsArguments.with_property); - fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", tsArguments.start_time); - fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", tsArguments.end_time); - fprintf(g_fpOfResult, "data_batch: %d\n", tsArguments.data_batch); - fprintf(g_fpOfResult, "max_sql_len: %d\n", tsArguments.max_sql_len); - fprintf(g_fpOfResult, "table_batch: %d\n", tsArguments.table_batch); - fprintf(g_fpOfResult, "thread_num: %d\n", tsArguments.thread_num); - fprintf(g_fpOfResult, "allow_sys: %d\n", tsArguments.allow_sys); - fprintf(g_fpOfResult, "abort: %d\n", tsArguments.abort); - fprintf(g_fpOfResult, "isDumpIn: %d\n", tsArguments.isDumpIn); - fprintf(g_fpOfResult, "arg_list_len: %d\n", tsArguments.arg_list_len); + fprintf(g_fpOfResult, "host: %s\n", g_args.host); + fprintf(g_fpOfResult, "user: %s\n", g_args.user); + fprintf(g_fpOfResult, "password: %s\n", g_args.password); + fprintf(g_fpOfResult, "port: %u\n", g_args.port); + fprintf(g_fpOfResult, "cversion: %s\n", g_args.cversion); + fprintf(g_fpOfResult, "mysqlFlag: %d\n", g_args.mysqlFlag); + fprintf(g_fpOfResult, "outpath: %s\n", g_args.outpath); + fprintf(g_fpOfResult, "inpath: %s\n", g_args.inpath); + fprintf(g_fpOfResult, "resultFile: %s\n", g_args.resultFile); + fprintf(g_fpOfResult, "encode: %s\n", g_args.encode); + fprintf(g_fpOfResult, "all_databases: %d\n", g_args.all_databases); + fprintf(g_fpOfResult, "databases: %d\n", g_args.databases); + fprintf(g_fpOfResult, "schemaonly: %d\n", g_args.schemaonly); + fprintf(g_fpOfResult, "with_property: %d\n", g_args.with_property); + fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", g_args.start_time); + fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", g_args.end_time); + fprintf(g_fpOfResult, "data_batch: %d\n", g_args.data_batch); + fprintf(g_fpOfResult, "max_sql_len: %d\n", g_args.max_sql_len); + fprintf(g_fpOfResult, "table_batch: %d\n", g_args.table_batch); + fprintf(g_fpOfResult, "thread_num: %d\n", g_args.thread_num); + fprintf(g_fpOfResult, "allow_sys: %d\n", g_args.allow_sys); + fprintf(g_fpOfResult, "abort: %d\n", g_args.abort); + fprintf(g_fpOfResult, "isDumpIn: %d\n", g_args.isDumpIn); + fprintf(g_fpOfResult, "arg_list_len: %d\n", g_args.arg_list_len); - for (int32_t i = 0; i < tsArguments.arg_list_len; i++) { - fprintf(g_fpOfResult, "arg_list[%d]: %s\n", i, tsArguments.arg_list[i]); + for (int32_t i = 0; i < g_args.arg_list_len; i++) { + 
fprintf(g_fpOfResult, "arg_list[%d]: %s\n", i, g_args.arg_list[i]); } } @@ -552,11 +607,11 @@ int main(int argc, char *argv[]) { time_t tTime = time(NULL); struct tm tm = *localtime(&tTime); - if (tsArguments.isDumpIn) { + if (g_args.isDumpIn) { fprintf(g_fpOfResult, "============================== DUMP IN ============================== \n"); fprintf(g_fpOfResult, "# DumpIn start time: %d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); - if (taosDumpIn(&tsArguments) < 0) { + if (taosDumpIn(&g_args) < 0) { fprintf(g_fpOfResult, "\n"); fclose(g_fpOfResult); return -1; @@ -565,7 +620,7 @@ int main(int argc, char *argv[]) { fprintf(g_fpOfResult, "============================== DUMP OUT ============================== \n"); fprintf(g_fpOfResult, "# DumpOut start time: %d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); - if (taosDumpOut(&tsArguments) < 0) { + if (taosDumpOut(&g_args) < 0) { fprintf(g_fpOfResult, "\n"); fclose(g_fpOfResult); return -1; @@ -573,9 +628,9 @@ int main(int argc, char *argv[]) { fprintf(g_fpOfResult, "\n============================== TOTAL STATISTICS ============================== \n"); fprintf(g_fpOfResult, "# total database count: %d\n", g_resultStatistics.totalDatabasesOfDumpOut); - fprintf(g_fpOfResult, "# total super table count: %d\n", g_resultStatistics.totalSuperTblsOfDumpOut); - fprintf(g_fpOfResult, "# total child table count: %"PRId64"\n", g_resultStatistics.totalChildTblsOfDumpOut); - fprintf(g_fpOfResult, "# total row count: %"PRId64"\n", g_resultStatistics.totalRowsOfDumpOut); + fprintf(g_fpOfResult, "# total super table count: %d\n", g_resultStatistics.totalSuperTblsOfDumpOut); + fprintf(g_fpOfResult, "# total child table count: %"PRId64"\n", g_resultStatistics.totalChildTblsOfDumpOut); + fprintf(g_fpOfResult, "# total row count: %"PRId64"\n", g_resultStatistics.totalRowsOfDumpOut); } fprintf(g_fpOfResult, "\n"); @@ -1236,8 +1291,8 @@ void* taosDumpOutWorkThreadFp(void *arg) FILE *fp = NULL; memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128); - if (tsArguments.outpath[0] != 0) { - sprintf(tmpBuf, "%s/%s.tables.%d.sql", tsArguments.outpath, pThread->dbName, pThread->threadIndex); + if (g_args.outpath[0] != 0) { + sprintf(tmpBuf, "%s/%s.tables.%d.sql", g_args.outpath, pThread->dbName, pThread->threadIndex); } else { sprintf(tmpBuf, "%s.tables.%d.sql", pThread->dbName, pThread->threadIndex); } @@ -1270,7 +1325,7 @@ void* taosDumpOutWorkThreadFp(void *arg) ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord)); if (readLen <= 0) break; - int ret = taosDumpTable(tableRecord.name, tableRecord.metric, &tsArguments, fp, pThread->taosCon, pThread->dbName); + int ret = taosDumpTable(tableRecord.name, tableRecord.metric, &g_args, fp, pThread->taosCon, pThread->dbName); if (ret >= 0) { // TODO: sum table count and table rows by self pThread->tablesOfDumpOut++; @@ -1282,13 +1337,13 @@ void* taosDumpOutWorkThreadFp(void *arg) } tablesInOneFile++; - if (tablesInOneFile >= tsArguments.table_batch) { + if (tablesInOneFile >= g_args.table_batch) { fclose(fp); tablesInOneFile = 0; memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128); - if (tsArguments.outpath[0] != 0) { - sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql", tsArguments.outpath, pThread->dbName, pThread->threadIndex, fileNameIndex); + if (g_args.outpath[0] != 0) { + sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql", g_args.outpath, pThread->dbName, pThread->threadIndex, fileNameIndex); } else { sprintf(tmpBuf, 
"%s.tables.%d-%d.sql", pThread->dbName, pThread->threadIndex, fileNameIndex); } @@ -1491,14 +1546,14 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao taos_free_result(res); lseek(fd, 0, SEEK_SET); - int maxThreads = tsArguments.thread_num; + int maxThreads = g_args.thread_num; int tableOfPerFile ; - if (numOfTable <= tsArguments.thread_num) { + if (numOfTable <= g_args.thread_num) { tableOfPerFile = 1; maxThreads = numOfTable; } else { - tableOfPerFile = numOfTable / tsArguments.thread_num; - if (0 != numOfTable % tsArguments.thread_num) { + tableOfPerFile = numOfTable / g_args.thread_num; + if (0 != numOfTable % g_args.thread_num) { tableOfPerFile += 1; } } @@ -1806,9 +1861,9 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* //} } - fprintf(fp, "\n"); + fprintf(fp, "\n"); atomic_add_fetch_64(&totalDumpOutRows, totalRows); - + taos_free_result(tmpResult); free(tmpBuffer); return totalRows; @@ -1824,7 +1879,7 @@ int taosCheckParam(struct arguments *arguments) { fprintf(stderr, "start time is larger than end time\n"); return -1; } - + if (arguments->arg_list_len == 0) { if ((!arguments->all_databases) && (!arguments->isDumpIn)) { fprintf(stderr, "taosdump requires parameters\n"); @@ -2214,7 +2269,7 @@ void* taosDumpInWorkThreadFp(void *arg) continue; } fprintf(stderr, "Success Open input file: %s\n", SQLFileName); - taosDumpInOneFile(pThread->taosCon, fp, tsfCharset, tsArguments.encode, SQLFileName); + taosDumpInOneFile(pThread->taosCon, fp, tsfCharset, g_args.encode, SQLFileName); } } From 3f273cbf899a582f453eeaf0f18067daead01454 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sun, 4 Apr 2021 10:50:20 +0800 Subject: [PATCH 070/178] Hotfix/sangshuduo/td 3653 taosdemo json param cleanup (#5676) * [TD-3653] : taosdemo json parameter cleanup. * [TD-36553] : taosdemo parse configDir in cmdline test. 
Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 5a88f51514..652e20d58b 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -694,19 +694,12 @@ static void printHelp() { static void parse_args(int argc, char *argv[], SArguments *arguments) { char **sptr; - wordexp_t full_path; for (int i = 1; i < argc; i++) { if (strcmp(argv[i], "-f") == 0) { arguments->metaFile = argv[++i]; } else if (strcmp(argv[i], "-c") == 0) { - char *configPath = argv[++i]; - if (wordexp(configPath, &full_path, 0) != 0) { - errorPrint( "Invalid path %s\n", configPath); - return; - } - taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]); - wordfree(&full_path); + strcpy(configDir, argv[++i]); } else if (strcmp(argv[i], "-h") == 0) { arguments->host = argv[++i]; @@ -801,8 +794,6 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { arguments->verbose_print = true; } else if (strcmp(argv[i], "-pp") == 0) { arguments->performance_print = true; - } else if (strcmp(argv[i], "-c") == 0) { - strcpy(configDir, argv[++i]); } else if (strcmp(argv[i], "-O") == 0) { arguments->disorderRatio = atoi(argv[++i]); @@ -1112,6 +1103,7 @@ static int printfInsertMeta() { printf("host: \033[33m%s:%u\033[0m\n", g_Dbs.host, g_Dbs.port); printf("user: \033[33m%s\033[0m\n", g_Dbs.user); printf("password: \033[33m%s\033[0m\n", g_Dbs.password); + printf("configDir: \033[33m%s\033[0m\n", configDir); printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile); printf("thread num of insert data: \033[33m%d\033[0m\n", g_Dbs.threadCount); printf("thread num of create table: \033[33m%d\033[0m\n", g_Dbs.threadCountByCreateTbl); @@ -1297,6 +1289,7 @@ static void printfInsertMetaToFile(FILE* fp) { fprintf(fp, "host: %s:%u\n", g_Dbs.host, g_Dbs.port); fprintf(fp, "user: %s\n", g_Dbs.user); + fprintf(fp, "configDir: %s\n", configDir); fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile); fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount); fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountByCreateTbl); @@ -2616,7 +2609,6 @@ static void* createTable(void *sarg) len += snprintf(buffer + len, buff_len - len, "create table "); } - char* tagsValBuf = NULL; if (0 == superTblInfo->tagSource) { tagsValBuf = generateTagVaulesForStb(superTblInfo, i); @@ -2629,7 +2621,6 @@ static void* createTable(void *sarg) free(buffer); return NULL; } - len += snprintf(buffer + len, superTblInfo->maxSqlLen - len, "if not exists %s.%s%d using %s.%s tags %s ", @@ -2638,7 +2629,6 @@ static void* createTable(void *sarg) superTblInfo->sTblName, tagsValBuf); free(tagsValBuf); batchNum++; - if ((batchNum < superTblInfo->batchCreateTableNum) && ((superTblInfo->maxSqlLen - len) >= (superTblInfo->lenOfTagOfOneRow + 256))) { @@ -6525,6 +6515,16 @@ static void queryResult() { static void testCmdLine() { + if (strlen(configDir)) { + wordexp_t full_path; + if (wordexp(configDir, &full_path, 0) != 0) { + errorPrint( "Invalid path %s\n", configDir); + return; + } + taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]); + wordfree(&full_path); + } + g_args.test_mode = INSERT_TEST; insertTestProcess(); From a13873e1f93659bf73c939df99bd2a6abfd33c7d Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sun, 4 Apr 2021 22:12:48 +0800 Subject: [PATCH 071/178] Hotfix/sangshuduo/td 3197 fix taosdemo coverity scan (#5677) * [TD-3197] : fix taosdemo coverity 
scan issues. * [TD-3197] : fix taosdemo coverity scan issue. fix subscribeTest pids uninitialized. * [TD-3197] : fix taosdemo coverity scan issues. * [TD-3197] : fix coverity scan issues. check super tbl info pointer. * [TD-3197] : fix coverity scan issues. move sub tbl query thread join into loop * [TD-3197] : fix coverity scan issues. remove unused variable * [TD-3197] : fix coverity scan issues. use more secure random library * [TD-3197] : fix coverity scan issues. use strncpy for more safe * [TD-3197] : fix taosdemo coverity scan issue. replace arc4random with rand(). * [TD-3197] : fix coverity scan issues. check stb info pointer for start time * [TD-3197] : fix coverity scan issues. fix strcpy vulnerability * [TD-3197] : fix taosdemo coverity scan issue. modify taosdemoTest2. try to check database continuously. * [TD-3197] : taosdemo coverity scan issues. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 652e20d58b..87a08dee49 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -699,7 +699,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { if (strcmp(argv[i], "-f") == 0) { arguments->metaFile = argv[++i]; } else if (strcmp(argv[i], "-c") == 0) { - strcpy(configDir, argv[++i]); + tstrncpy(configDir, argv[++i], MAX_FILE_NAME_LEN); } else if (strcmp(argv[i], "-h") == 0) { arguments->host = argv[++i]; @@ -2780,7 +2780,7 @@ static void createChildTables() { j++; } - len = snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ")"); + snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ")"); verbosePrint("%s() LN%d: dbName: %s num of tb: %d schema: %s\n", __func__, __LINE__, @@ -4431,6 +4431,8 @@ static int generateDataTail(char *tableName, int32_t tableSeq, int k = 0; for (k = 0; k < batch;) { char data[MAX_DATA_SIZE]; + memset(data, 0, MAX_DATA_SIZE); + int retLen = 0; if (superTblInfo) { @@ -4633,12 +4635,13 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { return NULL; } - char tableName[TSDB_TABLE_NAME_LEN]; pThreadInfo->totalInsertRows = 0; pThreadInfo->totalAffectedRows = 0; + int nTimeStampStep = superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP; + int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT; int insert_interval = superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; @@ -4697,8 +4700,8 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { if (0 == strlen(tableName)) { errorPrint("[%d] %s() LN%d, getTableName return null\n", pThreadInfo->threadID, __func__, __LINE__); + free(buffer); return NULL; - exit(-1); } int headLen; @@ -4760,7 +4763,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { generatedRecPerTbl += batchPerTbl; startTime = pThreadInfo->start_time - + generatedRecPerTbl * superTblInfo->timeStampStep; + + generatedRecPerTbl * nTimeStampStep; flagSleep = true; if (generatedRecPerTbl >= insertRows) From 6d31f34f787294de0edce546a6aa698d0deb46f2 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Mon, 5 Apr 2021 09:50:06 +0800 Subject: [PATCH 072/178] Hotfix/sangshuduo/td 3580 taosdump human timeformat (#5680) * [TD-3580] : taosdump support human readable time format. * [TD-3580] : taosdump support human readable time format. fix no-value-given segfault. (See the sketch of the -E parsing below.)
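The fix below also shows the heuristic taosdump uses to tell the two -E formats apart: a value containing both ':' and '-' is treated as an ISO8601/RFC3339 timestamp and parsed with taosParseTime, anything else as a raw epoch value. A hedged sketch of that decision follows; parseEndTime is an illustrative name, and the taosParseTime prototype and the two macro values are inferred from the call site in the diff:

#include <inttypes.h>
#include <stdlib.h>
#include <string.h>

// TDengine internals as used in the diff below; prototype inferred,
// parameter names are illustrative.
extern int32_t taosParseTime(char *timestr, int64_t *time, int32_t len,
                             int32_t timePrec, int8_t daylight);
#define TSDB_TIME_PRECISION_MILLI 0   // assumed to match TDengine's value
#define TSDB_CODE_SUCCESS         0

static int32_t parseEndTime(char *tmp, int64_t *epoch) {
    if (strchr(tmp, ':') && strchr(tmp, '-')) {
        // human-readable form, e.g. 2017-10-01T18:00:00+0800
        return taosParseTime(tmp, epoch, (int32_t)strlen(tmp),
                             TSDB_TIME_PRECISION_MILLI, 0);
    }
    *epoch = atoll(tmp);  // otherwise: a plain epoch value
    return TSDB_CODE_SUCCESS;
}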
Co-authored-by: Shuduo Sang --- src/kit/taosdump/taosdump.c | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index b645585ede..9f176904fe 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -483,22 +483,26 @@ static int queryDbImpl(TAOS *taos, char *command) { static void parse_args(int argc, char *argv[], SArguments *arguments) { for (int i = 1; i < argc; i++) { if (strcmp(argv[i], "-E") == 0) { - char *tmp = argv[++i]; - int64_t tmpEpoch; - if (strchr(tmp, ':') && strchr(tmp, '-')) { - if (TSDB_CODE_SUCCESS != taosParseTime( + if (argv[i+1]) { + char *tmp = argv[++i]; + int64_t tmpEpoch; + if (strchr(tmp, ':') && strchr(tmp, '-')) { + if (TSDB_CODE_SUCCESS != taosParseTime( tmp, &tmpEpoch, strlen(tmp), TSDB_TIME_PRECISION_MILLI, 0)) { - fprintf(stderr, "Input end time error!\n"); - return; + fprintf(stderr, "Input end time error!\n"); + return; + } + } else { + tmpEpoch = atoll(tmp); } - } else { - tmpEpoch = atoll(tmp); - } - sprintf(argv[i], "%"PRId64"", tmpEpoch); - debugPrint("%s() LN%d, tmp is: %s, argv[%d]: %s\n", + sprintf(argv[i], "%"PRId64"", tmpEpoch); + debugPrint("%s() LN%d, tmp is: %s, argv[%d]: %s\n", __func__, __LINE__, tmp, i, argv[i]); - + } else { + fprintf(stderr, "Input end time error!\n"); + return; + } } else if (strcmp(argv[i], "-g") == 0) { arguments->debug_print = true; } From 7cd0648bb65953742b2f6adeba52358769deb812 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Tue, 6 Apr 2021 10:26:06 +0800 Subject: [PATCH 073/178] [TD-3569]add taosd mem leak test into CI --- Jenkinsfile | 14 ++++++-------- tests/pytest/handle_taosd_val_log.sh | 2 +- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index bac0cce33b..7bebbe1c1d 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -185,14 +185,12 @@ pipeline { rm -rf /var/log/taos/* ./handle_crash_gen_val_log.sh ''' - catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { - sh ''' - cd ${WKC}/tests/pytest - rm -rf /var/lib/taos/* - rm -rf /var/log/taos/* - ./handle_taosd_val_log.sh - ''' - } + sh ''' + cd ${WKC}/tests/pytest + rm -rf /var/lib/taos/* + rm -rf /var/log/taos/* + ./handle_taosd_val_log.sh + ''' timeout(time: 45, unit: 'MINUTES'){ sh ''' date diff --git a/tests/pytest/handle_taosd_val_log.sh b/tests/pytest/handle_taosd_val_log.sh index 6b7f669efe..3161b5ff89 100755 --- a/tests/pytest/handle_taosd_val_log.sh +++ b/tests/pytest/handle_taosd_val_log.sh @@ -21,7 +21,7 @@ rm -rf /var/lib/taos/* nohup valgrind --leak-check=yes $TAOSD_DIR > $TDIR/$VALGRIND_OUT 2> $TDIR/$VALGRIND_ERR & sleep 20 cd - -./crash_gen.sh -p -t 10 -s 200 +./crash_gen.sh -p -t 10 -s 1000 ps -ef |grep valgrind|grep -v grep|awk '{print $2}'|xargs kill -term while true do From 43d17f800336112e71d75f080ad9015fb716229a Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Tue, 6 Apr 2021 11:27:28 +0800 Subject: [PATCH 074/178] [TD-3672] : update C# connector environment support. 
--- documentation20/cn/02.getting-started/docs.md | 2 +- documentation20/cn/08.connector/docs.md | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/documentation20/cn/02.getting-started/docs.md b/documentation20/cn/02.getting-started/docs.md index db2885d302..b46322cef2 100644 --- a/documentation20/cn/02.getting-started/docs.md +++ b/documentation20/cn/02.getting-started/docs.md @@ -212,7 +212,7 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s); | **Python** | ● | ● | ● | ○ | ● | ● | ● | -- | ● | | **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- | -- | | **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- | -| **C#** | ○ | ● | ● | ○ | ○ | ○ | ○ | -- | -- | +| **C#** | ● | ● | ○ | ○ | ○ | ○ | ○ | -- | -- | | **RESTful** | ● | ● | ● | ● | ● | ● | ● | ● | ● | 注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。 diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md index fb50e20e51..6811315e7d 100644 --- a/documentation20/cn/08.connector/docs.md +++ b/documentation20/cn/08.connector/docs.md @@ -14,7 +14,7 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、 | **Python** | ● | ● | ● | ○ | ● | ● | ○ | -- | ○ | | **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- | -- | | **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- | -| **C#** | ○ | ● | ● | ○ | ○ | ○ | ○ | -- | -- | +| **C#** | ● | ● | ○ | ○ | ○ | ○ | ○ | -- | -- | | **RESTful** | ● | ● | ● | ● | ● | ● | ○ | ○ | ○ | 其中 ● 表示经过官方测试验证, ○ 表示非官方测试验证。 @@ -23,7 +23,7 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、 * 在没有安装TDengine服务端软件的系统中使用连接器(除RESTful外)访问 TDengine 数据库,需要安装相应版本的客户端安装包来使应用驱动(Linux系统中文件名为libtaos.so,Windows系统中为taos.dll)被安装在系统中,否则会产生无法找到相应库文件的错误。 * 所有执行 SQL 语句的 API,例如 C/C++ Connector 中的 `tao_query`、`taos_query_a`、`taos_subscribe` 等,以及其它语言中与它们对应的API,每次都只能执行一条 SQL 语句,如果实际参数中包含了多条语句,它们的行为是未定义的。 -* 升级到TDengine到2.0.8.0版本的用户,必须更新JDBC连接TDengine必须升级taos-jdbcdriver到2.0.12及以上。 +* 升级到TDengine到2.0.8.0版本的用户,必须更新JDBC连接TDengine必须升级taos-jdbcdriver到2.0.12及以上。详细的版本依赖关系请参见 [taos-jdbcdriver 文档](https://www.taosdata.com/cn/documentation/connector/java#version)。 * 无论选用何种编程语言的连接器,2.0 及以上版本的 TDengine 推荐数据库应用的每个线程都建立一个独立的连接,或基于线程建立连接池,以避免连接内的“USE statement”状态量在线程之间相互干扰(但连接的查询和写入操作都是线程安全的)。 ## 安装连接器驱动步骤 From 29bb17877dbdfbf19bda7f355700dcb7e64e36e3 Mon Sep 17 00:00:00 2001 From: wu champion Date: Tue, 6 Apr 2021 11:33:24 +0800 Subject: [PATCH 075/178] add case for TD-3654 --- tests/pytest/fulltest.sh | 1 + tests/pytest/insert/bug3654.py | 53 ++++++++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+) create mode 100644 tests/pytest/insert/bug3654.py diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 7ff41b13a6..cc6eb719f2 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -22,6 +22,7 @@ python3 ./test.py -f insert/alterTableAndInsert.py python3 ./test.py -f insert/insertIntoTwoTables.py python3 ./test.py -f insert/before_1970.py python3 bug2265.py +python3 ./test.py -f insert/bug3654.py #table python3 ./test.py -f table/alter_wal0.py diff --git a/tests/pytest/insert/bug3654.py b/tests/pytest/insert/bug3654.py new file mode 100644 index 0000000000..41bedf771b --- /dev/null +++ b/tests/pytest/insert/bug3654.py @@ -0,0 +1,53 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
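The bug3654 regression test whose body continues below inserts rows at and just before the Unix epoch (its gapdate arithmetic walks back to 1970-01-01 in UTC+8). The boundary it exercises is simply a negative epoch value; a small C illustration, assuming the GNU/BSD timegm() extension is available:

    #include <stdio.h>
    #include <time.h>

    int main(void) {
      struct tm t = {0};
      t.tm_year = 69;   /* years since 1900 -> 1969 */
      t.tm_mon  = 11;   /* December */
      t.tm_mday = 31;   /* 1969-12-31 00:00:00 UTC */
      long long ms = (long long)timegm(&t) * 1000LL;
      printf("%lld\n", ms);   /* -86400000: one day before the epoch */
      return 0;
    }

Storage and filtering must both handle such negative timestamps, which is presumably why the test checks that each pre-epoch row remains queryable.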
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import taos +import sys + +from util.log import * +from util.sql import * +from util.cases import * + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + + def run(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + tdSql.execute("drop database if exists db") + tdSql.execute("create database db keep 36500,36500,36500") + tdSql.execute("use db") + tdSql.execute("create stable if not exists stb1 (ts timestamp, c1 int) tags(t11 int)") + tdSql.execute("create table t10 using stb1 tags(1)") + gapdate = (datetime.datetime.now() - datetime.datetime(1970, 1, 1, 8, 0, 0)).days + + tdLog.printNoPrefix("==========step2:insert data") + tdSql.execute(f"insert into t10 values (now-{gapdate}d, 1)") + tdSql.execute(f"insert into t10 values (now-{gapdate + 1}d, 2)") + + tdLog.printNoPrefix("==========step3:query") + tdSql.query("select * from t10 where c1=1") + tdSql.checkRows(1) + tdSql.query("select * from t10 where c1=2") + tdSql.checkRows(1) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file From 20ef6c39a1aad4611f3782154ecd83e0fc660de4 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Tue, 6 Apr 2021 15:49:22 +0800 Subject: [PATCH 076/178] test --- tests/pytest/concurrent_inquiry.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pytest/concurrent_inquiry.sh b/tests/pytest/concurrent_inquiry.sh index f426fbbcec..e5918792f4 100755 --- a/tests/pytest/concurrent_inquiry.sh +++ b/tests/pytest/concurrent_inquiry.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/bash # This is the script for us to try to cause the TDengine server or client to crash # From 331901bfb9f57b8ee13a50de71ff4b727e03ecc4 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Tue, 6 Apr 2021 15:50:33 +0800 Subject: [PATCH 077/178] test --- tests/pytest/concurrent_inquiry.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pytest/concurrent_inquiry.sh b/tests/pytest/concurrent_inquiry.sh index f426fbbcec..e5918792f4 100755 --- a/tests/pytest/concurrent_inquiry.sh +++ b/tests/pytest/concurrent_inquiry.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/bash # This is the script for us to try to cause the TDengine server or client to crash # From 985d95a54bb7564d45e9c21066a654217fc23ede Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Tue, 6 Apr 2021 16:13:11 +0800 Subject: [PATCH 078/178] [TD-3671]change target branch --- Jenkinsfile | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 7bebbe1c1d..2c64e71f51 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -50,7 +50,16 @@ def pre_test(){ git clean -dfx cd ${WK} git reset --hard HEAD~10 - git checkout develop + ''' + script { + if (env.CHANGE_TARGET == 'master') { + sh 'git checkout master' + } + else { + sh 'git checkout develop' + } + } + sh ''' git pull >/dev/null cd ${WK} export TZ=Asia/Harbin @@ -92,7 +101,8 @@ pipeline { git pull git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD - ''' + ''' + script{ env.skipstage=sh(script:"cd 
${WORKSPACE}.tes && git --no-pager diff --name-only FETCH_HEAD develop|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0 ",returnStdout:true) } From e15aacc946b00bf89863093753d422903eb81bd9 Mon Sep 17 00:00:00 2001 From: plum-lihui Date: Tue, 6 Apr 2021 16:27:01 +0800 Subject: [PATCH 079/178] [TD-3677]: test pr message 1 --- a.txt | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 a.txt diff --git a/a.txt b/a.txt new file mode 100644 index 0000000000..e4d95ed166 --- /dev/null +++ b/a.txt @@ -0,0 +1,6 @@ +AAAAAA +BBBBB +CCCC +DDD +EE +F From ba76fe9897ea71c67c14f307723fb0185226d2d1 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Tue, 6 Apr 2021 16:13:11 +0800 Subject: [PATCH 080/178] [TD-3671]change target branch --- Jenkinsfile | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index d96eeaa724..4c9da9a928 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -48,7 +48,16 @@ def pre_test(){ find ${WKC}/tests/pytest -name \'*\'.sql -exec rm -rf {} \\; cd ${WK} git reset --hard HEAD~10 - git checkout develop + ''' + script { + if (env.CHANGE_TARGET == 'master') { + sh 'git checkout master' + } + else { + sh 'git checkout develop' + } + } + sh ''' git pull >/dev/null cd ${WK} export TZ=Asia/Harbin @@ -86,7 +95,8 @@ pipeline { git pull git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD - ''' + ''' + script{ env.skipstage=sh(script:"cd ${WORKSPACE}.tes && git --no-pager diff --name-only FETCH_HEAD develop|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0 ",returnStdout:true) } From c023cf0b631262c162da64584ed02cfd743773a7 Mon Sep 17 00:00:00 2001 From: plum-lihui Date: Tue, 6 Apr 2021 16:27:14 +0800 Subject: [PATCH 081/178] [TD-3677]: test pr message 2 --- a.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/a.txt b/a.txt index e4d95ed166..e0ec77d19c 100644 --- a/a.txt +++ b/a.txt @@ -3,4 +3,3 @@ BBBBB CCCC DDD EE -F From 1746351dea03b0494ad7cedc231014bdaddf4088 Mon Sep 17 00:00:00 2001 From: plum-lihui Date: Tue, 6 Apr 2021 16:27:27 +0800 Subject: [PATCH 082/178] [TD-3677]: test pr message 3 --- a.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/a.txt b/a.txt index e0ec77d19c..6367d1243d 100644 --- a/a.txt +++ b/a.txt @@ -2,4 +2,3 @@ AAAAAA BBBBB CCCC DDD -EE From b63aab1b5a7a23046b595c0d0cefc5c691ecc339 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 6 Apr 2021 16:46:26 +0800 Subject: [PATCH 083/178] Hotfix/sangshuduo/td 3197 fix taosdemo coverity scan (#5688) * [TD-3197] : fix taosdemo coverity scan issues. * [TD-3197] : fix taosdemo coverity scan issue. fix subscribeTest pids uninitialized. * [TD-3197] : fix taosdemo coverity scan issues. * [TD-3197] : fix coverity scan issues. check super tbl info pointer. * [TD-3197] : fix coverity scan issues. move sub tbl query thread join into loop * [TD-3197] : fix coverity scan issues. remove unused variable * [TD-3197] : fix coverity scan issues. use more secure random library * [TD-3197] : fix coverity scan issues. use strncpy for more safe * [TD-3197] : fix taosdemo coverity scan issue. replace arc4random with rand(). * [TD-3197] : fix coverity scan issues. check stb info pointer for start time * [TD-3197] : fix coverity scan issues. fix strcpy vulnerability * [TD-3197] : fix taosdemo coverity scan issue. modify taosdemoTest2. try to check database continously. * [TD-3197] : taosdemo coverity scan issues. * [TD-3197] : fix memory leak when parsing arguments. * [TD-3197] : fix cmake strip arguments. 
* [TD-3197] : taosdemo coverity scan. fix cmake string manipulation. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/CMakeLists.txt | 13 ++++++------- src/kit/taosdemo/taosdemo.c | 2 ++ src/kit/taosdump/taosdump.c | 18 +++++++++++------- 3 files changed, 19 insertions(+), 14 deletions(-) diff --git a/src/kit/taosdemo/CMakeLists.txt b/src/kit/taosdemo/CMakeLists.txt index ba27044a87..4e38a8842e 100644 --- a/src/kit/taosdemo/CMakeLists.txt +++ b/src/kit/taosdemo/CMakeLists.txt @@ -9,19 +9,18 @@ IF (GIT_FOUND) EXECUTE_PROCESS( COMMAND ${GIT_EXECUTABLE} log --pretty=oneline -n 1 ${CMAKE_CURRENT_LIST_DIR}/taosdemo.c RESULT_VARIABLE RESULT - OUTPUT_VARIABLE TAOSDEMO_COMMIT) - EXECUTE_PROCESS( - COMMAND bash "-c" "echo '${TAOSDEMO_COMMIT}' | awk '{print $1}' | cut -c -9" - RESULT_VARIABLE RESULT OUTPUT_VARIABLE TAOSDEMO_COMMIT_SHA1) + STRING(SUBSTRING "${TAOSDEMO_COMMIT_SHA1}" 0 7 TAOSDEMO_COMMIT_SHA1) EXECUTE_PROCESS( COMMAND ${GIT_EXECUTABLE} status -z -s ${CMAKE_CURRENT_LIST_DIR}/taosdemo.c RESULT_VARIABLE RESULT OUTPUT_VARIABLE TAOSDEMO_STATUS) - EXECUTE_PROCESS( + IF (TD_LINUX) + EXECUTE_PROCESS( COMMAND bash "-c" "echo '${TAOSDEMO_STATUS}' | awk '{print $1}'" RESULT_VARIABLE RESULT OUTPUT_VARIABLE TAOSDEMO_STATUS) + ENDIF (TD_LINUX) MESSAGE("taosdemo.c status: " ${TAOSDEMO_STATUS}) ELSE() MESSAGE("Git not found") @@ -29,9 +28,9 @@ ELSE() SET(TAOSDEMO_STATUS "unknown") ENDIF (GIT_FOUND) -STRING(STRIP ${TAOSDEMO_COMMIT_SHA1} TAOSDEMO_COMMIT_SHA1) +STRING(STRIP "${TAOSDEMO_COMMIT_SHA1}" TAOSDEMO_COMMIT_SHA1) MESSAGE("taosdemo's latest commit in short is:" ${TAOSDEMO_COMMIT_SHA1}) -STRING(STRIP ${TAOSDEMO_STATUS} TAOSDEMO_STATUS) +STRING(STRIP "${TAOSDEMO_STATUS}" TAOSDEMO_STATUS) IF (TAOSDEMO_STATUS MATCHES "M") SET(TAOSDEMO_STATUS "modified") diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 87a08dee49..edd7b86b1b 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -769,6 +769,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { && strcasecmp(token, "BINARY") && strcasecmp(token, "NCHAR")) { printHelp(); + free(dupstr); ERROR_EXIT("Invalid data_type!\n"); exit(EXIT_FAILURE); } @@ -776,6 +777,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { token = strsep(&running, ","); if (index >= MAX_NUM_DATATYPE) break; } + free(dupstr); sptr[index] = NULL; } } else if (strcmp(argv[i], "-w") == 0) { diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index 9f176904fe..fd6ee9f2fc 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -483,25 +483,29 @@ static int queryDbImpl(TAOS *taos, char *command) { static void parse_args(int argc, char *argv[], SArguments *arguments) { for (int i = 1; i < argc; i++) { if (strcmp(argv[i], "-E") == 0) { - if (argv[i+1]) { - char *tmp = argv[++i]; + char *tmp = strdup(argv[++i]); + + if (tmp) { int64_t tmpEpoch; if (strchr(tmp, ':') && strchr(tmp, '-')) { if (TSDB_CODE_SUCCESS != taosParseTime( - tmp, &tmpEpoch, strlen(tmp), TSDB_TIME_PRECISION_MILLI, 0)) { + tmp, &tmpEpoch, strlen(tmp), TSDB_TIME_PRECISION_MILLI, 0)) { fprintf(stderr, "Input end time error!\n"); + free(tmp); return; } } else { tmpEpoch = atoll(tmp); } - + sprintf(argv[i], "%"PRId64"", tmpEpoch); debugPrint("%s() LN%d, tmp is: %s, argv[%d]: %s\n", - __func__, __LINE__, tmp, i, argv[i]); + __func__, __LINE__, tmp, i, argv[i]); + + free(tmp); } else { - fprintf(stderr, "Input end time error!\n"); - return; + errorPrint("%s() LN%d, strdup() cannot allocate 
memory\n", __func__, __LINE__); + exit(-1); } } else if (strcmp(argv[i], "-g") == 0) { arguments->debug_print = true; From d38e4a1b581636476a263b5ee7443deb1161d079 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 6 Apr 2021 16:47:13 +0800 Subject: [PATCH 084/178] Hotfix/sangshuduo/td 3197 fix taosdemo coverity scan (#5691) * [TD-3197] : fix taosdemo coverity scan issues. * [TD-3197] : fix taosdemo coverity scan issue. fix subscribeTest pids uninitialized. * [TD-3197] : fix taosdemo coverity scan issues. * [TD-3197] : fix coverity scan issues. check super tbl info pointer. * [TD-3197] : fix coverity scan issues. move sub tbl query thread join into loop * [TD-3197] : fix coverity scan issues. remove unused variable * [TD-3197] : fix coverity scan issues. use more secure random library * [TD-3197] : fix coverity scan issues. use strncpy for more safe * [TD-3197] : fix taosdemo coverity scan issue. replace arc4random with rand(). * [TD-3197] : fix coverity scan issues. check stb info pointer for start time * [TD-3197] : fix coverity scan issues. fix strcpy vulnerability * [TD-3197] : fix taosdemo coverity scan issue. modify taosdemoTest2. try to check database continously. * [TD-3197] : taosdemo coverity scan issues. * [TD-3197] : fix memory leak when parsing arguments. * [TD-3197] : fix cmake strip arguments. * [TD-3197] : taosdemo coverity scan. fix cmake string manipulation. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/CMakeLists.txt | 13 ++++++------- src/kit/taosdemo/taosdemo.c | 2 ++ src/kit/taosdump/taosdump.c | 18 +++++++++++------- 3 files changed, 19 insertions(+), 14 deletions(-) diff --git a/src/kit/taosdemo/CMakeLists.txt b/src/kit/taosdemo/CMakeLists.txt index ba27044a87..4e38a8842e 100644 --- a/src/kit/taosdemo/CMakeLists.txt +++ b/src/kit/taosdemo/CMakeLists.txt @@ -9,19 +9,18 @@ IF (GIT_FOUND) EXECUTE_PROCESS( COMMAND ${GIT_EXECUTABLE} log --pretty=oneline -n 1 ${CMAKE_CURRENT_LIST_DIR}/taosdemo.c RESULT_VARIABLE RESULT - OUTPUT_VARIABLE TAOSDEMO_COMMIT) - EXECUTE_PROCESS( - COMMAND bash "-c" "echo '${TAOSDEMO_COMMIT}' | awk '{print $1}' | cut -c -9" - RESULT_VARIABLE RESULT OUTPUT_VARIABLE TAOSDEMO_COMMIT_SHA1) + STRING(SUBSTRING "${TAOSDEMO_COMMIT_SHA1}" 0 7 TAOSDEMO_COMMIT_SHA1) EXECUTE_PROCESS( COMMAND ${GIT_EXECUTABLE} status -z -s ${CMAKE_CURRENT_LIST_DIR}/taosdemo.c RESULT_VARIABLE RESULT OUTPUT_VARIABLE TAOSDEMO_STATUS) - EXECUTE_PROCESS( + IF (TD_LINUX) + EXECUTE_PROCESS( COMMAND bash "-c" "echo '${TAOSDEMO_STATUS}' | awk '{print $1}'" RESULT_VARIABLE RESULT OUTPUT_VARIABLE TAOSDEMO_STATUS) + ENDIF (TD_LINUX) MESSAGE("taosdemo.c status: " ${TAOSDEMO_STATUS}) ELSE() MESSAGE("Git not found") @@ -29,9 +28,9 @@ ELSE() SET(TAOSDEMO_STATUS "unknown") ENDIF (GIT_FOUND) -STRING(STRIP ${TAOSDEMO_COMMIT_SHA1} TAOSDEMO_COMMIT_SHA1) +STRING(STRIP "${TAOSDEMO_COMMIT_SHA1}" TAOSDEMO_COMMIT_SHA1) MESSAGE("taosdemo's latest commit in short is:" ${TAOSDEMO_COMMIT_SHA1}) -STRING(STRIP ${TAOSDEMO_STATUS} TAOSDEMO_STATUS) +STRING(STRIP "${TAOSDEMO_STATUS}" TAOSDEMO_STATUS) IF (TAOSDEMO_STATUS MATCHES "M") SET(TAOSDEMO_STATUS "modified") diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 87a08dee49..edd7b86b1b 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -769,6 +769,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { && strcasecmp(token, "BINARY") && strcasecmp(token, "NCHAR")) { printHelp(); + free(dupstr); ERROR_EXIT("Invalid data_type!\n"); exit(EXIT_FAILURE); } @@ -776,6 +777,7 @@ static void 
parse_args(int argc, char *argv[], SArguments *arguments) { token = strsep(&running, ","); if (index >= MAX_NUM_DATATYPE) break; } + free(dupstr); sptr[index] = NULL; } } else if (strcmp(argv[i], "-w") == 0) { diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index 9f176904fe..fd6ee9f2fc 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -483,25 +483,29 @@ static int queryDbImpl(TAOS *taos, char *command) { static void parse_args(int argc, char *argv[], SArguments *arguments) { for (int i = 1; i < argc; i++) { if (strcmp(argv[i], "-E") == 0) { - if (argv[i+1]) { - char *tmp = argv[++i]; + char *tmp = strdup(argv[++i]); + + if (tmp) { int64_t tmpEpoch; if (strchr(tmp, ':') && strchr(tmp, '-')) { if (TSDB_CODE_SUCCESS != taosParseTime( - tmp, &tmpEpoch, strlen(tmp), TSDB_TIME_PRECISION_MILLI, 0)) { + tmp, &tmpEpoch, strlen(tmp), TSDB_TIME_PRECISION_MILLI, 0)) { fprintf(stderr, "Input end time error!\n"); + free(tmp); return; } } else { tmpEpoch = atoll(tmp); } - + sprintf(argv[i], "%"PRId64"", tmpEpoch); debugPrint("%s() LN%d, tmp is: %s, argv[%d]: %s\n", - __func__, __LINE__, tmp, i, argv[i]); + __func__, __LINE__, tmp, i, argv[i]); + + free(tmp); } else { - fprintf(stderr, "Input end time error!\n"); - return; + errorPrint("%s() LN%d, strdup() cannot allocate memory\n", __func__, __LINE__); + exit(-1); } } else if (strcmp(argv[i], "-g") == 0) { arguments->debug_print = true; From c9279cdc115636da78bf041323ad464dbaaf1070 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 6 Apr 2021 17:04:31 +0800 Subject: [PATCH 085/178] refactor rpc --- src/rpc/src/rpcTcp.c | 155 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 143 insertions(+), 12 deletions(-) diff --git a/src/rpc/src/rpcTcp.c b/src/rpc/src/rpcTcp.c index 3162ab2e4c..286ed223c7 100644 --- a/src/rpc/src/rpcTcp.c +++ b/src/rpc/src/rpcTcp.c @@ -21,6 +21,13 @@ #include "rpcLog.h" #include "rpcHead.h" #include "rpcTcp.h" +#include "tlist.h" + +typedef struct SConnItem { + SOCKET fd; + uint32_t ip; + uint16_t port; +} SConnItem; typedef struct SFdObj { void *signature; @@ -38,6 +45,12 @@ typedef struct SThreadObj { pthread_t thread; SFdObj * pHead; pthread_mutex_t mutex; + // receive the notify from dispatch thread + + int notifyReceiveFd; + int notifySendFd; + SList *connQueue; + uint32_t ip; bool stop; EpollFd pollFd; @@ -69,6 +82,7 @@ typedef struct { } SServerObj; static void *taosProcessTcpData(void *param); +static void *taosProcessServerTcpData(void *param); static SFdObj *taosMallocFdObj(SThreadObj *pThreadObj, SOCKET fd); static void taosFreeFdObj(SFdObj *pFdObj); static void taosReportBrokenLink(SFdObj *pFdObj); @@ -124,6 +138,7 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread tstrncpy(pThreadObj->label, label, sizeof(pThreadObj->label)); pThreadObj->shandle = shandle; pThreadObj->stop = false; + pThreadObj->connQueue = tdListNew(sizeof(SConnItem)); } // initialize mutex, thread, fd which may fail @@ -142,7 +157,25 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread break; } - code = pthread_create(&(pThreadObj->thread), &thattr, taosProcessTcpData, (void *)(pThreadObj)); + int fds[2]; + if (pipe(fds)) { + tError("%s failed to create pipe", label); + code = -1; + break; + } + + pThreadObj->notifyReceiveFd = fds[0]; + pThreadObj->notifySendFd = fds[1]; + struct epoll_event event; + event.events = EPOLLIN | EPOLLRDHUP; + event.data.fd = pThreadObj->notifyReceiveFd; + if (epoll_ctl(pThreadObj->pollFd, 
EPOLL_CTL_ADD, pThreadObj->notifyReceiveFd , &event) < 0) { + tError("%s failed to create pipe", label); + code = -1; + break; + } + + code = pthread_create(&(pThreadObj->thread), &thattr, taosProcessServerTcpData, (void *)(pThreadObj)); if (code != 0) { tError("%s failed to create TCP process data thread(%s)", label, strerror(errno)); break; @@ -275,17 +308,12 @@ static void *taosAcceptTcpConnection(void *arg) { // pick up the thread to handle this connection pThreadObj = pServerObj->pThreadObj[threadId]; - SFdObj *pFdObj = taosMallocFdObj(pThreadObj, connFd); - if (pFdObj) { - pFdObj->ip = caddr.sin_addr.s_addr; - pFdObj->port = htons(caddr.sin_port); - tDebug("%s new TCP connection from %s:%hu, fd:%d FD:%p numOfFds:%d", pServerObj->label, - taosInetNtoa(caddr.sin_addr), pFdObj->port, connFd, pFdObj, pThreadObj->numOfFds); - } else { - taosCloseSocket(connFd); - tError("%s failed to malloc FdObj(%s) for connection from:%s:%hu", pServerObj->label, strerror(errno), - taosInetNtoa(caddr.sin_addr), htons(caddr.sin_port)); - } + pthread_mutex_lock(&(pThreadObj->mutex)); + SConnItem item = {.fd = connFd, .ip = caddr.sin_addr.s_addr, .port = htons(caddr.sin_port)}; + tdListAppend(pThreadObj->connQueue, &item); + pthread_mutex_unlock(&(pThreadObj->mutex)); + + write(pThreadObj->notifySendFd, "", 1); // pick up next thread for next connection threadId++; @@ -591,6 +619,109 @@ static void *taosProcessTcpData(void *param) { return NULL; } +static void *taosProcessServerTcpData(void *param) { + SThreadObj *pThreadObj = param; + SFdObj *pFdObj; + struct epoll_event events[maxEvents]; + SRecvInfo recvInfo; + + char bb[1]; +#ifdef __APPLE__ + taos_block_sigalrm(); +#endif // __APPLE__ + while (1) { + int fdNum = epoll_wait(pThreadObj->pollFd, events, maxEvents, TAOS_EPOLL_WAIT_TIME); + if (pThreadObj->stop) { + tDebug("%s TCP thread get stop event, exiting...", pThreadObj->label); + break; + } + if (fdNum < 0) continue; + + for (int i = 0; i < fdNum; ++i) { + if (events[i].data.fd == pThreadObj->notifyReceiveFd) { + if (events[i].events & EPOLLIN) { + read(pThreadObj->notifyReceiveFd, bb, 1); + + pthread_mutex_lock(&(pThreadObj->mutex)); + SListNode *head = tdListPopHead(pThreadObj->connQueue); + pthread_mutex_unlock(&(pThreadObj->mutex)); + + SConnItem item = {0}; + tdListNodeGetData(pThreadObj->connQueue, head, &item); + tfree(head); + + // register fd on epoll + SFdObj *pFdObj = taosMallocFdObj(pThreadObj, item.fd); + if (pFdObj) { + pFdObj->ip = item.ip; + pFdObj->port = item.port; + tDebug("%s new TCP connection from %u:%hu, fd:%d FD:%p numOfFds:%d", pThreadObj->label, + pFdObj->ip, pFdObj->port, item.fd, pFdObj, pThreadObj->numOfFds); + } else { + taosCloseSocket(item.fd); + tError("%s failed to malloc FdObj(%s) for connection from:%u:%hu", pThreadObj->label, strerror(errno), + pFdObj->ip, pFdObj->port); + } + } + continue; + } + pFdObj = events[i].data.ptr; + + if (events[i].events & EPOLLERR) { + tDebug("%s %p FD:%p epoll errors", pThreadObj->label, pFdObj->thandle, pFdObj); + taosReportBrokenLink(pFdObj); + continue; + } + + if (events[i].events & EPOLLRDHUP) { + tDebug("%s %p FD:%p RD hang up", pThreadObj->label, pFdObj->thandle, pFdObj); + taosReportBrokenLink(pFdObj); + continue; + } + + if (events[i].events & EPOLLHUP) { + tDebug("%s %p FD:%p hang up", pThreadObj->label, pFdObj->thandle, pFdObj); + taosReportBrokenLink(pFdObj); + continue; + } + + if (taosReadTcpData(pFdObj, &recvInfo) < 0) { + shutdown(pFdObj->fd, SHUT_WR); + continue; + } + + pFdObj->thandle = 
(*(pThreadObj->processData))(&recvInfo); + if (pFdObj->thandle == NULL) taosFreeFdObj(pFdObj); + } + + if (pThreadObj->stop) break; + } + + if (pThreadObj->connQueue) { + pThreadObj->connQueue = tdListFree(pThreadObj->connQueue); + } + // close pipe + close(pThreadObj->notifySendFd); + close(pThreadObj->notifyReceiveFd); + + if (pThreadObj->pollFd >=0) { + EpollClose(pThreadObj->pollFd); + pThreadObj->pollFd = -1; + } + + while (pThreadObj->pHead) { + SFdObj *pFdObj = pThreadObj->pHead; + pThreadObj->pHead = pFdObj->next; + taosReportBrokenLink(pFdObj); + } + + pthread_mutex_destroy(&(pThreadObj->mutex)); + tDebug("%s TCP thread exits ...", pThreadObj->label); + tfree(pThreadObj); + + return NULL; +} + static SFdObj *taosMallocFdObj(SThreadObj *pThreadObj, SOCKET fd) { struct epoll_event event; From 41b9b98e3c4e11df8492335516be7d3083d76af1 Mon Sep 17 00:00:00 2001 From: wu champion Date: Tue, 6 Apr 2021 18:06:59 +0800 Subject: [PATCH 086/178] [TD-3652] add case for TD-3652 to resolve TD-3590 --- tests/pytest/fulltest.sh | 2 + tests/pytest/query/queryStddevWithGroupby.py | 68 ++++++++++++++++++++ 2 files changed, 70 insertions(+) create mode 100644 tests/pytest/query/queryStddevWithGroupby.py diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 7ff41b13a6..3266a1ab8f 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -22,6 +22,7 @@ python3 ./test.py -f insert/alterTableAndInsert.py python3 ./test.py -f insert/insertIntoTwoTables.py python3 ./test.py -f insert/before_1970.py python3 bug2265.py +python3 ./test.py -f insert/bug3654.py #table python3 ./test.py -f table/alter_wal0.py @@ -216,6 +217,7 @@ python3 ./test.py -f query/floatCompare.py python3 ./test.py -f query/query1970YearsAf.py python3 ./test.py -f query/bug3351.py python3 ./test.py -f query/bug3375.py +python3 ./test.py -f query/queryStddevWithGroupby.py diff --git a/tests/pytest/query/queryStddevWithGroupby.py b/tests/pytest/query/queryStddevWithGroupby.py new file mode 100644 index 0000000000..aee88ca9c5 --- /dev/null +++ b/tests/pytest/query/queryStddevWithGroupby.py @@ -0,0 +1,68 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
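The rpcTcp refactor above changes who registers a new connection: the accept thread now only appends an SConnItem to the worker's queue and writes one byte into a pipe whose read end sits in that worker's epoll set; the worker wakes, drains the byte, pops the item, and calls taosMallocFdObj itself. Reduced to its core, and assuming Linux epoll (an illustrative sketch, not code from the patch):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/epoll.h>

    int main(void) {
      int fds[2];
      if (pipe(fds) != 0) return 1;

      int efd = epoll_create1(0);
      struct epoll_event ev = {0};
      ev.events = EPOLLIN | EPOLLRDHUP;
      ev.data.fd = fds[0];                    /* read end joins the worker's epoll set */
      epoll_ctl(efd, EPOLL_CTL_ADD, fds[0], &ev);

      write(fds[1], "", 1);                   /* dispatcher: one byte per queued item */

      struct epoll_event out;
      int n = epoll_wait(efd, &out, 1, 1000); /* worker: blocks until notified */
      if (n == 1 && out.data.fd == fds[0]) {
        char b;
        read(fds[0], &b, 1);                  /* drain, then pop one queue item */
        printf("worker woken by dispatcher\n");
      }

      close(fds[0]);
      close(fds[1]);
      close(efd);
      return 0;
    }

Letting the owning thread issue the epoll_ctl() call appears to be the point of the refactor: the shared mutex then only has to cover the short queue append and pop, not fd registration.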
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def querysqls(self): + tdSql.query("select stddev(c1) from t10 group by c1") + tdSql.checkRows(6) + tdSql.checkData(0, 0, 0) + tdSql.checkData(1, 0, 0) + tdSql.checkData(2, 0, 0) + tdSql.checkData(3, 0, 0) + tdSql.checkData(4, 0, 0) + tdSql.checkData(5, 0, 0) + tdSql.query("select stddev(c2) from t10") + tdSql.checkData(0, 0, 0.5) + + def run(self): + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 36500") + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step1:create table && insert data") + tdSql.execute("create stable stb1 (ts timestamp , c1 int ,c2 float) tags(t1 int)") + tdSql.execute("create table t10 using stb1 tags(1)") + tdSql.execute("insert into t10 values ('1969-12-31 00:00:00.000', 2,1)") + tdSql.execute("insert into t10 values ('1970-01-01 00:00:00.000', 3,1)") + tdSql.execute("insert into t10 values (0, 4,1)") + tdSql.execute("insert into t10 values (now-18725d, 1,2)") + tdSql.execute("insert into t10 values ('2021-04-06 00:00:00.000', 5,2)") + tdSql.execute("insert into t10 values (now+1d,6,2)") + + tdLog.printNoPrefix("==========step2:query and check") + self.querysqls() + + tdLog.printNoPrefix("==========step3:after wal,check again") + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + self.querysqls() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file From 8ef4764a12dec4b462d001d09a73c9f72a48a089 Mon Sep 17 00:00:00 2001 From: plum-lihui Date: Tue, 6 Apr 2021 18:08:56 +0800 Subject: [PATCH 087/178] remove useless file --- a.txt | 4 ---- 1 file changed, 4 deletions(-) delete mode 100644 a.txt diff --git a/a.txt b/a.txt deleted file mode 100644 index 6367d1243d..0000000000 --- a/a.txt +++ /dev/null @@ -1,4 +0,0 @@ -AAAAAA -BBBBB -CCCC -DDD From 9f858447f74317054e0e67e19b4fccc8ec413702 Mon Sep 17 00:00:00 2001 From: wu champion Date: Tue, 6 Apr 2021 18:09:13 +0800 Subject: [PATCH 088/178] Update fulltest.sh --- tests/pytest/fulltest.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 3266a1ab8f..9d52b76a67 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -22,7 +22,6 @@ python3 ./test.py -f insert/alterTableAndInsert.py python3 ./test.py -f insert/insertIntoTwoTables.py python3 ./test.py -f insert/before_1970.py python3 bug2265.py -python3 ./test.py -f insert/bug3654.py #table python3 ./test.py -f table/alter_wal0.py From 7db5c29d1ee95bd91f9976ecf2f7211167a9c01c Mon Sep 17 00:00:00 2001 From: wu champion Date: Tue, 6 Apr 2021 18:18:10 +0800 Subject: [PATCH 089/178] [TD-3295] add case for TD-3295 --- tests/pytest/fulltest.sh | 2 +- tests/pytest/query/queryJoin10tables.py | 201 ++++++++++++++++++++++++ 2 files changed, 202 insertions(+), 1 deletion(-) create mode 100644 
tests/pytest/query/queryJoin10tables.py diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 7ff41b13a6..c2db4b3503 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -216,7 +216,7 @@ python3 ./test.py -f query/floatCompare.py python3 ./test.py -f query/query1970YearsAf.py python3 ./test.py -f query/bug3351.py python3 ./test.py -f query/bug3375.py - +python3 ./test.py -f query/queryJoin10tables.py #stream diff --git a/tests/pytest/query/queryJoin10tables.py b/tests/pytest/query/queryJoin10tables.py new file mode 100644 index 0000000000..01a7397d44 --- /dev/null +++ b/tests/pytest/query/queryJoin10tables.py @@ -0,0 +1,201 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import taos +import sys + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + + def createtable(self): + + # create stbles + tdSql.execute("create table if not exists stb1 (ts timestamp, c1 int) tags(t11 int, t12 int)") + tdSql.execute("create table if not exists stb2 (ts timestamp, c2 int) tags(t21 int, t22 int)") + tdSql.execute("create table if not exists stb3 (ts timestamp, c3 int) tags(t31 int, t32 int)") + tdSql.execute("create table if not exists stb4 (ts timestamp, c4 int) tags(t41 int, t42 int)") + tdSql.execute("create table if not exists stb5 (ts timestamp, c5 int) tags(t51 int, t52 int)") + tdSql.execute("create table if not exists stb6 (ts timestamp, c6 int) tags(t61 int, t62 int)") + tdSql.execute("create table if not exists stb7 (ts timestamp, c7 int) tags(t71 int, t72 int)") + tdSql.execute("create table if not exists stb8 (ts timestamp, c8 int) tags(t81 int, t82 int)") + tdSql.execute("create table if not exists stb9 (ts timestamp, c9 int) tags(t91 int, t92 int)") + tdSql.execute("create table if not exists stb10 (ts timestamp, c10 int) tags(t101 int, t102 int)") + tdSql.execute("create table if not exists stb11 (ts timestamp, c11 int) tags(t111 int, t112 int)") + + # create normal tables + tdSql.execute("create table t10 using stb1 tags(0, 9)") + tdSql.execute("create table t11 using stb1 tags(1, 8)") + tdSql.execute("create table t12 using stb1 tags(2, 7)") + tdSql.execute("create table t13 using stb1 tags(3, 6)") + tdSql.execute("create table t14 using stb1 tags(4, 5)") + tdSql.execute("create table t15 using stb1 tags(5, 4)") + tdSql.execute("create table t16 using stb1 tags(6, 3)") + tdSql.execute("create table t17 using stb1 tags(7, 2)") + tdSql.execute("create table t18 using stb1 tags(8, 1)") + tdSql.execute("create table t19 using stb1 tags(9, 0)") + tdSql.execute("create table t110 using stb1 tags(10, 10)") + + tdSql.execute("create table t20 using stb2 tags(0, 9)") + tdSql.execute("create table t21 using stb2 tags(1, 8)") + tdSql.execute("create table t22 using stb2 tags(2, 7)") + + tdSql.execute("create table t30 using stb3 tags(0, 9)") + tdSql.execute("create table t31 using stb3 tags(1, 8)") + tdSql.execute("create table t32 
using stb3 tags(2, 7)") + + def inserttable(self): + for i in range(100): + if i<60: + tdSql.execute(f"insert into t20 values('2020-10-01 00:00:{i}.000', {i})") + tdSql.execute(f"insert into t21 values('2020-10-01 00:00:{i}.000', {i})") + tdSql.execute(f"insert into t22 values('2020-10-01 00:00:{i}.000', {i})") + tdSql.execute(f"insert into t30 values('2020-10-01 00:00:{i}.000', {i})") + tdSql.execute(f"insert into t31 values('2020-10-01 00:00:{i}.000', {i})") + tdSql.execute(f"insert into t32 values('2020-10-01 00:00:{i}.000', {i})") + else: + tdSql.execute(f"insert into t20 values('2020-10-01 00:01:{i-60}.000', {i})") + tdSql.execute(f"insert into t21 values('2020-10-01 00:01:{i-60}.000', {i})") + tdSql.execute(f"insert into t22 values('2020-10-01 00:01:{i-60}.000', {i})") + tdSql.execute(f"insert into t30 values('2020-10-01 00:01:{i-60}.000', {i})") + tdSql.execute(f"insert into t31 values('2020-10-01 00:01:{i-60}.000', {i})") + tdSql.execute(f"insert into t32 values('2020-10-01 00:01:{i-60}.000', {i})") + for j in range(11): + if i<60: + tdSql.execute(f"insert into t1{j} values('2020-10-01 00:00:{i}.000', {i})") + else: + tdSql.execute(f"insert into t1{j} values('2020-10-01 00:01:{i-60}.000', {i})") + + def queryjointable(self): + tdSql.error( + '''select from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19 + where t10.ts=t11.ts and t10.ts=t12.ts and t10.ts=t13.ts and t10.ts=t14.ts and t10.ts=t15.ts + and t10.ts=t16.ts and t10.ts=t17.ts and t10.ts=t18.ts and t10.ts=t19.ts''' + ) + tdSql.error("select * from t10 where t10.ts=t11.ts") + tdSql.error("select * from where t10.ts=t11.ts") + tdSql.error("select * from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19") + tdSql.error("select * from stb1, stb2, stb3 where stb1.ts=stb2.ts and stb1.ts=stb3.ts") + tdSql.error("select * from stb1, stb2, stb3 where stb1.t11=stb2.t21 and stb1.t11=stb3.t31") + tdSql.error("select * from stb1, stb2, stb3") + tdSql.error( + '''select * from stb1 + join stb2 on stb1.ts=stb2.ts and stb1.t11=stb2.t21 + join stb3 on stb1.ts=stb3.ts and stb1.t11=stb3.t31''' + ) + tdSql.error("select * from t10 join t11 on t10.ts=t11.ts join t12 on t11.ts=t12.ts") + tdSql.query( + '''select * from stb1,stb2,stb3 + where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.t11=stb2.t21 and stb1.t11 =stb3.t31''' + ) + tdSql.checkRows(300) + tdSql.query("select * from t11,t12,t13 where t11.ts=t12.ts and t11.ts=t13.ts") + tdSql.checkRows(100) + tdSql.error("selec * from t11,t12,t13 where t11.ts=t12.ts and t11.ts=t13.ts") + tdSql.error("select * form t11,t12,t13 where t11.ts=t12.ts and t11.ts=t13.ts") + tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts") + tdSql.error("select * from t11,t12,t13 when t11.ts <> t12.ts and t11.ts=t13.ts") + tdSql.error("select * from t11,t12,t13 when t11.ts != t12.ts and t11.ts=t13.ts") + tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts or t11.ts=t13.ts") + tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts=t13.ts") + tdSql.error("select * from t11,t12,t13 when t11.c1=t12.c2 and t11.c1=t13.c3") + tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.c3 and t11.c1=t13.ts") + tdSql.error("select ts from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts") + tdSql.error("select * from t11,t12,t13 when t11.ts=ts and t11.ts=t13.ts") + tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts and ts>100") + tdSql.error("select * from t11,t12,stb1 when t11.ts=t12.ts and t11.ts=stb1.ts") + tdSql.error("select t14.ts from t11,t12,t13 when t11.ts=t12.ts and 
t11.ts=t13.ts") + tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts1") + tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t14.ts") + tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts") + tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts and t11.c1=t13.c3") + tdSql.error( + '''select * from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19,t20 + where t10.ts=t11.ts and t10.ts=t12.ts and t10.ts=t13.ts and t10.ts=t14.ts and t10.ts=t15.ts + and t10.ts=t16.ts and t10.ts=t17.ts and t10.ts=t18.ts and t10.ts=t19.ts and t10.ts=t20.ts''' + ) + tdSql.error( + '''select * from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19,t20 + where t10.ts=t11.ts and t10.ts=t12.ts and t10.ts=t13.ts and t10.ts=t14.ts and t10.ts=t15.ts + and t10.ts=t16.ts and t10.ts=t17.ts and t10.ts=t18.ts and t10.ts=t19.ts''' + ) + tdSql.error( + '''select * from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19 + where t10.ts=t11.ts and t10.ts=t12.ts and t10.ts=t13.ts and t10.ts=t14.ts and t10.ts=t15.ts + and t10.ts=t16.ts and t10.ts=t17.ts and t10.ts=t18.ts and t10.ts=t19.ts and t10.c1=t19.c1''' + ) + tdSql.error( + '''select * from stb1,stb2,stb3 + where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.t11=stb2.t21''' + ) + tdSql.error( + '''select * from stb1,stb2,stb3 + where stb1.ts=stb2.ts and stb1.t11=stb2.t21 and stb1.t11=stb3.t31''' + ) + tdSql.error( + '''select * from stb1,stb2,stb3 + where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.t11=stb2.t21 and stb1.t11=stb3.t31 + and stb1.t12=stb3=t32''' + ) + tdSql.error( + '''select * from stb1,stb2,stb3,stb4,stb5,stb6,stb7,stb8,stb9,stb10,stb11 + where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.ts=stb4.ts and stb1.ts=stb5.ts and stb1.ts=stb6.ts + and stb1.ts=stb7.ts and stb1.ts=stb8.ts and stb1.ts=stb9.ts and stb1.ts=stb10.ts and stb1.ts=stb11.ts + and stb1.t11=stb2.t21 and stb1.t11=stb3.t31 and stb1.t11=stb4.t41 and stb1.t11=stb5.t51 + and stb1.t11=stb6.t61 and stb1.t11=stb7.t71 and stb1.t11=stb8.t81 and stb1.t11=stb9.t91 + and stb1.t11=stb10.t101 and stb1.t11=stb11.t111''' + ) + tdSql.error( + '''select * from stb1,stb2,stb3,stb4,stb5,stb6,stb7,stb8,stb9,stb10 + where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.ts=stb4.ts and stb1.ts=stb5.ts and stb1.ts=stb6.ts + and stb1.ts=stb7.ts and stb1.ts=stb8.ts and stb1.ts=stb9.ts and stb1.ts=stb10.ts and stb1.t11=stb2.t21 + and stb1.t11=stb3.t31 and stb1.t11=stb4.t41 and stb1.t11=stb5.t51 and stb1.t11=stb6.t61 + and stb1.t11=stb7.t71 and stb1.t11=stb8.t81 and stb1.t11=stb9.t91 and stb1.t11=stb10.t101 + and stb1.t12=stb11.t102''' + ) + + def run(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + self.createtable() + + tdLog.printNoPrefix("==========step2:insert data") + self.inserttable() + + tdLog.printNoPrefix("==========step3:query timestamp type") + self.queryjointable() + + # after wal and sync, check again + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + + tdLog.printNoPrefix("==========step4:query again after wal") + self.queryjointable() + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file From 88973b54606c2fa14ece5ccacd2a96fbaee3da62 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Tue, 6 Apr 2021 18:26:55 +0800 Subject: [PATCH 090/178] TD-3675 --- src/query/src/qAggMain.c | 12 ++++++++---- 1 file changed, 8 
insertions(+), 4 deletions(-) diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index f18d093b89..e47545da95 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -2771,14 +2771,16 @@ static void percentile_function(SQLFunctionCtx *pCtx) { SPercentileInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); if (pCtx->currentStage == REPEAT_SCAN && pInfo->stage == 0) { + pInfo->stage += 1; + // all data are null, set it completed if (pInfo->numOfElems == 0) { pResInfo->complete = true; + + return; } else { pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval); } - - pInfo->stage += 1; } // the first stage, only acquire the min/max value @@ -2857,14 +2859,16 @@ static void percentile_function_f(SQLFunctionCtx *pCtx, int32_t index) { SPercentileInfo *pInfo = (SPercentileInfo *)GET_ROWCELL_INTERBUF(pResInfo); if (pCtx->currentStage == REPEAT_SCAN && pInfo->stage == 0) { + pInfo->stage += 1; + // all data are null, set it completed if (pInfo->numOfElems == 0) { pResInfo->complete = true; + + return; } else { pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval); } - - pInfo->stage += 1; } if (pInfo->stage == 0) { From 2f1bd5855c94e2738da024293bd4f999eba82d62 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 7 Apr 2021 09:14:20 +0800 Subject: [PATCH 091/178] fix changing target branch --- Jenkinsfile | 44 +++++++++++++++++++++++++++++++++++--------- 1 file changed, 35 insertions(+), 9 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 4c9da9a928..25bf86d9cc 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -6,6 +6,7 @@ node { } def skipstage=0 + def abortPreviousBuilds() { def currentJobName = env.JOB_NAME def currentBuildNumber = env.BUILD_NUMBER.toInteger() @@ -24,7 +25,7 @@ def abortPreviousBuilds() { build.doKill() //doTerm(),doKill(),doTerm() } } -//abort previous build +// abort previous build abortPreviousBuilds() def abort_previous(){ def buildNumber = env.BUILD_NUMBER as int @@ -32,20 +33,30 @@ def abort_previous(){ milestone(buildNumber) } def pre_test(){ - + + sh ''' sudo rmtaos || echo "taosd has not installed" ''' sh ''' - + killall -9 taosd ||echo "no taosd running" + killall -9 gdb || echo "no gdb running" cd ${WKC} - git checkout develop - git reset --hard HEAD~10 >/dev/null + git reset --hard HEAD~10 >/dev/null + ''' + script { + if (env.CHANGE_TARGET == 'master') { + sh 'git checkout master' + } + else { + sh 'git checkout develop' + } + } + sh''' git pull >/dev/null git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD - git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD develop)|grep -v -E '.*md|//src//connector|Jenkinsfile' - find ${WKC}/tests/pytest -name \'*\'.sql -exec rm -rf {} \\; + git clean -dfx cd ${WK} git reset --hard HEAD~10 ''' @@ -62,7 +73,7 @@ def pre_test(){ cd ${WK} export TZ=Asia/Harbin date - rm -rf ${WK}/debug + git clean -dfx mkdir debug cd debug cmake .. 
> /dev/null @@ -88,6 +99,10 @@ pipeline { changeRequest() } steps { + script{ + abort_previous() + abortPreviousBuilds() + } sh''' cp -r ${WORKSPACE} ${WORKSPACE}.tes cd ${WORKSPACE}.tes @@ -189,6 +204,12 @@ pipeline { rm -rf /var/log/taos/* ./handle_crash_gen_val_log.sh ''' + sh ''' + cd ${WKC}/tests/pytest + rm -rf /var/lib/taos/* + rm -rf /var/log/taos/* + ./handle_taosd_val_log.sh + ''' timeout(time: 45, unit: 'MINUTES'){ sh ''' date @@ -219,6 +240,11 @@ pipeline { cd ${WKC}/tests ./test-all.sh b3fq date''' + sh ''' + date + cd ${WKC}/tests + ./test-all.sh full example + date''' } } } @@ -276,7 +302,7 @@ pipeline { date cd ${WKC}/tests ./test-all.sh b7fq - date''' + date''' } } } From 1d1c987bac210625c6dccbe0e2e1d275a45c249f Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 7 Apr 2021 09:20:32 +0800 Subject: [PATCH 092/178] fix changing targe branch --- Jenkinsfile | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 2c64e71f51..25bf86d9cc 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -42,8 +42,17 @@ def pre_test(){ killall -9 taosd ||echo "no taosd running" killall -9 gdb || echo "no gdb running" cd ${WKC} - git reset --hard HEAD~10 >/dev/null - git checkout develop + git reset --hard HEAD~10 >/dev/null + ''' + script { + if (env.CHANGE_TARGET == 'master') { + sh 'git checkout master' + } + else { + sh 'git checkout develop' + } + } + sh''' git pull >/dev/null git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD From 65ebf17311f02bbea93e9b06fc289f886a5c5b9b Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 7 Apr 2021 09:34:13 +0800 Subject: [PATCH 093/178] fix --- Jenkinsfile | 41 +++++++++++++++++++++++++++++------------ 1 file changed, 29 insertions(+), 12 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index bac0cce33b..25bf86d9cc 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -42,15 +42,33 @@ def pre_test(){ killall -9 taosd ||echo "no taosd running" killall -9 gdb || echo "no gdb running" cd ${WKC} - git reset --hard HEAD~10 >/dev/null - git checkout develop + git reset --hard HEAD~10 >/dev/null + ''' + script { + if (env.CHANGE_TARGET == 'master') { + sh 'git checkout master' + } + else { + sh 'git checkout develop' + } + } + sh''' git pull >/dev/null git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD git clean -dfx cd ${WK} git reset --hard HEAD~10 - git checkout develop + ''' + script { + if (env.CHANGE_TARGET == 'master') { + sh 'git checkout master' + } + else { + sh 'git checkout develop' + } + } + sh ''' git pull >/dev/null cd ${WK} export TZ=Asia/Harbin @@ -92,7 +110,8 @@ pipeline { git pull git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD - ''' + ''' + script{ env.skipstage=sh(script:"cd ${WORKSPACE}.tes && git --no-pager diff --name-only FETCH_HEAD develop|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0 ",returnStdout:true) } @@ -185,14 +204,12 @@ pipeline { rm -rf /var/log/taos/* ./handle_crash_gen_val_log.sh ''' - catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { - sh ''' - cd ${WKC}/tests/pytest - rm -rf /var/lib/taos/* - rm -rf /var/log/taos/* - ./handle_taosd_val_log.sh - ''' - } + sh ''' + cd ${WKC}/tests/pytest + rm -rf /var/lib/taos/* + rm -rf /var/log/taos/* + ./handle_taosd_val_log.sh + ''' timeout(time: 45, unit: 'MINUTES'){ sh ''' date From 517b77e903a2e6e35bc056be613fe0b97936cf73 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 7 Apr 2021 09:46:09 +0800 Subject: [PATCH 094/178] fix --- 
Jenkinsfile | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 25bf86d9cc..1931419a1b 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -46,13 +46,20 @@ def pre_test(){ ''' script { if (env.CHANGE_TARGET == 'master') { - sh 'git checkout master' + sh ''' + cd ${WKC} + git checkout master + ''' } else { - sh 'git checkout develop' + sh ''' + cd ${WKC} + git checkout develop + ''' } } sh''' + cd ${WKC} git pull >/dev/null git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD @@ -62,15 +69,21 @@ def pre_test(){ ''' script { if (env.CHANGE_TARGET == 'master') { - sh 'git checkout master' + sh ''' + cd ${WKC} + git checkout master + ''' } else { - sh 'git checkout develop' + sh ''' + cd ${WKC} + git checkout develop + ''' } } sh ''' - git pull >/dev/null cd ${WK} + git pull >/dev/null export TZ=Asia/Harbin date git clean -dfx From 5d4d10d0bb73d18b0ffc36e54d6a19d7fd067f1f Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 7 Apr 2021 09:46:36 +0800 Subject: [PATCH 095/178] fix --- Jenkinsfile | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 25bf86d9cc..1931419a1b 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -46,13 +46,20 @@ def pre_test(){ ''' script { if (env.CHANGE_TARGET == 'master') { - sh 'git checkout master' + sh ''' + cd ${WKC} + git checkout master + ''' } else { - sh 'git checkout develop' + sh ''' + cd ${WKC} + git checkout develop + ''' } } sh''' + cd ${WKC} git pull >/dev/null git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD @@ -62,15 +69,21 @@ def pre_test(){ ''' script { if (env.CHANGE_TARGET == 'master') { - sh 'git checkout master' + sh ''' + cd ${WKC} + git checkout master + ''' } else { - sh 'git checkout develop' + sh ''' + cd ${WKC} + git checkout develop + ''' } } sh ''' - git pull >/dev/null cd ${WK} + git pull >/dev/null export TZ=Asia/Harbin date git clean -dfx From ef0b6eade212a5fcf3e211844514d45635046d3a Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 7 Apr 2021 09:58:37 +0800 Subject: [PATCH 096/178] fix --- Jenkinsfile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 1931419a1b..dfe9ed4389 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -70,13 +70,13 @@ def pre_test(){ script { if (env.CHANGE_TARGET == 'master') { sh ''' - cd ${WKC} + cd ${WK} git checkout master ''' } else { sh ''' - cd ${WKC} + cd ${WK} git checkout develop ''' } @@ -84,6 +84,7 @@ def pre_test(){ sh ''' cd ${WK} git pull >/dev/null + export TZ=Asia/Harbin date git clean -dfx From f9f08a5ec21a8045271397dcc341ffb0aa26ead5 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 7 Apr 2021 09:59:17 +0800 Subject: [PATCH 097/178] fix --- Jenkinsfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 1931419a1b..e5b0263184 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -70,13 +70,13 @@ def pre_test(){ script { if (env.CHANGE_TARGET == 'master') { sh ''' - cd ${WKC} + cd ${WK} git checkout master ''' } else { sh ''' - cd ${WKC} + cd ${WK} git checkout develop ''' } From edcb9d5f0259840a30199ff823a914e7bf59b1a0 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 7 Apr 2021 12:47:02 +0800 Subject: [PATCH 098/178] Hotfix/sangshuduo/td 3607 taosdemo buffer overflow (#5706) * [TD-3607] : fix taosdemo buffer overflow. * [TD-3607] : taosdemo buffer overflow. add tmp buffer. * [TD-3607] : taosdemo buffer overflow. 
fix data generation. * [TD-3607] : taosdemo buffer overflow. fix normal table writting. * [TD-3607] : taosdemo buffer overflow. remove tail spaces. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table test case. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table case. * [TD-3607] : taosdemo buffer overflow. adjust limit offset count warning. * [TD-3607] : taosdemo buffer overflow. add more logic for child tables exist. * [TD-3607] : taosdemo buffer overflow. create database if database be dropped only. * [TD-3607] : fix taosdemo buffer overflow. adjust limit and offset test cases. * [TD-3607] : taosdemo buffer overflow. adjust sample data test case. * [TD-3607]: taosdemo limit and offset. if limit+offset > count Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index edd7b86b1b..713b39d98b 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -3527,18 +3527,18 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } cJSON* childTbl_limit = cJSON_GetObjectItem(stbInfo, "childtable_limit"); - if (childTbl_limit) { + if ((childTbl_limit) && (g_Dbs.db[i].drop != true)) { if (childTbl_limit->type != cJSON_Number) { printf("ERROR: failed to read json, childtable_limit\n"); goto PARSE_OVER; } g_Dbs.db[i].superTbls[j].childTblLimit = childTbl_limit->valueint; } else { - g_Dbs.db[i].superTbls[j].childTblLimit = -1; // select ... limit -1 means all query result + g_Dbs.db[i].superTbls[j].childTblLimit = -1; // select ... limit -1 means all query result, drop = yes mean all table need recreate, limit value is invalid. } cJSON* childTbl_offset = cJSON_GetObjectItem(stbInfo, "childtable_offset"); - if (childTbl_offset) { + if ((childTbl_offset) && (g_Dbs.db[i].drop != true)) { if (childTbl_offset->type != cJSON_Number || 0 > childTbl_offset->valueint) { printf("ERROR: failed to read json, childtable_offset\n"); goto PARSE_OVER; @@ -5170,7 +5170,9 @@ static void startMultiThreadInsertData(int threads, char* db_name, if ((superTblInfo->childTblExists == TBL_ALREADY_EXISTS) && (superTblInfo->childTblOffset >= 0)) { - if (superTblInfo->childTblLimit < 0) { + if ((superTblInfo->childTblLimit < 0) + || ((superTblInfo->childTblOffset + superTblInfo->childTblLimit) + > (superTblInfo->childTblCount))) { superTblInfo->childTblLimit = superTblInfo->childTblCount - superTblInfo->childTblOffset; } From 891cf50ce9f425791aafb2c80472c1f670ae722d Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 7 Apr 2021 12:51:52 +0800 Subject: [PATCH 099/178] [TD-3579]: sort mnode syncCfg nodeInfo to fix leader election failure --- src/mnode/src/mnodeSdb.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c index 381cb11952..b470dd6359 100644 --- a/src/mnode/src/mnodeSdb.c +++ b/src/mnode/src/mnodeSdb.c @@ -315,6 +315,10 @@ void sdbUpdateAsync() { taosTmrReset(sdbUpdateSyncTmrFp, 200, NULL, tsMnodeTmr, &tsSdbTmr); } +static int node_cmp(const void *l, const void *r) { + return ((SNodeInfo *)l)->nodeId - ((SNodeInfo *)r)->nodeId; +} + int32_t sdbUpdateSync(void *pMnodes) { SMInfos *pMinfos = pMnodes; if (!mnodeIsRunning()) { @@ -382,6 +386,8 @@ int32_t sdbUpdateSync(void *pMnodes) { return TSDB_CODE_SUCCESS; } + qsort(syncCfg.nodeInfo, syncCfg.replica, sizeof(syncCfg.nodeInfo[0]), node_cmp); + sdbInfo("vgId:1, work as mnode, replica:%d", syncCfg.replica); for 
(int32_t i = 0; i < syncCfg.replica; ++i) { sdbInfo("vgId:1, mnode:%d, %s:%d", syncCfg.nodeInfo[i].nodeId, syncCfg.nodeInfo[i].nodeFqdn, @@ -1131,4 +1137,4 @@ static void *sdbWorkerFp(void *pWorker) { int32_t sdbGetReplicaNum() { return tsSdbMgmt.cfg.replica; -} \ No newline at end of file +} From b0d979ff326ac0e7ab5ad36e11f054c1be5b4e5f Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 7 Apr 2021 14:02:31 +0800 Subject: [PATCH 100/178] [TD-3607]: fix taosdemo limit and offset. (#5707) if offset+limit > count. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index edd7b86b1b..713b39d98b 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -3527,18 +3527,18 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } cJSON* childTbl_limit = cJSON_GetObjectItem(stbInfo, "childtable_limit"); - if (childTbl_limit) { + if ((childTbl_limit) && (g_Dbs.db[i].drop != true)) { if (childTbl_limit->type != cJSON_Number) { printf("ERROR: failed to read json, childtable_limit\n"); goto PARSE_OVER; } g_Dbs.db[i].superTbls[j].childTblLimit = childTbl_limit->valueint; } else { - g_Dbs.db[i].superTbls[j].childTblLimit = -1; // select ... limit -1 means all query result + g_Dbs.db[i].superTbls[j].childTblLimit = -1; // select ... limit -1 means all query result, drop = yes mean all table need recreate, limit value is invalid. } cJSON* childTbl_offset = cJSON_GetObjectItem(stbInfo, "childtable_offset"); - if (childTbl_offset) { + if ((childTbl_offset) && (g_Dbs.db[i].drop != true)) { if (childTbl_offset->type != cJSON_Number || 0 > childTbl_offset->valueint) { printf("ERROR: failed to read json, childtable_offset\n"); goto PARSE_OVER; @@ -5170,7 +5170,9 @@ static void startMultiThreadInsertData(int threads, char* db_name, if ((superTblInfo->childTblExists == TBL_ALREADY_EXISTS) && (superTblInfo->childTblOffset >= 0)) { - if (superTblInfo->childTblLimit < 0) { + if ((superTblInfo->childTblLimit < 0) + || ((superTblInfo->childTblOffset + superTblInfo->childTblLimit) + > (superTblInfo->childTblCount))) { superTblInfo->childTblLimit = superTblInfo->childTblCount - superTblInfo->childTblOffset; } From ba3fbea58c29778c78bb8c56a2cb4ec178e24ad0 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Wed, 7 Apr 2021 13:19:19 +0800 Subject: [PATCH 101/178] [TD-3676]: add test case --- tests/pytest/fulltest.sh | 2 + tests/pytest/topic/topicQuery.py | 82 ++++++++++++++++++++++++++++++++ 2 files changed, 84 insertions(+) create mode 100644 tests/pytest/topic/topicQuery.py diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index cc6eb719f2..1746f8d2db 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -258,6 +258,8 @@ python3 test.py -f subscribe/singlemeter.py #python3 test.py -f subscribe/stability.py python3 test.py -f subscribe/supertable.py +# topic +python3 ./test.py -f topic/topicQuery.py #======================p3-end=============== #======================p4-start=============== diff --git a/tests/pytest/topic/topicQuery.py b/tests/pytest/topic/topicQuery.py new file mode 100644 index 0000000000..d4f8c4127d --- /dev/null +++ b/tests/pytest/topic/topicQuery.py @@ -0,0 +1,82 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
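The mnodeSdb fix above is essentially the single qsort() call: once syncCfg.nodeInfo is sorted by nodeId, every mnode derives the same replica ordering, which is what the commit ties to the leader-election failure. A self-contained sketch of the same comparator pattern, where NodeInfo is a stand-in for the real SNodeInfo:

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { int nodeId; } NodeInfo;   /* stand-in for SNodeInfo */

    /* Subtraction works here because node ids are small and
     * non-negative; a general comparator would compare explicitly
     * to avoid signed overflow. */
    static int node_cmp(const void *l, const void *r) {
      return ((const NodeInfo *)l)->nodeId - ((const NodeInfo *)r)->nodeId;
    }

    int main(void) {
      NodeInfo nodes[] = {{3}, {1}, {2}};
      qsort(nodes, sizeof(nodes) / sizeof(nodes[0]), sizeof(nodes[0]), node_cmp);
      for (int i = 0; i < 3; i++) printf("%d ", nodes[i].nodeId);
      printf("\n");   /* 1 2 3: every replica sees the same order */
      return 0;
    }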
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1538548685000 + + def run(self): + tdSql.prepare() + + # test case for https://jira.taosdata.com:18080/browse/TD-3679 + print("==============step1") + tdSql.execute( + "create topic tq_test partitions 10") + tdSql.execute( + "insert into tq_test.p1(off, ts, content) values(0, %d, 'aaaa')" % self.ts) + tdSql.execute( + "insert into tq_test.p1(off, ts, content) values(1, %d, 'aaaa')" % (self.ts + 1)) + tdSql.execute( + "insert into tq_test.p1(off, ts, content) values(2, %d, 'aaaa')" % (self.ts + 2)) + tdSql.execute( + "insert into tq_test.p1(off, ts, content) values(3, %d, 'aaaa')" % (self.ts + 3)) + + print("==============step2") + + tdSql.query("select * from tq_test.p1") + tdSql.checkRows(4) + + tdSql.query("select * from tq_test.p1 where ts >= %d" % self.ts) + tdSql.checkRows(4) + + tdSql.query("select * from tq_test.p1 where ts > %d" % self.ts) + tdSql.checkRows(3) + + tdSql.query("select * from tq_test.p1 where ts = %d" % self.ts) + tdSql.checkRows(1) + + + tdSql.execute("use db") + tdSql.execute("create table test(ts timestamp, value int)") + tdSql.execute("insert into test values(%d, 1)" % self.ts) + tdSql.execute("insert into test values(%d, 1)" % (self.ts + 1)) + tdSql.execute("insert into test values(%d, 1)" % (self.ts + 2)) + tdSql.execute("insert into test values(%d, 1)" % (self.ts + 3)) + + tdSql.query("select * from test") + tdSql.checkRows(4) + + tdSql.query("select * from test where ts >= %d" % self.ts) + tdSql.checkRows(4) + + tdSql.query("select * from test where ts > %d" % self.ts) + tdSql.checkRows(3) + + tdSql.query("select * from test where ts = %d" % self.ts) + tdSql.checkRows(1) + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From 568a3ebc87a99e07e0e50633d97c44256861c037 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 7 Apr 2021 17:55:54 +0800 Subject: [PATCH 102/178] Hotfix/sangshuduo/td 3607 for master (#5712) * test * [TD-3677]: test pr message 1 * [TD-3671]change target branch * [TD-3677]: test pr message 2 * [TD-3677]: test pr message 3 * Hotfix/sangshuduo/td 3197 fix taosdemo coverity scan (#5688) * [TD-3197] : fix taosdemo coverity scan issues. * [TD-3197] : fix taosdemo coverity scan issue. fix subscribeTest pids uninitialized. * [TD-3197] : fix taosdemo coverity scan issues. * [TD-3197] : fix coverity scan issues. check super tbl info pointer. * [TD-3197] : fix coverity scan issues. move sub tbl query thread join into loop * [TD-3197] : fix coverity scan issues. remove unused variable * [TD-3197] : fix coverity scan issues. use more secure random library * [TD-3197] : fix coverity scan issues. use strncpy for more safe * [TD-3197] : fix taosdemo coverity scan issue. replace arc4random with rand(). * [TD-3197] : fix coverity scan issues. check stb info pointer for start time * [TD-3197] : fix coverity scan issues. fix strcpy vulnerability * [TD-3197] : fix taosdemo coverity scan issue. 
modify taosdemoTest2. try to check database continously. * [TD-3197] : taosdemo coverity scan issues. * [TD-3197] : fix memory leak when parsing arguments. * [TD-3197] : fix cmake strip arguments. * [TD-3197] : taosdemo coverity scan. fix cmake string manipulation. Co-authored-by: Shuduo Sang * remove useless file * fix changing target branch * fix * fix * [TD-3607]: fix taosdemo limit and offset. if offset+limit > count. * [TD-3607]: taosdemo limit and offset. if child tbl not exist, dont take limit and offset value. Co-authored-by: huili <52318143+plum-lihui@users.noreply.github.com> Co-authored-by: liuyq-617 Co-authored-by: plum-lihui Co-authored-by: Elias Soong Co-authored-by: Shuduo Sang --- Jenkinsfile | 2 +- src/kit/taosdemo/taosdemo.c | 12 ++++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 0d6939af31..dfe9ed4389 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -82,9 +82,9 @@ def pre_test(){ } } sh ''' - cd ${WK} git pull >/dev/null + export TZ=Asia/Harbin date git clean -dfx diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 713b39d98b..30f096954c 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -3479,9 +3479,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { if (childTblExists && childTblExists->type == cJSON_String && childTblExists->valuestring != NULL) { - if (0 == strncasecmp(childTblExists->valuestring, "yes", 3)) { + if ((0 == strncasecmp(childTblExists->valuestring, "yes", 3)) + && (g_Dbs.db[i].drop == false)) { g_Dbs.db[i].superTbls[j].childTblExists = TBL_ALREADY_EXISTS; - } else if (0 == strncasecmp(childTblExists->valuestring, "no", 2)) { + } else if ((0 == strncasecmp(childTblExists->valuestring, "no", 2) + || (g_Dbs.db[i].drop == true))) { g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; } else { g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; @@ -3527,7 +3529,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } cJSON* childTbl_limit = cJSON_GetObjectItem(stbInfo, "childtable_limit"); - if ((childTbl_limit) && (g_Dbs.db[i].drop != true)) { + if ((childTbl_limit) && (g_Dbs.db[i].drop != true) + && (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) { if (childTbl_limit->type != cJSON_Number) { printf("ERROR: failed to read json, childtable_limit\n"); goto PARSE_OVER; @@ -3538,7 +3541,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } cJSON* childTbl_offset = cJSON_GetObjectItem(stbInfo, "childtable_offset"); - if ((childTbl_offset) && (g_Dbs.db[i].drop != true)) { + if ((childTbl_offset) && (g_Dbs.db[i].drop != true) + && (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) { if (childTbl_offset->type != cJSON_Number || 0 > childTbl_offset->valueint) { printf("ERROR: failed to read json, childtable_offset\n"); goto PARSE_OVER; From 5fec22e965b3ce90c82df5acc27d3305fcd2ce13 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 7 Apr 2021 18:36:34 +0800 Subject: [PATCH 103/178] Hotfix/sangshuduo/td 3607 taosdemo buffer overflow (#5713) * [TD-3607] : fix taosdemo buffer overflow. * [TD-3607] : taosdemo buffer overflow. add tmp buffer. * [TD-3607] : taosdemo buffer overflow. fix data generation. * [TD-3607] : taosdemo buffer overflow. fix normal table writting. * [TD-3607] : taosdemo buffer overflow. remove tail spaces. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table test case. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table case. * [TD-3607] : taosdemo buffer overflow. 
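The limit/offset clamp that runs through these [TD-3607] commits is small enough to sketch on its own. The patched code operates on superTblInfo->childTblLimit, childTblOffset and childTblCount inside startMultiThreadInsertData(); a minimal standalone version of the same arithmetic, under a hypothetical helper name, would be:

    /* Clamp a [offset, offset+limit) window of child tables to the table
     * count; limit < 0 follows taosdemo's "select ... limit -1 means all
     * query result" convention. Hypothetical helper, not patch code. */
    static int clampChildTblLimit(int count, int offset, int limit) {
        if (limit < 0 || offset + limit > count) {
            limit = count - offset;  /* only what remains after the offset */
        }
        return limit;
    }

With count = 100, offset = 90 and limit = 20 the window would overrun by 10 rows, so the clamp returns 10; the same branch also resolves limit = -1 to 10.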
adjust limit offset count warning. * [TD-3607] : taosdemo buffer overflow. add more logic for child tables exist. * [TD-3607] : taosdemo buffer overflow. create database if database be dropped only. * [TD-3607] : fix taosdemo buffer overflow. adjust limit and offset test cases. * [TD-3607] : taosdemo buffer overflow. adjust sample data test case. * [TD-3607]: taosdemo limit and offset. if limit+offset > count * [TD-3607]: taosdemo limit and offset. if child tbl not exist, dont take limit and offset value. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 713b39d98b..30f096954c 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -3479,9 +3479,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { if (childTblExists && childTblExists->type == cJSON_String && childTblExists->valuestring != NULL) { - if (0 == strncasecmp(childTblExists->valuestring, "yes", 3)) { + if ((0 == strncasecmp(childTblExists->valuestring, "yes", 3)) + && (g_Dbs.db[i].drop == false)) { g_Dbs.db[i].superTbls[j].childTblExists = TBL_ALREADY_EXISTS; - } else if (0 == strncasecmp(childTblExists->valuestring, "no", 2)) { + } else if ((0 == strncasecmp(childTblExists->valuestring, "no", 2) + || (g_Dbs.db[i].drop == true))) { g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; } else { g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; @@ -3527,7 +3529,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } cJSON* childTbl_limit = cJSON_GetObjectItem(stbInfo, "childtable_limit"); - if ((childTbl_limit) && (g_Dbs.db[i].drop != true)) { + if ((childTbl_limit) && (g_Dbs.db[i].drop != true) + && (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) { if (childTbl_limit->type != cJSON_Number) { printf("ERROR: failed to read json, childtable_limit\n"); goto PARSE_OVER; @@ -3538,7 +3541,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } cJSON* childTbl_offset = cJSON_GetObjectItem(stbInfo, "childtable_offset"); - if ((childTbl_offset) && (g_Dbs.db[i].drop != true)) { + if ((childTbl_offset) && (g_Dbs.db[i].drop != true) + && (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) { if (childTbl_offset->type != cJSON_Number || 0 > childTbl_offset->valueint) { printf("ERROR: failed to read json, childtable_offset\n"); goto PARSE_OVER; From fdb5c5adc15d0f79f76489e83c1447249e76af44 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Wed, 7 Apr 2021 19:23:47 +0800 Subject: [PATCH 104/178] fix mem leak issue --- src/query/src/qExecutor.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index bd5fdda0f9..dda67d77b2 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -4015,7 +4015,7 @@ static SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfOutput, in return pFillCol; } -int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, SArray* prevResult, void *tsdb, int32_t vgId, bool isSTableQuery) { +int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bool isSTableQuery) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; SQuery *pQuery = pQInfo->runtimeEnv.pQuery; @@ -4026,8 +4026,6 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, SArray* prevResult, void *ts pQuery->timeWindowInterpo = timeWindowInterpoRequired(pQuery); pQuery->stabledev = isStabledev(pQuery); - 
pRuntimeEnv->prevResult = prevResult; - setScanLimitationByResultBuffer(pQuery); int32_t code = setupQueryHandle(tsdb, pQInfo, isSTableQuery); @@ -6383,6 +6381,8 @@ int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *p SArray* prevResult = NULL; if (pQueryMsg->prevResultLen > 0) { prevResult = interResFromBinary(param->prevResult, pQueryMsg->prevResultLen); + + pRuntimeEnv->prevResult = prevResult; } pQuery->precision = tsdbGetCfg(tsdb)->precision; @@ -6404,7 +6404,7 @@ int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *p } // filter the qualified - if ((code = doInitQInfo(pQInfo, pTsBuf, prevResult, tsdb, vgId, isSTable)) != TSDB_CODE_SUCCESS) { + if ((code = doInitQInfo(pQInfo, pTsBuf, tsdb, vgId, isSTable)) != TSDB_CODE_SUCCESS) { goto _error; } From 0eac169fb2ed6b0084e05133e8a468528164d950 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Wed, 7 Apr 2021 19:35:01 +0800 Subject: [PATCH 105/178] fix crash issue --- src/kit/shell/inc/shell.h | 2 +- src/kit/shell/src/shellCommand.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/kit/shell/inc/shell.h b/src/kit/shell/inc/shell.h index d0b7149541..2374150c52 100644 --- a/src/kit/shell/inc/shell.h +++ b/src/kit/shell/inc/shell.h @@ -27,7 +27,7 @@ #define MAX_IP_SIZE 20 #define MAX_PASSWORD_SIZE 20 #define MAX_HISTORY_SIZE 1000 -#define MAX_COMMAND_SIZE 65536 +#define MAX_COMMAND_SIZE 1048586 #define HISTORY_FILE ".taos_history" #define DEFAULT_RES_SHOW_NUM 100 diff --git a/src/kit/shell/src/shellCommand.c b/src/kit/shell/src/shellCommand.c index 16545a5fe8..9173ab0efd 100644 --- a/src/kit/shell/src/shellCommand.c +++ b/src/kit/shell/src/shellCommand.c @@ -238,7 +238,7 @@ void resetCommand(Command *cmd, const char s[]) { clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size); memset(cmd->buffer, 0, MAX_COMMAND_SIZE); memset(cmd->command, 0, MAX_COMMAND_SIZE); - strcpy(cmd->command, s); + strncpy(cmd->command, s, MAX_COMMAND_SIZE); int size = 0; int width = 0; getMbSizeInfo(s, &size, &width); From e8b965c779cea5e678ba8b60ab6dd3530ac4e88a Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 8 Apr 2021 10:50:04 +0800 Subject: [PATCH 106/178] [TD-3683]: reduce buffer size for more stable table creation. 
(#5719) Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 30f096954c..8c0c755113 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -2573,10 +2573,7 @@ static void* createTable(void *sarg) int64_t lastPrintTime = taosGetTimestampMs(); int buff_len; - if (superTblInfo) - buff_len = superTblInfo->maxSqlLen; - else - buff_len = BUFFER_SIZE; + buff_len = BUFFER_SIZE / 8; char *buffer = calloc(buff_len, 1); if (buffer == NULL) { @@ -2624,7 +2621,7 @@ static void* createTable(void *sarg) return NULL; } len += snprintf(buffer + len, - superTblInfo->maxSqlLen - len, + buff_len - len, "if not exists %s.%s%d using %s.%s tags %s ", winfo->db_name, superTblInfo->childTblPrefix, i, winfo->db_name, @@ -2632,7 +2629,7 @@ static void* createTable(void *sarg) free(tagsValBuf); batchNum++; if ((batchNum < superTblInfo->batchCreateTableNum) - && ((superTblInfo->maxSqlLen - len) + && ((buff_len - len) >= (superTblInfo->lenOfTagOfOneRow + 256))) { continue; } @@ -5174,8 +5171,8 @@ static void startMultiThreadInsertData(int threads, char* db_name, if ((superTblInfo->childTblExists == TBL_ALREADY_EXISTS) && (superTblInfo->childTblOffset >= 0)) { - if ((superTblInfo->childTblLimit < 0) - || ((superTblInfo->childTblOffset + superTblInfo->childTblLimit) + if ((superTblInfo->childTblLimit < 0) + || ((superTblInfo->childTblOffset + superTblInfo->childTblLimit) > (superTblInfo->childTblCount))) { superTblInfo->childTblLimit = superTblInfo->childTblCount - superTblInfo->childTblOffset; From bbf49cb8c63932825e727ec847eb62bfdb3dffdc Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 8 Apr 2021 10:50:26 +0800 Subject: [PATCH 107/178] [TD-3683]: reduce buffer size for more stable table creation. 
(#5720) Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 30f096954c..8c0c755113 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -2573,10 +2573,7 @@ static void* createTable(void *sarg) int64_t lastPrintTime = taosGetTimestampMs(); int buff_len; - if (superTblInfo) - buff_len = superTblInfo->maxSqlLen; - else - buff_len = BUFFER_SIZE; + buff_len = BUFFER_SIZE / 8; char *buffer = calloc(buff_len, 1); if (buffer == NULL) { @@ -2624,7 +2621,7 @@ static void* createTable(void *sarg) return NULL; } len += snprintf(buffer + len, - superTblInfo->maxSqlLen - len, + buff_len - len, "if not exists %s.%s%d using %s.%s tags %s ", winfo->db_name, superTblInfo->childTblPrefix, i, winfo->db_name, @@ -2632,7 +2629,7 @@ static void* createTable(void *sarg) free(tagsValBuf); batchNum++; if ((batchNum < superTblInfo->batchCreateTableNum) - && ((superTblInfo->maxSqlLen - len) + && ((buff_len - len) >= (superTblInfo->lenOfTagOfOneRow + 256))) { continue; } @@ -5174,8 +5171,8 @@ static void startMultiThreadInsertData(int threads, char* db_name, if ((superTblInfo->childTblExists == TBL_ALREADY_EXISTS) && (superTblInfo->childTblOffset >= 0)) { - if ((superTblInfo->childTblLimit < 0) - || ((superTblInfo->childTblOffset + superTblInfo->childTblLimit) + if ((superTblInfo->childTblLimit < 0) + || ((superTblInfo->childTblOffset + superTblInfo->childTblLimit) > (superTblInfo->childTblCount))) { superTblInfo->childTblLimit = superTblInfo->childTblCount - superTblInfo->childTblOffset; From 57e77b5f416eba4164196d6c8438fd6af119d6ab Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 8 Apr 2021 12:10:51 +0800 Subject: [PATCH 108/178] enlarge default time --- src/rpc/src/rpcMain.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index 133ae6d0ab..58d611ebb5 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -295,7 +295,7 @@ void *rpcOpen(const SRpcInit *pInit) { return NULL; } } else { - pRpc->pCache = rpcOpenConnCache(pRpc->sessions, rpcCloseConn, pRpc->tmrCtrl, pRpc->idleTime); + pRpc->pCache = rpcOpenConnCache(pRpc->sessions, rpcCloseConn, pRpc->tmrCtrl, pRpc->idleTime * 30); if ( pRpc->pCache == NULL ) { tError("%s failed to init connection cache", pRpc->label); rpcClose(pRpc); @@ -470,7 +470,7 @@ void rpcSendResponse(const SRpcMsg *pRsp) { taosTmrStopA(&pConn->pTimer); // set the idle timer to monitor the activity - taosTmrReset(rpcProcessIdleTimer, pRpc->idleTime, pConn, pRpc->tmrCtrl, &pConn->pIdleTimer); + taosTmrReset(rpcProcessIdleTimer, pRpc->idleTime * 30, pConn, pRpc->tmrCtrl, &pConn->pIdleTimer); rpcSendMsgToPeer(pConn, msg, msgLen); // if not set to secured, set it expcet NOT_READY case, since client wont treat it as secured From db481c5919fb84cda649eec2ce24fc141ec17403 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 8 Apr 2021 13:06:30 +0800 Subject: [PATCH 109/178] Hotfix/sangshuduo/td 3607 taosdemo buffer overflow (#5721) * [TD-3607] : fix taosdemo buffer overflow. * [TD-3607] : taosdemo buffer overflow. add tmp buffer. * [TD-3607] : taosdemo buffer overflow. fix data generation. * [TD-3607] : taosdemo buffer overflow. fix normal table writting. * [TD-3607] : taosdemo buffer overflow. remove tail spaces. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table test case. * [TD-3607] : taosdemo buffer overflow. 
fix taosdemo alter table case. * [TD-3607] : taosdemo buffer overflow. adjust limit offset count warning. * [TD-3607] : taosdemo buffer overflow. add more logic for when child tables exist. * [TD-3607] : taosdemo buffer overflow. create database only if the database has been dropped. * [TD-3607] : fix taosdemo buffer overflow. adjust limit and offset test cases. * [TD-3607] : taosdemo buffer overflow. adjust sample data test case. * test * [TD-3677]: test pr message 1 * [TD-3671]: change target branch * [TD-3677]: test pr message 2 * [TD-3677]: test pr message 3 * Hotfix/sangshuduo/td 3197 fix taosdemo coverity scan (#5688) * [TD-3197] : fix taosdemo coverity scan issues. * [TD-3197] : fix taosdemo coverity scan issue. fix subscribeTest pids uninitialized. * [TD-3197] : fix taosdemo coverity scan issues. * [TD-3197] : fix coverity scan issues. check super tbl info pointer. * [TD-3197] : fix coverity scan issues. move sub tbl query thread join into loop * [TD-3197] : fix coverity scan issues. remove unused variable * [TD-3197] : fix coverity scan issues. use more secure random library * [TD-3197] : fix coverity scan issues. use strncpy for more safety * [TD-3197] : fix taosdemo coverity scan issue. replace arc4random with rand(). * [TD-3197] : fix coverity scan issues. check stb info pointer for start time * [TD-3197] : fix coverity scan issues. fix strcpy vulnerability (see the sketch below) * [TD-3197] : fix taosdemo coverity scan issue. modify taosdemoTest2. try to check database continuously. * [TD-3197] : taosdemo coverity scan issues. * [TD-3197] : fix memory leak when parsing arguments.
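The strcpy/strncpy bullets above all circle the same pitfall: strcpy() copies with no bound at all, and strncpy(dst, src, n) does not NUL-terminate dst when src is n bytes or longer. A minimal sketch of the pattern the fixes move toward, as a hypothetical helper rather than code taken from any of these patches:

    #include <string.h>

    /* Bounded copy with guaranteed NUL termination. */
    static void copyBounded(char *dst, size_t dstSize, const char *src) {
        if (dst == NULL || src == NULL || dstSize == 0) return;
        strncpy(dst, src, dstSize - 1);  /* may truncate without terminating */
        dst[dstSize - 1] = '\0';         /* so terminate explicitly */
    }

Note that a bare strncpy(dst, src, size), as in the shell's resetCommand() change earlier in this series, still leaves dst unterminated whenever strlen(src) >= size; the explicit terminator byte is what actually closes the overflow.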
* [TD-3197] : fix cmake strip arguments. * [TD-3197] : taosdemo coverity scan. fix cmake string manipulation. Co-authored-by: Shuduo Sang * remove useless file * fix changing target branch * fix * fix * [TD-3607]: taosdemo limit and offset. if limit+offset > count * Hotfix/sangshuduo/td 3607 taosdemo buffer overflow (#5706) * [TD-3607] : fix taosdemo buffer overflow. * [TD-3607] : taosdemo buffer overflow. add tmp buffer. * [TD-3607] : taosdemo buffer overflow. fix data generation. * [TD-3607] : taosdemo buffer overflow. fix normal table writing. * [TD-3607] : taosdemo buffer overflow. remove tail spaces. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table test case. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table case. * [TD-3607] : taosdemo buffer overflow. adjust limit offset count warning. * [TD-3607] : taosdemo buffer overflow. add more logic for when child tables exist. * [TD-3607] : taosdemo buffer overflow. create database only if the database has been dropped. * [TD-3607] : fix taosdemo buffer overflow. adjust limit and offset test cases. * [TD-3607] : taosdemo buffer overflow. adjust sample data test case. * [TD-3607]: taosdemo limit and offset. if limit+offset > count Co-authored-by: Shuduo Sang * [TD-3607]: taosdemo limit and offset. if child tbl does not exist, don't take limit and offset value. * Hotfix/sangshuduo/td 3607 taosdemo buffer overflow (#5713) * [TD-3607] : fix taosdemo buffer overflow. * [TD-3607] : taosdemo buffer overflow. add tmp buffer. * [TD-3607] : taosdemo buffer overflow. fix data generation. * [TD-3607] : taosdemo buffer overflow. fix normal table writing. * [TD-3607] : taosdemo buffer overflow. remove tail spaces. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table test case. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table case. * [TD-3607] : taosdemo buffer overflow. adjust limit offset count warning. * [TD-3607] : taosdemo buffer overflow. add more logic for when child tables exist. * [TD-3607] : taosdemo buffer overflow. create database only if the database has been dropped. * [TD-3607] : fix taosdemo buffer overflow. adjust limit and offset test cases. * [TD-3607] : taosdemo buffer overflow. adjust sample data test case. * [TD-3607]: taosdemo limit and offset. if limit+offset > count * [TD-3607]: taosdemo limit and offset. if child tbl does not exist, don't take limit and offset value. Co-authored-by: Shuduo Sang * fix taosdemo limit invalid warning condition. * [TD-3683]: reduce buffer size for more stable table creation. 
(#5719) Co-authored-by: Shuduo Sang Co-authored-by: Shuduo Sang Co-authored-by: huili <52318143+plum-lihui@users.noreply.github.com> Co-authored-by: liuyq-617 Co-authored-by: plum-lihui Co-authored-by: Elias Soong Co-authored-by: Shengliang Guan --- src/kit/taosdemo/taosdemo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 8c0c755113..59b31d6172 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -5165,7 +5165,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, int limit, offset; if ((superTblInfo->childTblExists == TBL_NO_EXISTS) && - ((superTblInfo->childTblOffset != 0) || (superTblInfo->childTblLimit != 0))) { + ((superTblInfo->childTblOffset != 0) || (superTblInfo->childTblLimit >= 0))) { printf("WARNING: offset and limit will not be used since the child tables are not exists!\n"); } From f81b77209b59f5aed329c01bc10d956019ac6f5e Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Thu, 8 Apr 2021 15:26:20 +0800 Subject: [PATCH 111/178] TD-3707 --- src/client/src/tscSQLParser.c | 2 +- tests/script/general/parser/groupby.sim | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 5841aa0cd5..e5a55cdfd3 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -6166,7 +6166,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { } // projection query on super table does not compatible with "group by" syntax - if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { + if (tscIsProjectionQuery(pQueryInfo)) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } diff --git a/tests/script/general/parser/groupby.sim b/tests/script/general/parser/groupby.sim index dd7331054c..124e76e85c 100644 --- a/tests/script/general/parser/groupby.sim +++ b/tests/script/general/parser/groupby.sim @@ -220,6 +220,7 @@ sql_error select sum(c3), ts, c2 from group_tb0 where c1 < 20 group by c1; sql_error select sum(c3), first(ts), c2 from group_tb0 where c1 < 20 group by c1; sql_error select first(c3), ts, c1, c2 from group_tb0 where c1 < 20 group by c1; sql_error select first(c3), last(c3), ts, c1 from group_tb0 where c1 < 20 group by c1; +sql_error select ts from group_tb0 group by c1; #===========================interval=====not support====================== sql_error select count(*), c1 from group_tb0 where c1<20 interval(1y) group by c1; From 278fc08441d07ca4107954ad1b32003ea9743d08 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 8 Apr 2021 18:09:41 +0800 Subject: [PATCH 112/178] [TD-3710]: [sync/memory] fix peer connection invalid read post freeing --- src/sync/src/syncMain.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index 72442eee6c..2f1576940b 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -551,7 +551,10 @@ static void syncClosePeerConn(SSyncPeer *pPeer) { if (pPeer->peerFd >= 0) { pPeer->peerFd = -1; void *pConn = pPeer->pConn; - if (pConn != NULL) syncFreeTcpConn(pPeer->pConn); + if (pConn != NULL) { + syncFreeTcpConn(pPeer->pConn); + pPeer->pConn = NULL; + } } } From ad8604202837e8b72978e48f849cf8171b20f27b Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Thu, 8 Apr 2021 18:44:06 +0800 Subject: [PATCH 113/178] fix bug --- src/query/src/qFilterfunc.c | 4 ++-- src/tsdb/src/tsdbRead.c | 6 ------ 2 files 
changed, 2 insertions(+), 8 deletions(-) diff --git a/src/query/src/qFilterfunc.c b/src/query/src/qFilterfunc.c index 884f7e653f..dabce88423 100644 --- a/src/query/src/qFilterfunc.c +++ b/src/query/src/qFilterfunc.c @@ -124,7 +124,7 @@ bool greaterEqualOperator(SColumnFilterElem *pFilter, const char *minval, const bool equalOperator(SColumnFilterElem *pFilter, const char *minval, const char *maxval, int16_t type) { SColumnFilterInfo *pFilterInfo = &pFilter->filterInfo; - if (IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL) { + if (IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL || type == TSDB_DATA_TYPE_TIMESTAMP) { int64_t minv = -1, maxv = -1; GET_TYPED_DATA(minv, int64_t, type, minval); GET_TYPED_DATA(maxv, int64_t, type, maxval); @@ -202,7 +202,7 @@ bool likeOperator(SColumnFilterElem *pFilter, const char *minval, const char *ma bool notEqualOperator(SColumnFilterElem *pFilter, const char *minval, const char *maxval, int16_t type) { SColumnFilterInfo *pFilterInfo = &pFilter->filterInfo; - if (IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL) { + if (IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL || type == TSDB_DATA_TYPE_TIMESTAMP) { int64_t minv = -1, maxv = -1; GET_TYPED_DATA(minv, int64_t, type, minval); GET_TYPED_DATA(maxv, int64_t, type, maxval); diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index ea72760568..cd97b2a9d6 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -2861,12 +2861,6 @@ int32_t tsdbRetrieveDataBlockStatisInfo(TsdbQueryHandleT* pQueryHandle, SDataSta if (pHandle->statis[i].numOfNull == -1) { // set the column data are all NULL pHandle->statis[i].numOfNull = pBlockInfo->compBlock->numOfRows; } - - SColumnInfo* pColInfo = taosArrayGet(pHandle->pColumns, i); - if (pColInfo->type == TSDB_DATA_TYPE_TIMESTAMP) { - pHandle->statis[i].min = pBlockInfo->compBlock->keyFirst; - pHandle->statis[i].max = pBlockInfo->compBlock->keyLast; - } } int64_t elapsed = taosGetTimestampUs() - stime; From 0e72f1cb6d64ab34a8d712a5928b3eee65af1e71 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Thu, 8 Apr 2021 19:01:28 +0800 Subject: [PATCH 114/178] [###################################################################] --- tests/pytest/insert/basic.py | 3 +++ tests/pytest/topic/topicQuery.py | 21 +++++++++++++++------ 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/tests/pytest/insert/basic.py b/tests/pytest/insert/basic.py index dcb5834d55..f23f38651a 100644 --- a/tests/pytest/insert/basic.py +++ b/tests/pytest/insert/basic.py @@ -43,6 +43,9 @@ class TDTestCase: tdSql.query("select * from tb") tdSql.checkRows(insertRows + 4) + # test case for https://jira.taosdata.com:18080/browse/TD-3716: + tdSql.error("insert into tb(now, 1)") + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/topic/topicQuery.py b/tests/pytest/topic/topicQuery.py index d4f8c4127d..1ee3c3a427 100644 --- a/tests/pytest/topic/topicQuery.py +++ b/tests/pytest/topic/topicQuery.py @@ -54,11 +54,11 @@ class TDTestCase: tdSql.execute("use db") - tdSql.execute("create table test(ts timestamp, value int)") - tdSql.execute("insert into test values(%d, 1)" % self.ts) - tdSql.execute("insert into test values(%d, 1)" % (self.ts + 1)) - tdSql.execute("insert into test values(%d, 1)" % (self.ts + 2)) - tdSql.execute("insert into test values(%d, 1)" % (self.ts + 3)) + tdSql.execute("create table test(ts timestamp, start timestamp, value int)") + tdSql.execute("insert into test 
values(%d, %d, 1)" % (self.ts, self.ts)) + tdSql.execute("insert into test values(%d, %d, 1)" % (self.ts + 1, self.ts + 1)) + tdSql.execute("insert into test values(%d, %d, 1)" % (self.ts + 2, self.ts + 2)) + tdSql.execute("insert into test values(%d, %d, 1)" % (self.ts + 3, self.ts + 3)) tdSql.query("select * from test") tdSql.checkRows(4) @@ -72,6 +72,15 @@ class TDTestCase: tdSql.query("select * from test where ts = %d" % self.ts) tdSql.checkRows(1) + tdSql.query("select * from test where start >= %d" % self.ts) + tdSql.checkRows(4) + + tdSql.query("select * from test where start > %d" % self.ts) + tdSql.checkRows(3) + + tdSql.query("select * from test where start = %d" % self.ts) + tdSql.checkRows(1) + def stop(self): tdSql.close() @@ -79,4 +88,4 @@ class TDTestCase: tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file From 6a0af1593f1c802493089fae54ce15aa7e16efc1 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 8 Apr 2021 21:51:05 +0800 Subject: [PATCH 115/178] Feature/sangshuduo/td 3408 taosdemo async query (#5730) * [TD-3408]: taosdemo support async query. * [TD-3408]: taosdemo support async query. refactor * [TD-3408]: taosdemo support async query. refactor 2 * [TD-3408]: taosdemo support async query. refactor 3 * [TD-3408]: taosdemo support async query. refactor 4 * [TD-3408]: taosdemo support specified sql more than one line. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 502 +++++++++++++++++++++--------------- 1 file changed, 287 insertions(+), 215 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 59b31d6172..6939f5dae5 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -67,6 +67,12 @@ enum TEST_MODE { INVAID_TEST }; +enum QUERY_MODE { + SYNC_QUERY_MODE, // 0 + ASYNC_QUERY_MODE, // 1 + INVALID_MODE +}; + #define MAX_SQL_SIZE 65536 #define BUFFER_SIZE (65536*2) #define MAX_USERNAME_SIZE 64 @@ -198,7 +204,7 @@ typedef struct SArguments_S { bool verbose_print; bool performance_print; char * output_file; - int mode; + int query_mode; char * datatype[MAX_NUM_DATATYPE + 1]; int len_of_binary; int num_of_CPR; @@ -351,7 +357,7 @@ typedef struct SpecifiedQueryInfo_S { int rate; // 0: unlimit > 0 loop/s int concurrent; int sqlCount; - int subscribeMode; // 0: sync, 1: async + int mode; // 0: sync, 1: async int subscribeInterval; // ms int queryTimes; int subscribeRestart; @@ -365,7 +371,7 @@ typedef struct SuperQueryInfo_S { char sTblName[MAX_TB_NAME_SIZE+1]; int rate; // 0: unlimit > 0 loop/s int threadCnt; - int subscribeMode; // 0: sync, 1: async + int mode; // 0: sync, 1: async int subscribeInterval; // ms int subscribeRestart; int subscribeKeepProgress; @@ -429,6 +435,8 @@ typedef struct SThreadInfo_S { int64_t maxDelay; int64_t minDelay; + // query + int querySeq; // sequence number of sql command } threadInfo; #ifdef WINDOWS @@ -714,7 +722,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } else if (strcmp(argv[i], "-s") == 0) { arguments->sqlFile = argv[++i]; } else if (strcmp(argv[i], "-q") == 0) { - arguments->mode = atoi(argv[++i]); + arguments->query_mode = atoi(argv[++i]); } else if (strcmp(argv[i], "-T") == 0) { arguments->num_of_threads = atoi(argv[++i]); } else if (strcmp(argv[i], "-i") == 0) { @@ -758,7 +766,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { char *dupstr = strdup(argv[i]); char *running = dupstr; char *token = 
strsep(&running, ","); - while (token != NULL) { + while(token != NULL) { if (strcasecmp(token, "INT") && strcasecmp(token, "FLOAT") && strcasecmp(token, "TINYINT") @@ -964,7 +972,7 @@ static void getResult(TAOS_RES *res, char* resultFileName) { char temp[16000]; // fetch the records row by row - while ((row = taos_fetch_row(res))) { + while((row = taos_fetch_row(res))) { if (totalLen >= 100*1024*1024 - 32000) { if (fp) fprintf(fp, "%s", databuf); totalLen = 0; @@ -986,7 +994,8 @@ static void getResult(TAOS_RES *res, char* resultFileName) { static void selectAndGetResult(TAOS *taos, char *command, char* resultFileName) { TAOS_RES *res = taos_query(taos, command); if (res == NULL || taos_errno(res) != 0) { - printf("failed to sql:%s, reason:%s\n", command, taos_errstr(res)); + errorPrint("%s() LN%d, failed to execute sql:%s, reason:%s\n", + __func__, __LINE__, command, taos_errstr(res)); taos_free_result(res); return; } @@ -1163,7 +1172,8 @@ static int printfInsertMeta() { if (g_Dbs.db[i].dbCfg.precision[0] != 0) { if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) { - printf(" precision: \033[33m%s\033[0m\n", g_Dbs.db[i].dbCfg.precision); + printf(" precision: \033[33m%s\033[0m\n", + g_Dbs.db[i].dbCfg.precision); } else { printf("\033[1m\033[40;31m precision error: %s\033[0m\n", g_Dbs.db[i].dbCfg.precision); @@ -1171,11 +1181,13 @@ static int printfInsertMeta() { } } - printf(" super table count: \033[33m%d\033[0m\n", g_Dbs.db[i].superTblCount); + printf(" super table count: \033[33m%d\033[0m\n", + g_Dbs.db[i].superTblCount); for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { printf(" super table[\033[33m%d\033[0m]:\n", j); - printf(" stbName: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].sTblName); + printf(" stbName: \033[33m%s\033[0m\n", + g_Dbs.db[i].superTbls[j].sTblName); if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { printf(" autoCreateTable: \033[33m%s\033[0m\n", "no"); @@ -1241,7 +1253,7 @@ static int printfInsertMeta() { g_Dbs.db[i].superTbls[j].sampleFile); printf(" tagsFile: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].tagsFile); - printf(" columnCount: \033[33m%d\033[0m\n ", + printf(" columnCount: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].columnCount); for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) { //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen); @@ -1459,41 +1471,61 @@ static void printfQueryMeta() { printf("\n"); printf("specified table query info: \n"); - printf("query interval: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.rate); + printf("query interval: \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.rate); printf("top query times:\033[33m%d\033[0m\n", g_args.query_times); - printf("concurrent: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.concurrent); - printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.sqlCount); + printf("concurrent: \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.concurrent); + printf("sqlCount: \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.sqlCount); printf("specified tbl query times:\n"); - printf(" \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.queryTimes); + printf(" \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.queryTimes); if (SUBSCRIBE_TEST == g_args.test_mode) { - printf("mod: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeMode); - printf("interval: 
\033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeInterval); - printf("restart: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeRestart); - printf("keepProgress: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); + printf("mod: \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.mode); + printf("interval: \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.subscribeInterval); + printf("restart: \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.subscribeRestart); + printf("keepProgress: \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); } for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - printf(" sql[%d]: \033[33m%s\033[0m\n", i, g_queryInfo.specifiedQueryInfo.sql[i]); + printf(" sql[%d]: \033[33m%s\033[0m\n", + i, g_queryInfo.specifiedQueryInfo.sql[i]); } printf("\n"); - printf("super table query info: \n"); - printf("query interval: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.rate); - printf("threadCnt: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.threadCnt); - printf("childTblCount: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.childTblCount); - printf("stable name: \033[33m%s\033[0m\n", g_queryInfo.superQueryInfo.sTblName); - printf("stb query times:\033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.queryTimes); + printf("super table query info:\n"); + printf("query interval: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.rate); + printf("threadCnt: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.threadCnt); + printf("childTblCount: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.childTblCount); + printf("stable name: \033[33m%s\033[0m\n", + g_queryInfo.superQueryInfo.sTblName); + printf("stb query times:\033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.queryTimes); if (SUBSCRIBE_TEST == g_args.test_mode) { - printf("mod: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeMode); - printf("interval: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeInterval); - printf("restart: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeRestart); - printf("keepProgress: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeKeepProgress); + printf("mod: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.mode); + printf("interval: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.subscribeInterval); + printf("restart: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.subscribeRestart); + printf("keepProgress: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.subscribeKeepProgress); } - printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.sqlCount); + printf("sqlCount: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.sqlCount); for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - printf(" sql[%d]: \033[33m%s\033[0m\n", i, g_queryInfo.superQueryInfo.sql[i]); + printf(" sql[%d]: \033[33m%s\033[0m\n", + i, g_queryInfo.superQueryInfo.sql[i]); } printf("\n"); @@ -1637,7 +1669,7 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { TAOS_FIELD *fields = taos_fetch_fields(res); - while ((row = taos_fetch_row(res)) != NULL) { + while((row = taos_fetch_row(res)) != NULL) { // sys database name : 'log' if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) { @@ -1670,7 +1702,8 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]); dbInfos[count]->fsync = *((int32_t 
*)row[TSDB_SHOW_DB_FSYNC_INDEX]); dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); - dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); + dbInfos[count]->cachelast = + (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); tstrncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], @@ -1681,7 +1714,8 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { count++; if (count > MAX_DATABASE_COUNT) { - errorPrint( "The database count overflow than %d\n", MAX_DATABASE_COUNT); + errorPrint("%s() LN%d, The database count overflow than %d\n", + __func__, __LINE__, MAX_DATABASE_COUNT); break; } } @@ -1691,6 +1725,7 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { static void printfDbInfoForQueryToFile( char* filename, SDbInfo* dbInfos, int index) { + if (filename[0] == 0) return; @@ -1909,7 +1944,7 @@ static int postProceSql(char* host, uint16_t port, char* sqlstr) if (bytes == 0) break; sent+=bytes; - } while (sent < req_str_len); + } while(sent < req_str_len); memset(response_buf, 0, RESP_BUF_LEN); resp_len = sizeof(response_buf) - 1; @@ -1927,7 +1962,7 @@ static int postProceSql(char* host, uint16_t port, char* sqlstr) if (bytes == 0) break; received += bytes; - } while (received < resp_len); + } while(received < resp_len); if (received == resp_len) { free(request_buf); @@ -1951,7 +1986,8 @@ static int postProceSql(char* host, uint16_t port, char* sqlstr) static char* getTagValueFromTagSample(SSuperTable* stbInfo, int tagUsePos) { char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1); if (NULL == dataBuf) { - errorPrint("%s() LN%d, calloc failed! size:%d\n", __func__, __LINE__, TSDB_MAX_SQL_LEN+1); + errorPrint("%s() LN%d, calloc failed! size:%d\n", + __func__, __LINE__, TSDB_MAX_SQL_LEN+1); return NULL; } @@ -2155,7 +2191,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, } char* pTblName = childTblName; - while ((row = taos_fetch_row(res)) != NULL) { + while((row = taos_fetch_row(res)) != NULL) { int32_t* len = taos_fetch_lengths(res); tstrncpy(pTblName, (char *)row[0], len[0]+1); //printf("==== sub table name: %s\n", pTblName); @@ -2218,7 +2254,7 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName, int tagIndex = 0; int columnIndex = 0; TAOS_FIELD *fields = taos_fetch_fields(res); - while ((row = taos_fetch_row(res)) != NULL) { + while((row = taos_fetch_row(res)) != NULL) { if (0 == count) { count++; continue; @@ -2765,7 +2801,7 @@ static void createChildTables() { // normal table len = snprintf(tblColsBuf, MAX_SQL_SIZE, "(TS TIMESTAMP"); int j = 0; - while (g_args.datatype[j]) { + while(g_args.datatype[j]) { if ((strncasecmp(g_args.datatype[j], "BINARY", strlen("BINARY")) == 0) || (strncasecmp(g_args.datatype[j], "NCHAR", strlen("NCHAR")) == 0)) { @@ -2824,7 +2860,7 @@ static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) { return -1; } - while ((readLen = tgetline(&line, &n, fp)) != -1) { + while((readLen = tgetline(&line, &n, fp)) != -1) { if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { line[--readLen] = 0; } @@ -2888,7 +2924,7 @@ static int readSampleFromCsvFileToMem( assert(superTblInfo->sampleDataBuf); memset(superTblInfo->sampleDataBuf, 0, MAX_SAMPLES_ONCE_FROM_FILE * superTblInfo->lenOfOneRow); - while (1) { + while(1) { readLen = tgetline(&line, &n, fp); if (-1 == readLen) { if(0 != fseek(fp, 0, SEEK_SET)) { @@ -2967,7 +3003,8 @@ static bool getColumnAndTagTypeFromInsertJsonFile( if (countObj && countObj->type == 
cJSON_Number) { count = countObj->valueint; } else if (countObj && countObj->type != cJSON_Number) { - errorPrint("%s() LN%d, failed to read json, column count not found\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, column count not found\n", + __func__, __LINE__); goto PARSE_OVER; } else { count = 1; @@ -2976,8 +3013,10 @@ static bool getColumnAndTagTypeFromInsertJsonFile( // column info memset(&columnCase, 0, sizeof(StrColumn)); cJSON *dataType = cJSON_GetObjectItem(column, "type"); - if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) { - errorPrint("%s() LN%d: failed to read json, column type not found\n", __func__, __LINE__); + if (!dataType || dataType->type != cJSON_String + || dataType->valuestring == NULL) { + errorPrint("%s() LN%d: failed to read json, column type not found\n", + __func__, __LINE__); goto PARSE_OVER; } //tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, MAX_TB_NAME_SIZE); @@ -2987,7 +3026,8 @@ static bool getColumnAndTagTypeFromInsertJsonFile( if (dataLen && dataLen->type == cJSON_Number) { columnCase.dataLen = dataLen->valueint; } else if (dataLen && dataLen->type != cJSON_Number) { - debugPrint("%s() LN%d: failed to read json, column len not found\n", __func__, __LINE__); + debugPrint("%s() LN%d: failed to read json, column len not found\n", + __func__, __LINE__); goto PARSE_OVER; } else { columnCase.dataLen = 8; @@ -3007,13 +3047,15 @@ static bool getColumnAndTagTypeFromInsertJsonFile( // tags cJSON *tags = cJSON_GetObjectItem(stbInfo, "tags"); if (!tags || tags->type != cJSON_Array) { - debugPrint("%s() LN%d, failed to read json, tags not found\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, tags not found\n", + __func__, __LINE__); goto PARSE_OVER; } int tagSize = cJSON_GetArraySize(tags); if (tagSize > MAX_TAG_COUNT) { - debugPrint("%s() LN%d, failed to read json, tags size overflow, max tag size is %d\n", __func__, __LINE__, MAX_TAG_COUNT); + errorPrint("%s() LN%d, failed to read json, tags size overflow, max tag size is %d\n", + __func__, __LINE__, MAX_TAG_COUNT); goto PARSE_OVER; } @@ -3036,8 +3078,10 @@ static bool getColumnAndTagTypeFromInsertJsonFile( // column info memset(&columnCase, 0, sizeof(StrColumn)); cJSON *dataType = cJSON_GetObjectItem(tag, "type"); - if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) { - printf("ERROR: failed to read json, tag type not found\n"); + if (!dataType || dataType->type != cJSON_String + || dataType->valuestring == NULL) { + errorPrint("%s() LN%d, failed to read json, tag type not found\n", + __func__, __LINE__); goto PARSE_OVER; } tstrncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE); @@ -3046,14 +3090,16 @@ static bool getColumnAndTagTypeFromInsertJsonFile( if (dataLen && dataLen->type == cJSON_Number) { columnCase.dataLen = dataLen->valueint; } else if (dataLen && dataLen->type != cJSON_Number) { - printf("ERROR: failed to read json, column len not found\n"); + errorPrint("%s() LN%d, failed to read json, column len not found\n", + __func__, __LINE__); goto PARSE_OVER; } else { columnCase.dataLen = 0; } for (int n = 0; n < count; ++n) { - tstrncpy(superTbls->tags[index].dataType, columnCase.dataType, MAX_TB_NAME_SIZE); + tstrncpy(superTbls->tags[index].dataType, columnCase.dataType, + MAX_TB_NAME_SIZE); superTbls->tags[index].dataLen = columnCase.dataLen; index++; } @@ -3063,9 +3109,6 @@ static bool getColumnAndTagTypeFromInsertJsonFile( ret = true; PARSE_OVER: - 
//free(content); - //cJSON_Delete(root); - //fclose(fp); return ret; } @@ -3142,7 +3185,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!gInsertInterval) { g_args.insert_interval = 0; } else { - errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n", + __func__, __LINE__); goto PARSE_OVER; } @@ -3163,7 +3207,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!interlaceRows) { g_args.interlace_rows = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req } else { - errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n", + __func__, __LINE__); goto PARSE_OVER; } @@ -3173,7 +3218,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!maxSqlLen) { g_args.max_sql_len = TSDB_PAYLOAD_SIZE; } else { - errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n", + __func__, __LINE__); goto PARSE_OVER; } @@ -3183,7 +3229,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!numRecPerReq) { g_args.num_of_RPR = 0xffff; } else { - errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n", + __func__, __LINE__); goto PARSE_OVER; } @@ -3509,7 +3556,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!dataSource) { tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", MAX_DB_NAME_SIZE); } else { - errorPrint("%s() LN%d, failed to read json, data_source not found\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, data_source not found\n", + __func__, __LINE__); goto PARSE_OVER; } @@ -3584,7 +3632,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } cJSON *sampleFile = cJSON_GetObjectItem(stbInfo, "sample_file"); - if (sampleFile && sampleFile->type == cJSON_String && sampleFile->valuestring != NULL) { + if (sampleFile && sampleFile->type == cJSON_String + && sampleFile->valuestring != NULL) { tstrncpy(g_Dbs.db[i].superTbls[j].sampleFile, sampleFile->valuestring, MAX_FILE_NAME_LEN); } else if (!sampleFile) { @@ -3727,9 +3776,6 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { ret = true; PARSE_OVER: - //free(content); - //cJSON_Delete(root); - //fclose(fp); return ret; } @@ -3795,7 +3841,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } else if (!gQueryTimes) { g_args.query_times = 1; } else { - errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", + __func__, __LINE__); goto PARSE_OVER; } @@ -3833,35 +3880,45 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { g_queryInfo.specifiedQueryInfo.rate = 0; } - cJSON* specifiedQueryTimes = cJSON_GetObjectItem(specifiedQuery, "query_times"); + cJSON* specifiedQueryTimes = cJSON_GetObjectItem(specifiedQuery, + "query_times"); if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) { g_queryInfo.specifiedQueryInfo.queryTimes = specifiedQueryTimes->valueint; } else if (!specifiedQueryTimes) { g_queryInfo.specifiedQueryInfo.queryTimes = g_args.query_times; } else { - errorPrint("%s() LN%d, 
failed to read json, query_times input mistake\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", + __func__, __LINE__); goto PARSE_OVER; } cJSON* concurrent = cJSON_GetObjectItem(specifiedQuery, "concurrent"); if (concurrent && concurrent->type == cJSON_Number) { g_queryInfo.specifiedQueryInfo.concurrent = concurrent->valueint; + if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) { + errorPrint("%s() LN%d, query sqlCount %d or concurrent %d is not correct.\n", + __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount, + g_queryInfo.specifiedQueryInfo.concurrent); + goto PARSE_OVER; + } } else if (!concurrent) { g_queryInfo.specifiedQueryInfo.concurrent = 1; } - cJSON* mode = cJSON_GetObjectItem(specifiedQuery, "mode"); - if (mode && mode->type == cJSON_String && mode->valuestring != NULL) { - if (0 == strcmp("sync", mode->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeMode = 0; - } else if (0 == strcmp("async", mode->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeMode = 1; + cJSON* queryMode = cJSON_GetObjectItem(specifiedQuery, "mode"); + if (queryMode && queryMode->type == cJSON_String + && queryMode->valuestring != NULL) { + if (0 == strcmp("sync", queryMode->valuestring)) { + g_queryInfo.specifiedQueryInfo.mode = SYNC_QUERY_MODE; + } else if (0 == strcmp("async", queryMode->valuestring)) { + g_queryInfo.specifiedQueryInfo.mode = ASYNC_QUERY_MODE; } else { - printf("ERROR: failed to read json, subscribe mod error\n"); + errorPrint("%s() LN%d, failed to read json, query mode input error\n", + __func__, __LINE__); goto PARSE_OVER; } } else { - g_queryInfo.specifiedQueryInfo.subscribeMode = 0; + g_queryInfo.specifiedQueryInfo.mode = SYNC_QUERY_MODE; } cJSON* interval = cJSON_GetObjectItem(specifiedQuery, "interval"); @@ -3908,12 +3965,14 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { if (!superSqls) { g_queryInfo.specifiedQueryInfo.sqlCount = 0; } else if (superSqls->type != cJSON_Array) { - printf("ERROR: failed to read json, super sqls not found\n"); + errorPrint("%s() LN%d, failed to read json, super sqls not found\n", + __func__, __LINE__); goto PARSE_OVER; } else { int superSqlSize = cJSON_GetArraySize(superSqls); if (superSqlSize > MAX_QUERY_SQL_COUNT) { - printf("ERROR: failed to read json, query sql size overflow, max is %d\n", MAX_QUERY_SQL_COUNT); + errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n", + __func__, __LINE__, MAX_QUERY_SQL_COUNT); goto PARSE_OVER; } @@ -3965,7 +4024,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } else if (!superQueryTimes) { g_queryInfo.superQueryInfo.queryTimes = g_args.query_times; } else { - errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", + __func__, __LINE__); goto PARSE_OVER; } @@ -3984,25 +4044,30 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { //} cJSON* stblname = cJSON_GetObjectItem(superQuery, "stblname"); - if (stblname && stblname->type == cJSON_String && stblname->valuestring != NULL) { - tstrncpy(g_queryInfo.superQueryInfo.sTblName, stblname->valuestring, MAX_TB_NAME_SIZE); + if (stblname && stblname->type == cJSON_String + && stblname->valuestring != NULL) { + tstrncpy(g_queryInfo.superQueryInfo.sTblName, stblname->valuestring, + MAX_TB_NAME_SIZE); } else { - printf("ERROR: failed to read json, super table name not found\n"); + errorPrint("%s() LN%d, failed to read json, super table 
name input error\n", + __func__, __LINE__); goto PARSE_OVER; } cJSON* submode = cJSON_GetObjectItem(superQuery, "mode"); - if (submode && submode->type == cJSON_String && submode->valuestring != NULL) { + if (submode && submode->type == cJSON_String + && submode->valuestring != NULL) { if (0 == strcmp("sync", submode->valuestring)) { - g_queryInfo.superQueryInfo.subscribeMode = 0; + g_queryInfo.superQueryInfo.mode = SYNC_QUERY_MODE; } else if (0 == strcmp("async", submode->valuestring)) { - g_queryInfo.superQueryInfo.subscribeMode = 1; + g_queryInfo.superQueryInfo.mode = ASYNC_QUERY_MODE; } else { - printf("ERROR: failed to read json, subscribe mod error\n"); + errorPrint("%s() LN%d, failed to read json, query mode input error\n", + __func__, __LINE__); goto PARSE_OVER; } } else { - g_queryInfo.superQueryInfo.subscribeMode = 0; + g_queryInfo.superQueryInfo.mode = SYNC_QUERY_MODE; } cJSON* subinterval = cJSON_GetObjectItem(superQuery, "interval"); @@ -4015,7 +4080,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } cJSON* subrestart = cJSON_GetObjectItem(superQuery, "restart"); - if (subrestart && subrestart->type == cJSON_String && subrestart->valuestring != NULL) { + if (subrestart && subrestart->type == cJSON_String + && subrestart->valuestring != NULL) { if (0 == strcmp("yes", subrestart->valuestring)) { g_queryInfo.superQueryInfo.subscribeRestart = 1; } else if (0 == strcmp("no", subrestart->valuestring)) { @@ -4049,12 +4115,14 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { if (!subsqls) { g_queryInfo.superQueryInfo.sqlCount = 0; } else if (subsqls->type != cJSON_Array) { - printf("ERROR: failed to read json, super sqls not found\n"); + errorPrint("%s() LN%d: failed to read json, super sqls not found\n", + __func__, __LINE__); goto PARSE_OVER; } else { int superSqlSize = cJSON_GetArraySize(subsqls); if (superSqlSize > MAX_QUERY_SQL_COUNT) { - printf("ERROR: failed to read json, query sql size overflow, max is %d\n", MAX_QUERY_SQL_COUNT); + errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n", + __func__, __LINE__, MAX_QUERY_SQL_COUNT); goto PARSE_OVER; } @@ -4064,19 +4132,25 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { if (sql == NULL) continue; cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); - if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) { - printf("ERROR: failed to read json, sql not found\n"); + if (!sqlStr || sqlStr->type != cJSON_String + || sqlStr->valuestring == NULL) { + errorPrint("%s() LN%d, failed to read json, sql not found\n", + __func__, __LINE__); goto PARSE_OVER; } - tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH); + tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, + MAX_QUERY_SQL_LENGTH); cJSON *result = cJSON_GetObjectItem(sql, "result"); - if (result != NULL && result->type == cJSON_String && result->valuestring != NULL){ - tstrncpy(g_queryInfo.superQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN); + if (result != NULL && result->type == cJSON_String + && result->valuestring != NULL){ + tstrncpy(g_queryInfo.superQueryInfo.result[j], + result->valuestring, MAX_FILE_NAME_LEN); } else if (NULL == result) { memset(g_queryInfo.superQueryInfo.result[j], 0, MAX_FILE_NAME_LEN); } else { - printf("ERROR: failed to read json, sub query result file not found\n"); + errorPrint("%s() LN%d, failed to read json, sub query result file not found\n", + __func__, __LINE__); goto PARSE_OVER; } } @@ -4086,9 +4160,6 @@ static bool 
getMetaFromQueryJsonFile(cJSON* root) { ret = true; PARSE_OVER: - //free(content); - //cJSON_Delete(root); - //fclose(fp); return ret; } @@ -5415,7 +5486,7 @@ static void *readTable(void *sarg) { return NULL; } - while (taos_fetch_row(pSql) != NULL) { + while(taos_fetch_row(pSql) != NULL) { count++; } @@ -5491,7 +5562,7 @@ static void *readMetric(void *sarg) { return NULL; } int count = 0; - while (taos_fetch_row(pSql) != NULL) { + while(taos_fetch_row(pSql) != NULL) { count++; } t = getCurrentTimeUs() - t; @@ -5602,7 +5673,7 @@ static int insertTestProcess() { return 0; } -static void *superQueryProcess(void *sarg) { +static void *specifiedQueryProcess(void *sarg) { threadInfo *winfo = (threadInfo *)sarg; if (winfo->taos == NULL) { @@ -5643,32 +5714,35 @@ static void *superQueryProcess(void *sarg) { } st = taosGetTimestampUs(); - for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { - int64_t t1 = taosGetTimestampUs(); - char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.specifiedQueryInfo.result[i][0] != 0) { - sprintf(tmpFile, "%s-%d", - g_queryInfo.specifiedQueryInfo.result[i], winfo->threadID); - } - selectAndGetResult(winfo->taos, g_queryInfo.specifiedQueryInfo.sql[i], tmpFile); - int64_t t2 = taosGetTimestampUs(); - printf("=[taosc] thread[%"PRId64"] complete one sql, Spent %f s\n", - taosGetSelfPthreadId(), (t2 - t1)/1000000.0); - } else { - int64_t t1 = taosGetTimestampUs(); - int retCode = postProceSql(g_queryInfo.host, - g_queryInfo.port, g_queryInfo.specifiedQueryInfo.sql[i]); - int64_t t2 = taosGetTimestampUs(); - printf("=[restful] thread[%"PRId64"] complete one sql, Spent %f s\n", - taosGetSelfPthreadId(), (t2 - t1)/1000000.0); - if (0 != retCode) { - printf("====restful return fail, threadID[%d]\n", winfo->threadID); - return NULL; - } + if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { + int64_t t1 = taosGetTimestampUs(); + char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; + if (g_queryInfo.specifiedQueryInfo.result[winfo->querySeq][0] != 0) { + sprintf(tmpFile, "%s-%d", + g_queryInfo.specifiedQueryInfo.result[winfo->querySeq], + winfo->threadID); + } + selectAndGetResult(winfo->taos, + g_queryInfo.specifiedQueryInfo.sql[winfo->querySeq], tmpFile); + int64_t t2 = taosGetTimestampUs(); + printf("=[taosc] thread[%"PRId64"] complete one sql, Spent %f s\n", + taosGetSelfPthreadId(), (t2 - t1)/1000000.0); + } else { + int64_t t1 = taosGetTimestampUs(); + int retCode = postProceSql(g_queryInfo.host, + g_queryInfo.port, + g_queryInfo.specifiedQueryInfo.sql[winfo->querySeq]); + int64_t t2 = taosGetTimestampUs(); + printf("=[restful] thread[%"PRId64"] complete one sql, Spent %f s\n", + taosGetSelfPthreadId(), (t2 - t1)/1000000.0); + + if (0 != retCode) { + printf("====restful return fail, threadID[%d]\n", winfo->threadID); + return NULL; } } + et = taosGetTimestampUs(); printf("==thread[%"PRId64"] complete all sqls to specify tables once queries duration:%.6fs\n\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0); @@ -5698,7 +5772,7 @@ static void replaceSubTblName(char* inSql, char* outSql, int tblIndex) { //printf("3: %s\n", outSql); } -static void *subQueryProcess(void *sarg) { +static void *superQueryProcess(void *sarg) { char sqlstr[1024]; threadInfo *winfo = (threadInfo *)sarg; @@ -5791,43 +5865,45 @@ static int queryTestProcess() { pthread_t *pids = NULL; threadInfo *infos = NULL; //==== create sub threads for query from specify table - if (g_queryInfo.specifiedQueryInfo.sqlCount > 0 - && 
g_queryInfo.specifiedQueryInfo.concurrent > 0) { + int nConcurrent = g_queryInfo.specifiedQueryInfo.concurrent; + int nSqlCount = g_queryInfo.specifiedQueryInfo.sqlCount; - pids = malloc(g_queryInfo.specifiedQueryInfo.concurrent * sizeof(pthread_t)); - if (NULL == pids) { - taos_close(taos); - ERROR_EXIT("memory allocation failed\n"); - } - infos = malloc(g_queryInfo.specifiedQueryInfo.concurrent * sizeof(threadInfo)); - if (NULL == infos) { + if ((nSqlCount > 0) && (nConcurrent > 0)) { + + pids = malloc(nConcurrent * nSqlCount * sizeof(pthread_t)); + infos = malloc(nConcurrent * nSqlCount * sizeof(threadInfo)); + + if ((NULL == pids) || (NULL == infos)) { taos_close(taos); - free(pids); ERROR_EXIT("memory allocation failed for create threads\n"); } - for (int i = 0; i < g_queryInfo.specifiedQueryInfo.concurrent; i++) { - threadInfo *t_info = infos + i; - t_info->threadID = i; + for (int i = 0; i < nConcurrent; i++) { + for (int j = 0; j < nSqlCount; j++) { + threadInfo *t_info = infos + i * nSqlCount + j; + t_info->threadID = i * nSqlCount + j; + t_info->querySeq = j; - if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { + if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { - char sqlStr[MAX_TB_NAME_SIZE*2]; - sprintf(sqlStr, "use %s", g_queryInfo.dbName); - verbosePrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr); - if (0 != queryDbExec(taos, sqlStr, NO_INSERT_TYPE, false)) { + char sqlStr[MAX_TB_NAME_SIZE*2]; + sprintf(sqlStr, "use %s", g_queryInfo.dbName); + verbosePrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr); + if (0 != queryDbExec(taos, sqlStr, NO_INSERT_TYPE, false)) { taos_close(taos); free(infos); free(pids); errorPrint( "use database %s failed!\n\n", g_queryInfo.dbName); return -1; + } } + + t_info->taos = NULL;// TODO: workaround to use separate taos connection; + + pthread_create(pids + i * nSqlCount + j, NULL, specifiedQueryProcess, + t_info); } - - t_info->taos = NULL;// TODO: workaround to use separate taos connection; - - pthread_create(pids + i, NULL, superQueryProcess, t_info); } } else { g_queryInfo.specifiedQueryInfo.concurrent = 0; @@ -5841,18 +5917,12 @@ static int queryTestProcess() { if ((g_queryInfo.superQueryInfo.sqlCount > 0) && (g_queryInfo.superQueryInfo.threadCnt > 0)) { pidsOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt * sizeof(pthread_t)); - if (NULL == pidsOfSub) { - free(infos); - free(pids); - - ERROR_EXIT("memory allocation failed for create threads\n"); - } - infosOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt * sizeof(threadInfo)); - if (NULL == infosOfSub) { - free(pidsOfSub); + + if ((NULL == pidsOfSub) || (NULL == infosOfSub)) { free(infos); free(pids); + ERROR_EXIT("memory allocation failed for create threads\n"); } @@ -5880,7 +5950,7 @@ static int queryTestProcess() { t_info->end_table_to = i < b ? 
startFrom + a : startFrom + a - 1; startFrom = t_info->end_table_to + 1; t_info->taos = NULL; // TODO: workaround to use separate taos connection; - pthread_create(pidsOfSub + i, NULL, subQueryProcess, t_info); + pthread_create(pidsOfSub + i, NULL, superQueryProcess, t_info); } g_queryInfo.superQueryInfo.threadCnt = threads; @@ -5888,8 +5958,12 @@ static int queryTestProcess() { g_queryInfo.superQueryInfo.threadCnt = 0; } - for (int i = 0; i < g_queryInfo.specifiedQueryInfo.concurrent; i++) { - pthread_join(pids[i], NULL); + if ((nSqlCount > 0) && (nConcurrent > 0)) { + for (int i = 0; i < nConcurrent; i++) { + for (int j = 0; j < nSqlCount; j++) { + pthread_join(pids[i * nSqlCount + j], NULL); + } + } } tmfree((char*)pids); @@ -5920,7 +5994,7 @@ static void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int c static TAOS_SUB* subscribeImpl(TAOS *taos, char *sql, char* topic, char* resultFileName) { TAOS_SUB* tsub = NULL; - if (g_queryInfo.specifiedQueryInfo.subscribeMode) { + if (g_queryInfo.specifiedQueryInfo.mode) { tsub = taos_subscribe(taos, g_queryInfo.specifiedQueryInfo.subscribeRestart, topic, sql, subscribe_callback, (void*)resultFileName, @@ -5996,13 +6070,13 @@ static void *subSubscribeProcess(void *sarg) { } //et = taosGetTimestampMs(); //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0); - } while (0); + } while(0); // start loop to consume result TAOS_RES* res = NULL; - while (1) { + while(1) { for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - if (1 == g_queryInfo.superQueryInfo.subscribeMode) { + if (1 == g_queryInfo.superQueryInfo.mode) { continue; } @@ -6073,7 +6147,8 @@ static void *superSubscribeProcess(void *sarg) { sprintf(tmpFile, "%s-%d", g_queryInfo.specifiedQueryInfo.result[i], winfo->threadID); } - tsub[i] = subscribeImpl(winfo->taos, g_queryInfo.specifiedQueryInfo.sql[i], topic, tmpFile); + tsub[i] = subscribeImpl(winfo->taos, + g_queryInfo.specifiedQueryInfo.sql[i], topic, tmpFile); if (NULL == g_queryInfo.specifiedQueryInfo.tsub[i]) { taos_close(winfo->taos); return NULL; @@ -6081,13 +6156,13 @@ static void *superSubscribeProcess(void *sarg) { } //et = taosGetTimestampMs(); //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0); - } while (0); + } while(0); // start loop to consume result TAOS_RES* res = NULL; - while (1) { + while(1) { for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - if (1 == g_queryInfo.specifiedQueryInfo.subscribeMode) { + if (SYNC_QUERY_MODE == g_queryInfo.specifiedQueryInfo.mode) { continue; } @@ -6105,7 +6180,8 @@ static void *superSubscribeProcess(void *sarg) { taos_free_result(res); for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - taos_unsubscribe(tsub[i], g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); + taos_unsubscribe(tsub[i], + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); } taos_close(winfo->taos); @@ -6308,7 +6384,7 @@ static void setParaFromArg(){ g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables; g_Dbs.threadCount = g_args.num_of_threads; g_Dbs.threadCountByCreateTbl = g_args.num_of_threads; - g_Dbs.queryMode = g_args.mode; + g_Dbs.queryMode = g_args.query_mode; g_Dbs.db[0].superTbls[0].autoCreateTable = PRE_CREATE_SUBTBL; g_Dbs.db[0].superTbls[0].childTblExists = TBL_NO_EXISTS; @@ -6410,7 +6486,7 @@ static void querySqlFile(TAOS* taos, 
char* sqlFile) double t = getCurrentTimeUs(); - while ((read_len = tgetline(&line, &line_len, fp)) != -1) { + while((read_len = tgetline(&line, &line_len, fp)) != -1) { if (read_len >= MAX_SQL_SIZE) continue; line[--read_len] = '\0'; @@ -6473,52 +6549,50 @@ static void testMetaFile() { } static void queryResult() { - // select - if (false == g_Dbs.insert_only) { - // query data + // query data - pthread_t read_id; - threadInfo *rInfo = malloc(sizeof(threadInfo)); - rInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000 - rInfo->start_table_from = 0; + pthread_t read_id; + threadInfo *rInfo = malloc(sizeof(threadInfo)); + assert(rInfo); + rInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000 + rInfo->start_table_from = 0; - //rInfo->do_aggreFunc = g_Dbs.do_aggreFunc; - if (g_args.use_metric) { - rInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount; - rInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1; - rInfo->superTblInfo = &g_Dbs.db[0].superTbls[0]; - tstrncpy(rInfo->tb_prefix, - g_Dbs.db[0].superTbls[0].childTblPrefix, MAX_TB_NAME_SIZE); - } else { - rInfo->ntables = g_args.num_of_tables; - rInfo->end_table_to = g_args.num_of_tables -1; - tstrncpy(rInfo->tb_prefix, g_args.tb_prefix, MAX_TB_NAME_SIZE); - } + //rInfo->do_aggreFunc = g_Dbs.do_aggreFunc; + if (g_args.use_metric) { + rInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount; + rInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1; + rInfo->superTblInfo = &g_Dbs.db[0].superTbls[0]; + tstrncpy(rInfo->tb_prefix, + g_Dbs.db[0].superTbls[0].childTblPrefix, MAX_TB_NAME_SIZE); + } else { + rInfo->ntables = g_args.num_of_tables; + rInfo->end_table_to = g_args.num_of_tables -1; + tstrncpy(rInfo->tb_prefix, g_args.tb_prefix, MAX_TB_NAME_SIZE); + } - rInfo->taos = taos_connect( - g_Dbs.host, - g_Dbs.user, - g_Dbs.password, - g_Dbs.db[0].dbName, - g_Dbs.port); - if (rInfo->taos == NULL) { - errorPrint( "Failed to connect to TDengine, reason:%s\n", - taos_errstr(NULL)); - free(rInfo); - exit(-1); - } + rInfo->taos = taos_connect( + g_Dbs.host, + g_Dbs.user, + g_Dbs.password, + g_Dbs.db[0].dbName, + g_Dbs.port); + if (rInfo->taos == NULL) { + errorPrint( "Failed to connect to TDengine, reason:%s\n", + taos_errstr(NULL)); + free(rInfo); + exit(-1); + } - tstrncpy(rInfo->fp, g_Dbs.resultFile, MAX_FILE_NAME_LEN); + tstrncpy(rInfo->fp, g_Dbs.resultFile, MAX_FILE_NAME_LEN); - if (!g_Dbs.use_metric) { - pthread_create(&read_id, NULL, readTable, rInfo); - } else { - pthread_create(&read_id, NULL, readMetric, rInfo); - } - pthread_join(read_id, NULL); - taos_close(rInfo->taos); - free(rInfo); - } + if (!g_Dbs.use_metric) { + pthread_create(&read_id, NULL, readTable, rInfo); + } else { + pthread_create(&read_id, NULL, readMetric, rInfo); + } + pthread_join(read_id, NULL); + taos_close(rInfo->taos); + free(rInfo); } static void testCmdLine() { @@ -6536,9 +6610,7 @@ static void testCmdLine() { g_args.test_mode = INSERT_TEST; insertTestProcess(); - if (g_Dbs.insert_only) - return; - else + if (false == g_Dbs.insert_only) queryResult(); } From 590b901518a1bdbf32ad42b6bae13a1a3462b9a7 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 8 Apr 2021 21:52:07 +0800 Subject: [PATCH 116/178] Feature/sangshuduo/td 3408 taosdemo async query (#5731) * test * [TD-3677]: test pr message 1 * [TD-3671]change target branch * [TD-3677]: test pr message 2 * [TD-3677]: test pr message 3 * Hotfix/sangshuduo/td 3197 fix taosdemo coverity scan (#5688) * [TD-3197] : fix taosdemo coverity scan issues. 
* [TD-3197] : fix taosdemo coverity scan issue. fix subscribeTest pids uninitialized. * [TD-3197] : fix taosdemo coverity scan issues. * [TD-3197] : fix coverity scan issues. check super tbl info pointer. * [TD-3197] : fix coverity scan issues. move sub tbl query thread join into loop * [TD-3197] : fix coverity scan issues. remove unused variable * [TD-3197] : fix coverity scan issues. use more secure random library * [TD-3197] : fix coverity scan issues. use strncpy for more safety * [TD-3197] : fix taosdemo coverity scan issue. replace arc4random with rand(). * [TD-3197] : fix coverity scan issues. check stb info pointer for start time * [TD-3197] : fix coverity scan issues. fix strcpy vulnerability * [TD-3197] : fix taosdemo coverity scan issue. modify taosdemoTest2. try to check database continuously. * [TD-3197] : taosdemo coverity scan issues. * [TD-3197] : fix memory leak when parsing arguments. * [TD-3197] : fix cmake strip arguments. * [TD-3197] : taosdemo coverity scan. fix cmake string manipulation. Co-authored-by: Shuduo Sang * remove useless file * fix changing target branch * fix * fix * Hotfix/sangshuduo/td 3607 taosdemo buffer overflow (#5706) * [TD-3607] : fix taosdemo buffer overflow. * [TD-3607] : taosdemo buffer overflow. add tmp buffer. * [TD-3607] : taosdemo buffer overflow. fix data generation. * [TD-3607] : taosdemo buffer overflow. fix normal table writing. * [TD-3607] : taosdemo buffer overflow. remove tail spaces. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table test case. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table case. * [TD-3607] : taosdemo buffer overflow. adjust limit offset count warning. * [TD-3607] : taosdemo buffer overflow. add more logic for when child tables exist. * [TD-3607] : taosdemo buffer overflow. create database only if the database has been dropped. * [TD-3607] : fix taosdemo buffer overflow. adjust limit and offset test cases. * [TD-3607] : taosdemo buffer overflow. adjust sample data test case. * [TD-3607]: taosdemo limit and offset. if limit+offset > count Co-authored-by: Shuduo Sang * Hotfix/sangshuduo/td 3607 taosdemo buffer overflow (#5713) * [TD-3607] : fix taosdemo buffer overflow. * [TD-3607] : taosdemo buffer overflow. add tmp buffer. * [TD-3607] : taosdemo buffer overflow. fix data generation. * [TD-3607] : taosdemo buffer overflow. fix normal table writing. * [TD-3607] : taosdemo buffer overflow. remove tail spaces. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table test case. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table case. * [TD-3607] : taosdemo buffer overflow. adjust limit offset count warning. * [TD-3607] : taosdemo buffer overflow. add more logic for when child tables exist. * [TD-3607] : taosdemo buffer overflow. create database only if the database has been dropped. * [TD-3607] : fix taosdemo buffer overflow. adjust limit and offset test cases. * [TD-3607] : taosdemo buffer overflow. adjust sample data test case. * [TD-3607]: taosdemo limit and offset. if limit+offset > count * [TD-3607]: taosdemo limit and offset. if child tbl not exist, don't take limit and offset value. Co-authored-by: Shuduo Sang * [TD-3683]: reduce buffer size for more stable table creation. (#5719) Co-authored-by: Shuduo Sang * [TD-3408]: taosdemo support async query. * [TD-3408]: taosdemo support async query. refactor * [TD-3408]: taosdemo support async query. refactor 2 * [TD-3408]: taosdemo support async query. refactor 3 * [TD-3408]: taosdemo support async query.
refactor 4 * [TD-3408]: taosdemo support specified sql more than one line. Co-authored-by: huili <52318143+plum-lihui@users.noreply.github.com> Co-authored-by: liuyq-617 Co-authored-by: plum-lihui Co-authored-by: Elias Soong Co-authored-by: Shuduo Sang Co-authored-by: Shengliang Guan --- src/kit/taosdemo/taosdemo.c | 502 +++++++++++++++++++++--------------- 1 file changed, 287 insertions(+), 215 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 59b31d6172..6939f5dae5 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -67,6 +67,12 @@ enum TEST_MODE { INVAID_TEST }; +enum QUERY_MODE { + SYNC_QUERY_MODE, // 0 + ASYNC_QUERY_MODE, // 1 + INVALID_MODE +}; + #define MAX_SQL_SIZE 65536 #define BUFFER_SIZE (65536*2) #define MAX_USERNAME_SIZE 64 @@ -198,7 +204,7 @@ typedef struct SArguments_S { bool verbose_print; bool performance_print; char * output_file; - int mode; + int query_mode; char * datatype[MAX_NUM_DATATYPE + 1]; int len_of_binary; int num_of_CPR; @@ -351,7 +357,7 @@ typedef struct SpecifiedQueryInfo_S { int rate; // 0: unlimit > 0 loop/s int concurrent; int sqlCount; - int subscribeMode; // 0: sync, 1: async + int mode; // 0: sync, 1: async int subscribeInterval; // ms int queryTimes; int subscribeRestart; @@ -365,7 +371,7 @@ typedef struct SuperQueryInfo_S { char sTblName[MAX_TB_NAME_SIZE+1]; int rate; // 0: unlimit > 0 loop/s int threadCnt; - int subscribeMode; // 0: sync, 1: async + int mode; // 0: sync, 1: async int subscribeInterval; // ms int subscribeRestart; int subscribeKeepProgress; @@ -429,6 +435,8 @@ typedef struct SThreadInfo_S { int64_t maxDelay; int64_t minDelay; + // query + int querySeq; // sequence number of sql command } threadInfo; #ifdef WINDOWS @@ -714,7 +722,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } else if (strcmp(argv[i], "-s") == 0) { arguments->sqlFile = argv[++i]; } else if (strcmp(argv[i], "-q") == 0) { - arguments->mode = atoi(argv[++i]); + arguments->query_mode = atoi(argv[++i]); } else if (strcmp(argv[i], "-T") == 0) { arguments->num_of_threads = atoi(argv[++i]); } else if (strcmp(argv[i], "-i") == 0) { @@ -758,7 +766,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { char *dupstr = strdup(argv[i]); char *running = dupstr; char *token = strsep(&running, ","); - while (token != NULL) { + while(token != NULL) { if (strcasecmp(token, "INT") && strcasecmp(token, "FLOAT") && strcasecmp(token, "TINYINT") @@ -964,7 +972,7 @@ static void getResult(TAOS_RES *res, char* resultFileName) { char temp[16000]; // fetch the records row by row - while ((row = taos_fetch_row(res))) { + while((row = taos_fetch_row(res))) { if (totalLen >= 100*1024*1024 - 32000) { if (fp) fprintf(fp, "%s", databuf); totalLen = 0; @@ -986,7 +994,8 @@ static void getResult(TAOS_RES *res, char* resultFileName) { static void selectAndGetResult(TAOS *taos, char *command, char* resultFileName) { TAOS_RES *res = taos_query(taos, command); if (res == NULL || taos_errno(res) != 0) { - printf("failed to sql:%s, reason:%s\n", command, taos_errstr(res)); + errorPrint("%s() LN%d, failed to execute sql:%s, reason:%s\n", + __func__, __LINE__, command, taos_errstr(res)); taos_free_result(res); return; } @@ -1163,7 +1172,8 @@ static int printfInsertMeta() { if (g_Dbs.db[i].dbCfg.precision[0] != 0) { if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) { - printf(" precision: \033[33m%s\033[0m\n", 
g_Dbs.db[i].dbCfg.precision); + printf(" precision: \033[33m%s\033[0m\n", + g_Dbs.db[i].dbCfg.precision); } else { printf("\033[1m\033[40;31m precision error: %s\033[0m\n", g_Dbs.db[i].dbCfg.precision); @@ -1171,11 +1181,13 @@ static int printfInsertMeta() { } } - printf(" super table count: \033[33m%d\033[0m\n", g_Dbs.db[i].superTblCount); + printf(" super table count: \033[33m%d\033[0m\n", + g_Dbs.db[i].superTblCount); for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { printf(" super table[\033[33m%d\033[0m]:\n", j); - printf(" stbName: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].sTblName); + printf(" stbName: \033[33m%s\033[0m\n", + g_Dbs.db[i].superTbls[j].sTblName); if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { printf(" autoCreateTable: \033[33m%s\033[0m\n", "no"); @@ -1241,7 +1253,7 @@ static int printfInsertMeta() { g_Dbs.db[i].superTbls[j].sampleFile); printf(" tagsFile: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].tagsFile); - printf(" columnCount: \033[33m%d\033[0m\n ", + printf(" columnCount: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].columnCount); for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) { //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen); @@ -1459,41 +1471,61 @@ static void printfQueryMeta() { printf("\n"); printf("specified table query info: \n"); - printf("query interval: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.rate); + printf("query interval: \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.rate); printf("top query times:\033[33m%d\033[0m\n", g_args.query_times); - printf("concurrent: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.concurrent); - printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.sqlCount); + printf("concurrent: \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.concurrent); + printf("sqlCount: \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.sqlCount); printf("specified tbl query times:\n"); - printf(" \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.queryTimes); + printf(" \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.queryTimes); if (SUBSCRIBE_TEST == g_args.test_mode) { - printf("mod: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeMode); - printf("interval: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeInterval); - printf("restart: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeRestart); - printf("keepProgress: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); + printf("mod: \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.mode); + printf("interval: \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.subscribeInterval); + printf("restart: \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.subscribeRestart); + printf("keepProgress: \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); } for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - printf(" sql[%d]: \033[33m%s\033[0m\n", i, g_queryInfo.specifiedQueryInfo.sql[i]); + printf(" sql[%d]: \033[33m%s\033[0m\n", + i, g_queryInfo.specifiedQueryInfo.sql[i]); } printf("\n"); - printf("super table query info: \n"); - printf("query interval: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.rate); - printf("threadCnt: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.threadCnt); - printf("childTblCount: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.childTblCount); - printf("stable 
name: \033[33m%s\033[0m\n", g_queryInfo.superQueryInfo.sTblName); - printf("stb query times:\033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.queryTimes); + printf("super table query info:\n"); + printf("query interval: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.rate); + printf("threadCnt: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.threadCnt); + printf("childTblCount: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.childTblCount); + printf("stable name: \033[33m%s\033[0m\n", + g_queryInfo.superQueryInfo.sTblName); + printf("stb query times:\033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.queryTimes); if (SUBSCRIBE_TEST == g_args.test_mode) { - printf("mod: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeMode); - printf("interval: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeInterval); - printf("restart: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeRestart); - printf("keepProgress: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeKeepProgress); + printf("mod: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.mode); + printf("interval: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.subscribeInterval); + printf("restart: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.subscribeRestart); + printf("keepProgress: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.subscribeKeepProgress); } - printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.sqlCount); + printf("sqlCount: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.sqlCount); for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - printf(" sql[%d]: \033[33m%s\033[0m\n", i, g_queryInfo.superQueryInfo.sql[i]); + printf(" sql[%d]: \033[33m%s\033[0m\n", + i, g_queryInfo.superQueryInfo.sql[i]); } printf("\n"); @@ -1637,7 +1669,7 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { TAOS_FIELD *fields = taos_fetch_fields(res); - while ((row = taos_fetch_row(res)) != NULL) { + while((row = taos_fetch_row(res)) != NULL) { // sys database name : 'log' if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) { @@ -1670,7 +1702,8 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]); dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]); dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); - dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); + dbInfos[count]->cachelast = + (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); tstrncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], @@ -1681,7 +1714,8 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { count++; if (count > MAX_DATABASE_COUNT) { - errorPrint( "The database count overflow than %d\n", MAX_DATABASE_COUNT); + errorPrint("%s() LN%d, The database count overflow than %d\n", + __func__, __LINE__, MAX_DATABASE_COUNT); break; } } @@ -1691,6 +1725,7 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { static void printfDbInfoForQueryToFile( char* filename, SDbInfo* dbInfos, int index) { + if (filename[0] == 0) return; @@ -1909,7 +1944,7 @@ static int postProceSql(char* host, uint16_t port, char* sqlstr) if (bytes == 0) break; sent+=bytes; - } while (sent < req_str_len); + } while(sent < req_str_len); memset(response_buf, 0, RESP_BUF_LEN); resp_len = sizeof(response_buf) - 1; @@ -1927,7 +1962,7 @@ static int postProceSql(char* host, uint16_t port, 
char* sqlstr) if (bytes == 0) break; received += bytes; - } while (received < resp_len); + } while(received < resp_len); if (received == resp_len) { free(request_buf); @@ -1951,7 +1986,8 @@ static int postProceSql(char* host, uint16_t port, char* sqlstr) static char* getTagValueFromTagSample(SSuperTable* stbInfo, int tagUsePos) { char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1); if (NULL == dataBuf) { - errorPrint("%s() LN%d, calloc failed! size:%d\n", __func__, __LINE__, TSDB_MAX_SQL_LEN+1); + errorPrint("%s() LN%d, calloc failed! size:%d\n", + __func__, __LINE__, TSDB_MAX_SQL_LEN+1); return NULL; } @@ -2155,7 +2191,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, } char* pTblName = childTblName; - while ((row = taos_fetch_row(res)) != NULL) { + while((row = taos_fetch_row(res)) != NULL) { int32_t* len = taos_fetch_lengths(res); tstrncpy(pTblName, (char *)row[0], len[0]+1); //printf("==== sub table name: %s\n", pTblName); @@ -2218,7 +2254,7 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName, int tagIndex = 0; int columnIndex = 0; TAOS_FIELD *fields = taos_fetch_fields(res); - while ((row = taos_fetch_row(res)) != NULL) { + while((row = taos_fetch_row(res)) != NULL) { if (0 == count) { count++; continue; @@ -2765,7 +2801,7 @@ static void createChildTables() { // normal table len = snprintf(tblColsBuf, MAX_SQL_SIZE, "(TS TIMESTAMP"); int j = 0; - while (g_args.datatype[j]) { + while(g_args.datatype[j]) { if ((strncasecmp(g_args.datatype[j], "BINARY", strlen("BINARY")) == 0) || (strncasecmp(g_args.datatype[j], "NCHAR", strlen("NCHAR")) == 0)) { @@ -2824,7 +2860,7 @@ static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) { return -1; } - while ((readLen = tgetline(&line, &n, fp)) != -1) { + while((readLen = tgetline(&line, &n, fp)) != -1) { if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { line[--readLen] = 0; } @@ -2888,7 +2924,7 @@ static int readSampleFromCsvFileToMem( assert(superTblInfo->sampleDataBuf); memset(superTblInfo->sampleDataBuf, 0, MAX_SAMPLES_ONCE_FROM_FILE * superTblInfo->lenOfOneRow); - while (1) { + while(1) { readLen = tgetline(&line, &n, fp); if (-1 == readLen) { if(0 != fseek(fp, 0, SEEK_SET)) { @@ -2967,7 +3003,8 @@ static bool getColumnAndTagTypeFromInsertJsonFile( if (countObj && countObj->type == cJSON_Number) { count = countObj->valueint; } else if (countObj && countObj->type != cJSON_Number) { - errorPrint("%s() LN%d, failed to read json, column count not found\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, column count not found\n", + __func__, __LINE__); goto PARSE_OVER; } else { count = 1; @@ -2976,8 +3013,10 @@ static bool getColumnAndTagTypeFromInsertJsonFile( // column info memset(&columnCase, 0, sizeof(StrColumn)); cJSON *dataType = cJSON_GetObjectItem(column, "type"); - if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) { - errorPrint("%s() LN%d: failed to read json, column type not found\n", __func__, __LINE__); + if (!dataType || dataType->type != cJSON_String + || dataType->valuestring == NULL) { + errorPrint("%s() LN%d: failed to read json, column type not found\n", + __func__, __LINE__); goto PARSE_OVER; } //tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, MAX_TB_NAME_SIZE); @@ -2987,7 +3026,8 @@ static bool getColumnAndTagTypeFromInsertJsonFile( if (dataLen && dataLen->type == cJSON_Number) { columnCase.dataLen = dataLen->valueint; } else if (dataLen && dataLen->type != cJSON_Number) { - debugPrint("%s() LN%d: failed to 
read json, column len not found\n", __func__, __LINE__); + debugPrint("%s() LN%d: failed to read json, column len not found\n", + __func__, __LINE__); goto PARSE_OVER; } else { columnCase.dataLen = 8; @@ -3007,13 +3047,15 @@ static bool getColumnAndTagTypeFromInsertJsonFile( // tags cJSON *tags = cJSON_GetObjectItem(stbInfo, "tags"); if (!tags || tags->type != cJSON_Array) { - debugPrint("%s() LN%d, failed to read json, tags not found\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, tags not found\n", + __func__, __LINE__); goto PARSE_OVER; } int tagSize = cJSON_GetArraySize(tags); if (tagSize > MAX_TAG_COUNT) { - debugPrint("%s() LN%d, failed to read json, tags size overflow, max tag size is %d\n", __func__, __LINE__, MAX_TAG_COUNT); + errorPrint("%s() LN%d, failed to read json, tags size overflow, max tag size is %d\n", + __func__, __LINE__, MAX_TAG_COUNT); goto PARSE_OVER; } @@ -3036,8 +3078,10 @@ static bool getColumnAndTagTypeFromInsertJsonFile( // column info memset(&columnCase, 0, sizeof(StrColumn)); cJSON *dataType = cJSON_GetObjectItem(tag, "type"); - if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) { - printf("ERROR: failed to read json, tag type not found\n"); + if (!dataType || dataType->type != cJSON_String + || dataType->valuestring == NULL) { + errorPrint("%s() LN%d, failed to read json, tag type not found\n", + __func__, __LINE__); goto PARSE_OVER; } tstrncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE); @@ -3046,14 +3090,16 @@ static bool getColumnAndTagTypeFromInsertJsonFile( if (dataLen && dataLen->type == cJSON_Number) { columnCase.dataLen = dataLen->valueint; } else if (dataLen && dataLen->type != cJSON_Number) { - printf("ERROR: failed to read json, column len not found\n"); + errorPrint("%s() LN%d, failed to read json, column len not found\n", + __func__, __LINE__); goto PARSE_OVER; } else { columnCase.dataLen = 0; } for (int n = 0; n < count; ++n) { - tstrncpy(superTbls->tags[index].dataType, columnCase.dataType, MAX_TB_NAME_SIZE); + tstrncpy(superTbls->tags[index].dataType, columnCase.dataType, + MAX_TB_NAME_SIZE); superTbls->tags[index].dataLen = columnCase.dataLen; index++; } @@ -3063,9 +3109,6 @@ static bool getColumnAndTagTypeFromInsertJsonFile( ret = true; PARSE_OVER: - //free(content); - //cJSON_Delete(root); - //fclose(fp); return ret; } @@ -3142,7 +3185,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!gInsertInterval) { g_args.insert_interval = 0; } else { - errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n", + __func__, __LINE__); goto PARSE_OVER; } @@ -3163,7 +3207,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!interlaceRows) { g_args.interlace_rows = 0; // 0 means progressive mode, > 0 mean interlace mode. 
max value is less or equ num_of_records_per_req } else { - errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n", + __func__, __LINE__); goto PARSE_OVER; } @@ -3173,7 +3218,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!maxSqlLen) { g_args.max_sql_len = TSDB_PAYLOAD_SIZE; } else { - errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n", + __func__, __LINE__); goto PARSE_OVER; } @@ -3183,7 +3229,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!numRecPerReq) { g_args.num_of_RPR = 0xffff; } else { - errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n", + __func__, __LINE__); goto PARSE_OVER; } @@ -3509,7 +3556,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!dataSource) { tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", MAX_DB_NAME_SIZE); } else { - errorPrint("%s() LN%d, failed to read json, data_source not found\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, data_source not found\n", + __func__, __LINE__); goto PARSE_OVER; } @@ -3584,7 +3632,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } cJSON *sampleFile = cJSON_GetObjectItem(stbInfo, "sample_file"); - if (sampleFile && sampleFile->type == cJSON_String && sampleFile->valuestring != NULL) { + if (sampleFile && sampleFile->type == cJSON_String + && sampleFile->valuestring != NULL) { tstrncpy(g_Dbs.db[i].superTbls[j].sampleFile, sampleFile->valuestring, MAX_FILE_NAME_LEN); } else if (!sampleFile) { @@ -3727,9 +3776,6 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { ret = true; PARSE_OVER: - //free(content); - //cJSON_Delete(root); - //fclose(fp); return ret; } @@ -3795,7 +3841,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } else if (!gQueryTimes) { g_args.query_times = 1; } else { - errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", + __func__, __LINE__); goto PARSE_OVER; } @@ -3833,35 +3880,45 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { g_queryInfo.specifiedQueryInfo.rate = 0; } - cJSON* specifiedQueryTimes = cJSON_GetObjectItem(specifiedQuery, "query_times"); + cJSON* specifiedQueryTimes = cJSON_GetObjectItem(specifiedQuery, + "query_times"); if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) { g_queryInfo.specifiedQueryInfo.queryTimes = specifiedQueryTimes->valueint; } else if (!specifiedQueryTimes) { g_queryInfo.specifiedQueryInfo.queryTimes = g_args.query_times; } else { - errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", + __func__, __LINE__); goto PARSE_OVER; } cJSON* concurrent = cJSON_GetObjectItem(specifiedQuery, "concurrent"); if (concurrent && concurrent->type == cJSON_Number) { g_queryInfo.specifiedQueryInfo.concurrent = concurrent->valueint; + if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) { + errorPrint("%s() LN%d, query sqlCount %d or concurrent %d is not correct.\n", + __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount, + 
g_queryInfo.specifiedQueryInfo.concurrent); + goto PARSE_OVER; + } } else if (!concurrent) { g_queryInfo.specifiedQueryInfo.concurrent = 1; } - cJSON* mode = cJSON_GetObjectItem(specifiedQuery, "mode"); - if (mode && mode->type == cJSON_String && mode->valuestring != NULL) { - if (0 == strcmp("sync", mode->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeMode = 0; - } else if (0 == strcmp("async", mode->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeMode = 1; + cJSON* queryMode = cJSON_GetObjectItem(specifiedQuery, "mode"); + if (queryMode && queryMode->type == cJSON_String + && queryMode->valuestring != NULL) { + if (0 == strcmp("sync", queryMode->valuestring)) { + g_queryInfo.specifiedQueryInfo.mode = SYNC_QUERY_MODE; + } else if (0 == strcmp("async", queryMode->valuestring)) { + g_queryInfo.specifiedQueryInfo.mode = ASYNC_QUERY_MODE; } else { - printf("ERROR: failed to read json, subscribe mod error\n"); + errorPrint("%s() LN%d, failed to read json, query mode input error\n", + __func__, __LINE__); goto PARSE_OVER; } } else { - g_queryInfo.specifiedQueryInfo.subscribeMode = 0; + g_queryInfo.specifiedQueryInfo.mode = SYNC_QUERY_MODE; } cJSON* interval = cJSON_GetObjectItem(specifiedQuery, "interval"); @@ -3908,12 +3965,14 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { if (!superSqls) { g_queryInfo.specifiedQueryInfo.sqlCount = 0; } else if (superSqls->type != cJSON_Array) { - printf("ERROR: failed to read json, super sqls not found\n"); + errorPrint("%s() LN%d, failed to read json, super sqls not found\n", + __func__, __LINE__); goto PARSE_OVER; } else { int superSqlSize = cJSON_GetArraySize(superSqls); if (superSqlSize > MAX_QUERY_SQL_COUNT) { - printf("ERROR: failed to read json, query sql size overflow, max is %d\n", MAX_QUERY_SQL_COUNT); + errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n", + __func__, __LINE__, MAX_QUERY_SQL_COUNT); goto PARSE_OVER; } @@ -3965,7 +4024,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } else if (!superQueryTimes) { g_queryInfo.superQueryInfo.queryTimes = g_args.query_times; } else { - errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", + __func__, __LINE__); goto PARSE_OVER; } @@ -3984,25 +4044,30 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { //} cJSON* stblname = cJSON_GetObjectItem(superQuery, "stblname"); - if (stblname && stblname->type == cJSON_String && stblname->valuestring != NULL) { - tstrncpy(g_queryInfo.superQueryInfo.sTblName, stblname->valuestring, MAX_TB_NAME_SIZE); + if (stblname && stblname->type == cJSON_String + && stblname->valuestring != NULL) { + tstrncpy(g_queryInfo.superQueryInfo.sTblName, stblname->valuestring, + MAX_TB_NAME_SIZE); } else { - printf("ERROR: failed to read json, super table name not found\n"); + errorPrint("%s() LN%d, failed to read json, super table name input error\n", + __func__, __LINE__); goto PARSE_OVER; } cJSON* submode = cJSON_GetObjectItem(superQuery, "mode"); - if (submode && submode->type == cJSON_String && submode->valuestring != NULL) { + if (submode && submode->type == cJSON_String + && submode->valuestring != NULL) { if (0 == strcmp("sync", submode->valuestring)) { - g_queryInfo.superQueryInfo.subscribeMode = 0; + g_queryInfo.superQueryInfo.mode = SYNC_QUERY_MODE; } else if (0 == strcmp("async", submode->valuestring)) { - g_queryInfo.superQueryInfo.subscribeMode = 1; + g_queryInfo.superQueryInfo.mode = 
ASYNC_QUERY_MODE; } else { - printf("ERROR: failed to read json, subscribe mod error\n"); + errorPrint("%s() LN%d, failed to read json, query mode input error\n", + __func__, __LINE__); goto PARSE_OVER; } } else { - g_queryInfo.superQueryInfo.subscribeMode = 0; + g_queryInfo.superQueryInfo.mode = SYNC_QUERY_MODE; } cJSON* subinterval = cJSON_GetObjectItem(superQuery, "interval"); @@ -4015,7 +4080,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } cJSON* subrestart = cJSON_GetObjectItem(superQuery, "restart"); - if (subrestart && subrestart->type == cJSON_String && subrestart->valuestring != NULL) { + if (subrestart && subrestart->type == cJSON_String + && subrestart->valuestring != NULL) { if (0 == strcmp("yes", subrestart->valuestring)) { g_queryInfo.superQueryInfo.subscribeRestart = 1; } else if (0 == strcmp("no", subrestart->valuestring)) { @@ -4049,12 +4115,14 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { if (!subsqls) { g_queryInfo.superQueryInfo.sqlCount = 0; } else if (subsqls->type != cJSON_Array) { - printf("ERROR: failed to read json, super sqls not found\n"); + errorPrint("%s() LN%d: failed to read json, super sqls not found\n", + __func__, __LINE__); goto PARSE_OVER; } else { int superSqlSize = cJSON_GetArraySize(subsqls); if (superSqlSize > MAX_QUERY_SQL_COUNT) { - printf("ERROR: failed to read json, query sql size overflow, max is %d\n", MAX_QUERY_SQL_COUNT); + errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n", + __func__, __LINE__, MAX_QUERY_SQL_COUNT); goto PARSE_OVER; } @@ -4064,19 +4132,25 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { if (sql == NULL) continue; cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); - if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) { - printf("ERROR: failed to read json, sql not found\n"); + if (!sqlStr || sqlStr->type != cJSON_String + || sqlStr->valuestring == NULL) { + errorPrint("%s() LN%d, failed to read json, sql not found\n", + __func__, __LINE__); goto PARSE_OVER; } - tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH); + tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, + MAX_QUERY_SQL_LENGTH); cJSON *result = cJSON_GetObjectItem(sql, "result"); - if (result != NULL && result->type == cJSON_String && result->valuestring != NULL){ - tstrncpy(g_queryInfo.superQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN); + if (result != NULL && result->type == cJSON_String + && result->valuestring != NULL){ + tstrncpy(g_queryInfo.superQueryInfo.result[j], + result->valuestring, MAX_FILE_NAME_LEN); } else if (NULL == result) { memset(g_queryInfo.superQueryInfo.result[j], 0, MAX_FILE_NAME_LEN); } else { - printf("ERROR: failed to read json, sub query result file not found\n"); + errorPrint("%s() LN%d, failed to read json, sub query result file not found\n", + __func__, __LINE__); goto PARSE_OVER; } } @@ -4086,9 +4160,6 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { ret = true; PARSE_OVER: - //free(content); - //cJSON_Delete(root); - //fclose(fp); return ret; } @@ -5415,7 +5486,7 @@ static void *readTable(void *sarg) { return NULL; } - while (taos_fetch_row(pSql) != NULL) { + while(taos_fetch_row(pSql) != NULL) { count++; } @@ -5491,7 +5562,7 @@ static void *readMetric(void *sarg) { return NULL; } int count = 0; - while (taos_fetch_row(pSql) != NULL) { + while(taos_fetch_row(pSql) != NULL) { count++; } t = getCurrentTimeUs() - t; @@ -5602,7 +5673,7 @@ static int insertTestProcess() { return 0; } 
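/*
 * Illustrative sketch, not part of the patch: the hunks below rename
 * superQueryProcess() to specifiedQueryProcess() and pin each worker thread
 * to a single SQL statement through the new querySeq field, while
 * queryTestProcess() now launches concurrent * sqlCount threads. The
 * self-contained pthread program here reproduces that thread-grid pattern;
 * the names QuerySlot, run_one_query, NUM_SQL and NUM_CONCURRENT are invented
 * for this example and do not exist in taosdemo.c.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_SQL        3   /* stands in for specifiedQueryInfo.sqlCount   */
#define NUM_CONCURRENT 2   /* stands in for specifiedQueryInfo.concurrent */

typedef struct {
    int threadID;   /* flat index i * NUM_SQL + j, mirroring the patch */
    int querySeq;   /* which SQL statement this thread owns            */
} QuerySlot;

static void *run_one_query(void *arg) {
    QuerySlot *slot = (QuerySlot *)arg;
    /* a real worker would execute sql[slot->querySeq] here */
    printf("thread %d runs sql[%d]\n", slot->threadID, slot->querySeq);
    return NULL;
}

int main(void) {
    pthread_t *pids  = malloc(NUM_CONCURRENT * NUM_SQL * sizeof(pthread_t));
    QuerySlot *slots = malloc(NUM_CONCURRENT * NUM_SQL * sizeof(QuerySlot));
    if ((NULL == pids) || (NULL == slots)) {
        fprintf(stderr, "memory allocation failed\n");
        return 1;
    }

    /* one thread per (concurrency level, sql index) pair, as in the patch */
    for (int i = 0; i < NUM_CONCURRENT; i++) {
        for (int j = 0; j < NUM_SQL; j++) {
            QuerySlot *slot = slots + i * NUM_SQL + j;
            slot->threadID = i * NUM_SQL + j;
            slot->querySeq = j;
            pthread_create(pids + i * NUM_SQL + j, NULL, run_one_query, slot);
        }
    }

    /* join with the same flat indexing, as the reworked join loop does */
    for (int i = 0; i < NUM_CONCURRENT; i++)
        for (int j = 0; j < NUM_SQL; j++)
            pthread_join(pids[i * NUM_SQL + j], NULL);

    free(pids);
    free(slots);
    return 0;
}
/* Build with: cc -pthread sketch.c — every statement gets NUM_CONCURRENT
 * independent threads, which is what the new layout buys over the old
 * one-thread-loops-over-all-sqls scheme. */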
-static void *superQueryProcess(void *sarg) { +static void *specifiedQueryProcess(void *sarg) { threadInfo *winfo = (threadInfo *)sarg; if (winfo->taos == NULL) { @@ -5643,32 +5714,35 @@ static void *superQueryProcess(void *sarg) { } st = taosGetTimestampUs(); - for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { - int64_t t1 = taosGetTimestampUs(); - char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.specifiedQueryInfo.result[i][0] != 0) { - sprintf(tmpFile, "%s-%d", - g_queryInfo.specifiedQueryInfo.result[i], winfo->threadID); - } - selectAndGetResult(winfo->taos, g_queryInfo.specifiedQueryInfo.sql[i], tmpFile); - int64_t t2 = taosGetTimestampUs(); - printf("=[taosc] thread[%"PRId64"] complete one sql, Spent %f s\n", - taosGetSelfPthreadId(), (t2 - t1)/1000000.0); - } else { - int64_t t1 = taosGetTimestampUs(); - int retCode = postProceSql(g_queryInfo.host, - g_queryInfo.port, g_queryInfo.specifiedQueryInfo.sql[i]); - int64_t t2 = taosGetTimestampUs(); - printf("=[restful] thread[%"PRId64"] complete one sql, Spent %f s\n", - taosGetSelfPthreadId(), (t2 - t1)/1000000.0); - if (0 != retCode) { - printf("====restful return fail, threadID[%d]\n", winfo->threadID); - return NULL; - } + if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { + int64_t t1 = taosGetTimestampUs(); + char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; + if (g_queryInfo.specifiedQueryInfo.result[winfo->querySeq][0] != 0) { + sprintf(tmpFile, "%s-%d", + g_queryInfo.specifiedQueryInfo.result[winfo->querySeq], + winfo->threadID); + } + selectAndGetResult(winfo->taos, + g_queryInfo.specifiedQueryInfo.sql[winfo->querySeq], tmpFile); + int64_t t2 = taosGetTimestampUs(); + printf("=[taosc] thread[%"PRId64"] complete one sql, Spent %f s\n", + taosGetSelfPthreadId(), (t2 - t1)/1000000.0); + } else { + int64_t t1 = taosGetTimestampUs(); + int retCode = postProceSql(g_queryInfo.host, + g_queryInfo.port, + g_queryInfo.specifiedQueryInfo.sql[winfo->querySeq]); + int64_t t2 = taosGetTimestampUs(); + printf("=[restful] thread[%"PRId64"] complete one sql, Spent %f s\n", + taosGetSelfPthreadId(), (t2 - t1)/1000000.0); + + if (0 != retCode) { + printf("====restful return fail, threadID[%d]\n", winfo->threadID); + return NULL; } } + et = taosGetTimestampUs(); printf("==thread[%"PRId64"] complete all sqls to specify tables once queries duration:%.6fs\n\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0); @@ -5698,7 +5772,7 @@ static void replaceSubTblName(char* inSql, char* outSql, int tblIndex) { //printf("3: %s\n", outSql); } -static void *subQueryProcess(void *sarg) { +static void *superQueryProcess(void *sarg) { char sqlstr[1024]; threadInfo *winfo = (threadInfo *)sarg; @@ -5791,43 +5865,45 @@ static int queryTestProcess() { pthread_t *pids = NULL; threadInfo *infos = NULL; //==== create sub threads for query from specify table - if (g_queryInfo.specifiedQueryInfo.sqlCount > 0 - && g_queryInfo.specifiedQueryInfo.concurrent > 0) { + int nConcurrent = g_queryInfo.specifiedQueryInfo.concurrent; + int nSqlCount = g_queryInfo.specifiedQueryInfo.sqlCount; - pids = malloc(g_queryInfo.specifiedQueryInfo.concurrent * sizeof(pthread_t)); - if (NULL == pids) { - taos_close(taos); - ERROR_EXIT("memory allocation failed\n"); - } - infos = malloc(g_queryInfo.specifiedQueryInfo.concurrent * sizeof(threadInfo)); - if (NULL == infos) { + if ((nSqlCount > 0) && (nConcurrent > 0)) { + + pids = malloc(nConcurrent * nSqlCount * sizeof(pthread_t)); + infos = 
malloc(nConcurrent * nSqlCount * sizeof(threadInfo)); + + if ((NULL == pids) || (NULL == infos)) { taos_close(taos); - free(pids); ERROR_EXIT("memory allocation failed for create threads\n"); } - for (int i = 0; i < g_queryInfo.specifiedQueryInfo.concurrent; i++) { - threadInfo *t_info = infos + i; - t_info->threadID = i; + for (int i = 0; i < nConcurrent; i++) { + for (int j = 0; j < nSqlCount; j++) { + threadInfo *t_info = infos + i * nSqlCount + j; + t_info->threadID = i * nSqlCount + j; + t_info->querySeq = j; - if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { + if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { - char sqlStr[MAX_TB_NAME_SIZE*2]; - sprintf(sqlStr, "use %s", g_queryInfo.dbName); - verbosePrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr); - if (0 != queryDbExec(taos, sqlStr, NO_INSERT_TYPE, false)) { + char sqlStr[MAX_TB_NAME_SIZE*2]; + sprintf(sqlStr, "use %s", g_queryInfo.dbName); + verbosePrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr); + if (0 != queryDbExec(taos, sqlStr, NO_INSERT_TYPE, false)) { taos_close(taos); free(infos); free(pids); errorPrint( "use database %s failed!\n\n", g_queryInfo.dbName); return -1; + } } + + t_info->taos = NULL;// TODO: workaround to use separate taos connection; + + pthread_create(pids + i * nSqlCount + j, NULL, specifiedQueryProcess, + t_info); } - - t_info->taos = NULL;// TODO: workaround to use separate taos connection; - - pthread_create(pids + i, NULL, superQueryProcess, t_info); } } else { g_queryInfo.specifiedQueryInfo.concurrent = 0; @@ -5841,18 +5917,12 @@ static int queryTestProcess() { if ((g_queryInfo.superQueryInfo.sqlCount > 0) && (g_queryInfo.superQueryInfo.threadCnt > 0)) { pidsOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt * sizeof(pthread_t)); - if (NULL == pidsOfSub) { - free(infos); - free(pids); - - ERROR_EXIT("memory allocation failed for create threads\n"); - } - infosOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt * sizeof(threadInfo)); - if (NULL == infosOfSub) { - free(pidsOfSub); + + if ((NULL == pidsOfSub) || (NULL == infosOfSub)) { free(infos); free(pids); + ERROR_EXIT("memory allocation failed for create threads\n"); } @@ -5880,7 +5950,7 @@ static int queryTestProcess() { t_info->end_table_to = i < b ? 
startFrom + a : startFrom + a - 1; startFrom = t_info->end_table_to + 1; t_info->taos = NULL; // TODO: workaround to use separate taos connection; - pthread_create(pidsOfSub + i, NULL, subQueryProcess, t_info); + pthread_create(pidsOfSub + i, NULL, superQueryProcess, t_info); } g_queryInfo.superQueryInfo.threadCnt = threads; @@ -5888,8 +5958,12 @@ static int queryTestProcess() { g_queryInfo.superQueryInfo.threadCnt = 0; } - for (int i = 0; i < g_queryInfo.specifiedQueryInfo.concurrent; i++) { - pthread_join(pids[i], NULL); + if ((nSqlCount > 0) && (nConcurrent > 0)) { + for (int i = 0; i < nConcurrent; i++) { + for (int j = 0; j < nSqlCount; j++) { + pthread_join(pids[i * nSqlCount + j], NULL); + } + } } tmfree((char*)pids); @@ -5920,7 +5994,7 @@ static void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int c static TAOS_SUB* subscribeImpl(TAOS *taos, char *sql, char* topic, char* resultFileName) { TAOS_SUB* tsub = NULL; - if (g_queryInfo.specifiedQueryInfo.subscribeMode) { + if (g_queryInfo.specifiedQueryInfo.mode) { tsub = taos_subscribe(taos, g_queryInfo.specifiedQueryInfo.subscribeRestart, topic, sql, subscribe_callback, (void*)resultFileName, @@ -5996,13 +6070,13 @@ static void *subSubscribeProcess(void *sarg) { } //et = taosGetTimestampMs(); //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0); - } while (0); + } while(0); // start loop to consume result TAOS_RES* res = NULL; - while (1) { + while(1) { for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - if (1 == g_queryInfo.superQueryInfo.subscribeMode) { + if (1 == g_queryInfo.superQueryInfo.mode) { continue; } @@ -6073,7 +6147,8 @@ static void *superSubscribeProcess(void *sarg) { sprintf(tmpFile, "%s-%d", g_queryInfo.specifiedQueryInfo.result[i], winfo->threadID); } - tsub[i] = subscribeImpl(winfo->taos, g_queryInfo.specifiedQueryInfo.sql[i], topic, tmpFile); + tsub[i] = subscribeImpl(winfo->taos, + g_queryInfo.specifiedQueryInfo.sql[i], topic, tmpFile); if (NULL == g_queryInfo.specifiedQueryInfo.tsub[i]) { taos_close(winfo->taos); return NULL; @@ -6081,13 +6156,13 @@ static void *superSubscribeProcess(void *sarg) { } //et = taosGetTimestampMs(); //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0); - } while (0); + } while(0); // start loop to consume result TAOS_RES* res = NULL; - while (1) { + while(1) { for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - if (1 == g_queryInfo.specifiedQueryInfo.subscribeMode) { + if (SYNC_QUERY_MODE == g_queryInfo.specifiedQueryInfo.mode) { continue; } @@ -6105,7 +6180,8 @@ static void *superSubscribeProcess(void *sarg) { taos_free_result(res); for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - taos_unsubscribe(tsub[i], g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); + taos_unsubscribe(tsub[i], + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); } taos_close(winfo->taos); @@ -6308,7 +6384,7 @@ static void setParaFromArg(){ g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables; g_Dbs.threadCount = g_args.num_of_threads; g_Dbs.threadCountByCreateTbl = g_args.num_of_threads; - g_Dbs.queryMode = g_args.mode; + g_Dbs.queryMode = g_args.query_mode; g_Dbs.db[0].superTbls[0].autoCreateTable = PRE_CREATE_SUBTBL; g_Dbs.db[0].superTbls[0].childTblExists = TBL_NO_EXISTS; @@ -6410,7 +6486,7 @@ static void querySqlFile(TAOS* taos, 
char* sqlFile) double t = getCurrentTimeUs(); - while ((read_len = tgetline(&line, &line_len, fp)) != -1) { + while((read_len = tgetline(&line, &line_len, fp)) != -1) { if (read_len >= MAX_SQL_SIZE) continue; line[--read_len] = '\0'; @@ -6473,52 +6549,50 @@ static void testMetaFile() { } static void queryResult() { - // select - if (false == g_Dbs.insert_only) { - // query data + // query data - pthread_t read_id; - threadInfo *rInfo = malloc(sizeof(threadInfo)); - rInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000 - rInfo->start_table_from = 0; + pthread_t read_id; + threadInfo *rInfo = malloc(sizeof(threadInfo)); + assert(rInfo); + rInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000 + rInfo->start_table_from = 0; - //rInfo->do_aggreFunc = g_Dbs.do_aggreFunc; - if (g_args.use_metric) { - rInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount; - rInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1; - rInfo->superTblInfo = &g_Dbs.db[0].superTbls[0]; - tstrncpy(rInfo->tb_prefix, - g_Dbs.db[0].superTbls[0].childTblPrefix, MAX_TB_NAME_SIZE); - } else { - rInfo->ntables = g_args.num_of_tables; - rInfo->end_table_to = g_args.num_of_tables -1; - tstrncpy(rInfo->tb_prefix, g_args.tb_prefix, MAX_TB_NAME_SIZE); - } + //rInfo->do_aggreFunc = g_Dbs.do_aggreFunc; + if (g_args.use_metric) { + rInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount; + rInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1; + rInfo->superTblInfo = &g_Dbs.db[0].superTbls[0]; + tstrncpy(rInfo->tb_prefix, + g_Dbs.db[0].superTbls[0].childTblPrefix, MAX_TB_NAME_SIZE); + } else { + rInfo->ntables = g_args.num_of_tables; + rInfo->end_table_to = g_args.num_of_tables -1; + tstrncpy(rInfo->tb_prefix, g_args.tb_prefix, MAX_TB_NAME_SIZE); + } - rInfo->taos = taos_connect( - g_Dbs.host, - g_Dbs.user, - g_Dbs.password, - g_Dbs.db[0].dbName, - g_Dbs.port); - if (rInfo->taos == NULL) { - errorPrint( "Failed to connect to TDengine, reason:%s\n", - taos_errstr(NULL)); - free(rInfo); - exit(-1); - } + rInfo->taos = taos_connect( + g_Dbs.host, + g_Dbs.user, + g_Dbs.password, + g_Dbs.db[0].dbName, + g_Dbs.port); + if (rInfo->taos == NULL) { + errorPrint( "Failed to connect to TDengine, reason:%s\n", + taos_errstr(NULL)); + free(rInfo); + exit(-1); + } - tstrncpy(rInfo->fp, g_Dbs.resultFile, MAX_FILE_NAME_LEN); + tstrncpy(rInfo->fp, g_Dbs.resultFile, MAX_FILE_NAME_LEN); - if (!g_Dbs.use_metric) { - pthread_create(&read_id, NULL, readTable, rInfo); - } else { - pthread_create(&read_id, NULL, readMetric, rInfo); - } - pthread_join(read_id, NULL); - taos_close(rInfo->taos); - free(rInfo); - } + if (!g_Dbs.use_metric) { + pthread_create(&read_id, NULL, readTable, rInfo); + } else { + pthread_create(&read_id, NULL, readMetric, rInfo); + } + pthread_join(read_id, NULL); + taos_close(rInfo->taos); + free(rInfo); } static void testCmdLine() { @@ -6536,9 +6610,7 @@ static void testCmdLine() { g_args.test_mode = INSERT_TEST; insertTestProcess(); - if (g_Dbs.insert_only) - return; - else + if (false == g_Dbs.insert_only) queryResult(); } From 20a32bedb2d36051edafb91360e862538bd0a84c Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 9 Apr 2021 09:03:24 +0800 Subject: [PATCH 117/178] Hotfix/sangshuduo/td 3636 for master (#5735) * [TD-3652] add case for TD-3652 to resolve TD-3590 * Update fulltest.sh * [TD-3295] add case for TD-3295 * TD-3675 * Hotfix/sangshuduo/td 3607 for master (#5712) * test * [TD-3677]: test pr message 1 * [TD-3671]change target branch * [TD-3677]: test pr message 2 * [TD-3677]: test pr 
message 3 * Hotfix/sangshuduo/td 3197 fix taosdemo coverity scan (#5688) * [TD-3197] : fix taosdemo coverity scan issues. * [TD-3197] : fix taosdemo coverity scan issue. fix subscribeTest pids uninitialized. * [TD-3197] : fix taosdemo coverity scan issues. * [TD-3197] : fix coverity scan issues. check super tbl info pointer. * [TD-3197] : fix coverity scan issues. move sub tbl query thread join into loop * [TD-3197] : fix coverity scan issues. remove unused variable * [TD-3197] : fix coverity scan issues. use more secure random library * [TD-3197] : fix coverity scan issues. use strncpy for more safe * [TD-3197] : fix taosdemo coverity scan issue. replace arc4random with rand(). * [TD-3197] : fix coverity scan issues. check stb info pointer for start time * [TD-3197] : fix coverity scan issues. fix strcpy vulnerability * [TD-3197] : fix taosdemo coverity scan issue. modify taosdemoTest2. try to check database continously. * [TD-3197] : taosdemo coverity scan issues. * [TD-3197] : fix memory leak when parsing arguments. * [TD-3197] : fix cmake strip arguments. * [TD-3197] : taosdemo coverity scan. fix cmake string manipulation. Co-authored-by: Shuduo Sang * remove useless file * fix changing target branch * fix * fix * [TD-3607]: fix taosdemo limit and offset. if offset+limit > count. * [TD-3607]: taosdemo limit and offset. if child tbl not exist, dont take limit and offset value. Co-authored-by: huili <52318143+plum-lihui@users.noreply.github.com> Co-authored-by: liuyq-617 Co-authored-by: plum-lihui Co-authored-by: Elias Soong Co-authored-by: Shuduo Sang * fix mem leak issue * fix crash issue * [TD-3683]: reduce buffer size for more stable table creation. (#5720) Co-authored-by: Shuduo Sang * Hotfix/sangshuduo/td 3607 taosdemo buffer overflow (#5723) * [TD-3607] : fix taosdemo buffer overflow. * [TD-3607] : taosdemo buffer overflow. add tmp buffer. * [TD-3607] : taosdemo buffer overflow. fix data generation. * [TD-3607] : taosdemo buffer overflow. fix normal table writting. * [TD-3607] : taosdemo buffer overflow. remove tail spaces. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table test case. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table case. * [TD-3607] : taosdemo buffer overflow. adjust limit offset count warning. * [TD-3607] : taosdemo buffer overflow. add more logic for child tables exist. * [TD-3607] : taosdemo buffer overflow. create database if database be dropped only. * [TD-3607] : fix taosdemo buffer overflow. adjust limit and offset test cases. * [TD-3607] : taosdemo buffer overflow. adjust sample data test case. * test * [TD-3677]: test pr message 1 * [TD-3671]change target branch * [TD-3677]: test pr message 2 * [TD-3677]: test pr message 3 * Hotfix/sangshuduo/td 3197 fix taosdemo coverity scan (#5688) * [TD-3197] : fix taosdemo coverity scan issues. * [TD-3197] : fix taosdemo coverity scan issue. fix subscribeTest pids uninitialized. * [TD-3197] : fix taosdemo coverity scan issues. * [TD-3197] : fix coverity scan issues. check super tbl info pointer. * [TD-3197] : fix coverity scan issues. move sub tbl query thread join into loop * [TD-3197] : fix coverity scan issues. remove unused variable * [TD-3197] : fix coverity scan issues. use more secure random library * [TD-3197] : fix coverity scan issues. use strncpy for more safe * [TD-3197] : fix taosdemo coverity scan issue. replace arc4random with rand(). * [TD-3197] : fix coverity scan issues. check stb info pointer for start time * [TD-3197] : fix coverity scan issues. 
fix strcpy vulnerability * [TD-3197] : fix taosdemo coverity scan issue. modify taosdemoTest2. try to check database continously. * [TD-3197] : taosdemo coverity scan issues. * [TD-3197] : fix memory leak when parsing arguments. * [TD-3197] : fix cmake strip arguments. * [TD-3197] : taosdemo coverity scan. fix cmake string manipulation. Co-authored-by: Shuduo Sang * remove useless file * fix changing target branch * fix * fix * [TD-3607]: taosdemo limit and offset. if limit+offset > count * Hotfix/sangshuduo/td 3607 taosdemo buffer overflow (#5706) * [TD-3607] : fix taosdemo buffer overflow. * [TD-3607] : taosdemo buffer overflow. add tmp buffer. * [TD-3607] : taosdemo buffer overflow. fix data generation. * [TD-3607] : taosdemo buffer overflow. fix normal table writting. * [TD-3607] : taosdemo buffer overflow. remove tail spaces. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table test case. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table case. * [TD-3607] : taosdemo buffer overflow. adjust limit offset count warning. * [TD-3607] : taosdemo buffer overflow. add more logic for child tables exist. * [TD-3607] : taosdemo buffer overflow. create database if database be dropped only. * [TD-3607] : fix taosdemo buffer overflow. adjust limit and offset test cases. * [TD-3607] : taosdemo buffer overflow. adjust sample data test case. * [TD-3607]: taosdemo limit and offset. if limit+offset > count Co-authored-by: Shuduo Sang * [TD-3607]: taosdemo limit and offset. if child tbl not exist, dont take limit and offset value. * Hotfix/sangshuduo/td 3607 taosdemo buffer overflow (#5713) * [TD-3607] : fix taosdemo buffer overflow. * [TD-3607] : taosdemo buffer overflow. add tmp buffer. * [TD-3607] : taosdemo buffer overflow. fix data generation. * [TD-3607] : taosdemo buffer overflow. fix normal table writting. * [TD-3607] : taosdemo buffer overflow. remove tail spaces. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table test case. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table case. * [TD-3607] : taosdemo buffer overflow. adjust limit offset count warning. * [TD-3607] : taosdemo buffer overflow. add more logic for child tables exist. * [TD-3607] : taosdemo buffer overflow. create database if database be dropped only. * [TD-3607] : fix taosdemo buffer overflow. adjust limit and offset test cases. * [TD-3607] : taosdemo buffer overflow. adjust sample data test case. * [TD-3607]: taosdemo limit and offset. if limit+offset > count * [TD-3607]: taosdemo limit and offset. if child tbl not exist, dont take limit and offset value. Co-authored-by: Shuduo Sang * fix taosdemo limit invalid warning condition. * [TD-3683]: reduce buffer size for more stable table creation. (#5719) Co-authored-by: Shuduo Sang Co-authored-by: Shuduo Sang Co-authored-by: huili <52318143+plum-lihui@users.noreply.github.com> Co-authored-by: liuyq-617 Co-authored-by: plum-lihui Co-authored-by: Elias Soong Co-authored-by: Shengliang Guan * TD-3707 * Feature/sangshuduo/td 3408 taosdemo async query (#5731) * test * [TD-3677]: test pr message 1 * [TD-3671]change target branch * [TD-3677]: test pr message 2 * [TD-3677]: test pr message 3 * Hotfix/sangshuduo/td 3197 fix taosdemo coverity scan (#5688) * [TD-3197] : fix taosdemo coverity scan issues. * [TD-3197] : fix taosdemo coverity scan issue. fix subscribeTest pids uninitialized. * [TD-3197] : fix taosdemo coverity scan issues. * [TD-3197] : fix coverity scan issues. check super tbl info pointer. 
* [TD-3197] : fix coverity scan issues. move sub tbl query thread join into loop * [TD-3197] : fix coverity scan issues. remove unused variable * [TD-3197] : fix coverity scan issues. use more secure random library * [TD-3197] : fix coverity scan issues. use strncpy for more safe * [TD-3197] : fix taosdemo coverity scan issue. replace arc4random with rand(). * [TD-3197] : fix coverity scan issues. check stb info pointer for start time * [TD-3197] : fix coverity scan issues. fix strcpy vulnerability * [TD-3197] : fix taosdemo coverity scan issue. modify taosdemoTest2. try to check database continously. * [TD-3197] : taosdemo coverity scan issues. * [TD-3197] : fix memory leak when parsing arguments. * [TD-3197] : fix cmake strip arguments. * [TD-3197] : taosdemo coverity scan. fix cmake string manipulation. Co-authored-by: Shuduo Sang * remove useless file * fix changing target branch * fix * fix * Hotfix/sangshuduo/td 3607 taosdemo buffer overflow (#5706) * [TD-3607] : fix taosdemo buffer overflow. * [TD-3607] : taosdemo buffer overflow. add tmp buffer. * [TD-3607] : taosdemo buffer overflow. fix data generation. * [TD-3607] : taosdemo buffer overflow. fix normal table writting. * [TD-3607] : taosdemo buffer overflow. remove tail spaces. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table test case. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table case. * [TD-3607] : taosdemo buffer overflow. adjust limit offset count warning. * [TD-3607] : taosdemo buffer overflow. add more logic for child tables exist. * [TD-3607] : taosdemo buffer overflow. create database if database be dropped only. * [TD-3607] : fix taosdemo buffer overflow. adjust limit and offset test cases. * [TD-3607] : taosdemo buffer overflow. adjust sample data test case. * [TD-3607]: taosdemo limit and offset. if limit+offset > count Co-authored-by: Shuduo Sang * Hotfix/sangshuduo/td 3607 taosdemo buffer overflow (#5713) * [TD-3607] : fix taosdemo buffer overflow. * [TD-3607] : taosdemo buffer overflow. add tmp buffer. * [TD-3607] : taosdemo buffer overflow. fix data generation. * [TD-3607] : taosdemo buffer overflow. fix normal table writting. * [TD-3607] : taosdemo buffer overflow. remove tail spaces. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table test case. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table case. * [TD-3607] : taosdemo buffer overflow. adjust limit offset count warning. * [TD-3607] : taosdemo buffer overflow. add more logic for child tables exist. * [TD-3607] : taosdemo buffer overflow. create database if database be dropped only. * [TD-3607] : fix taosdemo buffer overflow. adjust limit and offset test cases. * [TD-3607] : taosdemo buffer overflow. adjust sample data test case. * [TD-3607]: taosdemo limit and offset. if limit+offset > count * [TD-3607]: taosdemo limit and offset. if child tbl not exist, dont take limit and offset value. Co-authored-by: Shuduo Sang * [TD-3683]: reduce buffer size for more stable table creation. (#5719) Co-authored-by: Shuduo Sang * [TD-3408]: taosdemo support async query. * [TD-3408]: taosdemo support async query. refactor * [TD-3408]: taosdemo support async query. refactor 2 * [TD-3408]: taosdemo support async query. refactor 3 * [TD-3408]: taosdemo support async query. refactor 4 * [TD-3408]: taosdemo support specified sql more than one line. 
Co-authored-by: huili <52318143+plum-lihui@users.noreply.github.com> Co-authored-by: liuyq-617 Co-authored-by: plum-lihui Co-authored-by: Elias Soong Co-authored-by: Shuduo Sang Co-authored-by: Shengliang Guan * [TD-3636]: fix taosdemo outorder range algorithm. patch for master. Co-authored-by: wu champion Co-authored-by: wu champion Co-authored-by: dapan1121 <89396746@qq.com> Co-authored-by: wu champion Co-authored-by: huili <52318143+plum-lihui@users.noreply.github.com> Co-authored-by: liuyq-617 Co-authored-by: plum-lihui Co-authored-by: Elias Soong Co-authored-by: Shuduo Sang Co-authored-by: Shengliang Guan Co-authored-by: haojun Liao --- Jenkinsfile | 1 - src/client/src/tscSQLParser.c | 2 +- src/kit/shell/inc/shell.h | 2 +- src/kit/shell/src/shellCommand.c | 2 +- src/kit/taosdemo/taosdemo.c | 48 ++--- src/query/src/qAggMain.c | 12 +- src/query/src/qExecutor.c | 8 +- tests/pytest/fulltest.sh | 4 +- tests/pytest/query/queryJoin10tables.py | 201 +++++++++++++++++++ tests/pytest/query/queryStddevWithGroupby.py | 68 +++++++ tests/script/general/parser/groupby.sim | 1 + 11 files changed, 312 insertions(+), 37 deletions(-) create mode 100644 tests/pytest/query/queryJoin10tables.py create mode 100644 tests/pytest/query/queryStddevWithGroupby.py diff --git a/Jenkinsfile b/Jenkinsfile index 3fdfd5f8b1..dfe9ed4389 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -82,7 +82,6 @@ def pre_test(){ } } sh ''' - cd ${WK} git pull >/dev/null diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 6de315d992..e8f753f876 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -6156,7 +6156,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { } // projection query on super table does not compatible with "group by" syntax - if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { + if (tscIsProjectionQuery(pQueryInfo)) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } diff --git a/src/kit/shell/inc/shell.h b/src/kit/shell/inc/shell.h index d0b7149541..2374150c52 100644 --- a/src/kit/shell/inc/shell.h +++ b/src/kit/shell/inc/shell.h @@ -27,7 +27,7 @@ #define MAX_IP_SIZE 20 #define MAX_PASSWORD_SIZE 20 #define MAX_HISTORY_SIZE 1000 -#define MAX_COMMAND_SIZE 65536 +#define MAX_COMMAND_SIZE 1048586 #define HISTORY_FILE ".taos_history" #define DEFAULT_RES_SHOW_NUM 100 diff --git a/src/kit/shell/src/shellCommand.c b/src/kit/shell/src/shellCommand.c index 16545a5fe8..9173ab0efd 100644 --- a/src/kit/shell/src/shellCommand.c +++ b/src/kit/shell/src/shellCommand.c @@ -238,7 +238,7 @@ void resetCommand(Command *cmd, const char s[]) { clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size); memset(cmd->buffer, 0, MAX_COMMAND_SIZE); memset(cmd->command, 0, MAX_COMMAND_SIZE); - strcpy(cmd->command, s); + strncpy(cmd->command, s, MAX_COMMAND_SIZE); int size = 0; int width = 0; getMbSizeInfo(s, &size, &width); diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 6939f5dae5..e804d93619 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -4520,22 +4520,23 @@ static int generateDataTail(char *tableName, int32_t tableSeq, pSamplePos); } else if (0 == strncasecmp(superTblInfo->dataSource, "rand", strlen("rand"))) { - int rand_num = taosRandom() % 100; - if (0 != superTblInfo->disorderRatio + int rand_num = taosRandom() % 100; + int randTail; + if (0 != superTblInfo->disorderRatio && rand_num < superTblInfo->disorderRatio) { - int64_t d = startTime - + 
superTblInfo->timeStampStep * k - - taosRandom() % superTblInfo->disorderRange; - retLen = generateRowData( + randTail = (superTblInfo->timeStampStep * k + + (taosRandom() % superTblInfo->disorderRange + 1)) * (-1); + debugPrint("rand data generated, back %d\n", randTail); + } else { + randTail = superTblInfo->timeStampStep * k; + } + + uint64_t d = startTime + + randTail; + retLen = generateRowData( data, d, superTblInfo); - } else { - retLen = generateRowData( - data, - startTime + superTblInfo->timeStampStep * k, - superTblInfo); - } } if (retLen > remainderBufLen) { @@ -4551,21 +4552,22 @@ static int generateDataTail(char *tableName, int32_t tableSeq, int lenOfBinary = g_args.len_of_binary; int rand_num = taosRandom() % 100; + int randTail; + if ((g_args.disorderRatio != 0) && (rand_num < g_args.disorderRatio)) { - - int64_t d = startTime + DEFAULT_TIMESTAMP_STEP * k - - taosRandom() % g_args.disorderRange; - - retLen = generateData(data, data_type, - ncols_per_record, d, lenOfBinary); + randTail = (DEFAULT_TIMESTAMP_STEP * k + + (taosRandom() % g_args.disorderRange + 1)) * (-1); + debugPrint("rand data generated, back %d\n", randTail); } else { - retLen = generateData(data, data_type, - ncols_per_record, - startTime + DEFAULT_TIMESTAMP_STEP * k, - lenOfBinary); + randTail = DEFAULT_TIMESTAMP_STEP * k; } + retLen = generateData(data, data_type, + ncols_per_record, + startTime + randTail, + lenOfBinary); + if (len > remainderBufLen) break; @@ -5106,7 +5108,7 @@ static void callBack(void *param, TAOS_RES *res, int code) { int rand_num = taosRandom() % 100; if (0 != winfo->superTblInfo->disorderRatio && rand_num < winfo->superTblInfo->disorderRatio) { - int64_t d = winfo->lastTs - taosRandom() % winfo->superTblInfo->disorderRange; + int64_t d = winfo->lastTs - (taosRandom() % winfo->superTblInfo->disorderRange + 1); generateRowData(data, d, winfo->superTblInfo); } else { generateRowData(data, winfo->lastTs += 1000, winfo->superTblInfo); diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index f18d093b89..e47545da95 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -2771,14 +2771,16 @@ static void percentile_function(SQLFunctionCtx *pCtx) { SPercentileInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); if (pCtx->currentStage == REPEAT_SCAN && pInfo->stage == 0) { + pInfo->stage += 1; + // all data are null, set it completed if (pInfo->numOfElems == 0) { pResInfo->complete = true; + + return; } else { pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval); } - - pInfo->stage += 1; } // the first stage, only acquire the min/max value @@ -2857,14 +2859,16 @@ static void percentile_function_f(SQLFunctionCtx *pCtx, int32_t index) { SPercentileInfo *pInfo = (SPercentileInfo *)GET_ROWCELL_INTERBUF(pResInfo); if (pCtx->currentStage == REPEAT_SCAN && pInfo->stage == 0) { + pInfo->stage += 1; + // all data are null, set it completed if (pInfo->numOfElems == 0) { pResInfo->complete = true; + + return; } else { pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval); } - - pInfo->stage += 1; } if (pInfo->stage == 0) { diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index a561072524..4082a2a662 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -4020,7 +4020,7 @@ static SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfOutput, in return pFillCol; } -int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, SArray* prevResult, void 
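
/*
 * Aside (illustrative, not part of the patch): the disorder-timestamp
 * changes earlier in this patch replace "startTime + step*k - rand()%range"
 * with a negated tail, (step*k + rand()%range + 1) * (-1). The +1 keeps the
 * offset non-zero even when rand()%range is 0, and the negation lands a
 * disordered row strictly before startTime, so it can never collide with an
 * in-order timestamp. A sketch with hypothetical names:
 */
#include <stdint.h>
#include <stdlib.h>

static int64_t next_ts(int64_t startTime, int64_t step, int k,
                       int disorderRatio, int disorderRange) {
    int64_t tail = step * k;                     /* in-order default */
    if (disorderRatio != 0 && rand() % 100 < disorderRatio) {
        tail = -(step * k + (rand() % disorderRange + 1));
    }
    return startTime + tail;
}
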
*tsdb, int32_t vgId, bool isSTableQuery) { +int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bool isSTableQuery) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; SQuery *pQuery = pQInfo->runtimeEnv.pQuery; @@ -4031,8 +4031,6 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, SArray* prevResult, void *ts pQuery->timeWindowInterpo = timeWindowInterpoRequired(pQuery); pQuery->stabledev = isStabledev(pQuery); - pRuntimeEnv->prevResult = prevResult; - setScanLimitationByResultBuffer(pQuery); int32_t code = setupQueryHandle(tsdb, pQInfo, isSTableQuery); @@ -6654,6 +6652,8 @@ int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *p SArray* prevResult = NULL; if (pQueryMsg->prevResultLen > 0) { prevResult = interResFromBinary(param->prevResult, pQueryMsg->prevResultLen); + + pRuntimeEnv->prevResult = prevResult; } pQuery->precision = tsdbGetCfg(tsdb)->precision; @@ -6675,7 +6675,7 @@ int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *p } // filter the qualified - if ((code = doInitQInfo(pQInfo, pTsBuf, prevResult, tsdb, vgId, isSTable)) != TSDB_CODE_SUCCESS) { + if ((code = doInitQInfo(pQInfo, pTsBuf, tsdb, vgId, isSTable)) != TSDB_CODE_SUCCESS) { goto _error; } diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index cc6eb719f2..b64fb2be11 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -217,8 +217,8 @@ python3 ./test.py -f query/floatCompare.py python3 ./test.py -f query/query1970YearsAf.py python3 ./test.py -f query/bug3351.py python3 ./test.py -f query/bug3375.py - - +python3 ./test.py -f query/queryJoin10tables.py +python3 ./test.py -f query/queryStddevWithGroupby.py #stream python3 ./test.py -f stream/metric_1.py diff --git a/tests/pytest/query/queryJoin10tables.py b/tests/pytest/query/queryJoin10tables.py new file mode 100644 index 0000000000..01a7397d44 --- /dev/null +++ b/tests/pytest/query/queryJoin10tables.py @@ -0,0 +1,201 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import taos +import sys + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + + def createtable(self): + + # create stbles + tdSql.execute("create table if not exists stb1 (ts timestamp, c1 int) tags(t11 int, t12 int)") + tdSql.execute("create table if not exists stb2 (ts timestamp, c2 int) tags(t21 int, t22 int)") + tdSql.execute("create table if not exists stb3 (ts timestamp, c3 int) tags(t31 int, t32 int)") + tdSql.execute("create table if not exists stb4 (ts timestamp, c4 int) tags(t41 int, t42 int)") + tdSql.execute("create table if not exists stb5 (ts timestamp, c5 int) tags(t51 int, t52 int)") + tdSql.execute("create table if not exists stb6 (ts timestamp, c6 int) tags(t61 int, t62 int)") + tdSql.execute("create table if not exists stb7 (ts timestamp, c7 int) tags(t71 int, t72 int)") + tdSql.execute("create table if not exists stb8 (ts timestamp, c8 int) tags(t81 int, t82 int)") + tdSql.execute("create table if not exists stb9 (ts timestamp, c9 int) tags(t91 int, t92 int)") + tdSql.execute("create table if not exists stb10 (ts timestamp, c10 int) tags(t101 int, t102 int)") + tdSql.execute("create table if not exists stb11 (ts timestamp, c11 int) tags(t111 int, t112 int)") + + # create normal tables + tdSql.execute("create table t10 using stb1 tags(0, 9)") + tdSql.execute("create table t11 using stb1 tags(1, 8)") + tdSql.execute("create table t12 using stb1 tags(2, 7)") + tdSql.execute("create table t13 using stb1 tags(3, 6)") + tdSql.execute("create table t14 using stb1 tags(4, 5)") + tdSql.execute("create table t15 using stb1 tags(5, 4)") + tdSql.execute("create table t16 using stb1 tags(6, 3)") + tdSql.execute("create table t17 using stb1 tags(7, 2)") + tdSql.execute("create table t18 using stb1 tags(8, 1)") + tdSql.execute("create table t19 using stb1 tags(9, 0)") + tdSql.execute("create table t110 using stb1 tags(10, 10)") + + tdSql.execute("create table t20 using stb2 tags(0, 9)") + tdSql.execute("create table t21 using stb2 tags(1, 8)") + tdSql.execute("create table t22 using stb2 tags(2, 7)") + + tdSql.execute("create table t30 using stb3 tags(0, 9)") + tdSql.execute("create table t31 using stb3 tags(1, 8)") + tdSql.execute("create table t32 using stb3 tags(2, 7)") + + def inserttable(self): + for i in range(100): + if i<60: + tdSql.execute(f"insert into t20 values('2020-10-01 00:00:{i}.000', {i})") + tdSql.execute(f"insert into t21 values('2020-10-01 00:00:{i}.000', {i})") + tdSql.execute(f"insert into t22 values('2020-10-01 00:00:{i}.000', {i})") + tdSql.execute(f"insert into t30 values('2020-10-01 00:00:{i}.000', {i})") + tdSql.execute(f"insert into t31 values('2020-10-01 00:00:{i}.000', {i})") + tdSql.execute(f"insert into t32 values('2020-10-01 00:00:{i}.000', {i})") + else: + tdSql.execute(f"insert into t20 values('2020-10-01 00:01:{i-60}.000', {i})") + tdSql.execute(f"insert into t21 values('2020-10-01 00:01:{i-60}.000', {i})") + tdSql.execute(f"insert into t22 values('2020-10-01 00:01:{i-60}.000', {i})") + tdSql.execute(f"insert into t30 values('2020-10-01 00:01:{i-60}.000', {i})") + 
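
                # Aside (illustrative, not part of the test): the if/else
                # above maps row i onto two minutes of timestamps; an
                # equivalent zero-padded helper would be:
                def ts_for(idx):
                    return "2020-10-01 00:%02d:%02d.000" % (idx // 60, idx % 60)
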
tdSql.execute(f"insert into t31 values('2020-10-01 00:01:{i-60}.000', {i})") + tdSql.execute(f"insert into t32 values('2020-10-01 00:01:{i-60}.000', {i})") + for j in range(11): + if i<60: + tdSql.execute(f"insert into t1{j} values('2020-10-01 00:00:{i}.000', {i})") + else: + tdSql.execute(f"insert into t1{j} values('2020-10-01 00:01:{i-60}.000', {i})") + + def queryjointable(self): + tdSql.error( + '''select from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19 + where t10.ts=t11.ts and t10.ts=t12.ts and t10.ts=t13.ts and t10.ts=t14.ts and t10.ts=t15.ts + and t10.ts=t16.ts and t10.ts=t17.ts and t10.ts=t18.ts and t10.ts=t19.ts''' + ) + tdSql.error("select * from t10 where t10.ts=t11.ts") + tdSql.error("select * from where t10.ts=t11.ts") + tdSql.error("select * from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19") + tdSql.error("select * from stb1, stb2, stb3 where stb1.ts=stb2.ts and stb1.ts=stb3.ts") + tdSql.error("select * from stb1, stb2, stb3 where stb1.t11=stb2.t21 and stb1.t11=stb3.t31") + tdSql.error("select * from stb1, stb2, stb3") + tdSql.error( + '''select * from stb1 + join stb2 on stb1.ts=stb2.ts and stb1.t11=stb2.t21 + join stb3 on stb1.ts=stb3.ts and stb1.t11=stb3.t31''' + ) + tdSql.error("select * from t10 join t11 on t10.ts=t11.ts join t12 on t11.ts=t12.ts") + tdSql.query( + '''select * from stb1,stb2,stb3 + where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.t11=stb2.t21 and stb1.t11 =stb3.t31''' + ) + tdSql.checkRows(300) + tdSql.query("select * from t11,t12,t13 where t11.ts=t12.ts and t11.ts=t13.ts") + tdSql.checkRows(100) + tdSql.error("selec * from t11,t12,t13 where t11.ts=t12.ts and t11.ts=t13.ts") + tdSql.error("select * form t11,t12,t13 where t11.ts=t12.ts and t11.ts=t13.ts") + tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts") + tdSql.error("select * from t11,t12,t13 when t11.ts <> t12.ts and t11.ts=t13.ts") + tdSql.error("select * from t11,t12,t13 when t11.ts != t12.ts and t11.ts=t13.ts") + tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts or t11.ts=t13.ts") + tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts=t13.ts") + tdSql.error("select * from t11,t12,t13 when t11.c1=t12.c2 and t11.c1=t13.c3") + tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.c3 and t11.c1=t13.ts") + tdSql.error("select ts from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts") + tdSql.error("select * from t11,t12,t13 when t11.ts=ts and t11.ts=t13.ts") + tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts and ts>100") + tdSql.error("select * from t11,t12,stb1 when t11.ts=t12.ts and t11.ts=stb1.ts") + tdSql.error("select t14.ts from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts") + tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts1") + tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t14.ts") + tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts") + tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts and t11.c1=t13.c3") + tdSql.error( + '''select * from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19,t20 + where t10.ts=t11.ts and t10.ts=t12.ts and t10.ts=t13.ts and t10.ts=t14.ts and t10.ts=t15.ts + and t10.ts=t16.ts and t10.ts=t17.ts and t10.ts=t18.ts and t10.ts=t19.ts and t10.ts=t20.ts''' + ) + tdSql.error( + '''select * from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19,t20 + where t10.ts=t11.ts and t10.ts=t12.ts and t10.ts=t13.ts and t10.ts=t14.ts and t10.ts=t15.ts + and t10.ts=t16.ts and t10.ts=t17.ts and t10.ts=t18.ts and t10.ts=t19.ts''' + ) + 
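
        # Aside (illustrative, not part of the test): the passing and failing
        # cases around here all share the n-way join shape
        # "select * from t10,...  where t10.ts=t11.ts and t10.ts=t12.ts ...";
        # a hypothetical builder for that shape:
        def join_sql(n):
            tables = ",".join("t1%d" % k for k in range(n))
            conds = " and ".join("t10.ts=t1%d.ts" % k for k in range(1, n))
            return "select * from %s where %s" % (tables, conds)
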
tdSql.error( + '''select * from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19 + where t10.ts=t11.ts and t10.ts=t12.ts and t10.ts=t13.ts and t10.ts=t14.ts and t10.ts=t15.ts + and t10.ts=t16.ts and t10.ts=t17.ts and t10.ts=t18.ts and t10.ts=t19.ts and t10.c1=t19.c1''' + ) + tdSql.error( + '''select * from stb1,stb2,stb3 + where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.t11=stb2.t21''' + ) + tdSql.error( + '''select * from stb1,stb2,stb3 + where stb1.ts=stb2.ts and stb1.t11=stb2.t21 and stb1.t11=stb3.t31''' + ) + tdSql.error( + '''select * from stb1,stb2,stb3 + where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.t11=stb2.t21 and stb1.t11=stb3.t31 + and stb1.t12=stb3=t32''' + ) + tdSql.error( + '''select * from stb1,stb2,stb3,stb4,stb5,stb6,stb7,stb8,stb9,stb10,stb11 + where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.ts=stb4.ts and stb1.ts=stb5.ts and stb1.ts=stb6.ts + and stb1.ts=stb7.ts and stb1.ts=stb8.ts and stb1.ts=stb9.ts and stb1.ts=stb10.ts and stb1.ts=stb11.ts + and stb1.t11=stb2.t21 and stb1.t11=stb3.t31 and stb1.t11=stb4.t41 and stb1.t11=stb5.t51 + and stb1.t11=stb6.t61 and stb1.t11=stb7.t71 and stb1.t11=stb8.t81 and stb1.t11=stb9.t91 + and stb1.t11=stb10.t101 and stb1.t11=stb11.t111''' + ) + tdSql.error( + '''select * from stb1,stb2,stb3,stb4,stb5,stb6,stb7,stb8,stb9,stb10 + where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.ts=stb4.ts and stb1.ts=stb5.ts and stb1.ts=stb6.ts + and stb1.ts=stb7.ts and stb1.ts=stb8.ts and stb1.ts=stb9.ts and stb1.ts=stb10.ts and stb1.t11=stb2.t21 + and stb1.t11=stb3.t31 and stb1.t11=stb4.t41 and stb1.t11=stb5.t51 and stb1.t11=stb6.t61 + and stb1.t11=stb7.t71 and stb1.t11=stb8.t81 and stb1.t11=stb9.t91 and stb1.t11=stb10.t101 + and stb1.t12=stb11.t102''' + ) + + def run(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + self.createtable() + + tdLog.printNoPrefix("==========step2:insert data") + self.inserttable() + + tdLog.printNoPrefix("==========step3:query timestamp type") + self.queryjointable() + + # after wal and sync, check again + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + + tdLog.printNoPrefix("==========step4:query again after wal") + self.queryjointable() + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/query/queryStddevWithGroupby.py b/tests/pytest/query/queryStddevWithGroupby.py new file mode 100644 index 0000000000..aee88ca9c5 --- /dev/null +++ b/tests/pytest/query/queryStddevWithGroupby.py @@ -0,0 +1,68 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def querysqls(self): + tdSql.query("select stddev(c1) from t10 group by c1") + tdSql.checkRows(6) + tdSql.checkData(0, 0, 0) + tdSql.checkData(1, 0, 0) + tdSql.checkData(2, 0, 0) + tdSql.checkData(3, 0, 0) + tdSql.checkData(4, 0, 0) + tdSql.checkData(5, 0, 0) + tdSql.query("select stddev(c2) from t10") + tdSql.checkData(0, 0, 0.5) + + def run(self): + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 36500") + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step1:create table && insert data") + tdSql.execute("create stable stb1 (ts timestamp , c1 int ,c2 float) tags(t1 int)") + tdSql.execute("create table t10 using stb1 tags(1)") + tdSql.execute("insert into t10 values ('1969-12-31 00:00:00.000', 2,1)") + tdSql.execute("insert into t10 values ('1970-01-01 00:00:00.000', 3,1)") + tdSql.execute("insert into t10 values (0, 4,1)") + tdSql.execute("insert into t10 values (now-18725d, 1,2)") + tdSql.execute("insert into t10 values ('2021-04-06 00:00:00.000', 5,2)") + tdSql.execute("insert into t10 values (now+1d,6,2)") + + tdLog.printNoPrefix("==========step2:query and check") + self.querysqls() + + tdLog.printNoPrefix("==========step3:after wal,check again") + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + self.querysqls() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/script/general/parser/groupby.sim b/tests/script/general/parser/groupby.sim index dd7331054c..124e76e85c 100644 --- a/tests/script/general/parser/groupby.sim +++ b/tests/script/general/parser/groupby.sim @@ -220,6 +220,7 @@ sql_error select sum(c3), ts, c2 from group_tb0 where c1 < 20 group by c1; sql_error select sum(c3), first(ts), c2 from group_tb0 where c1 < 20 group by c1; sql_error select first(c3), ts, c1, c2 from group_tb0 where c1 < 20 group by c1; sql_error select first(c3), last(c3), ts, c1 from group_tb0 where c1 < 20 group by c1; +sql_error select ts from group_tb0 group by c1; #===========================interval=====not support====================== sql_error select count(*), c1 from group_tb0 where c1<20 interval(1y) group by c1; From b7579334deed7b4c629ec977a6b8b4c7e3f47513 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 9 Apr 2021 02:53:30 +0000 Subject: [PATCH 118/178] [TD-3682]: Insufficient disk space may cause oom --- src/common/src/tglobal.c | 2 +- src/sync/src/syncMain.c | 11 +++++++++-- src/vnode/inc/vnodeInt.h | 1 + src/vnode/src/vnodeWrite.c | 24 ++++++++++++++++++++---- 4 files changed, 31 insertions(+), 7 deletions(-) diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index 5f4ce046ed..ea5bf7954d 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -212,7 +212,7 @@ float tsAvailTmpDirectorySpace = 0; float tsAvailDataDirGB = 0; 
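
/*
 * Aside (illustrative, not part of the patch): the vnode changes later in
 * this patch bound the write queue by total bytes as well as by message
 * count, with a flow-control delay that grows with queue depth. The shape
 * of that backpressure, sketched with hypothetical types:
 */
#include <stdint.h>

#define MAX_QUEUED_MSG_NUM  100000
#define MAX_QUEUED_MSG_SIZE (1024*1024*1024) /* 1GB */

typedef struct { int32_t queuedMsg; int64_t queuedSize; } WQueueStat;

/* milliseconds a producer should stall before enqueueing, capped at 100 */
static int backpressure_ms(const WQueueStat *q) {
    if (q->queuedMsg <= MAX_QUEUED_MSG_NUM && q->queuedSize <= MAX_QUEUED_MSG_SIZE)
        return 0;
    int ms = (q->queuedMsg / MAX_QUEUED_MSG_NUM) * 10 + 3;
    return ms > 100 ? 100 : ms;
}
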
float tsUsedDataDirGB = 0; float tsReservedTmpDirectorySpace = 1.0f; -float tsMinimalDataDirGB = 1.0f; +float tsMinimalDataDirGB = 2.0f; int32_t tsTotalMemoryMB = 0; uint32_t tsVersion = 0; diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index 72442eee6c..29ced21291 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -997,17 +997,24 @@ static void syncProcessForwardFromPeer(char *cont, SSyncPeer *pPeer) { sTrace("%s, forward is received, hver:%" PRIu64 ", len:%d", pPeer->id, pHead->version, pHead->len); + int32_t code = 0; if (nodeRole == TAOS_SYNC_ROLE_SLAVE) { // nodeVersion = pHead->version; - (*pNode->writeToCacheFp)(pNode->vgId, pHead, TAOS_QTYPE_FWD, NULL); + code = (*pNode->writeToCacheFp)(pNode->vgId, pHead, TAOS_QTYPE_FWD, NULL); } else { if (nodeSStatus != TAOS_SYNC_STATUS_INIT) { - syncSaveIntoBuffer(pPeer, pHead); + code = syncSaveIntoBuffer(pPeer, pHead); } else { sError("%s, forward discarded since sstatus:%s, hver:%" PRIu64, pPeer->id, syncStatus[nodeSStatus], pHead->version); + code = -1; } } + + if (code != 0) { + sError("%s, failed to process fwd msg, hver:%" PRIu64 ", len:%d", pPeer->id, pHead->version, pHead->len); + syncRestartConnection(pPeer); + } } static void syncProcessPeersStatusMsg(SPeersStatus *pPeersStatus, SSyncPeer *pPeer) { diff --git a/src/vnode/inc/vnodeInt.h b/src/vnode/inc/vnodeInt.h index 4aa07196a7..d770a38e37 100644 --- a/src/vnode/inc/vnodeInt.h +++ b/src/vnode/inc/vnodeInt.h @@ -37,6 +37,7 @@ extern int32_t vDebugFlag; typedef struct { int32_t vgId; // global vnode group ID int32_t refCount; // reference count + int64_t queuedWMsgSize; int32_t queuedWMsg; int32_t queuedRMsg; int32_t flowctrlLevel; diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c index a0be52db7a..aab685e678 100644 --- a/src/vnode/src/vnodeWrite.c +++ b/src/vnode/src/vnodeWrite.c @@ -25,6 +25,7 @@ #include "vnodeStatus.h" #define MAX_QUEUED_MSG_NUM 100000 +#define MAX_QUEUED_MSG_SIZE 1024*1024*1024 //1GB extern void * tsDnodeTmr; static int32_t (*vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_MAX])(SVnodeObj *, void *pCont, SRspRet *); @@ -269,6 +270,13 @@ static int32_t vnodeWriteToWQueueImp(SVWriteMsg *pWrite) { } } + if (tsAvailDataDirGB <= tsMinimalDataDirGB) { + vError("vgId:%d, failed to write into vwqueue since no diskspace, avail:%fGB", pVnode->vgId, tsAvailDataDirGB); + taosFreeQitem(pWrite); + vnodeRelease(pVnode); + return TSDB_CODE_VND_NO_DISKSPACE; + } + if (!vnodeInReadyOrUpdatingStatus(pVnode)) { vError("vgId:%d, failed to write into vwqueue, vstatus is %s, refCount:%d pVnode:%p", pVnode->vgId, vnodeStatus[pVnode->status], pVnode->refCount, pVnode); @@ -278,14 +286,17 @@ static int32_t vnodeWriteToWQueueImp(SVWriteMsg *pWrite) { } int32_t queued = atomic_add_fetch_32(&pVnode->queuedWMsg, 1); - if (queued > MAX_QUEUED_MSG_NUM) { + int64_t queuedSize = atomic_add_fetch_64(&pVnode->queuedWMsgSize, pWrite->pHead.len); + + if (queued > MAX_QUEUED_MSG_NUM || queuedSize > MAX_QUEUED_MSG_SIZE) { int32_t ms = (queued / MAX_QUEUED_MSG_NUM) * 10 + 3; if (ms > 100) ms = 100; vDebug("vgId:%d, too many msg:%d in vwqueue, flow control %dms", pVnode->vgId, queued, ms); taosMsleep(ms); } - vTrace("vgId:%d, write into vwqueue, refCount:%d queued:%d", pVnode->vgId, pVnode->refCount, pVnode->queuedWMsg); + vTrace("vgId:%d, write into vwqueue, refCount:%d queued:%d size:%" PRId64, pVnode->vgId, pVnode->refCount, + pVnode->queuedWMsg, pVnode->queuedWMsgSize); taosWriteQitem(pVnode->wqueue, pWrite->qtype, pWrite); return TSDB_CODE_SUCCESS; @@ 
-308,7 +319,10 @@ void vnodeFreeFromWQueue(void *vparam, SVWriteMsg *pWrite) { SVnodeObj *pVnode = vparam; int32_t queued = atomic_sub_fetch_32(&pVnode->queuedWMsg, 1); - vTrace("vgId:%d, msg:%p, app:%p, free from vwqueue, queued:%d", pVnode->vgId, pWrite, pWrite->rpcMsg.ahandle, queued); + int64_t queuedSize = atomic_sub_fetch_64(&pVnode->queuedWMsgSize, pWrite->pHead.len); + + vTrace("vgId:%d, msg:%p, app:%p, free from vwqueue, queued:%d size:%" PRId64, pVnode->vgId, pWrite, + pWrite->rpcMsg.ahandle, queued, queuedSize); taosFreeQitem(pWrite); vnodeRelease(pVnode); @@ -344,7 +358,9 @@ static void vnodeFlowCtrlMsgToWQueue(void *param, void *tmrId) { static int32_t vnodePerformFlowCtrl(SVWriteMsg *pWrite) { SVnodeObj *pVnode = pWrite->pVnode; if (pWrite->qtype != TAOS_QTYPE_RPC) return 0; - if (pVnode->queuedWMsg < MAX_QUEUED_MSG_NUM && pVnode->flowctrlLevel <= 0) return 0; + if (pVnode->queuedWMsg < MAX_QUEUED_MSG_NUM && pVnode->queuedWMsgSize < MAX_QUEUED_MSG_SIZE && + pVnode->flowctrlLevel <= 0) + return 0; if (tsEnableFlowCtrl == 0) { int32_t ms = (int32_t)pow(2, pVnode->flowctrlLevel + 2); From 0a95895c9d4b284e65f5db370804238d86b24829 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Fri, 9 Apr 2021 10:59:03 +0800 Subject: [PATCH 119/178] fix taos crash issue --- src/client/src/tscParseInsert.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 926ee44b70..920937928f 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -937,6 +937,10 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC return ret; } + if (sql == NULL) { + return TSDB_CODE_TSC_INVALID_SQL; + } + code = tscGetTableMetaEx(pSql, pTableMetaInfo, true); if (TSDB_CODE_TSC_ACTION_IN_PROGRESS == code) { return code; @@ -945,6 +949,10 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC } else { sql = sToken.z; + if (sql == NULL) { + return TSDB_CODE_TSC_INVALID_SQL; + } + code = tscGetTableMetaEx(pSql, pTableMetaInfo, false); if (pCmd->curSql == NULL) { assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS); @@ -952,10 +960,6 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC } *sqlstr = sql; - - if (*sqlstr == NULL) { - code = TSDB_CODE_TSC_INVALID_SQL; - } return code; } From 3da3b0de53251fa42ff6f4044d575a54260b7897 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Fri, 9 Apr 2021 11:05:33 +0800 Subject: [PATCH 120/178] asyncdemo crash issue --- tests/examples/c/asyncdemo.c | 44 +++++++++++++++++++++++------------- 1 file changed, 28 insertions(+), 16 deletions(-) diff --git a/tests/examples/c/asyncdemo.c b/tests/examples/c/asyncdemo.c index d711ce22c1..f2a96dd825 100644 --- a/tests/examples/c/asyncdemo.c +++ b/tests/examples/c/asyncdemo.c @@ -28,7 +28,8 @@ int points = 5; int numOfTables = 3; -int tablesProcessed = 0; +int tablesInsertProcessed = 0; +int tablesSelectProcessed = 0; int64_t st, et; typedef struct { @@ -134,6 +135,9 @@ int main(int argc, char *argv[]) gettimeofday(&systemTime, NULL); st = systemTime.tv_sec * 1000000 + systemTime.tv_usec; + tablesInsertProcessed = 0; + tablesSelectProcessed = 0; + for (i = 0; iname); - tablesProcessed++; - if (tablesProcessed >= numOfTables) { + tablesInsertProcessed++; + if (tablesInsertProcessed >= numOfTables) { gettimeofday(&systemTime, NULL); et = systemTime.tv_sec * 1000000 + systemTime.tv_usec; printf("%lld mseconds to 
insert %d data points\n", (et - st) / 1000, points*numOfTables); @@ -251,15 +263,17 @@ void taos_retrieve_call_back(void *param, TAOS_RES *tres, int numOfRows) //taos_free_result(tres); printf("%d rows data retrieved from %s\n", pTable->rowsRetrieved, pTable->name); - tablesProcessed++; - if (tablesProcessed >= numOfTables) { + tablesSelectProcessed++; + if (tablesSelectProcessed >= numOfTables) { gettimeofday(&systemTime, NULL); et = systemTime.tv_sec * 1000000 + systemTime.tv_usec; printf("%lld mseconds to query %d data rows\n", (et - st) / 1000, points * numOfTables); } + + taos_free_result(tres); } - taos_free_result(tres); + } void taos_select_call_back(void *param, TAOS_RES *tres, int code) @@ -276,6 +290,4 @@ void taos_select_call_back(void *param, TAOS_RES *tres, int code) taos_cleanup(); exit(1); } - - taos_free_result(tres); } From 5a200ff74645be90469fe90b4d08c5f592d1b793 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 9 Apr 2021 12:04:57 +0800 Subject: [PATCH 121/178] [td-225]additional memory border check for the temp query output buffer --- src/query/inc/qUtil.h | 15 ++++++++++++--- src/query/src/qExecutor.c | 8 +++----- src/query/src/qUtil.c | 2 +- 3 files changed, 16 insertions(+), 9 deletions(-) diff --git a/src/query/inc/qUtil.h b/src/query/inc/qUtil.h index cdd8b0707a..cb8c9679ec 100644 --- a/src/query/inc/qUtil.h +++ b/src/query/inc/qUtil.h @@ -52,11 +52,20 @@ static FORCE_INLINE SResultRow *getResultRow(SResultRowInfo *pResultRowInfo, int return pResultRowInfo->pResult[slot]; } -static FORCE_INLINE char *getPosInResultPage(SQuery *pQuery, tFilePage* page, int32_t rowOffset, int16_t offset) { - assert(rowOffset >= 0 && pQuery != NULL); +static FORCE_INLINE char* getPosInResultPage(SQueryRuntimeEnv* pRuntimeEnv, tFilePage* page, int32_t rowOffset, + int16_t offset, int32_t size) { + assert(rowOffset >= 0 && pRuntimeEnv != NULL); + + SQuery* pQuery = pRuntimeEnv->pQuery; + int64_t pageSize = pRuntimeEnv->pResultBuf->pageSize; int32_t numOfRows = (int32_t)GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pQuery->topBotQuery, pQuery->stableQuery); - return ((char *)page->data) + rowOffset + offset * numOfRows; + + // buffer overflow check + int64_t bufEnd = (rowOffset + offset * numOfRows + size); + assert(page->num <= pageSize && bufEnd <= page->num); + + return ((char*)page->data) + rowOffset + offset * numOfRows; } bool isNullOperator(SColumnFilterElem *pFilter, const char* minval, const char* maxval, int16_t type); diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index bd5fdda0f9..20b3ce8f55 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -3243,7 +3243,7 @@ void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pRe continue; } - pCtx[i].pOutput = getPosInResultPage(pRuntimeEnv->pQuery, bufPage, pResult->offset, offset); + pCtx[i].pOutput = getPosInResultPage(pRuntimeEnv, bufPage, pResult->offset, offset, pCtx[i].outputBytes); offset += pCtx[i].outputBytes; int32_t functionId = pCtx[i].functionId; @@ -3301,7 +3301,7 @@ void setResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult, SQLF int16_t offset = 0; for (int32_t i = 0; i < numOfCols; ++i) { - pCtx[i].pOutput = getPosInResultPage(pRuntimeEnv->pQuery, page, pResult->offset, offset); + pCtx[i].pOutput = getPosInResultPage(pRuntimeEnv, page, pResult->offset, offset, pCtx[i].outputBytes); offset += pCtx[i].outputBytes; int32_t functionId = pCtx[i].functionId; @@ -3509,8 +3509,6 @@ void setIntervalQueryRange(SQueryRuntimeEnv *pRuntimeEnv, TSKEY key) 
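
/*
 * Aside (illustrative, not part of the patch): the border check added to
 * getPosInResultPage above boils down to "the addressed cell must end
 * inside the page's used region" before a pointer into it is handed out.
 * A self-contained sketch with hypothetical types:
 */
#include <assert.h>
#include <stdint.h>

typedef struct { int64_t num; char data[]; } PageLike;

static char *cell_addr(PageLike *page, int64_t pageSize, int32_t rowOffset,
                       int16_t colOffset, int32_t numOfRows, int32_t size) {
    int64_t bufEnd = rowOffset + (int64_t)colOffset * numOfRows + size;
    assert(page->num <= pageSize && bufEnd <= page->num); /* overflow guard */
    return page->data + rowOffset + colOffset * numOfRows;
}
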
{ */ static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* pGroupResInfo, int32_t orderType, SSDataBlock* pBlock) { - SQuery *pQuery = pRuntimeEnv->pQuery; - int32_t numOfRows = getNumOfTotalRes(pGroupResInfo); int32_t numOfResult = pBlock->info.rows; // there are already exists result rows @@ -3545,7 +3543,7 @@ static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* int32_t bytes = pColInfoData->info.bytes; char *out = pColInfoData->pData + numOfResult * bytes; - char *in = getPosInResultPage(pQuery, page, pRow->offset, offset); + char *in = getPosInResultPage(pRuntimeEnv, page, pRow->offset, offset, bytes); memcpy(out, in, bytes * numOfRowsToCopy); offset += bytes; diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index 9b0046fda0..aa793add84 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -140,7 +140,7 @@ void clearResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResultRow, int16 SResultRowCellInfo *pResultInfo = &pResultRow->pCellInfo[i]; int16_t size = pRuntimeEnv->pQuery->pExpr1[i].bytes; - char * s = getPosInResultPage(pRuntimeEnv->pQuery, page, pResultRow->offset, offset); + char * s = getPosInResultPage(pRuntimeEnv, page, pResultRow->offset, offset, size); memset(s, 0, size); offset += size; From d8d85667cb11bde9cb09e7b07dc4e8b21e5237a5 Mon Sep 17 00:00:00 2001 From: robotspace Date: Fri, 9 Apr 2021 13:05:51 +0800 Subject: [PATCH 122/178] Resolve problems that is met in CentOS7. (#5738) * Set value for std with c99 to avoid compile error on CentOS7. * Get parameter from stack by same sequence. * Add performance test. --- tests/examples/lua/README.md | 4 ++ tests/examples/lua/benchmark.lua | 67 ++++++++++++++++++++++++++++++ tests/examples/lua/build.sh | 2 +- tests/examples/lua/lua51/build.sh | 2 +- tests/examples/lua/lua_connector.c | 2 +- 5 files changed, 74 insertions(+), 3 deletions(-) create mode 100644 tests/examples/lua/benchmark.lua diff --git a/tests/examples/lua/README.md b/tests/examples/lua/README.md index dd9c9d0787..32d6a4cace 100644 --- a/tests/examples/lua/README.md +++ b/tests/examples/lua/README.md @@ -19,6 +19,10 @@ Run lua sample: lua test.lua ``` +## Run performance test: +``` +time lua benchmark.lua +``` ## OpenResty Dependencies - OpenResty: ``` diff --git a/tests/examples/lua/benchmark.lua b/tests/examples/lua/benchmark.lua new file mode 100644 index 0000000000..900e7891d8 --- /dev/null +++ b/tests/examples/lua/benchmark.lua @@ -0,0 +1,67 @@ +local driver = require "luaconnector" + +local config = { + password = "taosdata", + host = "127.0.0.1", + port = 6030, + database = "", + user = "root", + + max_packet_size = 1024 * 1024 +} + +local conn +local res = driver.connect(config) +if res.code ~=0 then + print("connect--- failed: "..res.error) + return +else + conn = res.conn + print("connect--- pass.") +end + +local res = driver.query(conn,"drop database if exists demo") + +res = driver.query(conn,"create database demo") +if res.code ~=0 then + print("create db--- failed: "..res.error) + return +else + print("create db--- pass.") +end + +res = driver.query(conn,"use demo") +if res.code ~=0 then + print("select db--- failed: "..res.error) + return +else + print("select db--- pass.") +end + +res = driver.query(conn,"create table m1 (ts timestamp, speed int,owner binary(20))") +if res.code ~=0 then + print("create table---failed: "..res.error) + return +else + print("create table--- pass.") +end + +local base = 1617330000000 +local index =0 +local count = 100000 +local t +while( 
index < count ) +do + t = base + index + local q=string.format([[insert into m1 values (%d,0,'robotspace')]],t) +res = driver.query(conn,q) +if res.code ~=0 then + print("insert records failed: "..res.error) + return +else + +end + index = index+1 +end +print(string.format([["Done. %d records has been stored."]],count)) +driver.close(conn) diff --git a/tests/examples/lua/build.sh b/tests/examples/lua/build.sh index 8018a3b0d8..cbd47bdfd2 100755 --- a/tests/examples/lua/build.sh +++ b/tests/examples/lua/build.sh @@ -1,2 +1,2 @@ -gcc lua_connector.c -fPIC -shared -o luaconnector.so -Wall -ltaos +gcc -std=c99 lua_connector.c -fPIC -shared -o luaconnector.so -Wall -ltaos diff --git a/tests/examples/lua/lua51/build.sh b/tests/examples/lua/lua51/build.sh index da2981bf7d..3b52ed1448 100755 --- a/tests/examples/lua/lua51/build.sh +++ b/tests/examples/lua/lua51/build.sh @@ -1,2 +1,2 @@ -gcc lua_connector51.c -fPIC -shared -o luaconnector51.so -Wall -ltaos +gcc -std=c99 lua_connector51.c -fPIC -shared -o luaconnector51.so -Wall -ltaos diff --git a/tests/examples/lua/lua_connector.c b/tests/examples/lua/lua_connector.c index 920d2cdc35..8078ed2665 100644 --- a/tests/examples/lua/lua_connector.c +++ b/tests/examples/lua/lua_connector.c @@ -23,7 +23,7 @@ static int l_connect(lua_State *L){ luaL_checktype(L, 1, LUA_TTABLE); - lua_getfield(L,-1,"host"); + lua_getfield(L,1,"host"); if (lua_isstring(L,-1)){ host = lua_tostring(L, -1); // printf("host = %s\n", host); From ad073f168cace2fb7274a2ecd2ed1dd2ce290735 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 9 Apr 2021 13:32:13 +0800 Subject: [PATCH 123/178] [TD-3636]: fix taosdemo outorder range algorithm. (#5736) patch for master. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 48 +++++++++++++++++++------------------ 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 6939f5dae5..e804d93619 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -4520,22 +4520,23 @@ static int generateDataTail(char *tableName, int32_t tableSeq, pSamplePos); } else if (0 == strncasecmp(superTblInfo->dataSource, "rand", strlen("rand"))) { - int rand_num = taosRandom() % 100; - if (0 != superTblInfo->disorderRatio + int rand_num = taosRandom() % 100; + int randTail; + if (0 != superTblInfo->disorderRatio && rand_num < superTblInfo->disorderRatio) { - int64_t d = startTime - + superTblInfo->timeStampStep * k - - taosRandom() % superTblInfo->disorderRange; - retLen = generateRowData( + randTail = (superTblInfo->timeStampStep * k + + (taosRandom() % superTblInfo->disorderRange + 1)) * (-1); + debugPrint("rand data generated, back %d\n", randTail); + } else { + randTail = superTblInfo->timeStampStep * k; + } + + uint64_t d = startTime + + randTail; + retLen = generateRowData( data, d, superTblInfo); - } else { - retLen = generateRowData( - data, - startTime + superTblInfo->timeStampStep * k, - superTblInfo); - } } if (retLen > remainderBufLen) { @@ -4551,21 +4552,22 @@ static int generateDataTail(char *tableName, int32_t tableSeq, int lenOfBinary = g_args.len_of_binary; int rand_num = taosRandom() % 100; + int randTail; + if ((g_args.disorderRatio != 0) && (rand_num < g_args.disorderRatio)) { - - int64_t d = startTime + DEFAULT_TIMESTAMP_STEP * k - - taosRandom() % g_args.disorderRange; - - retLen = generateData(data, data_type, - ncols_per_record, d, lenOfBinary); + randTail = (DEFAULT_TIMESTAMP_STEP * k + + (taosRandom() % g_args.disorderRange + 1)) * (-1); + 
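
/*
 * Aside (illustrative, not part of the patch): the lua_connector.c fix a
 * few hunks earlier switches lua_getfield from the relative stack index -1
 * to the absolute index 1. Once the first lua_getfield pushes a value, -1
 * names that value rather than the argument table, so every later lookup
 * must address the table absolutely. A sketch with hypothetical names:
 */
#include <lua.h>
#include <lauxlib.h>

static int l_connect_sketch(lua_State *L) {
    luaL_checktype(L, 1, LUA_TTABLE);
    lua_getfield(L, 1, "host");  /* absolute index 1: the config table */
    const char *host = lua_tostring(L, -1);
    lua_getfield(L, 1, "port");  /* -1 here would hit the host string */
    int port = (int)lua_tointeger(L, -1);
    (void)host; (void)port;
    lua_pop(L, 2);               /* restore the stack */
    return 0;
}
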
debugPrint("rand data generated, back %d\n", randTail); } else { - retLen = generateData(data, data_type, - ncols_per_record, - startTime + DEFAULT_TIMESTAMP_STEP * k, - lenOfBinary); + randTail = DEFAULT_TIMESTAMP_STEP * k; } + retLen = generateData(data, data_type, + ncols_per_record, + startTime + randTail, + lenOfBinary); + if (len > remainderBufLen) break; @@ -5106,7 +5108,7 @@ static void callBack(void *param, TAOS_RES *res, int code) { int rand_num = taosRandom() % 100; if (0 != winfo->superTblInfo->disorderRatio && rand_num < winfo->superTblInfo->disorderRatio) { - int64_t d = winfo->lastTs - taosRandom() % winfo->superTblInfo->disorderRange; + int64_t d = winfo->lastTs - (taosRandom() % winfo->superTblInfo->disorderRange + 1); generateRowData(data, d, winfo->superTblInfo); } else { generateRowData(data, winfo->lastTs += 1000, winfo->superTblInfo); From 3a14b45f40977f6d46f67c17d8019f1a66678aaa Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 9 Apr 2021 14:56:25 +0800 Subject: [PATCH 124/178] fixbug crash --- src/rpc/src/rpcMain.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index 58d611ebb5..98d5c1ed54 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -997,8 +997,8 @@ static SRpcConn *rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv, SRpcReqCont } if ( rpcIsReq(pHead->msgType) ) { - terrno = rpcProcessReqHead(pConn, pHead); pConn->connType = pRecv->connType; + terrno = rpcProcessReqHead(pConn, pHead); // stop idle timer taosTmrStopA(&pConn->pIdleTimer); @@ -1367,7 +1367,8 @@ static void rpcProcessConnError(void *param, void *id) { tDebug("%s %p, connection error happens", pRpc->label, pContext->ahandle); - if (pContext->numOfTry >= pContext->epSet.numOfEps) { + if (pContext->numOfTry >= pContext->epSet.numOfEps + || pContex->msgType == TSDB_MSG_TYPE_FETCH) { rpcMsg.msgType = pContext->msgType+1; rpcMsg.ahandle = pContext->ahandle; rpcMsg.code = pContext->code; From 1511e0c32f13255564836f9d9b9e48255b8b615f Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 9 Apr 2021 15:01:29 +0800 Subject: [PATCH 125/178] fixbug crash --- src/rpc/src/rpcMain.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index 98d5c1ed54..3e8c1d9180 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -1368,7 +1368,7 @@ static void rpcProcessConnError(void *param, void *id) { tDebug("%s %p, connection error happens", pRpc->label, pContext->ahandle); if (pContext->numOfTry >= pContext->epSet.numOfEps - || pContex->msgType == TSDB_MSG_TYPE_FETCH) { + || pContext->msgType == TSDB_MSG_TYPE_FETCH) { rpcMsg.msgType = pContext->msgType+1; rpcMsg.ahandle = pContext->ahandle; rpcMsg.code = pContext->code; From 807f9b88750e755856708cd80bca2a1826f03ab2 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Fri, 9 Apr 2021 15:24:47 +0800 Subject: [PATCH 126/178] fix bug --- src/query/src/qResultbuf.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/query/src/qResultbuf.c b/src/query/src/qResultbuf.c index c5dd6b3cac..f83caf2d8f 100644 --- a/src/query/src/qResultbuf.c +++ b/src/query/src/qResultbuf.c @@ -287,6 +287,10 @@ static void lruListMoveToFront(SList *pList, SPageInfo* pi) { tdListPrependNode(pList, pi->pn); } +static FORCE_INLINE size_t getAllocPageSize(int32_t pageSize) { + return pageSize + POINTER_BYTES + 2 + sizeof(tFilePage); +} + tFilePage* getNewDataBuf(SDiskbasedResultBuf* pResultBuf, int32_t groupId, 
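
/*
 * Aside (illustrative, not part of the patch): the getAllocPageSize helper
 * added above replaces two hand-written "pageSize + slack" expressions in
 * the hunks below, which had already drifted apart by two bytes, so every
 * calloc of a result page now reserves the same headroom. The pattern,
 * with hypothetical names:
 */
#include <stdint.h>
#include <stdlib.h>

#define PTR_BYTES sizeof(void *)
typedef struct { int32_t num; char data[]; } PageHdr; /* stand-in header */

static inline size_t alloc_page_size(int32_t pageSize) {
    /* body + pointer slack + 2 spare bytes for zipped growth + header */
    return (size_t)pageSize + PTR_BYTES + 2 + sizeof(PageHdr);
}

static PageHdr *new_page(int32_t pageSize) {
    return calloc(1, alloc_page_size(pageSize));
}
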
int32_t* pageId) { pResultBuf->statis.getPages += 1; @@ -311,7 +315,7 @@ tFilePage* getNewDataBuf(SDiskbasedResultBuf* pResultBuf, int32_t groupId, int32 // allocate buf if (availablePage == NULL) { - pi->pData = calloc(1, pResultBuf->pageSize + POINTER_BYTES + 2); // add extract bytes in case of zipped buffer increased. + pi->pData = calloc(1, getAllocPageSize(pResultBuf->pageSize)); // add extract bytes in case of zipped buffer increased. } else { pi->pData = availablePage; } @@ -355,7 +359,7 @@ tFilePage* getResBufPage(SDiskbasedResultBuf* pResultBuf, int32_t id) { } if (availablePage == NULL) { - (*pi)->pData = calloc(1, pResultBuf->pageSize + POINTER_BYTES); + (*pi)->pData = calloc(1, getAllocPageSize(pResultBuf->pageSize)); } else { (*pi)->pData = availablePage; } From 9b0af30e3c732f145d62420db4f8e84761540f93 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Fri, 9 Apr 2021 15:58:57 +0800 Subject: [PATCH 127/178] fix bug --- src/client/inc/tsclient.h | 2 +- src/client/src/tscSql.c | 7 ++++--- src/client/src/tscUtil.c | 3 ++- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 4869f65645..2a78de0ba4 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -125,7 +125,6 @@ typedef struct SInternalField { typedef struct SFieldInfo { int16_t numOfOutput; // number of column in result - TAOS_FIELD* final; SArray *internalField; // SArray } SFieldInfo; @@ -316,6 +315,7 @@ typedef struct { char ** buffer; // Buffer used to put multibytes encoded using unicode (wchar_t) SColumnIndex* pColumnIndex; + TAOS_FIELD* final; SArithmeticSupport *pArithSup; // support the arithmetic expression calculation on agg functions struct SLocalMerger *pLocalMerger; } SSqlRes; diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 93d0e9fd09..13c8f025ea 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -405,6 +405,7 @@ int taos_affected_rows(TAOS_RES *tres) { TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) { SSqlObj *pSql = (SSqlObj *)res; + SSqlRes *pRes = &pSql->res; if (pSql == NULL || pSql->signature != pSql) return 0; SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); @@ -419,7 +420,7 @@ TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) { SFieldInfo *pFieldInfo = &pQueryInfo->fieldsInfo; - if (pFieldInfo->final == NULL) { + if (pRes->final == NULL) { TAOS_FIELD* f = calloc(pFieldInfo->numOfOutput, sizeof(TAOS_FIELD)); int32_t j = 0; @@ -439,10 +440,10 @@ TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) { } } - pFieldInfo->final = f; + pRes->final = f; } - return pFieldInfo->final; + return pRes->final; } static bool needToFetchNewBlock(SSqlObj* pSql) { diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 438e5618df..7d6d74b425 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -429,6 +429,8 @@ static void tscDestroyResPointerInfo(SSqlRes* pRes) { tfree(pRes->pArithSup->data); tfree(pRes->pArithSup); } + + tfree(pRes->final); pRes->data = NULL; // pRes->data points to the buffer of pRsp, no need to free } @@ -1153,7 +1155,6 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo) { } taosArrayDestroy(pFieldInfo->internalField); - tfree(pFieldInfo->final); memset(pFieldInfo, 0, sizeof(SFieldInfo)); } From cfba07e788d5ded79a8bba47b066cb972a822443 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 9 Apr 2021 16:14:08 +0800 Subject: [PATCH 128/178] [TD-3722]: taosdemo performance regression due to always set random seed. 
(#5740) Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index e804d93619..65ba7d50fe 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -507,11 +507,6 @@ static void resetAfterAnsiEscape(void) { static int taosRandom() { - struct timeval tv; - - gettimeofday(&tv, NULL); - srand(tv.tv_usec); - return rand(); } From e39139d1c033f41739aafd3f4f35645a548bdae1 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 9 Apr 2021 15:46:28 +0800 Subject: [PATCH 129/178] [TD-3706]: [mnode] validate super table columns and tags count --- src/inc/taoserror.h | 1 + src/mnode/src/mnodeTable.c | 28 ++++++++++++++++++++-------- src/util/src/terror.c | 1 + 3 files changed, 22 insertions(+), 8 deletions(-) diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index 8daccee1f2..eff4eecbc1 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -163,6 +163,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_MND_INVALID_TABLE_NAME TAOS_DEF_ERROR_CODE(0, 0x0362) //"Table does not exist") #define TSDB_CODE_MND_INVALID_TABLE_TYPE TAOS_DEF_ERROR_CODE(0, 0x0363) //"Invalid table type in tsdb") #define TSDB_CODE_MND_TOO_MANY_TAGS TAOS_DEF_ERROR_CODE(0, 0x0364) //"Too many tags") +#define TSDB_CODE_MND_TOO_MANY_COLUMNS TAOS_DEF_ERROR_CODE(0, 0x0365) //"Too many columns") #define TSDB_CODE_MND_TOO_MANY_TIMESERIES TAOS_DEF_ERROR_CODE(0, 0x0366) //"Too many time series") #define TSDB_CODE_MND_NOT_SUPER_TABLE TAOS_DEF_ERROR_CODE(0, 0x0367) //"Not super table") // operation only available for super table #define TSDB_CODE_MND_COL_NAME_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x0368) //"Tag name too long") diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 39eca8819d..2a8e941fcb 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -1037,6 +1037,19 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) { SCreateTableMsg* pCreate = (SCreateTableMsg*)((char*)pCreate1 + sizeof(SCMCreateTableMsg)); + int16_t numOfTags = htons(pCreate->numOfTags); + if (numOfTags > TSDB_MAX_TAGS) { + mError("msg:%p, app:%p table:%s, failed to create, too many tags", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName); + return TSDB_CODE_MND_TOO_MANY_TAGS; + } + + int16_t numOfColumns = htons(pCreate->numOfColumns); + int32_t numOfCols = numOfColumns + numOfTags; + if (numOfCols > TSDB_MAX_COLUMNS) { + mError("msg:%p, app:%p table:%s, failed to create, too many columns", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName); + return TSDB_CODE_MND_TOO_MANY_COLUMNS; + } + SSTableObj * pStable = calloc(1, sizeof(SSTableObj)); if (pStable == NULL) { mError("msg:%p, app:%p table:%s, failed to create, no enough memory", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName); @@ -1050,10 +1063,9 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) { pStable->uid = (us << 24) + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul)); pStable->sversion = 0; pStable->tversion = 0; - pStable->numOfColumns = htons(pCreate->numOfColumns); - pStable->numOfTags = htons(pCreate->numOfTags); + pStable->numOfColumns = numOfColumns; + pStable->numOfTags = numOfTags; - int32_t numOfCols = pStable->numOfColumns + pStable->numOfTags; int32_t schemaSize = numOfCols * sizeof(SSchema); pStable->schema = (SSchema *)calloc(1, schemaSize); if (pStable->schema == NULL) { @@ -1064,11 +1076,6 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg 
*pMsg) { memcpy(pStable->schema, pCreate->schema, numOfCols * sizeof(SSchema)); - if (pStable->numOfColumns > TSDB_MAX_COLUMNS || pStable->numOfTags > TSDB_MAX_TAGS) { - mError("msg:%p, app:%p table:%s, failed to create, too many columns", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName); - return TSDB_CODE_MND_INVALID_TABLE_NAME; - } - pStable->nextColId = 0; for (int32_t col = 0; col < numOfCols; col++) { @@ -1340,6 +1347,11 @@ static int32_t mnodeAddSuperTableColumn(SMnodeMsg *pMsg, SSchema schema[], int32 return TSDB_CODE_MND_APP_ERROR; } + if (pStable->numOfColumns + ncols + pStable->numOfTags > TSDB_MAX_COLUMNS) { + mError("msg:%p, app:%p stable:%s, add column, too many columns", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId); + return TSDB_CODE_MND_TOO_MANY_COLUMNS; + } + for (int32_t i = 0; i < ncols; i++) { if (mnodeFindSuperTableColumnIndex(pStable, schema[i].name) > 0) { mError("msg:%p, app:%p stable:%s, add column, column:%s already exist", pMsg, pMsg->rpcMsg.ahandle, diff --git a/src/util/src/terror.c b/src/util/src/terror.c index 918ccc4935..586a886f47 100644 --- a/src/util/src/terror.c +++ b/src/util/src/terror.c @@ -175,6 +175,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_ID, "Table name too long") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_NAME, "Table does not exist") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_TYPE, "Invalid table type in tsdb") TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_TAGS, "Too many tags") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_COLUMNS, "Too many columns") TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_TIMESERIES, "Too many time series") TAOS_DEFINE_ERROR(TSDB_CODE_MND_NOT_SUPER_TABLE, "Not super table") // operation only available for super table TAOS_DEFINE_ERROR(TSDB_CODE_MND_COL_NAME_TOO_LONG, "Tag name too long") From 824b3726f995076b01fe07d501c292487ae11c98 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 9 Apr 2021 08:52:29 +0000 Subject: [PATCH 130/178] [TD-3606]: When adding a new node to the cluster, the locale and charset are no longer checked --- src/mnode/src/mnodeDnode.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c index 80473ba5ae..85d9f94b88 100644 --- a/src/mnode/src/mnodeDnode.c +++ b/src/mnode/src/mnodeDnode.c @@ -437,14 +437,14 @@ static int32_t mnodeCheckClusterCfgPara(const SClusterCfg *clusterCfg) { return TAOS_DN_OFF_TIME_ZONE_NOT_MATCH; } - if (0 != strncasecmp(clusterCfg->locale, tsLocale, strlen(tsLocale))) { - mError("\"locale\"[%s - %s] cfg parameters inconsistent", clusterCfg->locale, tsLocale); - return TAOS_DN_OFF_LOCALE_NOT_MATCH; - } - if (0 != strncasecmp(clusterCfg->charset, tsCharset, strlen(tsCharset))) { - mError("\"charset\"[%s - %s] cfg parameters inconsistent.", clusterCfg->charset, tsCharset); - return TAOS_DN_OFF_CHARSET_NOT_MATCH; - } + // if (0 != strncasecmp(clusterCfg->locale, tsLocale, strlen(tsLocale))) { + // mError("\"locale\"[%s - %s] cfg parameters inconsistent", clusterCfg->locale, tsLocale); + // return TAOS_DN_OFF_LOCALE_NOT_MATCH; + // } + // if (0 != strncasecmp(clusterCfg->charset, tsCharset, strlen(tsCharset))) { + // mError("\"charset\"[%s - %s] cfg parameters inconsistent.", clusterCfg->charset, tsCharset); + // return TAOS_DN_OFF_CHARSET_NOT_MATCH; + // } if (clusterCfg->enableBalance != tsEnableBalance) { mError("\"balance\"[%d - %d] cfg parameters inconsistent", clusterCfg->enableBalance, tsEnableBalance); From 0cac7f96d0f022394aa79d55ec770aef74f9a96e Mon Sep 17 00:00:00 2001 
From: liuyq-617 Date: Fri, 9 Apr 2021 18:37:40 +0800 Subject: [PATCH 131/178] [TD-3727]Suppressing errors in valgrind --- tests/pytest/crash_gen/valgrind_taos.supp | 85 +++++++++++++++++++++++ tests/pytest/handle_crash_gen_val_log.sh | 7 +- tests/pytest/handle_taosd_val_log.sh | 7 +- 3 files changed, 87 insertions(+), 12 deletions(-) diff --git a/tests/pytest/crash_gen/valgrind_taos.supp b/tests/pytest/crash_gen/valgrind_taos.supp index 123858b3db..5eb5403395 100644 --- a/tests/pytest/crash_gen/valgrind_taos.supp +++ b/tests/pytest/crash_gen/valgrind_taos.supp @@ -17247,3 +17247,88 @@ fun:_PyEval_EvalFrameDefault fun:_PyEval_EvalCodeWithName } +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyEval_EvalCodeWithName + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:PyEval_EvalCode + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyObject_CallMethod + fun:PyInit__openssl + fun:_PyImport_LoadDynamicModuleWithSpec + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault +} diff --git a/tests/pytest/handle_crash_gen_val_log.sh b/tests/pytest/handle_crash_gen_val_log.sh index 55c10639d7..01cc62aaf8 100755 --- a/tests/pytest/handle_crash_gen_val_log.sh +++ b/tests/pytest/handle_crash_gen_val_log.sh @@ -36,16 +36,11 @@ for defiMemError in `grep 'definitely lost:' crash_gen-definitely-lost-out.log | do defiMemError=(${defiMemError//,/}) if [ -n "$defiMemError" ]; then - if [ "$defiMemError" -gt 0 -a "$defiMemError" -lt 1013 ]; then + if [ "$defiMemError" 
-gt 0 ]; then cat valgrind.err echo -e "${RED} ## Memory errors number valgrind reports \ Definitely lost is $defiMemError. More than our threshold! ## ${NC}" exit 8 - elif [ "$defiMemError" -gt 1013 ];then #add for azure - cat valgrind.err - echo -e "${RED} ## Memory errors number valgrind reports \ - Definitely lost is $defiMemError. More than our threshold! ## ${NC}" - exit 8 fi fi done diff --git a/tests/pytest/handle_taosd_val_log.sh b/tests/pytest/handle_taosd_val_log.sh index 3161b5ff89..829bc68225 100755 --- a/tests/pytest/handle_taosd_val_log.sh +++ b/tests/pytest/handle_taosd_val_log.sh @@ -53,16 +53,11 @@ for defiMemError in `grep 'definitely lost:' taosd-definitely-lost-out.log | awk do defiMemError=(${defiMemError//,/}) if [ -n "$defiMemError" ]; then - if [ "$defiMemError" -gt 0 -a "$defiMemError" -lt 1013 ]; then + if [ "$defiMemError" -gt 0 ]; then cat $VALGRIND_ERR echo -e "${RED} ## Memory errors number valgrind reports \ Definitely lost is $defiMemError. More than our threshold! ## ${NC}" exit 8 - elif [ "$defiMemError" -gt 1013 ];then #add for azure - cat $VALGRIND_ERR - echo -e "${RED} ## Memory errors number valgrind reports \ - Definitely lost is $defiMemError. More than our threshold! ## ${NC}" - exit 8 fi fi done From f4a0c1d97f6418ea01b0cd1630d699b06c2c4255 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Fri, 9 Apr 2021 18:37:40 +0800 Subject: [PATCH 132/178] [TD-3727]Suppressing errors in valgrind --- tests/pytest/crash_gen/valgrind_taos.supp | 85 +++++++++++++++++++++++ tests/pytest/handle_crash_gen_val_log.sh | 7 +- tests/pytest/handle_taosd_val_log.sh | 7 +- 3 files changed, 87 insertions(+), 12 deletions(-) diff --git a/tests/pytest/crash_gen/valgrind_taos.supp b/tests/pytest/crash_gen/valgrind_taos.supp index 123858b3db..5eb5403395 100644 --- a/tests/pytest/crash_gen/valgrind_taos.supp +++ b/tests/pytest/crash_gen/valgrind_taos.supp @@ -17247,3 +17247,88 @@ fun:_PyEval_EvalFrameDefault fun:_PyEval_EvalCodeWithName } +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyEval_EvalCodeWithName + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:PyEval_EvalCode + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + 
obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyObject_CallMethod + fun:PyInit__openssl + fun:_PyImport_LoadDynamicModuleWithSpec + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault +} diff --git a/tests/pytest/handle_crash_gen_val_log.sh b/tests/pytest/handle_crash_gen_val_log.sh index 55c10639d7..01cc62aaf8 100755 --- a/tests/pytest/handle_crash_gen_val_log.sh +++ b/tests/pytest/handle_crash_gen_val_log.sh @@ -36,16 +36,11 @@ for defiMemError in `grep 'definitely lost:' crash_gen-definitely-lost-out.log | do defiMemError=(${defiMemError//,/}) if [ -n "$defiMemError" ]; then - if [ "$defiMemError" -gt 0 -a "$defiMemError" -lt 1013 ]; then + if [ "$defiMemError" -gt 0 ]; then cat valgrind.err echo -e "${RED} ## Memory errors number valgrind reports \ Definitely lost is $defiMemError. More than our threshold! ## ${NC}" exit 8 - elif [ "$defiMemError" -gt 1013 ];then #add for azure - cat valgrind.err - echo -e "${RED} ## Memory errors number valgrind reports \ - Definitely lost is $defiMemError. More than our threshold! ## ${NC}" - exit 8 fi fi done diff --git a/tests/pytest/handle_taosd_val_log.sh b/tests/pytest/handle_taosd_val_log.sh index 3161b5ff89..829bc68225 100755 --- a/tests/pytest/handle_taosd_val_log.sh +++ b/tests/pytest/handle_taosd_val_log.sh @@ -53,16 +53,11 @@ for defiMemError in `grep 'definitely lost:' taosd-definitely-lost-out.log | awk do defiMemError=(${defiMemError//,/}) if [ -n "$defiMemError" ]; then - if [ "$defiMemError" -gt 0 -a "$defiMemError" -lt 1013 ]; then + if [ "$defiMemError" -gt 0 ]; then cat $VALGRIND_ERR echo -e "${RED} ## Memory errors number valgrind reports \ Definitely lost is $defiMemError. More than our threshold! ## ${NC}" exit 8 - elif [ "$defiMemError" -gt 1013 ];then #add for azure - cat $VALGRIND_ERR - echo -e "${RED} ## Memory errors number valgrind reports \ - Definitely lost is $defiMemError. More than our threshold! 
## ${NC}" - exit 8 fi fi done From 898cc038e754dab7df191f7d5255f7a77b242413 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 9 Apr 2021 18:39:48 +0800 Subject: [PATCH 133/178] [TD-3681]: fix syncdb precondition checkings --- src/mnode/src/mnodeVgroup.c | 1 + src/sync/src/syncMain.c | 10 ++++------ src/vnode/src/vnodeMain.c | 7 ++++++- 3 files changed, 11 insertions(+), 7 deletions(-) diff --git a/src/mnode/src/mnodeVgroup.c b/src/mnode/src/mnodeVgroup.c index 7eb3122d83..7222c8d1a0 100644 --- a/src/mnode/src/mnodeVgroup.c +++ b/src/mnode/src/mnodeVgroup.c @@ -994,6 +994,7 @@ void mnodeSendSyncVgroupMsg(SVgObj *pVgroup) { mDebug("vgId:%d, send sync all vnodes msg, numOfVnodes:%d db:%s", pVgroup->vgId, pVgroup->numOfVnodes, pVgroup->dbName); for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) { + if (pVgroup->vnodeGid[i].role != TAOS_SYNC_ROLE_SLAVE) continue; SRpcEpSet epSet = mnodeGetEpSetFromIp(pVgroup->vnodeGid[i].pDnode->dnodeEp); mDebug("vgId:%d, index:%d, send sync vnode msg to dnode %s", pVgroup->vgId, i, pVgroup->vnodeGid[i].pDnode->dnodeEp); diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index 29ced21291..2d33f68af0 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -409,23 +409,22 @@ void syncConfirmForward(int64_t rid, uint64_t version, int32_t code, bool force) syncReleaseNode(pNode); } -#if 1 void syncRecover(int64_t rid) { SSyncPeer *pPeer; SSyncNode *pNode = syncAcquireNode(rid); if (pNode == NULL) return; - // to do: add a few lines to check if recover is OK - // if take this node to unsync state, the whole system may not work - nodeRole = TAOS_SYNC_ROLE_UNSYNCED; (*pNode->notifyRoleFp)(pNode->vgId, nodeRole); - nodeVersion = 0; pthread_mutex_lock(&pNode->mutex); + nodeVersion = 0; + for (int32_t i = 0; i < pNode->replica; ++i) { + if (i == pNode->selfIndex) continue; + pPeer = pNode->peerInfo[i]; if (pPeer->peerFd >= 0) { syncRestartConnection(pPeer); @@ -436,7 +435,6 @@ void syncRecover(int64_t rid) { syncReleaseNode(pNode); } -#endif int32_t syncGetNodesRole(int64_t rid, SNodesRole *pNodesRole) { SSyncNode *pNode = syncAcquireNode(rid); diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index 69d94aff87..ee8ed9e0fa 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -99,8 +99,13 @@ int32_t vnodeSync(int32_t vgId) { return TSDB_CODE_VND_INVALID_VGROUP_ID; } - if (pVnode->role != TAOS_SYNC_ROLE_MASTER) { + if (pVnode->role == TAOS_SYNC_ROLE_SLAVE) { vInfo("vgId:%d, vnode will sync, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode); + + pVnode->version = 0; + pVnode->fversion = 0; + walResetVersion(pVnode->wal, pVnode->fversion); + syncRecover(pVnode->sync); } From defd55e58777b601f8f23ac00fb9c7a1f4af6858 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 9 Apr 2021 18:50:29 +0800 Subject: [PATCH 134/178] [TD-3705]: taosdemo columns or tags count overflow. (#5745) * [TD-3705]: taosdemo columns or tags count overflow. * [TD-3705]: taosdemo columns or tags count overflow. compare columns+tags with max columns count. 
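Illustrative sketch (not part of the patch): the guard added to taosdemo.c below reduces to the pattern shown here, assuming the column and tag counts have already been parsed from the insert JSON. The macro values are assumptions for the sketch; the real limits come from taosdemo's headers, and the combined check matters because the server keeps tags and columns in one schema array (see the mnode diff in PATCH 129 above).

#include <stdbool.h>
#include <stdint.h>

#define MAX_COLUMN_COUNT 1024  /* assumed value for this sketch */
#define MAX_TAG_COUNT    128   /* assumed value for this sketch */

/* sketch: validate schema width before building any create-table statement */
static bool schemaWithinLimits(int32_t columnCount, int32_t tagCount) {
  if (columnCount > MAX_COLUMN_COUNT) return false;             /* too many columns alone */
  if (tagCount > MAX_TAG_COUNT) return false;                   /* too many tags alone */
  if (columnCount + tagCount > MAX_COLUMN_COUNT) return false;  /* combined width is also capped */
  return true;
}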
Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 124 +++++++++++++++++++++++------------- 1 file changed, 78 insertions(+), 46 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 65ba7d50fe..49a3297bf6 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -2364,7 +2364,8 @@ static int createSuperTable(TAOS * taos, char* dbName, lenOfOneRow += 21; } else { taos_close(taos); - printf("config error data type : %s\n", dataType); + errorPrint("%s() LN%d, config error data type : %s\n", + __func__, __LINE__, dataType); exit(-1); } } @@ -2382,7 +2383,8 @@ static int createSuperTable(TAOS * taos, char* dbName, } snprintf(superTbl->colsOfCreateChildTable, len+20, "(ts timestamp%s)", cols); - verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, superTbl->colsOfCreateChildTable); + verbosePrint("%s() LN%d: %s\n", + __func__, __LINE__, superTbl->colsOfCreateChildTable); if (superTbl->tagCount == 0) { errorPrint("%s() LN%d, super table tag count is %d\n", @@ -2437,7 +2439,8 @@ static int createSuperTable(TAOS * taos, char* dbName, lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 42; } else { taos_close(taos); - printf("config error tag type : %s\n", dataType); + errorPrint("%s() LN%d, config error tag type : %s\n", + __func__, __LINE__, dataType); exit(-1); } } @@ -2732,7 +2735,8 @@ static int startMultiThreadCreateChildTable( db_name, g_Dbs.port); if (t_info->taos == NULL) { - errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); + errorPrint( "%s() LN%d, Failed to connect to TDengine, reason:%s\n", + __func__, __LINE__, taos_errstr(NULL)); free(pids); free(infos); return -1; @@ -2793,35 +2797,35 @@ static void createChildTables() { } } } else { - // normal table - len = snprintf(tblColsBuf, MAX_SQL_SIZE, "(TS TIMESTAMP"); - int j = 0; - while(g_args.datatype[j]) { - if ((strncasecmp(g_args.datatype[j], "BINARY", strlen("BINARY")) == 0) - || (strncasecmp(g_args.datatype[j], - "NCHAR", strlen("NCHAR")) == 0)) { - snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, - ", COL%d %s(60)", j, g_args.datatype[j]); - } else { - snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, - ", COL%d %s", j, g_args.datatype[j]); - } - len = strlen(tblColsBuf); - j++; - } + // normal table + len = snprintf(tblColsBuf, MAX_SQL_SIZE, "(TS TIMESTAMP"); + int j = 0; + while(g_args.datatype[j]) { + if ((strncasecmp(g_args.datatype[j], "BINARY", strlen("BINARY")) == 0) + || (strncasecmp(g_args.datatype[j], + "NCHAR", strlen("NCHAR")) == 0)) { + snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, + ", COL%d %s(60)", j, g_args.datatype[j]); + } else { + snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, + ", COL%d %s", j, g_args.datatype[j]); + } + len = strlen(tblColsBuf); + j++; + } - snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ")"); + snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ")"); - verbosePrint("%s() LN%d: dbName: %s num of tb: %d schema: %s\n", - __func__, __LINE__, - g_Dbs.db[i].dbName, g_args.num_of_tables, tblColsBuf); - startMultiThreadCreateChildTable( - tblColsBuf, - g_Dbs.threadCountByCreateTbl, - 0, - g_args.num_of_tables, - g_Dbs.db[i].dbName, - NULL); + verbosePrint("%s() LN%d: dbName: %s num of tb: %d schema: %s\n", + __func__, __LINE__, + g_Dbs.db[i].dbName, g_args.num_of_tables, tblColsBuf); + startMultiThreadCreateChildTable( + tblColsBuf, + g_Dbs.threadCountByCreateTbl, + 0, + g_args.num_of_tables, + g_Dbs.db[i].dbName, + NULL); } } } @@ -3035,6 +3039,13 @@ static bool getColumnAndTagTypeFromInsertJsonFile( index++; } 
}
+
+    if (index > MAX_COLUMN_COUNT) {
+      errorPrint("%s() LN%d, failed to read json, column size overflow, max column size is %d\n",
+              __func__, __LINE__, MAX_COLUMN_COUNT);
+      goto PARSE_OVER;
+    }
+
   superTbls->columnCount = index;
 
   count = 1;
@@ -3099,8 +3110,20 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
       index++;
     }
   }
+
+  if (index > MAX_TAG_COUNT) {
+    errorPrint("%s() LN%d, failed to read json, tags size overflow, max tag size is %d\n",
+        __func__, __LINE__, MAX_TAG_COUNT);
+    goto PARSE_OVER;
+  }
+
   superTbls->tagCount = index;
 
+  if ((superTbls->columnCount + superTbls->tagCount) > MAX_COLUMN_COUNT) {
+    errorPrint("%s() LN%d, columns + tags is more than max columns count: %d\n",
+        __func__, __LINE__, MAX_COLUMN_COUNT);
+    goto PARSE_OVER;
+  }
   ret = true;
 
 PARSE_OVER:
@@ -4324,13 +4347,20 @@ static int generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stbInfo
             "%f, ", rand_double());
       } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "smallint", 8)) {
-      dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%d, ", rand_smallint());
-    } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "tinyint", 7)) {
-      dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%d, ", rand_tinyint());
-    } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "bool", 4)) {
-      dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%d, ", rand_bool());
-    } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "timestamp", 9)) {
-      dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%"PRId64", ", rand_bigint());
+      dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
+              "%d, ", rand_smallint());
+    } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
+                "tinyint", strlen("tinyint"))) {
+      dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
+              "%d, ", rand_tinyint());
+    } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
+                "bool", strlen("bool"))) {
+      dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
+              "%d, ", rand_bool());
+    } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
+                "timestamp", strlen("timestamp"))) {
+      dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
+              "%"PRId64", ", rand_bigint());
     } else {
       errorPrint( "No support data type: %s\n", stbInfo->columns[i].dataType);
       return -1;
@@ -4422,7 +4452,8 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) {
   int ret = readSampleFromCsvFileToMem(superTblInfo);
 
   if (0 != ret) {
-    errorPrint("%s() LN%d, read sample from csv file failed.\n", __func__, __LINE__);
+    errorPrint("%s() LN%d, read sample from csv file failed.\n",
+        __func__, __LINE__);
     tmfree(sampleDataBuf);
     superTblInfo->sampleDataBuf = NULL;
     return -1;
@@ -4444,7 +4475,8 @@ static int execInsert(threadInfo *pThreadInfo, char *buffer, int k)
   } else {
     if (0 != postProceSql(g_Dbs.host, g_Dbs.port, buffer)) {
       affectedRows = -1;
-      printf("========restful return fail, threadID[%d]\n", pThreadInfo->threadID);
+      printf("========restful return fail, threadID[%d]\n",
+          pThreadInfo->threadID);
     } else {
       affectedRows = k;
     }
@@ -4601,7 +4633,8 @@ static int generateSQLHead(char *tableName, int32_t tableSeq,
           tableSeq % superTblInfo->tagSampleCount);
     }
     if (NULL == tagsValBuf) {
-      errorPrint("%s() LN%d, tag buf failed to allocate memory\n", __func__, __LINE__);
+      errorPrint("%s() LN%d, tag buf failed to allocate memory\n",
+          __func__, __LINE__);
       return -1;
     }
@@ -4698,11 +4731,11 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
   // TODO: prompt tbl count multiple interlace rows and batch
   //
-  char* buffer = 
calloc(superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len, 1); + int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; + char* buffer = calloc(maxSqlLen, 1); if (NULL == buffer) { - errorPrint( "Failed to alloc %d Bytes, reason:%s\n", - superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len, - strerror(errno)); + errorPrint( "%s() LN%d, Failed to alloc %d Bytes, reason:%s\n", + __func__, __LINE__, maxSqlLen, strerror(errno)); return NULL; } @@ -4751,7 +4784,6 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { bool flagSleep = true; int sleepTimeTotal = 0; - int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; int remainderBufLen; while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { From e8a6b6a5a1e4806ce29ca9f80fe7059eb9ab0730 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 9 Apr 2021 20:00:59 +0800 Subject: [PATCH 135/178] Hotfix/sangshuduo/td 3722 taosdemo performance regression (#5750) * TD-3675 * fix mem leak issue * fix crash issue * Hotfix/sangshuduo/td 3607 taosdemo buffer overflow (#5723) * [TD-3607] : fix taosdemo buffer overflow. * [TD-3607] : taosdemo buffer overflow. add tmp buffer. * [TD-3607] : taosdemo buffer overflow. fix data generation. * [TD-3607] : taosdemo buffer overflow. fix normal table writting. * [TD-3607] : taosdemo buffer overflow. remove tail spaces. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table test case. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table case. * [TD-3607] : taosdemo buffer overflow. adjust limit offset count warning. * [TD-3607] : taosdemo buffer overflow. add more logic for child tables exist. * [TD-3607] : taosdemo buffer overflow. create database if database be dropped only. * [TD-3607] : fix taosdemo buffer overflow. adjust limit and offset test cases. * [TD-3607] : taosdemo buffer overflow. adjust sample data test case. * test * [TD-3677]: test pr message 1 * [TD-3671]change target branch * [TD-3677]: test pr message 2 * [TD-3677]: test pr message 3 * Hotfix/sangshuduo/td 3197 fix taosdemo coverity scan (#5688) * [TD-3197] : fix taosdemo coverity scan issues. * [TD-3197] : fix taosdemo coverity scan issue. fix subscribeTest pids uninitialized. * [TD-3197] : fix taosdemo coverity scan issues. * [TD-3197] : fix coverity scan issues. check super tbl info pointer. * [TD-3197] : fix coverity scan issues. move sub tbl query thread join into loop * [TD-3197] : fix coverity scan issues. remove unused variable * [TD-3197] : fix coverity scan issues. use more secure random library * [TD-3197] : fix coverity scan issues. use strncpy for more safe * [TD-3197] : fix taosdemo coverity scan issue. replace arc4random with rand(). * [TD-3197] : fix coverity scan issues. check stb info pointer for start time * [TD-3197] : fix coverity scan issues. fix strcpy vulnerability * [TD-3197] : fix taosdemo coverity scan issue. modify taosdemoTest2. try to check database continously. * [TD-3197] : taosdemo coverity scan issues. * [TD-3197] : fix memory leak when parsing arguments. * [TD-3197] : fix cmake strip arguments. * [TD-3197] : taosdemo coverity scan. fix cmake string manipulation. Co-authored-by: Shuduo Sang * remove useless file * fix changing target branch * fix * fix * [TD-3607]: taosdemo limit and offset. if limit+offset > count * Hotfix/sangshuduo/td 3607 taosdemo buffer overflow (#5706) * [TD-3607] : fix taosdemo buffer overflow. * [TD-3607] : taosdemo buffer overflow. add tmp buffer. * [TD-3607] : taosdemo buffer overflow. 
fix data generation. * [TD-3607] : taosdemo buffer overflow. fix normal table writting. * [TD-3607] : taosdemo buffer overflow. remove tail spaces. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table test case. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table case. * [TD-3607] : taosdemo buffer overflow. adjust limit offset count warning. * [TD-3607] : taosdemo buffer overflow. add more logic for child tables exist. * [TD-3607] : taosdemo buffer overflow. create database if database be dropped only. * [TD-3607] : fix taosdemo buffer overflow. adjust limit and offset test cases. * [TD-3607] : taosdemo buffer overflow. adjust sample data test case. * [TD-3607]: taosdemo limit and offset. if limit+offset > count Co-authored-by: Shuduo Sang * [TD-3607]: taosdemo limit and offset. if child tbl not exist, dont take limit and offset value. * Hotfix/sangshuduo/td 3607 taosdemo buffer overflow (#5713) * [TD-3607] : fix taosdemo buffer overflow. * [TD-3607] : taosdemo buffer overflow. add tmp buffer. * [TD-3607] : taosdemo buffer overflow. fix data generation. * [TD-3607] : taosdemo buffer overflow. fix normal table writting. * [TD-3607] : taosdemo buffer overflow. remove tail spaces. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table test case. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table case. * [TD-3607] : taosdemo buffer overflow. adjust limit offset count warning. * [TD-3607] : taosdemo buffer overflow. add more logic for child tables exist. * [TD-3607] : taosdemo buffer overflow. create database if database be dropped only. * [TD-3607] : fix taosdemo buffer overflow. adjust limit and offset test cases. * [TD-3607] : taosdemo buffer overflow. adjust sample data test case. * [TD-3607]: taosdemo limit and offset. if limit+offset > count * [TD-3607]: taosdemo limit and offset. if child tbl not exist, dont take limit and offset value. Co-authored-by: Shuduo Sang * fix taosdemo limit invalid warning condition. * [TD-3683]: reduce buffer size for more stable table creation. (#5719) Co-authored-by: Shuduo Sang Co-authored-by: Shuduo Sang Co-authored-by: huili <52318143+plum-lihui@users.noreply.github.com> Co-authored-by: liuyq-617 Co-authored-by: plum-lihui Co-authored-by: Elias Soong Co-authored-by: Shengliang Guan * TD-3707 * Feature/sangshuduo/td 3408 taosdemo async query (#5731) * test * [TD-3677]: test pr message 1 * [TD-3671]change target branch * [TD-3677]: test pr message 2 * [TD-3677]: test pr message 3 * Hotfix/sangshuduo/td 3197 fix taosdemo coverity scan (#5688) * [TD-3197] : fix taosdemo coverity scan issues. * [TD-3197] : fix taosdemo coverity scan issue. fix subscribeTest pids uninitialized. * [TD-3197] : fix taosdemo coverity scan issues. * [TD-3197] : fix coverity scan issues. check super tbl info pointer. * [TD-3197] : fix coverity scan issues. move sub tbl query thread join into loop * [TD-3197] : fix coverity scan issues. remove unused variable * [TD-3197] : fix coverity scan issues. use more secure random library * [TD-3197] : fix coverity scan issues. use strncpy for more safe * [TD-3197] : fix taosdemo coverity scan issue. replace arc4random with rand(). * [TD-3197] : fix coverity scan issues. check stb info pointer for start time * [TD-3197] : fix coverity scan issues. fix strcpy vulnerability * [TD-3197] : fix taosdemo coverity scan issue. modify taosdemoTest2. try to check database continously. * [TD-3197] : taosdemo coverity scan issues. * [TD-3197] : fix memory leak when parsing arguments. 
* [TD-3197] : fix cmake strip arguments. * [TD-3197] : taosdemo coverity scan. fix cmake string manipulation. Co-authored-by: Shuduo Sang * remove useless file * fix changing target branch * fix * fix * Hotfix/sangshuduo/td 3607 taosdemo buffer overflow (#5706) * [TD-3607] : fix taosdemo buffer overflow. * [TD-3607] : taosdemo buffer overflow. add tmp buffer. * [TD-3607] : taosdemo buffer overflow. fix data generation. * [TD-3607] : taosdemo buffer overflow. fix normal table writting. * [TD-3607] : taosdemo buffer overflow. remove tail spaces. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table test case. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table case. * [TD-3607] : taosdemo buffer overflow. adjust limit offset count warning. * [TD-3607] : taosdemo buffer overflow. add more logic for child tables exist. * [TD-3607] : taosdemo buffer overflow. create database if database be dropped only. * [TD-3607] : fix taosdemo buffer overflow. adjust limit and offset test cases. * [TD-3607] : taosdemo buffer overflow. adjust sample data test case. * [TD-3607]: taosdemo limit and offset. if limit+offset > count Co-authored-by: Shuduo Sang * Hotfix/sangshuduo/td 3607 taosdemo buffer overflow (#5713) * [TD-3607] : fix taosdemo buffer overflow. * [TD-3607] : taosdemo buffer overflow. add tmp buffer. * [TD-3607] : taosdemo buffer overflow. fix data generation. * [TD-3607] : taosdemo buffer overflow. fix normal table writting. * [TD-3607] : taosdemo buffer overflow. remove tail spaces. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table test case. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table case. * [TD-3607] : taosdemo buffer overflow. adjust limit offset count warning. * [TD-3607] : taosdemo buffer overflow. add more logic for child tables exist. * [TD-3607] : taosdemo buffer overflow. create database if database be dropped only. * [TD-3607] : fix taosdemo buffer overflow. adjust limit and offset test cases. * [TD-3607] : taosdemo buffer overflow. adjust sample data test case. * [TD-3607]: taosdemo limit and offset. if limit+offset > count * [TD-3607]: taosdemo limit and offset. if child tbl not exist, dont take limit and offset value. Co-authored-by: Shuduo Sang * [TD-3683]: reduce buffer size for more stable table creation. (#5719) Co-authored-by: Shuduo Sang * [TD-3408]: taosdemo support async query. * [TD-3408]: taosdemo support async query. refactor * [TD-3408]: taosdemo support async query. refactor 2 * [TD-3408]: taosdemo support async query. refactor 3 * [TD-3408]: taosdemo support async query. refactor 4 * [TD-3408]: taosdemo support specified sql more than one line. Co-authored-by: huili <52318143+plum-lihui@users.noreply.github.com> Co-authored-by: liuyq-617 Co-authored-by: plum-lihui Co-authored-by: Elias Soong Co-authored-by: Shuduo Sang Co-authored-by: Shengliang Guan * [TD-3722]: taosdemo performance regression due to always set random seed. 
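Illustrative sketch (not part of the patch): the regression came from reseeding the generator on every call, as the removed lines in the diff below show. Seeding once at startup is the usual fix; the seedRandomOnce() helper here is hypothetical.

#include <stdlib.h>    /* rand, srand */
#include <sys/time.h>  /* gettimeofday */
#include <time.h>      /* time */

/* before (the code this patch removes): every call paid a gettimeofday()
 * syscall and reset the PRNG state, which is slow and weakens the sequence */
static int taosRandomSlow(void) {
  struct timeval tv;
  gettimeofday(&tv, NULL);
  srand(tv.tv_usec);
  return rand();
}

/* after: seed once at program start, then rand() alone is cheap */
static void seedRandomOnce(void) { srand((unsigned int)time(NULL)); }  /* hypothetical helper */
static int taosRandomFast(void) { return rand(); }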
Co-authored-by: dapan1121 <89396746@qq.com> Co-authored-by: Shuduo Sang Co-authored-by: huili <52318143+plum-lihui@users.noreply.github.com> Co-authored-by: liuyq-617 Co-authored-by: plum-lihui Co-authored-by: Elias Soong Co-authored-by: Shengliang Guan Co-authored-by: haojun Liao --- src/kit/taosdemo/taosdemo.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index e804d93619..65ba7d50fe 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -507,11 +507,6 @@ static void resetAfterAnsiEscape(void) { static int taosRandom() { - struct timeval tv; - - gettimeofday(&tv, NULL); - srand(tv.tv_usec); - return rand(); } From 677c7dcd931c051c5ccc67ca3c48411c369374a6 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 9 Apr 2021 21:22:04 +0800 Subject: [PATCH 136/178] [TD-3715]: taosdemo interlace rows must less than insert rows. (#5743) Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 49a3297bf6..c6d5933dc7 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -4718,8 +4718,13 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pThreadInfo->threadID, __func__, __LINE__); SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + + int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT; int interlaceRows = superTblInfo?superTblInfo->interlaceRows:g_args.interlace_rows; + if (interlaceRows > insertRows) + interlaceRows = insertRows; + int insertMode; if (interlaceRows > 0) { @@ -4746,7 +4751,6 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { int nTimeStampStep = superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP; - int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT; int insert_interval = superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; uint64_t st = 0; From 619a41ca2b89e49e6148d01c0f67228910c3a092 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 9 Apr 2021 21:22:47 +0800 Subject: [PATCH 137/178] Hotfix/sangshuduo/td 3715 interlacerows morethan insertrows (#5744) * [TD-3652] add case for TD-3652 to resolve TD-3590 * Update fulltest.sh * [TD-3295] add case for TD-3295 * TD-3675 * Hotfix/sangshuduo/td 3607 for master (#5712) * test * [TD-3677]: test pr message 1 * [TD-3671]change target branch * [TD-3677]: test pr message 2 * [TD-3677]: test pr message 3 * Hotfix/sangshuduo/td 3197 fix taosdemo coverity scan (#5688) * [TD-3197] : fix taosdemo coverity scan issues. * [TD-3197] : fix taosdemo coverity scan issue. fix subscribeTest pids uninitialized. * [TD-3197] : fix taosdemo coverity scan issues. * [TD-3197] : fix coverity scan issues. check super tbl info pointer. * [TD-3197] : fix coverity scan issues. move sub tbl query thread join into loop * [TD-3197] : fix coverity scan issues. remove unused variable * [TD-3197] : fix coverity scan issues. use more secure random library * [TD-3197] : fix coverity scan issues. use strncpy for more safe * [TD-3197] : fix taosdemo coverity scan issue. replace arc4random with rand(). * [TD-3197] : fix coverity scan issues. check stb info pointer for start time * [TD-3197] : fix coverity scan issues. fix strcpy vulnerability * [TD-3197] : fix taosdemo coverity scan issue. modify taosdemoTest2. try to check database continously. * [TD-3197] : taosdemo coverity scan issues. 
* [TD-3197] : fix memory leak when parsing arguments. * [TD-3197] : fix cmake strip arguments. * [TD-3197] : taosdemo coverity scan. fix cmake string manipulation. Co-authored-by: Shuduo Sang * remove useless file * fix changing target branch * fix * fix * [TD-3607]: fix taosdemo limit and offset. if offset+limit > count. * [TD-3607]: taosdemo limit and offset. if child tbl not exist, dont take limit and offset value. Co-authored-by: huili <52318143+plum-lihui@users.noreply.github.com> Co-authored-by: liuyq-617 Co-authored-by: plum-lihui Co-authored-by: Elias Soong Co-authored-by: Shuduo Sang * fix mem leak issue * fix crash issue * [TD-3683]: reduce buffer size for more stable table creation. (#5720) Co-authored-by: Shuduo Sang * Hotfix/sangshuduo/td 3607 taosdemo buffer overflow (#5723) * [TD-3607] : fix taosdemo buffer overflow. * [TD-3607] : taosdemo buffer overflow. add tmp buffer. * [TD-3607] : taosdemo buffer overflow. fix data generation. * [TD-3607] : taosdemo buffer overflow. fix normal table writting. * [TD-3607] : taosdemo buffer overflow. remove tail spaces. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table test case. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table case. * [TD-3607] : taosdemo buffer overflow. adjust limit offset count warning. * [TD-3607] : taosdemo buffer overflow. add more logic for child tables exist. * [TD-3607] : taosdemo buffer overflow. create database if database be dropped only. * [TD-3607] : fix taosdemo buffer overflow. adjust limit and offset test cases. * [TD-3607] : taosdemo buffer overflow. adjust sample data test case. * test * [TD-3677]: test pr message 1 * [TD-3671]change target branch * [TD-3677]: test pr message 2 * [TD-3677]: test pr message 3 * Hotfix/sangshuduo/td 3197 fix taosdemo coverity scan (#5688) * [TD-3197] : fix taosdemo coverity scan issues. * [TD-3197] : fix taosdemo coverity scan issue. fix subscribeTest pids uninitialized. * [TD-3197] : fix taosdemo coverity scan issues. * [TD-3197] : fix coverity scan issues. check super tbl info pointer. * [TD-3197] : fix coverity scan issues. move sub tbl query thread join into loop * [TD-3197] : fix coverity scan issues. remove unused variable * [TD-3197] : fix coverity scan issues. use more secure random library * [TD-3197] : fix coverity scan issues. use strncpy for more safe * [TD-3197] : fix taosdemo coverity scan issue. replace arc4random with rand(). * [TD-3197] : fix coverity scan issues. check stb info pointer for start time * [TD-3197] : fix coverity scan issues. fix strcpy vulnerability * [TD-3197] : fix taosdemo coverity scan issue. modify taosdemoTest2. try to check database continously. * [TD-3197] : taosdemo coverity scan issues. * [TD-3197] : fix memory leak when parsing arguments. * [TD-3197] : fix cmake strip arguments. * [TD-3197] : taosdemo coverity scan. fix cmake string manipulation. Co-authored-by: Shuduo Sang * remove useless file * fix changing target branch * fix * fix * [TD-3607]: taosdemo limit and offset. if limit+offset > count * Hotfix/sangshuduo/td 3607 taosdemo buffer overflow (#5706) * [TD-3607] : fix taosdemo buffer overflow. * [TD-3607] : taosdemo buffer overflow. add tmp buffer. * [TD-3607] : taosdemo buffer overflow. fix data generation. * [TD-3607] : taosdemo buffer overflow. fix normal table writting. * [TD-3607] : taosdemo buffer overflow. remove tail spaces. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table test case. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table case. 
* [TD-3607] : taosdemo buffer overflow. adjust limit offset count warning. * [TD-3607] : taosdemo buffer overflow. add more logic for child tables exist. * [TD-3607] : taosdemo buffer overflow. create database if database be dropped only. * [TD-3607] : fix taosdemo buffer overflow. adjust limit and offset test cases. * [TD-3607] : taosdemo buffer overflow. adjust sample data test case. * [TD-3607]: taosdemo limit and offset. if limit+offset > count Co-authored-by: Shuduo Sang * [TD-3607]: taosdemo limit and offset. if child tbl not exist, dont take limit and offset value. * Hotfix/sangshuduo/td 3607 taosdemo buffer overflow (#5713) * [TD-3607] : fix taosdemo buffer overflow. * [TD-3607] : taosdemo buffer overflow. add tmp buffer. * [TD-3607] : taosdemo buffer overflow. fix data generation. * [TD-3607] : taosdemo buffer overflow. fix normal table writting. * [TD-3607] : taosdemo buffer overflow. remove tail spaces. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table test case. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table case. * [TD-3607] : taosdemo buffer overflow. adjust limit offset count warning. * [TD-3607] : taosdemo buffer overflow. add more logic for child tables exist. * [TD-3607] : taosdemo buffer overflow. create database if database be dropped only. * [TD-3607] : fix taosdemo buffer overflow. adjust limit and offset test cases. * [TD-3607] : taosdemo buffer overflow. adjust sample data test case. * [TD-3607]: taosdemo limit and offset. if limit+offset > count * [TD-3607]: taosdemo limit and offset. if child tbl not exist, dont take limit and offset value. Co-authored-by: Shuduo Sang * fix taosdemo limit invalid warning condition. * [TD-3683]: reduce buffer size for more stable table creation. (#5719) Co-authored-by: Shuduo Sang Co-authored-by: Shuduo Sang Co-authored-by: huili <52318143+plum-lihui@users.noreply.github.com> Co-authored-by: liuyq-617 Co-authored-by: plum-lihui Co-authored-by: Elias Soong Co-authored-by: Shengliang Guan * TD-3707 * Feature/sangshuduo/td 3408 taosdemo async query (#5731) * test * [TD-3677]: test pr message 1 * [TD-3671]change target branch * [TD-3677]: test pr message 2 * [TD-3677]: test pr message 3 * Hotfix/sangshuduo/td 3197 fix taosdemo coverity scan (#5688) * [TD-3197] : fix taosdemo coverity scan issues. * [TD-3197] : fix taosdemo coverity scan issue. fix subscribeTest pids uninitialized. * [TD-3197] : fix taosdemo coverity scan issues. * [TD-3197] : fix coverity scan issues. check super tbl info pointer. * [TD-3197] : fix coverity scan issues. move sub tbl query thread join into loop * [TD-3197] : fix coverity scan issues. remove unused variable * [TD-3197] : fix coverity scan issues. use more secure random library * [TD-3197] : fix coverity scan issues. use strncpy for more safe * [TD-3197] : fix taosdemo coverity scan issue. replace arc4random with rand(). * [TD-3197] : fix coverity scan issues. check stb info pointer for start time * [TD-3197] : fix coverity scan issues. fix strcpy vulnerability * [TD-3197] : fix taosdemo coverity scan issue. modify taosdemoTest2. try to check database continously. * [TD-3197] : taosdemo coverity scan issues. * [TD-3197] : fix memory leak when parsing arguments. * [TD-3197] : fix cmake strip arguments. * [TD-3197] : taosdemo coverity scan. fix cmake string manipulation. Co-authored-by: Shuduo Sang * remove useless file * fix changing target branch * fix * fix * Hotfix/sangshuduo/td 3607 taosdemo buffer overflow (#5706) * [TD-3607] : fix taosdemo buffer overflow. 
* [TD-3607] : taosdemo buffer overflow. add tmp buffer. * [TD-3607] : taosdemo buffer overflow. fix data generation. * [TD-3607] : taosdemo buffer overflow. fix normal table writting. * [TD-3607] : taosdemo buffer overflow. remove tail spaces. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table test case. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table case. * [TD-3607] : taosdemo buffer overflow. adjust limit offset count warning. * [TD-3607] : taosdemo buffer overflow. add more logic for child tables exist. * [TD-3607] : taosdemo buffer overflow. create database if database be dropped only. * [TD-3607] : fix taosdemo buffer overflow. adjust limit and offset test cases. * [TD-3607] : taosdemo buffer overflow. adjust sample data test case. * [TD-3607]: taosdemo limit and offset. if limit+offset > count Co-authored-by: Shuduo Sang * Hotfix/sangshuduo/td 3607 taosdemo buffer overflow (#5713) * [TD-3607] : fix taosdemo buffer overflow. * [TD-3607] : taosdemo buffer overflow. add tmp buffer. * [TD-3607] : taosdemo buffer overflow. fix data generation. * [TD-3607] : taosdemo buffer overflow. fix normal table writting. * [TD-3607] : taosdemo buffer overflow. remove tail spaces. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table test case. * [TD-3607] : taosdemo buffer overflow. fix taosdemo alter table case. * [TD-3607] : taosdemo buffer overflow. adjust limit offset count warning. * [TD-3607] : taosdemo buffer overflow. add more logic for child tables exist. * [TD-3607] : taosdemo buffer overflow. create database if database be dropped only. * [TD-3607] : fix taosdemo buffer overflow. adjust limit and offset test cases. * [TD-3607] : taosdemo buffer overflow. adjust sample data test case. * [TD-3607]: taosdemo limit and offset. if limit+offset > count * [TD-3607]: taosdemo limit and offset. if child tbl not exist, dont take limit and offset value. Co-authored-by: Shuduo Sang * [TD-3683]: reduce buffer size for more stable table creation. (#5719) Co-authored-by: Shuduo Sang * [TD-3408]: taosdemo support async query. * [TD-3408]: taosdemo support async query. refactor * [TD-3408]: taosdemo support async query. refactor 2 * [TD-3408]: taosdemo support async query. refactor 3 * [TD-3408]: taosdemo support async query. refactor 4 * [TD-3408]: taosdemo support specified sql more than one line. Co-authored-by: huili <52318143+plum-lihui@users.noreply.github.com> Co-authored-by: liuyq-617 Co-authored-by: plum-lihui Co-authored-by: Elias Soong Co-authored-by: Shuduo Sang Co-authored-by: Shengliang Guan * [TD-3636]: fix taosdemo outorder range algorithm. (#5736) patch for master. Co-authored-by: Shuduo Sang * [TD-3715]: taosdemo interlace rows must less than insert rows. 
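Illustrative sketch (not part of the patch): the fix is the clamp in the diff below, pulled out here for readability; names follow syncWriteInterlace() in taosdemo.c.

int64_t insertRows    = superTblInfo ? superTblInfo->insertRows    : g_args.num_of_DPT;
int     interlaceRows = superTblInfo ? superTblInfo->interlaceRows : g_args.interlace_rows;
if (interlaceRows > insertRows) {
  interlaceRows = (int)insertRows;  /* cast is safe: insertRows is below the previous int value here */
}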
Co-authored-by: wu champion Co-authored-by: wu champion Co-authored-by: dapan1121 <89396746@qq.com> Co-authored-by: wu champion Co-authored-by: huili <52318143+plum-lihui@users.noreply.github.com> Co-authored-by: liuyq-617 Co-authored-by: plum-lihui Co-authored-by: Elias Soong Co-authored-by: Shuduo Sang Co-authored-by: Shengliang Guan Co-authored-by: haojun Liao --- src/kit/taosdemo/taosdemo.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 65ba7d50fe..56aff9de6e 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -4685,8 +4685,13 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pThreadInfo->threadID, __func__, __LINE__); SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + + int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT; int interlaceRows = superTblInfo?superTblInfo->interlaceRows:g_args.interlace_rows; + if (interlaceRows > insertRows) + interlaceRows = insertRows; + int insertMode; if (interlaceRows > 0) { @@ -4713,7 +4718,6 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { int nTimeStampStep = superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP; - int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT; int insert_interval = superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; uint64_t st = 0; From 395c2d98fcb2c442584dde03203453ea74fccda0 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 9 Apr 2021 21:49:23 +0800 Subject: [PATCH 138/178] Hotfix/sangshuduo/td 3215 bus error arm32 (#5753) * [TD-3215] fix bus error on arm32. fix indent. * [TD-3215]: arm32 porting. fix cache function for data type convertion mistake. --- src/client/inc/tsclient.h | 8 ++++---- src/inc/query.h | 2 +- src/query/src/queryMain.c | 5 ++--- src/vnode/src/vnodeRead.c | 2 +- 4 files changed, 8 insertions(+), 9 deletions(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 7041ab6a50..5d326931cb 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -273,10 +273,10 @@ typedef struct { char * curSql; // current sql, resume position of sql after parsing paused int8_t parseFinished; - char reserve2[3]; // fix bus error on arm32 + char reserve2[3]; // fix bus error on arm32 int16_t numOfCols; - char reserve3[2]; // fix bus error on arm32 + char reserve3[2]; // fix bus error on arm32 uint32_t allocSize; char * payload; int32_t payloadLen; @@ -286,9 +286,9 @@ typedef struct { int32_t numOfParams; int8_t dataSourceType; // load data from file or not - char reserve4[3]; // fix bus error on arm32 + char reserve4[3]; // fix bus error on arm32 int8_t submitSchema; // submit block is built with table schema - char reserve5[3]; // fix bus error on arm32 + char reserve5[3]; // fix bus error on arm32 STagData tagData; // NOTE: pTagData->data is used as a variant length array SName **pTableNameList; // all involved tableMeta list of current insert sql statement. 
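Illustrative note (not part of the patch): the reserve fields in the tsclient.h hunk above pad the 1-byte members so the multi-byte members that follow stay naturally aligned; on 32-bit ARM an unaligned multi-byte access can raise SIGBUS instead of being fixed up by hardware. A hypothetical minimal example:

#include <stdint.h>

/* hypothetical struct, not from the patch */
typedef struct {
  int8_t  flag;         /* 1-byte member */
  char    reserve[3];   /* explicit padding: the next member starts on a 4-byte boundary */
  int32_t value;        /* stays naturally aligned on arm32, avoiding a bus error on access */
} SAlignedExample;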
diff --git a/src/inc/query.h b/src/inc/query.h index c9dabcef54..461e8723e7 100644 --- a/src/inc/query.h +++ b/src/inc/query.h @@ -88,7 +88,7 @@ void* qOpenQueryMgmt(int32_t vgId); void qQueryMgmtNotifyClosed(void* pExecutor); void qQueryMgmtReOpen(void *pExecutor); void qCleanupQueryMgmt(void* pExecutor); -void** qRegisterQInfo(void* pMgmt, uint64_t qId, uint64_t qInfo); +void** qRegisterQInfo(void* pMgmt, uint64_t qId, void *qInfo); void** qAcquireQInfo(void* pMgmt, uint64_t key); void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool freeHandle); bool checkQIdEqual(void *qHandle, uint64_t qId); diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c index 2ff0c9676e..06a7ee210f 100644 --- a/src/query/src/queryMain.c +++ b/src/query/src/queryMain.c @@ -475,7 +475,7 @@ void qCleanupQueryMgmt(void* pQMgmt) { qDebug("vgId:%d, queryMgmt cleanup completed", vgId); } -void** qRegisterQInfo(void* pMgmt, uint64_t qId, uint64_t qInfo) { +void** qRegisterQInfo(void* pMgmt, uint64_t qId, void *qInfo) { if (pMgmt == NULL) { terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; return NULL; @@ -516,8 +516,7 @@ void** qAcquireQInfo(void* pMgmt, uint64_t _key) { return NULL; } - TSDB_CACHE_PTR_TYPE key = (TSDB_CACHE_PTR_TYPE)_key; - void** handle = taosCacheAcquireByKey(pQueryMgmt->qinfoPool, &key, sizeof(TSDB_CACHE_PTR_TYPE)); + void** handle = taosCacheAcquireByKey(pQueryMgmt->qinfoPool, &_key, sizeof(_key)); if (handle == NULL || *handle == NULL) { terrno = TSDB_CODE_QRY_INVALID_QHANDLE; return NULL; diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index 0836ade77f..a3dd8e1354 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -239,7 +239,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { // current connect is broken if (code == TSDB_CODE_SUCCESS) { - handle = qRegisterQInfo(pVnode->qMgmt, qId, (uint64_t)pQInfo); + handle = qRegisterQInfo(pVnode->qMgmt, qId, pQInfo); if (handle == NULL) { // failed to register qhandle pRsp->code = terrno; terrno = 0; From b2b514f81bf9e5e703b6db568813508987f7e418 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 9 Apr 2021 23:23:28 +0800 Subject: [PATCH 139/178] [TD-3705]: taosdemo columns or tags count overflow. (#5745) (#5760) * [TD-3705]: taosdemo columns or tags count overflow. * [TD-3705]: taosdemo columns or tags count overflow. compare columns+tags with max columns count. 
Co-authored-by: Shuduo Sang Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 124 +++++++++++++++++++++++------------- 1 file changed, 78 insertions(+), 46 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 56aff9de6e..c6d5933dc7 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -2364,7 +2364,8 @@ static int createSuperTable(TAOS * taos, char* dbName, lenOfOneRow += 21; } else { taos_close(taos); - printf("config error data type : %s\n", dataType); + errorPrint("%s() LN%d, config error data type : %s\n", + __func__, __LINE__, dataType); exit(-1); } } @@ -2382,7 +2383,8 @@ static int createSuperTable(TAOS * taos, char* dbName, } snprintf(superTbl->colsOfCreateChildTable, len+20, "(ts timestamp%s)", cols); - verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, superTbl->colsOfCreateChildTable); + verbosePrint("%s() LN%d: %s\n", + __func__, __LINE__, superTbl->colsOfCreateChildTable); if (superTbl->tagCount == 0) { errorPrint("%s() LN%d, super table tag count is %d\n", @@ -2437,7 +2439,8 @@ static int createSuperTable(TAOS * taos, char* dbName, lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 42; } else { taos_close(taos); - printf("config error tag type : %s\n", dataType); + errorPrint("%s() LN%d, config error tag type : %s\n", + __func__, __LINE__, dataType); exit(-1); } } @@ -2732,7 +2735,8 @@ static int startMultiThreadCreateChildTable( db_name, g_Dbs.port); if (t_info->taos == NULL) { - errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); + errorPrint( "%s() LN%d, Failed to connect to TDengine, reason:%s\n", + __func__, __LINE__, taos_errstr(NULL)); free(pids); free(infos); return -1; @@ -2793,35 +2797,35 @@ static void createChildTables() { } } } else { - // normal table - len = snprintf(tblColsBuf, MAX_SQL_SIZE, "(TS TIMESTAMP"); - int j = 0; - while(g_args.datatype[j]) { - if ((strncasecmp(g_args.datatype[j], "BINARY", strlen("BINARY")) == 0) - || (strncasecmp(g_args.datatype[j], - "NCHAR", strlen("NCHAR")) == 0)) { - snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, - ", COL%d %s(60)", j, g_args.datatype[j]); - } else { - snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, - ", COL%d %s", j, g_args.datatype[j]); - } - len = strlen(tblColsBuf); - j++; - } + // normal table + len = snprintf(tblColsBuf, MAX_SQL_SIZE, "(TS TIMESTAMP"); + int j = 0; + while(g_args.datatype[j]) { + if ((strncasecmp(g_args.datatype[j], "BINARY", strlen("BINARY")) == 0) + || (strncasecmp(g_args.datatype[j], + "NCHAR", strlen("NCHAR")) == 0)) { + snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, + ", COL%d %s(60)", j, g_args.datatype[j]); + } else { + snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, + ", COL%d %s", j, g_args.datatype[j]); + } + len = strlen(tblColsBuf); + j++; + } - snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ")"); + snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ")"); - verbosePrint("%s() LN%d: dbName: %s num of tb: %d schema: %s\n", - __func__, __LINE__, - g_Dbs.db[i].dbName, g_args.num_of_tables, tblColsBuf); - startMultiThreadCreateChildTable( - tblColsBuf, - g_Dbs.threadCountByCreateTbl, - 0, - g_args.num_of_tables, - g_Dbs.db[i].dbName, - NULL); + verbosePrint("%s() LN%d: dbName: %s num of tb: %d schema: %s\n", + __func__, __LINE__, + g_Dbs.db[i].dbName, g_args.num_of_tables, tblColsBuf); + startMultiThreadCreateChildTable( + tblColsBuf, + g_Dbs.threadCountByCreateTbl, + 0, + g_args.num_of_tables, + g_Dbs.db[i].dbName, + NULL); } } } @@ -3035,6 +3039,13 @@ static bool 
getColumnAndTagTypeFromInsertJsonFile( index++; } } + + if (index > MAX_COLUMN_COUNT) { + errorPrint("%s() LN%d, failed to read json, column size overflow, max column size is %d\n", + __func__, __LINE__, MAX_COLUMN_COUNT); + goto PARSE_OVER; + } + superTbls->columnCount = index; count = 1; @@ -3099,8 +3110,20 @@ static bool getColumnAndTagTypeFromInsertJsonFile( index++; } } + + if (index > MAX_TAG_COUNT) { + errorPrint("%s() LN%d, failed to read json, tags size overflow, max tag size is %d\n", + __func__, __LINE__, MAX_TAG_COUNT); + goto PARSE_OVER; + } + superTbls->tagCount = index; + if ((superTbls->columnCount + superTbls->tagCount) > MAX_COLUMN_COUNT) { + errorPrint("%s() LN%d, columns + tags is more than max columns count: %d\n", + __func__, __LINE__, MAX_TAG_COUNT); + goto PARSE_OVER; + } ret = true; PARSE_OVER: @@ -4324,13 +4347,20 @@ static int generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stbInfo "%f, ", rand_double()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "smallint", 8)) { - dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%d, ", rand_smallint()); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "tinyint", 7)) { - dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%d, ", rand_tinyint()); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "bool", 4)) { - dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%d, ", rand_bool()); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "timestamp", 9)) { - dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%"PRId64", ", rand_bigint()); + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, + "%d, ", rand_smallint()); + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, + "tinyint", strlen("tinyint"))) { + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, + "%d, ", rand_tinyint()); + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, + "bool", strlen("bool"))) { + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, + "%d, ", rand_bool()); + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, + "timestamp", strlen("timestamp"))) { + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, + "%"PRId64", ", rand_bigint()); } else { errorPrint( "No support data type: %s\n", stbInfo->columns[i].dataType); return -1; @@ -4422,7 +4452,8 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) { int ret = readSampleFromCsvFileToMem(superTblInfo); if (0 != ret) { - errorPrint("%s() LN%d, read sample from csv file failed.\n", __func__, __LINE__); + errorPrint("%s() LN%d, read sample from csv file failed.\n", + __func__, __LINE__); tmfree(sampleDataBuf); superTblInfo->sampleDataBuf = NULL; return -1; @@ -4444,7 +4475,8 @@ static int execInsert(threadInfo *pThreadInfo, char *buffer, int k) } else { if (0 != postProceSql(g_Dbs.host, g_Dbs.port, buffer)) { affectedRows = -1; - printf("========restful return fail, threadID[%d]\n", pThreadInfo->threadID); + printf("========restful return fail, threadID[%d]\n", + pThreadInfo->threadID); } else { affectedRows = k; } @@ -4601,7 +4633,8 @@ static int generateSQLHead(char *tableName, int32_t tableSeq, tableSeq % superTblInfo->tagSampleCount); } if (NULL == tagsValBuf) { - errorPrint("%s() LN%d, tag buf failed to allocate memory\n", __func__, __LINE__); + errorPrint("%s() LN%d, tag buf failed to allocate memory\n", + __func__, __LINE__); return -1; } @@ -4703,11 +4736,11 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { // TODO: prompt tbl count multple interlace rows and batch // - 
char* buffer = calloc(superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len, 1); + int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; + char* buffer = calloc(maxSqlLen, 1); if (NULL == buffer) { - errorPrint( "Failed to alloc %d Bytes, reason:%s\n", - superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len, - strerror(errno)); + errorPrint( "%s() LN%d, Failed to alloc %d Bytes, reason:%s\n", + __func__, __LINE__, maxSqlLen, strerror(errno)); return NULL; } @@ -4755,7 +4788,6 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { bool flagSleep = true; int sleepTimeTotal = 0; - int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; int remainderBufLen; while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { From 00904c14078c22f6a77b3d40ccec35bb5ac176d0 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sat, 10 Apr 2021 01:41:49 +0800 Subject: [PATCH 140/178] remove invalid qId --- src/rpc/src/rpcMain.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index 133ae6d0ab..cd6b3cf80c 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -1367,7 +1367,7 @@ static void rpcProcessConnError(void *param, void *id) { tDebug("%s %p, connection error happens", pRpc->label, pContext->ahandle); - if (pContext->numOfTry >= pContext->epSet.numOfEps) { + if (pContext->numOfTry >= pContext->epSet.numOfEps || pContext->msgType == TSDB_MSG_TYPE_FETCH) { rpcMsg.msgType = pContext->msgType+1; rpcMsg.ahandle = pContext->ahandle; rpcMsg.code = pContext->code; From 6d593fe80af7a8e3ffaf1b5569360efc2264268f Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 10 Apr 2021 23:14:01 +0800 Subject: [PATCH 141/178] [td-3571]: enable update the vgroup info during super table query. 
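Condensed from the diff that follows: the fix replaces the early return for super tables in tscUpdateVgroupInfo() with a per-subquery vgroup lookup, since a super table's cached meta carries no usable vgId and the id must instead come from the vgroup selected by the subquery's vgroupIndex.

    /* Excerpted and condensed from the patch below; not standalone code. */
    int32_t vgId = -1;
    if (pTableMetaInfo->pTableMeta->tableType == TSDB_SUPER_TABLE) {
        /* fan-out query: use the vgroup this subquery currently targets */
        vgId = extractSTableQueryVgroupId(pTableMetaInfo);
    } else {
        /* ordinary table: the cached meta itself names the vnode */
        vgId = pTableMetaInfo->pTableMeta->vgId;
    }
    assert(vgId > 0);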
--- src/client/src/tscServer.c | 47 ++++++++++++++++++++++++++++++-------- 1 file changed, 37 insertions(+), 10 deletions(-) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index b8beed392e..f0b88bb113 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -34,6 +34,7 @@ int tscKeepConn[TSDB_SQL_MAX] = {0}; TSKEY tscGetSubscriptionProgress(void* sub, int64_t uid, TSKEY dflt); void tscUpdateSubscriptionProgress(void* sub, int64_t uid, TSKEY ts); void tscSaveSubscriptionProgress(void* sub); +static int32_t extractSTableQueryVgroupId(STableMetaInfo* pTableMetaInfo); static int32_t minMsgSize() { return tsRpcHeadSize + 100; } static int32_t getWaitingTimeInterval(int32_t count) { @@ -78,7 +79,8 @@ static void tscEpSetHtons(SRpcEpSet *s) { for (int32_t i = 0; i < s->numOfEps; i++) { s->port[i] = htons(s->port[i]); } -} +} + bool tscEpSetIsEqual(SRpcEpSet *s1, SRpcEpSet *s2) { if (s1->numOfEps != s2->numOfEps || s1->inUse != s2->inUse) { return false; @@ -118,12 +120,15 @@ static void tscUpdateVgroupInfo(SSqlObj *pObj, SRpcEpSet *pEpSet) { return; } - int32_t vgId = pTableMetaInfo->pTableMeta->vgId; + int32_t vgId = -1; if (pTableMetaInfo->pTableMeta->tableType == TSDB_SUPER_TABLE) { - assert(vgId == 0); - return; + vgId = extractSTableQueryVgroupId(pTableMetaInfo); + } else { + vgId = pTableMetaInfo->pTableMeta->vgId; } + assert(vgId > 0); + SNewVgroupInfo vgroupInfo = {.vgId = -1}; taosHashGetClone(tscVgroupMap, &vgId, sizeof(vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo)); assert(vgroupInfo.numOfEps > 0 && vgroupInfo.vgId > 0); @@ -140,6 +145,27 @@ static void tscUpdateVgroupInfo(SSqlObj *pObj, SRpcEpSet *pEpSet) { taosHashPut(tscVgroupMap, &vgId, sizeof(vgId), &vgroupInfo, sizeof(SNewVgroupInfo)); } +int32_t extractSTableQueryVgroupId(STableMetaInfo* pTableMetaInfo) { + assert(pTableMetaInfo != NULL); + + int32_t vgIndex = pTableMetaInfo->vgroupIndex; + int32_t vgId = -1; + + if (pTableMetaInfo->pVgroupTables == NULL) { + SVgroupsInfo *pVgroupInfo = pTableMetaInfo->vgroupList; + assert(pVgroupInfo->vgroups[vgIndex].vgId > 0 && vgIndex < pTableMetaInfo->vgroupList->numOfVgroups); + vgId = pVgroupInfo->vgroups[vgIndex].vgId; + } else { + int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables); + assert(vgIndex >= 0 && vgIndex < numOfVgroups); + + SVgroupTableInfo *pTableIdList = taosArrayGet(pTableMetaInfo->pVgroupTables, vgIndex); + vgId = pTableIdList->vgInfo.vgId; + } + + return vgId; +} + void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) { STscObj *pObj = (STscObj *)param; if (pObj == NULL) return; @@ -515,21 +541,22 @@ int tscBuildFetchMsg(SSqlObj *pSql, SSqlInfo *pInfo) { if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { int32_t vgIndex = pTableMetaInfo->vgroupIndex; + int32_t vgId = -1; + if (pTableMetaInfo->pVgroupTables == NULL) { SVgroupsInfo *pVgroupInfo = pTableMetaInfo->vgroupList; assert(pVgroupInfo->vgroups[vgIndex].vgId > 0 && vgIndex < pTableMetaInfo->vgroupList->numOfVgroups); - - pRetrieveMsg->header.vgId = htonl(pVgroupInfo->vgroups[vgIndex].vgId); - tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d, qId:%" PRIu64, pSql, pVgroupInfo->vgroups[vgIndex].vgId, vgIndex, pSql->res.qId); + vgId = pVgroupInfo->vgroups[vgIndex].vgId; } else { int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables); assert(vgIndex >= 0 && vgIndex < numOfVgroups); SVgroupTableInfo* pTableIdList = taosArrayGet(pTableMetaInfo->pVgroupTables, vgIndex); - - pRetrieveMsg->header.vgId = 
htonl(pTableIdList->vgInfo.vgId); - tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d, qId:%" PRIu64, pSql, pTableIdList->vgInfo.vgId, vgIndex, pSql->res.qId); + vgId = pTableIdList->vgInfo.vgId; } + + pRetrieveMsg->header.vgId = htonl(vgId); + tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d, qId:%" PRIu64, pSql, vgId, vgIndex, pSql->res.qId); } else { STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; pRetrieveMsg->header.vgId = htonl(pTableMeta->vgId); From 023e7959f4c7cce17cdc9e728a1362404037e8c3 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sun, 11 Apr 2021 11:13:33 +0800 Subject: [PATCH 142/178] [td-225]fix error by merge --- src/client/inc/tsclient.h | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 9072b6f349..c91943e232 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -146,17 +146,6 @@ typedef struct SInternalField { SExprFilter *pFieldFilters; } SInternalField; -typedef struct SFieldInfo { - int16_t numOfOutput; // number of column in result - SArray *internalField; // SArray -} SFieldInfo; - -typedef struct SColumn { - SColumnIndex colIndex; - int32_t numOfFilters; - SColumnFilterInfo *filterInfo; -} SColumn; - typedef struct SCond { uint64_t uid; int32_t len; // length of tag query condition data From 5e9cd734c6987a3eabc9944c90752f9616b6dd95 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sat, 10 Apr 2021 02:23:44 +0800 Subject: [PATCH 143/178] enlarge connection pool timeout --- src/rpc/src/rpcMain.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index cd6b3cf80c..7205bafd7d 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -295,7 +295,7 @@ void *rpcOpen(const SRpcInit *pInit) { return NULL; } } else { - pRpc->pCache = rpcOpenConnCache(pRpc->sessions, rpcCloseConn, pRpc->tmrCtrl, pRpc->idleTime); + pRpc->pCache = rpcOpenConnCache(pRpc->sessions, rpcCloseConn, pRpc->tmrCtrl, pRpc->idleTime*30); if ( pRpc->pCache == NULL ) { tError("%s failed to init connection cache", pRpc->label); rpcClose(pRpc); @@ -470,7 +470,7 @@ void rpcSendResponse(const SRpcMsg *pRsp) { taosTmrStopA(&pConn->pTimer); // set the idle timer to monitor the activity - taosTmrReset(rpcProcessIdleTimer, pRpc->idleTime, pConn, pRpc->tmrCtrl, &pConn->pIdleTimer); + taosTmrReset(rpcProcessIdleTimer, pRpc->idleTime*30, pConn, pRpc->tmrCtrl, &pConn->pIdleTimer); rpcSendMsgToPeer(pConn, msg, msgLen); // if not set to secured, set it expcet NOT_READY case, since client wont treat it as secured From a7f44f38e0be1da90cd8a541b9b34f0f11672101 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Sat, 10 Apr 2021 11:37:10 +0800 Subject: [PATCH 144/178] use global uid as query id --- src/common/inc/tglobal.h | 1 + src/common/src/tglobal.c | 1 + src/dnode/src/dnodeCfg.c | 2 ++ src/inc/query.h | 1 + src/query/src/qExecutor.c | 28 ++++++++++++++++++++++++++-- src/query/src/queryMain.c | 1 + src/vnode/src/vnodeRead.c | 3 ++- 7 files changed, 34 insertions(+), 3 deletions(-) diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index 3f96466cc0..26475834d5 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -39,6 +39,7 @@ extern int8_t tsEnableTelemetryReporting; extern char tsEmail[]; extern char tsArbitrator[]; extern int8_t tsArbOnline; +extern int32_t tsDnodeId; // common extern int tsRpcTimer; diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index ea5bf7954d..69b01e6c08 
100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -43,6 +43,7 @@ int8_t tsEnableVnodeBak = 1; int8_t tsEnableTelemetryReporting = 1; int8_t tsArbOnline = 0; char tsEmail[TSDB_FQDN_LEN] = {0}; +int32_t tsDnodeId = 0; // common int32_t tsRpcTimer = 1000; diff --git a/src/dnode/src/dnodeCfg.c b/src/dnode/src/dnodeCfg.c index fd5956b37f..c573d709f5 100644 --- a/src/dnode/src/dnodeCfg.c +++ b/src/dnode/src/dnodeCfg.c @@ -17,6 +17,7 @@ #include "os.h" #include "cJSON.h" #include "dnodeCfg.h" +#include "tglobal.h" static SDnodeCfg tsCfg = {0}; static pthread_mutex_t tsCfgMutex; @@ -70,6 +71,7 @@ static void dnodeResetCfg(SDnodeCfg *cfg) { pthread_mutex_lock(&tsCfgMutex); tsCfg.dnodeId = cfg->dnodeId; + tsDnodeId = cfg->dnodeId; tstrncpy(tsCfg.clusterId, cfg->clusterId, TSDB_CLUSTER_ID_LEN); dnodePrintCfg(cfg); dnodeWriteCfg(); diff --git a/src/inc/query.h b/src/inc/query.h index c9dabcef54..d4ff811a8d 100644 --- a/src/inc/query.h +++ b/src/inc/query.h @@ -92,6 +92,7 @@ void** qRegisterQInfo(void* pMgmt, uint64_t qId, uint64_t qInfo); void** qAcquireQInfo(void* pMgmt, uint64_t key); void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool freeHandle); bool checkQIdEqual(void *qHandle, uint64_t qId); +int64_t genQueryId(void); #ifdef __cplusplus } diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index dda67d77b2..c7e4223db5 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -105,6 +105,30 @@ int32_t getMaximumIdleDurationSec() { return tsShellActivityTimer * 2; } + +int64_t genQueryId(void) { + int64_t uid = 0; + int64_t did = tsDnodeId; + + uid = did << 54; + + int64_t pid = ((int64_t)taosGetPId()) & 0x3FF; + + uid |= pid << 44; + + int64_t ts = taosGetTimestampMs() & 0x1FFFFFFFF; + + uid |= ts << 11; + + int64_t sid = atomic_add_fetch_64(&queryHandleId, 1) & 0x7FF; + + uid |= sid; + + return uid; +} + + + static void getNextTimeWindow(SQuery* pQuery, STimeWindow* tw) { int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); if (pQuery->interval.intervalUnit != 'n' && pQuery->interval.intervalUnit != 'y') { @@ -6184,6 +6208,8 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SSqlGroupbyExpr* pGroupbyExpr goto _cleanup_qinfo; } + pQInfo->qId = *qId; + // to make sure third party won't overwrite this structure pQInfo->signature = pQInfo; SQuery* pQuery = &pQInfo->query; @@ -6316,8 +6342,6 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SSqlGroupbyExpr* pGroupbyExpr // todo refactor pQInfo->query.queryBlockDist = (numOfOutput == 1 && pExprs[0].base.colInfo.colId == TSDB_BLOCK_DIST_COLUMN_INDEX); - pQInfo->qId = atomic_add_fetch_64(&queryHandleId, 1); - *qId = pQInfo->qId; qDebug("qmsg:%p QInfo:%" PRIu64 "-%p created", pQueryMsg, pQInfo->qId, pQInfo); return pQInfo; diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c index 2ff0c9676e..838f6aa0c9 100644 --- a/src/query/src/queryMain.c +++ b/src/query/src/queryMain.c @@ -197,6 +197,7 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi return code; } + bool qTableQuery(qinfo_t qinfo, uint64_t *qId) { SQInfo *pQInfo = (SQInfo *)qinfo; assert(pQInfo && pQInfo->signature == pQInfo); diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index 0836ade77f..5a7fc52931 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -208,6 +208,7 @@ static void vnodeBuildNoResultQueryRsp(SRspRet *pRet) { pRsp->completed = true; } + static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { void * 
pCont = pRead->pCont; int32_t contLen = pRead->contLen; @@ -226,7 +227,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { if (contLen != 0) { qinfo_t pQInfo = NULL; - uint64_t qId = 0; + uint64_t qId = genQueryId(); code = qCreateQueryInfo(pVnode->tsdb, pVnode->vgId, pQueryTableMsg, &pQInfo, &qId); SQueryTableRsp *pRsp = (SQueryTableRsp *)rpcMallocCont(sizeof(SQueryTableRsp)); From bbda420d2bacf26380b6cbc3c336c1f09e569512 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 10 Apr 2021 12:22:18 +0800 Subject: [PATCH 145/178] Feature/sangshuduo/td 3735 make test framework work on mac (#5761) * [TD-3735]: make test framework work on mac * [TD-3735]: make test framework work on mac change cut fields to -f for mac compatibility. --- tests/test-all.sh | 38 +++++++++++++++++++++++++++++++++----- 1 file changed, 33 insertions(+), 5 deletions(-) diff --git a/tests/test-all.sh b/tests/test-all.sh index 3c8aed7d18..980629e6d2 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -80,6 +80,7 @@ function runSimCaseOneByOne { fi done < $1 } + function runSimCaseOneByOnefq { start=`sed -n "/$1-start/=" jenkins/basic.txt` @@ -155,6 +156,7 @@ function runPyCaseOneByOne { fi done < $1 } + function runPyCaseOneByOnefq() { cd $tests_dir/pytest if [[ $1 =~ full ]] ; then @@ -202,13 +204,37 @@ function runPyCaseOneByOnefq() { rm -rf ../../sim/case.log } +###################### +# main entry +###################### + +unameOut="$(uname -s)" +case "${unameOut}" in + Linux*) OS=Linux;; + Darwin*) OS=Darwin;; + CYGWIN*) OS=Windows;; + *) OS=Unknown;; +esac + +case "${OS}" in + Linux*) TAOSLIB=libtaos.so;; + Darwin*) TAOSLIB=libtaos.dylib;; + Windows*) TAOSLIB=taos.dll;; + Unknown) TAOSLIB="UNKNOWN:${unameOut}";; +esac + +echo TAOSLIB is ${TAOSLIB} + totalFailed=0 totalPyFailed=0 totalJDBCFailed=0 totalUnitFailed=0 totalExampleFailed=0 -corepath=`grep -oP '.*(?=core_)' /proc/sys/kernel/core_pattern||grep -oP '.*(?=core-)' /proc/sys/kernel/core_pattern` +if [ "${OS}" == "Linux" ]; then + corepath=`grep -oP '.*(?=core_)' /proc/sys/kernel/core_pattern||grep -oP '.*(?=core-)' /proc/sys/kernel/core_pattern` +fi + if [ "$2" != "jdbc" ] && [ "$2" != "python" ] && [ "$2" != "unit" ] && [ "$2" != "example" ]; then echo "### run TSIM test case ###" cd $tests_dir/script @@ -290,11 +316,11 @@ if [ "$2" != "sim" ] && [ "$2" != "jdbc" ] && [ "$2" != "unit" ] && [ "$2" != " fi TOP_DIR=`pwd` - TAOSLIB_DIR=`find . -name "libtaos.so"|grep -w lib|head -n1` + TAOSLIB_DIR=`find . -name "${TAOSLIB}"|grep -w lib|head -n1` if [[ "$TAOSLIB_DIR" == *"$IN_TDINTERNAL"* ]]; then - LIB_DIR=`find . -name "libtaos.so"|grep -w lib|head -n1|cut -d '/' --fields=2,3,4,5` + LIB_DIR=`find . -name "${TAOSLIB}"|grep -w lib|head -n1|cut -d '/' -f 2,3,4,5` else - LIB_DIR=`find . -name "libtaos.so"|grep -w lib|head -n1|cut -d '/' --fields=2,3,4` + LIB_DIR=`find . -name "${TAOSLIB}"|grep -w lib|head -n1|cut -d '/' -f 2,3,4` fi export LD_LIBRARY_PATH=$TOP_DIR/$LIB_DIR:$LD_LIBRARY_PATH @@ -499,7 +525,9 @@ if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$2" != "jdbc" ] && [ "$2" != echo -e "\n${RED} ### Total $totalExampleFailed examples failed! 
### ${NC}" fi - dohavecore 1 + if [ "${OS}" == "Linux" ]; then + dohavecore 1 + fi fi From cdbb75609f87fbbd364d994faef8f30c922c214c Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 10 Apr 2021 13:40:27 +0800 Subject: [PATCH 146/178] Feature/sangshuduo/td 3735 make test framework work on mac (#5761) (#5763) * [TD-3735]: make test framework work on mac * [TD-3735]: make test framework work on mac change cut fields to -f for mac compatibility. --- tests/test-all.sh | 38 +++++++++++++++++++++++++++++++++----- 1 file changed, 33 insertions(+), 5 deletions(-) diff --git a/tests/test-all.sh b/tests/test-all.sh index 3c8aed7d18..980629e6d2 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -80,6 +80,7 @@ function runSimCaseOneByOne { fi done < $1 } + function runSimCaseOneByOnefq { start=`sed -n "/$1-start/=" jenkins/basic.txt` @@ -155,6 +156,7 @@ function runPyCaseOneByOne { fi done < $1 } + function runPyCaseOneByOnefq() { cd $tests_dir/pytest if [[ $1 =~ full ]] ; then @@ -202,13 +204,37 @@ function runPyCaseOneByOnefq() { rm -rf ../../sim/case.log } +###################### +# main entry +###################### + +unameOut="$(uname -s)" +case "${unameOut}" in + Linux*) OS=Linux;; + Darwin*) OS=Darwin;; + CYGWIN*) OS=Windows;; + *) OS=Unknown;; +esac + +case "${OS}" in + Linux*) TAOSLIB=libtaos.so;; + Darwin*) TAOSLIB=libtaos.dylib;; + Windows*) TAOSLIB=taos.dll;; + Unknown) TAOSLIB="UNKNOWN:${unameOut}";; +esac + +echo TAOSLIB is ${TAOSLIB} + totalFailed=0 totalPyFailed=0 totalJDBCFailed=0 totalUnitFailed=0 totalExampleFailed=0 -corepath=`grep -oP '.*(?=core_)' /proc/sys/kernel/core_pattern||grep -oP '.*(?=core-)' /proc/sys/kernel/core_pattern` +if [ "${OS}" == "Linux" ]; then + corepath=`grep -oP '.*(?=core_)' /proc/sys/kernel/core_pattern||grep -oP '.*(?=core-)' /proc/sys/kernel/core_pattern` +fi + if [ "$2" != "jdbc" ] && [ "$2" != "python" ] && [ "$2" != "unit" ] && [ "$2" != "example" ]; then echo "### run TSIM test case ###" cd $tests_dir/script @@ -290,11 +316,11 @@ if [ "$2" != "sim" ] && [ "$2" != "jdbc" ] && [ "$2" != "unit" ] && [ "$2" != " fi TOP_DIR=`pwd` - TAOSLIB_DIR=`find . -name "libtaos.so"|grep -w lib|head -n1` + TAOSLIB_DIR=`find . -name "${TAOSLIB}"|grep -w lib|head -n1` if [[ "$TAOSLIB_DIR" == *"$IN_TDINTERNAL"* ]]; then - LIB_DIR=`find . -name "libtaos.so"|grep -w lib|head -n1|cut -d '/' --fields=2,3,4,5` + LIB_DIR=`find . -name "${TAOSLIB}"|grep -w lib|head -n1|cut -d '/' -f 2,3,4,5` else - LIB_DIR=`find . -name "libtaos.so"|grep -w lib|head -n1|cut -d '/' --fields=2,3,4` + LIB_DIR=`find . -name "${TAOSLIB}"|grep -w lib|head -n1|cut -d '/' -f 2,3,4` fi export LD_LIBRARY_PATH=$TOP_DIR/$LIB_DIR:$LD_LIBRARY_PATH @@ -499,7 +525,9 @@ if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$2" != "jdbc" ] && [ "$2" != echo -e "\n${RED} ### Total $totalExampleFailed examples failed! 
### ${NC}" fi - dohavecore 1 + if [ "${OS}" == "Linux" ]; then + dohavecore 1 + fi fi From d3999c7bf3fed26df015bf541386c1df5a81a152 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Sat, 10 Apr 2021 14:01:29 +0800 Subject: [PATCH 147/178] fix bug --- src/cq/src/cqMain.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/cq/src/cqMain.c b/src/cq/src/cqMain.c index f620eb4dd5..d4d202267c 100644 --- a/src/cq/src/cqMain.c +++ b/src/cq/src/cqMain.c @@ -73,6 +73,7 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row); static void cqCreateStream(SCqContext *pContext, SCqObj *pObj); int32_t cqObjRef = -1; +int32_t cqVnodeNum = 0; void cqRmFromList(SCqObj *pObj) { //LOCK in caller @@ -166,6 +167,8 @@ void *cqOpen(void *ahandle, const SCqCfg *pCfg) { return NULL; } + atomic_add_fetch_32(&cqVnodeNum, 1); + cqCreateRef(); pContext->tmrCtrl = taosTmrInit(0, 0, 0, "CQ"); @@ -240,6 +243,13 @@ void cqClose(void *handle) { if (hasCq == 0) { freeSCqContext(pContext); } + + int32_t remainn = atomic_sub_fetch_32(&cqVnodeNum, 1); + if (remainn <= 0) { + int32_t ref = cqObjRef; + cqObjRef = -1; + taosCloseRef(ref); + } } void cqStart(void *handle) { From 4908874523c848d65f3c96db44399420f6dab6bb Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 10 Apr 2021 14:12:32 +0800 Subject: [PATCH 148/178] Hotfix/sangshuduo/td 3215 bus error arm32 (#5764) * [TD-3215] fix bus error on arm32. fix indent. * [TD-3215]: arm32 porting. fix cache function for data type convertion mistake. From f5db431c19973e56a11c1e644e521634a7d83fb2 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sat, 10 Apr 2021 17:11:58 +0800 Subject: [PATCH 149/178] reset timeout --- src/rpc/src/rpcMain.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index 7205bafd7d..cd6b3cf80c 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -295,7 +295,7 @@ void *rpcOpen(const SRpcInit *pInit) { return NULL; } } else { - pRpc->pCache = rpcOpenConnCache(pRpc->sessions, rpcCloseConn, pRpc->tmrCtrl, pRpc->idleTime*30); + pRpc->pCache = rpcOpenConnCache(pRpc->sessions, rpcCloseConn, pRpc->tmrCtrl, pRpc->idleTime); if ( pRpc->pCache == NULL ) { tError("%s failed to init connection cache", pRpc->label); rpcClose(pRpc); @@ -470,7 +470,7 @@ void rpcSendResponse(const SRpcMsg *pRsp) { taosTmrStopA(&pConn->pTimer); // set the idle timer to monitor the activity - taosTmrReset(rpcProcessIdleTimer, pRpc->idleTime*30, pConn, pRpc->tmrCtrl, &pConn->pIdleTimer); + taosTmrReset(rpcProcessIdleTimer, pRpc->idleTime, pConn, pRpc->tmrCtrl, &pConn->pIdleTimer); rpcSendMsgToPeer(pConn, msg, msgLen); // if not set to secured, set it expcet NOT_READY case, since client wont treat it as secured From 483574a09cad33e7ce67b9ab0637c5abad59a4b2 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 10 Apr 2021 17:24:43 +0800 Subject: [PATCH 150/178] [TD-3733]: taosdemo buffer processing refactor. 
(#5766) Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 123 ++++++++++++++++++++++-------------- 1 file changed, 77 insertions(+), 46 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index c6d5933dc7..c34b00fc8c 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -2949,9 +2949,6 @@ static int readSampleFromCsvFileToMem( continue; } - verbosePrint("readLen=%ld stb->lenOfOneRow=%d getRows=%d\n", (long)readLen, - superTblInfo->lenOfOneRow, getRows); - memcpy(superTblInfo->sampleDataBuf + getRows * superTblInfo->lenOfOneRow, line, readLen); getRows++; @@ -3527,6 +3524,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { goto PARSE_OVER; } + /* cJSON* batchCreateTbl = cJSON_GetObjectItem(stbInfo, "batch_create_tbl_num"); if (batchCreateTbl && batchCreateTbl->type == cJSON_Number) { g_Dbs.db[i].superTbls[j].batchCreateTableNum = batchCreateTbl->valueint; @@ -3536,6 +3534,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { printf("ERROR: failed to read json, batch_create_tbl_num not found\n"); goto PARSE_OVER; } + */ cJSON *childTblExists = cJSON_GetObjectItem(stbInfo, "child_table_exists"); // yes, no if (childTblExists @@ -4619,7 +4618,8 @@ static int generateDataTail(char *tableName, int32_t tableSeq, } static int generateSQLHead(char *tableName, int32_t tableSeq, - threadInfo* pThreadInfo, SSuperTable* superTblInfo, char *buffer) + threadInfo* pThreadInfo, SSuperTable* superTblInfo, + char *buffer, int remainderBufLen) { int len; if (superTblInfo) { @@ -4671,7 +4671,62 @@ static int generateSQLHead(char *tableName, int32_t tableSeq, return len; } -static int generateProgressiveDataBuffer(char *pTblName, +static int generateInterlaceDataBuffer( + char *tableName, int batchPerTbl, int i, int batchPerTblTimes, + int32_t tableSeq, + threadInfo *pThreadInfo, char *buffer, + int64_t insertRows, + int64_t startTime, + int *pRemainderBufLen) +{ + char *pstr = buffer; + SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + + int headLen = generateSQLHead(tableName, tableSeq, pThreadInfo, + superTblInfo, pstr, *pRemainderBufLen); + + if (headLen <= 0) { + return 0; + } + // generate data buffer + verbosePrint("[%d] %s() LN%d i=%d buffer:\n%s\n", + pThreadInfo->threadID, __func__, __LINE__, i, buffer); + + pstr += headLen; + *pRemainderBufLen -= headLen; + + int dataLen = 0; + + verbosePrint("[%d] %s() LN%d i=%d batchPerTblTimes=%d batchPerTbl = %d\n", + pThreadInfo->threadID, __func__, __LINE__, + i, batchPerTblTimes, batchPerTbl); + + if (superTblInfo) { + if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) { + startTime = taosGetTimestamp(pThreadInfo->time_precision); + } + } else { + startTime = 1500000000000; + } + int k = generateDataTail( + tableName, tableSeq, pThreadInfo, superTblInfo, + batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0, + startTime, + &(pThreadInfo->samplePos), &dataLen); + + if (k > 0) { + pstr += dataLen; + *pRemainderBufLen -= dataLen; + } else { + pstr -= headLen; + pstr[0] = '\0'; + } + + return k; +} + +static int generateProgressiveDataBuffer( + char *tableName, int32_t tableSeq, threadInfo *pThreadInfo, char *buffer, int64_t insertRows, @@ -4691,6 +4746,7 @@ static int generateProgressiveDataBuffer(char *pTblName, assert(buffer != NULL); + int k = 0; int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; int remainderBufLen = maxSqlLen; @@ -4698,14 +4754,17 @@ static int generateProgressiveDataBuffer(char *pTblName, char *pstr = buffer; - int headLen = 
generateSQLHead(pTblName, tableSeq, pThreadInfo, superTblInfo, - buffer); + int headLen = generateSQLHead(tableName, tableSeq, pThreadInfo, superTblInfo, + buffer, remainderBufLen); + + if (headLen <= 0) { + return 0; + } pstr += headLen; remainderBufLen -= headLen; - int k; int dataLen; - k = generateDataTail(pTblName, tableSeq, pThreadInfo, superTblInfo, + k = generateDataTail(tableName, tableSeq, pThreadInfo, superTblInfo, g_args.num_of_RPR, pstr, remainderBufLen, insertRows, startFrom, startTime, pSamplePos, &dataLen); @@ -4811,50 +4870,23 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { return NULL; } - int headLen; - if (i == 0) { - headLen = generateSQLHead(tableName, tableSeq, pThreadInfo, - superTblInfo, pstr); - } else { - headLen = snprintf(pstr, TSDB_TABLE_NAME_LEN, "%s.%s values", - pThreadInfo->db_name, - tableName); - } - - // generate data buffer - verbosePrint("[%d] %s() LN%d i=%d buffer:\n%s\n", - pThreadInfo->threadID, __func__, __LINE__, i, buffer); - - pstr += headLen; - remainderBufLen -= headLen; - - int dataLen = 0; - - verbosePrint("[%d] %s() LN%d i=%d batchPerTblTimes=%d batchPerTbl = %d\n", - pThreadInfo->threadID, __func__, __LINE__, - i, batchPerTblTimes, batchPerTbl); - - if (superTblInfo) { - if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) { - startTime = taosGetTimestamp(pThreadInfo->time_precision); - } - } else { - startTime = 1500000000000; - } - int generated = generateDataTail( - tableName, tableSeq, pThreadInfo, superTblInfo, - batchPerTbl, pstr, remainderBufLen, insertRows, 0, + int generated = generateInterlaceDataBuffer( + tableName, batchPerTbl, i, batchPerTblTimes, + tableSeq, + pThreadInfo, pstr, + insertRows, startTime, - &(pThreadInfo->samplePos), &dataLen); + &remainderBufLen); if (generated < 0) { debugPrint("[%d] %s() LN%d, generated data is %d\n", pThreadInfo->threadID, __func__, __LINE__, generated); goto free_and_statistics_interlace; + } else if (generated == 0) { + break; } - pstr += dataLen; - remainderBufLen -= dataLen; + tableSeq ++; recOfBatch += batchPerTbl; // startTime += batchPerTbl * superTblInfo->timeStampStep; pThreadInfo->totalInsertRows += batchPerTbl; @@ -4862,7 +4894,6 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pThreadInfo->threadID, __func__, __LINE__, batchPerTbl, recOfBatch); - tableSeq ++; if (insertMode == INTERLACE_INSERT_MODE) { if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) { // turn to first table From 247cf3693fed60ff69076df14b9fb51edfd2c8ab Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 10 Apr 2021 18:41:46 +0800 Subject: [PATCH 151/178] [td-225] --- src/client/src/tscServer.c | 2 +- src/client/src/tscSubquery.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 7085318e35..b8beed392e 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -520,7 +520,7 @@ int tscBuildFetchMsg(SSqlObj *pSql, SSqlInfo *pInfo) { assert(pVgroupInfo->vgroups[vgIndex].vgId > 0 && vgIndex < pTableMetaInfo->vgroupList->numOfVgroups); pRetrieveMsg->header.vgId = htonl(pVgroupInfo->vgroups[vgIndex].vgId); - tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d, qhandle:%" PRIX64, pSql, pVgroupInfo->vgroups[vgIndex].vgId, vgIndex, pSql->res.qId); + tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d, qId:%" PRIu64, pSql, pVgroupInfo->vgroups[vgIndex].vgId, vgIndex, pSql->res.qId); } else { int32_t numOfVgroups = 
(int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables); assert(vgIndex >= 0 && vgIndex < numOfVgroups); diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 299cf03805..484610818e 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2986,7 +2986,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { } tscDebug("%p sub:%p query complete, ep:%s, vgId:%d, orderOfSub:%d, retrieve data", trsupport->pParentSql, pSql, - pVgroup->epAddr[0].fqdn, pVgroup->vgId, trsupport->subqueryIndex); + pVgroup->epAddr[pSql->epSet.inUse].fqdn, pVgroup->vgId, trsupport->subqueryIndex); if (pSql->res.qId == 0) { // qhandle is NULL, code is TSDB_CODE_SUCCESS means no results generated from this vnode tscRetrieveFromDnodeCallBack(param, pSql, 0); From 35c7506dd096edd534c5b93eef4b4afc6ac647cc Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sat, 10 Apr 2021 22:48:35 +0800 Subject: [PATCH 152/178] reset tcp --- src/rpc/src/rpcTcp.c | 155 ++++--------------------------------------- 1 file changed, 12 insertions(+), 143 deletions(-) diff --git a/src/rpc/src/rpcTcp.c b/src/rpc/src/rpcTcp.c index 286ed223c7..3162ab2e4c 100644 --- a/src/rpc/src/rpcTcp.c +++ b/src/rpc/src/rpcTcp.c @@ -21,13 +21,6 @@ #include "rpcLog.h" #include "rpcHead.h" #include "rpcTcp.h" -#include "tlist.h" - -typedef struct SConnItem { - SOCKET fd; - uint32_t ip; - uint16_t port; -} SConnItem; typedef struct SFdObj { void *signature; @@ -45,12 +38,6 @@ typedef struct SThreadObj { pthread_t thread; SFdObj * pHead; pthread_mutex_t mutex; - // receive the notify from dispatch thread - - int notifyReceiveFd; - int notifySendFd; - SList *connQueue; - uint32_t ip; bool stop; EpollFd pollFd; @@ -82,7 +69,6 @@ typedef struct { } SServerObj; static void *taosProcessTcpData(void *param); -static void *taosProcessServerTcpData(void *param); static SFdObj *taosMallocFdObj(SThreadObj *pThreadObj, SOCKET fd); static void taosFreeFdObj(SFdObj *pFdObj); static void taosReportBrokenLink(SFdObj *pFdObj); @@ -138,7 +124,6 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread tstrncpy(pThreadObj->label, label, sizeof(pThreadObj->label)); pThreadObj->shandle = shandle; pThreadObj->stop = false; - pThreadObj->connQueue = tdListNew(sizeof(SConnItem)); } // initialize mutex, thread, fd which may fail @@ -157,25 +142,7 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread break; } - int fds[2]; - if (pipe(fds)) { - tError("%s failed to create pipe", label); - code = -1; - break; - } - - pThreadObj->notifyReceiveFd = fds[0]; - pThreadObj->notifySendFd = fds[1]; - struct epoll_event event; - event.events = EPOLLIN | EPOLLRDHUP; - event.data.fd = pThreadObj->notifyReceiveFd; - if (epoll_ctl(pThreadObj->pollFd, EPOLL_CTL_ADD, pThreadObj->notifyReceiveFd , &event) < 0) { - tError("%s failed to create pipe", label); - code = -1; - break; - } - - code = pthread_create(&(pThreadObj->thread), &thattr, taosProcessServerTcpData, (void *)(pThreadObj)); + code = pthread_create(&(pThreadObj->thread), &thattr, taosProcessTcpData, (void *)(pThreadObj)); if (code != 0) { tError("%s failed to create TCP process data thread(%s)", label, strerror(errno)); break; @@ -308,12 +275,17 @@ static void *taosAcceptTcpConnection(void *arg) { // pick up the thread to handle this connection pThreadObj = pServerObj->pThreadObj[threadId]; - pthread_mutex_lock(&(pThreadObj->mutex)); - SConnItem item = {.fd = connFd, .ip = caddr.sin_addr.s_addr, .port = htons(caddr.sin_port)}; - 
tdListAppend(pThreadObj->connQueue, &item); - pthread_mutex_unlock(&(pThreadObj->mutex)); - - write(pThreadObj->notifySendFd, "", 1); + SFdObj *pFdObj = taosMallocFdObj(pThreadObj, connFd); + if (pFdObj) { + pFdObj->ip = caddr.sin_addr.s_addr; + pFdObj->port = htons(caddr.sin_port); + tDebug("%s new TCP connection from %s:%hu, fd:%d FD:%p numOfFds:%d", pServerObj->label, + taosInetNtoa(caddr.sin_addr), pFdObj->port, connFd, pFdObj, pThreadObj->numOfFds); + } else { + taosCloseSocket(connFd); + tError("%s failed to malloc FdObj(%s) for connection from:%s:%hu", pServerObj->label, strerror(errno), + taosInetNtoa(caddr.sin_addr), htons(caddr.sin_port)); + } // pick up next thread for next connection threadId++; @@ -619,109 +591,6 @@ static void *taosProcessTcpData(void *param) { return NULL; } -static void *taosProcessServerTcpData(void *param) { - SThreadObj *pThreadObj = param; - SFdObj *pFdObj; - struct epoll_event events[maxEvents]; - SRecvInfo recvInfo; - - char bb[1]; -#ifdef __APPLE__ - taos_block_sigalrm(); -#endif // __APPLE__ - while (1) { - int fdNum = epoll_wait(pThreadObj->pollFd, events, maxEvents, TAOS_EPOLL_WAIT_TIME); - if (pThreadObj->stop) { - tDebug("%s TCP thread get stop event, exiting...", pThreadObj->label); - break; - } - if (fdNum < 0) continue; - - for (int i = 0; i < fdNum; ++i) { - if (events[i].data.fd == pThreadObj->notifyReceiveFd) { - if (events[i].events & EPOLLIN) { - read(pThreadObj->notifyReceiveFd, bb, 1); - - pthread_mutex_lock(&(pThreadObj->mutex)); - SListNode *head = tdListPopHead(pThreadObj->connQueue); - pthread_mutex_unlock(&(pThreadObj->mutex)); - - SConnItem item = {0}; - tdListNodeGetData(pThreadObj->connQueue, head, &item); - tfree(head); - - // register fd on epoll - SFdObj *pFdObj = taosMallocFdObj(pThreadObj, item.fd); - if (pFdObj) { - pFdObj->ip = item.ip; - pFdObj->port = item.port; - tDebug("%s new TCP connection from %u:%hu, fd:%d FD:%p numOfFds:%d", pThreadObj->label, - pFdObj->ip, pFdObj->port, item.fd, pFdObj, pThreadObj->numOfFds); - } else { - taosCloseSocket(item.fd); - tError("%s failed to malloc FdObj(%s) for connection from:%u:%hu", pThreadObj->label, strerror(errno), - pFdObj->ip, pFdObj->port); - } - } - continue; - } - pFdObj = events[i].data.ptr; - - if (events[i].events & EPOLLERR) { - tDebug("%s %p FD:%p epoll errors", pThreadObj->label, pFdObj->thandle, pFdObj); - taosReportBrokenLink(pFdObj); - continue; - } - - if (events[i].events & EPOLLRDHUP) { - tDebug("%s %p FD:%p RD hang up", pThreadObj->label, pFdObj->thandle, pFdObj); - taosReportBrokenLink(pFdObj); - continue; - } - - if (events[i].events & EPOLLHUP) { - tDebug("%s %p FD:%p hang up", pThreadObj->label, pFdObj->thandle, pFdObj); - taosReportBrokenLink(pFdObj); - continue; - } - - if (taosReadTcpData(pFdObj, &recvInfo) < 0) { - shutdown(pFdObj->fd, SHUT_WR); - continue; - } - - pFdObj->thandle = (*(pThreadObj->processData))(&recvInfo); - if (pFdObj->thandle == NULL) taosFreeFdObj(pFdObj); - } - - if (pThreadObj->stop) break; - } - - if (pThreadObj->connQueue) { - pThreadObj->connQueue = tdListFree(pThreadObj->connQueue); - } - // close pipe - close(pThreadObj->notifySendFd); - close(pThreadObj->notifyReceiveFd); - - if (pThreadObj->pollFd >=0) { - EpollClose(pThreadObj->pollFd); - pThreadObj->pollFd = -1; - } - - while (pThreadObj->pHead) { - SFdObj *pFdObj = pThreadObj->pHead; - pThreadObj->pHead = pFdObj->next; - taosReportBrokenLink(pFdObj); - } - - pthread_mutex_destroy(&(pThreadObj->mutex)); - tDebug("%s TCP thread exits ...", pThreadObj->label); - 
tfree(pThreadObj); - - return NULL; -} - static SFdObj *taosMallocFdObj(SThreadObj *pThreadObj, SOCKET fd) { struct epoll_event event; From 5e43fc0d245bcf2dc23e0a51ce8697efa014c90a Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sun, 11 Apr 2021 11:09:54 +0800 Subject: [PATCH 153/178] Hotfix/sangshuduo/td 3733 taosdemo buffer processing (#5769) * [TD-3733]: taosdemo buffer processing refactor. * [TD-3733]: taosdemo generate SQL head buffer process. * [TD-3733]: taosdemo generate sql head buffer processing. fix compile issue for windows. * [TD-3733]: taosdemo generate sql head buffer processing. fix max_sql_len. generate data buffer refactor. * resolve conflict with master. Co-authored-by: Shuduo Sang --- src/client/inc/tsclient.h | 11 ----- src/kit/taosdemo/taosdemo.c | 89 ++++++++++++++++++++++--------------- 2 files changed, 52 insertions(+), 48 deletions(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 9072b6f349..c91943e232 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -146,17 +146,6 @@ typedef struct SInternalField { SExprFilter *pFieldFilters; } SInternalField; -typedef struct SFieldInfo { - int16_t numOfOutput; // number of column in result - SArray *internalField; // SArray -} SFieldInfo; - -typedef struct SColumn { - SColumnIndex colIndex; - int32_t numOfFilters; - SColumnFilterInfo *filterInfo; -} SColumn; - typedef struct SCond { uint64_t uid; int32_t len; // length of tag query condition data diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index c34b00fc8c..ed2b74c973 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -3211,9 +3211,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { // rows per table need be less than insert batch if (g_args.interlace_rows > g_args.num_of_RPR) { - printf("NOTICE: interlace rows value %d > num_of_records_per_request %d\n\n", + printf("NOTICE: interlace rows value %d > num_of_records_per_req %d\n\n", g_args.interlace_rows, g_args.num_of_RPR); - printf(" interlace rows value will be set to num_of_records_per_request %d\n\n", + printf(" interlace rows value will be set to num_of_records_per_req %d\n\n", g_args.num_of_RPR); printf(" press Enter key to continue or Ctrl-C to stop."); (void)getchar(); @@ -3682,14 +3682,15 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { int32_t len = maxSqlLen->valueint; if (len > TSDB_MAX_ALLOWED_SQL_LEN) { len = TSDB_MAX_ALLOWED_SQL_LEN; - } else if (len < TSDB_MAX_SQL_LEN) { - len = TSDB_MAX_SQL_LEN; + } else if (len < 5) { + len = 5; } g_Dbs.db[i].superTbls[j].maxSqlLen = len; } else if (!maxSqlLen) { g_Dbs.db[i].superTbls[j].maxSqlLen = g_args.max_sql_len; } else { - printf("ERROR: failed to read json, maxSqlLen not found\n"); + errorPrint("%s() LN%d, failed to read json, maxSqlLen input mistake\n", + __func__, __LINE__); goto PARSE_OVER; } @@ -3715,9 +3716,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { g_Dbs.db[i].superTbls[j].interlaceRows = interlaceRows->valueint; // rows per table need be less than insert batch if (g_Dbs.db[i].superTbls[j].interlaceRows > g_args.num_of_RPR) { - printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %d > num_of_records_per_request %d\n\n", + printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %d > num_of_records_per_req %d\n\n", i, j, g_Dbs.db[i].superTbls[j].interlaceRows, g_args.num_of_RPR); - printf(" interlace rows value will be set to num_of_records_per_request %d\n\n", + printf(" interlace rows value will be set to 
num_of_records_per_req %d\n\n", g_args.num_of_RPR); printf(" press Enter key to continue or Ctrl-C to stop."); (void)getchar(); @@ -4511,13 +4512,15 @@ static void getTableName(char *pTblName, threadInfo* pThreadInfo, int tableSeq) } } -static int generateDataTail(char *tableName, int32_t tableSeq, - threadInfo* pThreadInfo, SSuperTable* superTblInfo, +static int generateDataTail( + SSuperTable* superTblInfo, int batch, char* buffer, int remainderBufLen, int64_t insertRows, int64_t startFrom, uint64_t startTime, int *pSamplePos, int *dataLen) { int len = 0; int ncols_per_record = 1; // count first col ts + char *pstr = buffer; + if (superTblInfo == NULL) { int datatypeSeq = 0; while(g_args.datatype[datatypeSeq]) { @@ -4546,15 +4549,14 @@ static int generateDataTail(char *tableName, int32_t tableSeq, pSamplePos); } else if (0 == strncasecmp(superTblInfo->dataSource, "rand", strlen("rand"))) { - int rand_num = taosRandom() % 100; - int randTail; - if (0 != superTblInfo->disorderRatio - && rand_num < superTblInfo->disorderRatio) { - randTail = (superTblInfo->timeStampStep * k - + (taosRandom() % superTblInfo->disorderRange + 1)) * (-1); - debugPrint("rand data generated, back %d\n", randTail); - } else { - randTail = superTblInfo->timeStampStep * k; + + int randTail = superTblInfo->timeStampStep * k; + if (superTblInfo->disorderRatio > 0) { + int rand_num = taosRandom() % 100; + if(rand_num < superTblInfo->disorderRatio) { + randTail = (randTail + (taosRandom() % superTblInfo->disorderRange + 1)) * (-1); + debugPrint("rand data generated, back %d\n", randTail); + } } uint64_t d = startTime @@ -4569,7 +4571,7 @@ static int generateDataTail(char *tableName, int32_t tableSeq, break; } - buffer += snprintf(buffer, retLen + 1, "%s", data); + pstr += snprintf(pstr , retLen + 1, "%s", data); k++; len += retLen; remainderBufLen -= retLen; @@ -4597,7 +4599,7 @@ static int generateDataTail(char *tableName, int32_t tableSeq, if (len > remainderBufLen) break; - buffer += sprintf(buffer, " %s", data); + pstr += sprintf(pstr, " %s", data); k++; len += retLen; remainderBufLen -= retLen; @@ -4622,6 +4624,10 @@ static int generateSQLHead(char *tableName, int32_t tableSeq, char *buffer, int remainderBufLen) { int len; + +#define HEAD_BUFF_LEN 1024*24 // 16*1024 + (192+32)*2 + insert into .. 
+ char headBuf[HEAD_BUFF_LEN]; + if (superTblInfo) { if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) { char* tagsValBuf = NULL; @@ -4638,8 +4644,9 @@ static int generateSQLHead(char *tableName, int32_t tableSeq, return -1; } - len = snprintf(buffer, - superTblInfo->maxSqlLen, + len = snprintf( + headBuf, + HEAD_BUFF_LEN, "insert into %s.%s using %s.%s tags %s values", pThreadInfo->db_name, tableName, @@ -4648,26 +4655,34 @@ static int generateSQLHead(char *tableName, int32_t tableSeq, tagsValBuf); tmfree(tagsValBuf); } else if (TBL_ALREADY_EXISTS == superTblInfo->childTblExists) { - len = snprintf(buffer, - superTblInfo->maxSqlLen, + len = snprintf( + headBuf, + HEAD_BUFF_LEN, "insert into %s.%s values", pThreadInfo->db_name, tableName); } else { - len = snprintf(buffer, - superTblInfo->maxSqlLen, + len = snprintf( + headBuf, + HEAD_BUFF_LEN, "insert into %s.%s values", pThreadInfo->db_name, tableName); } } else { - len = snprintf(buffer, - g_args.max_sql_len, + len = snprintf( + headBuf, + HEAD_BUFF_LEN, "insert into %s.%s values", pThreadInfo->db_name, tableName); } + if (len > remainderBufLen) + return -1; + + tstrncpy(buffer, headBuf, len + 1); + return len; } @@ -4709,7 +4724,7 @@ static int generateInterlaceDataBuffer( startTime = 1500000000000; } int k = generateDataTail( - tableName, tableSeq, pThreadInfo, superTblInfo, + superTblInfo, batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0, startTime, &(pThreadInfo->samplePos), &dataLen); @@ -4764,7 +4779,7 @@ static int generateProgressiveDataBuffer( remainderBufLen -= headLen; int dataLen; - k = generateDataTail(tableName, tableSeq, pThreadInfo, superTblInfo, + k = generateDataTail(superTblInfo, g_args.num_of_RPR, pstr, remainderBufLen, insertRows, startFrom, startTime, pSamplePos, &dataLen); @@ -4781,8 +4796,8 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT; int interlaceRows = superTblInfo?superTblInfo->interlaceRows:g_args.interlace_rows; - if (interlaceRows > insertRows) - interlaceRows = insertRows; + if (interlaceRows > g_args.num_of_RPR) + interlaceRows = g_args.num_of_RPR; int insertMode; @@ -4847,8 +4862,6 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { bool flagSleep = true; int sleepTimeTotal = 0; - int remainderBufLen; - while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { if ((flagSleep) && (insert_interval)) { st = taosGetTimestampUs(); @@ -4856,7 +4869,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { } // generate data memset(buffer, 0, maxSqlLen); - remainderBufLen = maxSqlLen; + int remainderBufLen = maxSqlLen; char *pstr = buffer; int recOfBatch = 0; @@ -5102,11 +5115,13 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { */ } // num_of_DPT - if ((tableSeq == pThreadInfo->ntables - 1) && superTblInfo && + if (g_args.verbose_print) { + if ((tableSeq == pThreadInfo->ntables - 1) && superTblInfo && (0 == strncasecmp( superTblInfo->dataSource, "sample", strlen("sample")))) { - printf("%s() LN%d samplePos=%d\n", + verbosePrint("%s() LN%d samplePos=%d\n", __func__, __LINE__, pThreadInfo->samplePos); + } } } // tableSeq From 7153904339abd2408c3c9fff3ae725d283d4488b Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sun, 11 Apr 2021 21:31:47 +0800 Subject: [PATCH 154/178] [TD-3317]: taosdemo interlace insertion. (#5774) patch for master. 
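The batching arithmetic this patch settles on is easiest to see with concrete numbers. A minimal sketch, with illustrative values rather than taosdemo defaults, for the case of several tables and a positive interlace value:

    #include <stdio.h>

    int main(void) {
        int num_of_RPR     = 100;  /* illustrative: records per request      */
        int interlace_rows = 20;   /* illustrative: rows per table per round */

        if (interlace_rows > num_of_RPR)   /* cap applied by this patch */
            interlace_rows = num_of_RPR;

        int batchPerTbl      = interlace_rows;              /* 20 rows per table  */
        int batchPerTblTimes = num_of_RPR / interlace_rows; /* 5 tables per batch */

        /* One generated INSERT covers 5 tables x 20 rows = 100 rows;
         * the next request then continues from the 6th table. */
        printf("%d tables x %d rows = %d rows per request\n",
               batchPerTblTimes, batchPerTbl, batchPerTbl * batchPerTblTimes);
        return 0;
    }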
Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 74 +++++++++++++++-------- tests/pytest/tools/taosdemoPerformance.py | 4 +- 2 files changed, 50 insertions(+), 28 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index ed2b74c973..49e550cc01 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -18,6 +18,7 @@ when in some thread query return error, thread don't exit, but return, otherwise coredump in other thread. */ +#include #define _GNU_SOURCE #define CURL_STATICLIB @@ -3242,7 +3243,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { if (numRecPerReq && numRecPerReq->type == cJSON_Number) { g_args.num_of_RPR = numRecPerReq->valueint; } else if (!numRecPerReq) { - g_args.num_of_RPR = 0xffff; + g_args.num_of_RPR = INT32_MAX; } else { errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n", __func__, __LINE__); @@ -4647,7 +4648,7 @@ static int generateSQLHead(char *tableName, int32_t tableSeq, len = snprintf( headBuf, HEAD_BUFF_LEN, - "insert into %s.%s using %s.%s tags %s values", + "%s.%s using %s.%s tags %s values", pThreadInfo->db_name, tableName, pThreadInfo->db_name, @@ -4658,14 +4659,14 @@ static int generateSQLHead(char *tableName, int32_t tableSeq, len = snprintf( headBuf, HEAD_BUFF_LEN, - "insert into %s.%s values", + "%s.%s values", pThreadInfo->db_name, tableName); } else { len = snprintf( headBuf, HEAD_BUFF_LEN, - "insert into %s.%s values", + "%s.%s values", pThreadInfo->db_name, tableName); } @@ -4673,7 +4674,7 @@ static int generateSQLHead(char *tableName, int32_t tableSeq, len = snprintf( headBuf, HEAD_BUFF_LEN, - "insert into %s.%s values", + "%s.%s values", pThreadInfo->db_name, tableName); } @@ -4694,6 +4695,7 @@ static int generateInterlaceDataBuffer( int64_t startTime, int *pRemainderBufLen) { + assert(buffer); char *pstr = buffer; SSuperTable* superTblInfo = pThreadInfo->superTblInfo; @@ -4723,18 +4725,20 @@ static int generateInterlaceDataBuffer( } else { startTime = 1500000000000; } + int k = generateDataTail( superTblInfo, batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0, startTime, &(pThreadInfo->samplePos), &dataLen); - if (k > 0) { + if (k == batchPerTbl) { pstr += dataLen; *pRemainderBufLen -= dataLen; } else { pstr -= headLen; pstr[0] = '\0'; + k = 0; } return k; @@ -4745,7 +4749,8 @@ static int generateProgressiveDataBuffer( int32_t tableSeq, threadInfo *pThreadInfo, char *buffer, int64_t insertRows, - int64_t startFrom, int64_t startTime, int *pSamplePos) + int64_t startFrom, int64_t startTime, int *pSamplePos, + int *pRemainderBufLen) { SSuperTable* superTblInfo = pThreadInfo->superTblInfo; @@ -4760,27 +4765,24 @@ static int generateProgressiveDataBuffer( } assert(buffer != NULL); - - int k = 0; - int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; - int remainderBufLen = maxSqlLen; - - memset(buffer, 0, maxSqlLen); - char *pstr = buffer; + int k = 0; + + memset(buffer, 0, *pRemainderBufLen); + int headLen = generateSQLHead(tableName, tableSeq, pThreadInfo, superTblInfo, - buffer, remainderBufLen); + buffer, *pRemainderBufLen); if (headLen <= 0) { return 0; } pstr += headLen; - remainderBufLen -= headLen; + *pRemainderBufLen -= headLen; int dataLen; k = generateDataTail(superTblInfo, - g_args.num_of_RPR, pstr, remainderBufLen, insertRows, startFrom, + g_args.num_of_RPR, pstr, *pRemainderBufLen, insertRows, startFrom, startTime, pSamplePos, &dataLen); @@ -4842,18 +4844,17 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { 
int64_t startTime = pThreadInfo->start_time; - int batchPerTblTimes; - int batchPerTbl; - assert(pThreadInfo->ntables > 0); if (interlaceRows > g_args.num_of_RPR) interlaceRows = g_args.num_of_RPR; - batchPerTbl = interlaceRows; + int batchPerTbl = interlaceRows; + + int batchPerTblTimes; if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { batchPerTblTimes = - (g_args.num_of_RPR / (interlaceRows * pThreadInfo->ntables)) + 1; + g_args.num_of_RPR / interlaceRows; } else { batchPerTblTimes = 1; } @@ -4862,6 +4863,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { bool flagSleep = true; int sleepTimeTotal = 0; + char *strInsertInto = "insert into "; + int nInsertBufLen = strlen(strInsertInto); + while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { if ((flagSleep) && (insert_interval)) { st = taosGetTimestampUs(); @@ -4872,6 +4876,11 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { int remainderBufLen = maxSqlLen; char *pstr = buffer; + + int len = snprintf(pstr, nInsertBufLen + 1, "%s", strInsertInto); + pstr += len; + remainderBufLen -= len; + int recOfBatch = 0; for (int i = 0; i < batchPerTblTimes; i ++) { @@ -4883,6 +4892,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { return NULL; } + int oldRemainderLen = remainderBufLen; int generated = generateInterlaceDataBuffer( tableName, batchPerTbl, i, batchPerTblTimes, tableSeq, @@ -4901,6 +4911,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { tableSeq ++; recOfBatch += batchPerTbl; + pstr += (oldRemainderLen - remainderBufLen); // startTime += batchPerTbl * superTblInfo->timeStampStep; pThreadInfo->totalInsertRows += batchPerTbl; verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n", @@ -5012,11 +5023,12 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { debugPrint("%s() LN%d: ### progressive write\n", __func__, __LINE__); SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; - char* buffer = calloc(superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len, 1); + char* buffer = calloc(maxSqlLen, 1); if (NULL == buffer) { errorPrint( "Failed to alloc %d Bytes, reason:%s\n", - superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len, + maxSqlLen, strerror(errno)); return NULL; } @@ -5059,10 +5071,20 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { __func__, __LINE__, pThreadInfo->threadID, tableSeq, tableName); + int remainderBufLen = maxSqlLen; + char *pstr = buffer; + int nInsertBufLen = strlen("insert into "); + + int len = snprintf(pstr, nInsertBufLen + 1, "%s", "insert into "); + + pstr += len; + remainderBufLen -= len; + int generated = generateProgressiveDataBuffer( - tableName, tableSeq, pThreadInfo, buffer, insertRows, + tableName, tableSeq, pThreadInfo, pstr, insertRows, i, start_time, - &(pThreadInfo->samplePos)); + &(pThreadInfo->samplePos), + &remainderBufLen); if (generated > 0) i += generated; else diff --git a/tests/pytest/tools/taosdemoPerformance.py b/tests/pytest/tools/taosdemoPerformance.py index 1156cc2484..8ce99d0969 100644 --- a/tests/pytest/tools/taosdemoPerformance.py +++ b/tests/pytest/tools/taosdemoPerformance.py @@ -51,7 +51,7 @@ class taosdemoPerformace: "insert_rows": 100000, "multi_thread_write_one_tbl": "no", "number_of_tbl_in_one_sql": 0, - "rows_per_tbl": 100, + "interlace_rows": 100, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000, @@ -159,4 +159,4 @@ if __name__ == '__main__': perftest = 
taosdemoPerformace(args.commit_id, args.database_name) perftest.insertData() - perftest.createTablesAndStoreData() \ No newline at end of file + perftest.createTablesAndStoreData() From 67fd99e75629bd82bfef62eb5f4f71f93c1629f4 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sun, 11 Apr 2021 23:04:48 +0800 Subject: [PATCH 155/178] [td-225] update the local cached epset in sqlObj. --- src/client/src/tscSchemaUtil.c | 3 ++- src/client/src/tscServer.c | 43 +++++++++++++++++++++++++--------- 2 files changed, 34 insertions(+), 12 deletions(-) diff --git a/src/client/src/tscSchemaUtil.c b/src/client/src/tscSchemaUtil.c index 67352ca71c..2ea382132b 100644 --- a/src/client/src/tscSchemaUtil.c +++ b/src/client/src/tscSchemaUtil.c @@ -144,8 +144,9 @@ SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg) { SNewVgroupInfo info = {0}; info.numOfEps = pVgroupMsg->numOfEps; info.vgId = pVgroupMsg->vgId; - info.inUse = 0; + info.inUse = 0; // 0 is the default value of inUse in case of multiple replica + assert(info.numOfEps >= 1 && info.vgId >= 1); for(int32_t i = 0; i < pVgroupMsg->numOfEps; ++i) { tstrncpy(info.ep[i].fqdn, pVgroupMsg->epAddr[i].fqdn, TSDB_FQDN_LEN); info.ep[i].port = pVgroupMsg->epAddr[i].port; diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index cfdf2d75dd..8889e25177 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -113,8 +113,8 @@ static void tscDumpEpSetFromVgroupInfo(SRpcEpSet *pEpSet, SNewVgroupInfo *pVgrou } } -static void tscUpdateVgroupInfo(SSqlObj *pObj, SRpcEpSet *pEpSet) { - SSqlCmd *pCmd = &pObj->cmd; +static void tscUpdateVgroupInfo(SSqlObj *pSql, SRpcEpSet *pEpSet) { + SSqlCmd *pCmd = &pSql->cmd; STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); if (pTableMetaInfo == NULL || pTableMetaInfo->pTableMeta == NULL) { return; @@ -143,6 +143,12 @@ static void tscUpdateVgroupInfo(SSqlObj *pObj, SRpcEpSet *pEpSet) { tscDebug("after: EndPoint in use:%d, numOfEps:%d", vgroupInfo.inUse, vgroupInfo.numOfEps); taosHashPut(tscVgroupMap, &vgId, sizeof(vgId), &vgroupInfo, sizeof(SNewVgroupInfo)); + + // Update the local cached epSet info cached by SqlObj + int32_t inUse = pSql->epSet.inUse; + tscDumpEpSetFromVgroupInfo(&pSql->epSet, &vgroupInfo); + tscDebug("%p update the epSet in SqlObj, in use before:%d, after:%d", pSql, inUse, pSql->epSet.inUse); + } int32_t extractSTableQueryVgroupId(STableMetaInfo* pTableMetaInfo) { @@ -2007,7 +2013,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) { (vgroupInfo.inUse < 0)) { // vgroup info exists, compare with it vgroupInfo = createNewVgroupInfo(&pMetaMsg->vgroup); taosHashPut(tscVgroupMap, &vgId, sizeof(vgId), &vgroupInfo, sizeof(vgroupInfo)); - tscDebug("add new VgroupInfo, vgId:%d, total:%d", vgId, (int32_t) taosHashGetSize(tscVgroupMap)); + tscDebug("add new VgroupInfo, vgId:%d, total cached:%d", vgId, (int32_t) taosHashGetSize(tscVgroupMap)); } } @@ -2159,18 +2165,33 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) { tscError("%p empty vgroup info", pSql); } else { for (int32_t j = 0; j < pInfo->vgroupList->numOfVgroups; ++j) { - //just init, no need to lock - SVgroupInfo *pVgroups = &pInfo->vgroupList->vgroups[j]; + // just init, no need to lock + SVgroupInfo *pVgroup = &pInfo->vgroupList->vgroups[j]; SVgroupMsg *vmsg = &pVgroupMsg->vgroups[j]; - pVgroups->vgId = htonl(vmsg->vgId); - pVgroups->numOfEps = vmsg->numOfEps; + vmsg->vgId = htonl(vmsg->vgId); + vmsg->numOfEps = vmsg->numOfEps; + for (int32_t k = 0; k < vmsg->numOfEps; ++k) { + 
vmsg->epAddr[k].port = htons(vmsg->epAddr[k].port);
+      }
 
-      assert(pVgroups->numOfEps >= 1 && pVgroups->vgId >= 1);
+      SNewVgroupInfo newVi = createNewVgroupInfo(vmsg);
+      pVgroup->numOfEps = newVi.numOfEps;
+      pVgroup->vgId = newVi.vgId;
+      for (int32_t k = 0; k < vmsg->numOfEps; ++k) {
+        pVgroup->epAddr[k].port = newVi.ep[k].port;
+        pVgroup->epAddr[k].fqdn = strndup(newVi.ep[k].fqdn, TSDB_FQDN_LEN);
+      }
 
-      for (int32_t k = 0; k < pVgroups->numOfEps; ++k) {
-        pVgroups->epAddr[k].port = htons(vmsg->epAddr[k].port);
-        pVgroups->epAddr[k].fqdn = strndup(vmsg->epAddr[k].fqdn, tListLen(vmsg->epAddr[k].fqdn));
+      // check if current buffer contains the vgroup info.
+      // If not, add it
+      SNewVgroupInfo existVgroupInfo = {.inUse = -1};
+      taosHashGetClone(tscVgroupMap, &newVi.vgId, sizeof(newVi.vgId), NULL, &existVgroupInfo, sizeof(SNewVgroupInfo));
+
+      if (((existVgroupInfo.inUse >= 0) && !vgroupInfoIdentical(&existVgroupInfo, vmsg)) ||
+          (existVgroupInfo.inUse < 0)) { // vgroup info exists, compare with it
+        taosHashPut(tscVgroupMap, &newVi.vgId, sizeof(newVi.vgId), &newVi, sizeof(newVi));
+        tscDebug("add new VgroupInfo, vgId:%d, total cached:%d", newVi.vgId, (int32_t) taosHashGetSize(tscVgroupMap));
       }
     }
   }

From ea771cee5915aa46e6ab2d2e5564fc584b3e2305 Mon Sep 17 00:00:00 2001
From: Shuduo Sang
Date: Mon, 12 Apr 2021 14:35:32 +0800
Subject: [PATCH 156/178] [TD-3752]: taosdemo columns+tags+ts must be less than 1024. (#5782)

Co-authored-by: Shuduo Sang
---
 src/kit/taosdemo/taosdemo.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c
index 49e550cc01..815a842d60 100644
--- a/src/kit/taosdemo/taosdemo.c
+++ b/src/kit/taosdemo/taosdemo.c
@@ -2980,7 +2980,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
   }
 
   int columnSize = cJSON_GetArraySize(columns);
-  if (columnSize > MAX_COLUMN_COUNT) {
+  if ((columnSize + 1/* ts */) > MAX_COLUMN_COUNT) {
     errorPrint("%s() LN%d, failed to read json, column size overflow, max column size is %d\n",
             __func__, __LINE__, MAX_COLUMN_COUNT);
     goto PARSE_OVER;
@@ -3038,8 +3038,8 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
     }
   }
 
-  if (index > MAX_COLUMN_COUNT) {
-    errorPrint("%s() LN%d, failed to read json, column size overflow, max column size is %d\n",
+  if ((index + 1 /* ts */) > MAX_COLUMN_COUNT) {
+    errorPrint("%s() LN%d, failed to read json, column size overflow, allowed max column size is %d\n",
             __func__, __LINE__, MAX_COLUMN_COUNT);
     goto PARSE_OVER;
   }
@@ -3110,16 +3110,16 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
   }
 
   if (index > MAX_TAG_COUNT) {
-    errorPrint("%s() LN%d, failed to read json, tags size overflow, max tag size is %d\n",
+    errorPrint("%s() LN%d, failed to read json, tags size overflow, allowed max tag count is %d\n",
             __func__, __LINE__, MAX_TAG_COUNT);
     goto PARSE_OVER;
   }
 
   superTbls->tagCount = index;
 
-  if ((superTbls->columnCount + superTbls->tagCount) > MAX_COLUMN_COUNT) {
-    errorPrint("%s() LN%d, columns + tags is more than max columns count: %d\n",
-            __func__, __LINE__, MAX_TAG_COUNT);
+  if ((superTbls->columnCount + superTbls->tagCount + 1 /* ts */) > MAX_COLUMN_COUNT) {
+    errorPrint("%s() LN%d, columns + tags is more than allowed max columns count: %d\n",
+            __func__, __LINE__, MAX_COLUMN_COUNT);
     goto PARSE_OVER;
   }
   ret = true;

From c1289b407554d8920c5d3b3da7559b52e1f6d765 Mon Sep 17 00:00:00 2001
From: Shengliang Guan
Date: Mon, 12 Apr 2021 08:41:20 +0000
Subject: [PATCH 157/178] [TD-3723]

---
 src/mnode/src/mnodeSdb.c | 2 +-
 src/sync/inc/syncInt.h   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c
index b470dd6359..505d3c519c 100644
--- a/src/mnode/src/mnodeSdb.c
+++ b/src/mnode/src/mnodeSdb.c
@@ -1025,7 +1025,7 @@ static int32_t sdbWriteToQueue(SSdbRow *pRow, int32_t qtype) {
 
   int32_t queued = atomic_add_fetch_32(&tsSdbMgmt.queuedMsg, 1);
   if (queued > MAX_QUEUED_MSG_NUM) {
-    sdbDebug("vgId:1, too many msg:%d in sdb queue, flow control", queued);
+    sdbInfo("vgId:1, too many msg:%d in sdb queue, flow control", queued);
     taosMsleep(1);
   }
 
diff --git a/src/sync/inc/syncInt.h b/src/sync/inc/syncInt.h
index b4d9315a8e..ec6dfcbc82 100644
--- a/src/sync/inc/syncInt.h
+++ b/src/sync/inc/syncInt.h
@@ -35,7 +35,7 @@ extern "C" {
 #define SYNC_MAX_SIZE (TSDB_MAX_WAL_SIZE + sizeof(SWalHead) + sizeof(SSyncHead) + 16)
 #define SYNC_RECV_BUFFER_SIZE (5*1024*1024)
 
-#define SYNC_MAX_FWDS 1024
+#define SYNC_MAX_FWDS 4096
 #define SYNC_FWD_TIMER 300
 #define SYNC_ROLE_TIMER 15000 // ms
 #define SYNC_CHECK_INTERVAL 1000 // ms

From b6ee0c8fc77ae35a47ca510530b8f99f368a4be1 Mon Sep 17 00:00:00 2001
From: Steven Li
Date: Tue, 13 Apr 2021 05:49:22 +0000
Subject: [PATCH 158/178] Moved over changes from feature/crash_gen to feature/crash_gen_master

---
 tests/pytest/crash_gen/README.md          |  44 ++++-
 tests/pytest/crash_gen/crash_gen_main.py  |  13 +-
 tests/pytest/crash_gen/service_manager.py | 219 ++++++++++++++++------
 3 files changed, 206 insertions(+), 70 deletions(-)

diff --git a/tests/pytest/crash_gen/README.md b/tests/pytest/crash_gen/README.md
index 6788ab1a63..8cdddd5b57 100644
--- a/tests/pytest/crash_gen/README.md
+++ b/tests/pytest/crash_gen/README.md
@@ -6,6 +6,25 @@ To effectively test and debug our TDengine product, we have developed a simple tool to
 exercise various functions of the system in a randomized fashion, hoping to expose
 the maximum number of problems, hopefully without a pre-determined scenario.
 
+# Features
+
+This tool can run as a test client with the following features:
+
+1. Any number of concurrent threads
+1. Any number of test steps/loops
+1. Auto-create and write to multiple databases
+1. Ignore specific error codes
+1. Write small or large data blocks
+1. Auto-generate out-of-sequence data, if needed
+1. Verify the result of write operations
+1. Concurrent writing to a shadow database for later data verification
+1. User specified number of replicas to use, against clusters
+
+This tool can also be used to start a TDengine service, either in stand-alone mode or
+cluster mode. The features include:
+
+1. User specified number of D-Nodes to create/use.
+
 # Preparation
 
 To run this tool, please ensure the following preparation work is done first.
@@ -16,7 +35,7 @@ To run this tool, please ensure the following preparation work is done first.
    Ubuntu 20.04LTS as our own development environment, and suggest you also use such
    an environment if possible.
 
-# Simple Execution
+# Simple Execution as Client Test Tool
 
 To run the tool with the simplest method, follow the steps below:
 
@@ -28,19 +47,21 @@ To run the tool with the simplest method, follow the steps below:
 
 That's it!
 
-# Running Clusters
+# Running Server-side Clusters
 
 This tool also makes it easy to test/verify the clustering capabilities of TDengine.
You can start a cluster quite easily with the following command: ``` $ cd tests/pytest/ -$ ./crash_gen.sh -e -o 3 +$ rm -rf ../../build/cluster_dnode_?; ./crash_gen.sh -e -o 3 # first part optional ``` The `-e` option above tells the tool to start the service, and do not run any tests, while the `-o 3` option tells the tool to start 3 DNodes and join them together in a cluster. -Obviously you can adjust the the number here. +Obviously you can adjust the the number here. The `rm -rf` command line is optional +to clean up previous cluster data, so that we can start from a clean state with no data +at all. ## Behind the Scenes @@ -89,8 +110,9 @@ The exhaustive features of the tool is available through the `-h` option: ``` $ ./crash_gen.sh -h -usage: crash_gen_bootstrap.py [-h] [-a] [-b MAX_DBS] [-c CONNECTOR_TYPE] [-d] [-e] [-g IGNORE_ERRORS] [-i MAX_REPLICAS] [-l] [-n] [-o NUM_DNODES] [-p] [-r] - [-s MAX_STEPS] [-t NUM_THREADS] [-v] [-x] +usage: crash_gen_bootstrap.py [-h] [-a] [-b MAX_DBS] [-c CONNECTOR_TYPE] [-d] [-e] [-g IGNORE_ERRORS] + [-i NUM_REPLICAS] [-k] [-l] [-m] [-n] + [-o NUM_DNODES] [-p] [-r] [-s MAX_STEPS] [-t NUM_THREADS] [-v] [-w] [-x] TDengine Auto Crash Generator (PLEASE NOTICE the Prerequisites Below) --------------------------------------------------------------------- @@ -109,11 +131,14 @@ optional arguments: -e, --run-tdengine Run TDengine service in foreground (default: false) -g IGNORE_ERRORS, --ignore-errors IGNORE_ERRORS Ignore error codes, comma separated, 0x supported (default: None) - -i MAX_REPLICAS, --max-replicas MAX_REPLICAS - Maximum number of replicas to use, when testing against clusters. (default: 1) + -i NUM_REPLICAS, --num-replicas NUM_REPLICAS + Number (fixed) of replicas to use, when testing against clusters. (default: 1) + -k, --track-memory-leaks + Use Valgrind tool to track memory leaks (default: false) -l, --larger-data Write larger amount of data during write operations (default: false) + -m, --mix-oos-data Mix out-of-sequence data into the test data stream (default: true) -n, --dynamic-db-table-names - Use non-fixed names for dbs/tables, useful for multi-instance executions (default: false) + Use non-fixed names for dbs/tables, for -b, useful for multi-instance executions (default: false) -o NUM_DNODES, --num-dnodes NUM_DNODES Number of Dnodes to initialize, used with -e option. (default: 1) -p, --per-thread-db-connection @@ -124,6 +149,7 @@ optional arguments: -t NUM_THREADS, --num-threads NUM_THREADS Number of threads to run (default: 10) -v, --verify-data Verify data written in a number of places by reading back (default: false) + -w, --use-shadow-db Use a shaddow database to verify data integrity (default: false) -x, --continue-on-exception Continue execution after encountering unexpected/disallowed errors/exceptions (default: false) ``` diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py index 0fe94bad1d..44295e8bee 100755 --- a/tests/pytest/crash_gen/crash_gen_main.py +++ b/tests/pytest/crash_gen/crash_gen_main.py @@ -1574,9 +1574,9 @@ class TaskCreateDb(StateTransitionTask): def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): # was: self.execWtSql(wt, "create database db") repStr = "" - if gConfig.max_replicas != 1: + if gConfig.num_replicas != 1: # numReplica = Dice.throw(gConfig.max_replicas) + 1 # 1,2 ... 
N - numReplica = gConfig.max_replicas # fixed, always + numReplica = gConfig.num_replicas # fixed, always repStr = "replica {}".format(numReplica) updatePostfix = "update 1" if gConfig.verify_data else "" # allow update only when "verify data" is active dbName = self._db.getName() @@ -2394,11 +2394,16 @@ class MainExec: help='Ignore error codes, comma separated, 0x supported (default: None)') parser.add_argument( '-i', - '--max-replicas', + '--num-replicas', action='store', default=1, type=int, - help='Maximum number of replicas to use, when testing against clusters. (default: 1)') + help='Number (fixed) of replicas to use, when testing against clusters. (default: 1)') + parser.add_argument( + '-k', + '--track-memory-leaks', + action='store_true', + help='Use Valgrind tool to track memory leaks (default: false)') parser.add_argument( '-l', '--larger-data', diff --git a/tests/pytest/crash_gen/service_manager.py b/tests/pytest/crash_gen/service_manager.py index ae6f8d5d3a..cdbf2db4da 100644 --- a/tests/pytest/crash_gen/service_manager.py +++ b/tests/pytest/crash_gen/service_manager.py @@ -19,6 +19,7 @@ from queue import Queue, Empty from .misc import Logging, Status, CrashGenError, Dice, Helper, Progress from .db import DbConn, DbTarget +import crash_gen.settings class TdeInstance(): """ @@ -132,6 +133,7 @@ keep 36500 walLevel 1 # # maxConnections 100 +quorum 2 """ cfgContent = cfgTemplate.format_map(cfgValues) f = open(cfgFile, "w") @@ -164,7 +166,12 @@ walLevel 1 return "127.0.0.1" def getServiceCmdLine(self): # to start the instance - return [self.getExecFile(), '-c', self.getCfgDir()] # used in subproce.Popen() + cmdLine = [] + if crash_gen.settings.gConfig.track_memory_leaks: + Logging.info("Invoking VALGRIND on service...") + cmdLine = ['valgrind', '--leak-check=yes'] + cmdLine += ["exec " + self.getExecFile(), '-c', self.getCfgDir()] # used in subproce.Popen() + return cmdLine def _getDnodes(self, dbc): dbc.query("show dnodes") @@ -202,7 +209,7 @@ walLevel 1 self.generateCfgFile() # service side generates config file, client does not self.rotateLogs() - self._smThread.start(self.getServiceCmdLine()) + self._smThread.start(self.getServiceCmdLine(), self.getLogDir()) # May raise exceptions def stop(self): self._smThread.stop() @@ -225,7 +232,7 @@ class TdeSubProcess: # RET_SUCCESS = -4 def __init__(self): - self.subProcess = None + self.subProcess = None # type: subprocess.Popen # if tInst is None: # raise CrashGenError("Empty instance not allowed in TdeSubProcess") # self._tInst = tInst # Default create at ServiceManagerThread @@ -263,7 +270,7 @@ class TdeSubProcess: # print("Starting TDengine with env: ", myEnv.items()) # print("Starting TDengine via Shell: {}".format(cmdLineStr)) - useShell = True + useShell = True # Needed to pass environments into it self.subProcess = subprocess.Popen( # ' '.join(cmdLine) if useShell else cmdLine, # shell=useShell, @@ -276,12 +283,12 @@ class TdeSubProcess: env=myEnv ) # had text=True, which interferred with reading EOF - STOP_SIGNAL = signal.SIGKILL # signal.SIGKILL/SIGINT # What signal to use (in kill) to stop a taosd process? + STOP_SIGNAL = signal.SIGINT # signal.SIGKILL/SIGINT # What signal to use (in kill) to stop a taosd process? SIG_KILL_RETCODE = 137 # ref: https://stackoverflow.com/questions/43268156/process-finished-with-exit-code-137-in-pycharm def stop(self): """ - Stop a sub process, and try to return a meaningful return code. 
+ Stop a sub process, DO NOT return anything, process all conditions INSIDE Common POSIX signal values (from man -7 signal): SIGHUP 1 @@ -301,40 +308,99 @@ class TdeSubProcess: """ if not self.subProcess: Logging.error("Sub process already stopped") - return # -1 + return retCode = self.subProcess.poll() # ret -N means killed with signal N, otherwise it's from exit(N) if retCode: # valid return code, process ended - retCode = -retCode # only if valid + # retCode = -retCode # only if valid Logging.warning("TSP.stop(): process ended itself") self.subProcess = None - return retCode + return # process still alive, let's interrupt it - Logging.info("Terminate running process, send SIG_{} and wait...".format(self.STOP_SIGNAL)) - # sub process should end, then IPC queue should end, causing IO thread to end - topSubProc = psutil.Process(self.subProcess.pid) - for child in topSubProc.children(recursive=True): # or parent.children() for recursive=False - child.send_signal(self.STOP_SIGNAL) - time.sleep(0.2) # 200 ms - # topSubProc.send_signal(sig) # now kill the main sub process (likely the Shell) + self._stopForSure(self.subProcess, self.STOP_SIGNAL) # success if no exception + self.subProcess = None - self.subProcess.send_signal(self.STOP_SIGNAL) # main sub process (likely the Shell) - self.subProcess.wait(20) - retCode = self.subProcess.returncode # should always be there - # May throw subprocess.TimeoutExpired exception above, therefore - # The process is guranteed to have ended by now - self.subProcess = None - if retCode == self.SIG_KILL_RETCODE: - Logging.info("TSP.stop(): sub proc KILLED, as expected") - elif retCode == (- self.STOP_SIGNAL): - Logging.info("TSP.stop(), sub process STOPPED, as expected") - elif retCode != 0: # != (- signal.SIGINT): - Logging.error("TSP.stop(): Failed to stop sub proc properly w/ SIG {}, retCode={}".format( - self.STOP_SIGNAL, retCode)) - else: - Logging.info("TSP.stop(): sub proc successfully terminated with SIG {}".format(self.STOP_SIGNAL)) - return - retCode + # sub process should end, then IPC queue should end, causing IO thread to end + + @classmethod + def _stopForSure(cls, proc: subprocess.Popen, sig: int): + ''' + Stop a process and all sub processes with a singal, and SIGKILL if necessary + ''' + def doKillTdService(proc: subprocess.Popen, sig: int): + Logging.info("Killing sub-sub process {} with signal {}".format(proc.pid, sig)) + proc.send_signal(sig) + try: + retCode = proc.wait(20) + if (- retCode) == signal.SIGSEGV: # Crashed + Logging.warning("Process {} CRASHED, please check CORE file!".format(proc.pid)) + elif (- retCode) == sig : + Logging.info("TD service terminated with expected return code {}".format(sig)) + else: + Logging.warning("TD service terminated, EXPECTING ret code {}, got {}".format(sig, -retCode)) + return True # terminated successfully + except subprocess.TimeoutExpired as err: + Logging.warning("Failed to kill sub-sub process {} with signal {}".format(proc.pid, sig)) + return False # failed to terminate + + + def doKillChild(child: psutil.Process, sig: int): + Logging.info("Killing sub-sub process {} with signal {}".format(child.pid, sig)) + child.send_signal(sig) + try: + retCode = child.wait(20) + if (- retCode) == signal.SIGSEGV: # Crashed + Logging.warning("Process {} CRASHED, please check CORE file!".format(child.pid)) + elif (- retCode) == sig : + Logging.info("Sub-sub process terminated with expected return code {}".format(sig)) + else: + Logging.warning("Process terminated, EXPECTING ret code {}, got {}".format(sig, 
-retCode)) + return True # terminated successfully + except psutil.TimeoutExpired as err: + Logging.warning("Failed to kill sub-sub process {} with signal {}".format(child.pid, sig)) + return False # did not terminate + + def doKill(proc: subprocess.Popen, sig: int): + pid = proc.pid + try: + topSubProc = psutil.Process(pid) + for child in topSubProc.children(recursive=True): # or parent.children() for recursive=False + Logging.warning("Unexpected child to be killed") + doKillChild(child, sig) + except psutil.NoSuchProcess as err: + Logging.info("Process not found, can't kill, pid = {}".format(pid)) + + return doKillTdService(proc, sig) + # TODO: re-examine if we need to kill the top process, which is always the SHELL for now + # try: + # proc.wait(1) # SHELL process here, may throw subprocess.TimeoutExpired exception + # # expRetCode = self.SIG_KILL_RETCODE if sig==signal.SIGKILL else (-sig) + # # if retCode == expRetCode: + # # Logging.info("Process terminated with expected return code {}".format(retCode)) + # # else: + # # Logging.warning("Process terminated, EXPECTING ret code {}, got {}".format(expRetCode, retCode)) + # # return True # success + # except subprocess.TimeoutExpired as err: + # Logging.warning("Failed to kill process {} with signal {}".format(pid, sig)) + # return False # failed to kill + + def softKill(proc, sig): + return doKill(proc, sig) + + def hardKill(proc): + return doKill(proc, signal.SIGKILL) + + + + pid = proc.pid + Logging.info("Terminate running processes under {}, with SIG #{} and wait...".format(pid, sig)) + if softKill(proc, sig): + return# success + if sig != signal.SIGKILL: # really was soft above + if hardKill(proc): + return + raise CrashGenError("Failed to stop process, pid={}".format(pid)) class ServiceManager: PAUSE_BETWEEN_IPC_CHECK = 1.2 # seconds between checks on STDOUT of sub process @@ -560,7 +626,8 @@ class ServiceManagerThread: # self._tInstNum = tInstNum # instance serial number in cluster, ZERO based # self._tInst = tInst or TdeInstance() # Need an instance - self._thread = None # The actual thread, # type: threading.Thread + self._thread = None # The actual thread, # type: threading.Thread + self._thread2 = None # watching stderr self._status = Status(Status.STATUS_STOPPED) # The status of the underlying service, actually. def __repr__(self): @@ -568,11 +635,20 @@ class ServiceManagerThread: self.getStatus(), self._tdeSubProcess) def getStatus(self): + ''' + Get the status of the process being managed. (misnomer alert!) + ''' return self._status # Start the thread (with sub process), and wait for the sub service # to become fully operational - def start(self, cmdLine): + def start(self, cmdLine : str, logDir: str): + ''' + Request the manager thread to start a new sub process, and manage it. 
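+        (Two reader threads are also started to monitor the sub process's STDOUT and STDERR.)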
+
+        :param cmdLine: the command line to invoke
+        :param logDir: the logging directory, to hold stdout/stderr files
+        '''
         if self._thread:
             raise RuntimeError("Unexpected _thread")
         if self._tdeSubProcess:
@@ -582,20 +658,30 @@ class ServiceManagerThread:
         self._status.set(Status.STATUS_STARTING)
 
         self._tdeSubProcess = TdeSubProcess()
-        self._tdeSubProcess.start(cmdLine)
+        self._tdeSubProcess.start(cmdLine) # TODO: verify process is running
 
         self._ipcQueue = Queue()
         self._thread = threading.Thread( # First thread captures server OUTPUT
             target=self.svcOutputReader,
-            args=(self._tdeSubProcess.getStdOut(), self._ipcQueue))
+            args=(self._tdeSubProcess.getStdOut(), self._ipcQueue, logDir))
         self._thread.daemon = True  # thread dies with the program
         self._thread.start()
+        time.sleep(0.01)
+        if not self._thread.is_alive(): # What happened?
+            Logging.info("Failed to start process to monitor STDOUT")
+            self.stop()
+            raise CrashGenError("Failed to start thread to monitor STDOUT")
+        Logging.info("Successfully started process to monitor STDOUT")
 
         self._thread2 = threading.Thread( # 2nd thread captures server ERRORs
             target=self.svcErrorReader,
-            args=(self._tdeSubProcess.getStdErr(), self._ipcQueue))
+            args=(self._tdeSubProcess.getStdErr(), self._ipcQueue, logDir))
         self._thread2.daemon = True  # thread dies with the program
         self._thread2.start()
+        time.sleep(0.01)
+        if not self._thread2.is_alive():
+            self.stop()
+            raise CrashGenError("Failed to start thread to monitor STDERR")
 
         # wait for service to start
         for i in range(0, 100):
@@ -643,7 +729,7 @@ class ServiceManagerThread:
             Logging.info("Service already stopped")
             return
         if self.getStatus().isStopping():
-            Logging.info("Service is already being stopped")
+            Logging.info("Service is already being stopped, pid: {}".format(self._tdeSubProcess.getPid()))
             return
         # Linux will send Control-C generated SIGINT to the TDengine process
         # already, ref:
@@ -653,14 +739,14 @@ class ServiceManagerThread:
         self._status.set(Status.STATUS_STOPPING)
 
         # retCode = self._tdeSubProcess.stop()
-        try:
-            retCode = self._tdeSubProcess.stop()
-            # print("Attempted to stop sub process, got return code: {}".format(retCode))
-            if retCode == signal.SIGSEGV : # SGV
-                Logging.error("[[--ERROR--]]: TDengine service SEGV fault (check core file!)")
-        except subprocess.TimeoutExpired as err:
-            Logging.info("Time out waiting for TDengine service process to exit")
-        else:
+        # try:
+        #     retCode = self._tdeSubProcess.stop()
+        #     # print("Attempted to stop sub process, got return code: {}".format(retCode))
+        #     if retCode == signal.SIGSEGV : # SGV
+        #         Logging.error("[[--ERROR--]]: TDengine service SEGV fault (check core file!)")
+        # except subprocess.TimeoutExpired as err:
+        #     Logging.info("Time out waiting for TDengine service process to exit")
+        if not self._tdeSubProcess.stop(): # everything within
             if self._tdeSubProcess.isRunning(): # still running, should now never happen
                 Logging.error("FAILED to stop sub process, it is still running... 
pid = {}".format( self._tdeSubProcess.getPid())) @@ -683,16 +769,18 @@ class ServiceManagerThread: raise RuntimeError( "SMT.Join(): Unexpected status: {}".format(self._status)) - if self._thread: - self._thread.join() - self._thread = None - self._status.set(Status.STATUS_STOPPED) - # STD ERR thread - self._thread2.join() - self._thread2 = None + if self._thread or self._thread2 : + if self._thread: + self._thread.join() + self._thread = None + if self._thread2: # STD ERR thread + self._thread2.join() + self._thread2 = None else: print("Joining empty thread, doing nothing") + self._status.set(Status.STATUS_STOPPED) + def _trimQueue(self, targetSize): if targetSize <= 0: return # do nothing @@ -739,11 +827,22 @@ class ServiceManagerThread: print(pBar, end="", flush=True) print('\b\b\b\b', end="", flush=True) - def svcOutputReader(self, out: IO, queue): + def svcOutputReader(self, out: IO, queue, logDir: str): + ''' + The infinite routine that processes the STDOUT stream for the sub process being managed. + + :param out: the IO stream object used to fetch the data from + :param queue: the queue where we dump the roughly parsed line-by-line data + :param logDir: where we should dump a verbatim output file + ''' + os.makedirs(logDir, exist_ok=True) + logFile = os.path.join(logDir,'stdout.log') + fOut = open(logFile, 'wb') # Important Reference: https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python # print("This is the svcOutput Reader...") # for line in out : for line in iter(out.readline, b''): + fOut.write(line) # print("Finished reading a line: {}".format(line)) # print("Adding item to queue...") try: @@ -772,10 +871,16 @@ class ServiceManagerThread: # queue.put(line) # meaning sub process must have died Logging.info("EOF for TDengine STDOUT: {}".format(self)) - out.close() + out.close() # Close the stream + fOut.close() # Close the output file - def svcErrorReader(self, err: IO, queue): + def svcErrorReader(self, err: IO, queue, logDir: str): + os.makedirs(logDir, exist_ok=True) + logFile = os.path.join(logDir,'stderr.log') + fErr = open(logFile, 'wb') for line in iter(err.readline, b''): + fErr.write(line) Logging.info("TDengine STDERR: {}".format(line)) Logging.info("EOF for TDengine STDERR: {}".format(self)) - err.close() \ No newline at end of file + err.close() + fErr.close() \ No newline at end of file From 4c152b6724d252efa95344715ef053d4f8068a20 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Tue, 13 Apr 2021 14:36:01 +0800 Subject: [PATCH 159/178] fix client crash issue --- src/query/src/qExtbuffer.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/query/src/qExtbuffer.c b/src/query/src/qExtbuffer.c index a73f385282..73b5b81e52 100644 --- a/src/query/src/qExtbuffer.c +++ b/src/query/src/qExtbuffer.c @@ -266,6 +266,7 @@ int32_t tExtMemBufferFlush(tExtMemBuffer *pMemBuffer) { size_t retVal = fwrite((char *)&(first->item), pMemBuffer->pageSize, 1, pMemBuffer->file); if (retVal <= 0) { // failed to write to buffer, may be not enough space ret = TAOS_SYSTEM_ERROR(errno); + pMemBuffer->pHead = first; return ret; } From 9e370964ff5704740142c196c52fe408da4559b1 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Tue, 13 Apr 2021 14:38:24 +0800 Subject: [PATCH 160/178] taosd mem leak issue --- src/query/src/qExecutor.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index dc524d95a9..ad5c3a2db3 100644 --- a/src/query/src/qExecutor.c +++ 
b/src/query/src/qExecutor.c @@ -1878,6 +1878,17 @@ static void doFreeQueryHandle(SQueryRuntimeEnv* pRuntimeEnv) { assert(pMemRef->ref == 0 && pMemRef->snapshot.imem == NULL && pMemRef->snapshot.mem == NULL); } +static void destroyTsComp(SQueryRuntimeEnv *pRuntimeEnv, SQuery *pQuery) { + if (isTsCompQuery(pQuery)) { + SColumnInfoData* pColInfoData = taosArrayGet(pRuntimeEnv->outputBuf->pDataBlock, 0); + FILE *f = *(FILE **)pColInfoData->pData; // TODO refactor + if (f) { + fclose(f); + *(FILE **)pColInfoData->pData = NULL; + } + } +} + static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { SQuery *pQuery = pRuntimeEnv->pQuery; SQInfo* pQInfo = (SQInfo*) pRuntimeEnv->qinfo; @@ -1896,6 +1907,8 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { destroyResultBuf(pRuntimeEnv->pResultBuf); doFreeQueryHandle(pRuntimeEnv); + destroyTsComp(pRuntimeEnv, pQuery); + pRuntimeEnv->pTsBuf = tsBufDestroy(pRuntimeEnv->pTsBuf); tfree(pRuntimeEnv->keyBuf); @@ -6863,6 +6876,7 @@ int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) { } fclose(f); + *(FILE **)pColInfoData->pData = NULL; } // all data returned, set query over From 640062a53d571e2e791e019b1f722b0e5a6019bd Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 13 Apr 2021 16:24:42 +0800 Subject: [PATCH 161/178] Hotfix/sangshuduo/td 3401 query statistic (#5795) * [TD-3401]: taosdemo query statistic. refactor func name. * [TD-3401]: taosdemo query statistic. refactor func name 2. * [TD-3401]: taosdemo support query statistic. implementation. * cleanup Co-authored-by: Shuduo Sang --- src/kit/taosdemo/insert-interlace.json | 1 - src/kit/taosdemo/insert.json | 1 - src/kit/taosdemo/query.json | 37 ++- src/kit/taosdemo/taosdemo.c | 265 ++++++++++-------- .../taosdemo/src/main/resources/insert.json | 2 - .../cluster/clusterEnvSetup/insert.json | 2 - tests/pytest/perfbenchmark/bug3433.py | 4 - tests/pytest/query/query1970YearsAf.py | 4 +- tests/pytest/tools/insert-interlace.json | 1 - .../insert-tblimit-tboffset-createdb.json | 2 - .../insert-tblimit-tboffset-insertrec.json | 2 - .../pytest/tools/insert-tblimit-tboffset.json | 2 - .../tools/insert-tblimit-tboffset0.json | 2 - .../tools/insert-tblimit1-tboffset.json | 2 - tests/pytest/tools/taosdemo-sampledata.json | 2 - tests/pytest/tools/taosdemoPerformance.py | 2 - 16 files changed, 183 insertions(+), 148 deletions(-) diff --git a/src/kit/taosdemo/insert-interlace.json b/src/kit/taosdemo/insert-interlace.json index 344db4fd00..cf3e1de2f4 100644 --- a/src/kit/taosdemo/insert-interlace.json +++ b/src/kit/taosdemo/insert-interlace.json @@ -40,7 +40,6 @@ "data_source": "rand", "insert_mode": "taosc", "insert_rows": 1000, - "multi_thread_write_one_tbl": "no", "interlace_rows": 20, "max_sql_len": 1024000, "disorder_ratio": 0, diff --git a/src/kit/taosdemo/insert.json b/src/kit/taosdemo/insert.json index f0e3ab1d50..5f152a2d0c 100644 --- a/src/kit/taosdemo/insert.json +++ b/src/kit/taosdemo/insert.json @@ -40,7 +40,6 @@ "data_source": "rand", "insert_mode": "taosc", "insert_rows": 100000, - "multi_thread_write_one_tbl": "no", "interlace_rows": 0, "max_sql_len": 1024000, "disorder_ratio": 0, diff --git a/src/kit/taosdemo/query.json b/src/kit/taosdemo/query.json index 33ac120bda..d84f997c32 100644 --- a/src/kit/taosdemo/query.json +++ b/src/kit/taosdemo/query.json @@ -1,5 +1,5 @@ { - "filetype":"query", + "filetype": "query", "cfgdir": "/etc/taos", "host": "127.0.0.1", "port": 6030, @@ -7,13 +7,30 @@ "password": "taosdata", "confirm_parameter_prompt": "yes", "databases": "dbx", - 
"specified_table_query": - {"query_interval":1, "concurrent":4, - "sqls": [{"sql": "select last_row(*) from stb where color='red'", "result": "./query_res0.txt"}, - {"sql": "select count(*) from stb_01", "result": "./query_res1.txt"}] - }, - "super_table_query": - {"stblname": "stb", "query_interval":1, "threads":4, - "sqls": [{"sql": "select last_row(*) from xxxx", "result": "./query_res2.txt"}] - } + "query_times": 1, + "specified_table_query": { + "query_interval": 1, + "concurrent": 4, + "sqls": [ + { + "sql": "select last_row(*) from stb where color='red'", + "result": "./query_res0.txt" + }, + { + "sql": "select count(*) from stb_01", + "result": "./query_res1.txt" + } + ] + }, + "super_table_query": { + "stblname": "stb", + "query_interval": 1, + "threads": 4, + "sqls": [ + { + "sql": "select last_row(*) from xxxx", + "result": "./query_res2.txt" + } + ] + } } diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 815a842d60..dd2891fd32 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -366,6 +366,7 @@ typedef struct SpecifiedQueryInfo_S { char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1]; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; + int totalQueried; } SpecifiedQueryInfo; typedef struct SuperQueryInfo_S { @@ -385,6 +386,7 @@ typedef struct SuperQueryInfo_S { TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; char* childTblName; + int totalQueried; } SuperQueryInfo; typedef struct SQueryMetaInfo_S { @@ -398,6 +400,7 @@ typedef struct SQueryMetaInfo_S { SpecifiedQueryInfo specifiedQueryInfo; SuperQueryInfo superQueryInfo; + int totalQueried; } SQueryMetaInfo; typedef struct SThreadInfo_S { @@ -2602,8 +2605,8 @@ static int createDatabasesAndStables() { static void* createTable(void *sarg) { - threadInfo *winfo = (threadInfo *)sarg; - SSuperTable* superTblInfo = winfo->superTblInfo; + threadInfo *pThreadInfo = (threadInfo *)sarg; + SSuperTable* superTblInfo = pThreadInfo->superTblInfo; int64_t lastPrintTime = taosGetTimestampMs(); @@ -2621,15 +2624,15 @@ static void* createTable(void *sarg) verbosePrint("%s() LN%d: Creating table from %d to %d\n", __func__, __LINE__, - winfo->start_table_from, winfo->end_table_to); + pThreadInfo->start_table_from, pThreadInfo->end_table_to); - for (int i = winfo->start_table_from; i <= winfo->end_table_to; i++) { + for (int i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) { if (0 == g_Dbs.use_metric) { snprintf(buffer, buff_len, "create table if not exists %s.%s%d %s;", - winfo->db_name, + pThreadInfo->db_name, g_args.tb_prefix, i, - winfo->cols); + pThreadInfo->cols); } else { if (superTblInfo == NULL) { errorPrint("%s() LN%d, use metric, but super table info is NULL\n", @@ -2658,8 +2661,8 @@ static void* createTable(void *sarg) len += snprintf(buffer + len, buff_len - len, "if not exists %s.%s%d using %s.%s tags %s ", - winfo->db_name, superTblInfo->childTblPrefix, - i, winfo->db_name, + pThreadInfo->db_name, superTblInfo->childTblPrefix, + i, pThreadInfo->db_name, superTblInfo->sTblName, tagsValBuf); free(tagsValBuf); batchNum++; @@ -2673,7 +2676,7 @@ static void* createTable(void *sarg) len = 0; verbosePrint("%s() LN%d %s\n", __func__, __LINE__, buffer); - if (0 != queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE, false)){ + if (0 != queryDbExec(pThreadInfo->taos, buffer, NO_INSERT_TYPE, false)){ errorPrint( "queryDbExec() failed. 
buffer:\n%s\n", buffer); free(buffer); return NULL; @@ -2682,14 +2685,14 @@ static void* createTable(void *sarg) int64_t currentPrintTime = taosGetTimestampMs(); if (currentPrintTime - lastPrintTime > 30*1000) { printf("thread[%d] already create %d - %d tables\n", - winfo->threadID, winfo->start_table_from, i); + pThreadInfo->threadID, pThreadInfo->start_table_from, i); lastPrintTime = currentPrintTime; } } if (0 != len) { verbosePrint("%s() %d buffer: %s\n", __func__, __LINE__, buffer); - if (0 != queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE, false)) { + if (0 != queryDbExec(pThreadInfo->taos, buffer, NO_INSERT_TYPE, false)) { errorPrint( "queryDbExec() failed. buffer:\n%s\n", buffer); } } @@ -3694,7 +3697,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { __func__, __LINE__); goto PARSE_OVER; } - +/* cJSON *multiThreadWriteOneTbl = cJSON_GetObjectItem(stbInfo, "multi_thread_write_one_tbl"); // no , yes if (multiThreadWriteOneTbl @@ -3711,7 +3714,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { printf("ERROR: failed to read json, multiThreadWriteOneTbl not found\n"); goto PARSE_OVER; } - +*/ cJSON* interlaceRows = cJSON_GetObjectItem(stbInfo, "interlace_rows"); if (interlaceRows && interlaceRows->type == cJSON_Number) { g_Dbs.db[i].superTbls[j].interlaceRows = interlaceRows->valueint; @@ -5159,45 +5162,45 @@ free_and_statistics_2: static void* syncWrite(void *sarg) { - threadInfo *winfo = (threadInfo *)sarg; - SSuperTable* superTblInfo = winfo->superTblInfo; + threadInfo *pThreadInfo = (threadInfo *)sarg; + SSuperTable* superTblInfo = pThreadInfo->superTblInfo; int interlaceRows = superTblInfo?superTblInfo->interlaceRows:g_args.interlace_rows; if (interlaceRows > 0) { // interlace mode - return syncWriteInterlace(winfo); + return syncWriteInterlace(pThreadInfo); } else { // progressive mode - return syncWriteProgressive(winfo); + return syncWriteProgressive(pThreadInfo); } } static void callBack(void *param, TAOS_RES *res, int code) { - threadInfo* winfo = (threadInfo*)param; - SSuperTable* superTblInfo = winfo->superTblInfo; + threadInfo* pThreadInfo = (threadInfo*)param; + SSuperTable* superTblInfo = pThreadInfo->superTblInfo; int insert_interval = superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; if (insert_interval) { - winfo->et = taosGetTimestampUs(); - if (((winfo->et - winfo->st)/1000) < insert_interval) { - taosMsleep(insert_interval - (winfo->et - winfo->st)/1000); // ms + pThreadInfo->et = taosGetTimestampUs(); + if (((pThreadInfo->et - pThreadInfo->st)/1000) < insert_interval) { + taosMsleep(insert_interval - (pThreadInfo->et - pThreadInfo->st)/1000); // ms } } - char *buffer = calloc(1, winfo->superTblInfo->maxSqlLen); + char *buffer = calloc(1, pThreadInfo->superTblInfo->maxSqlLen); char data[MAX_DATA_SIZE]; char *pstr = buffer; - pstr += sprintf(pstr, "insert into %s.%s%d values", winfo->db_name, winfo->tb_prefix, - winfo->start_table_from); -// if (winfo->counter >= winfo->superTblInfo->insertRows) { - if (winfo->counter >= g_args.num_of_RPR) { - winfo->start_table_from++; - winfo->counter = 0; + pstr += sprintf(pstr, "insert into %s.%s%d values", pThreadInfo->db_name, pThreadInfo->tb_prefix, + pThreadInfo->start_table_from); +// if (pThreadInfo->counter >= pThreadInfo->superTblInfo->insertRows) { + if (pThreadInfo->counter >= g_args.num_of_RPR) { + pThreadInfo->start_table_from++; + pThreadInfo->counter = 0; } - if (winfo->start_table_from > winfo->end_table_to) { - tsem_post(&winfo->lock_sem); + if (pThreadInfo->start_table_from > 
pThreadInfo->end_table_to) { + tsem_post(&pThreadInfo->lock_sem); free(buffer); taos_free_result(res); return; @@ -5205,46 +5208,46 @@ static void callBack(void *param, TAOS_RES *res, int code) { for (int i = 0; i < g_args.num_of_RPR; i++) { int rand_num = taosRandom() % 100; - if (0 != winfo->superTblInfo->disorderRatio - && rand_num < winfo->superTblInfo->disorderRatio) { - int64_t d = winfo->lastTs - (taosRandom() % winfo->superTblInfo->disorderRange + 1); - generateRowData(data, d, winfo->superTblInfo); + if (0 != pThreadInfo->superTblInfo->disorderRatio + && rand_num < pThreadInfo->superTblInfo->disorderRatio) { + int64_t d = pThreadInfo->lastTs - (taosRandom() % pThreadInfo->superTblInfo->disorderRange + 1); + generateRowData(data, d, pThreadInfo->superTblInfo); } else { - generateRowData(data, winfo->lastTs += 1000, winfo->superTblInfo); + generateRowData(data, pThreadInfo->lastTs += 1000, pThreadInfo->superTblInfo); } pstr += sprintf(pstr, "%s", data); - winfo->counter++; + pThreadInfo->counter++; - if (winfo->counter >= winfo->superTblInfo->insertRows) { + if (pThreadInfo->counter >= pThreadInfo->superTblInfo->insertRows) { break; } } if (insert_interval) { - winfo->st = taosGetTimestampUs(); + pThreadInfo->st = taosGetTimestampUs(); } - taos_query_a(winfo->taos, buffer, callBack, winfo); + taos_query_a(pThreadInfo->taos, buffer, callBack, pThreadInfo); free(buffer); taos_free_result(res); } static void *asyncWrite(void *sarg) { - threadInfo *winfo = (threadInfo *)sarg; - SSuperTable* superTblInfo = winfo->superTblInfo; + threadInfo *pThreadInfo = (threadInfo *)sarg; + SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - winfo->st = 0; - winfo->et = 0; - winfo->lastTs = winfo->start_time; + pThreadInfo->st = 0; + pThreadInfo->et = 0; + pThreadInfo->lastTs = pThreadInfo->start_time; int insert_interval = superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; if (insert_interval) { - winfo->st = taosGetTimestampUs(); + pThreadInfo->st = taosGetTimestampUs(); } - taos_query_a(winfo->taos, "show databases", callBack, winfo); + taos_query_a(pThreadInfo->taos, "show databases", callBack, pThreadInfo); - tsem_wait(&(winfo->lock_sem)); + tsem_wait(&(pThreadInfo->lock_sem)); return NULL; } @@ -5774,10 +5777,10 @@ static int insertTestProcess() { return 0; } -static void *specifiedQueryProcess(void *sarg) { - threadInfo *winfo = (threadInfo *)sarg; +static void *specifiedTableQuery(void *sarg) { + threadInfo *pThreadInfo = (threadInfo *)sarg; - if (winfo->taos == NULL) { + if (pThreadInfo->taos == NULL) { TAOS * taos = NULL; taos = taos_connect(g_queryInfo.host, g_queryInfo.user, @@ -5786,17 +5789,17 @@ static void *specifiedQueryProcess(void *sarg) { g_queryInfo.port); if (taos == NULL) { errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", - winfo->threadID, taos_errstr(NULL)); + pThreadInfo->threadID, taos_errstr(NULL)); return NULL; } else { - winfo->taos = taos; + pThreadInfo->taos = taos; } } char sqlStr[MAX_DB_NAME_SIZE + 5]; sprintf(sqlStr, "use %s", g_queryInfo.dbName); - if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE, false)) { - taos_close(winfo->taos); + if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { + taos_close(pThreadInfo->taos); errorPrint( "use database %s failed!\n\n", g_queryInfo.dbName); return NULL; @@ -5807,11 +5810,15 @@ static void *specifiedQueryProcess(void *sarg) { int queryTimes = g_queryInfo.specifiedQueryInfo.queryTimes; + int totalQueried = 0; + int64_t lastPrintTime = taosGetTimestampMs(); + int64_t 
startTs = taosGetTimestampMs(); + while(queryTimes --) { if (g_queryInfo.specifiedQueryInfo.rate && (et - st) < (int64_t)g_queryInfo.specifiedQueryInfo.rate*1000) { taosMsleep(g_queryInfo.specifiedQueryInfo.rate*1000 - (et - st)); // ms - //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_from, winfo->end_table_to); + //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to); } st = taosGetTimestampUs(); @@ -5819,13 +5826,13 @@ static void *specifiedQueryProcess(void *sarg) { if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { int64_t t1 = taosGetTimestampUs(); char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.specifiedQueryInfo.result[winfo->querySeq][0] != 0) { + if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) { sprintf(tmpFile, "%s-%d", - g_queryInfo.specifiedQueryInfo.result[winfo->querySeq], - winfo->threadID); + g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], + pThreadInfo->threadID); } - selectAndGetResult(winfo->taos, - g_queryInfo.specifiedQueryInfo.sql[winfo->querySeq], tmpFile); + selectAndGetResult(pThreadInfo->taos, + g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], tmpFile); int64_t t2 = taosGetTimestampUs(); printf("=[taosc] thread[%"PRId64"] complete one sql, Spent %f s\n", taosGetSelfPthreadId(), (t2 - t1)/1000000.0); @@ -5833,25 +5840,37 @@ static void *specifiedQueryProcess(void *sarg) { int64_t t1 = taosGetTimestampUs(); int retCode = postProceSql(g_queryInfo.host, g_queryInfo.port, - g_queryInfo.specifiedQueryInfo.sql[winfo->querySeq]); + g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq]); + if (0 != retCode) { + printf("====restful return fail, threadID[%d]\n", pThreadInfo->threadID); + return NULL; + } int64_t t2 = taosGetTimestampUs(); printf("=[restful] thread[%"PRId64"] complete one sql, Spent %f s\n", taosGetSelfPthreadId(), (t2 - t1)/1000000.0); - if (0 != retCode) { - printf("====restful return fail, threadID[%d]\n", winfo->threadID); - return NULL; - } } + totalQueried ++; + g_queryInfo.specifiedQueryInfo.totalQueried ++; et = taosGetTimestampUs(); printf("==thread[%"PRId64"] complete all sqls to specify tables once queries duration:%.6fs\n\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0); + + int64_t currentPrintTime = taosGetTimestampMs(); + int64_t endTs = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] has currently completed queries: %d, QPS: %10.2f\n", + pThreadInfo->threadID, + totalQueried, + totalQueried/((endTs-startTs)/1000.0)); + } + lastPrintTime = currentPrintTime; } return NULL; } -static void replaceSubTblName(char* inSql, char* outSql, int tblIndex) { +static void replaceChildTblName(char* inSql, char* outSql, int tblIndex) { char sourceString[32] = "xxxx"; char subTblName[MAX_TB_NAME_SIZE*3]; sprintf(subTblName, "%s.%s", @@ -5873,11 +5892,11 @@ static void replaceSubTblName(char* inSql, char* outSql, int tblIndex) { //printf("3: %s\n", outSql); } -static void *superQueryProcess(void *sarg) { +static void *superTableQuery(void *sarg) { char sqlstr[1024]; - threadInfo *winfo = (threadInfo *)sarg; + threadInfo *pThreadInfo = (threadInfo *)sarg; - if (winfo->taos == NULL) { + if (pThreadInfo->taos == NULL) { TAOS * taos = NULL; taos = taos_connect(g_queryInfo.host, g_queryInfo.user, @@ -5886,10 +5905,10 @@ static void 
*superQueryProcess(void *sarg) { g_queryInfo.port); if (taos == NULL) { errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", - winfo->threadID, taos_errstr(NULL)); + pThreadInfo->threadID, taos_errstr(NULL)); return NULL; } else { - winfo->taos = taos; + pThreadInfo->taos = taos; } } @@ -5897,33 +5916,49 @@ static void *superQueryProcess(void *sarg) { int64_t et = (int64_t)g_queryInfo.superQueryInfo.rate*1000; int queryTimes = g_queryInfo.superQueryInfo.queryTimes; + int totalQueried = 0; + int64_t startTs = taosGetTimestampMs(); + int64_t lastPrintTime = taosGetTimestampMs(); while(queryTimes --) { if (g_queryInfo.superQueryInfo.rate && (et - st) < (int64_t)g_queryInfo.superQueryInfo.rate*1000) { taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms - //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_from, winfo->end_table_to); + //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to); } st = taosGetTimestampUs(); - for (int i = winfo->start_table_from; i <= winfo->end_table_to; i++) { + for (int i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) { for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) { memset(sqlstr,0,sizeof(sqlstr)); - replaceSubTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, i); + replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, i); char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; if (g_queryInfo.superQueryInfo.result[j][0] != 0) { sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[j], - winfo->threadID); + pThreadInfo->threadID); } - selectAndGetResult(winfo->taos, sqlstr, tmpFile); + selectAndGetResult(pThreadInfo->taos, sqlstr, tmpFile); + + totalQueried++; + g_queryInfo.superQueryInfo.totalQueried ++; + + int64_t currentPrintTime = taosGetTimestampMs(); + int64_t endTs = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] has currently completed queries: %d, QPS: %10.2f\n", + pThreadInfo->threadID, + totalQueried, + totalQueried/((endTs-startTs)/1000.0)); + } + lastPrintTime = currentPrintTime; } } et = taosGetTimestampUs(); printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%d - %d] once queries duration:%.4fs\n\n", taosGetSelfPthreadId(), - winfo->start_table_from, - winfo->end_table_to, + pThreadInfo->start_table_from, + pThreadInfo->end_table_to, (double)(et - st)/1000000.0); } @@ -5969,6 +6004,8 @@ static int queryTestProcess() { int nConcurrent = g_queryInfo.specifiedQueryInfo.concurrent; int nSqlCount = g_queryInfo.specifiedQueryInfo.sqlCount; + int64_t startTs = taosGetTimestampMs(); + if ((nSqlCount > 0) && (nConcurrent > 0)) { pids = malloc(nConcurrent * nSqlCount * sizeof(pthread_t)); @@ -6002,7 +6039,7 @@ static int queryTestProcess() { t_info->taos = NULL;// TODO: workaround to use separate taos connection; - pthread_create(pids + i * nSqlCount + j, NULL, specifiedQueryProcess, + pthread_create(pids + i * nSqlCount + j, NULL, specifiedTableQuery, t_info); } } @@ -6051,7 +6088,7 @@ static int queryTestProcess() { t_info->end_table_to = i < b ? 
startFrom + a : startFrom + a - 1; startFrom = t_info->end_table_to + 1; t_info->taos = NULL; // TODO: workaround to use separate taos connection; - pthread_create(pidsOfSub + i, NULL, superQueryProcess, t_info); + pthread_create(pidsOfSub + i, NULL, superTableQuery, t_info); } g_queryInfo.superQueryInfo.threadCnt = threads; @@ -6078,6 +6115,14 @@ static int queryTestProcess() { tmfree((char*)infosOfSub); // taos_close(taos);// TODO: workaround to use separate taos connection; + int64_t endTs = taosGetTimestampMs(); + + int totalQueried = g_queryInfo.specifiedQueryInfo.totalQueried + + g_queryInfo.superQueryInfo.totalQueried; + + printf("==== completed total queries: %d, the QPS of all threads: %10.2f====\n", + totalQueried, + totalQueried/((endTs-startTs)/1000.0)); return 0; } @@ -6114,12 +6159,12 @@ static TAOS_SUB* subscribeImpl(TAOS *taos, char *sql, char* topic, char* resultF return tsub; } -static void *subSubscribeProcess(void *sarg) { - threadInfo *winfo = (threadInfo *)sarg; +static void *superSubscribe(void *sarg) { + threadInfo *pThreadInfo = (threadInfo *)sarg; char subSqlstr[1024]; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0}; - if (winfo->taos == NULL) { + if (pThreadInfo->taos == NULL) { TAOS * taos = NULL; taos = taos_connect(g_queryInfo.host, g_queryInfo.user, @@ -6128,17 +6173,17 @@ static void *subSubscribeProcess(void *sarg) { g_queryInfo.port); if (taos == NULL) { errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", - winfo->threadID, taos_errstr(NULL)); + pThreadInfo->threadID, taos_errstr(NULL)); return NULL; } else { - winfo->taos = taos; + pThreadInfo->taos = taos; } } char sqlStr[MAX_TB_NAME_SIZE*2]; sprintf(sqlStr, "use %s", g_queryInfo.dbName); - if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE, false)) { - taos_close(winfo->taos); + if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { + taos_close(pThreadInfo->taos); errorPrint( "use database %s failed!\n\n", g_queryInfo.dbName); return NULL; @@ -6149,7 +6194,7 @@ static void *subSubscribeProcess(void *sarg) { do { //if (g_queryInfo.specifiedQueryInfo.rate && (et - st) < g_queryInfo.specifiedQueryInfo.rate*1000) { // taosMsleep(g_queryInfo.specifiedQueryInfo.rate*1000 - (et - st)); // ms - // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_from, winfo->end_table_to); + // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to); //} //st = taosGetTimestampMs(); @@ -6157,15 +6202,15 @@ static void *subSubscribeProcess(void *sarg) { for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { sprintf(topic, "taosdemo-subscribe-%d", i); memset(subSqlstr,0,sizeof(subSqlstr)); - replaceSubTblName(g_queryInfo.superQueryInfo.sql[i], subSqlstr, i); + replaceChildTblName(g_queryInfo.superQueryInfo.sql[i], subSqlstr, i); char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; if (g_queryInfo.superQueryInfo.result[i][0] != 0) { sprintf(tmpFile, "%s-%d", - g_queryInfo.superQueryInfo.result[i], winfo->threadID); + g_queryInfo.superQueryInfo.result[i], pThreadInfo->threadID); } - tsub[i] = subscribeImpl(winfo->taos, subSqlstr, topic, tmpFile); + tsub[i] = subscribeImpl(pThreadInfo->taos, subSqlstr, topic, tmpFile); if (NULL == tsub[i]) { - taos_close(winfo->taos); + taos_close(pThreadInfo->taos); return NULL; } } @@ -6187,7 +6232,7 @@ static void *subSubscribeProcess(void *sarg) { if 
(g_queryInfo.superQueryInfo.result[i][0] != 0) { sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[i], - winfo->threadID); + pThreadInfo->threadID); } getResult(res, tmpFile); } @@ -6199,15 +6244,15 @@ static void *subSubscribeProcess(void *sarg) { taos_unsubscribe(tsub[i], g_queryInfo.superQueryInfo.subscribeKeepProgress); } - taos_close(winfo->taos); + taos_close(pThreadInfo->taos); return NULL; } -static void *superSubscribeProcess(void *sarg) { - threadInfo *winfo = (threadInfo *)sarg; +static void *specifiedSubscribe(void *sarg) { + threadInfo *pThreadInfo = (threadInfo *)sarg; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0}; - if (winfo->taos == NULL) { + if (pThreadInfo->taos == NULL) { TAOS * taos = NULL; taos = taos_connect(g_queryInfo.host, g_queryInfo.user, @@ -6216,18 +6261,18 @@ static void *superSubscribeProcess(void *sarg) { g_queryInfo.port); if (taos == NULL) { errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", - winfo->threadID, taos_errstr(NULL)); + pThreadInfo->threadID, taos_errstr(NULL)); return NULL; } else { - winfo->taos = taos; + pThreadInfo->taos = taos; } } char sqlStr[MAX_TB_NAME_SIZE*2]; sprintf(sqlStr, "use %s", g_queryInfo.dbName); debugPrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr); - if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE, false)) { - taos_close(winfo->taos); + if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { + taos_close(pThreadInfo->taos); return NULL; } @@ -6236,7 +6281,7 @@ static void *superSubscribeProcess(void *sarg) { do { //if (g_queryInfo.specifiedQueryInfo.rate && (et - st) < g_queryInfo.specifiedQueryInfo.rate*1000) { // taosMsleep(g_queryInfo.specifiedQueryInfo.rate*1000 - (et - st)); // ms - // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_from, winfo->end_table_to); + // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to); //} //st = taosGetTimestampMs(); @@ -6246,12 +6291,12 @@ static void *superSubscribeProcess(void *sarg) { char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; if (g_queryInfo.superQueryInfo.result[i][0] != 0) { sprintf(tmpFile, "%s-%d", - g_queryInfo.specifiedQueryInfo.result[i], winfo->threadID); + g_queryInfo.specifiedQueryInfo.result[i], pThreadInfo->threadID); } - tsub[i] = subscribeImpl(winfo->taos, + tsub[i] = subscribeImpl(pThreadInfo->taos, g_queryInfo.specifiedQueryInfo.sql[i], topic, tmpFile); if (NULL == g_queryInfo.specifiedQueryInfo.tsub[i]) { - taos_close(winfo->taos); + taos_close(pThreadInfo->taos); return NULL; } } @@ -6272,7 +6317,7 @@ static void *superSubscribeProcess(void *sarg) { char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; if (g_queryInfo.specifiedQueryInfo.result[i][0] != 0) { sprintf(tmpFile, "%s-%d", - g_queryInfo.specifiedQueryInfo.result[i], winfo->threadID); + g_queryInfo.specifiedQueryInfo.result[i], pThreadInfo->threadID); } getResult(res, tmpFile); } @@ -6285,7 +6330,7 @@ static void *superSubscribeProcess(void *sarg) { g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); } - taos_close(winfo->taos); + taos_close(pThreadInfo->taos); return NULL; } @@ -6343,7 +6388,7 @@ static int subscribeTestProcess() { threadInfo *t_info = infos + i; t_info->threadID = i; t_info->taos = NULL; // TODO: workaround to use separate taos connection; - pthread_create(pids + i, NULL, superSubscribeProcess, t_info); + pthread_create(pids + i, NULL, 
specifiedSubscribe, t_info); } //==== create sub threads for query from sub table @@ -6386,7 +6431,7 @@ static int subscribeTestProcess() { t_info->end_table_to = i < b ? startFrom + a : startFrom + a - 1; startFrom = t_info->end_table_to + 1; t_info->taos = NULL; // TODO: workaround to use separate taos connection; - pthread_create(pidsOfSub + i, NULL, subSubscribeProcess, t_info); + pthread_create(pidsOfSub + i, NULL, superSubscribe, t_info); } g_queryInfo.superQueryInfo.threadCnt = threads; diff --git a/tests/examples/JDBC/taosdemo/src/main/resources/insert.json b/tests/examples/JDBC/taosdemo/src/main/resources/insert.json index 35c7773175..7578083d33 100644 --- a/tests/examples/JDBC/taosdemo/src/main/resources/insert.json +++ b/tests/examples/JDBC/taosdemo/src/main/resources/insert.json @@ -36,8 +36,6 @@ "insert_mode": "taosc", "insert_rate": 0, "insert_rows": 100, - "multi_thread_write_one_tbl": "no", - "number_of_tbl_in_one_sql": 0, "interlace_rows": 3, "max_sql_len": 1024, "disorder_ratio": 0, diff --git a/tests/pytest/cluster/clusterEnvSetup/insert.json b/tests/pytest/cluster/clusterEnvSetup/insert.json index 4548ef74e6..2f3cf0f0d9 100644 --- a/tests/pytest/cluster/clusterEnvSetup/insert.json +++ b/tests/pytest/cluster/clusterEnvSetup/insert.json @@ -37,8 +37,6 @@ "insert_mode": "taosc", "insert_rate": 0, "insert_rows": 100000, - "multi_thread_write_one_tbl": "no", - "number_of_tbl_in_one_sql": 1, "interlace_rows": 100, "max_sql_len": 1024000, "disorder_ratio": 0, diff --git a/tests/pytest/perfbenchmark/bug3433.py b/tests/pytest/perfbenchmark/bug3433.py index 80e9cce0dc..e4480df6b6 100644 --- a/tests/pytest/perfbenchmark/bug3433.py +++ b/tests/pytest/perfbenchmark/bug3433.py @@ -91,8 +91,6 @@ class TDTestCase: "data_source": "rand", "insert_mode": "taosc", "insert_rows": 1, - "multi_thread_write_one_tbl": "no", - "number_of_tbl_in_one_sql": 0, "rows_per_tbl": 1, "max_sql_len": 65480, "disorder_ratio": 0, @@ -135,8 +133,6 @@ class TDTestCase: "data_source": "rand", "insert_mode": "taosc", "insert_rows": 1, - "multi_thread_write_one_tbl": "no", - "number_of_tbl_in_one_sql": 0, "rows_per_tbl": 1, "max_sql_len": 65480, "disorder_ratio": 0, diff --git a/tests/pytest/query/query1970YearsAf.py b/tests/pytest/query/query1970YearsAf.py index e66a79f5e6..93404afd59 100644 --- a/tests/pytest/query/query1970YearsAf.py +++ b/tests/pytest/query/query1970YearsAf.py @@ -82,8 +82,6 @@ class TDTestCase: "data_source": "rand", "insert_mode": "taosc", "insert_rows": 5000, - "multi_thread_write_one_tbl": "no", - "number_of_tbl_in_one_sql": 0, "rows_per_tbl": 1, "max_sql_len": 65480, "disorder_ratio": 0, @@ -255,4 +253,4 @@ class TDTestCase: tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/pytest/tools/insert-interlace.json b/tests/pytest/tools/insert-interlace.json index d4767ad064..a5c545d159 100644 --- a/tests/pytest/tools/insert-interlace.json +++ b/tests/pytest/tools/insert-interlace.json @@ -41,7 +41,6 @@ "data_source": "rand", "insert_mode": "taosc", "insert_rows": 250, - "multi_thread_write_one_tbl": "no", "interlace_rows": 80, "max_sql_len": 1024000, "disorder_ratio": 0, diff --git a/tests/pytest/tools/insert-tblimit-tboffset-createdb.json b/tests/pytest/tools/insert-tblimit-tboffset-createdb.json index 8c3b39fb0b..9220bc1d17 100644 --- a/tests/pytest/tools/insert-tblimit-tboffset-createdb.json +++ b/tests/pytest/tools/insert-tblimit-tboffset-createdb.json @@ -40,8 
+40,6 @@ "data_source": "rand", "insert_mode": "taosc", "insert_rows": 0, - "multi_thread_write_one_tbl": "no", - "number_of_tbl_in_one_sql": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000, diff --git a/tests/pytest/tools/insert-tblimit-tboffset-insertrec.json b/tests/pytest/tools/insert-tblimit-tboffset-insertrec.json index a9efa7c31c..164d4fe8be 100644 --- a/tests/pytest/tools/insert-tblimit-tboffset-insertrec.json +++ b/tests/pytest/tools/insert-tblimit-tboffset-insertrec.json @@ -42,8 +42,6 @@ "insert_rows": 1000, "childtable_limit": 33, "childtable_offset": 33, - "multi_thread_write_one_tbl": "no", - "number_of_tbl_in_one_sql": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000, diff --git a/tests/pytest/tools/insert-tblimit-tboffset.json b/tests/pytest/tools/insert-tblimit-tboffset.json index f3d3e864ba..0b8e0bd6c5 100644 --- a/tests/pytest/tools/insert-tblimit-tboffset.json +++ b/tests/pytest/tools/insert-tblimit-tboffset.json @@ -42,8 +42,6 @@ "insert_rows": 1000, "childtable_limit": 33, "childtable_offset": 33, - "multi_thread_write_one_tbl": "no", - "number_of_tbl_in_one_sql": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000, diff --git a/tests/pytest/tools/insert-tblimit-tboffset0.json b/tests/pytest/tools/insert-tblimit-tboffset0.json index 6bf3783cb2..55d9e19055 100644 --- a/tests/pytest/tools/insert-tblimit-tboffset0.json +++ b/tests/pytest/tools/insert-tblimit-tboffset0.json @@ -42,8 +42,6 @@ "insert_rows": 1000, "childtable_limit": 20, "childtable_offset": 0, - "multi_thread_write_one_tbl": "no", - "number_of_tbl_in_one_sql": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000, diff --git a/tests/pytest/tools/insert-tblimit1-tboffset.json b/tests/pytest/tools/insert-tblimit1-tboffset.json index 292902093d..3a88665661 100644 --- a/tests/pytest/tools/insert-tblimit1-tboffset.json +++ b/tests/pytest/tools/insert-tblimit1-tboffset.json @@ -42,8 +42,6 @@ "insert_rows": 1000, "childtable_limit": 1, "childtable_offset": 50, - "multi_thread_write_one_tbl": "no", - "number_of_tbl_in_one_sql": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000, diff --git a/tests/pytest/tools/taosdemo-sampledata.json b/tests/pytest/tools/taosdemo-sampledata.json index d543b7e086..ce1c68b393 100644 --- a/tests/pytest/tools/taosdemo-sampledata.json +++ b/tests/pytest/tools/taosdemo-sampledata.json @@ -22,8 +22,6 @@ "insert_mode": "taosc", "insert_rate": 0, "insert_rows": 20, - "multi_thread_write_one_tbl": "no", - "number_of_tbl_in_one_sql": 0, "max_sql_len": 1048000, "timestamp_step": 1000, "start_timestamp": "2020-1-1 00:00:00", diff --git a/tests/pytest/tools/taosdemoPerformance.py b/tests/pytest/tools/taosdemoPerformance.py index 8ce99d0969..b50180e2b3 100644 --- a/tests/pytest/tools/taosdemoPerformance.py +++ b/tests/pytest/tools/taosdemoPerformance.py @@ -49,8 +49,6 @@ class taosdemoPerformace: "batch_create_tbl_num": 10, "insert_mode": "taosc", "insert_rows": 100000, - "multi_thread_write_one_tbl": "no", - "number_of_tbl_in_one_sql": 0, "interlace_rows": 100, "max_sql_len": 1024000, "disorder_ratio": 0, From db17a59b71cda7a2f46b34edc6efc0684858969f Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 13 Apr 2021 20:24:59 +0800 Subject: [PATCH 162/178] [TD-3079]: use multi-level storage with vnode's wal files --- src/vnode/src/vnodeMain.c | 39 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 36 insertions(+), 3 deletions(-) diff --git a/src/vnode/src/vnodeMain.c 
b/src/vnode/src/vnodeMain.c index ee8ed9e0fa..0921c5ce48 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -232,9 +232,28 @@ int32_t vnodeAlter(void *vparam, SCreateVnodeMsg *pVnodeCfg) { return code; } +static void vnodeFindWalRootDir(int32_t vgId, char *walRootDir) { + char vnodeDir[TSDB_FILENAME_LEN] = "\0"; + snprintf(vnodeDir, TSDB_FILENAME_LEN, "/vnode/vnode%d/wal", vgId); + + TDIR *tdir = tfsOpendir(vnodeDir); + if (!tdir) return; + + const TFILE *tfile = tfsReaddir(tdir); + if (!tfile) { + tfsClosedir(tdir); + return; + } + + sprintf(walRootDir, "%s/vnode/vnode%d", TFS_DISK_PATH(tfile->level, tfile->id), vgId); + + tfsClosedir(tdir); +} + int32_t vnodeOpen(int32_t vgId) { char temp[TSDB_FILENAME_LEN * 3]; char rootDir[TSDB_FILENAME_LEN * 2]; + char walRootDir[TSDB_FILENAME_LEN * 2] = {0}; snprintf(rootDir, TSDB_FILENAME_LEN * 2, "%s/vnode%d", tsVnodeDir, vgId); SVnodeObj *pVnode = calloc(sizeof(SVnodeObj), 1); @@ -321,7 +340,21 @@ int32_t vnodeOpen(int32_t vgId) { } } - sprintf(temp, "%s/wal", rootDir); + // walRootDir for wal & syncInfo.path (not empty dir of /vnode/vnode{pVnode->vgId}/wal) + vnodeFindWalRootDir(pVnode->vgId, walRootDir); + if (walRootDir[0] == 0) { + int level = -1, id = -1; + + tfsAllocDisk(TFS_PRIMARY_LEVEL, &level, &id); + if (level < 0 || id < 0) { + vnodeCleanUp(pVnode); + return terrno; + } + + sprintf(walRootDir, "%s/vnode/vnode%d", TFS_DISK_PATH(level, id), vgId); + } + + sprintf(temp, "%s/wal", walRootDir); pVnode->walCfg.vgId = pVnode->vgId; pVnode->wal = walOpen(temp, &pVnode->walCfg); if (pVnode->wal == NULL) { @@ -353,7 +386,7 @@ int32_t vnodeOpen(int32_t vgId) { pVnode->events = NULL; - vDebug("vgId:%d, vnode is opened in %s, pVnode:%p", pVnode->vgId, rootDir, pVnode); + vDebug("vgId:%d, vnode is opened in %s - %s, pVnode:%p", pVnode->vgId, rootDir, walRootDir, pVnode); vnodeAddIntoHash(pVnode); @@ -361,7 +394,7 @@ int32_t vnodeOpen(int32_t vgId) { syncInfo.vgId = pVnode->vgId; syncInfo.version = pVnode->version; syncInfo.syncCfg = pVnode->syncCfg; - tstrncpy(syncInfo.path, rootDir, TSDB_FILENAME_LEN); + tstrncpy(syncInfo.path, walRootDir, TSDB_FILENAME_LEN); syncInfo.getWalInfoFp = vnodeGetWalInfo; syncInfo.writeToCacheFp = vnodeWriteToCache; syncInfo.confirmForward = vnodeConfirmForard; From e3ac8a05239153151f5686ef428225016eee476e Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Wed, 14 Apr 2021 11:04:30 +0800 Subject: [PATCH 163/178] [TD-2986]: add demo example into CI --- tests/examples/c/demo.c | 2 +- tests/test-all.sh | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/tests/examples/c/demo.c b/tests/examples/c/demo.c index e074e64966..0b1cd7b5d2 100644 --- a/tests/examples/c/demo.c +++ b/tests/examples/c/demo.c @@ -66,7 +66,7 @@ int main(int argc, char *argv[]) { printf("failed to connect to server, reason:%s\n", "null taos"/*taos_errstr(taos)*/); exit(1); } - for (int i = 0; i < 4000000; i++) { + for (int i = 0; i < 100; i++) { Test(taos, qstr, i); } taos_close(taos); diff --git a/tests/test-all.sh b/tests/test-all.sh index 980629e6d2..47e5de6aa0 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -516,6 +516,15 @@ if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$2" != "jdbc" ] && [ "$2" != echo "asyncdemo pass" totalExamplePass=`expr $totalExamplePass + 1` fi + + ./demo 127.0.0.1 > /dev/null 2>&1 + if [ $? 
!= "0" ]; then + echo "demo failed" + totalExampleFailed=`expr $totalExampleFailed + 1` + else + echo "demo pass" + totalExamplePass=`expr $totalExamplePass + 1` + fi if [ "$totalExamplePass" -gt "0" ]; then echo -e "\n${GREEN} ### Total $totalExamplePass examples succeed! ### ${NC}" From 3cb71e2dfcccdde56769323d6e984d58ad10f3b6 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 14 Apr 2021 14:54:06 +0800 Subject: [PATCH 164/178] [TD-3766]: fix fsync alter calculation --- src/wal/src/walMgmt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/wal/src/walMgmt.c b/src/wal/src/walMgmt.c index 39ce2657aa..55ab9b031b 100644 --- a/src/wal/src/walMgmt.c +++ b/src/wal/src/walMgmt.c @@ -104,7 +104,7 @@ int32_t walAlter(void *handle, SWalCfg *pCfg) { pWal->level = pCfg->walLevel; pWal->fsyncPeriod = pCfg->fsyncPeriod; - pWal->fsyncSeq = pCfg->fsyncPeriod % 1000; + pWal->fsyncSeq = pCfg->fsyncPeriod / 1000; if (pWal->fsyncSeq <= 0) pWal->fsyncSeq = 1; return TSDB_CODE_SUCCESS; From 73d605476c6aface791989a0c5da00983679b485 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Wed, 14 Apr 2021 17:01:28 +0800 Subject: [PATCH 165/178] dulicated CQ created when wal is re-executed issue --- src/client/src/tscStream.c | 2 +- src/cq/src/cqMain.c | 29 ++++++++++++++++++++++++----- 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index 7699e6f459..d90b5fead1 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -619,7 +619,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p if (code == TSDB_CODE_SUCCESS) { tscCreateStream(pStream, pSql, code); } else if (code != TSDB_CODE_TSC_ACTION_IN_PROGRESS) { - tscError("%p open stream failed, sql:%s, code:%s", pSql, sqlstr, tstrerror(pRes->code)); + tscError("%p open stream failed, sql:%s, code:%s", pSql, sqlstr, tstrerror(code)); taosReleaseRef(tscObjRef, pSql->self); free(pStream); return NULL; diff --git a/src/cq/src/cqMain.c b/src/cq/src/cqMain.c index d4d202267c..fb683786ad 100644 --- a/src/cq/src/cqMain.c +++ b/src/cq/src/cqMain.c @@ -310,8 +310,23 @@ void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, ch } SCqContext *pContext = handle; int64_t rid = 0; + + pthread_mutex_lock(&pContext->mutex); + + SCqObj *pObj = pContext->pHead; + while (pObj) { + if (pObj->uid == uid) { + rid = pObj->rid; + pthread_mutex_unlock(&pContext->mutex); + return (void *)rid; + } + + pObj = pObj->next; + } - SCqObj *pObj = calloc(sizeof(SCqObj), 1); + pthread_mutex_unlock(&pContext->mutex); + + pObj = calloc(sizeof(SCqObj), 1); if (pObj == NULL) return NULL; pObj->uid = uid; @@ -386,12 +401,15 @@ static void doCreateStream(void *param, TAOS_RES *result, int32_t code) { if (pObj == NULL) { return; } - + SCqContext* pContext = pObj->pContext; - SSqlObj* pSql = (SSqlObj*)result; - if (atomic_val_compare_exchange_ptr(&(pContext->dbConn), NULL, pSql->pTscObj) != NULL) { - taos_close(pSql->pTscObj); + SSqlObj* pSql = (SSqlObj*)result; + if (code == TSDB_CODE_SUCCESS) { + if (atomic_val_compare_exchange_ptr(&(pContext->dbConn), NULL, pSql->pTscObj) != NULL) { + taos_close(pSql->pTscObj); + } } + pthread_mutex_lock(&pContext->mutex); cqCreateStream(pContext, pObj); pthread_mutex_unlock(&pContext->mutex); @@ -427,6 +445,7 @@ static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) { pObj->tmrId = taosTmrStart(cqProcessCreateTimer, 1000, (void *)pObj->rid, pContext->tmrCtrl); return; } + pObj->tmrId = 0; if 
From 73d605476c6aface791989a0c5da00983679b485 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Wed, 14 Apr 2021 17:01:28 +0800 Subject: [PATCH 165/178] fix duplicated CQ creation when wal is re-executed --- src/client/src/tscStream.c | 2 +- src/cq/src/cqMain.c | 29 ++++++++++++++++++++++++----- 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index 7699e6f459..d90b5fead1 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -619,7 +619,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p if (code == TSDB_CODE_SUCCESS) { tscCreateStream(pStream, pSql, code); } else if (code != TSDB_CODE_TSC_ACTION_IN_PROGRESS) { - tscError("%p open stream failed, sql:%s, code:%s", pSql, sqlstr, tstrerror(pRes->code)); + tscError("%p open stream failed, sql:%s, code:%s", pSql, sqlstr, tstrerror(code)); taosReleaseRef(tscObjRef, pSql->self); free(pStream); return NULL; diff --git a/src/cq/src/cqMain.c b/src/cq/src/cqMain.c index d4d202267c..fb683786ad 100644 --- a/src/cq/src/cqMain.c +++ b/src/cq/src/cqMain.c @@ -310,8 +310,23 @@ void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, ch } SCqContext *pContext = handle; int64_t rid = 0; + + pthread_mutex_lock(&pContext->mutex); + + SCqObj *pObj = pContext->pHead; + while (pObj) { + if (pObj->uid == uid) { + rid = pObj->rid; + pthread_mutex_unlock(&pContext->mutex); + return (void *)rid; + } + + pObj = pObj->next; + } - SCqObj *pObj = calloc(sizeof(SCqObj), 1); + pthread_mutex_unlock(&pContext->mutex); + + pObj = calloc(sizeof(SCqObj), 1); if (pObj == NULL) return NULL; pObj->uid = uid; @@ -386,12 +401,15 @@ static void doCreateStream(void *param, TAOS_RES *result, int32_t code) { if (pObj == NULL) { return; } - + SCqContext* pContext = pObj->pContext; - SSqlObj* pSql = (SSqlObj*)result; - if (atomic_val_compare_exchange_ptr(&(pContext->dbConn), NULL, pSql->pTscObj) != NULL) { - taos_close(pSql->pTscObj); + SSqlObj* pSql = (SSqlObj*)result; + if (code == TSDB_CODE_SUCCESS) { + if (atomic_val_compare_exchange_ptr(&(pContext->dbConn), NULL, pSql->pTscObj) != NULL) { + taos_close(pSql->pTscObj); + } } + pthread_mutex_lock(&pContext->mutex); cqCreateStream(pContext, pObj); pthread_mutex_unlock(&pContext->mutex); @@ -427,6 +445,7 @@ static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) { pObj->tmrId = taosTmrStart(cqProcessCreateTimer, 1000, (void *)pObj->rid, pContext->tmrCtrl); return; } + pObj->tmrId = 0; if (pObj->pStream == NULL) {
From 432200c091fb025999466bcc2192734cb101459a Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Wed, 14 Apr 2021 17:28:37 +0800 Subject: [PATCH 166/178] fix bug: report a proper error for too many columns in the selection clause --- src/client/src/tscSQLParser.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index e8f753f876..46d925d9f4 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -1608,7 +1608,7 @@ bool isValidDistinctSql(SQueryInfo* pQueryInfo) { int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, SArray* pSelectList, bool isSTable, bool joinQuery, bool timeWindowQuery) { assert(pSelectList != NULL && pCmd != NULL); - + const char* msg1 = "too many columns in selection clause"; const char* msg2 = "functions or others can not be mixed up"; const char* msg3 = "not support query expression"; const char* msg5 = "invalid function name"; @@ -1657,7 +1657,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, SArray* pSelectLis } if (pQueryInfo->fieldsInfo.numOfOutput > TSDB_MAX_COLUMNS) { - return TSDB_CODE_TSC_INVALID_SQL; + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } }
From b1193284b810611de437c2a07319599b29458d79 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Thu, 15 Apr 2021 09:34:28 +0800 Subject: [PATCH 167/178] fix crash when a query is cancelled --- src/query/src/qExecutor.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index ad5c3a2db3..f793333243 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1879,12 +1879,14 @@ static void doFreeQueryHandle(SQueryRuntimeEnv* pRuntimeEnv) { } static void destroyTsComp(SQueryRuntimeEnv *pRuntimeEnv, SQuery *pQuery) { - if (isTsCompQuery(pQuery)) { + if (isTsCompQuery(pQuery) && pRuntimeEnv->outputBuf && pRuntimeEnv->outputBuf->pDataBlock && taosArrayGetSize(pRuntimeEnv->outputBuf->pDataBlock) > 0) { SColumnInfoData* pColInfoData = taosArrayGet(pRuntimeEnv->outputBuf->pDataBlock, 0); - FILE *f = *(FILE **)pColInfoData->pData; // TODO refactor - if (f) { - fclose(f); - *(FILE **)pColInfoData->pData = NULL; + if (pColInfoData) { + FILE *f = *(FILE **)pColInfoData->pData; // TODO refactor + if (f) { + fclose(f); + *(FILE **)pColInfoData->pData = NULL; + } } } }
From 091d22e2c26dd3ad36e4ea1cbcfe70bc7e61999d Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Thu, 15 Apr 2021 10:57:19 +0800 Subject: [PATCH 168/178] fix bug: reserve room for the string terminator in tscPrintSelectClause --- src/client/src/tscSQLParser.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index e8f753f876..ed4b059a0d 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -6326,7 +6326,7 @@ void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex) { int32_t totalBufSize = 1024; - char str[1024] = {0}; + char str[1024+1] = {0}; int32_t offset = 0; offset += sprintf(str, "num:%d [", size);
From 8b72cf391c5444f92133bb794700b15b9a96ba59 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 15 Apr 2021 11:06:43 +0800 Subject: [PATCH 169/178] [TD-3775]: taosdemo interlace rows must not exceed insert rows.
(#5819) Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index dd2891fd32..f531cc15f7 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -4801,6 +4801,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT; int interlaceRows = superTblInfo?superTblInfo->interlaceRows:g_args.interlace_rows; + if (interlaceRows > insertRows) + interlaceRows = insertRows; + if (interlaceRows > g_args.num_of_RPR) interlaceRows = g_args.num_of_RPR; @@ -4849,9 +4852,6 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { assert(pThreadInfo->ntables > 0); - if (interlaceRows > g_args.num_of_RPR) - interlaceRows = g_args.num_of_RPR; - int batchPerTbl = interlaceRows; int batchPerTblTimes; From db0f06afb298a7f69eccb3e1e8b42c35d542728d Mon Sep 17 00:00:00 2001 From: wu champion Date: Thu, 15 Apr 2021 11:40:37 +0800 Subject: [PATCH 170/178] [TD-3587] add case for TD-3587 to CI --- tests/perftest-scripts/perftest-query.sh | 3 + tests/pytest/perfbenchmark/joinPerformance.py | 355 ++++++++++++++++++ 2 files changed, 358 insertions(+) create mode 100644 tests/pytest/perfbenchmark/joinPerformance.py diff --git a/tests/perftest-scripts/perftest-query.sh b/tests/perftest-scripts/perftest-query.sh index 8b9fa1a546..cc44b22418 100755 --- a/tests/perftest-scripts/perftest-query.sh +++ b/tests/perftest-scripts/perftest-query.sh @@ -73,6 +73,9 @@ function runQueryPerfTest { python3 insert/insertFromCSVPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT + + python3 perfbenchmark/joinPerformance.py | tee -a $PERFORMANCE_TEST_REPORT + } diff --git a/tests/pytest/perfbenchmark/joinPerformance.py b/tests/pytest/perfbenchmark/joinPerformance.py new file mode 100644 index 0000000000..acd4f88c9b --- /dev/null +++ b/tests/pytest/perfbenchmark/joinPerformance.py @@ -0,0 +1,355 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import taos +import os +import json +import argparse +import subprocess +import datetime +import re + +from multiprocessing import cpu_count +# from util.log import * +# from util.sql import * +# from util.cases import * +# from util.dnodes import * + +class JoinPerf: + + def __init__(self, clearCache, dbName, keep): + self.clearCache = clearCache + self.dbname = dbName + self.drop = "yes" + self.keep = keep + self.host = "127.0.0.1" + self.user = "root" + self.password = "taosdata" + self.config = "/etc/taosperf" + self.conn = taos.connect( + self.host, + self.user, + self.password, + self.config) + + # def init(self, conn, logSql): + # tdLog.debug(f"start to excute {__file__}") + # tdSql.init(conn.cursor()) + + def getBuildPath(self) -> str: + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/debug/build/bin")] + break + return buildPath + + def getCfgDir(self) -> str: + return self.getBuildPath() + "/sim/dnode1/cfg" + + # def initConnection(self): + # return self.getCfgDir() + + # def connectDB(self): + # self.conn = taos.connect( + # self.host, + # self.user, + # self.password, + # self.getCfgDir()) + # return self.conn.cursor() + + def dbinfocfg(self) -> dict: + return { + "name": self.dbname, + "drop": self.drop, + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": self.keep, + "minRows": 100, + "maxRows": 4096, + "comp": 2, + "walLevel": 1, + "cachelast": 0, + "quorum": 1, + "fsync": 3000, + "update": 0 + } + + # def column_tag_count(self, **column_tag) -> list : + # return [{"type": type, "count": count} for type, count in column_tag.items()] + + def type_check(func): + def wrapper(self, **kwargs): + num_types = ["int", "float", "bigint", "tinyint", "smallint", "double"] + str_types = ["binary", "nchar"] + for k ,v in kwargs.items(): + if k.lower() not in num_types and k.lower() not in str_types: + return f"args {k} type error, not allowed" + elif not isinstance(v, (int, list, tuple)): + return f"value {v} type error, not allowed" + elif k.lower() in num_types and not isinstance(v, int): + return f"arg {v} takes 1 positional argument must be type int " + elif isinstance(v, (list,tuple)) and len(v) > 2: + return f"arg {v} takes from 1 to 2 positional arguments but more than 2 were given " + elif isinstance(v,(list,tuple)) and [ False for _ in v if not isinstance(_, int) ]: + return f"arg {v} takes from 1 to 2 positional arguments must be type int " + else: + pass + return func(self, **kwargs) + return wrapper + + @type_check + def column_tag_count(self, **column_tag) -> list : + init_column_tag = [] + for k, v in column_tag.items(): + if re.search(k, "int, float, bigint, tinyint, smallint, double", re.IGNORECASE): + init_column_tag.append({"type": k, "count": v}) + elif re.search(k, "binary, nchar", re.IGNORECASE): + if isinstance(v, int): + init_column_tag.append({"type": k, "count": v, "len":8}) + elif len(v) 
== 1: + init_column_tag.append({"type": k, "count": v[0], "len": 8}) + else: + init_column_tag.append({"type": k, "count": v[0], "len": v[1]}) + return init_column_tag + + def stbcfg(self, stb: str, child_tab_count: int, prechildtab: str, columns: dict, tags: dict) -> dict: + return { + "name": stb, + "child_table_exists": "no", + "childtable_count": child_tab_count, + "childtable_prefix": prechildtab, + "auto_create_table": "no", + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 50, + "multi_thread_write_one_tbl": "no", + "number_of_tbl_in_one_sql": 0, + "rows_per_tbl": 1, + "max_sql_len": 65480, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 20000, + "start_timestamp": "1969-12-31 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": self.column_tag_count(**columns), + "tags": self.column_tag_count(**tags) + } + + def createcfg(self,db: dict, stbs: list) -> dict: + return { + "filetype": "insert", + "cfgdir": self.config, + "host": self.host, + "port": 6030, + "user": self.user, + "password": self.password, + "thread_count": cpu_count(), + "thread_count_create_tbl": cpu_count(), + "result_file": "/tmp/insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "num_of_records_per_req": 100, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": db, + "super_tables": stbs + }] + } + + def createinsertfile(self,db: dict, stbs: list) -> str: + date = datetime.datetime.now().strftime("%Y%m%d%H%M") + file_create_table = f"/tmp/insert_{date}.json" + + with open(file_create_table, 'w') as f: + json.dump(self.createcfg(db, stbs), f) + + return file_create_table + + def querysqls(self, sql: str) -> list: + return [{"sql":sql,"result":""}] + + def querycfg(self, sql: str) -> dict: + return { + "filetype": "query", + "cfgdir": self.config, + "host": self.host, + "port": 6030, + "user": self.user, + "password": self.password, + "confirm_parameter_prompt": "yes", + "databases": "db", + "specified_table_query": { + "query_interval": 0, + "concurrent": cpu_count(), + "sqls": self.querysqls(sql) + } + } + + def createqueryfile(self, sql: str): + date = datetime.datetime.now().strftime("%Y%m%d%H%M") + file_query_table = f"/tmp/query_{date}.json" + + with open(file_query_table,"w") as f: + json.dump(self.querycfg(sql), f) + + return file_query_table + + def taosdemotable(self, filepath, resultfile="/dev/null"): + taosdemopath = self.getBuildPath() + "/debug/build/bin" + with open(filepath,"r") as f: + filetype = json.load(f)["filetype"] + if filetype == "insert": + taosdemo_table_cmd = f"{taosdemopath}/taosdemo -f {filepath} > {resultfile} 2>&1" + else: + taosdemo_table_cmd = f"yes | {taosdemopath}/taosdemo -f {filepath} > {resultfile} 2>&1" + _ = subprocess.check_output(taosdemo_table_cmd, shell=True).decode("utf-8") + + def droptmpfile(self): + drop_file_cmd = "rm -f /tmp/query_* " + _ = subprocess.check_output(drop_file_cmd, shell=True).decode("utf-8") + drop_file_cmd = "rm -f querySystemInfo-*" + _ = subprocess.check_output(drop_file_cmd, shell=True).decode("utf-8") + drop_file_cmd = "rm -f /tmp/insert_* " + _ = subprocess.check_output(drop_file_cmd, shell=True).decode("utf-8") + + def run(self): + print("========== join on table schema performance ==========") + if self.clearCache == True: + # must be root permission + subprocess.check_output("echo 3 > /proc/sys/vm/drop_caches") + + # cfg = { + # 'enableRecordSql': '1' + # } + # tdDnodes.deploy(1, cfg) + + # 
tdLog.printNoPrefix("==========step1: create 1024 columns on different data type==========") + # db = self.dbinfocfg() + # stblist = [] + # # the supertable config for each type in column_tag_types + # column_tag_types = ["INT","FLOAT","BIGINT","TINYINT","SMALLINT","DOUBLE","BINARY","NCHAR"] + # for i in range(len(column_tag_types)): + # tagtype = { + # "INT": 0, + # "FLOAT": 0, + # "BIGINT": 0, + # "TINYINT": 0, + # "SMALLINT": 0, + # "DOUBLE": 0, + # "BINARY": 0, + # "NCHAR": 0 + # } + # columntype = tagtype.copy() + # tagtype["INT"] = 2 + # if column_tag_types[i] == "BINARY": + # columntype[column_tag_types[i]] = [509, 10] + # elif column_tag_types[i] == "NCHAR": + # columntype[column_tag_types[i]] = [350, 10] + # else: + # columntype[column_tag_types[i]] = 1021 + # supertable = self.stbcfg( + # stb=f"stb{i}", + # child_tab_count=2, + # prechildtab=f"t{i}", + # columns=columntype, + # tags=tagtype + # ) + # stblist.append(supertable) + # self.taosdemotable(self.createinsertfile(db=db, stbs=stblist)) + + # tdLog.printNoPrefix("==========step2: execute query operation==========") + # tdLog.printNoPrefix("==========execute query operation==========") + sqls = { + "nchar":"select * from t00,t01 where t00.ts=t01.ts" + } + for type, sql in sqls.items(): + result_file = f"/tmp/queryResult_{type}.log" + self.taosdemotable(self.createqueryfile(sql), resultfile=result_file) + if result_file: + # tdLog.printNoPrefix(f"execute type {type} sql, the sql is: {sql}") + print(f"execute type {type} sql, the sql is: {sql}") + max_sql_time_cmd = f''' + grep Spent {result_file} | awk 'NR==1{{max=$7;next}}{{max=max>$7?max:$7}}END{{print "Max=",max,"s"}}' + ''' + max_sql_time = subprocess.check_output(max_sql_time_cmd, shell=True).decode("UTF-8") + # tdLog.printNoPrefix(f"type: {type} sql time : {max_sql_time}") + print(f"type: {type} sql time : {max_sql_time}") + + min_sql_time_cmd = f''' + grep Spent {result_file} | awk 'NR==1{{min=$7;next}}{{min=min<$7?min:$7}}END{{print "Min=",min,"s"}}' + ''' + min_sql_time = subprocess.check_output(min_sql_time_cmd, shell=True).decode("UTF-8") + # tdLog.printNoPrefix(f"type: {type} sql time : {min_sql_time}") + print(f"type: {type} sql time : {min_sql_time}") + + avg_sql_time_cmd = f''' + grep Spent {result_file} |awk '{{sum+=$7}}END{{print "Average=",sum/NR,"s"}}' + ''' + avg_sql_time = subprocess.check_output(avg_sql_time_cmd, shell=True).decode("UTF-8") + # tdLog.printNoPrefix(f"type: {type} sql time : {avg_sql_time}") + print(f"type: {type} sql time : {avg_sql_time}") + + # tdLog.printNoPrefix(f"==========type {type} sql is over==========") + + # tdLog.printNoPrefix("==========query operation is over==========") + + self.droptmpfile() + # tdLog.printNoPrefix("==========tmp file has been deleted==========") + + # def stop(self): + # tdSql.close() + # tdLog.success(f"{__file__} successfully executed") + +# tdCases.addLinux(__file__, TDTestCase()) +# tdCases.addWindows(__file__, TDTestCase()) + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument( + '-r', + '--remove-cache', + action='store_true', + default=False, + help='clear cache before query (default: False)') + parser.add_argument( + '-d', + '--database-name', + action='store', + default='db', + type=str, + help='Database name to be created (default: db)') + parser.add_argument( + '-k', + '--keep-time', + action='store', + default=36500, + type=int, + help='Database keep parameters (default: 36500)') + + args = parser.parse_args() + jointest = JoinPerf(args.remove_cache, 
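        # Aside: a usage sketch for this harness, using the flags defined by the
        # argparse block above (-r drops the OS page cache via
        # /proc/sys/vm/drop_caches and therefore needs root; note that the
        # check_output call doing so would also need shell=True for the
        # redirection to take effect):
        #
        #   python3 joinPerformance.py                     # defaults: db, keep=36500
        #   python3 joinPerformance.py -r -d perfdb -k 3650
        #
        # run() then greps taosdemo's "Spent" lines and reduces them with the awk
        # pipelines shown above to report max/min/average query latency.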
args.database_name, args.keep_time) + jointest.run() \ No newline at end of file From 0840dce0e04391a8949ac795c76b699e5dcc891b Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Thu, 15 Apr 2021 14:03:33 +0800 Subject: [PATCH 171/178] taos crash on windows issue --- src/kit/shell/src/shellWindows.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/kit/shell/src/shellWindows.c b/src/kit/shell/src/shellWindows.c index 11c1ac34d8..9c778491cc 100644 --- a/src/kit/shell/src/shellWindows.c +++ b/src/kit/shell/src/shellWindows.c @@ -199,7 +199,7 @@ void updateBuffer(Command *cmd) { } int isReadyGo(Command *cmd) { - char total[MAX_COMMAND_SIZE]; + char *total = malloc(MAX_COMMAND_SIZE); memset(total, 0, MAX_COMMAND_SIZE); sprintf(total, "%s%s", cmd->buffer, cmd->command); From 1e9e8acdae7f2aafed470dbb25ca782f09ce36c6 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Thu, 15 Apr 2021 14:07:58 +0800 Subject: [PATCH 172/178] rebuild topbottom struct --- src/query/src/qAggMain.c | 48 ++++++++++++++++++++++------------------ 1 file changed, 27 insertions(+), 21 deletions(-) diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index e47545da95..505057ef33 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -2482,6 +2482,29 @@ static STopBotInfo *getTopBotOutputInfo(SQLFunctionCtx *pCtx) { } } + +/* + * keep the intermediate results during scan data blocks in the format of: + * +-----------------------------------+-------------one value pair-----------+------------next value pair-----------+ + * |-------------pointer area----------|----ts---+-----+-----n tags-----------|----ts---+-----+-----n tags-----------| + * +..[Value Pointer1][Value Pointer2].|timestamp|value|tags1|tags2|....|tagsn|timestamp|value|tags1|tags2|....|tagsn+ + */ +static void buildTopBotStruct(STopBotInfo *pTopBotInfo, SQLFunctionCtx *pCtx) { + char *tmp = (char *)pTopBotInfo + sizeof(STopBotInfo); + pTopBotInfo->res = (tValuePair**) tmp; + tmp += POINTER_BYTES * pCtx->param[0].i64; + + size_t size = sizeof(tValuePair) + pCtx->tagInfo.tagsLen; +// assert(pCtx->param[0].i64 > 0); + + for (int32_t i = 0; i < pCtx->param[0].i64; ++i) { + pTopBotInfo->res[i] = (tValuePair*) tmp; + pTopBotInfo->res[i]->pTags = tmp + sizeof(tValuePair); + tmp += size; + } +} + + bool topbot_datablock_filter(SQLFunctionCtx *pCtx, const char *minval, const char *maxval) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); if (pResInfo == NULL) { @@ -2495,6 +2518,10 @@ bool topbot_datablock_filter(SQLFunctionCtx *pCtx, const char *minval, const cha return true; } + if ((void *)pTopBotInfo->res[0] != (void *)((char *)pTopBotInfo + sizeof(STopBotInfo) + POINTER_BYTES * pCtx->param[0].i64)) { + buildTopBotStruct(pTopBotInfo, pCtx); + } + tValuePair **pRes = (tValuePair**) pTopBotInfo->res; if (pCtx->functionId == TSDB_FUNC_TOP) { @@ -2534,27 +2561,6 @@ bool topbot_datablock_filter(SQLFunctionCtx *pCtx, const char *minval, const cha } } -/* - * keep the intermediate results during scan data blocks in the format of: - * +-----------------------------------+-------------one value pair-----------+------------next value pair-----------+ - * |-------------pointer area----------|----ts---+-----+-----n tags-----------|----ts---+-----+-----n tags-----------| - * +..[Value Pointer1][Value Pointer2].|timestamp|value|tags1|tags2|....|tagsn|timestamp|value|tags1|tags2|....|tagsn+ - */ -static void buildTopBotStruct(STopBotInfo *pTopBotInfo, SQLFunctionCtx *pCtx) { - char *tmp = (char *)pTopBotInfo 
+ sizeof(STopBotInfo); - pTopBotInfo->res = (tValuePair**) tmp; - tmp += POINTER_BYTES * pCtx->param[0].i64; - - size_t size = sizeof(tValuePair) + pCtx->tagInfo.tagsLen; -// assert(pCtx->param[0].i64 > 0); - - for (int32_t i = 0; i < pCtx->param[0].i64; ++i) { - pTopBotInfo->res[i] = (tValuePair*) tmp; - pTopBotInfo->res[i]->pTags = tmp + sizeof(tValuePair); - tmp += size; - } -} - static bool top_bottom_function_setup(SQLFunctionCtx *pCtx) { if (!function_setup(pCtx)) { return false; From ab54c5d3a1497636e8c3a5b70ab3d7a37b429c60 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Thu, 15 Apr 2021 15:58:48 +0800 Subject: [PATCH 173/178] Run performance test on master branch --- tests/perftest-scripts/perftest-query.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/perftest-scripts/perftest-query.sh b/tests/perftest-scripts/perftest-query.sh index cc44b22418..0325f552b1 100755 --- a/tests/perftest-scripts/perftest-query.sh +++ b/tests/perftest-scripts/perftest-query.sh @@ -40,8 +40,8 @@ function buildTDengine { git remote update > /dev/null git reset --hard HEAD - git checkout develop - REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop` + git checkout master + REMOTE_COMMIT=`git rev-parse --short remotes/origin/master` LOCAL_COMMIT=`git rev-parse --short @` echo " LOCAL: $LOCAL_COMMIT" From 42d0f5a3dcb53267a75ad8d49fe9ddc634bd1f0f Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 15 Apr 2021 17:29:27 +0800 Subject: [PATCH 174/178] [TD-3783]: taosdemo for windows generates rand string invalid. (#5828) --- src/kit/taosdemo/taosdemo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index f531cc15f7..1bb9b8767a 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -1063,7 +1063,7 @@ static void rand_string(char *str, int size) { //--size; int n; for (n = 0; n < size - 1; n++) { - int key = rand_tinyint() % (int)(sizeof(charset) - 1); + int key = abs(rand_tinyint()) % (int)(sizeof(charset) - 1); str[n] = charset[key]; } str[n] = 0; From b9f3474e2766261549ce0f31f5c690d5c7bd2635 Mon Sep 17 00:00:00 2001 From: dapan1121 <72057773+dapan1121@users.noreply.github.com> Date: Fri, 16 Apr 2021 11:20:22 +0800 Subject: [PATCH 175/178] Update shellWindows.c free mem --- src/kit/shell/src/shellWindows.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/kit/shell/src/shellWindows.c b/src/kit/shell/src/shellWindows.c index 9c778491cc..87d11a3516 100644 --- a/src/kit/shell/src/shellWindows.c +++ b/src/kit/shell/src/shellWindows.c @@ -206,8 +206,12 @@ int isReadyGo(Command *cmd) { char *reg_str = "(^.*;\\s*$)|(^\\s*$)|(^\\s*exit\\s*$)|(^\\s*q\\s*$)|(^\\s*quit\\s*$)|(^" "\\s*clear\\s*$)"; - if (regex_match(total, reg_str, REG_EXTENDED | REG_ICASE)) return 1; + if (regex_match(total, reg_str, REG_EXTENDED | REG_ICASE)) { + free(total); + return 1; + } + free(total); return 0; } From a26c8d2b811f66df9eb93b44279136b089a84cce Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 16 Apr 2021 11:45:13 +0800 Subject: [PATCH 176/178] Revert "[TD-3079]: use multi-level storage with vnode's wal files" --- src/vnode/src/vnodeMain.c | 39 +++------------------------------------ 1 file changed, 3 insertions(+), 36 deletions(-) diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index 0921c5ce48..ee8ed9e0fa 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -232,28 +232,9 @@ int32_t vnodeAlter(void *vparam, SCreateVnodeMsg 
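/*
 * Aside on the rand_string fix in PATCH 174 above: in C the result of % takes
 * the sign of the dividend, so a negative rand_tinyint() yields a negative key
 * and charset[key] reads out of bounds. Worked values (modulus shown as 61
 * purely for illustration):
 *
 *   before: key = -37 % 61        -> -37, invalid index
 *   after:  key = abs(-37) % 61   ->  37, valid index
 */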
*pVnodeCfg) { return code; } -static void vnodeFindWalRootDir(int32_t vgId, char *walRootDir) { - char vnodeDir[TSDB_FILENAME_LEN] = "\0"; - snprintf(vnodeDir, TSDB_FILENAME_LEN, "/vnode/vnode%d/wal", vgId); - - TDIR *tdir = tfsOpendir(vnodeDir); - if (!tdir) return; - - const TFILE *tfile = tfsReaddir(tdir); - if (!tfile) { - tfsClosedir(tdir); - return; - } - - sprintf(walRootDir, "%s/vnode/vnode%d", TFS_DISK_PATH(tfile->level, tfile->id), vgId); - - tfsClosedir(tdir); -} - int32_t vnodeOpen(int32_t vgId) { char temp[TSDB_FILENAME_LEN * 3]; char rootDir[TSDB_FILENAME_LEN * 2]; - char walRootDir[TSDB_FILENAME_LEN * 2] = {0}; snprintf(rootDir, TSDB_FILENAME_LEN * 2, "%s/vnode%d", tsVnodeDir, vgId); SVnodeObj *pVnode = calloc(sizeof(SVnodeObj), 1); @@ -340,21 +321,7 @@ int32_t vnodeOpen(int32_t vgId) { } } - // walRootDir for wal & syncInfo.path (not empty dir of /vnode/vnode{pVnode->vgId}/wal) - vnodeFindWalRootDir(pVnode->vgId, walRootDir); - if (walRootDir[0] == 0) { - int level = -1, id = -1; - - tfsAllocDisk(TFS_PRIMARY_LEVEL, &level, &id); - if (level < 0 || id < 0) { - vnodeCleanUp(pVnode); - return terrno; - } - - sprintf(walRootDir, "%s/vnode/vnode%d", TFS_DISK_PATH(level, id), vgId); - } - - sprintf(temp, "%s/wal", walRootDir); + sprintf(temp, "%s/wal", rootDir); pVnode->walCfg.vgId = pVnode->vgId; pVnode->wal = walOpen(temp, &pVnode->walCfg); if (pVnode->wal == NULL) { @@ -386,7 +353,7 @@ int32_t vnodeOpen(int32_t vgId) { pVnode->events = NULL; - vDebug("vgId:%d, vnode is opened in %s - %s, pVnode:%p", pVnode->vgId, rootDir, walRootDir, pVnode); + vDebug("vgId:%d, vnode is opened in %s, pVnode:%p", pVnode->vgId, rootDir, pVnode); vnodeAddIntoHash(pVnode); @@ -394,7 +361,7 @@ int32_t vnodeOpen(int32_t vgId) { syncInfo.vgId = pVnode->vgId; syncInfo.version = pVnode->version; syncInfo.syncCfg = pVnode->syncCfg; - tstrncpy(syncInfo.path, walRootDir, TSDB_FILENAME_LEN); + tstrncpy(syncInfo.path, rootDir, TSDB_FILENAME_LEN); syncInfo.getWalInfoFp = vnodeGetWalInfo; syncInfo.writeToCacheFp = vnodeWriteToCache; syncInfo.confirmForward = vnodeConfirmForard; From 31e82030d957489962558a4ca8275fe2437aa2b0 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 17 Apr 2021 15:53:16 +0800 Subject: [PATCH 177/178] [td-3779] : fix bug during filter data block when handling last query. --- src/query/src/qAggMain.c | 4 ++-- src/query/src/qExecutor.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index e47545da95..bbfc77d3f3 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -744,7 +744,7 @@ static int32_t firstDistFuncRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32 if (pInfo->hasResult != DATA_SET_FLAG) { return BLK_DATA_ALL_NEEDED; } else { // data in current block is not earlier than current result - return (pInfo->ts <= w->skey) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED; + return (pInfo->ts <= w->skey) ? BLK_DATA_DISCARD : BLK_DATA_ALL_NEEDED; } } @@ -764,7 +764,7 @@ static int32_t lastDistFuncRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_ if (pInfo->hasResult != DATA_SET_FLAG) { return BLK_DATA_ALL_NEEDED; } else { - return (pInfo->ts > w->ekey) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED; + return (pInfo->ts > w->ekey) ? 
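/*
 * Aside: a worked example of the filter above. Suppose the running last()
 * result holds pInfo->ts = 5000 and the candidate block spans
 * [w->skey, w->ekey] = [3000, 4000]. Then pInfo->ts > w->ekey, so no row in
 * that block can improve the result and the whole block is dropped without
 * being loaded; firstDistFuncRequired above is the mirror image, dropping
 * blocks with pInfo->ts <= w->skey.
 */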
BLK_DATA_DISCARD : BLK_DATA_ALL_NEEDED; } } diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index dc524d95a9..00aed20078 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -2639,7 +2639,7 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa } SDataBlockInfo* pBlockInfo = &pBlock->info; - if ((*status) == BLK_DATA_NO_NEEDED) { + if ((*status) == BLK_DATA_NO_NEEDED || (*status) == BLK_DATA_DISCARD) { qDebug("QInfo:%"PRIu64" data block discard, brange:%" PRId64 "-%" PRId64 ", rows:%d", pQInfo->qId, pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows); pCost->discardBlocks += 1; From b0bf30aac3890794bc16eb684149d72008398715 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 17 Apr 2021 23:29:01 +0800 Subject: [PATCH 178/178] [td-3779] --- src/query/src/qAggMain.c | 4 ++-- src/query/src/qExecutor.c | 39 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 40 insertions(+), 3 deletions(-) diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index bbfc77d3f3..e47545da95 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -744,7 +744,7 @@ static int32_t firstDistFuncRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32 if (pInfo->hasResult != DATA_SET_FLAG) { return BLK_DATA_ALL_NEEDED; } else { // data in current block is not earlier than current result - return (pInfo->ts <= w->skey) ? BLK_DATA_DISCARD : BLK_DATA_ALL_NEEDED; + return (pInfo->ts <= w->skey) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED; } } @@ -764,7 +764,7 @@ static int32_t lastDistFuncRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_ if (pInfo->hasResult != DATA_SET_FLAG) { return BLK_DATA_ALL_NEEDED; } else { - return (pInfo->ts > w->ekey) ? BLK_DATA_DISCARD : BLK_DATA_ALL_NEEDED; + return (pInfo->ts > w->ekey) ? 
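/*
 * Aside: PATCH 178 deliberately rolls the two returns above back to
 * BLK_DATA_NO_NEEDED -- a single aggregate answering "not needed" must not
 * discard a block that another output column still has to read. The discard
 * decision is recentralized in updateBlockLoadStatus() in the qExecutor hunk
 * below, once every output function has been inspected.
 */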
BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED; } } diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 00aed20078..c4d8371d48 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -2137,6 +2137,40 @@ static bool onlyFirstQuery(SQuery *pQuery) { return onlyOneQueryType(pQuery, TSD static bool onlyLastQuery(SQuery *pQuery) { return onlyOneQueryType(pQuery, TSDB_FUNC_LAST, TSDB_FUNC_LAST_DST); } +static int32_t updateBlockLoadStatus(SQuery *pQuery, int32_t status) { + bool hasFirstLastFunc = false; + bool hasOtherFunc = false; + + if (status == BLK_DATA_ALL_NEEDED || status == BLK_DATA_DISCARD) { + return status; + } + + for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { + int32_t functionId = pQuery->pExpr1[i].base.functionId; + + if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_TAG || + functionId == TSDB_FUNC_TAG_DUMMY) { + continue; + } + + if (functionId == TSDB_FUNC_FIRST_DST || functionId == TSDB_FUNC_LAST_DST) { + hasFirstLastFunc = true; + } else { + hasOtherFunc = true; + } + } + + if (hasFirstLastFunc && status == BLK_DATA_NO_NEEDED) { + if(!hasOtherFunc) { + return BLK_DATA_DISCARD; + } else{ + return BLK_DATA_ALL_NEEDED; + } + } + + return status; +} + static void doExchangeTimeWindow(SQInfo* pQInfo, STimeWindow* win) { SQuery* pQuery = &pQInfo->query; size_t t = taosArrayGetSize(pQuery->tableGroupInfo.pGroupList); @@ -2578,11 +2612,12 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa pBlock->pDataBlock = NULL; pBlock->pBlockStatis = NULL; + SQInfo* pQInfo = pRuntimeEnv->qinfo; SQuery* pQuery = pRuntimeEnv->pQuery; + int64_t groupId = pQuery->current->groupIndex; bool ascQuery = QUERY_IS_ASC_QUERY(pQuery); - SQInfo* pQInfo = pRuntimeEnv->qinfo; SQueryCostInfo* pCost = &pQInfo->summary; if (pRuntimeEnv->pTsBuf != NULL) { @@ -2639,6 +2674,8 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa } SDataBlockInfo* pBlockInfo = &pBlock->info; + *status = updateBlockLoadStatus(pRuntimeEnv->pQuery, *status); + if ((*status) == BLK_DATA_NO_NEEDED || (*status) == BLK_DATA_DISCARD) { qDebug("QInfo:%"PRIu64" data block discard, brange:%" PRId64 "-%" PRId64 ", rows:%d", pQInfo->qId, pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
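/*
 * Aside: a condensed model of the rule updateBlockLoadStatus() adds above, as
 * a self-contained sketch (names and codes are illustrative stand-ins for the
 * TDengine enums, and the TS/TAG pseudo-columns are ignored here):
 *
 *   enum { NO_NEEDED, ALL_NEEDED, DISCARD };
 *
 *   static int upgrade(int status, int hasFirstLast, int hasOther) {
 *     if (status != NO_NEEDED) return status;   // ALL_NEEDED/DISCARD pass through
 *     if (!hasFirstLast)       return status;   // nothing to upgrade
 *     return hasOther ? ALL_NEEDED : DISCARD;   // mixed funcs must load the rows
 *   }
 *
 * e.g. "select last(v) ..." may skip a filtered block entirely, while
 * "select last(v), count(*) ..." still has to load it for count(*).
 */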