From 6b066bcf1a809b5d65873ed9929fed5b5ace80ba Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Sun, 8 Nov 2020 14:46:12 +0000 Subject: [PATCH 01/52] TD-1919 --- src/inc/tsdb.h | 4 ++-- src/mnode/src/mnodeSdb.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index 02ca99e4b8..3ef618ab36 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -126,7 +126,7 @@ uint32_t tsdbGetFileInfo(TSDB_REPO_T *repo, char *name, uint32_t *index, uint32_ // the TSDB repository info typedef struct STsdbRepoInfo { STsdbCfg tsdbCfg; - int64_t version; // version of the repository + uint64_t version; // version of the repository int64_t tsdbTotalDataSize; // the original inserted data size int64_t tsdbTotalDiskSize; // the total disk size taken by this TSDB repository // TODO: Other informations to add @@ -136,7 +136,7 @@ STsdbRepoInfo *tsdbGetStatus(TSDB_REPO_T *pRepo); // the meter information report structure typedef struct { STableCfg tableCfg; - int64_t version; + uint64_t version; int64_t tableTotalDataSize; // In bytes int64_t tableTotalDiskSize; // In bytes } STableInfo; diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c index 6b6a49db93..e5c8b752b5 100644 --- a/src/mnode/src/mnodeSdb.c +++ b/src/mnode/src/mnodeSdb.c @@ -71,7 +71,7 @@ typedef struct _SSdbTable { typedef struct { ESyncRole role; ESdbStatus status; - int64_t version; + uint64_t version; int64_t sync; void * wal; SSyncCfg cfg; From 7314a0dd088bb28849e926ecd13da5abe9f16a4a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sun, 8 Nov 2020 23:17:01 +0800 Subject: [PATCH 02/52] [TD-1978] --- src/client/src/tscFunctionImpl.c | 162 +--------- src/client/src/tscLocalMerge.c | 8 +- src/common/src/ttypes.c | 34 +-- src/inc/taosdef.h | 86 +++--- src/query/inc/qFill.h | 48 +-- src/query/src/qExecutor.c | 10 +- src/query/src/qFill.c | 503 +++++++++++++++---------------- src/util/src/tcompare.c | 1 - 8 files changed, 329 insertions(+), 523 deletions(-) diff --git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c index affa4aee83..50709ecf36 100644 --- a/src/client/src/tscFunctionImpl.c +++ b/src/client/src/tscFunctionImpl.c @@ -28,6 +28,7 @@ #include "tscompression.h" #include "tsqlfunction.h" #include "tutil.h" +#include "ttype.h" #define GET_INPUT_CHAR(x) (((char *)((x)->aInputElemBuf)) + ((x)->startOffset) * ((x)->inputBytes)) #define GET_INPUT_CHAR_INDEX(x, y) (GET_INPUT_CHAR(x) + (y) * (x)->inputBytes) @@ -2479,28 +2480,8 @@ static void percentile_function(SQLFunctionCtx *pCtx) { continue; } - // TODO extract functions double v = 0; - switch (pCtx->inputType) { - case TSDB_DATA_TYPE_TINYINT: - v = GET_INT8_VAL(data); - break; - case TSDB_DATA_TYPE_SMALLINT: - v = GET_INT16_VAL(data); - break; - case TSDB_DATA_TYPE_BIGINT: - v = (double)(GET_INT64_VAL(data)); - break; - case TSDB_DATA_TYPE_FLOAT: - v = GET_FLOAT_VAL(data); - break; - case TSDB_DATA_TYPE_DOUBLE: - v = GET_DOUBLE_VAL(data); - break; - default: - v = GET_INT32_VAL(data); - break; - } + GET_TYPED_DATA(v, pCtx->inputType, data); if (v < GET_DOUBLE_VAL(&pInfo->minval)) { SET_DOUBLE_VAL(&pInfo->minval, v); @@ -2541,30 +2522,10 @@ static void percentile_function_f(SQLFunctionCtx *pCtx, int32_t index) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); SPercentileInfo *pInfo = (SPercentileInfo *)GET_ROWCELL_INTERBUF(pResInfo); - if (pInfo->stage == 0) { - // TODO extract functions + double v = 0; - switch (pCtx->inputType) { - case TSDB_DATA_TYPE_TINYINT: - v = GET_INT8_VAL(pData); - break; - case 
TSDB_DATA_TYPE_SMALLINT: - v = GET_INT16_VAL(pData); - break; - case TSDB_DATA_TYPE_BIGINT: - v = (double)(GET_INT64_VAL(pData)); - break; - case TSDB_DATA_TYPE_FLOAT: - v = GET_FLOAT_VAL(pData); - break; - case TSDB_DATA_TYPE_DOUBLE: - v = GET_DOUBLE_VAL(pData); - break; - default: - v = GET_INT32_VAL(pData); - break; - } + GET_TYPED_DATA(v, pCtx->inputType, pData); if (v < GET_DOUBLE_VAL(&pInfo->minval)) { SET_DOUBLE_VAL(&pInfo->minval, v); @@ -2653,29 +2614,9 @@ static void apercentile_function(SQLFunctionCtx *pCtx) { } notNullElems += 1; + double v = 0; - - switch (pCtx->inputType) { - case TSDB_DATA_TYPE_TINYINT: - v = GET_INT8_VAL(data); - break; - case TSDB_DATA_TYPE_SMALLINT: - v = GET_INT16_VAL(data); - break; - case TSDB_DATA_TYPE_BIGINT: - v = (double)(GET_INT64_VAL(data)); - break; - case TSDB_DATA_TYPE_FLOAT: - v = GET_FLOAT_VAL(data); - break; - case TSDB_DATA_TYPE_DOUBLE: - v = GET_DOUBLE_VAL(data); - break; - default: - v = GET_INT32_VAL(data); - break; - } - + GET_TYPED_DATA(v, pCtx->inputType, data); tHistogramAdd(&pInfo->pHisto, v); } @@ -2700,26 +2641,7 @@ static void apercentile_function_f(SQLFunctionCtx *pCtx, int32_t index) { SAPercentileInfo *pInfo = getAPerctInfo(pCtx); double v = 0; - switch (pCtx->inputType) { - case TSDB_DATA_TYPE_TINYINT: - v = GET_INT8_VAL(pData); - break; - case TSDB_DATA_TYPE_SMALLINT: - v = GET_INT16_VAL(pData); - break; - case TSDB_DATA_TYPE_BIGINT: - v = (double)(GET_INT64_VAL(pData)); - break; - case TSDB_DATA_TYPE_FLOAT: - v = GET_FLOAT_VAL(pData); - break; - case TSDB_DATA_TYPE_DOUBLE: - v = GET_DOUBLE_VAL(pData); - break; - default: - v = GET_INT32_VAL(pData); - break; - } + GET_TYPED_DATA(v, pCtx->inputType, pData); tHistogramAdd(&pInfo->pHisto, v); @@ -4142,22 +4064,7 @@ static void rate_function(SQLFunctionCtx *pCtx) { notNullElems++; int64_t v = 0; - switch (pCtx->inputType) { - case TSDB_DATA_TYPE_TINYINT: - v = (int64_t)GET_INT8_VAL(pData); - break; - case TSDB_DATA_TYPE_SMALLINT: - v = (int64_t)GET_INT16_VAL(pData); - break; - case TSDB_DATA_TYPE_INT: - v = (int64_t)GET_INT32_VAL(pData); - break; - case TSDB_DATA_TYPE_BIGINT: - v = (int64_t)GET_INT64_VAL(pData); - break; - default: - assert(0); - } + GET_TYPED_DATA(v, pCtx->inputType, pData); if ((INT64_MIN == pRateInfo->firstValue) || (INT64_MIN == pRateInfo->firstKey)) { pRateInfo->firstValue = v; @@ -4207,22 +4114,7 @@ static void rate_function_f(SQLFunctionCtx *pCtx, int32_t index) { TSKEY *primaryKey = pCtx->ptsList; int64_t v = 0; - switch (pCtx->inputType) { - case TSDB_DATA_TYPE_TINYINT: - v = (int64_t)GET_INT8_VAL(pData); - break; - case TSDB_DATA_TYPE_SMALLINT: - v = (int64_t)GET_INT16_VAL(pData); - break; - case TSDB_DATA_TYPE_INT: - v = (int64_t)GET_INT32_VAL(pData); - break; - case TSDB_DATA_TYPE_BIGINT: - v = (int64_t)GET_INT64_VAL(pData); - break; - default: - assert(0); - } + GET_TYPED_DATA(v, pCtx->inputType, pData); if ((INT64_MIN == pRateInfo->firstValue) || (INT64_MIN == pRateInfo->firstKey)) { pRateInfo->firstValue = v; @@ -4349,22 +4241,7 @@ static void irate_function(SQLFunctionCtx *pCtx) { notNullElems++; int64_t v = 0; - switch (pCtx->inputType) { - case TSDB_DATA_TYPE_TINYINT: - v = (int64_t)GET_INT8_VAL(pData); - break; - case TSDB_DATA_TYPE_SMALLINT: - v = (int64_t)GET_INT16_VAL(pData); - break; - case TSDB_DATA_TYPE_INT: - v = (int64_t)GET_INT32_VAL(pData); - break; - case TSDB_DATA_TYPE_BIGINT: - v = (int64_t)GET_INT64_VAL(pData); - break; - default: - assert(0); - } + GET_TYPED_DATA(v, pCtx->inputType, pData); // TODO: calc once if only call this 
function once ???? if ((INT64_MIN == pRateInfo->lastKey) || (INT64_MIN == pRateInfo->lastValue)) { @@ -4409,23 +4286,8 @@ static void irate_function_f(SQLFunctionCtx *pCtx, int32_t index) { TSKEY *primaryKey = pCtx->ptsList; int64_t v = 0; - switch (pCtx->inputType) { - case TSDB_DATA_TYPE_TINYINT: - v = (int64_t)GET_INT8_VAL(pData); - break; - case TSDB_DATA_TYPE_SMALLINT: - v = (int64_t)GET_INT16_VAL(pData); - break; - case TSDB_DATA_TYPE_INT: - v = (int64_t)GET_INT32_VAL(pData); - break; - case TSDB_DATA_TYPE_BIGINT: - v = (int64_t)GET_INT64_VAL(pData); - break; - default: - assert(0); - } - + GET_TYPED_DATA(v, pCtx->inputType, pData); + pRateInfo->firstKey = pRateInfo->lastKey; pRateInfo->firstValue = pRateInfo->lastValue; diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index c67edf5b5a..ee72a96e47 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -913,7 +913,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO } while (1) { - int64_t newRows = taosGenerateDataBlock(pFillInfo, pResPages, pLocalReducer->resColModel->capacity); + int64_t newRows = taosFillResultDataBlock(pFillInfo, pResPages, pLocalReducer->resColModel->capacity); if (pQueryInfo->limit.offset < newRows) { newRows -= pQueryInfo->limit.offset; @@ -942,7 +942,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO } // all output in current group are completed - int32_t totalRemainRows = (int32_t)getFilledNumOfRes(pFillInfo, actualETime, pLocalReducer->resColModel->capacity); + int32_t totalRemainRows = (int32_t)getNumOfResWithFill(pFillInfo, actualETime, pLocalReducer->resColModel->capacity); if (totalRemainRows <= 0) { break; } @@ -1291,7 +1291,7 @@ static bool doBuildFilledResultForGroup(SSqlObj *pSql) { int64_t etime = *(int64_t *)(pFinalDataBuf->data + TSDB_KEYSIZE * (pFillInfo->numOfRows - 1)); // the first column must be the timestamp column - int32_t rows = (int32_t)getFilledNumOfRes(pFillInfo, etime, pLocalReducer->resColModel->capacity); + int32_t rows = (int32_t) getNumOfResWithFill(pFillInfo, etime, pLocalReducer->resColModel->capacity); if (rows > 0) { // do fill gap doFillResult(pSql, pLocalReducer, false); } @@ -1320,7 +1320,7 @@ static bool doHandleLastRemainData(SSqlObj *pSql) { ((pRes->numOfRowsGroup < pQueryInfo->limit.limit && pQueryInfo->limit.limit > 0) || (pQueryInfo->limit.limit < 0))) { int64_t etime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? 
pQueryInfo->window.ekey : pQueryInfo->window.skey; - int32_t rows = (int32_t)getFilledNumOfRes(pFillInfo, etime, pLocalReducer->resColModel->capacity); + int32_t rows = (int32_t)getNumOfResWithFill(pFillInfo, etime, pLocalReducer->resColModel->capacity); if (rows > 0) { doFillResult(pSql, pLocalReducer, true); } diff --git a/src/common/src/ttypes.c b/src/common/src/ttypes.c index 50554ce08e..f28481977f 100644 --- a/src/common/src/ttypes.c +++ b/src/common/src/ttypes.c @@ -355,32 +355,6 @@ bool isValidDataType(int32_t type) { return type >= TSDB_DATA_TYPE_NULL && type <= TSDB_DATA_TYPE_NCHAR; } -//bool isNull(const char *val, int32_t type) { -// switch (type) { -// case TSDB_DATA_TYPE_BOOL: -// return *(uint8_t *)val == TSDB_DATA_BOOL_NULL; -// case TSDB_DATA_TYPE_TINYINT: -// return *(uint8_t *)val == TSDB_DATA_TINYINT_NULL; -// case TSDB_DATA_TYPE_SMALLINT: -// return *(uint16_t *)val == TSDB_DATA_SMALLINT_NULL; -// case TSDB_DATA_TYPE_INT: -// return *(uint32_t *)val == TSDB_DATA_INT_NULL; -// case TSDB_DATA_TYPE_BIGINT: -// case TSDB_DATA_TYPE_TIMESTAMP: -// return *(uint64_t *)val == TSDB_DATA_BIGINT_NULL; -// case TSDB_DATA_TYPE_FLOAT: -// return *(uint32_t *)val == TSDB_DATA_FLOAT_NULL; -// case TSDB_DATA_TYPE_DOUBLE: -// return *(uint64_t *)val == TSDB_DATA_DOUBLE_NULL; -// case TSDB_DATA_TYPE_NCHAR: -// return *(uint32_t*) varDataVal(val) == TSDB_DATA_NCHAR_NULL; -// case TSDB_DATA_TYPE_BINARY: -// return *(uint8_t *) varDataVal(val) == TSDB_DATA_BINARY_NULL; -// default: -// return false; -// }; -//} - void setVardataNull(char* val, int32_t type) { if (type == TSDB_DATA_TYPE_BINARY) { varDataSetLen(val, sizeof(int8_t)); @@ -433,14 +407,10 @@ void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems) { *(uint64_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_DOUBLE_NULL; } break; - case TSDB_DATA_TYPE_NCHAR: // todo : without length? - for (int32_t i = 0; i < numOfElems; ++i) { - *(uint32_t *)(val + i * bytes) = TSDB_DATA_NCHAR_NULL; - } - break; + case TSDB_DATA_TYPE_NCHAR: case TSDB_DATA_TYPE_BINARY: for (int32_t i = 0; i < numOfElems; ++i) { - *(uint8_t *)(val + i * bytes) = TSDB_DATA_BINARY_NULL; + setVardataNull(val + i * bytes, type); } break; default: { diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index aee60da201..8eb6661273 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -61,13 +61,23 @@ typedef struct tstr { // Bytes for each type. extern const int32_t TYPE_BYTES[11]; + // TODO: replace and remove code below -#define CHAR_BYTES sizeof(char) -#define SHORT_BYTES sizeof(int16_t) -#define INT_BYTES sizeof(int32_t) -#define LONG_BYTES sizeof(int64_t) -#define FLOAT_BYTES sizeof(float) -#define DOUBLE_BYTES sizeof(double) +#define CHAR_BYTES sizeof(char) +#define SHORT_BYTES sizeof(int16_t) +#define INT_BYTES sizeof(int32_t) +#define LONG_BYTES sizeof(int64_t) +#define FLOAT_BYTES sizeof(float) +#define DOUBLE_BYTES sizeof(double) +#define POINTER_BYTES sizeof(void *) // 8 by default assert(sizeof(ptrdiff_t) == sizseof(void*) + +#define TSDB_KEYSIZE sizeof(TSKEY) + +#if LINUX +#define TSDB_NCHAR_SIZE sizeof(wchar_t) +#else +#define TSDB_NCHAR_SIZE sizeof(int32_t) +#endif // NULL definition #define TSDB_DATA_BOOL_NULL 0x02 @@ -101,10 +111,12 @@ extern const int32_t TYPE_BYTES[11]; #define TSDB_TIME_PRECISION_MILLI 0 #define TSDB_TIME_PRECISION_MICRO 1 #define TSDB_TIME_PRECISION_NANO 2 -#define TSDB_TICK_PER_SECOND(precision) ((precision)==TSDB_TIME_PRECISION_MILLI ? 1e3L : ((precision)==TSDB_TIME_PRECISION_MICRO ? 
1e6L : 1e9L)) #define TSDB_TIME_PRECISION_MILLI_STR "ms" #define TSDB_TIME_PRECISION_MICRO_STR "us" +#define TSDB_TIME_PRECISION_NANO_STR "ns" + +#define TSDB_TICK_PER_SECOND(precision) ((precision)==TSDB_TIME_PRECISION_MILLI ? 1e3L : ((precision)==TSDB_TIME_PRECISION_MICRO ? 1e6L : 1e9L)) #define T_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) #define T_APPEND_MEMBER(dst, ptr, type, member) \ @@ -118,15 +130,6 @@ do { \ (src) = (void *)((char *)src + sizeof(type));\ } while(0) -#define TSDB_KEYSIZE sizeof(TSKEY) - -#if LINUX - #define TSDB_NCHAR_SIZE sizeof(wchar_t) -#else - #define TSDB_NCHAR_SIZE 4 -#endif -//#define TSDB_CHAR_TERMINATED_SPACE 1 - #define GET_INT8_VAL(x) (*(int8_t *)(x)) #define GET_INT16_VAL(x) (*(int16_t *)(x)) #define GET_INT32_VAL(x) (*(int32_t *)(x)) @@ -172,7 +175,6 @@ typedef struct tDataTypeDescriptor { } tDataTypeDescriptor; extern tDataTypeDescriptor tDataTypeDesc[11]; -#define POINTER_BYTES sizeof(void *) // 8 by default assert(sizeof(ptrdiff_t) == sizseof(void*) bool isValidDataType(int32_t type); //bool isNull(const char *val, int32_t type); @@ -266,10 +268,6 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf #define TSDB_AUTH_LEN 16 #define TSDB_KEY_LEN 16 #define TSDB_VERSION_LEN 12 -#define TSDB_STREET_LEN 64 -#define TSDB_CITY_LEN 20 -#define TSDB_STATE_LEN 20 -#define TSDB_COUNTRY_LEN 20 #define TSDB_LOCALE_LEN 64 #define TSDB_TIMEZONE_LEN 96 #define TSDB_LABEL_LEN 8 @@ -388,27 +386,27 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf * 1. ordinary sub query for select * from super_table * 2. all sqlobj generated by createSubqueryObj with this flag */ -#define TSDB_QUERY_TYPE_SUBQUERY 0x02u -#define TSDB_QUERY_TYPE_STABLE_SUBQUERY 0x04u // two-stage subquery for super table +#define TSDB_QUERY_TYPE_SUBQUERY 0x02u +#define TSDB_QUERY_TYPE_STABLE_SUBQUERY 0x04u // two-stage subquery for super table -#define TSDB_QUERY_TYPE_TABLE_QUERY 0x08u // query ordinary table; below only apply to client side -#define TSDB_QUERY_TYPE_STABLE_QUERY 0x10u // query on super table -#define TSDB_QUERY_TYPE_JOIN_QUERY 0x20u // join query -#define TSDB_QUERY_TYPE_PROJECTION_QUERY 0x40u // select *,columns... query -#define TSDB_QUERY_TYPE_JOIN_SEC_STAGE 0x80u // join sub query at the second stage +#define TSDB_QUERY_TYPE_TABLE_QUERY 0x08u // query ordinary table; below only apply to client side +#define TSDB_QUERY_TYPE_STABLE_QUERY 0x10u // query on super table +#define TSDB_QUERY_TYPE_JOIN_QUERY 0x20u // join query +#define TSDB_QUERY_TYPE_PROJECTION_QUERY 0x40u // select *,columns... 
query +#define TSDB_QUERY_TYPE_JOIN_SEC_STAGE 0x80u // join sub query at the second stage -#define TSDB_QUERY_TYPE_TAG_FILTER_QUERY 0x400u -#define TSDB_QUERY_TYPE_INSERT 0x100u // insert type -#define TSDB_QUERY_TYPE_MULTITABLE_QUERY 0x200u -#define TSDB_QUERY_TYPE_STMT_INSERT 0x800u // stmt insert type +#define TSDB_QUERY_TYPE_TAG_FILTER_QUERY 0x400u +#define TSDB_QUERY_TYPE_INSERT 0x100u // insert type +#define TSDB_QUERY_TYPE_MULTITABLE_QUERY 0x200u +#define TSDB_QUERY_TYPE_STMT_INSERT 0x800u // stmt insert type #define TSDB_QUERY_HAS_TYPE(x, _type) (((x) & (_type)) != 0) #define TSDB_QUERY_SET_TYPE(x, _type) ((x) |= (_type)) #define TSDB_QUERY_CLEAR_TYPE(x, _type) ((x) &= (~_type)) #define TSDB_QUERY_RESET_TYPE(x) ((x) = TSDB_QUERY_TYPE_NON_TYPE) -#define TSDB_ORDER_ASC 1 -#define TSDB_ORDER_DESC 2 +#define TSDB_ORDER_ASC 1 +#define TSDB_ORDER_DESC 2 #define TSDB_DEFAULT_CLUSTER_HASH_SIZE 1 #define TSDB_DEFAULT_MNODES_HASH_SIZE 5 @@ -420,17 +418,17 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf #define TSDB_DEFAULT_STABLES_HASH_SIZE 100 #define TSDB_DEFAULT_CTABLES_HASH_SIZE 20000 -#define TSDB_PORT_DNODESHELL 0 -#define TSDB_PORT_DNODEDNODE 5 -#define TSDB_PORT_SYNC 10 -#define TSDB_PORT_HTTP 11 -#define TSDB_PORT_ARBITRATOR 12 +#define TSDB_PORT_DNODESHELL 0 +#define TSDB_PORT_DNODEDNODE 5 +#define TSDB_PORT_SYNC 10 +#define TSDB_PORT_HTTP 11 +#define TSDB_PORT_ARBITRATOR 12 -#define TAOS_QTYPE_RPC 0 -#define TAOS_QTYPE_FWD 1 -#define TAOS_QTYPE_WAL 2 -#define TAOS_QTYPE_CQ 3 -#define TAOS_QTYPE_QUERY 4 +#define TAOS_QTYPE_RPC 0 +#define TAOS_QTYPE_FWD 1 +#define TAOS_QTYPE_WAL 2 +#define TAOS_QTYPE_CQ 3 +#define TAOS_QTYPE_QUERY 4 typedef enum { TSDB_SUPER_TABLE = 0, // super table diff --git a/src/query/inc/qFill.h b/src/query/inc/qFill.h index 1b5aca77c4..9589d01cc4 100644 --- a/src/query/inc/qFill.h +++ b/src/query/inc/qFill.h @@ -28,6 +28,7 @@ typedef struct { STColumn col; // column info int16_t functionId; // sql function id int16_t flag; // column flag: TAG COLUMN|NORMAL COLUMN + int16_t tagIndex; // index of current tag in SFillTagColInfo array list union {int64_t i; double d;} fillVal; } SFillColInfo; @@ -37,26 +38,29 @@ typedef struct { } SFillTagColInfo; typedef struct SFillInfo { - TSKEY start; // start timestamp - TSKEY endKey; // endKey for fill - int32_t order; // order [TSDB_ORDER_ASC|TSDB_ORDER_DESC] - int32_t fillType; // fill type - int32_t numOfRows; // number of rows in the input data block - int32_t rowIdx; // rowIdx - int32_t numOfTotal; // number of filled rows in one round - int32_t numOfCurrent; // number of filled rows in current results + TSKEY start; // start timestamp + TSKEY end; // endKey for fill + TSKEY currentKey; // current active timestamp, the value may be changed during the fill procedure. 
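  /* Illustrative sketch — an editorial addition for clarity, not part of this diff:
   * with the renamed fields, one fill pass conceptually stamps each generated row
   * with currentKey and then advances it by one sliding step until it reaches `end`
   * or the output buffer is full, roughly (assuming ascending order; tsOutput is a
   * hypothetical stand-in for the output timestamp column):
   *
   *     *(TSKEY *)tsOutput = pFillInfo->currentKey;
   *     pFillInfo->currentKey = taosTimeAdd(pFillInfo->currentKey,
   *                                         pFillInfo->interval.sliding,
   *                                         pFillInfo->interval.slidingUnit,
   *                                         pFillInfo->precision);
   *
   * `start` keeps the original window start, while `index` tracks the next unread
   * row of the source block; see doFillOneRowResult()/fillResultImpl() in the
   * qFill.c hunks later in this patch. */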
+ int32_t order; // order [TSDB_ORDER_ASC|TSDB_ORDER_DESC] + int32_t type; // fill type + int32_t numOfRows; // number of rows in the input data block + int32_t index; // active row index + int32_t numOfTotal; // number of filled rows in one round + int32_t numOfCurrent; // number of filled rows in current results - int32_t numOfTags; // number of tags - int32_t numOfCols; // number of columns, including the tags columns - int32_t rowSize; // size of each row - SFillTagColInfo* pTags; // tags value for filling gap + int32_t numOfTags; // number of tags + int32_t numOfCols; // number of columns, including the tags columns + int32_t rowSize; // size of each row SInterval interval; - char * prevValues; // previous row of data, to generate the interpolation results - char * nextValues; // next row of data - char** pData; // original result data block involved in filling data - int32_t capacityInRows; // data buffer size in rows - int8_t precision; // time resoluation - SFillColInfo* pFillCol; // column info for fill operations + char * prevValues; // previous row of data, to generate the interpolation results + char * nextValues; // next row of data + char** pData; // original result data block involved in filling data + int32_t alloc; // data buffer size in rows + int8_t precision; // time resoluation + + SFillColInfo* pFillCol; // column info for fill operations + SFillTagColInfo* pTags; // tags value for filling gap + void* handle; // for dubug purpose } SFillInfo; typedef struct SPoint { @@ -74,17 +78,17 @@ void* taosDestroyFillInfo(SFillInfo *pFillInfo); void taosFillSetStartInfo(SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey); -void taosFillCopyInputDataFromFilePage(SFillInfo* pFillInfo, tFilePage** pInput); +void taosFillCopyInputDataFromFilePage(SFillInfo* pFillInfo, const tFilePage** pInput); -void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, tFilePage* pInput); +void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, const tFilePage* pInput); -int64_t getFilledNumOfRes(SFillInfo* pFillInfo, int64_t ekey, int32_t maxNumOfRows); +int64_t getNumOfResWithFill(SFillInfo* pFillInfo, int64_t ekey, int32_t maxNumOfRows); int32_t taosNumOfRemainRows(SFillInfo *pFillInfo); int32_t taosGetLinearInterpolationVal(int32_t type, SPoint *point1, SPoint *point2, SPoint *point); -int64_t taosGenerateDataBlock(SFillInfo* pFillInfo, tFilePage** output, int32_t capacity); +int64_t taosFillResultDataBlock(SFillInfo* pFillInfo, tFilePage** output, int32_t capacity); #ifdef __cplusplus } diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 10417f03b8..e463495616 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -4116,7 +4116,7 @@ bool queryHasRemainResForTableQuery(SQueryRuntimeEnv* pRuntimeEnv) { * first result row in the actual result set will fill nothing. 
*/ if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) { - int32_t numOfTotal = (int32_t)getFilledNumOfRes(pFillInfo, pQuery->window.ekey, (int32_t)pQuery->rec.capacity); + int32_t numOfTotal = (int32_t)getNumOfResWithFill(pFillInfo, pQuery->window.ekey, (int32_t)pQuery->rec.capacity); return numOfTotal > 0; } @@ -4174,7 +4174,7 @@ int32_t doFillGapsInResults(SQueryRuntimeEnv* pRuntimeEnv, tFilePage **pDst, int SFillInfo* pFillInfo = pRuntimeEnv->pFillInfo; while (1) { - int32_t ret = (int32_t)taosGenerateDataBlock(pFillInfo, (tFilePage**)pQuery->sdata, (int32_t)pQuery->rec.capacity); + int32_t ret = (int32_t)taosFillResultDataBlock(pFillInfo, (tFilePage**)pQuery->sdata, (int32_t)pQuery->rec.capacity); // todo apply limit output function /* reached the start position of according to offset value, return immediately */ @@ -4519,6 +4519,7 @@ static SFillColInfo* taosCreateFillColInfo(SQuery* pQuery) { pFillCol[i].col.bytes = pExprInfo->bytes; pFillCol[i].col.type = (int8_t)pExprInfo->type; pFillCol[i].col.offset = offset; + pFillCol[i].tagIndex = -2; pFillCol[i].flag = TSDB_COL_NORMAL; // always be ta normal column for table query pFillCol[i].functionId = pExprInfo->base.functionId; pFillCol[i].fillVal.i = pQuery->fillVal[i]; @@ -4532,7 +4533,6 @@ static SFillColInfo* taosCreateFillColInfo(SQuery* pQuery) { int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bool isSTableQuery) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; - int32_t code = TSDB_CODE_SUCCESS; SQuery *pQuery = pQInfo->runtimeEnv.pQuery; pRuntimeEnv->topBotQuery = isTopBottomQuery(pQuery); @@ -4540,7 +4540,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo setScanLimitationByResultBuffer(pQuery); - code = setupQueryHandle(tsdb, pQInfo, isSTableQuery); + int32_t code = setupQueryHandle(tsdb, pQInfo, isSTableQuery); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -5430,7 +5430,7 @@ static void tableIntervalProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) { break; } else { taosFillSetStartInfo(pRuntimeEnv->pFillInfo, (int32_t)pQuery->rec.rows, pQuery->window.ekey); - taosFillCopyInputDataFromFilePage(pRuntimeEnv->pFillInfo, (tFilePage**) pQuery->sdata); + taosFillCopyInputDataFromFilePage(pRuntimeEnv->pFillInfo, (const tFilePage**) pQuery->sdata); numOfFilled = 0; pQuery->rec.rows = doFillGapsInResults(pRuntimeEnv, (tFilePage **)pQuery->sdata, &numOfFilled); diff --git a/src/query/src/qFill.c b/src/query/src/qFill.c index 788779b2bb..cf9821b890 100644 --- a/src/query/src/qFill.c +++ b/src/query/src/qFill.c @@ -13,28 +13,33 @@ * along with this program. If not, see . 
*/ -#include "qFill.h" #include "os.h" -#include "qExtbuffer.h" + #include "taosdef.h" #include "taosmsg.h" #include "tsqlfunction.h" +#include "ttype.h" + +#include "qFill.h" +#include "qExtbuffer.h" +#include "queryLog.h" #define FILL_IS_ASC_FILL(_f) ((_f)->order == TSDB_ORDER_ASC) SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols, - int64_t slidingTime, int8_t slidingUnit, int8_t precision, int32_t fillType, SFillColInfo* pFillCol) { + int64_t slidingTime, int8_t slidingUnit, int8_t precision, int32_t fillType, + SFillColInfo* pCol) { if (fillType == TSDB_FILL_NONE) { return NULL; } - + SFillInfo* pFillInfo = calloc(1, sizeof(SFillInfo)); - + taosResetFillInfo(pFillInfo, skey); - - pFillInfo->order = order; - pFillInfo->fillType = fillType; - pFillInfo->pFillCol = pFillCol; + + pFillInfo->order = order; + pFillInfo->type = fillType; + pFillInfo->pFillCol = pCol; pFillInfo->numOfTags = numOfTags; pFillInfo->numOfCols = numOfCols; pFillInfo->precision = precision; @@ -47,11 +52,12 @@ SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_ pFillInfo->pData = malloc(POINTER_BYTES * numOfCols); if (numOfTags > 0) { pFillInfo->pTags = calloc(pFillInfo->numOfTags, sizeof(SFillTagColInfo)); - for(int32_t i = 0; i < numOfTags; ++i) { - pFillInfo->pTags[i].col.colId = -2; + for (int32_t i = 0; i < numOfTags; ++i) { + pFillInfo->pTags[i].col.colId = -2; // TODO } } + // there are no duplicated tags in the SFillTagColInfo list int32_t rowsize = 0; int32_t k = 0; for (int32_t i = 0; i < numOfCols; ++i) { @@ -60,7 +66,7 @@ SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_ if (TSDB_COL_IS_TAG(pColInfo->flag)) { bool exists = false; - for(int32_t j = 0; j < k; ++j) { + for (int32_t j = 0; j < k; ++j) { if (pFillInfo->pTags[j].col.colId == pColInfo->col.colId) { exists = true; break; @@ -70,6 +76,7 @@ SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_ if (!exists) { pFillInfo->pTags[k].col.colId = pColInfo->col.colId; pFillInfo->pTags[k].tagVal = calloc(1, pColInfo->col.bytes); + pColInfo->tagIndex = k; k += 1; } @@ -78,14 +85,15 @@ SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_ } pFillInfo->rowSize = rowsize; - pFillInfo->capacityInRows = capacity; - + pFillInfo->alloc = capacity; + return pFillInfo; } void taosResetFillInfo(SFillInfo* pFillInfo, TSKEY startTimestamp) { pFillInfo->start = startTimestamp; - pFillInfo->rowIdx = -1; + pFillInfo->currentKey = startTimestamp; + pFillInfo->index = -1; pFillInfo->numOfRows = 0; pFillInfo->numOfCurrent = 0; pFillInfo->numOfTotal = 0; @@ -112,20 +120,20 @@ void* taosDestroyFillInfo(SFillInfo* pFillInfo) { } void taosFillSetStartInfo(SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey) { - if (pFillInfo->fillType == TSDB_FILL_NONE) { + if (pFillInfo->type == TSDB_FILL_NONE) { return; } - pFillInfo->endKey = endKey; - if (pFillInfo->order != TSDB_ORDER_ASC) { - pFillInfo->endKey = taosTimeTruncate(endKey, &pFillInfo->interval, pFillInfo->precision); + pFillInfo->end = endKey; + if (!FILL_IS_ASC_FILL(pFillInfo)) { + pFillInfo->end = taosTimeTruncate(endKey, &pFillInfo->interval, pFillInfo->precision); } - pFillInfo->rowIdx = 0; + pFillInfo->index = 0; pFillInfo->numOfRows = numOfRows; // ensure the space - if (pFillInfo->capacityInRows < numOfRows) { + if (pFillInfo->alloc < numOfRows) { for(int32_t i = 0; i < pFillInfo->numOfCols; ++i) { char* tmp = realloc(pFillInfo->pData[i], 
numOfRows*pFillInfo->pFillCol[i].col.bytes); assert(tmp != NULL); // todo handle error @@ -136,42 +144,38 @@ void taosFillSetStartInfo(SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey) } } -void taosFillCopyInputDataFromFilePage(SFillInfo* pFillInfo, tFilePage** pInput) { - // copy the data into source data buffer +// copy the data into source data buffer +void taosFillCopyInputDataFromFilePage(SFillInfo* pFillInfo, const tFilePage** pInput) { for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) { memcpy(pFillInfo->pData[i], pInput[i]->data, pFillInfo->numOfRows * pFillInfo->pFillCol[i].col.bytes); } } -void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, tFilePage* pInput) { +void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, const tFilePage* pInput) { assert(pFillInfo->numOfRows == pInput->num); for(int32_t i = 0; i < pFillInfo->numOfCols; ++i) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - char* data = pInput->data + pCol->col.offset * pInput->num; + const char* data = pInput->data + pCol->col.offset * pInput->num; memcpy(pFillInfo->pData[i], data, (size_t)(pInput->num * pCol->col.bytes)); if (TSDB_COL_IS_TAG(pCol->flag)) { // copy the tag value to tag value buffer - for (int32_t j = 0; j < pFillInfo->numOfTags; ++j) { - SFillTagColInfo* pTag = &pFillInfo->pTags[j]; - if (pTag->col.colId == pCol->col.colId) { - memcpy(pTag->tagVal, data, pCol->col.bytes); - break; - } - } + SFillTagColInfo* pTag = &pFillInfo->pTags[pCol->tagIndex]; + assert (pTag->col.colId == pCol->col.colId); + memcpy(pTag->tagVal, data, pCol->col.bytes); } } } -int64_t getFilledNumOfRes(SFillInfo* pFillInfo, TSKEY ekey, int32_t maxNumOfRows) { +int64_t getNumOfResWithFill(SFillInfo* pFillInfo, TSKEY ekey, int32_t maxNumOfRows) { int64_t* tsList = (int64_t*) pFillInfo->pData[0]; int32_t numOfRows = taosNumOfRemainRows(pFillInfo); TSKEY ekey1 = ekey; if (!FILL_IS_ASC_FILL(pFillInfo)) { - pFillInfo->endKey = taosTimeTruncate(ekey, &pFillInfo->interval, pFillInfo->precision); + pFillInfo->end = taosTimeTruncate(ekey, &pFillInfo->interval, pFillInfo->precision); } int64_t numOfRes = -1; @@ -179,20 +183,20 @@ int64_t getFilledNumOfRes(SFillInfo* pFillInfo, TSKEY ekey, int32_t maxNumOfRows TSKEY lastKey = tsList[pFillInfo->numOfRows - 1]; numOfRes = taosTimeCountInterval( lastKey, - pFillInfo->start, + pFillInfo->currentKey, pFillInfo->interval.sliding, pFillInfo->interval.slidingUnit, pFillInfo->precision); numOfRes += 1; assert(numOfRes >= numOfRows); } else { // reach the end of data - if ((ekey1 < pFillInfo->start && FILL_IS_ASC_FILL(pFillInfo)) || - (ekey1 > pFillInfo->start && !FILL_IS_ASC_FILL(pFillInfo))) { + if ((ekey1 < pFillInfo->currentKey && FILL_IS_ASC_FILL(pFillInfo)) || + (ekey1 > pFillInfo->currentKey && !FILL_IS_ASC_FILL(pFillInfo))) { return 0; } numOfRes = taosTimeCountInterval( ekey1, - pFillInfo->start, + pFillInfo->currentKey, pFillInfo->interval.sliding, pFillInfo->interval.slidingUnit, pFillInfo->precision); @@ -203,315 +207,284 @@ int64_t getFilledNumOfRes(SFillInfo* pFillInfo, TSKEY ekey, int32_t maxNumOfRows } int32_t taosNumOfRemainRows(SFillInfo* pFillInfo) { - if (pFillInfo->rowIdx == -1 || pFillInfo->numOfRows == 0) { + if (pFillInfo->numOfRows == 0 || (pFillInfo->numOfRows > 0 && pFillInfo->index >= pFillInfo->numOfRows)) { return 0; } - return pFillInfo->numOfRows - pFillInfo->rowIdx; + return pFillInfo->numOfRows - pFillInfo->index; } -// todo: refactor -static double linearInterpolationImpl(double v1, double v2, double k1, double k2, double k) { - return v1 + 
(v2 - v1) * (k - k1) / (k2 - k1); -} +#define DO_INTERPOLATION(_v1, _v2, _k1, _k2, _k) ((_v1) + ((_v2) - (_v1)) * (((double)(_k)) - ((double)(_k1))) / (((double)(_k2)) - ((double)(_k1)))) int32_t taosGetLinearInterpolationVal(int32_t type, SPoint* point1, SPoint* point2, SPoint* point) { - switch (type) { - case TSDB_DATA_TYPE_INT: { - *(int32_t*)point->val = (int32_t)linearInterpolationImpl(*(int32_t*)point1->val, *(int32_t*)point2->val, (double)point1->key, - (double)point2->key, (double)point->key); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - *(float*)point->val = (float) - linearInterpolationImpl(*(float*)point1->val, *(float*)point2->val, (double)point1->key, (double)point2->key, (double)point->key); - break; - }; - case TSDB_DATA_TYPE_DOUBLE: { - *(double*)point->val = - linearInterpolationImpl(*(double*)point1->val, *(double*)point2->val, (double)point1->key, (double)point2->key, (double)point->key); - break; - }; - case TSDB_DATA_TYPE_TIMESTAMP: - case TSDB_DATA_TYPE_BIGINT: { - *(int64_t*)point->val = (int64_t)linearInterpolationImpl((double)(*(int64_t*)point1->val), (double)(*(int64_t*)point2->val), (double)point1->key, - (double)point2->key, (double)point->key); - break; - }; - case TSDB_DATA_TYPE_SMALLINT: { - *(int16_t*)point->val = (int16_t)linearInterpolationImpl(*(int16_t*)point1->val, *(int16_t*)point2->val, (double)point1->key, - (double)point2->key, (double)point->key); - break; - }; - case TSDB_DATA_TYPE_TINYINT: { - *(int8_t*) point->val = (int8_t) - linearInterpolationImpl(*(int8_t*)point1->val, *(int8_t*)point2->val, (double)point1->key, (double)point2->key, (double)point->key); - break; - }; - default: { - // TODO: Deal with interpolation with bool and strings and timestamp - return -1; - } + double v1 = -1; + double v2 = -1; + + GET_TYPED_DATA(v1, type, point1->val); + GET_TYPED_DATA(v2, type, point2->val); + + double r = DO_INTERPOLATION(v1, v2, point1->key, point2->key, point->key); + + switch(type) { + case TSDB_DATA_TYPE_TINYINT: *(int8_t*) point->val = (int8_t) r;break; + case TSDB_DATA_TYPE_SMALLINT: *(int16_t*) point->val = (int16_t) r;break; + case TSDB_DATA_TYPE_INT: *(int32_t*) point->val = (int32_t) r;break; + case TSDB_DATA_TYPE_BIGINT: *(int64_t*) point->val = (int64_t) r;break; + case TSDB_DATA_TYPE_DOUBLE: *(double*) point->val = (double) r;break; + case TSDB_DATA_TYPE_FLOAT: *(float*) point->val = (float) r;break; + default: + assert(0); } - return 0; + return TSDB_CODE_SUCCESS; } -static void setTagsValue(SFillInfo* pFillInfo, tFilePage** data, int32_t num) { +static void setTagsValue(SFillInfo* pFillInfo, tFilePage** data, int32_t genRows) { for(int32_t j = 0; j < pFillInfo->numOfCols; ++j) { SFillColInfo* pCol = &pFillInfo->pFillCol[j]; if (TSDB_COL_IS_NORMAL_COL(pCol->flag)) { continue; } - char* val1 = elePtrAt(data[j]->data, pCol->col.bytes, num); + char* val1 = elePtrAt(data[j]->data, pCol->col.bytes, genRows); - for(int32_t i = 0; i < pFillInfo->numOfTags; ++i) { - SFillTagColInfo* pTag = &pFillInfo->pTags[i]; - if (pTag->col.colId == pCol->col.colId) { - assignVal(val1, pTag->tagVal, pCol->col.bytes, pCol->col.type); - break; - } - } + assert(pCol->tagIndex >= 0 && pCol->tagIndex < pFillInfo->numOfTags); + SFillTagColInfo* pTag = &pFillInfo->pTags[pCol->tagIndex]; + + assert (pTag->col.colId == pCol->col.colId); + assignVal(val1, pTag->tagVal, pCol->col.bytes, pCol->col.type); } } -static void doFillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t* num, char** srcData, int64_t ts, - bool outOfBound) { - char* prevValues = 
pFillInfo->prevValues; - char* nextValues = pFillInfo->nextValues; +static void setNullValueForRow(SFillInfo* pFillInfo, tFilePage** data, int32_t numOfCol, int32_t rowIndex) { + // the first are always the timestamp column, so start from the second column. + for (int32_t i = 1; i < numOfCol; ++i) { + SFillColInfo* pCol = &pFillInfo->pFillCol[i]; + + char* output = elePtrAt(data[i]->data, pCol->col.bytes, rowIndex); + setNull(output, pCol->col.type, pCol->col.bytes); + } +} + +static void doFillOneRowResult(SFillInfo* pFillInfo, tFilePage** data, char** srcData, int64_t ts, bool outOfBound) { + char* prev = pFillInfo->prevValues; + char* next = pFillInfo->nextValues; SPoint point1, point2, point; int32_t step = GET_FORWARD_DIRECTION_FACTOR(pFillInfo->order); - char* val = elePtrAt(data[0]->data, TSDB_KEYSIZE, *num); - *(TSKEY*) val = pFillInfo->start; - - int32_t numOfValCols = pFillInfo->numOfCols - pFillInfo->numOfTags; + // set the primary timestamp column value + int32_t index = pFillInfo->numOfCurrent; + char* val = elePtrAt(data[0]->data, TSDB_KEYSIZE, index); + *(TSKEY*) val = pFillInfo->currentKey; // set the other values - if (pFillInfo->fillType == TSDB_FILL_PREV) { - char* p = FILL_IS_ASC_FILL(pFillInfo) ? prevValues : nextValues; + if (pFillInfo->type == TSDB_FILL_PREV) { + char* p = FILL_IS_ASC_FILL(pFillInfo) ? prev : next; if (p != NULL) { - for (int32_t i = 1; i < numOfValCols; ++i) { + for (int32_t i = 1; i < pFillInfo->numOfCols; ++i) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - - char* val1 = elePtrAt(data[i]->data, pCol->col.bytes, *num); - if (isNull(p + pCol->col.offset, pCol->col.type)) { - if (pCol->col.type == TSDB_DATA_TYPE_BINARY || pCol->col.type == TSDB_DATA_TYPE_NCHAR) { - setVardataNull(val1, pCol->col.type); - } else { - setNull(val1, pCol->col.type, pCol->col.bytes); - } - } else { - assignVal(val1, p + pCol->col.offset, pCol->col.bytes, pCol->col.type); + if (TSDB_COL_IS_TAG(pCol->flag)) { + continue; } + + char* output = elePtrAt(data[i]->data, pCol->col.bytes, index); + assignVal(output, p + pCol->col.offset, pCol->col.bytes, pCol->col.type); } } else { // no prev value yet, set the value for NULL - for (int32_t i = 1; i < numOfValCols; ++i) { - SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - - char* val1 = elePtrAt(data[i]->data, pCol->col.bytes, *num); - if (pCol->col.type == TSDB_DATA_TYPE_BINARY||pCol->col.type == TSDB_DATA_TYPE_NCHAR) { - setVardataNull(val1, pCol->col.type); - } else { - setNull(val1, pCol->col.type, pCol->col.bytes); - } - } + setNullValueForRow(pFillInfo, data, pFillInfo->numOfCols, index); } - - setTagsValue(pFillInfo, data, *num); - } else if (pFillInfo->fillType == TSDB_FILL_LINEAR) { + } else if (pFillInfo->type == TSDB_FILL_LINEAR) { // TODO : linear interpolation supports NULL value - if (prevValues != NULL && !outOfBound) { - for (int32_t i = 1; i < numOfValCols; ++i) { + if (prev != NULL && !outOfBound) { + for (int32_t i = 1; i < pFillInfo->numOfCols; ++i) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - + if (TSDB_COL_IS_TAG(pCol->flag)) { + continue; + } + int16_t type = pCol->col.type; int16_t bytes = pCol->col.bytes; - char *val1 = elePtrAt(data[i]->data, pCol->col.bytes, *num); - if (type == TSDB_DATA_TYPE_BINARY|| type == TSDB_DATA_TYPE_NCHAR) { - setVardataNull(val1, pCol->col.type); - continue; - } else if (type == TSDB_DATA_TYPE_BOOL) { + char *val1 = elePtrAt(data[i]->data, pCol->col.bytes, index); + if (type == TSDB_DATA_TYPE_BINARY|| type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BOOL) { 
setNull(val1, pCol->col.type, bytes); continue; } - point1 = (SPoint){.key = *(TSKEY*)(prevValues), .val = prevValues + pCol->col.offset}; - point2 = (SPoint){.key = ts, .val = srcData[i] + pFillInfo->rowIdx * bytes}; + point1 = (SPoint){.key = *(TSKEY*)(prev), .val = prev + pCol->col.offset}; + point2 = (SPoint){.key = ts, .val = srcData[i] + pFillInfo->index * bytes}; point = (SPoint){.key = pFillInfo->start, .val = val1}; taosGetLinearInterpolationVal(type, &point1, &point2, &point); } - - setTagsValue(pFillInfo, data, *num); - } else { - for (int32_t i = 1; i < numOfValCols; ++i) { - SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - - char* val1 = elePtrAt(data[i]->data, pCol->col.bytes, *num); - - if (pCol->col.type == TSDB_DATA_TYPE_BINARY || pCol->col.type == TSDB_DATA_TYPE_NCHAR) { - setVardataNull(val1, pCol->col.type); - } else { - setNull(val1, pCol->col.type, pCol->col.bytes); - } - } - - setTagsValue(pFillInfo, data, *num); - + setNullValueForRow(pFillInfo, data, pFillInfo->numOfCols, index); } } else { /* fill the default value */ - for (int32_t i = 1; i < numOfValCols; ++i) { + for (int32_t i = 1; i < pFillInfo->numOfCols; ++i) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - - char* val1 = elePtrAt(data[i]->data, pCol->col.bytes, *num); + if (TSDB_COL_IS_TAG(pCol->flag)) { + continue; + } + + char* val1 = elePtrAt(data[i]->data, pCol->col.bytes, index); assignVal(val1, (char*)&pCol->fillVal.i, pCol->col.bytes, pCol->col.type); } - - setTagsValue(pFillInfo, data, *num); } - pFillInfo->start = taosTimeAdd(pFillInfo->start, pFillInfo->interval.sliding * step, pFillInfo->interval.slidingUnit, pFillInfo->precision); + setTagsValue(pFillInfo, data, index); + pFillInfo->currentKey = taosTimeAdd(pFillInfo->currentKey, pFillInfo->interval.sliding * step, pFillInfo->interval.slidingUnit, pFillInfo->precision); pFillInfo->numOfCurrent++; - - (*num) += 1; } -static void initBeforeAfterDataBuf(SFillInfo* pFillInfo, char** nextValues) { - if (*nextValues != NULL) { +static void initBeforeAfterDataBuf(SFillInfo* pFillInfo, char** next) { + if (*next != NULL) { return; } - *nextValues = calloc(1, pFillInfo->rowSize); + *next = calloc(1, pFillInfo->rowSize); for (int i = 1; i < pFillInfo->numOfCols; i++) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - - if (pCol->col.type == TSDB_DATA_TYPE_BINARY||pCol->col.type == TSDB_DATA_TYPE_NCHAR) { - setVardataNull(*nextValues + pCol->col.offset, pCol->col.type); - } else { - setNull(*nextValues + pCol->col.offset, pCol->col.type, pCol->col.bytes); - } + setNull(*next + pCol->col.offset, pCol->col.type, pCol->col.bytes); } } -int32_t generateDataBlockImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t numOfRows, int32_t outputRows, char** srcData) { - int32_t num = 0; +static void copyCurrentRowIntoBuf(SFillInfo* pFillInfo, char** srcData, int32_t numOfTags, char* buf) { + int32_t rowIndex = pFillInfo->index; + for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) { + SFillColInfo* pCol = &pFillInfo->pFillCol[i]; + memcpy(buf + pCol->col.offset, srcData[i] + rowIndex * pCol->col.bytes, pCol->col.bytes); + } +} + +static int32_t fillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t outputRows) { pFillInfo->numOfCurrent = 0; - char** prevValues = &pFillInfo->prevValues; - char** nextValues = &pFillInfo->nextValues; + char** srcData = pFillInfo->pData; + char** prev = &pFillInfo->prevValues; + char** next = &pFillInfo->nextValues; int32_t numOfTags = pFillInfo->numOfTags; int32_t step = GET_FORWARD_DIRECTION_FACTOR(pFillInfo->order); - if 
(numOfRows == 0) { - /* - * These data are generated according to fill strategy, since the current timestamp is out of time window of - * real result set. Note that we need to keep the direct previous result rows, to generated the filled data. - */ - while (num < outputRows) { - doFillResultImpl(pFillInfo, data, &num, srcData, pFillInfo->start, true); - } - - pFillInfo->numOfTotal += pFillInfo->numOfCurrent; - return outputRows; + if (FILL_IS_ASC_FILL(pFillInfo)) { + assert(pFillInfo->currentKey >= pFillInfo->start); } else { - while (1) { - int64_t ts = ((int64_t*)pFillInfo->pData[0])[pFillInfo->rowIdx]; + assert(pFillInfo->currentKey <= pFillInfo->start); + } - if ((pFillInfo->start < ts && FILL_IS_ASC_FILL(pFillInfo)) || - (pFillInfo->start > ts && !FILL_IS_ASC_FILL(pFillInfo))) { - /* set the next value for interpolation */ - initBeforeAfterDataBuf(pFillInfo, nextValues); - - int32_t offset = pFillInfo->rowIdx; - for (int32_t i = 0; i < pFillInfo->numOfCols - numOfTags; ++i) { - SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - memcpy(*nextValues + pCol->col.offset, srcData[i] + offset * pCol->col.bytes, pCol->col.bytes); - } + while (pFillInfo->numOfCurrent < outputRows) { + int64_t ts = ((int64_t*)pFillInfo->pData[0])[pFillInfo->index]; + + if ((pFillInfo->currentKey < ts && FILL_IS_ASC_FILL(pFillInfo)) || + (pFillInfo->currentKey > ts && !FILL_IS_ASC_FILL(pFillInfo))) { + /* set the next value for interpolation */ + initBeforeAfterDataBuf(pFillInfo, next); + copyCurrentRowIntoBuf(pFillInfo, srcData, numOfTags, *next); + } + + if (((pFillInfo->currentKey < ts && FILL_IS_ASC_FILL(pFillInfo)) || (pFillInfo->currentKey > ts && !FILL_IS_ASC_FILL(pFillInfo))) && + pFillInfo->numOfCurrent < outputRows) { + + // fill the gap between two actual input rows + while (((pFillInfo->currentKey < ts && FILL_IS_ASC_FILL(pFillInfo)) || + (pFillInfo->currentKey > ts && !FILL_IS_ASC_FILL(pFillInfo))) && + pFillInfo->numOfCurrent < outputRows) { + doFillOneRowResult(pFillInfo, data, srcData, ts, false); } - if (((pFillInfo->start < ts && FILL_IS_ASC_FILL(pFillInfo)) || - (pFillInfo->start > ts && !FILL_IS_ASC_FILL(pFillInfo))) && num < outputRows) { - - while (((pFillInfo->start < ts && FILL_IS_ASC_FILL(pFillInfo)) || - (pFillInfo->start > ts && !FILL_IS_ASC_FILL(pFillInfo))) && num < outputRows) { - doFillResultImpl(pFillInfo, data, &num, srcData, ts, false); - } - - /* output buffer is full, abort */ - if ((num == outputRows && FILL_IS_ASC_FILL(pFillInfo)) || (num < 0 && !FILL_IS_ASC_FILL(pFillInfo))) { - pFillInfo->numOfTotal += pFillInfo->numOfCurrent; - return outputRows; - } - } else { - assert(pFillInfo->start == ts); - initBeforeAfterDataBuf(pFillInfo, prevValues); - - // assign rows to dst buffer - for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) { - SFillColInfo* pCol = &pFillInfo->pFillCol[i]; - if (TSDB_COL_IS_TAG(pCol->flag)) { - continue; - } - - char* val1 = elePtrAt(data[i]->data, pCol->col.bytes, num); - char* src = elePtrAt(srcData[i], pCol->col.bytes, pFillInfo->rowIdx); - - if (i == 0 || - (pCol->functionId != TSDB_FUNC_COUNT && !isNull(src, pCol->col.type)) || - (pCol->functionId == TSDB_FUNC_COUNT && GET_INT64_VAL(src) != 0)) { - assignVal(val1, src, pCol->col.bytes, pCol->col.type); - memcpy(*prevValues + pCol->col.offset, src, pCol->col.bytes); - } else { // i > 0 and data is null , do interpolation - if (pFillInfo->fillType == TSDB_FILL_PREV) { - assignVal(val1, *prevValues + pCol->col.offset, pCol->col.bytes, pCol->col.type); - } else if (pFillInfo->fillType == 
TSDB_FILL_LINEAR) { - assignVal(val1, src, pCol->col.bytes, pCol->col.type); - memcpy(*prevValues + pCol->col.offset, src, pCol->col.bytes); - } else { - assignVal(val1, (char*) &pCol->fillVal.i, pCol->col.bytes, pCol->col.type); - } - } - } - - // set the tag value for final result - setTagsValue(pFillInfo, data, num); - - pFillInfo->start = taosTimeAdd(pFillInfo->start, pFillInfo->interval.sliding*step, pFillInfo->interval.slidingUnit, pFillInfo->precision); - pFillInfo->rowIdx += 1; - - pFillInfo->numOfCurrent +=1; - num += 1; - } - - if ((pFillInfo->rowIdx >= pFillInfo->numOfRows && FILL_IS_ASC_FILL(pFillInfo)) || - (pFillInfo->rowIdx < 0 && !FILL_IS_ASC_FILL(pFillInfo)) || num >= outputRows) { - if (pFillInfo->rowIdx >= pFillInfo->numOfRows || pFillInfo->rowIdx < 0) { - pFillInfo->rowIdx = -1; - pFillInfo->numOfRows = 0; - - /* the raw data block is exhausted, next value does not exists */ - taosTFree(*nextValues); - } - + // output buffer is full, abort + if (pFillInfo->numOfCurrent == outputRows) { pFillInfo->numOfTotal += pFillInfo->numOfCurrent; - return num; + return outputRows; } + } else { + assert(pFillInfo->currentKey == ts); + initBeforeAfterDataBuf(pFillInfo, prev); + + // assign rows to dst buffer + for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) { + SFillColInfo* pCol = &pFillInfo->pFillCol[i]; + if (TSDB_COL_IS_TAG(pCol->flag)) { + continue; + } + + char* output = elePtrAt(data[i]->data, pCol->col.bytes, pFillInfo->numOfCurrent); + char* src = elePtrAt(srcData[i], pCol->col.bytes, pFillInfo->index); + + if (i == 0 || (pCol->functionId != TSDB_FUNC_COUNT && !isNull(src, pCol->col.type)) || + (pCol->functionId == TSDB_FUNC_COUNT && GET_INT64_VAL(src) != 0)) { + assignVal(output, src, pCol->col.bytes, pCol->col.type); + memcpy(*prev + pCol->col.offset, src, pCol->col.bytes); + } else { // i > 0 and data is null , do interpolation + if (pFillInfo->type == TSDB_FILL_PREV) { + assignVal(output, *prev + pCol->col.offset, pCol->col.bytes, pCol->col.type); + } else if (pFillInfo->type == TSDB_FILL_LINEAR) { + assignVal(output, src, pCol->col.bytes, pCol->col.type); + memcpy(*prev + pCol->col.offset, src, pCol->col.bytes); + } else { + assignVal(output, (char*)&pCol->fillVal.i, pCol->col.bytes, pCol->col.type); + } + } + } + + // set the tag value for final result + setTagsValue(pFillInfo, data, pFillInfo->numOfCurrent); + + pFillInfo->currentKey = taosTimeAdd(pFillInfo->currentKey, pFillInfo->interval.sliding * step, + pFillInfo->interval.slidingUnit, pFillInfo->precision); + pFillInfo->index += 1; + pFillInfo->numOfCurrent += 1; + } + + if (pFillInfo->index >= pFillInfo->numOfRows || pFillInfo->numOfCurrent >= outputRows) { + /* the raw data block is exhausted, next value does not exists */ + if (pFillInfo->index >= pFillInfo->numOfRows) { + taosTFree(*next); + } + + pFillInfo->numOfTotal += pFillInfo->numOfCurrent; + return pFillInfo->numOfCurrent; } } + + return pFillInfo->numOfCurrent; } -int64_t taosGenerateDataBlock(SFillInfo* pFillInfo, tFilePage** output, int32_t capacity) { - int32_t remain = taosNumOfRemainRows(pFillInfo); // todo use iterator? +static int64_t fillExternalResults(SFillInfo* pFillInfo, tFilePage** output, int64_t resultCapacity) { + /* + * These data are generated according to fill strategy, since the current timestamp is out of the time window of + * real result set. Note that we need to keep the direct previous result rows, to generated the filled data. 
+ */ + pFillInfo->numOfCurrent = 0; + while (pFillInfo->numOfCurrent < resultCapacity) { + doFillOneRowResult(pFillInfo, output, pFillInfo->pData, pFillInfo->start, true); + } - int32_t rows = (int32_t)getFilledNumOfRes(pFillInfo, pFillInfo->endKey, capacity); - int32_t numOfRes = generateDataBlockImpl(pFillInfo, output, remain, rows, pFillInfo->pData); - assert(numOfRes == rows); + pFillInfo->numOfTotal += pFillInfo->numOfCurrent; + assert(pFillInfo->numOfCurrent == resultCapacity); + return resultCapacity; +} + +int64_t taosFillResultDataBlock(SFillInfo* pFillInfo, tFilePage** output, int32_t capacity) { + int32_t remain = taosNumOfRemainRows(pFillInfo); + + int64_t numOfRes = getNumOfResWithFill(pFillInfo, pFillInfo->end, capacity); + assert(numOfRes <= capacity); + + // no data existed for fill operation now, append result according to the fill strategy + if (remain == 0) { + return fillExternalResults(pFillInfo, output, numOfRes); + } + + fillResultImpl(pFillInfo, output, numOfRes); + assert(numOfRes == pFillInfo->numOfCurrent); + + qDebug("generated fill result, src block:%d, index:%d, startKey:%"PRId64", currentKey:%"PRId64", current:%d, total:%d", + pFillInfo->numOfRows, pFillInfo->index, pFillInfo->start, pFillInfo->currentKey, pFillInfo->numOfCurrent, + pFillInfo->numOfTotal); + return numOfRes; } diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index 1090ab8101..ff67b1f3ec 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -1,7 +1,6 @@ #include "taosdef.h" #include "tcompare.h" #include "tarray.h" -#include "tutil.h" int32_t compareInt32Val(const void *pLeft, const void *pRight) { int32_t left = GET_INT32_VAL(pLeft), right = GET_INT32_VAL(pRight); From 276b7a6dbb33b29cd61783c2d7490459705290d4 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 9 Nov 2020 11:31:13 +0800 Subject: [PATCH 03/52] [TD-1978] --- src/client/src/tscLocalMerge.c | 2 +- src/inc/ttype.h | 36 +++++++++++++ src/query/inc/qFill.h | 2 +- src/query/src/qExecutor.c | 2 +- src/query/src/qFill.c | 96 ++++++++++++++++++---------------- 5 files changed, 91 insertions(+), 47 deletions(-) create mode 100644 src/inc/ttype.h diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index ee72a96e47..10c5e7d32e 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -370,7 +370,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd SFillColInfo* pFillCol = createFillColInfo(pQueryInfo); pReducer->pFillInfo = taosInitFillInfo(pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols, 4096, (int32_t)numOfCols, pQueryInfo->interval.sliding, pQueryInfo->interval.slidingUnit, - tinfo.precision, pQueryInfo->fillType, pFillCol); + tinfo.precision, pQueryInfo->fillType, pFillCol, pSql); } } diff --git a/src/inc/ttype.h b/src/inc/ttype.h new file mode 100644 index 0000000000..caa7c58f40 --- /dev/null +++ b/src/inc/ttype.h @@ -0,0 +1,36 @@ +#ifndef TDENGINE_TTYPE_H +#define TDENGINE_TTYPE_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "taosdef.h" + +#define GET_TYPED_DATA(_v, _type, _data) \ + switch (_type) { \ + case TSDB_DATA_TYPE_TINYINT: \ + (_v) = GET_INT8_VAL(_data); \ + break; \ + case TSDB_DATA_TYPE_SMALLINT: \ + (_v) = GET_INT16_VAL(_data); \ + break; \ + case TSDB_DATA_TYPE_BIGINT: \ + (_v) = (GET_INT64_VAL(_data)); \ + break; \ + case TSDB_DATA_TYPE_FLOAT: \ + (_v) = GET_FLOAT_VAL(_data); \ + break; \ + case TSDB_DATA_TYPE_DOUBLE: \ + (_v) = GET_DOUBLE_VAL(_data); \ + break; \ + 
default: \ + (_v) = GET_INT32_VAL(_data); \ + break; \ + }; + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TTYPE_H diff --git a/src/query/inc/qFill.h b/src/query/inc/qFill.h index 9589d01cc4..385ae88543 100644 --- a/src/query/inc/qFill.h +++ b/src/query/inc/qFill.h @@ -70,7 +70,7 @@ typedef struct SPoint { SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols, int64_t slidingTime, int8_t slidingUnit, int8_t precision, int32_t fillType, - SFillColInfo* pFillCol); + SFillColInfo* pFillCol, void* handle); void taosResetFillInfo(SFillInfo* pFillInfo, TSKEY startTimestamp); diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index e463495616..6191f536a3 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -4628,7 +4628,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo pRuntimeEnv->pFillInfo = taosInitFillInfo(pQuery->order.order, w.skey, 0, (int32_t)pQuery->rec.capacity, pQuery->numOfOutput, pQuery->interval.sliding, pQuery->interval.slidingUnit, (int8_t)pQuery->precision, - pQuery->fillType, pColInfo); + pQuery->fillType, pColInfo, pQInfo); } setQueryStatus(pQuery, QUERY_NOT_COMPLETED); diff --git a/src/query/src/qFill.c b/src/query/src/qFill.c index cf9821b890..a4830f7d88 100644 --- a/src/query/src/qFill.c +++ b/src/query/src/qFill.c @@ -26,39 +26,10 @@ #define FILL_IS_ASC_FILL(_f) ((_f)->order == TSDB_ORDER_ASC) -SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols, - int64_t slidingTime, int8_t slidingUnit, int8_t precision, int32_t fillType, - SFillColInfo* pCol) { - if (fillType == TSDB_FILL_NONE) { - return NULL; - } - - SFillInfo* pFillInfo = calloc(1, sizeof(SFillInfo)); - - taosResetFillInfo(pFillInfo, skey); - - pFillInfo->order = order; - pFillInfo->type = fillType; - pFillInfo->pFillCol = pCol; - pFillInfo->numOfTags = numOfTags; - pFillInfo->numOfCols = numOfCols; - pFillInfo->precision = precision; - - pFillInfo->interval.interval = slidingTime; - pFillInfo->interval.intervalUnit = slidingUnit; - pFillInfo->interval.sliding = slidingTime; - pFillInfo->interval.slidingUnit = slidingUnit; - - pFillInfo->pData = malloc(POINTER_BYTES * numOfCols); - if (numOfTags > 0) { - pFillInfo->pTags = calloc(pFillInfo->numOfTags, sizeof(SFillTagColInfo)); - for (int32_t i = 0; i < numOfTags; ++i) { - pFillInfo->pTags[i].col.colId = -2; // TODO - } - } - - // there are no duplicated tags in the SFillTagColInfo list +// there are no duplicated tags in the SFillTagColInfo list +static int32_t setTagColumnInfo(SFillInfo* pFillInfo, int32_t numOfCols, int32_t capacity) { int32_t rowsize = 0; + int32_t k = 0; for (int32_t i = 0; i < numOfCols; ++i) { SFillColInfo* pColInfo = &pFillInfo->pFillCol[i]; @@ -81,11 +52,49 @@ SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_ k += 1; } } + rowsize += pColInfo->col.bytes; } - pFillInfo->rowSize = rowsize; - pFillInfo->alloc = capacity; + assert(k < pFillInfo->numOfTags); + return rowsize; +} + +SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols, + int64_t slidingTime, int8_t slidingUnit, int8_t precision, int32_t fillType, + SFillColInfo* pCol, void* handle) { + if (fillType == TSDB_FILL_NONE) { + return NULL; + } + + SFillInfo* pFillInfo = calloc(1, sizeof(SFillInfo)); + + taosResetFillInfo(pFillInfo, skey); + + pFillInfo->order = order; + pFillInfo->type = fillType; + 
pFillInfo->pFillCol = pCol; + pFillInfo->numOfTags = numOfTags; + pFillInfo->numOfCols = numOfCols; + pFillInfo->precision = precision; + pFillInfo->alloc = capacity; + pFillInfo->handle = handle; + + pFillInfo->interval.interval = slidingTime; + pFillInfo->interval.intervalUnit = slidingUnit; + pFillInfo->interval.sliding = slidingTime; + pFillInfo->interval.slidingUnit = slidingUnit; + + pFillInfo->pData = malloc(POINTER_BYTES * numOfCols); + if (numOfTags > 0) { + pFillInfo->pTags = calloc(pFillInfo->numOfTags, sizeof(SFillTagColInfo)); + for (int32_t i = 0; i < numOfTags; ++i) { + pFillInfo->pTags[i].col.colId = -2; // TODO + } + } + + pFillInfo->rowSize = setTagColumnInfo(pFillInfo, pFillInfo->numOfCols, pFillInfo->alloc); + assert(pFillInfo->rowSize > 0); return pFillInfo; } @@ -350,7 +359,7 @@ static void initBeforeAfterDataBuf(SFillInfo* pFillInfo, char** next) { } } -static void copyCurrentRowIntoBuf(SFillInfo* pFillInfo, char** srcData, int32_t numOfTags, char* buf) { +static void copyCurrentRowIntoBuf(SFillInfo* pFillInfo, char** srcData, char* buf) { int32_t rowIndex = pFillInfo->index; for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) { SFillColInfo* pCol = &pFillInfo->pFillCol[i]; @@ -365,7 +374,6 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t ou char** prev = &pFillInfo->prevValues; char** next = &pFillInfo->nextValues; - int32_t numOfTags = pFillInfo->numOfTags; int32_t step = GET_FORWARD_DIRECTION_FACTOR(pFillInfo->order); if (FILL_IS_ASC_FILL(pFillInfo)) { @@ -381,7 +389,7 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t ou (pFillInfo->currentKey > ts && !FILL_IS_ASC_FILL(pFillInfo))) { /* set the next value for interpolation */ initBeforeAfterDataBuf(pFillInfo, next); - copyCurrentRowIntoBuf(pFillInfo, srcData, numOfTags, *next); + copyCurrentRowIntoBuf(pFillInfo, srcData, *next); } if (((pFillInfo->currentKey < ts && FILL_IS_ASC_FILL(pFillInfo)) || (pFillInfo->currentKey > ts && !FILL_IS_ASC_FILL(pFillInfo))) && @@ -476,15 +484,15 @@ int64_t taosFillResultDataBlock(SFillInfo* pFillInfo, tFilePage** output, int32_ // no data existed for fill operation now, append result according to the fill strategy if (remain == 0) { - return fillExternalResults(pFillInfo, output, numOfRes); + fillExternalResults(pFillInfo, output, numOfRes); + } else { + fillResultImpl(pFillInfo, output, numOfRes); + assert(numOfRes == pFillInfo->numOfCurrent); } - fillResultImpl(pFillInfo, output, numOfRes); - assert(numOfRes == pFillInfo->numOfCurrent); - - qDebug("generated fill result, src block:%d, index:%d, startKey:%"PRId64", currentKey:%"PRId64", current:%d, total:%d", - pFillInfo->numOfRows, pFillInfo->index, pFillInfo->start, pFillInfo->currentKey, pFillInfo->numOfCurrent, - pFillInfo->numOfTotal); + qDebug("fill:%p, generated fill result, src block:%d, index:%d, brange:%"PRId64"-%"PRId64", currentKey:%"PRId64", current:%d, total:%d, %p", + pFillInfo, pFillInfo->numOfRows, pFillInfo->index, pFillInfo->start, pFillInfo->end, pFillInfo->currentKey, pFillInfo->numOfCurrent, + pFillInfo->numOfTotal, pFillInfo->handle); return numOfRes; } From 2b9a3b1e1bf04dd1bda80b4cd20cc274c085aac5 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 9 Nov 2020 13:29:12 +0800 Subject: [PATCH 04/52] [TD-1978] --- src/client/src/tscSQLParser.c | 58 ++++- src/client/src/tscSchemaUtil.c | 10 - src/common/inc/tname.h | 6 +- src/common/src/tname.c | 11 + src/query/inc/qAst.h | 9 +- src/query/inc/qSqlparser.h | 3 - src/query/src/qAst.c | 444 
++------------------------------- 7 files changed, 85 insertions(+), 456 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index f497263f2a..fe4416dee9 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -69,7 +69,10 @@ static void getColumnName(tSQLExprItem* pItem, char* resultFieldName, int32_t na static int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExprItem* pItem, bool finalResult); static int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SColumnList* pIdList, int16_t bytes, int8_t type, char* fieldName, SSqlExpr* pSqlExpr); -static int32_t changeFunctionID(int32_t optr, int16_t* functionId); + +static int32_t convertFunctionId(int32_t optr, int16_t* functionId); +static uint8_t convertOptr(SStrToken *pToken); + static int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable, bool joinQuery); static bool validateIpAddress(const char* ip, size_t size); @@ -123,6 +126,45 @@ static int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo); static int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index); static int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pSqlExpr, SQueryInfo* pQueryInfo, SArray* pCols, int64_t *uid); +static uint8_t convertOptr(SStrToken *pToken) { + switch (pToken->type) { + case TK_LT: + return TSDB_RELATION_LESS; + case TK_LE: + return TSDB_RELATION_LESS_EQUAL; + case TK_GT: + return TSDB_RELATION_GREATER; + case TK_GE: + return TSDB_RELATION_GREATER_EQUAL; + case TK_NE: + return TSDB_RELATION_NOT_EQUAL; + case TK_AND: + return TSDB_RELATION_AND; + case TK_OR: + return TSDB_RELATION_OR; + case TK_EQ: + return TSDB_RELATION_EQUAL; + case TK_PLUS: + return TSDB_BINARY_OP_ADD; + case TK_MINUS: + return TSDB_BINARY_OP_SUBTRACT; + case TK_STAR: + return TSDB_BINARY_OP_MULTIPLY; + case TK_SLASH: + case TK_DIVIDE: + return TSDB_BINARY_OP_DIVIDE; + case TK_REM: + return TSDB_BINARY_OP_REMAINDER; + case TK_LIKE: + return TSDB_RELATION_LIKE; + case TK_ISNULL: + return TSDB_RELATION_ISNULL; + case TK_NOTNULL: + return TSDB_RELATION_NOTNULL; + default: { return 0; } + } +} + /* * Used during parsing query sql. Since the query sql usually small in length, error position * is not needed in the final error message. 
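The convertOptr() helper introduced in the hunk above takes over the parser-token-to-operator mapping that previously lived in src/query/src/qAst.c as getBinaryExprOptr(), which this same patch removes. Below is a minimal, hypothetical sketch of how the mapping behaves; it assumes the SStrToken type, the TK_* token constants and the TSDB_RELATION_* / TSDB_BINARY_OP_* constants that appear elsewhere in this patch, and the checkConvertOptr() harness itself is illustrative only, not part of the change.

    #include <assert.h>
    /* Sketch only: would have to live in tscSQLParser.c (convertOptr is static)
     * and relies on the client headers that define SStrToken and the
     * TK_* / TSDB_* constants. */
    static void checkConvertOptr(void) {
      SStrToken ge      = { .type = TK_GE };    /* relational token  */
      SStrToken plus    = { .type = TK_PLUS };  /* arithmetic token  */
      SStrToken unknown = { .type = TK_LP };    /* not in the switch */

      assert(convertOptr(&ge)      == TSDB_RELATION_GREATER_EQUAL);
      assert(convertOptr(&plus)    == TSDB_BINARY_OP_ADD);
      assert(convertOptr(&unknown) == 0);       /* 0 signals an unsupported operator */
    }

A return value of 0 is treated as "unsupported operator" by the callers: the exprTreeFromSqlExpr() hunk later in this patch asserts that the converted operator is non-zero.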
@@ -1725,7 +1767,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } int16_t functionID = 0; - if (changeFunctionID(optr, &functionID) != TSDB_CODE_SUCCESS) { + if (convertFunctionId(optr, &functionID) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } @@ -1861,7 +1903,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col int32_t intermediateResSize = 0; int16_t functionID = 0; - if (changeFunctionID(optr, &functionID) != TSDB_CODE_SUCCESS) { + if (convertFunctionId(optr, &functionID) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } @@ -1932,7 +1974,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col bool requireAllFields = (pItem->pNode->pParam == NULL); int16_t functionID = 0; - if (changeFunctionID(optr, &functionID) != TSDB_CODE_SUCCESS) { + if (convertFunctionId(optr, &functionID) != TSDB_CODE_SUCCESS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9); } @@ -2121,7 +2163,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col * for dp = 100, it is max, */ int16_t functionId = 0; - if (changeFunctionID(optr, &functionId) != TSDB_CODE_SUCCESS) { + if (convertFunctionId(optr, &functionId) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } tscInsertPrimaryTSSourceColumn(pQueryInfo, &index); @@ -2138,7 +2180,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } int16_t functionId = 0; - if (changeFunctionID(optr, &functionId) != TSDB_CODE_SUCCESS) { + if (convertFunctionId(optr, &functionId) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } @@ -2397,7 +2439,7 @@ int32_t getColumnIndexByName(SSqlCmd* pCmd, const SStrToken* pToken, SQueryInfo* return doGetColumnIndexByName(pCmd, &tmpToken, pQueryInfo, pIndex); } -int32_t changeFunctionID(int32_t optr, int16_t* functionId) { +int32_t convertFunctionId(int32_t optr, int16_t* functionId) { switch (optr) { case TK_COUNT: *functionId = TSDB_FUNC_COUNT; @@ -6535,7 +6577,7 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pS (*pExpr)->_node.pRight = pRight; SStrToken t = {.type = pSqlExpr->nSQLOptr}; - (*pExpr)->_node.optr = getBinaryExprOptr(&t); + (*pExpr)->_node.optr = convertOptr(&t); assert((*pExpr)->_node.optr != 0); diff --git a/src/client/src/tscSchemaUtil.c b/src/client/src/tscSchemaUtil.c index 441ad8d838..fcc93ffadc 100644 --- a/src/client/src/tscSchemaUtil.c +++ b/src/client/src/tscSchemaUtil.c @@ -130,16 +130,6 @@ SSchema* tscGetColumnSchemaById(STableMeta* pTableMeta, int16_t colId) { return NULL; } -struct SSchema tscGetTbnameColumnSchema() { - struct SSchema s = { - .colId = TSDB_TBNAME_COLUMN_INDEX, - .type = TSDB_DATA_TYPE_BINARY, - .bytes = TSDB_TABLE_NAME_LEN - }; - - strcpy(s.name, TSQL_TBNAME_L); - return s; -} static void tscInitCorVgroupInfo(SCorVgroupInfo *corVgroupInfo, SVgroupInfo *vgroupInfo) { corVgroupInfo->version = 0; corVgroupInfo->inUse = 0; diff --git a/src/common/inc/tname.h b/src/common/inc/tname.h index 6b73d98b81..5563e590e7 100644 --- a/src/common/inc/tname.h +++ b/src/common/inc/tname.h @@ -35,6 +35,10 @@ bool tscValidateTableNameLength(size_t len); SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters); -// int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision); +/** + * get the schema for the "tbname" column. 
it is a built column + * @return + */ +SSchema tscGetTbnameColumnSchema(); #endif // TDENGINE_NAME_H diff --git a/src/common/src/tname.c b/src/common/src/tname.c index 8879e9e797..bea8c52ef2 100644 --- a/src/common/src/tname.c +++ b/src/common/src/tname.c @@ -188,3 +188,14 @@ void extractTableNameFromToken(SStrToken* pToken, SStrToken* pTable) { pToken->z = r; } } + +SSchema tscGetTbnameColumnSchema() { + struct SSchema s = { + .colId = TSDB_TBNAME_COLUMN_INDEX, + .type = TSDB_DATA_TYPE_BINARY, + .bytes = TSDB_TABLE_NAME_LEN + }; + + strcpy(s.name, TSQL_TBNAME_L); + return s; +} \ No newline at end of file diff --git a/src/query/inc/qAst.h b/src/query/inc/qAst.h index d3e60c21dc..b05d08b567 100644 --- a/src/query/inc/qAst.h +++ b/src/query/inc/qAst.h @@ -81,14 +81,13 @@ void tExprTreeTraverse(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, S void tExprTreeCalcTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, void *param, int32_t order, char *(*cb)(void *, const char*, int32_t)); -uint8_t getBinaryExprOptr(SStrToken *pToken); - -void tExprNodeDestroy(tExprNode *pNode, void (*fp)(void *)); -void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree); - tExprNode* exprTreeFromBinary(const void* data, size_t size); tExprNode* exprTreeFromTableName(const char* tbnameCond); +void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree); + +void tExprNodeDestroy(tExprNode *pNode, void (*fp)(void *)); + #ifdef __cplusplus } #endif diff --git a/src/query/inc/qSqlparser.h b/src/query/inc/qSqlparser.h index 25da04710d..aaa1bb66b0 100644 --- a/src/query/inc/qSqlparser.h +++ b/src/query/inc/qSqlparser.h @@ -309,9 +309,6 @@ void tSQLSetColumnType(TAOS_FIELD *pField, SStrToken *pToken); void *ParseAlloc(void *(*mallocProc)(size_t)); -// convert the sql filter expression into binary data -int32_t tSQLExprToBinary(tSQLExpr* pExpr, SStringBuilder* sb); - enum { TSQL_NODE_TYPE_EXPR = 0x1, TSQL_NODE_TYPE_ID = 0x2, diff --git a/src/query/src/qAst.c b/src/query/src/qAst.c index bc2866fb6f..3a1b89c013 100644 --- a/src/query/src/qAst.c +++ b/src/query/src/qAst.c @@ -13,12 +13,10 @@ * along with this program. If not, see . */ - #include "os.h" #include "exception.h" #include "qAst.h" -#include "qSqlparser.h" #include "qSyntaxtreefunction.h" #include "taosdef.h" #include "taosmsg.h" @@ -30,200 +28,19 @@ #include "tskiplist.h" #include "tsqlfunction.h" #include "tstoken.h" -#include "ttokendef.h" -#include "tulog.h" +#include "tschemautil.h" -/* - * - * @date 2018-2-15 - * @version 0.2 operation for column filter - * - * @Description parse tag query expression to build ast - * ver 0.2, filter the result on first column with high priority to limit the candidate set - * ver 0.3, pipeline filter in the form of: (a+2)/9 > 14 - * - */ -static tExprNode *tExprNodeCreate(SSchema *pSchema, int32_t numOfCols, SStrToken *pToken); +typedef struct { + char* v; + int32_t optr; +} SEndPoint; -static tExprNode *createSyntaxTree(SSchema *pSchema, int32_t numOfCols, char *str, int32_t *i); -static void destroySyntaxTree(tExprNode *); +typedef struct { + SEndPoint* start; + SEndPoint* end; +} SQueryCond; -static uint8_t isQueryOnPrimaryKey(const char *primaryColumnName, const tExprNode *pLeft, const tExprNode *pRight); - -/* - * Check the filter value type on the right hand side based on the column id on the left hand side, - * the filter value type must be identical to field type for relational operation - * As for binary arithmetic operation, it is not necessary to do so. 
- */ -static void reviseBinaryExprIfNecessary(tExprNode **pLeft, tExprNode **pRight, uint8_t *optr) { - if (*optr >= TSDB_RELATION_LESS && *optr <= TSDB_RELATION_LIKE) { - // make sure that the type of data on both sides of relational comparision are identical - if ((*pLeft)->nodeType == TSQL_NODE_VALUE) { - tVariantTypeSetType((*pLeft)->pVal, (*pRight)->pSchema->type); - } else if ((*pRight)->nodeType == TSQL_NODE_VALUE) { - tVariantTypeSetType((*pRight)->pVal, (*pLeft)->pSchema->type); - } - - } else if (*optr >= TSDB_BINARY_OP_ADD && *optr <= TSDB_BINARY_OP_REMAINDER) { - if ((*pLeft)->nodeType == TSQL_NODE_VALUE) { - /* convert to int/bigint may cause the precision loss */ - tVariantTypeSetType((*pLeft)->pVal, TSDB_DATA_TYPE_DOUBLE); - } else if ((*pRight)->nodeType == TSQL_NODE_VALUE) { - /* convert to int/bigint may cause the precision loss */ - tVariantTypeSetType((*pRight)->pVal, TSDB_DATA_TYPE_DOUBLE); - } - } - - /* - * for expressions that are suitable for switch principle, - * switch left and left and right hand side in expr if possible - */ - if ((*pLeft)->nodeType == TSQL_NODE_VALUE && (*pRight)->nodeType == TSQL_NODE_COL) { - if (*optr >= TSDB_RELATION_GREATER && *optr <= TSDB_RELATION_GREATER_EQUAL && *optr != TSDB_RELATION_EQUAL) { - SWAP(*pLeft, *pRight, tExprNode *); - } - - switch (*optr) { - case TSDB_RELATION_GREATER: - (*optr) = TSDB_RELATION_LESS; - break; - case TSDB_RELATION_LESS: - (*optr) = TSDB_RELATION_GREATER; - break; - case TSDB_RELATION_GREATER_EQUAL: - (*optr) = TSDB_RELATION_LESS_EQUAL; - break; - case TSDB_RELATION_LESS_EQUAL: - (*optr) = TSDB_RELATION_GREATER_EQUAL; - break; - default:; - // for other type of operations, do nothing - } - } -} - -static tExprNode *tExprNodeCreate(SSchema *pSchema, int32_t numOfCols, SStrToken *pToken) { - /* if the token is not a value, return false */ - if (pToken->type == TK_RP || (pToken->type != TK_INTEGER && pToken->type != TK_FLOAT && pToken->type != TK_ID && - pToken->type != TK_TBNAME && pToken->type != TK_STRING && pToken->type != TK_BOOL)) { - return NULL; - } - - size_t nodeSize = sizeof(tExprNode); - tExprNode *pNode = NULL; - - if (pToken->type == TK_ID || pToken->type == TK_TBNAME) { - int32_t i = 0; - if (pToken->type == TK_ID) { - do { - SStrToken tableToken = {0}; - extractTableNameFromToken(pToken, &tableToken); - - size_t len = strlen(pSchema[i].name); - if (strncmp(pToken->z, pSchema[i].name, pToken->n) == 0 && pToken->n == len) break; - } while (++i < numOfCols); - - if (i == numOfCols) { // column name is not valid, parse the expression failed - return NULL; - } - } - - nodeSize += sizeof(SSchema); - - pNode = calloc(1, nodeSize); - pNode->pSchema = (struct SSchema *)((char *)pNode + sizeof(tExprNode)); - pNode->nodeType = TSQL_NODE_COL; - - if (pToken->type == TK_ID) { - memcpy(pNode->pSchema, &pSchema[i], sizeof(SSchema)); - } else { - pNode->pSchema->type = TSDB_DATA_TYPE_BINARY; - pNode->pSchema->bytes = TSDB_TABLE_NAME_LEN - 1; - strcpy(pNode->pSchema->name, TSQL_TBNAME_L); - pNode->pSchema->colId = -1; - } - - } else { - nodeSize += sizeof(tVariant); - pNode = calloc(1, nodeSize); - pNode->pVal = (tVariant *)((char *)pNode + sizeof(tExprNode)); - - toTSDBType(pToken->type); - tVariantCreate(pNode->pVal, pToken); - pNode->nodeType = TSQL_NODE_VALUE; - } - - return pNode; -} - -uint8_t getBinaryExprOptr(SStrToken *pToken) { - switch (pToken->type) { - case TK_LT: - return TSDB_RELATION_LESS; - case TK_LE: - return TSDB_RELATION_LESS_EQUAL; - case TK_GT: - return TSDB_RELATION_GREATER; - case 
TK_GE: - return TSDB_RELATION_GREATER_EQUAL; - case TK_NE: - return TSDB_RELATION_NOT_EQUAL; - case TK_AND: - return TSDB_RELATION_AND; - case TK_OR: - return TSDB_RELATION_OR; - case TK_EQ: - return TSDB_RELATION_EQUAL; - case TK_PLUS: - return TSDB_BINARY_OP_ADD; - case TK_MINUS: - return TSDB_BINARY_OP_SUBTRACT; - case TK_STAR: - return TSDB_BINARY_OP_MULTIPLY; - case TK_SLASH: - case TK_DIVIDE: - return TSDB_BINARY_OP_DIVIDE; - case TK_REM: - return TSDB_BINARY_OP_REMAINDER; - case TK_LIKE: - return TSDB_RELATION_LIKE; - case TK_ISNULL: - return TSDB_RELATION_ISNULL; - case TK_NOTNULL: - return TSDB_RELATION_NOTNULL; - default: { return 0; } - } -} - -// previous generated expr is reduced as the left child -static tExprNode *parseRemainStr(char *pstr, tExprNode *pExpr, SSchema *pSchema, int32_t optr, - int32_t numOfCols, int32_t *i) { - // set the previous generated node as the left child of new root - pExpr->nodeType = TSQL_NODE_EXPR; - - // remain is the right child - tExprNode *pRight = createSyntaxTree(pSchema, numOfCols, pstr, i); - if (pRight == NULL || (pRight->nodeType == TSQL_NODE_COL && pExpr->nodeType != TSQL_NODE_VALUE) || - (pExpr->nodeType == TSQL_NODE_VALUE && pRight->nodeType != TSQL_NODE_COL)) { - tExprNodeDestroy(pExpr, NULL); - tExprNodeDestroy(pRight, NULL); - return NULL; - } - - tExprNode *pNewExpr = (tExprNode *)calloc(1, sizeof(tExprNode)); - uint8_t k = optr; - reviseBinaryExprIfNecessary(&pExpr, &pRight, &k); - pNewExpr->_node.pLeft = pExpr; - pNewExpr->_node.pRight = pRight; - pNewExpr->_node.optr = k; - - pNewExpr->_node.hasPK = isQueryOnPrimaryKey(pSchema[0].name, pExpr, pRight); - pNewExpr->nodeType = TSQL_NODE_EXPR; - - return pNewExpr; -} - -uint8_t isQueryOnPrimaryKey(const char *primaryColumnName, const tExprNode *pLeft, const tExprNode *pRight) { +static uint8_t UNUSED_FUNC isQueryOnPrimaryKey(const char *primaryColumnName, const tExprNode *pLeft, const tExprNode *pRight) { if (pLeft->nodeType == TSQL_NODE_COL) { // if left node is the primary column,return true return (strcmp(primaryColumnName, pLeft->pSchema->name) == 0) ? 
1 : 0; @@ -236,103 +53,6 @@ uint8_t isQueryOnPrimaryKey(const char *primaryColumnName, const tExprNode *pLef } } -static tExprNode *createSyntaxTree(SSchema *pSchema, int32_t numOfCols, char *str, int32_t *i) { - SStrToken t0 = tStrGetToken(str, i, false, 0, NULL); - if (t0.n == 0) { - return NULL; - } - - tExprNode *pLeft = NULL; - if (t0.type == TK_LP) { // start new left child branch - pLeft = createSyntaxTree(pSchema, numOfCols, str, i); - } else { - if (t0.type == TK_RP) { - return NULL; - } - - pLeft = tExprNodeCreate(pSchema, numOfCols, &t0); - } - - if (pLeft == NULL) { - return NULL; - } - - t0 = tStrGetToken(str, i, false, 0, NULL); - if (t0.n == 0 || t0.type == TK_RP) { - if (pLeft->nodeType != TSQL_NODE_EXPR) { // if left is not the expr, it is not a legal expr - tExprNodeDestroy(pLeft, NULL); - return NULL; - } - - return pLeft; - } - - // get the operator of expr - uint8_t optr = getBinaryExprOptr(&t0); - if (optr == 0) { - uError("not support binary operator:%d", t0.type); - tExprNodeDestroy(pLeft, NULL); - return NULL; - } - - assert(pLeft != NULL); - tExprNode *pRight = NULL; - - if (t0.type == TK_AND || t0.type == TK_OR || t0.type == TK_LP) { - pRight = createSyntaxTree(pSchema, numOfCols, str, i); - } else { - /* - * In case that pLeft is a field identification, - * we parse the value in expression according to queried field type, - * if we do not get the information, in case of value of field presented first, - * we revised the value after the binary expression is completed. - */ - t0 = tStrGetToken(str, i, true, 0, NULL); - if (t0.n == 0) { - tExprNodeDestroy(pLeft, NULL); // illegal expression - return NULL; - } - - if (t0.type == TK_LP) { - pRight = createSyntaxTree(pSchema, numOfCols, str, i); - } else { - pRight = tExprNodeCreate(pSchema, numOfCols, &t0); - } - } - - if (pRight == NULL) { - tExprNodeDestroy(pLeft, NULL); - return NULL; - } - - /* create binary expr as the child of new parent node */ - tExprNode *pExpr = (tExprNode *)calloc(1, sizeof(tExprNode)); - reviseBinaryExprIfNecessary(&pLeft, &pRight, &optr); - - pExpr->_node.hasPK = isQueryOnPrimaryKey(pSchema[0].name, pLeft, pRight); - pExpr->_node.pLeft = pLeft; - pExpr->_node.pRight = pRight; - pExpr->_node.optr = optr; - - t0 = tStrGetToken(str, i, true, 0, NULL); - - if (t0.n == 0 || t0.type == TK_RP) { - pExpr->nodeType = TSQL_NODE_EXPR; - return pExpr; - } else { - uint8_t localOptr = getBinaryExprOptr(&t0); - if (localOptr == 0) { - uError("not support binary operator:%d", t0.type); - free(pExpr); - return NULL; - } - - return parseRemainStr(str, pExpr, pSchema, localOptr, numOfCols, i); - } -} - -static void UNUSED_FUNC destroySyntaxTree(tExprNode *pNode) { tExprNodeDestroy(pNode, NULL); } - void tExprNodeDestroy(tExprNode *pNode, void (*fp)(void *)) { if (pNode == NULL) { return; @@ -372,16 +92,6 @@ void tExprTreeDestroy(tExprNode **pExpr, void (*fp)(void *)) { *pExpr = NULL; } -typedef struct { - char* v; - int32_t optr; -} SEndPoint; - -typedef struct { - SEndPoint* start; - SEndPoint* end; -} SQueryCond; - // todo check for malloc failure static int32_t setQueryCond(tQueryInfo *queryColInfo, SQueryCond* pCond) { int32_t optr = queryColInfo->optr; @@ -395,13 +105,10 @@ static int32_t setQueryCond(tQueryInfo *queryColInfo, SQueryCond* pCond) { pCond->end = calloc(1, sizeof(SEndPoint)); pCond->end->optr = queryColInfo->optr; pCond->end->v = queryColInfo->q; - } else if (optr == TSDB_RELATION_IN) { - printf("relation is in\n"); - assert(0); - } else if (optr == TSDB_RELATION_LIKE) { - 
printf("relation is like\n"); + } else if (optr == TSDB_RELATION_IN || optr == TSDB_RELATION_LIKE) { assert(0); } + return TSDB_CODE_SUCCESS; } @@ -529,99 +236,6 @@ static void tQueryIndexColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArr tSkipListDestroyIter(iter); } -int32_t merge(SArray *pLeft, SArray *pRight, SArray *pFinalRes) { -// assert(pFinalRes->pRes == 0); -// -// pFinalRes->pRes = calloc((size_t)(pLeft->num + pRight->num), POINTER_BYTES); -// pFinalRes->num = 0; -// -// // sort according to address -// tSkipListNode **pLeftNodes = (tSkipListNode **)pLeft->pRes; -// qsort(pLeftNodes, pLeft->num, sizeof(pLeft->pRes[0]), compareByAddr); -// -// tSkipListNode **pRightNodes = (tSkipListNode **)pRight->pRes; -// qsort(pRightNodes, pRight->num, sizeof(pRight->pRes[0]), compareByAddr); -// -// int32_t i = 0, j = 0; -// -// // merge two sorted arrays in O(n) time -// while (i < pLeft->num && j < pRight->num) { -// int64_t ret = (int64_t)pLeftNodes[i] - (int64_t)pRightNodes[j]; -// -// if (ret < 0) { -// pFinalRes->pRes[pFinalRes->num++] = pLeftNodes[i++]; -// } else if (ret > 0) { -// pFinalRes->pRes[pFinalRes->num++] = pRightNodes[j++]; -// } else { // pNode->key > pkey[i] -// pFinalRes->pRes[pFinalRes->num++] = pRightNodes[j++]; -// i++; -// } -// } -// -// while (i < pLeft->num) { -// pFinalRes->pRes[pFinalRes->num++] = pLeftNodes[i++]; -// } -// -// while (j < pRight->num) { -// pFinalRes->pRes[pFinalRes->num++] = pRightNodes[j++]; -// } -// -// return pFinalRes->num; - return 0; -} - -int32_t intersect(SArray *pLeft, SArray *pRight, SArray *pFinalRes) { -// int64_t num = MIN(pLeft->num, pRight->num); -// -// assert(pFinalRes->pRes == 0); -// -// pFinalRes->pRes = calloc(num, POINTER_BYTES); -// pFinalRes->num = 0; -// -// // sort according to address -// tSkipListNode **pLeftNodes = (tSkipListNode **)pLeft->pRes; -// qsort(pLeftNodes, pLeft->num, sizeof(pLeft->pRes[0]), compareByAddr); -// -// tSkipListNode **pRightNodes = (tSkipListNode **)pRight->pRes; -// qsort(pRightNodes, pRight->num, sizeof(pRight->pRes[0]), compareByAddr); -// -// int32_t i = 0, j = 0; -// // merge two sorted arrays in O(n) time -// while (i < pLeft->num && j < pRight->num) { -// int64_t ret = (int64_t)pLeftNodes[i] - (int64_t)pRightNodes[j]; -// -// if (ret < 0) { -// i++; -// } else if (ret > 0) { -// j++; -// } else { // pNode->key > pkey[i] -// pFinalRes->pRes[pFinalRes->num++] = pRightNodes[j]; -// i++; -// j++; -// } -// } -// -// return pFinalRes->num; - return 0; -} - -/* - * traverse the result and apply the function to each item to check if the item is qualified or not - */ -static UNUSED_FUNC void tArrayTraverse(tExprNode *pExpr, __result_filter_fn_t fp, SArray *pResult) { - assert(pExpr->_node.pLeft->nodeType == TSQL_NODE_COL && pExpr->_node.pRight->nodeType == TSQL_NODE_VALUE && fp != NULL); - - // scan the result array list and check for each item in the list - for (int32_t i = 0; i < taosArrayGetSize(pResult); ++i) { - void* item = taosArrayGet(pResult, i); - if (fp(item, pExpr->_node.info)) { - i++; - } else { - taosArrayRemove(pResult, i); - } - } -} - static bool filterItem(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param) { tExprNode *pLeft = pExpr->_node.pLeft; tExprNode *pRight = pExpr->_node.pRight; @@ -649,32 +263,6 @@ static bool filterItem(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *p return param->nodeFilterFn(pItem, pExpr->_node.info); } -/** - * Apply the filter expression on non-indexed tag columns to each element in the result list, which is 
generated - * by filtering on indexed tag column. So the whole result set only needs to be iterated once to generate - * result that is satisfied to the filter expression, no matter how the filter expression consisting of. - * - * @param pExpr filter expression on non-indexed tag columns. - * @param pResult results from filter on the indexed tag column, which is usually the first tag column - * @param pSchema tag schemas - * @param fp filter callback function - */ -static UNUSED_FUNC void exprTreeTraverseImpl(tExprNode *pExpr, SArray *pResult, SExprTraverseSupp *param) { - size_t size = taosArrayGetSize(pResult); - - SArray* array = taosArrayInit(size, POINTER_BYTES); - for (int32_t i = 0; i < size; ++i) { - void *pItem = taosArrayGetP(pResult, i); - - if (filterItem(pExpr, pItem, param)) { - taosArrayPush(array, &pItem); - } - } - - taosArrayCopy(pResult, array); - taosArrayDestroy(array); -} - static void tSQLBinaryTraverseOnSkipList(tExprNode *pExpr, SArray *pResult, SSkipList *pSkipList, SExprTraverseSupp *param ) { SSkipListIterator* iter = tSkipListCreateIter(pSkipList); @@ -750,7 +338,7 @@ void tExprTreeTraverse(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, S //apply the hierarchical expression to every node in skiplist for find the qualified nodes tSQLBinaryTraverseOnSkipList(pExpr, result, pSkipList, param); - #if 0 +#if 0 /* * (weight == 1 && pExpr->nSQLBinaryOptr == TSDB_RELATION_AND) is handled here * @@ -972,6 +560,7 @@ tExprNode* exprTreeFromBinary(const void* data, size_t size) { if (size == 0) { return NULL; } + SBufferReader br = tbufInitReader(data, size, false); return exprTreeFromBinaryImpl(&br); } @@ -995,10 +584,7 @@ tExprNode* exprTreeFromTableName(const char* tbnameCond) { SSchema* pSchema = exception_calloc(1, sizeof(SSchema)); left->pSchema = pSchema; - pSchema->type = TSDB_DATA_TYPE_BINARY; - pSchema->bytes = TSDB_TABLE_NAME_LEN - 1; - strcpy(pSchema->name, TSQL_TBNAME_L); - pSchema->colId = -1; + *pSchema = tscGetTbnameColumnSchema(); tExprNode* right = exception_calloc(1, sizeof(tExprNode)); expr->_node.pRight = right; From 9487c1b9c634a2e631d35aa2fefc2d4ea2e32bf4 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 9 Nov 2020 16:24:25 +0800 Subject: [PATCH 05/52] [TD-1978] --- src/client/src/tscSQLParser.c | 293 ++++++++++++++++++++-------------- src/common/inc/tname.h | 4 - src/query/inc/qAst.h | 3 +- src/query/inc/qSqlparser.h | 96 +++++------ src/query/inc/sql.y | 50 +++--- src/query/src/qParserImpl.c | 190 +++++++--------------- src/query/src/sql.c | 105 ++++++------ src/util/inc/tarray.h | 14 ++ src/util/src/tarray.c | 16 ++ 9 files changed, 375 insertions(+), 396 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index fe4416dee9..f76a373f23 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -56,11 +56,11 @@ static SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t outputIndex, in static int32_t setShowInfo(SSqlObj* pSql, SSqlInfo* pInfo); static char* getAccountId(SSqlObj* pSql); -static bool has(tFieldList* pFieldList, int32_t startIdx, const char* name); +static bool has(SArray* pFieldList, int32_t startIdx, const char* name); static void getCurrentDBName(SSqlObj* pSql, SStrToken* pDBToken); static bool hasSpecifyDB(SStrToken* pTableName); -static bool validateTableColumnInfo(tFieldList* pFieldList, SSqlCmd* pCmd); -static bool validateTagParams(tFieldList* pTagsList, tFieldList* pFieldList, SSqlCmd* pCmd); +static bool validateTableColumnInfo(SArray* pFieldList, 
SSqlCmd* pCmd); +static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pCmd); static int32_t setObjFullName(char* fullName, const char* account, SStrToken* pDB, SStrToken* tableName, int32_t* len); @@ -80,7 +80,7 @@ static bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQuer static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery); static void setColumnOffsetValueInResultset(SQueryInfo* pQueryInfo); -static int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd* pCmd); +static int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd); static int32_t parseIntervalClause(SSqlObj* pSql, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql); static int32_t parseOffsetClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql); @@ -905,7 +905,7 @@ int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pzTableNa return TSDB_CODE_SUCCESS; } -static bool validateTableColumnInfo(tFieldList* pFieldList, SSqlCmd* pCmd) { +static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd) { assert(pFieldList != NULL); const char* msg = "illegal number of columns"; @@ -917,20 +917,22 @@ static bool validateTableColumnInfo(tFieldList* pFieldList, SSqlCmd* pCmd) { const char* msg6 = "invalid column name"; // number of fields no less than 2 - if (pFieldList->nField <= 1 || pFieldList->nField > TSDB_MAX_COLUMNS) { + size_t numOfCols = taosArrayGetSize(pFieldList); + if (numOfCols <= 1 || numOfCols > TSDB_MAX_COLUMNS) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); return false; } // first column must be timestamp - if (pFieldList->p[0].type != TSDB_DATA_TYPE_TIMESTAMP) { + TAOS_FIELD* pField = taosArrayGet(pFieldList, 0); + if (pField->type != TSDB_DATA_TYPE_TIMESTAMP) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); return false; } int32_t nLen = 0; - for (int32_t i = 0; i < pFieldList->nField; ++i) { - TAOS_FIELD* pField = &pFieldList->p[i]; + for (int32_t i = 0; i < numOfCols; ++i) { + pField = taosArrayGet(pFieldList, i); if (pField->bytes == 0) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); @@ -954,7 +956,7 @@ static bool validateTableColumnInfo(tFieldList* pFieldList, SSqlCmd* pCmd) { } // field name must be unique - if (has(pFieldList, i + 1, pFieldList->p[i].name) == true) { + if (has(pFieldList, i + 1, pField->name) == true) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); return false; } @@ -971,7 +973,7 @@ static bool validateTableColumnInfo(tFieldList* pFieldList, SSqlCmd* pCmd) { return true; } -static bool validateTagParams(tFieldList* pTagsList, tFieldList* pFieldList, SSqlCmd* pCmd) { +static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pCmd) { assert(pTagsList != NULL); const char* msg1 = "invalid number of tag columns"; @@ -983,18 +985,21 @@ static bool validateTagParams(tFieldList* pTagsList, tFieldList* pFieldList, SSq const char* msg7 = "invalid binary/nchar tag length"; // number of fields at least 1 - if (pTagsList->nField < 1 || pTagsList->nField > TSDB_MAX_TAGS) { + size_t numOfTags = taosArrayGetSize(pTagsList); + if (numOfTags < 1 || numOfTags > TSDB_MAX_TAGS) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); return false; } int32_t nLen = 0; - for (int32_t i = 0; i < pTagsList->nField; ++i) { - if (pTagsList->p[i].bytes == 0) { + for (int32_t i = 0; i < numOfTags; ++i) { + TAOS_FIELD* p = taosArrayGet(pTagsList, i); + if (p->bytes == 0) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7); 
return false; } - nLen += pTagsList->p[i].bytes; + + nLen += p->bytes; } // max tag row length must be less than TSDB_MAX_TAGS_LEN @@ -1004,37 +1009,41 @@ static bool validateTagParams(tFieldList* pTagsList, tFieldList* pFieldList, SSq } // field name must be unique - for (int32_t i = 0; i < pTagsList->nField; ++i) { - if (has(pFieldList, 0, pTagsList->p[i].name) == true) { + for (int32_t i = 0; i < numOfTags; ++i) { + TAOS_FIELD* p = taosArrayGet(pTagsList, i); + + if (has(pFieldList, 0, p->name) == true) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); return false; } } /* timestamp in tag is not allowed */ - for (int32_t i = 0; i < pTagsList->nField; ++i) { - if (pTagsList->p[i].type == TSDB_DATA_TYPE_TIMESTAMP) { + for (int32_t i = 0; i < numOfTags; ++i) { + TAOS_FIELD* p = taosArrayGet(pTagsList, i); + + if (p->type == TSDB_DATA_TYPE_TIMESTAMP) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); return false; } - if (pTagsList->p[i].type < TSDB_DATA_TYPE_BOOL || pTagsList->p[i].type > TSDB_DATA_TYPE_NCHAR) { + if (p->type < TSDB_DATA_TYPE_BOOL || p->type > TSDB_DATA_TYPE_NCHAR) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); return false; } - if ((pTagsList->p[i].type == TSDB_DATA_TYPE_BINARY && pTagsList->p[i].bytes <= 0) || - (pTagsList->p[i].type == TSDB_DATA_TYPE_NCHAR && pTagsList->p[i].bytes <= 0)) { + if ((p->type == TSDB_DATA_TYPE_BINARY && p->bytes <= 0) || + (p->type == TSDB_DATA_TYPE_NCHAR && p->bytes <= 0)) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7); return false; } - if (validateColumnName(pTagsList->p[i].name) != TSDB_CODE_SUCCESS) { + if (validateColumnName(p->name) != TSDB_CODE_SUCCESS) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6); return false; } - if (has(pTagsList, i + 1, pTagsList->p[i].name) == true) { + if (has(pTagsList, i + 1, p->name) == true) { invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); return false; } @@ -1181,9 +1190,10 @@ bool validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) { } /* is contained in pFieldList or not */ -static bool has(tFieldList* pFieldList, int32_t startIdx, const char* name) { - for (int32_t j = startIdx; j < pFieldList->nField; ++j) { - TAOS_FIELD* field = pFieldList->p + j; +static bool has(SArray* pFieldList, int32_t startIdx, const char* name) { + size_t numOfCols = taosArrayGetSize(pFieldList); + for (int32_t j = startIdx; j < numOfCols; ++j) { + TAOS_FIELD* field = taosArrayGet(pFieldList, j); if (strncasecmp(name, field->name, sizeof(field->name) - 1) == 0) return true; } @@ -2796,7 +2806,7 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery) { return true; } -int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd* pCmd) { +int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd) { const char* msg1 = "too many columns in group by clause"; const char* msg2 = "invalid column name in group by clause"; const char* msg3 = "columns from one table allowed as group by columns"; @@ -2816,8 +2826,8 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd* pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES); } - pQueryInfo->groupbyExpr.numOfGroupCols = pList->nExpr; - if (pList->nExpr > TSDB_MAX_TAGS) { + pQueryInfo->groupbyExpr.numOfGroupCols = taosArrayGetSize(pList); + if (pQueryInfo->groupbyExpr.numOfGroupCols > TSDB_MAX_TAGS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } @@ -2830,9 +2840,12 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, 
SSqlCmd* SSchema s = tscGetTbnameColumnSchema(); int32_t tableIndex = COLUMN_INDEX_INITIAL_VAL; - - for (int32_t i = 0; i < pList->nExpr; ++i) { - tVariant* pVar = &pList->a[i].pVar; + + size_t num = taosArrayGetSize(pList); + for (int32_t i = 0; i < num; ++i) { + tVariantListItem * pItem = taosArrayGet(pList, i); + tVariant* pVar = &pItem->pVar; + SStrToken token = {pVar->nLen, pVar->nType, pVar->pz}; SColumnIndex index = COLUMN_INDEX_INITIALIZER; @@ -2893,7 +2906,7 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd* taosArrayPush(pGroupExpr->columnInfo, &colIndex); pQueryInfo->groupbyExpr.orderType = TSDB_ORDER_ASC; - if (i == 0 && pList->nExpr > 1) { + if (i == 0 && num > 1) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7); } } @@ -4407,8 +4420,8 @@ int32_t tsRewriteFieldNameIfNecessary(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { } int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySQL) { - tVariantList* pFillToken = pQuerySQL->fillType; - tVariantListItem* pItem = &pFillToken->a[0]; + SArray* pFillToken = pQuerySQL->fillType; + tVariantListItem* pItem = taosArrayGet(pFillToken, 0); const int32_t START_INTERPO_COL_IDX = 1; @@ -4448,12 +4461,13 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery } else if (strncasecmp(pItem->pVar.pz, "value", 5) == 0 && pItem->pVar.nLen == 5) { pQueryInfo->fillType = TSDB_FILL_SET_VALUE; - if (pFillToken->nExpr == 1) { + size_t num = taosArrayGetSize(pFillToken); + if (num == 1) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } int32_t startPos = 1; - int32_t numOfFillVal = pFillToken->nExpr - 1; + int32_t numOfFillVal = num - 1; /* for point interpolation query, we do not have the timestamp column */ if (tscIsPointInterpQuery(pQueryInfo)) { @@ -4463,7 +4477,7 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery numOfFillVal = (int32_t)size; } } else { - numOfFillVal = (pFillToken->nExpr > (int32_t)size) ? (int32_t)size : pFillToken->nExpr; + numOfFillVal = (num > (int32_t)size) ? (int32_t)size : num; } int32_t j = 1; @@ -4476,14 +4490,15 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery continue; } - int32_t ret = tVariantDump(&pFillToken->a[j].pVar, (char*)&pQueryInfo->fillVal[i], pFields->type, true); + tVariant* p = taosArrayGet(pFillToken, j); + int32_t ret = tVariantDump(p, (char*)&pQueryInfo->fillVal[i], pFields->type, true); if (ret != TSDB_CODE_SUCCESS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); } } - if ((pFillToken->nExpr < size) || ((pFillToken->nExpr - 1 < size) && (tscIsPointInterpQuery(pQueryInfo)))) { - tVariantListItem* lastItem = &pFillToken->a[pFillToken->nExpr - 1]; + if ((num < size) || ((num - 1 < size) && (tscIsPointInterpQuery(pQueryInfo)))) { + tVariantListItem* lastItem = taosArrayGetLast(pFillToken); for (int32_t i = numOfFillVal; i < size; ++i) { TAOS_FIELD* pFields = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); @@ -4532,7 +4547,7 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu return TSDB_CODE_SUCCESS; } - tVariantList* pSortorder = pQuerySql->pSortOrder; + SArray* pSortorder = pQuerySql->pSortOrder; /* * for table query, there is only one or none order option is allowed, which is the @@ -4540,18 +4555,19 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu * * for super table query, the order option must be less than 3. 
*/ + size_t size = taosArrayGetSize(pSortorder); if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) { - if (pSortorder->nExpr > 1) { + if (size > 1) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0); } } else { - if (pSortorder->nExpr > 2) { + if (size > 2) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } } // handle the first part of order by - tVariant* pVar = &pSortorder->a[0].pVar; + tVariant* pVar = taosArrayGet(pSortorder, 0); // e.g., order by 1 asc, return directly with out further check. if (pVar->nType >= TSDB_DATA_TYPE_TINYINT && pVar->nType <= TSDB_DATA_TYPE_BIGINT) { @@ -4594,10 +4610,13 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu assert(!(orderByTags && orderByTS)); } - if (pSortorder->nExpr == 1) { + size_t s = taosArrayGetSize(pSortorder); + if (s == 1) { if (orderByTags) { pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); - pQueryInfo->groupbyExpr.orderType = pQuerySql->pSortOrder->a[0].sortOrder; + + tVariantListItem* p1 = taosArrayGet(pQuerySql->pSortOrder, 0); + pQueryInfo->groupbyExpr.orderType = p1->sortOrder; } else if (isTopBottomQuery(pQueryInfo)) { /* order of top/bottom query in interval is not valid */ SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0); @@ -4608,11 +4627,14 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - pQueryInfo->order.order = pQuerySql->pSortOrder->a[0].sortOrder; + tVariantListItem* p1 = taosArrayGet(pQuerySql->pSortOrder, 0); + pQueryInfo->order.order = p1->sortOrder; pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; return TSDB_CODE_SUCCESS; } else { - pQueryInfo->order.order = pSortorder->a[0].sortOrder; + tVariantListItem* p1 = taosArrayGet(pQuerySql->pSortOrder, 0); + + pQueryInfo->order.order = p1->sortOrder; pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; // orderby ts query on super table @@ -4622,16 +4644,18 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu } } - if (pSortorder->nExpr == 2) { + if (s == 2) { + tVariantListItem *pItem = taosArrayGet(pQuerySql->pSortOrder, 0); if (orderByTags) { pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); - pQueryInfo->groupbyExpr.orderType = pQuerySql->pSortOrder->a[0].sortOrder; + pQueryInfo->groupbyExpr.orderType = pItem->sortOrder; } else { - pQueryInfo->order.order = pSortorder->a[0].sortOrder; + pQueryInfo->order.order = pItem->sortOrder; pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; } - tVariant* pVar2 = &pSortorder->a[1].pVar; + pItem = taosArrayGet(pQuerySql->pSortOrder, 1); + tVariant* pVar2 = &pItem->pVar; SStrToken cname = {pVar2->nLen, pVar2->nType, pVar2->pz}; if (getColumnIndexByName(pCmd, &cname, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); @@ -4640,7 +4664,8 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } else { - pQueryInfo->order.order = pSortorder->a[1].sortOrder; + tVariantListItem* p1 = taosArrayGet(pSortorder, 1); + pQueryInfo->order.order = p1->sortOrder; pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; } } @@ -4664,12 +4689,14 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, 
SQuerySQL* pQu return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - pQueryInfo->order.order = pQuerySql->pSortOrder->a[0].sortOrder; + tVariantListItem* pItem = taosArrayGet(pQuerySql->pSortOrder, 0); + pQueryInfo->order.order = pItem->sortOrder; pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; return TSDB_CODE_SUCCESS; } - pQueryInfo->order.order = pQuerySql->pSortOrder->a[0].sortOrder; + tVariantListItem* pItem = taosArrayGet(pQuerySql->pSortOrder, 0); + pQueryInfo->order.order = pItem->sortOrder; } return TSDB_CODE_SUCCESS; @@ -4737,27 +4764,28 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { } if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN) { - tFieldList* pFieldList = pAlterSQL->pAddColumns; - if (pFieldList->nField > 1) { + SArray* pFieldList = pAlterSQL->pAddColumns; + if (taosArrayGetSize(pFieldList) > 1) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); } - if (!validateOneTags(pCmd, &pFieldList->p[0])) { + TAOS_FIELD* p = taosArrayGet(pFieldList, 0); + if (!validateOneTags(pCmd, p)) { return TSDB_CODE_TSC_INVALID_SQL; } - tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &pFieldList->p[0]); + tscFieldInfoAppend(&pQueryInfo->fieldsInfo, p); } else if (pAlterSQL->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN) { if (tscGetNumOfTags(pTableMeta) == 1) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7); } // numOfTags == 1 - if (pAlterSQL->varList->nExpr > 1) { + if (taosArrayGetSize(pAlterSQL->varList) > 1) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg8); } - tVariantListItem* pItem = &pAlterSQL->varList->a[0]; + tVariantListItem* pItem = taosArrayGet(pAlterSQL->varList, 0); if (pItem->pVar.nLen >= TSDB_COL_NAME_LEN) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9); } @@ -4782,13 +4810,13 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name1, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); } else if (pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN) { - tVariantList* pVarList = pAlterSQL->varList; - if (pVarList->nExpr > 2) { + SArray* pVarList = pAlterSQL->varList; + if (taosArrayGetSize(pVarList) > 2) { return TSDB_CODE_TSC_INVALID_SQL; } - tVariantListItem* pSrcItem = &pAlterSQL->varList->a[0]; - tVariantListItem* pDstItem = &pAlterSQL->varList->a[1]; + tVariantListItem* pSrcItem = taosArrayGet(pAlterSQL->varList, 0); + tVariantListItem* pDstItem = taosArrayGet(pAlterSQL->varList, 1); if (pSrcItem->pVar.nLen >= TSDB_COL_NAME_LEN || pDstItem->pVar.nLen >= TSDB_COL_NAME_LEN) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9); @@ -4811,13 +4839,17 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg19); } + tVariantListItem* pItem = taosArrayGet(pVarList, 0); + char name[TSDB_COL_NAME_LEN] = {0}; - strncpy(name, pVarList->a[0].pVar.pz, pVarList->a[0].pVar.nLen); + strncpy(name, pItem->pVar.pz, pItem->pVar.nLen); TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); + pItem = taosArrayGet(pVarList, 1); memset(name, 0, tListLen(name)); - strncpy(name, pVarList->a[1].pVar.pz, pVarList->a[1].pVar.nLen); + + strncpy(name, pItem->pVar.pz, pItem->pVar.nLen); f = tscCreateField(TSDB_DATA_TYPE_INT, name, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); } else if 
(pAlterSQL->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) { @@ -4825,12 +4857,12 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { // the following is used to handle tags value for table created according to super table pCmd->command = TSDB_SQL_UPDATE_TAGS_VAL; - tVariantList* pVarList = pAlterSQL->varList; - tVariant* pTagName = &pVarList->a[0].pVar; + SArray* pVarList = pAlterSQL->varList; + tVariantListItem* item = taosArrayGet(pVarList, 0); int16_t numOfTags = tscGetNumOfTags(pTableMeta); SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER; - SStrToken name = {.type = TK_STRING, .z = pTagName->pz, .n = pTagName->nLen}; + SStrToken name = {.type = TK_STRING, .z = item->pVar.pz, .n = item->pVar.nLen}; if (getColumnIndexByName(pCmd, &name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } @@ -4839,8 +4871,9 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg12); } + tVariantListItem* pItem = taosArrayGet(pVarList, 1); SSchema* pTagsSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex.columnIndex); - if (tVariantDump(&pVarList->a[1].pVar, pAlterSQL->tagData.data, pTagsSchema->type, true) != TSDB_CODE_SUCCESS) { + if (tVariantDump(&pItem->pVar, pAlterSQL->tagData.data, pTagsSchema->type, true) != TSDB_CODE_SUCCESS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg13); } @@ -4885,7 +4918,8 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { } // copy the tag value to msg body - tVariantDump(&pVarList->a[1].pVar, pUpdateMsg->data + schemaLen, pTagsSchema->type, true); + pItem = taosArrayGet(pVarList, 1); + tVariantDump(&pItem->pVar, pUpdateMsg->data + schemaLen, pTagsSchema->type, true); int32_t len = 0; if (pTagsSchema->type != TSDB_DATA_TYPE_BINARY && pTagsSchema->type != TSDB_DATA_TYPE_NCHAR) { @@ -4900,27 +4934,29 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { pUpdateMsg->head.contLen = htonl(total); } else if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_COLUMN) { - tFieldList* pFieldList = pAlterSQL->pAddColumns; - if (pFieldList->nField > 1) { + SArray* pFieldList = pAlterSQL->pAddColumns; + if (taosArrayGetSize(pFieldList) > 1) { const char* msg = "only support add one column"; return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); } - if (!validateOneColumn(pCmd, &pFieldList->p[0])) { + TAOS_FIELD* p = taosArrayGet(pFieldList, 0); + if (!validateOneColumn(pCmd, p)) { return TSDB_CODE_TSC_INVALID_SQL; } - tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &pFieldList->p[0]); + tscFieldInfoAppend(&pQueryInfo->fieldsInfo, p); } else if (pAlterSQL->type == TSDB_ALTER_TABLE_DROP_COLUMN) { if (tscGetNumOfColumns(pTableMeta) == TSDB_MIN_COLUMNS) { // return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg15); } - if (pAlterSQL->varList->nExpr > 1) { + size_t size = taosArrayGetSize(pAlterSQL->varList); + if (size > 1) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg16); } - tVariantListItem* pItem = &pAlterSQL->varList->a[0]; + tVariantListItem* pItem = taosArrayGet(pAlterSQL->varList, 0); SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER; SStrToken name = {.type = TK_STRING, .z = pItem->pVar.pz, .n = pItem->pVar.nLen}; @@ -5291,21 +5327,28 @@ static int32_t setKeepOption(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDBInfo* p pMsg->daysToKeep1 = htonl(-1); pMsg->daysToKeep2 = htonl(-1); - tVariantList* pKeep = pCreateDb->keep; + SArray* pKeep = pCreateDb->keep; if (pKeep != NULL) { - switch 
(pKeep->nExpr) { - case 1: - pMsg->daysToKeep = htonl((int32_t)pKeep->a[0].pVar.i64Key); + size_t s = taosArrayGetSize(pKeep); + tVariantListItem* p0 = taosArrayGet(pKeep, 0); + switch (s) { + case 1: { + pMsg->daysToKeep = htonl((int32_t)p0->pVar.i64Key); + } break; case 2: { - pMsg->daysToKeep = htonl((int32_t)pKeep->a[0].pVar.i64Key); - pMsg->daysToKeep1 = htonl((int32_t)pKeep->a[1].pVar.i64Key); + tVariantListItem* p1 = taosArrayGet(pKeep, 1); + pMsg->daysToKeep = htonl((int32_t)p0->pVar.i64Key); + pMsg->daysToKeep1 = htonl((int32_t)p1->pVar.i64Key); break; } case 3: { - pMsg->daysToKeep = htonl((int32_t)pKeep->a[0].pVar.i64Key); - pMsg->daysToKeep1 = htonl((int32_t)pKeep->a[1].pVar.i64Key); - pMsg->daysToKeep2 = htonl((int32_t)pKeep->a[2].pVar.i64Key); + tVariantListItem* p1 = taosArrayGet(pKeep, 1); + tVariantListItem* p2 = taosArrayGet(pKeep, 2); + + pMsg->daysToKeep = htonl((int32_t)p0->pVar.i64Key); + pMsg->daysToKeep1 = htonl((int32_t)p1->pVar.i64Key); + pMsg->daysToKeep2 = htonl((int32_t)p2->pVar.i64Key); break; } default: { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); } @@ -5991,8 +6034,8 @@ int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSqlInfo* p SCreateTableSQL* pCreateTable = pInfo->pCreateTableInfo; - tFieldList* pFieldList = pCreateTable->colInfo.pColumns; - tFieldList* pTagList = pCreateTable->colInfo.pTagColumns; + SArray* pFieldList = pCreateTable->colInfo.pColumns; + SArray* pTagList = pCreateTable->colInfo.pTagColumns; assert(pFieldList != NULL); @@ -6014,18 +6057,22 @@ int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSqlInfo* p } int32_t col = 0; - for (; col < pFieldList->nField; ++col) { - tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &pFieldList->p[col]); + size_t numOfFields = taosArrayGetSize(pFieldList); + + for (; col < numOfFields; ++col) { + TAOS_FIELD* p = taosArrayGet(pTagList, col); + tscFieldInfoAppend(&pQueryInfo->fieldsInfo, p); } - pCmd->numOfCols = (int16_t)pFieldList->nField; + pCmd->numOfCols = (int16_t)numOfFields; if (pTagList != NULL) { // create super table[optional] - for (int32_t i = 0; i < pTagList->nField; ++i) { - tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &pTagList->p[i]); + for (int32_t i = 0; i < numOfFields; ++i) { + TAOS_FIELD* p = taosArrayGet(pTagList, i); + tscFieldInfoAppend(&pQueryInfo->fieldsInfo, p); } - pCmd->count = pTagList->nField; + pCmd->count = numOfFields; } return TSDB_CODE_SUCCESS; @@ -6066,14 +6113,15 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) { // get meter meta from mnode tstrncpy(pCreateTable->usingInfo.tagdata.name, pStableMeterMetaInfo->name, sizeof(pCreateTable->usingInfo.tagdata.name)); - tVariantList* pList = pInfo->pCreateTableInfo->usingInfo.pTagVals; + SArray* pList = pInfo->pCreateTableInfo->usingInfo.pTagVals; code = tscGetTableMeta(pSql, pStableMeterMetaInfo); if (code != TSDB_CODE_SUCCESS) { return code; } - if (tscGetNumOfTags(pStableMeterMetaInfo->pTableMeta) != pList->nExpr) { + size_t size = taosArrayGetSize(pList); + if (tscGetNumOfTags(pStableMeterMetaInfo->pTableMeta) != size) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); } @@ -6087,18 +6135,19 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) { } int32_t ret = TSDB_CODE_SUCCESS; - for (int32_t i = 0; i < pList->nExpr; ++i) { + for (int32_t i = 0; i < size; ++i) { SSchema* pSchema = &pTagSchema[i]; - + tVariantListItem* pItem = taosArrayGet(pList, i); + char tagVal[TSDB_MAX_TAGS_LEN]; if (pSchema->type == TSDB_DATA_TYPE_BINARY 
|| pSchema->type == TSDB_DATA_TYPE_NCHAR) { - if (pList->a[i].pVar.nLen > pSchema->bytes) { + if (pItem->pVar.nLen > pSchema->bytes) { tdDestroyKVRowBuilder(&kvRowBuilder); return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } } - ret = tVariantDump(&(pList->a[i].pVar), tagVal, pSchema->type, true); + ret = tVariantDump(&(pItem->pVar), tagVal, pSchema->type, true); // check again after the convert since it may be converted from binary to nchar. if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) { @@ -6165,13 +6214,13 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - tVariantList* pSrcMeterName = pInfo->pCreateTableInfo->pSelect->from; - if (pSrcMeterName == NULL || pSrcMeterName->nExpr == 0) { + SArray* pSrcMeterName = pInfo->pCreateTableInfo->pSelect->from; + if (pSrcMeterName == NULL || taosArrayGetSize(pSrcMeterName) == 0) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6); } - tVariant* pVar = &pSrcMeterName->a[0].pVar; - SStrToken srcToken = {.z = pVar->pz, .n = pVar->nLen, .type = TK_STRING}; + tVariantListItem* p1 = taosArrayGet(pSrcMeterName, 0); + SStrToken srcToken = {.z = p1->pVar.pz, .n = p1->pVar.nLen, .type = TK_STRING}; if (tscValidateName(&srcToken) != TSDB_CODE_SUCCESS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } @@ -6236,7 +6285,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } - tVariantListItem* pItem = &pQuerySql->fillType->a[0]; + tVariantListItem* pItem = taosArrayGet(pQuerySql->fillType, 0); if (pItem->pVar.nType == TSDB_DATA_TYPE_BINARY) { if (!((strncmp(pItem->pVar.pz, "none", 4) == 0 && pItem->pVar.nLen == 4) || (strncmp(pItem->pVar.pz, "null", 4) == 0 && pItem->pVar.nLen == 4))) { @@ -6251,7 +6300,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) { } int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { - assert(pQuerySql != NULL && (pQuerySql->from == NULL || pQuerySql->from->nExpr > 0)); + assert(pQuerySql != NULL && (pQuerySql->from == NULL || taosArrayGetSize(pQuerySql->from) > 0)); const char* msg0 = "invalid table name"; const char* msg2 = "point interpolation query needs timestamp"; @@ -6293,19 +6342,21 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { return doLocalQueryProcess(pCmd, pQueryInfo, pQuerySql); } - if (pQuerySql->from->nExpr > TSDB_MAX_JOIN_TABLE_NUM * 2) { + size_t fromSize = taosArrayGetSize(pQuerySql->from); + if (fromSize > TSDB_MAX_JOIN_TABLE_NUM * 2) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7); } pQueryInfo->command = TSDB_SQL_SELECT; - if (pQuerySql->from->nExpr > 4) { + if (fromSize > 4) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg10); } // set all query tables, which are maybe more than one. 
- for (int32_t i = 0; i < pQuerySql->from->nExpr; ) { - tVariant* pTableItem = &pQuerySql->from->a[i].pVar; + for (int32_t i = 0; i < fromSize; ) { + tVariantListItem* item = taosArrayGet(pQuerySql->from, i); + tVariant* pTableItem = &item->pVar; if (pTableItem->nType != TSDB_DATA_TYPE_BINARY) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0); @@ -6330,21 +6381,21 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { return code; } - tVariant* pTableItem1 = &pQuerySql->from->a[i + 1].pVar; - if (pTableItem1->nType != TSDB_DATA_TYPE_BINARY) { + tVariantListItem* p1 = taosArrayGet(pQuerySql->from, i); + if (p1->pVar.nType != TSDB_DATA_TYPE_BINARY) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg11); } - SStrToken aliasName = {.z = pTableItem1->pz, .n = pTableItem1->nLen, .type = TK_STRING}; + SStrToken aliasName = {.z = p1->pVar.pz, .n = p1->pVar.nLen, .type = TK_STRING}; if (tscValidateName(&aliasName) != TSDB_CODE_SUCCESS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg11); } // has no table alias name - if (memcmp(pTableItem->pz, pTableItem1->pz, pTableItem1->nLen) == 0) { + if (memcmp(pTableItem->pz, p1->pVar.pz, p1->pVar.nLen) == 0) { extractTableName(pTableMetaInfo1->name, pTableMetaInfo1->aliasName); } else { - tstrncpy(pTableMetaInfo1->aliasName, pTableItem1->pz, sizeof(pTableMetaInfo1->aliasName)); + tstrncpy(pTableMetaInfo1->aliasName, p1->pVar.pz, sizeof(pTableMetaInfo1->aliasName)); } code = tscGetTableMeta(pSql, pTableMetaInfo1); @@ -6355,7 +6406,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { i += 2; } - assert(pQueryInfo->numOfTables == pQuerySql->from->nExpr / 2); + assert(pQueryInfo->numOfTables == taosArrayGetSize(pQuerySql->from) / 2); bool isSTable = false; if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { @@ -6389,12 +6440,12 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { pQueryInfo->window.ekey = pQueryInfo->window.ekey / 1000; } } else { // set the time rang - if (pQuerySql->from->nExpr > 2) { // it is a join query, no wher clause is not allowed. + if (taosArrayGetSize(pQuerySql->from) > 2) { // it is a join query, no wher clause is not allowed. return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "condition missing for join query "); } } - int32_t joinQuery = (pQuerySql->from != NULL && pQuerySql->from->nExpr > 2); + int32_t joinQuery = (pQuerySql->from != NULL && taosArrayGetSize(pQuerySql->from) > 2); if (parseSelectClause(pCmd, index, pQuerySql->pSelection, isSTable, joinQuery) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; diff --git a/src/common/inc/tname.h b/src/common/inc/tname.h index 5563e590e7..6c48ca72f3 100644 --- a/src/common/inc/tname.h +++ b/src/common/inc/tname.h @@ -35,10 +35,6 @@ bool tscValidateTableNameLength(size_t len); SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters); -/** - * get the schema for the "tbname" column. 
it is a built column - * @return - */ SSchema tscGetTbnameColumnSchema(); #endif // TDENGINE_NAME_H diff --git a/src/query/inc/qAst.h b/src/query/inc/qAst.h index b05d08b567..28c1c7b838 100644 --- a/src/query/inc/qAst.h +++ b/src/query/inc/qAst.h @@ -74,8 +74,6 @@ typedef struct tExprNode { }; } tExprNode; -void tExprTreeDestroy(tExprNode **pExprs, void (*fp)(void*)); - void tExprTreeTraverse(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, SExprTraverseSupp *param); void tExprTreeCalcTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, void *param, int32_t order, @@ -87,6 +85,7 @@ tExprNode* exprTreeFromTableName(const char* tbnameCond); void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree); void tExprNodeDestroy(tExprNode *pNode, void (*fp)(void *)); +void tExprTreeDestroy(tExprNode **pExprs, void (*fp)(void*)); #ifdef __cplusplus } diff --git a/src/query/inc/qSqlparser.h b/src/query/inc/qSqlparser.h index aaa1bb66b0..c665a122dd 100644 --- a/src/query/inc/qSqlparser.h +++ b/src/query/inc/qSqlparser.h @@ -20,10 +20,10 @@ extern "C" { #endif -#include #include "taos.h" #include "taosmsg.h" #include "tstoken.h" +#include "tstrbuild.h" #include "tvariant.h" #define ParseTOKENTYPE SStrToken @@ -37,12 +37,6 @@ extern char tTokenTypeSwitcher[13]; (x) = tTokenTypeSwitcher[(x)]; \ } \ } while (0) - -typedef struct tFieldList { - int32_t nField; - int32_t nAlloc; - TAOS_FIELD *p; -} tFieldList; typedef struct SLimitVal { int64_t limit; @@ -59,12 +53,6 @@ typedef struct tVariantListItem { uint8_t sortOrder; } tVariantListItem; -typedef struct tVariantList { - int32_t nExpr; /* Number of expressions on the list */ - int32_t nAlloc; /* Number of entries allocated below */ - tVariantListItem *a; /* One entry for each expression */ -} tVariantList; - typedef struct SIntervalVal { SStrToken interval; SStrToken offset; @@ -72,16 +60,16 @@ typedef struct SIntervalVal { typedef struct SQuerySQL { struct tSQLExprList *pSelection; // select clause - tVariantList * from; // from clause + SArray * from; // from clause SArray struct tSQLExpr * pWhere; // where clause [optional] - tVariantList * pGroupby; // groupby clause, only for tags[optional] - tVariantList * pSortOrder; // orderby [optional] + SArray * pGroupby; // groupby clause, only for tags[optional], SArray + SArray * pSortOrder; // orderby [optional], SArray SStrToken interval; // interval [optional] SStrToken offset; // offset window [optional] SStrToken sliding; // sliding window [optional] SLimitVal limit; // limit offset [optional] SLimitVal slimit; // group limit offset [optional] - tVariantList * fillType; // fill type[optional] + SArray * fillType; // fill type[optional], SArray SStrToken selectToken; // sql string } SQuerySQL; @@ -91,26 +79,25 @@ typedef struct SCreateTableSQL { int8_t type; // create normal table/from super table/ stream struct { - tFieldList *pTagColumns; // for normal table, pTagColumns = NULL; - tFieldList *pColumns; + SArray *pTagColumns; // SArray + SArray *pColumns; // SArray } colInfo; struct { - SStrToken stableName; // super table name, for using clause - tVariantList *pTagVals; // create by using metric, tag value - STagData tagdata; + SStrToken stableName; // super table name, for using clause + SArray *pTagVals; // create by using metric, tag value + STagData tagdata; } usingInfo; - SQuerySQL *pSelect; + SQuerySQL *pSelect; } SCreateTableSQL; typedef struct SAlterTableSQL { SStrToken name; int16_t type; STagData tagData; - - tFieldList * pAddColumns; - tVariantList *varList; // set t=val 
or: change src dst + SArray *pAddColumns; // SArray + SArray *varList; // set t=val or: change src dst, SArray } SAlterTableSQL; typedef struct SCreateDBInfo { @@ -130,7 +117,7 @@ typedef struct SCreateDBInfo { SStrToken precision; bool ignoreExists; - tVariantList *keep; + SArray *keep; } SCreateDBInfo; typedef struct SCreateAcctSQL { @@ -168,7 +155,7 @@ typedef struct tDCLSQL { SCreateDBInfo dbOpt; SCreateAcctSQL acctOpt; SShowInfo showOpt; - SStrToken ip; + SStrToken ip; }; SUserInfo user; @@ -181,33 +168,32 @@ typedef struct SSubclauseInfo { // "UNION" multiple select sub-clause } SSubclauseInfo; typedef struct SSqlInfo { - int32_t type; - bool valid; + int32_t type; + bool valid; union { SCreateTableSQL *pCreateTableInfo; - SAlterTableSQL * pAlterInfo; - tDCLSQL * pDCLInfo; + SAlterTableSQL *pAlterInfo; + tDCLSQL *pDCLInfo; }; - SSubclauseInfo subclauseInfo; - char pzErrMsg[256]; + SSubclauseInfo subclauseInfo; + char pzErrMsg[256]; } SSqlInfo; typedef struct tSQLExpr { - // TK_FUNCTION: sql function, TK_LE: less than(binary expr) - uint32_t nSQLOptr; + uint32_t nSQLOptr; // TK_FUNCTION: sql function, TK_LE: less than(binary expr) // the full sql string of function(col, param), which is actually the raw // field name, since the function name is kept in nSQLOptr already - SStrToken operand; - SStrToken colInfo; // field id - tVariant val; // value only for string, float, int - + SStrToken operand; + SStrToken colInfo; // field id + tVariant val; // value only for string, float, int + SStrToken token; // original sql expr string + struct tSQLExpr *pLeft; // left child struct tSQLExpr *pRight; // right child struct tSQLExprList *pParam; // function parameters - SStrToken token; // original sql expr string } tSQLExpr; // used in select clause. select from xxx @@ -238,16 +224,9 @@ void Parse(void *yyp, int yymajor, ParseTOKENTYPE yyminor, SSqlInfo *); */ void ParseFree(void *p, void (*freeProc)(void *)); -tVariantList *tVariantListAppend(tVariantList *pList, tVariant *pVar, uint8_t sortOrder); - -tVariantList *tVariantListInsert(tVariantList *pList, tVariant *pVar, uint8_t sortOrder, int32_t index); - -tVariantList *tVariantListAppendToken(tVariantList *pList, SStrToken *pAliasToken, uint8_t sortOrder); -void tVariantListDestroy(tVariantList *pList); - -tFieldList *tFieldListAppend(tFieldList *pList, TAOS_FIELD *pField); - -void tFieldListDestroy(tFieldList *pList); +SArray *tVariantListAppend(SArray *pList, tVariant *pVar, uint8_t sortOrder); +SArray *tVariantListInsert(SArray *pList, tVariant *pVar, uint8_t sortOrder, int32_t index); +SArray *tVariantListAppendToken(SArray *pList, SStrToken *pAliasToken, uint8_t sortOrder); tSQLExpr *tSQLExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optType); @@ -257,17 +236,16 @@ tSQLExprList *tSQLExprListAppend(tSQLExprList *pList, tSQLExpr *pNode, SStrToken void tSQLExprListDestroy(tSQLExprList *pList); -SQuerySQL *tSetQuerySQLElems(SStrToken *pSelectToken, tSQLExprList *pSelection, tVariantList *pFrom, tSQLExpr *pWhere, - tVariantList *pGroupby, tVariantList *pSortOrder, SIntervalVal *pInterval, - SStrToken *pSliding, tVariantList *pFill, SLimitVal *pLimit, SLimitVal *pGLimit); +SQuerySQL *tSetQuerySQLElems(SStrToken *pSelectToken, tSQLExprList *pSelection, SArray *pFrom, tSQLExpr *pWhere, + SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, + SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pGLimit); -SCreateTableSQL *tSetCreateSQLElems(tFieldList *pCols, tFieldList *pTags, SStrToken *pMetricName, - tVariantList 
*pTagVals, SQuerySQL *pSelect, int32_t type); +SCreateTableSQL *tSetCreateSQLElems(SArray *pCols, SArray *pTags, SStrToken *pMetricName, + SArray *pTagVals, SQuerySQL *pSelect, int32_t type); -void tSQLExprNodeDestroy(tSQLExpr *pExpr); -tSQLExpr *tSQLExprNodeClone(tSQLExpr *pExpr); +void tSQLExprNodeDestroy(tSQLExpr *pExpr); -SAlterTableSQL *tAlterTableSQLElems(SStrToken *pMeterName, tFieldList *pCols, tVariantList *pVals, int32_t type); +SAlterTableSQL *tAlterTableSQLElems(SStrToken *pMeterName, SArray *pCols, SArray *pVals, int32_t type); void destroyAllSelectClause(SSubclauseInfo *pSql); void doDestroyQuerySql(SQuerySQL *pSql); diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y index e5d1185330..e4021de22e 100644 --- a/src/query/inc/sql.y +++ b/src/query/inc/sql.y @@ -223,8 +223,8 @@ acct_optr(Y) ::= pps(C) tseries(D) storage(P) streams(F) qtime(Q) dbs(E) users(K Y.stat = M; } -%type keep {tVariantList*} -%destructor keep {tVariantListDestroy($$);} +%type keep {SArray*} +%destructor keep {taosArrayDestroy($$);} keep(Y) ::= KEEP tagitemlist(X). { Y = X; } cache(Y) ::= CACHE INTEGER(X). { Y = X; } @@ -324,10 +324,10 @@ create_table_args(A) ::= AS select(S). { } %type column{TAOS_FIELD} -%type columnlist{tFieldList*} -%destructor columnlist {tFieldListDestroy($$);} -columnlist(A) ::= columnlist(X) COMMA column(Y). {A = tFieldListAppend(X, &Y); } -columnlist(A) ::= column(X). {A = tFieldListAppend(NULL, &X);} +%type columnlist{SArray*} +%destructor columnlist {taosArrayDestroy($$);} +columnlist(A) ::= columnlist(X) COMMA column(Y). {A = taosArrayPush(X, &Y); } +columnlist(A) ::= column(X). {A = taosArrayInit(4, sizeof(TAOS_FIELD)); taosArrayPush(A, &X);} // The information used for a column is the name and type of column: // tinyint smallint int bigint float double bool timestamp binary(x) nchar(x) @@ -335,8 +335,8 @@ column(A) ::= ids(X) typename(Y). { tSQLSetColumnInfo(&A, &X, &Y); } -%type tagitemlist {tVariantList*} -%destructor tagitemlist {tVariantListDestroy($$);} +%type tagitemlist {SArray*} +%destructor tagitemlist {taosArrayDestroy($$);} %type tagitem {tVariant} tagitemlist(A) ::= tagitemlist(X) COMMA tagitem(Y). { A = tVariantListAppend(X, &Y, -1); } @@ -429,11 +429,11 @@ as(X) ::= ids(Y). { X = Y; } as(X) ::= . { X.n = 0; } // A complete FROM clause. -%type from {tVariantList*} +%type from {SArray*} // current not support query from no-table from(A) ::= FROM tablelist(X). {A = X;} -%type tablelist {tVariantList*} +%type tablelist {SArray*} tablelist(A) ::= ids(X) cpxName(Y). { toTSDBType(X.type); X.n += Y.n; @@ -473,15 +473,15 @@ interval_opt(N) ::= INTERVAL LP tmvar(E) RP. {N.interval = E; N.offset.n = 0; interval_opt(N) ::= INTERVAL LP tmvar(E) COMMA tmvar(O) RP. {N.interval = E; N.offset = O;} interval_opt(N) ::= . {memset(&N, 0, sizeof(N));} -%type fill_opt {tVariantList*} -%destructor fill_opt {tVariantListDestroy($$);} +%type fill_opt {SArray*} +%destructor fill_opt {taosArrayDestroy($$);} fill_opt(N) ::= . {N = 0; } fill_opt(N) ::= FILL LP ID(Y) COMMA tagitemlist(X) RP. { tVariant A = {0}; toTSDBType(Y.type); tVariantCreate(&A, &Y); - tVariantListInsert(X, &A, -1, 0); + taosArrayPush(X, &A); N = X; } @@ -494,11 +494,11 @@ fill_opt(N) ::= FILL LP ID(Y) RP. { sliding_opt(K) ::= SLIDING LP tmvar(E) RP. {K = E; } sliding_opt(K) ::= . 
{K.n = 0; K.z = NULL; K.type = 0; } -%type orderby_opt {tVariantList*} -%destructor orderby_opt {tVariantListDestroy($$);} +%type orderby_opt {SArray*} +%destructor orderby_opt {taosArrayDestroy($$);} -%type sortlist {tVariantList*} -%destructor sortlist {tVariantListDestroy($$);} +%type sortlist {SArray*} +%destructor sortlist {taosArrayDestroy($$);} %type sortitem {tVariant} %destructor sortitem {tVariantDestroy(&$$);} @@ -528,10 +528,10 @@ sortorder(A) ::= DESC. {A = TSDB_ORDER_DESC;} sortorder(A) ::= . {A = TSDB_ORDER_ASC;} //default is descend order //group by clause -%type groupby_opt {tVariantList*} -%destructor groupby_opt {tVariantListDestroy($$);} -%type grouplist {tVariantList*} -%destructor grouplist {tVariantListDestroy($$);} +%type groupby_opt {SArray*} +%destructor groupby_opt {taosArrayDestroy($$);} +%type grouplist {SArray*} +%destructor grouplist {taosArrayDestroy($$);} groupby_opt(A) ::= . {A = 0;} groupby_opt(A) ::= GROUP BY grouplist(X). {A = X;} @@ -654,7 +654,7 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) DROP COLUMN ids(A). { X.n += F.n; toTSDBType(A.type); - tVariantList* K = tVariantListAppendToken(NULL, &A, -1); + SArray* K = tVariantListAppendToken(NULL, &A, -1); SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN); setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); @@ -670,7 +670,7 @@ cmd ::= ALTER TABLE ids(X) cpxName(Z) DROP TAG ids(Y). { X.n += Z.n; toTSDBType(Y.type); - tVariantList* A = tVariantListAppendToken(NULL, &Y, -1); + SArray* A = tVariantListAppendToken(NULL, &Y, -1); SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN); setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); @@ -680,7 +680,7 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) CHANGE TAG ids(Y) ids(Z). { X.n += F.n; toTSDBType(Y.type); - tVariantList* A = tVariantListAppendToken(NULL, &Y, -1); + SArray* A = tVariantListAppendToken(NULL, &Y, -1); toTSDBType(Z.type); A = tVariantListAppendToken(A, &Z, -1); @@ -693,7 +693,7 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) SET TAG ids(Y) EQ tagitem(Z). 
{ X.n += F.n; toTSDBType(Y.type); - tVariantList* A = tVariantListAppendToken(NULL, &Y, -1); + SArray* A = tVariantListAppendToken(NULL, &Y, -1); A = tVariantListAppend(A, &Z, -1); SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL); diff --git a/src/query/src/qParserImpl.c b/src/query/src/qParserImpl.c index 7e8128f200..f457a201d4 100644 --- a/src/query/src/qParserImpl.c +++ b/src/query/src/qParserImpl.c @@ -15,11 +15,9 @@ #include "os.h" #include "qSqlparser.h" -#include "queryLog.h" #include "taosdef.h" #include "taosmsg.h" #include "tcmdtype.h" -#include "tglobal.h" #include "tstoken.h" #include "tstrbuild.h" #include "ttokendef.h" @@ -314,130 +312,56 @@ void tSQLExprDestroy(tSQLExpr *pExpr) { tSQLExprNodeDestroy(pExpr); } -static void *tVariantListExpand(tVariantList *pList) { - if (pList->nAlloc <= pList->nExpr) { // - int32_t newSize = (pList->nAlloc << 1) + 4; - - void *ptr = realloc(pList->a, newSize * sizeof(pList->a[0])); - if (ptr == 0) { - return NULL; - } - - pList->nAlloc = newSize; - pList->a = ptr; - } - - assert(pList->a != 0); - return pList; -} - -tVariantList *tVariantListAppend(tVariantList *pList, tVariant *pVar, uint8_t sortOrder) { +SArray *tVariantListAppendToken(SArray *pList, SStrToken *pToken, uint8_t order) { if (pList == NULL) { - pList = calloc(1, sizeof(tVariantList)); - } - - if (tVariantListExpand(pList) == NULL) { - return pList; - } - - if (pVar) { - tVariantListItem *pItem = &pList->a[pList->nExpr++]; - /* - * Here we do not employ the assign function, since we need the pz attribute of structure - * , which is the point to char string, to free it! - * - * Otherwise, the original pointer may be lost, which causes memory leak. - */ - memcpy(pItem, pVar, sizeof(tVariant)); - pItem->sortOrder = sortOrder; - } - return pList; -} - -tVariantList *tVariantListInsert(tVariantList *pList, tVariant *pVar, uint8_t sortOrder, int32_t index) { - if (pList == NULL || index >= pList->nExpr) { - return tVariantListAppend(NULL, pVar, sortOrder); - } - - if (tVariantListExpand(pList) == NULL) { - return pList; - } - - if (pVar) { - memmove(&pList->a[index + 1], &pList->a[index], sizeof(tVariantListItem) * (pList->nExpr - index)); - - tVariantListItem *pItem = &pList->a[index]; - /* - * Here we do not employ the assign function, since we need the pz attribute of structure - * , which is the point to char string, to free it! - * - * Otherwise, the original pointer may be lost, which causes memory leak. 
- */ - memcpy(pItem, pVar, sizeof(tVariant)); - pItem->sortOrder = sortOrder; - - pList->nExpr++; - } - - return pList; -} - -void tVariantListDestroy(tVariantList *pList) { - if (pList == NULL) return; - - for (int32_t i = 0; i < pList->nExpr; ++i) { - tVariantDestroy(&pList->a[i].pVar); - } - - free(pList->a); - free(pList); -} - -tVariantList *tVariantListAppendToken(tVariantList *pList, SStrToken *pToken, uint8_t sortOrder) { - if (pList == NULL) { - pList = calloc(1, sizeof(tVariantList)); - } - - if (tVariantListExpand(pList) == NULL) { - return pList; + pList = taosArrayInit(4, sizeof(tVariantListItem)); } if (pToken) { - tVariant t = {0}; - tVariantCreate(&t, pToken); + tVariantListItem item = {0}; + tVariantCreate(&item.pVar, pToken); + item.sortOrder = order; - tVariantListItem *pItem = &pList->a[pList->nExpr++]; - memcpy(pItem, &t, sizeof(tVariant)); - pItem->sortOrder = sortOrder; + taosArrayPush(pList, &item); } + return pList; } -tFieldList *tFieldListAppend(tFieldList *pList, TAOS_FIELD *pField) { - if (pList == NULL) pList = calloc(1, sizeof(tFieldList)); - - if (pList->nAlloc <= pList->nField) { // - pList->nAlloc = (pList->nAlloc << 1) + 4; - pList->p = realloc(pList->p, pList->nAlloc * sizeof(pList->p[0])); - if (pList->p == 0) { - pList->nField = pList->nAlloc = 0; - return pList; - } +SArray *tVariantListAppend(SArray *pList, tVariant *pVar, uint8_t sortOrder) { + if (pList == NULL) { + pList = taosArrayInit(4, sizeof(tVariantListItem)); } - assert(pList->p != 0); - if (pField) { - struct TAOS_FIELD *pItem = (struct TAOS_FIELD *)&pList->p[pList->nField++]; - memcpy(pItem, pField, sizeof(TAOS_FIELD)); + if (pVar == NULL) { + return pList; } + + /* + * Here we do not employ the assign function, since we need the pz attribute of structure + * , which is the point to char string, to free it! + * + * Otherwise, the original pointer may be lost, which causes memory leak. 
+ */ + tVariantListItem item = {0}; + item.pVar = *pVar; + item.sortOrder = sortOrder; + + taosArrayPush(pList, &item); return pList; } -void tFieldListDestroy(tFieldList *pList) { - if (pList == NULL) return; +SArray *tVariantListInsert(SArray *pList, tVariant *pVar, uint8_t sortOrder, int32_t index) { + if (pList == NULL || pVar == NULL || index >= taosArrayGetSize(pList)) { + return tVariantListAppend(NULL, pVar, sortOrder); + } - free(pList->p); - free(pList); + tVariantListItem item = {0}; + item.pVar = *pVar; + item.sortOrder = sortOrder; + + taosArrayInsert(pList, index, &item); + return pList; } void setDBName(SStrToken *pCpxName, SStrToken *pDB) { @@ -499,9 +423,9 @@ void tSQLSetColumnType(TAOS_FIELD *pField, SStrToken *type) { /* * extract the select info out of sql string */ -SQuerySQL *tSetQuerySQLElems(SStrToken *pSelectToken, tSQLExprList *pSelection, tVariantList *pFrom, tSQLExpr *pWhere, - tVariantList *pGroupby, tVariantList *pSortOrder, SIntervalVal *pInterval, - SStrToken *pSliding, tVariantList *pFill, SLimitVal *pLimit, SLimitVal *pGLimit) { +SQuerySQL *tSetQuerySQLElems(SStrToken *pSelectToken, tSQLExprList *pSelection, SArray *pFrom, tSQLExpr *pWhere, + SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, + SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pGLimit) { assert(pSelection != NULL); SQuerySQL *pQuery = calloc(1, sizeof(SQuerySQL)); @@ -535,6 +459,11 @@ SQuerySQL *tSetQuerySQLElems(SStrToken *pSelectToken, tSQLExprList *pSelection, return pQuery; } +void freeVariant(void *pItem) { + tVariantListItem* p = (tVariantListItem*) pItem; + tVariantDestroy(&p->pVar); +} + void doDestroyQuerySql(SQuerySQL *pQuerySql) { if (pQuerySql == NULL) { return; @@ -547,17 +476,18 @@ void doDestroyQuerySql(SQuerySQL *pQuerySql) { tSQLExprDestroy(pQuerySql->pWhere); pQuerySql->pWhere = NULL; - tVariantListDestroy(pQuerySql->pSortOrder); + taosArrayDestroyEx(pQuerySql->pSortOrder, freeVariant); pQuerySql->pSortOrder = NULL; - - tVariantListDestroy(pQuerySql->pGroupby); + + taosArrayDestroyEx(pQuerySql->pGroupby, freeVariant); pQuerySql->pGroupby = NULL; - - tVariantListDestroy(pQuerySql->from); + + taosArrayDestroyEx(pQuerySql->from, freeVariant); pQuerySql->from = NULL; - - tVariantListDestroy(pQuerySql->fillType); - + + taosArrayDestroyEx(pQuerySql->fillType, freeVariant); + pQuerySql->fillType = NULL; + free(pQuerySql); } @@ -574,8 +504,8 @@ void destroyAllSelectClause(SSubclauseInfo *pClause) { taosTFree(pClause->pClause); } -SCreateTableSQL *tSetCreateSQLElems(tFieldList *pCols, tFieldList *pTags, SStrToken *pStableName, - tVariantList *pTagVals, SQuerySQL *pSelect, int32_t type) { +SCreateTableSQL *tSetCreateSQLElems(SArray *pCols, SArray *pTags, SStrToken *pStableName, + SArray *pTagVals, SQuerySQL *pSelect, int32_t type) { SCreateTableSQL *pCreate = calloc(1, sizeof(SCreateTableSQL)); switch (type) { @@ -607,7 +537,7 @@ SCreateTableSQL *tSetCreateSQLElems(tFieldList *pCols, tFieldList *pTags, SStrTo return pCreate; } -SAlterTableSQL *tAlterTableSQLElems(SStrToken *pMeterName, tFieldList *pCols, tVariantList *pVals, int32_t type) { +SAlterTableSQL *tAlterTableSQLElems(SStrToken *pMeterName, SArray *pCols, SArray *pVals, int32_t type) { SAlterTableSQL *pAlterTable = calloc(1, sizeof(SAlterTableSQL)); pAlterTable->name = *pMeterName; @@ -637,14 +567,14 @@ void SQLInfoDestroy(SSqlInfo *pInfo) { SCreateTableSQL *pCreateTableInfo = pInfo->pCreateTableInfo; doDestroyQuerySql(pCreateTableInfo->pSelect); - 
tFieldListDestroy(pCreateTableInfo->colInfo.pColumns); - tFieldListDestroy(pCreateTableInfo->colInfo.pTagColumns); + taosArrayDestroy(pCreateTableInfo->colInfo.pColumns); + taosArrayDestroy(pCreateTableInfo->colInfo.pTagColumns); - tVariantListDestroy(pCreateTableInfo->usingInfo.pTagVals); + taosArrayDestroyEx(pCreateTableInfo->usingInfo.pTagVals, freeVariant); taosTFree(pInfo->pCreateTableInfo); } else if (pInfo->type == TSDB_SQL_ALTER_TABLE) { - tVariantListDestroy(pInfo->pAlterInfo->varList); - tFieldListDestroy(pInfo->pAlterInfo->pAddColumns); + taosArrayDestroyEx(pInfo->pAlterInfo->varList, freeVariant); + taosArrayDestroy(pInfo->pAlterInfo->pAddColumns); taosTFree(pInfo->pAlterInfo); } else { @@ -653,7 +583,7 @@ void SQLInfoDestroy(SSqlInfo *pInfo) { } if (pInfo->pDCLInfo != NULL && pInfo->type == TSDB_SQL_CREATE_DB) { - tVariantListDestroy(pInfo->pDCLInfo->dbOpt.keep); + taosArrayDestroyEx(pInfo->pDCLInfo->dbOpt.keep, freeVariant); } taosTFree(pInfo->pDCLInfo); diff --git a/src/query/src/sql.c b/src/query/src/sql.c index a18efdeb74..92756d3802 100644 --- a/src/query/src/sql.c +++ b/src/query/src/sql.c @@ -115,9 +115,8 @@ typedef union { int64_t yy271; tVariant yy312; SIntervalVal yy314; + SArray* yy347; SCreateTableSQL* yy374; - tFieldList* yy449; - tVariantList* yy494; } YYMINORTYPE; #ifndef YYSTACKDEPTH #define YYSTACKDEPTH 100 @@ -1361,18 +1360,14 @@ static void yy_destructor( /********* Begin destructor definitions ***************************************/ case 226: /* keep */ case 227: /* tagitemlist */ + case 243: /* columnlist */ case 251: /* fill_opt */ case 253: /* groupby_opt */ case 254: /* orderby_opt */ case 264: /* sortlist */ case 268: /* grouplist */ { -tVariantListDestroy((yypminor->yy494)); -} - break; - case 243: /* columnlist */ -{ -tFieldListDestroy((yypminor->yy449)); +taosArrayDestroy((yypminor->yy347)); } break; case 244: /* select */ @@ -2228,7 +2223,7 @@ static void yy_reduce( yymsp[-8].minor.yy73 = yylhsminor.yy73; break; case 72: /* keep ::= KEEP tagitemlist */ -{ yymsp[-1].minor.yy494 = yymsp[0].minor.yy494; } +{ yymsp[-1].minor.yy347 = yymsp[0].minor.yy347; } break; case 73: /* cache ::= CACHE INTEGER */ case 74: /* replica ::= REPLICA INTEGER */ yytestcase(yyruleno==74); @@ -2303,7 +2298,7 @@ static void yy_reduce( break; case 98: /* db_optr ::= db_optr keep */ case 102: /* alter_db_optr ::= alter_db_optr keep */ yytestcase(yyruleno==102); -{ yylhsminor.yy158 = yymsp[-1].minor.yy158; yylhsminor.yy158.keep = yymsp[0].minor.yy494; } +{ yylhsminor.yy158 = yymsp[-1].minor.yy158; yylhsminor.yy158.keep = yymsp[0].minor.yy347; } yymsp[-1].minor.yy158 = yylhsminor.yy158; break; case 99: /* alter_db_optr ::= */ @@ -2346,20 +2341,20 @@ static void yy_reduce( break; case 113: /* create_table_args ::= LP columnlist RP */ { - yymsp[-2].minor.yy374 = tSetCreateSQLElems(yymsp[-1].minor.yy449, NULL, NULL, NULL, NULL, TSQL_CREATE_TABLE); + yymsp[-2].minor.yy374 = tSetCreateSQLElems(yymsp[-1].minor.yy347, NULL, NULL, NULL, NULL, TSQL_CREATE_TABLE); setSQLInfo(pInfo, yymsp[-2].minor.yy374, NULL, TSDB_SQL_CREATE_TABLE); } break; case 114: /* create_table_args ::= LP columnlist RP TAGS LP columnlist RP */ { - yymsp[-6].minor.yy374 = tSetCreateSQLElems(yymsp[-5].minor.yy449, yymsp[-1].minor.yy449, NULL, NULL, NULL, TSQL_CREATE_STABLE); + yymsp[-6].minor.yy374 = tSetCreateSQLElems(yymsp[-5].minor.yy347, yymsp[-1].minor.yy347, NULL, NULL, NULL, TSQL_CREATE_STABLE); setSQLInfo(pInfo, yymsp[-6].minor.yy374, NULL, TSDB_SQL_CREATE_TABLE); } break; case 115: /* create_table_args 
::= USING ids cpxName TAGS LP tagitemlist RP */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; - yymsp[-6].minor.yy374 = tSetCreateSQLElems(NULL, NULL, &yymsp[-5].minor.yy0, yymsp[-1].minor.yy494, NULL, TSQL_CREATE_TABLE_FROM_STABLE); + yymsp[-6].minor.yy374 = tSetCreateSQLElems(NULL, NULL, &yymsp[-5].minor.yy0, yymsp[-1].minor.yy347, NULL, TSQL_CREATE_TABLE_FROM_STABLE); setSQLInfo(pInfo, yymsp[-6].minor.yy374, NULL, TSDB_SQL_CREATE_TABLE); } break; @@ -2370,12 +2365,12 @@ static void yy_reduce( } break; case 117: /* columnlist ::= columnlist COMMA column */ -{yylhsminor.yy449 = tFieldListAppend(yymsp[-2].minor.yy449, &yymsp[0].minor.yy181); } - yymsp[-2].minor.yy449 = yylhsminor.yy449; +{yylhsminor.yy347 = taosArrayPush(yymsp[-2].minor.yy347, &yymsp[0].minor.yy181); } + yymsp[-2].minor.yy347 = yylhsminor.yy347; break; case 118: /* columnlist ::= column */ -{yylhsminor.yy449 = tFieldListAppend(NULL, &yymsp[0].minor.yy181);} - yymsp[0].minor.yy449 = yylhsminor.yy449; +{yylhsminor.yy347 = taosArrayInit(4, sizeof(TAOS_FIELD)); taosArrayPush(yylhsminor.yy347, &yymsp[0].minor.yy181);} + yymsp[0].minor.yy347 = yylhsminor.yy347; break; case 119: /* column ::= ids typename */ { @@ -2384,12 +2379,12 @@ static void yy_reduce( yymsp[-1].minor.yy181 = yylhsminor.yy181; break; case 120: /* tagitemlist ::= tagitemlist COMMA tagitem */ -{ yylhsminor.yy494 = tVariantListAppend(yymsp[-2].minor.yy494, &yymsp[0].minor.yy312, -1); } - yymsp[-2].minor.yy494 = yylhsminor.yy494; +{ yylhsminor.yy347 = tVariantListAppend(yymsp[-2].minor.yy347, &yymsp[0].minor.yy312, -1); } + yymsp[-2].minor.yy347 = yylhsminor.yy347; break; case 121: /* tagitemlist ::= tagitem */ -{ yylhsminor.yy494 = tVariantListAppend(NULL, &yymsp[0].minor.yy312, -1); } - yymsp[0].minor.yy494 = yylhsminor.yy494; +{ yylhsminor.yy347 = tVariantListAppend(NULL, &yymsp[0].minor.yy312, -1); } + yymsp[0].minor.yy347 = yylhsminor.yy347; break; case 122: /* tagitem ::= INTEGER */ case 123: /* tagitem ::= FLOAT */ yytestcase(yyruleno==123); @@ -2416,7 +2411,7 @@ static void yy_reduce( break; case 131: /* select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ { - yylhsminor.yy150 = tSetQuerySQLElems(&yymsp[-11].minor.yy0, yymsp[-10].minor.yy224, yymsp[-9].minor.yy494, yymsp[-8].minor.yy66, yymsp[-4].minor.yy494, yymsp[-3].minor.yy494, &yymsp[-7].minor.yy314, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy494, &yymsp[0].minor.yy188, &yymsp[-1].minor.yy188); + yylhsminor.yy150 = tSetQuerySQLElems(&yymsp[-11].minor.yy0, yymsp[-10].minor.yy224, yymsp[-9].minor.yy347, yymsp[-8].minor.yy66, yymsp[-4].minor.yy347, yymsp[-3].minor.yy347, &yymsp[-7].minor.yy314, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy347, &yymsp[0].minor.yy188, &yymsp[-1].minor.yy188); } yymsp[-11].minor.yy150 = yylhsminor.yy150; break; @@ -2475,45 +2470,45 @@ static void yy_reduce( { yymsp[1].minor.yy0.n = 0; } break; case 145: /* from ::= FROM tablelist */ -{yymsp[-1].minor.yy494 = yymsp[0].minor.yy494;} +{yymsp[-1].minor.yy347 = yymsp[0].minor.yy347;} break; case 146: /* tablelist ::= ids cpxName */ { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - yylhsminor.yy494 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); - yylhsminor.yy494 = tVariantListAppendToken(yylhsminor.yy494, &yymsp[-1].minor.yy0, -1); // table alias name + yylhsminor.yy347 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); + yylhsminor.yy347 = tVariantListAppendToken(yylhsminor.yy347, 
&yymsp[-1].minor.yy0, -1); // table alias name } - yymsp[-1].minor.yy494 = yylhsminor.yy494; + yymsp[-1].minor.yy347 = yylhsminor.yy347; break; case 147: /* tablelist ::= ids cpxName ids */ { toTSDBType(yymsp[-2].minor.yy0.type); toTSDBType(yymsp[0].minor.yy0.type); yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; - yylhsminor.yy494 = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1); - yylhsminor.yy494 = tVariantListAppendToken(yylhsminor.yy494, &yymsp[0].minor.yy0, -1); + yylhsminor.yy347 = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1); + yylhsminor.yy347 = tVariantListAppendToken(yylhsminor.yy347, &yymsp[0].minor.yy0, -1); } - yymsp[-2].minor.yy494 = yylhsminor.yy494; + yymsp[-2].minor.yy347 = yylhsminor.yy347; break; case 148: /* tablelist ::= tablelist COMMA ids cpxName */ { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - yylhsminor.yy494 = tVariantListAppendToken(yymsp[-3].minor.yy494, &yymsp[-1].minor.yy0, -1); - yylhsminor.yy494 = tVariantListAppendToken(yylhsminor.yy494, &yymsp[-1].minor.yy0, -1); + yylhsminor.yy347 = tVariantListAppendToken(yymsp[-3].minor.yy347, &yymsp[-1].minor.yy0, -1); + yylhsminor.yy347 = tVariantListAppendToken(yylhsminor.yy347, &yymsp[-1].minor.yy0, -1); } - yymsp[-3].minor.yy494 = yylhsminor.yy494; + yymsp[-3].minor.yy347 = yylhsminor.yy347; break; case 149: /* tablelist ::= tablelist COMMA ids cpxName ids */ { toTSDBType(yymsp[-2].minor.yy0.type); toTSDBType(yymsp[0].minor.yy0.type); yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; - yylhsminor.yy494 = tVariantListAppendToken(yymsp[-4].minor.yy494, &yymsp[-2].minor.yy0, -1); - yylhsminor.yy494 = tVariantListAppendToken(yylhsminor.yy494, &yymsp[0].minor.yy0, -1); + yylhsminor.yy347 = tVariantListAppendToken(yymsp[-4].minor.yy347, &yymsp[-2].minor.yy0, -1); + yylhsminor.yy347 = tVariantListAppendToken(yylhsminor.yy347, &yymsp[0].minor.yy0, -1); } - yymsp[-4].minor.yy494 = yylhsminor.yy494; + yymsp[-4].minor.yy347 = yylhsminor.yy347; break; case 150: /* tmvar ::= VARIABLE */ {yylhsminor.yy0 = yymsp[0].minor.yy0;} @@ -2529,7 +2524,7 @@ static void yy_reduce( {memset(&yymsp[1].minor.yy314, 0, sizeof(yymsp[1].minor.yy314));} break; case 154: /* fill_opt ::= */ -{yymsp[1].minor.yy494 = 0; } +{yymsp[1].minor.yy347 = 0; } break; case 155: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ { @@ -2537,14 +2532,14 @@ static void yy_reduce( toTSDBType(yymsp[-3].minor.yy0.type); tVariantCreate(&A, &yymsp[-3].minor.yy0); - tVariantListInsert(yymsp[-1].minor.yy494, &A, -1, 0); - yymsp[-5].minor.yy494 = yymsp[-1].minor.yy494; + tVariantListInsert(yymsp[-1].minor.yy347, &A, -1, 0); + yymsp[-5].minor.yy347 = yymsp[-1].minor.yy347; } break; case 156: /* fill_opt ::= FILL LP ID RP */ { toTSDBType(yymsp[-1].minor.yy0.type); - yymsp[-3].minor.yy494 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); + yymsp[-3].minor.yy347 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); } break; case 157: /* sliding_opt ::= SLIDING LP tmvar RP */ @@ -2555,23 +2550,23 @@ static void yy_reduce( break; case 159: /* orderby_opt ::= */ case 167: /* groupby_opt ::= */ yytestcase(yyruleno==167); -{yymsp[1].minor.yy494 = 0;} +{yymsp[1].minor.yy347 = 0;} break; case 160: /* orderby_opt ::= ORDER BY sortlist */ case 168: /* groupby_opt ::= GROUP BY grouplist */ yytestcase(yyruleno==168); -{yymsp[-2].minor.yy494 = yymsp[0].minor.yy494;} +{yymsp[-2].minor.yy347 = yymsp[0].minor.yy347;} break; case 161: /* sortlist ::= sortlist COMMA item sortorder */ { - yylhsminor.yy494 = 
tVariantListAppend(yymsp[-3].minor.yy494, &yymsp[-1].minor.yy312, yymsp[0].minor.yy82); + yylhsminor.yy347 = tVariantListAppend(yymsp[-3].minor.yy347, &yymsp[-1].minor.yy312, yymsp[0].minor.yy82); } - yymsp[-3].minor.yy494 = yylhsminor.yy494; + yymsp[-3].minor.yy347 = yylhsminor.yy347; break; case 162: /* sortlist ::= item sortorder */ { - yylhsminor.yy494 = tVariantListAppend(NULL, &yymsp[-1].minor.yy312, yymsp[0].minor.yy82); + yylhsminor.yy347 = tVariantListAppend(NULL, &yymsp[-1].minor.yy312, yymsp[0].minor.yy82); } - yymsp[-1].minor.yy494 = yylhsminor.yy494; + yymsp[-1].minor.yy347 = yylhsminor.yy347; break; case 163: /* item ::= ids cpxName */ { @@ -2593,15 +2588,15 @@ static void yy_reduce( break; case 169: /* grouplist ::= grouplist COMMA item */ { - yylhsminor.yy494 = tVariantListAppend(yymsp[-2].minor.yy494, &yymsp[0].minor.yy312, -1); + yylhsminor.yy347 = tVariantListAppend(yymsp[-2].minor.yy347, &yymsp[0].minor.yy312, -1); } - yymsp[-2].minor.yy494 = yylhsminor.yy494; + yymsp[-2].minor.yy347 = yylhsminor.yy347; break; case 170: /* grouplist ::= item */ { - yylhsminor.yy494 = tVariantListAppend(NULL, &yymsp[0].minor.yy312, -1); + yylhsminor.yy347 = tVariantListAppend(NULL, &yymsp[0].minor.yy312, -1); } - yymsp[0].minor.yy494 = yylhsminor.yy494; + yymsp[0].minor.yy347 = yylhsminor.yy347; break; case 171: /* having_opt ::= */ case 181: /* where_opt ::= */ yytestcase(yyruleno==181); @@ -2771,7 +2766,7 @@ static void yy_reduce( case 221: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy449, NULL, TSDB_ALTER_TABLE_ADD_COLUMN); + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy347, NULL, TSDB_ALTER_TABLE_ADD_COLUMN); setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; @@ -2780,7 +2775,7 @@ static void yy_reduce( yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; toTSDBType(yymsp[0].minor.yy0.type); - tVariantList* K = tVariantListAppendToken(NULL, &yymsp[0].minor.yy0, -1); + SArray* K = tVariantListAppendToken(NULL, &yymsp[0].minor.yy0, -1); SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN); setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); @@ -2789,7 +2784,7 @@ static void yy_reduce( case 223: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy449, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN); + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy347, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN); setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; @@ -2798,7 +2793,7 @@ static void yy_reduce( yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; toTSDBType(yymsp[0].minor.yy0.type); - tVariantList* A = tVariantListAppendToken(NULL, &yymsp[0].minor.yy0, -1); + SArray* A = tVariantListAppendToken(NULL, &yymsp[0].minor.yy0, -1); SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN); setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); @@ -2809,7 +2804,7 @@ static void yy_reduce( yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; toTSDBType(yymsp[-1].minor.yy0.type); - tVariantList* A = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); + SArray* A = 
tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); toTSDBType(yymsp[0].minor.yy0.type); A = tVariantListAppendToken(A, &yymsp[0].minor.yy0, -1); @@ -2823,7 +2818,7 @@ static void yy_reduce( yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; toTSDBType(yymsp[-2].minor.yy0.type); - tVariantList* A = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1); + SArray* A = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1); A = tVariantListAppend(A, &yymsp[0].minor.yy312, -1); SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-6].minor.yy0, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL); diff --git a/src/util/inc/tarray.h b/src/util/inc/tarray.h index 71838af150..bf922fe9c4 100644 --- a/src/util/inc/tarray.h +++ b/src/util/inc/tarray.h @@ -70,6 +70,13 @@ void* taosArrayGet(const SArray* pArray, size_t index); */ void* taosArrayGetP(const SArray* pArray, size_t index); +/** + * get the last element in the array list + * @param pArray + * @return + */ +void* taosArrayGetLast(const SArray* pArray); + /** * return the size of array * @param pArray @@ -117,6 +124,13 @@ void taosArrayClear(SArray* pArray); */ void taosArrayDestroy(SArray* pArray); +/** + * + * @param pArray + * @param fp + */ +void taosArrayDestroyEx(SArray* pArray, void (*fp)(void*)); + /** * sort the array * @param pArray diff --git a/src/util/src/tarray.c b/src/util/src/tarray.c index 65147b38de..56e75ff18b 100644 --- a/src/util/src/tarray.c +++ b/src/util/src/tarray.c @@ -99,6 +99,10 @@ void* taosArrayGetP(const SArray* pArray, size_t index) { return *(void**)d; } +void* taosArrayGetLast(const SArray* pArray) { + return TARRAY_GET_ELEM(pArray, pArray->size - 1); +} + size_t taosArrayGetSize(const SArray* pArray) { return pArray->size; } void* taosArrayInsert(SArray* pArray, size_t index, void* pData) { @@ -189,6 +193,18 @@ void taosArrayDestroy(SArray* pArray) { free(pArray); } +void taosArrayDestroyEx(SArray* pArray, void (*fp)(void*)) { + if (fp == NULL) { + return taosArrayDestroy(pArray); + } + + for(int32_t i = 0; i < pArray->size; ++i) { + fp(TARRAY_GET_ELEM(pArray, i)); + } + + taosArrayDestroy(pArray); +} + void taosArraySort(SArray* pArray, int (*compar)(const void*, const void*)) { assert(pArray != NULL); assert(compar != NULL); From 909ca7edebff8d5232cf2c6e495ae6b309495bfd Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 9 Nov 2020 20:00:28 +0800 Subject: [PATCH 06/52] [TD-225] --- src/client/src/tscSQLParser.c | 7 ++++--- src/query/inc/sql.y | 10 +++++----- src/query/src/qFill.c | 12 ++++++++++-- src/query/src/sql.c | 18 ++++++++++++------ src/util/src/tarray.c | 4 ++++ 5 files changed, 35 insertions(+), 16 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index f76a373f23..e436ae0a44 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -6060,19 +6060,20 @@ int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSqlInfo* p size_t numOfFields = taosArrayGetSize(pFieldList); for (; col < numOfFields; ++col) { - TAOS_FIELD* p = taosArrayGet(pTagList, col); + TAOS_FIELD* p = taosArrayGet(pFieldList, col); tscFieldInfoAppend(&pQueryInfo->fieldsInfo, p); } pCmd->numOfCols = (int16_t)numOfFields; if (pTagList != NULL) { // create super table[optional] - for (int32_t i = 0; i < numOfFields; ++i) { + size_t numOfTags = taosArrayGetSize(pTagList); + for (int32_t i = 0; i < numOfTags; ++i) { TAOS_FIELD* p = taosArrayGet(pTagList, i); tscFieldInfoAppend(&pQueryInfo->fieldsInfo, p); } - pCmd->count = numOfFields; + pCmd->count = 
numOfTags; } return TSDB_CODE_SUCCESS; diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y index e4021de22e..e46df1827a 100644 --- a/src/query/inc/sql.y +++ b/src/query/inc/sql.y @@ -326,7 +326,7 @@ create_table_args(A) ::= AS select(S). { %type column{TAOS_FIELD} %type columnlist{SArray*} %destructor columnlist {taosArrayDestroy($$);} -columnlist(A) ::= columnlist(X) COMMA column(Y). {A = taosArrayPush(X, &Y); } +columnlist(A) ::= columnlist(X) COMMA column(Y). {taosArrayPush(X, &Y); A = X; } columnlist(A) ::= column(X). {A = taosArrayInit(4, sizeof(TAOS_FIELD)); taosArrayPush(A, &X);} // The information used for a column is the name and type of column: @@ -481,7 +481,7 @@ fill_opt(N) ::= FILL LP ID(Y) COMMA tagitemlist(X) RP. { toTSDBType(Y.type); tVariantCreate(&A, &Y); - taosArrayPush(X, &A); + tVariantListInsert(X, &A, -1, 0); N = X; } @@ -553,11 +553,11 @@ having_opt(A) ::= HAVING expr(X). {A = X;} //limit-offset subclause %type limit_opt {SLimitVal} limit_opt(A) ::= . {A.limit = -1; A.offset = 0;} -limit_opt(A) ::= LIMIT signed(X). {A.limit = X; A.offset = 0;} +limit_opt(A) ::= LIMIT signed(X). {printf("aa1, %d\n", X); A.limit = X; A.offset = 0;} limit_opt(A) ::= LIMIT signed(X) OFFSET signed(Y). - {A.limit = X; A.offset = Y;} + {printf("aa2\n, %d\n", X); A.limit = X; A.offset = Y;} limit_opt(A) ::= LIMIT signed(X) COMMA signed(Y). - {A.limit = Y; A.offset = X;} + {printf("aa3\n, %d\n", X); A.limit = Y; A.offset = X;} %type slimit_opt {SLimitVal} slimit_opt(A) ::= . {A.limit = -1; A.offset = 0;} diff --git a/src/query/src/qFill.c b/src/query/src/qFill.c index a4830f7d88..a8cab4ba2a 100644 --- a/src/query/src/qFill.c +++ b/src/query/src/qFill.c @@ -37,26 +37,34 @@ static int32_t setTagColumnInfo(SFillInfo* pFillInfo, int32_t numOfCols, int32_t if (TSDB_COL_IS_TAG(pColInfo->flag)) { bool exists = false; + int32_t index = -1; for (int32_t j = 0; j < k; ++j) { if (pFillInfo->pTags[j].col.colId == pColInfo->col.colId) { exists = true; + index = j; break; } } if (!exists) { - pFillInfo->pTags[k].col.colId = pColInfo->col.colId; + SSchema* pSchema = &pFillInfo->pTags[k].col; + pSchema->colId = pColInfo->col.colId; + pSchema->type = pColInfo->col.type; + pSchema->bytes = pColInfo->col.bytes; + pFillInfo->pTags[k].tagVal = calloc(1, pColInfo->col.bytes); pColInfo->tagIndex = k; k += 1; + } else { + pColInfo->tagIndex = index; } } rowsize += pColInfo->col.bytes; } - assert(k < pFillInfo->numOfTags); + assert(k <= pFillInfo->numOfTags); return rowsize; } diff --git a/src/query/src/sql.c b/src/query/src/sql.c index 92756d3802..a99ff2f052 100644 --- a/src/query/src/sql.c +++ b/src/query/src/sql.c @@ -2365,7 +2365,7 @@ static void yy_reduce( } break; case 117: /* columnlist ::= columnlist COMMA column */ -{yylhsminor.yy347 = taosArrayPush(yymsp[-2].minor.yy347, &yymsp[0].minor.yy181); } +{taosArrayPush(yymsp[-2].minor.yy347, &yymsp[0].minor.yy181); yylhsminor.yy347 = yymsp[-2].minor.yy347; } yymsp[-2].minor.yy347 = yylhsminor.yy347; break; case 118: /* columnlist ::= column */ @@ -2612,15 +2612,21 @@ static void yy_reduce( {yymsp[1].minor.yy188.limit = -1; yymsp[1].minor.yy188.offset = 0;} break; case 174: /* limit_opt ::= LIMIT signed */ - case 178: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==178); -{yymsp[-1].minor.yy188.limit = yymsp[0].minor.yy271; yymsp[-1].minor.yy188.offset = 0;} +{printf("aa1, %d\n", yymsp[0].minor.yy271); yymsp[-1].minor.yy188.limit = yymsp[0].minor.yy271; yymsp[-1].minor.yy188.offset = 0;} break; case 175: /* limit_opt ::= LIMIT signed OFFSET signed */ - 
case 179: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ yytestcase(yyruleno==179); -{yymsp[-3].minor.yy188.limit = yymsp[-2].minor.yy271; yymsp[-3].minor.yy188.offset = yymsp[0].minor.yy271;} +{printf("aa2\n, %d\n", yymsp[-2].minor.yy271); yymsp[-3].minor.yy188.limit = yymsp[-2].minor.yy271; yymsp[-3].minor.yy188.offset = yymsp[0].minor.yy271;} break; case 176: /* limit_opt ::= LIMIT signed COMMA signed */ - case 180: /* slimit_opt ::= SLIMIT signed COMMA signed */ yytestcase(yyruleno==180); +{printf("aa3\n, %d\n", yymsp[-2].minor.yy271); yymsp[-3].minor.yy188.limit = yymsp[0].minor.yy271; yymsp[-3].minor.yy188.offset = yymsp[-2].minor.yy271;} + break; + case 178: /* slimit_opt ::= SLIMIT signed */ +{yymsp[-1].minor.yy188.limit = yymsp[0].minor.yy271; yymsp[-1].minor.yy188.offset = 0;} + break; + case 179: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ +{yymsp[-3].minor.yy188.limit = yymsp[-2].minor.yy271; yymsp[-3].minor.yy188.offset = yymsp[0].minor.yy271;} + break; + case 180: /* slimit_opt ::= SLIMIT signed COMMA signed */ {yymsp[-3].minor.yy188.limit = yymsp[0].minor.yy271; yymsp[-3].minor.yy188.offset = yymsp[-2].minor.yy271;} break; case 183: /* expr ::= LP expr RP */ diff --git a/src/util/src/tarray.c b/src/util/src/tarray.c index 56e75ff18b..36b2bb9e17 100644 --- a/src/util/src/tarray.c +++ b/src/util/src/tarray.c @@ -194,6 +194,10 @@ void taosArrayDestroy(SArray* pArray) { } void taosArrayDestroyEx(SArray* pArray, void (*fp)(void*)) { + if (pArray == NULL) { + return; + } + if (fp == NULL) { return taosArrayDestroy(pArray); } From 837701752cdb64de3af4b3d9ddc78897bd0db5e4 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 10 Nov 2020 10:32:43 +0800 Subject: [PATCH 07/52] [TD-225] --- src/tsdb/src/tsdbRead.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 554b67acea..a96f764ea0 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -2314,12 +2314,13 @@ void filterPrepare(void* expr, void* param) { if (pInfo->optr == TSDB_RELATION_IN) { pInfo->q = (char*) pCond->arr; - } else { + } else if (pCond != NULL) { uint32_t size = pCond->nLen * TSDB_NCHAR_SIZE; if (size < (uint32_t)pSchema->bytes) { size = pSchema->bytes; } - pInfo->q = calloc(1, size + TSDB_NCHAR_SIZE); // to make sure tonchar does not cause invalid write, since the '\0' needs at least sizeof(wchar_t) space. + + pInfo->q = calloc(1, size + TSDB_NCHAR_SIZE); // to make sure tonchar does not cause invalid write, since the '\0' needs at least sizeof(wchar_t) space. tVariantDump(pCond, pInfo->q, pSchema->type, true); } } From 5178b4ffe8f74f2582e6bbe4cc814eff231b3619 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 10 Nov 2020 21:47:50 +0800 Subject: [PATCH 08/52] [TD-2036]: fix arithmetic query error in the client. 
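
The parser folds two numeric literals in an arithmetic expression into one constant. Before this change the double-folding branch in tSQLExprCreate only fired when one operand was typed DOUBLE and the other BIGINT, so an expression built from two float literals (for example 0.1 + 0.2) missed that branch and the client returned a wrong value. The fix below keys the check off the token type instead: if either side is TK_FLOAT, the folded constant is produced as a double.

A minimal, self-contained sketch of the intended rule (illustrative only; Lit and foldPlus are stand-ins, not the real tSQLExpr/tVariant structures):

    #include <stdint.h>
    #include <stdio.h>

    /* isFloat plays the role of (node->nSQLOptr == TK_FLOAT) */
    typedef struct { int isFloat; double d; int64_t i; } Lit;

    static double foldPlus(Lit l, Lit r) {
      if (l.isFloat || r.isFloat) {               /* either literal is a float token: fold as double */
        double lv = l.isFloat ? l.d : (double)l.i;
        double rv = r.isFloat ? r.d : (double)r.i;
        return lv + rv;                           /* 0.1 + 0.2 -> 0.3 */
      }
      return (double)(l.i + r.i);                 /* both integers: plain integer fold */
    }

    int main() {
      Lit a = {1, 0.1, 0}, b = {1, 0.2, 0};
      printf("%f\n", foldPlus(a, b));             /* prints 0.300000 */
      return 0;
    }
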
--- src/client/src/tscSQLParser.c | 5 ++++- src/query/src/qParserImpl.c | 9 ++++----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index e436ae0a44..4c67dcd4f5 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -2767,7 +2767,10 @@ bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery) { int32_t startIdx = 0; - + + size_t numOfExpr = tscSqlExprNumOfExprs(pQueryInfo); + assert(numOfExpr > 0); + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, startIdx); int32_t functionID = pExpr->functionId; diff --git a/src/query/src/qParserImpl.c b/src/query/src/qParserImpl.c index f457a201d4..a0d2fff7f2 100644 --- a/src/query/src/qParserImpl.c +++ b/src/query/src/qParserImpl.c @@ -225,13 +225,12 @@ tSQLExpr *tSQLExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType) { tSQLExprDestroy(pLeft); tSQLExprDestroy(pRight); - } else if ((pLeft->val.nType == TSDB_DATA_TYPE_DOUBLE && pRight->val.nType == TSDB_DATA_TYPE_BIGINT) || - (pRight->val.nType == TSDB_DATA_TYPE_DOUBLE && pLeft->val.nType == TSDB_DATA_TYPE_BIGINT)) { + } else if (pLeft->nSQLOptr == TK_FLOAT || pRight->nSQLOptr == TK_FLOAT) { pExpr->val.nType = TSDB_DATA_TYPE_DOUBLE; - pExpr->nSQLOptr = TK_FLOAT; + pExpr->nSQLOptr = TK_FLOAT; - double left = pLeft->val.nType == TSDB_DATA_TYPE_DOUBLE ? pLeft->val.dKey : pLeft->val.i64Key; - double right = pRight->val.nType == TSDB_DATA_TYPE_DOUBLE ? pRight->val.dKey : pRight->val.i64Key; + double left = (pLeft->val.nType == TSDB_DATA_TYPE_DOUBLE) ? pLeft->val.dKey : pLeft->val.i64Key; + double right = (pRight->val.nType == TSDB_DATA_TYPE_DOUBLE) ? pRight->val.dKey : pRight->val.i64Key; switch (optrType) { case TK_PLUS: { From c8a7410009263e103e28ae974b336723e3289a36 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 10 Nov 2020 21:49:53 +0800 Subject: [PATCH 09/52] [TD-225] add a test case. 
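
The new assertions in constCol.sim run "select 0.1 + 0.2 from t1", expect three rows back, and check that the first value is 0.3000000, exercising the TD-2036 constant-folding fix from the previous patch. The same check can be reproduced outside the sim harness with a short C client program along these lines (sketch only: connection parameters are placeholders, it assumes the test database and table t1 from the script already exist, and it assumes the folded constant comes back as a DOUBLE column):

    #include <stdio.h>
    #include <taos.h>

    int main() {
      TAOS *taos = taos_connect("127.0.0.1", "root", "taosdata", "test", 0);
      if (taos == NULL) return 1;

      TAOS_RES *res = taos_query(taos, "select 0.1 + 0.2 from t1");
      if (taos_errno(res) != 0) {                 /* query failed */
        taos_free_result(res);
        taos_close(taos);
        return 1;
      }

      TAOS_ROW row;
      while ((row = taos_fetch_row(res)) != NULL) {
        double v = *(double *)row[0];             /* one value per row of t1, expected 0.3 */
        printf("%.7f\n", v);                      /* matches the 0.3000000 the sim script checks */
      }

      taos_free_result(res);
      taos_close(taos);
      return 0;
    }
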
--- tests/script/general/parser/constCol.sim | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tests/script/general/parser/constCol.sim b/tests/script/general/parser/constCol.sim index 7ae496f1ac..b5a4d4abea 100644 --- a/tests/script/general/parser/constCol.sim +++ b/tests/script/general/parser/constCol.sim @@ -347,6 +347,17 @@ if $rows != 3 then return -1 endi +sql select 0.1 + 0.2 from t1 +if $rows != 3 then + return -1 +endi + +print =============================> td-2036 +if $data00 != 0.3000000 then + print expect: 0.3000000, actual:$data00 + return -1 +endi + print ======================udc with normal column group by sql_error select from t1 From 8d1c2d377d679d988012679232ef561b82c8a4fa Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 10 Nov 2020 22:45:12 +0800 Subject: [PATCH 10/52] [TD-2036] refactor code --- src/client/src/tscUtil.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 879eeeaded..8980b55c41 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -71,7 +71,8 @@ void tsSetSTableQueryCond(STagCond* pTagCond, uint64_t uid, SBufferWriter* bw) { } bool tscQueryTags(SQueryInfo* pQueryInfo) { - for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { + int32_t numOfCols = tscSqlExprNumOfExprs(pQueryInfo); + for (int32_t i = 0; i < numOfCols; ++i) { SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); int32_t functId = pExpr->functionId; @@ -201,13 +202,9 @@ bool tscIsProjectionQuery(SQueryInfo* pQueryInfo) { bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo) { size_t size = tscSqlExprNumOfExprs(pQueryInfo); - for (int32_t i = 0; i < size; ++i) { SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); assert(pExpr != NULL); -// if (pExpr == NULL) { -// return false; -// } int32_t functionId = pExpr->functionId; if (functionId == TSDB_FUNC_TAG) { From fcdc8512d21dbe7881975aad95d41cd87f9099cf Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 10 Nov 2020 22:50:34 +0800 Subject: [PATCH 11/52] [TD-1980] release mem/imem table reference once the data in cache are all scanned. --- src/query/src/qExecutor.c | 2 +- src/tsdb/src/tsdbMemTable.c | 2 +- src/tsdb/src/tsdbRead.c | 221 +++++++++++++++++++----------------- 3 files changed, 121 insertions(+), 104 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 6191f536a3..8ede4b7c61 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -6926,7 +6926,7 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co if (IS_QUERY_KILLED(pQInfo) || Q_STATUS_EQUAL(pQuery->status, QUERY_OVER)) { // here current thread hold the refcount, so it is safe to free tsdbQueryHandle. 
- doFreeQueryHandle(pQInfo); +// doFreeQueryHandle(pQInfo); *continueExec = false; (*pRsp)->completed = 1; // notify no more result to client } else { diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index 8864602ba1..0cd29cddb1 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -197,7 +197,7 @@ void tsdbUnTakeMemSnapShot(STsdbRepo *pRepo, SMemTable *pMem, SMemTable *pIMem) tsdbUnRefMemTable(pRepo, pIMem); } - tsdbDebug("vgId:%d utake memory snapshot, pMem %p pIMem %p", REPO_ID(pRepo), pMem, pIMem); + tsdbDebug("vgId:%d untake memory snapshot, pMem %p pIMem %p", REPO_ID(pRepo), pMem, pIMem); } void *tsdbAllocBytes(STsdbRepo *pRepo, int bytes) { diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index a96f764ea0..0a34feebad 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -120,8 +120,6 @@ typedef struct STsdbQueryHandle { SDataCols *pDataCols; // in order to hold current file data block int32_t allocSize; // allocated data block size SMemRef *pMemRef; -// SMemTable *mem; // mem-table -// SMemTable *imem; // imem-table, acquired from snapshot SArray *defaultLoadColumn;// default load column SDataBlockLoadInfo dataBlockLoadInfo; /* record current block load information */ SLoadCompBlockInfo compBlockLoadInfo; /* record current compblock information in SQuery */ @@ -194,9 +192,12 @@ static void tsdbMayTakeMemSnapshot(STsdbQueryHandle* pQueryHandle) { } static void tsdbMayUnTakeMemSnapshot(STsdbQueryHandle* pQueryHandle) { - assert(pQueryHandle != NULL && pQueryHandle->pMemRef != NULL); - + assert(pQueryHandle != NULL); SMemRef* pMemRef = pQueryHandle->pMemRef; + if (pMemRef == NULL) { // it has been freed + return; + } + if (--pMemRef->ref == 0) { tsdbUnTakeMemSnapShot(pQueryHandle->pTsdb, pMemRef->mem, pMemRef->imem); pMemRef->mem = NULL; @@ -205,6 +206,7 @@ static void tsdbMayUnTakeMemSnapshot(STsdbQueryHandle* pQueryHandle) { pQueryHandle->pMemRef = NULL; } + static SArray* createCheckInfoFromTableGroup(STsdbQueryHandle* pQueryHandle, STableGroupInfo* pGroupList, STsdbMeta* pMeta) { size_t sizeOfGroup = taosArrayGetSize(pGroupList->pGroupList); assert(sizeOfGroup >= 1 && pMeta != NULL); @@ -1821,6 +1823,8 @@ static bool doHasDataInBuffer(STsdbQueryHandle* pQueryHandle) { pQueryHandle->activeIndex += 1; } + // no data in memtable or imemtable, decrease the memory reference. 
+ tsdbMayUnTakeMemSnapshot(pQueryHandle); return false; } @@ -1947,6 +1951,110 @@ static void destroyHelper(void* param) { free(param); } +static bool getNeighborRows(STsdbQueryHandle* pQueryHandle) { + assert (pQueryHandle->type == TSDB_QUERY_TYPE_EXTERNAL); + + SDataBlockInfo blockInfo = {{0}, 0}; + + pQueryHandle->type = TSDB_QUERY_TYPE_ALL; + pQueryHandle->order = TSDB_ORDER_DESC; + + if (!tsdbNextDataBlock((void*) pQueryHandle)) { + return false; + } + + tsdbRetrieveDataBlockInfo((void*) pQueryHandle, &blockInfo); + /*SArray *pDataBlock = */tsdbRetrieveDataBlock((void*) pQueryHandle, pQueryHandle->defaultLoadColumn); + if (terrno != TSDB_CODE_SUCCESS) { + return false; + } + + if (pQueryHandle->cur.win.ekey == pQueryHandle->window.skey) { + // data already retrieve, discard other data rows and return + int32_t numOfCols = (int32_t)(QH_GET_NUM_OF_COLS(pQueryHandle)); + for (int32_t i = 0; i < numOfCols; ++i) { + SColumnInfoData* pCol = taosArrayGet(pQueryHandle->pColumns, i); + memcpy((char*)pCol->pData, (char*)pCol->pData + pCol->info.bytes * (pQueryHandle->cur.rows - 1), pCol->info.bytes); + } + + pQueryHandle->cur.win = (STimeWindow){pQueryHandle->window.skey, pQueryHandle->window.skey}; + pQueryHandle->window = pQueryHandle->cur.win; + pQueryHandle->cur.rows = 1; + pQueryHandle->type = TSDB_QUERY_TYPE_ALL; + return true; + } else { + STimeWindow win = (STimeWindow) {pQueryHandle->window.skey, INT64_MAX}; + STsdbQueryCond cond = { + .order = TSDB_ORDER_ASC, + .numOfCols = (int32_t)(QH_GET_NUM_OF_COLS(pQueryHandle)) + }; + cond.twindow = win; + + cond.colList = calloc(cond.numOfCols, sizeof(SColumnInfo)); + if (cond.colList == NULL) { + terrno = TSDB_CODE_QRY_OUT_OF_MEMORY; + return false; + } + + for(int32_t i = 0; i < cond.numOfCols; ++i) { + SColumnInfoData* pColInfoData = taosArrayGet(pQueryHandle->pColumns, i); + memcpy(&cond.colList[i], &pColInfoData->info, sizeof(SColumnInfo)); + } + + STsdbQueryHandle* pSecQueryHandle = tsdbQueryTablesImpl(pQueryHandle->pTsdb, &cond, pQueryHandle->qinfo, pQueryHandle->pMemRef); + taosTFree(cond.colList); + + pSecQueryHandle->pTableCheckInfo = createCheckInfoFromCheckInfo(pQueryHandle->pTableCheckInfo, pSecQueryHandle->window.skey); + if (pSecQueryHandle->pTableCheckInfo == NULL) { + tsdbCleanupQueryHandle(pSecQueryHandle); + return false; + } + + if (!tsdbNextDataBlock((void*) pSecQueryHandle)) { + tsdbCleanupQueryHandle(pSecQueryHandle); + return false; + } + + tsdbRetrieveDataBlockInfo((void*) pSecQueryHandle, &blockInfo); + tsdbRetrieveDataBlock((void*) pSecQueryHandle, pSecQueryHandle->defaultLoadColumn); + + int32_t numOfCols = (int32_t)(QH_GET_NUM_OF_COLS(pSecQueryHandle)); + size_t si = taosArrayGetSize(pSecQueryHandle->pTableCheckInfo); + + for (int32_t i = 0; i < numOfCols; ++i) { + SColumnInfoData* pCol = taosArrayGet(pQueryHandle->pColumns, i); + memcpy((char*)pCol->pData, (char*)pCol->pData + pCol->info.bytes * (pQueryHandle->cur.rows - 1), pCol->info.bytes); + + SColumnInfoData* pCol1 = taosArrayGet(pSecQueryHandle->pColumns, i); + assert(pCol->info.colId == pCol1->info.colId); + + memcpy((char*)pCol->pData + pCol->info.bytes, pCol1->pData, pCol1->info.bytes); + } + + SColumnInfoData* pTSCol = taosArrayGet(pQueryHandle->pColumns, 0); + + // it is ascending order + pQueryHandle->order = TSDB_ORDER_DESC; + pQueryHandle->window = pQueryHandle->cur.win; + pQueryHandle->cur.win = (STimeWindow){((TSKEY*)pTSCol->pData)[0], ((TSKEY*)pTSCol->pData)[1]}; + pQueryHandle->cur.rows = 2; + pQueryHandle->cur.mixBlock = true; + + int32_t step = 
-1;// one step for ascending order traverse + for (int32_t j = 0; j < si; ++j) { + STableCheckInfo* pCheckInfo = (STableCheckInfo*) taosArrayGet(pQueryHandle->pTableCheckInfo, j); + pCheckInfo->lastKey = pQueryHandle->cur.win.ekey + step; + } + + tsdbCleanupQueryHandle(pSecQueryHandle); + } + + //disable it after retrieve data + pQueryHandle->type = TSDB_QUERY_TYPE_EXTERNAL; + pQueryHandle->checkFiles = false; + return true; +} + // handle data in cache situation bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) { STsdbQueryHandle* pQueryHandle = (STsdbQueryHandle*) pHandle; @@ -1957,106 +2065,15 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) { size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo); assert(numOfTables > 0); - SDataBlockInfo blockInfo = {{0}, 0}; if (pQueryHandle->type == TSDB_QUERY_TYPE_EXTERNAL) { - pQueryHandle->type = TSDB_QUERY_TYPE_ALL; - pQueryHandle->order = TSDB_ORDER_DESC; + SMemRef* pMemRef = pQueryHandle->pMemRef; + tsdbMayTakeMemSnapshot(pQueryHandle); + bool ret = getNeighborRows(pQueryHandle); + tsdbMayUnTakeMemSnapshot(pQueryHandle); - if (!tsdbNextDataBlock(pHandle)) { - return false; - } - - tsdbRetrieveDataBlockInfo(pHandle, &blockInfo); - /*SArray *pDataBlock = */tsdbRetrieveDataBlock(pHandle, pQueryHandle->defaultLoadColumn); - if (terrno != TSDB_CODE_SUCCESS) { - return false; - } - - if (pQueryHandle->cur.win.ekey == pQueryHandle->window.skey) { - // data already retrieve, discard other data rows and return - int32_t numOfCols = (int32_t)(QH_GET_NUM_OF_COLS(pQueryHandle)); - for (int32_t i = 0; i < numOfCols; ++i) { - SColumnInfoData* pCol = taosArrayGet(pQueryHandle->pColumns, i); - memcpy((char*)pCol->pData, (char*)pCol->pData + pCol->info.bytes * (pQueryHandle->cur.rows - 1), pCol->info.bytes); - } - - pQueryHandle->cur.win = (STimeWindow){pQueryHandle->window.skey, pQueryHandle->window.skey}; - pQueryHandle->window = pQueryHandle->cur.win; - pQueryHandle->cur.rows = 1; - pQueryHandle->type = TSDB_QUERY_TYPE_ALL; - return true; - } else { - STimeWindow win = (STimeWindow) {pQueryHandle->window.skey, INT64_MAX}; - STsdbQueryCond cond = { - .order = TSDB_ORDER_ASC, - .numOfCols = (int32_t)(QH_GET_NUM_OF_COLS(pQueryHandle)) - }; - cond.twindow = win; - - cond.colList = calloc(cond.numOfCols, sizeof(SColumnInfo)); - if (cond.colList == NULL) { - terrno = TSDB_CODE_QRY_OUT_OF_MEMORY; - return false; - } - - for(int32_t i = 0; i < cond.numOfCols; ++i) { - SColumnInfoData* pColInfoData = taosArrayGet(pQueryHandle->pColumns, i); - memcpy(&cond.colList[i], &pColInfoData->info, sizeof(SColumnInfo)); - } - - STsdbQueryHandle* pSecQueryHandle = tsdbQueryTablesImpl(pQueryHandle->pTsdb, &cond, pQueryHandle->qinfo, pQueryHandle->pMemRef); - - taosTFree(cond.colList); - - pSecQueryHandle->pTableCheckInfo = createCheckInfoFromCheckInfo(pQueryHandle->pTableCheckInfo, pSecQueryHandle->window.skey); - if (pSecQueryHandle->pTableCheckInfo == NULL) { - tsdbCleanupQueryHandle(pSecQueryHandle); - return false; - } - - if (!tsdbNextDataBlock((void*) pSecQueryHandle)) { - tsdbCleanupQueryHandle(pSecQueryHandle); - return false; - } - - tsdbRetrieveDataBlockInfo((void*) pSecQueryHandle, &blockInfo); - tsdbRetrieveDataBlock((void*) pSecQueryHandle, pSecQueryHandle->defaultLoadColumn); - - int32_t numOfCols = (int32_t)(QH_GET_NUM_OF_COLS(pSecQueryHandle)); - size_t si = taosArrayGetSize(pSecQueryHandle->pTableCheckInfo); - - for (int32_t i = 0; i < numOfCols; ++i) { - SColumnInfoData* pCol = taosArrayGet(pQueryHandle->pColumns, i); - 
memcpy((char*)pCol->pData, (char*)pCol->pData + pCol->info.bytes * (pQueryHandle->cur.rows - 1), pCol->info.bytes); - - SColumnInfoData* pCol1 = taosArrayGet(pSecQueryHandle->pColumns, i); - assert(pCol->info.colId == pCol1->info.colId); - - memcpy((char*)pCol->pData + pCol->info.bytes, pCol1->pData, pCol1->info.bytes); - } - - SColumnInfoData* pTSCol = taosArrayGet(pQueryHandle->pColumns, 0); - - // it is ascending order - pQueryHandle->order = TSDB_ORDER_DESC; - pQueryHandle->window = pQueryHandle->cur.win; - pQueryHandle->cur.win = (STimeWindow){((TSKEY*)pTSCol->pData)[0], ((TSKEY*)pTSCol->pData)[1]}; - pQueryHandle->cur.rows = 2; - pQueryHandle->cur.mixBlock = true; - - int32_t step = -1;// one step for ascending order traverse - for (int32_t j = 0; j < si; ++j) { - STableCheckInfo* pCheckInfo = (STableCheckInfo*) taosArrayGet(pQueryHandle->pTableCheckInfo, j); - pCheckInfo->lastKey = pQueryHandle->cur.win.ekey + step; - } - - tsdbCleanupQueryHandle(pSecQueryHandle); - } - - //disable it after retrieve data - pQueryHandle->type = TSDB_QUERY_TYPE_EXTERNAL; - pQueryHandle->checkFiles = false; - return true; + // restore the pMemRef + pQueryHandle->pMemRef = pMemRef; + return ret; } if (pQueryHandle->checkFiles) { From ec9042a6e85dbfb586990fc841598bfbe4109777 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 10 Nov 2020 22:53:23 +0800 Subject: [PATCH 12/52] [TD-1943]: fix memory leak. --- src/client/src/tscServer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 46b4de259b..3c97bf6fa2 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -365,7 +365,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { memcpy(pRes->pRsp, rpcMsg->pCont, pRes->rspLen); } } else { - pRes->pRsp = NULL; + taosTFree(pRes->pRsp); } /* From b8ffb3b04754666b70dbb15750adc1f45a9c16a6 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 11 Nov 2020 21:45:14 +0800 Subject: [PATCH 13/52] [TD-225] --- src/client/src/tscServer.c | 2 +- tests/examples/c/apitest.c | 77 ++-- tests/examples/c/asyncdemo.c | 41 +- tests/examples/c/demo.c | 59 ++- tests/perftest-scripts/perftest-query.sh | 92 +++++ tests/pytest/alter/db_update_options.py | 71 ++++ .../{crash_gen.py => crash_gen_main.py} | 274 +++++++++---- tests/pytest/crash_gen/db.py | 6 + tests/pytest/crash_gen/misc.py | 9 +- tests/pytest/crash_gen/service_manager.py | 85 ++-- tests/pytest/crash_gen_bootstrap.py | 2 +- tests/pytest/fulltest.sh | 19 +- tests/pytest/functions/function_percentile.py | 13 +- tests/pytest/handle_crash_gen_val_log.sh | 4 +- tests/pytest/insert/ningsiInsert.py | 88 ++++ tests/pytest/query/bug1874.py | 59 +++ tests/pytest/query/bug1875.py | 60 +++ tests/pytest/query/bug1876.py | 58 +++ tests/pytest/query/kill_query.py | 82 ++++ tests/pytest/query/queryLike.py | 45 ++ tests/pytest/query/query_performance.py | 219 ++++++++++ tests/pytest/update/allow_update-0.py | 170 ++++++++ tests/pytest/update/allow_update.py | 266 ++++++++++++ tests/pytest/update/append_commit_data-0.py | 84 ++++ tests/pytest/update/append_commit_data.py | 84 ++++ tests/pytest/update/append_commit_last-0.py | 90 ++++ tests/pytest/update/append_commit_last.py | 85 ++++ tests/pytest/update/merge_commit_data-0.py | 351 ++++++++++++++++ tests/pytest/update/merge_commit_data.py | 351 ++++++++++++++++ tests/pytest/update/merge_commit_data2.py | 352 ++++++++++++++++ .../update/merge_commit_data2_update0.py | 384 ++++++++++++++++++ 
tests/pytest/update/merge_commit_last-0.py | 309 ++++++++++++++ tests/pytest/update/merge_commit_last.py | 321 +++++++++++++++ tests/pytest/wal/addOldWalTest.py | 70 ++++ tests/script/general/http/restful_full.sim | 2 +- .../script/general/parser/join_multivnode.sim | 114 +----- tests/script/general/parser/stream_on_sys.sim | 12 +- tests/script/general/wal/kill.sim | 77 ++++ tests/script/general/wal/sync.sim | 124 ++++++ tests/script/jenkins/basic.txt | 3 + tests/test-all.sh | 7 +- tests/tsim/inc/sim.h | 89 ++-- tests/tsim/inc/simParse.h | 2 +- tests/tsim/src/simExe.c | 227 +++++------ tests/tsim/src/simMain.c | 10 +- tests/tsim/src/simParse.c | 204 +++++----- tests/tsim/src/simSystem.c | 51 ++- 47 files changed, 4633 insertions(+), 571 deletions(-) create mode 100755 tests/perftest-scripts/perftest-query.sh create mode 100644 tests/pytest/alter/db_update_options.py rename tests/pytest/crash_gen/{crash_gen.py => crash_gen_main.py} (90%) create mode 100644 tests/pytest/insert/ningsiInsert.py create mode 100644 tests/pytest/query/bug1874.py create mode 100644 tests/pytest/query/bug1875.py create mode 100644 tests/pytest/query/bug1876.py create mode 100644 tests/pytest/query/kill_query.py create mode 100644 tests/pytest/query/queryLike.py create mode 100644 tests/pytest/query/query_performance.py create mode 100644 tests/pytest/update/allow_update-0.py create mode 100644 tests/pytest/update/allow_update.py create mode 100644 tests/pytest/update/append_commit_data-0.py create mode 100644 tests/pytest/update/append_commit_data.py create mode 100644 tests/pytest/update/append_commit_last-0.py create mode 100644 tests/pytest/update/append_commit_last.py create mode 100644 tests/pytest/update/merge_commit_data-0.py create mode 100644 tests/pytest/update/merge_commit_data.py create mode 100644 tests/pytest/update/merge_commit_data2.py create mode 100644 tests/pytest/update/merge_commit_data2_update0.py create mode 100644 tests/pytest/update/merge_commit_last-0.py create mode 100644 tests/pytest/update/merge_commit_last.py create mode 100644 tests/pytest/wal/addOldWalTest.py create mode 100644 tests/script/general/wal/kill.sim create mode 100644 tests/script/general/wal/sync.sim diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 2dc12c99ea..8549fa09bb 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -361,7 +361,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { memcpy(pRes->pRsp, rpcMsg->pCont, pRes->rspLen); } } else { - taosTFree(pRes->pRsp); + tfree(pRes->pRsp); } /* diff --git a/tests/examples/c/apitest.c b/tests/examples/c/apitest.c index 759e16d1de..be60a88ad7 100644 --- a/tests/examples/c/apitest.c +++ b/tests/examples/c/apitest.c @@ -9,26 +9,40 @@ static void prepare_data(TAOS* taos) { - taos_query(taos, "drop database if exists test;"); + TAOS_RES *result; + result = taos_query(taos, "drop database if exists test;"); + taos_free_result(result); usleep(100000); - taos_query(taos, "create database test;"); + result = taos_query(taos, "create database test;"); + taos_free_result(result); usleep(100000); taos_select_db(taos, "test"); - taos_query(taos, "create table meters(ts timestamp, a int) tags(area int);"); + result = taos_query(taos, "create table meters(ts timestamp, a int) tags(area int);"); + taos_free_result(result); - taos_query(taos, "create table t0 using meters tags(0);"); - taos_query(taos, "create table t1 using meters tags(1);"); - taos_query(taos, "create table t2 using meters tags(2);"); - 
taos_query(taos, "create table t3 using meters tags(3);"); - taos_query(taos, "create table t4 using meters tags(4);"); - taos_query(taos, "create table t5 using meters tags(5);"); - taos_query(taos, "create table t6 using meters tags(6);"); - taos_query(taos, "create table t7 using meters tags(7);"); - taos_query(taos, "create table t8 using meters tags(8);"); - taos_query(taos, "create table t9 using meters tags(9);"); + result = taos_query(taos, "create table t0 using meters tags(0);"); + taos_free_result(result); + result = taos_query(taos, "create table t1 using meters tags(1);"); + taos_free_result(result); + result = taos_query(taos, "create table t2 using meters tags(2);"); + taos_free_result(result); + result = taos_query(taos, "create table t3 using meters tags(3);"); + taos_free_result(result); + result = taos_query(taos, "create table t4 using meters tags(4);"); + taos_free_result(result); + result = taos_query(taos, "create table t5 using meters tags(5);"); + taos_free_result(result); + result = taos_query(taos, "create table t6 using meters tags(6);"); + taos_free_result(result); + result = taos_query(taos, "create table t7 using meters tags(7);"); + taos_free_result(result); + result = taos_query(taos, "create table t8 using meters tags(8);"); + taos_free_result(result); + result = taos_query(taos, "create table t9 using meters tags(9);"); + taos_free_result(result); - TAOS_RES* res = taos_query(taos, "insert into t0 values('2020-01-01 00:00:00.000', 0)" + result = taos_query(taos, "insert into t0 values('2020-01-01 00:00:00.000', 0)" " ('2020-01-01 00:01:00.000', 0)" " ('2020-01-01 00:02:00.000', 0)" " t1 values('2020-01-01 00:00:00.000', 0)" @@ -46,10 +60,11 @@ static void prepare_data(TAOS* taos) { " t7 values('2020-01-01 00:01:02.000', 0)" " t8 values('2020-01-01 00:01:02.000', 0)" " t9 values('2020-01-01 00:01:02.000', 0)"); - int affected = taos_affected_rows(res); + int affected = taos_affected_rows(result); if (affected != 18) { printf("\033[31m%d rows affected by last insert statement, but it should be 18\033[0m\n", affected); } + taos_free_result(result); // super tables subscription usleep(1000000); } @@ -135,6 +150,7 @@ static void verify_query(TAOS* taos) { res = taos_query(taos, "select * from meters"); taos_stop_query(res); + taos_free_result(res); } @@ -153,23 +169,30 @@ static void verify_subscribe(TAOS* taos) { res = taos_consume(tsub); check_row_count(__LINE__, res, 0); - taos_query(taos, "insert into t0 values('2020-01-01 00:02:00.001', 0);"); - taos_query(taos, "insert into t8 values('2020-01-01 00:01:03.000', 0);"); + TAOS_RES *result; + result = taos_query(taos, "insert into t0 values('2020-01-01 00:02:00.001', 0);"); + taos_free_result(result); + result = taos_query(taos, "insert into t8 values('2020-01-01 00:01:03.000', 0);"); + taos_free_result(result); res = taos_consume(tsub); check_row_count(__LINE__, res, 2); - taos_query(taos, "insert into t2 values('2020-01-01 00:01:02.001', 0);"); - taos_query(taos, "insert into t1 values('2020-01-01 00:03:00.001', 0);"); + result = taos_query(taos, "insert into t2 values('2020-01-01 00:01:02.001', 0);"); + taos_free_result(result); + result = taos_query(taos, "insert into t1 values('2020-01-01 00:03:00.001', 0);"); + taos_free_result(result); res = taos_consume(tsub); check_row_count(__LINE__, res, 2); - taos_query(taos, "insert into t1 values('2020-01-01 00:03:00.002', 0);"); + result = taos_query(taos, "insert into t1 values('2020-01-01 00:03:00.002', 0);"); + taos_free_result(result); res = 
taos_consume(tsub); check_row_count(__LINE__, res, 1); // keep progress information and restart subscription taos_unsubscribe(tsub, 1); - taos_query(taos, "insert into t0 values('2020-01-01 00:04:00.000', 0);"); + result = taos_query(taos, "insert into t0 values('2020-01-01 00:04:00.000', 0);"); + taos_free_result(result); tsub = taos_subscribe(taos, 1, "test", "select * from meters;", NULL, NULL, 0); res = taos_consume(tsub); check_row_count(__LINE__, res, 24); @@ -196,7 +219,8 @@ static void verify_subscribe(TAOS* taos) { res = taos_consume(tsub); check_row_count(__LINE__, res, 0); - taos_query(taos, "insert into t0 values('2020-01-01 00:04:00.001', 0);"); + result = taos_query(taos, "insert into t0 values('2020-01-01 00:04:00.001', 0);"); + taos_free_result(result); res = taos_consume(tsub); check_row_count(__LINE__, res, 1); @@ -205,7 +229,8 @@ static void verify_subscribe(TAOS* taos) { int blockFetch = 0; tsub = taos_subscribe(taos, 1, "test", "select * from meters;", subscribe_callback, &blockFetch, 1000); usleep(2000000); - taos_query(taos, "insert into t0 values('2020-01-01 00:05:00.001', 0);"); + result = taos_query(taos, "insert into t0 values('2020-01-01 00:05:00.001', 0);"); + taos_free_result(result); usleep(2000000); taos_unsubscribe(tsub, 0); } @@ -213,8 +238,9 @@ static void verify_subscribe(TAOS* taos) { void verify_prepare(TAOS* taos) { TAOS_RES* result = taos_query(taos, "drop database if exists test;"); + taos_free_result(result); usleep(100000); - taos_query(taos, "create database test;"); + result = taos_query(taos, "create database test;"); int code = taos_errno(result); if (code != 0) { @@ -429,7 +455,8 @@ void verify_stream(TAOS* taos) { NULL); printf("waiting for stream data\n"); usleep(100000); - taos_query(taos, "insert into t0 values(now, 0)(now+5s,1)(now+10s, 2);"); + TAOS_RES* result = taos_query(taos, "insert into t0 values(now, 0)(now+5s,1)(now+10s, 2);"); + taos_free_result(result); usleep(200000000); taos_close_stream(strm); } diff --git a/tests/examples/c/asyncdemo.c b/tests/examples/c/asyncdemo.c index 1e523bd7fe..225c4f7541 100644 --- a/tests/examples/c/asyncdemo.c +++ b/tests/examples/c/asyncdemo.c @@ -46,6 +46,34 @@ void taos_insert_call_back(void *param, TAOS_RES *tres, int code); void taos_select_call_back(void *param, TAOS_RES *tres, int code); void taos_error(TAOS *taos); +static void queryDB(TAOS *taos, char *command) { + int i; + TAOS_RES *pSql = NULL; + int32_t code = -1; + + for (i = 0; i < 5; i++) { + if (NULL != pSql) { + taos_free_result(pSql); + pSql = NULL; + } + + pSql = taos_query(taos, command); + code = taos_errno(pSql); + if (0 == code) { + break; + } + } + + if (code != 0) { + fprintf(stderr, "Failed to run %s, reason: %s\n", command, taos_errstr(pSql)); + taos_free_result(pSql); + taos_close(taos); + exit(EXIT_FAILURE); + } + + taos_free_result(pSql); +} + int main(int argc, char *argv[]) { TAOS *taos; @@ -78,16 +106,14 @@ int main(int argc, char *argv[]) printf("success to connect to server\n"); - sprintf(sql, "drop database %s", db); - taos_query(taos, sql); + sprintf(sql, "drop database if exists %s", db); + queryDB(taos, sql); sprintf(sql, "create database %s", db); - if (taos_query(taos, sql) != 0) - taos_error(taos); + queryDB(taos, sql); sprintf(sql, "use %s", db); - if (taos_query(taos, sql) != 0) - taos_error(taos); + queryDB(taos, sql); strcpy(prefix, "asytbl_"); for (i = 0; i < numOfTables; ++i) { @@ -95,8 +121,7 @@ int main(int argc, char *argv[]) tableList[i].taos = taos; sprintf(tableList[i].name, "%s%d", prefix, i); 
sprintf(sql, "create table %s%d (ts timestamp, volume bigint)", prefix, i); - if (taos_query(taos, sql) != 0) - taos_error(taos); + queryDB(taos, sql); } gettimeofday(&systemTime, NULL); diff --git a/tests/examples/c/demo.c b/tests/examples/c/demo.c index 8f8a66a325..59b9c74827 100644 --- a/tests/examples/c/demo.c +++ b/tests/examples/c/demo.c @@ -22,6 +22,34 @@ #include #include // TAOS header file +static void queryDB(TAOS *taos, char *command) { + int i; + TAOS_RES *pSql = NULL; + int32_t code = -1; + + for (i = 0; i < 5; i++) { + if (NULL != pSql) { + taos_free_result(pSql); + pSql = NULL; + } + + pSql = taos_query(taos, command); + code = taos_errno(pSql); + if (0 == code) { + break; + } + } + + if (code != 0) { + fprintf(stderr, "Failed to run %s, reason: %s\n", command, taos_errstr(pSql)); + taos_free_result(pSql); + taos_close(taos); + exit(EXIT_FAILURE); + } + + taos_free_result(pSql); +} + int main(int argc, char *argv[]) { TAOS * taos; char qstr[1024]; @@ -44,22 +72,26 @@ int main(int argc, char *argv[]) { printf("success to connect to server\n"); - taos_query(taos, "drop database demo"); + //taos_query(taos, "drop database demo"); + queryDB(taos, "drop database if exists demo"); - result = taos_query(taos, "create database demo"); - if (result == NULL) { - printf("failed to create database, reason:%s\n", "null result"/*taos_errstr(taos)*/); - exit(1); - } + //result = taos_query(taos, "create database demo"); + //if (result == NULL) { + // printf("failed to create database, reason:%s\n", "null result"/*taos_errstr(taos)*/); + // exit(1); + //} + queryDB(taos, "create database demo"); printf("success to create database\n"); - taos_query(taos, "use demo"); + //taos_query(taos, "use demo"); + queryDB(taos, "use demo"); // create table - if (taos_query(taos, "create table m1 (ts timestamp, ti tinyint, si smallint, i int, bi bigint, f float, d double, b binary(10))") == 0) { - printf("failed to create table, reason:%s\n", taos_errstr(result)); - exit(1); - } + //if (taos_query(taos, "create table m1 (ts timestamp, ti tinyint, si smallint, i int, bi bigint, f float, d double, b binary(10))") == 0) { + // printf("failed to create table, reason:%s\n", taos_errstr(result)); + // exit(1); + //} + queryDB(taos, "create table m1 (ts timestamp, ti tinyint, si smallint, i int, bi bigint, f float, d double, b binary(10))"); printf("success to create table\n"); // sleep for one second to make sure table is created on data node @@ -80,8 +112,10 @@ int main(int argc, char *argv[]) { printf("insert row: %i\n", i); } else { printf("failed to insert row: %i, reason:%s\n", i, "null result"/*taos_errstr(result)*/); + taos_free_result(result); exit(1); } + taos_free_result(result); //sleep(1); } @@ -91,7 +125,8 @@ int main(int argc, char *argv[]) { sprintf(qstr, "SELECT * FROM m1"); result = taos_query(taos, qstr); if (result == NULL || taos_errno(result) != 0) { - printf("failed to select, reason:%s\n", taos_errstr(result)); + printf("failed to select, reason:%s\n", taos_errstr(result)); + taos_free_result(result); exit(1); } diff --git a/tests/perftest-scripts/perftest-query.sh b/tests/perftest-scripts/perftest-query.sh new file mode 100755 index 0000000000..bb5d9e0a9d --- /dev/null +++ b/tests/perftest-scripts/perftest-query.sh @@ -0,0 +1,92 @@ +#!/bin/bash + +today=`date +"%Y%m%d"` +WORK_DIR=/home/ubuntu/pxiao/ +PERFORMANCE_TEST_REPORT=$TDENGINE_DIR/tests/performance-test-report-$today.log + +# Coloured Echoes # +function red_echo { echo -e "\033[31m$@\033[0m"; } # +function green_echo { echo -e 
"\033[32m$@\033[0m"; } # +function yellow_echo { echo -e "\033[33m$@\033[0m"; } # +function white_echo { echo -e "\033[1;37m$@\033[0m"; } # +# Coloured Printfs # +function red_printf { printf "\033[31m$@\033[0m"; } # +function green_printf { printf "\033[32m$@\033[0m"; } # +function yellow_printf { printf "\033[33m$@\033[0m"; } # +function white_printf { printf "\033[1;37m$@\033[0m"; } # +# Debugging Outputs # +function white_brackets { local args="$@"; white_printf "["; printf "${args}"; white_printf "]"; } # +function echoInfo { local args="$@"; white_brackets $(green_printf "INFO") && echo " ${args}"; } # +function echoWarn { local args="$@"; echo "$(white_brackets "$(yellow_printf "WARN")" && echo " ${args}";)" 1>&2; } # +function echoError { local args="$@"; echo "$(white_brackets "$(red_printf "ERROR")" && echo " ${args}";)" 1>&2; } # + + +function stopTaosd { + echo "Stop taosd" + systemctl stop taosd + snap stop tdengine + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + while [ -n "$PID" ] + do + pkill -TERM -x taosd + sleep 1 + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + done +} + +function buildTDengine { + echoInfo "Build TDengine" + cd $WORK_DIR/TDengine + + git remote update > /dev/null + REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop` + LOCAL_COMMIT=`git rev-parse --short @` + + echo " LOCAL: $LOCAL_COMMIT" + echo "REMOTE: $REMOTE_COMMIT" + if [ "$LOCAL_COMMIT" == "$REMOTE_COMMIT" ]; then + echo "repo up-to-date" + else + echo "repo need to pull" + git pull > /dev/null + + LOCAL_COMMIT=`git rev-parse --short @` + cd debug + rm -rf * + cmake .. > /dev/null + make > /dev/null + make install + fi +} + +function runQueryPerfTest { + nohup $WORK_DIR/TDengine/debug/build/bin/taosd -c /etc/taodperf/ > /dev/null 2>&1 & + echoInfo "Run Performance Test" + cd $WORK_DIR/TDengine/tests/pytest + + python3 query/queryPerformance.py | tee -a $PERFORMANCE_TEST_REPORT +} + + +function sendReport { + echo "send report" + receiver="pxiao@taosdata.com" + mimebody="MIME-Version: 1.0\nContent-Type: text/html; charset=utf-8\n" + + cd $TDENGINE_DIR + + sed -i 's/\x1b\[[0-9;]*m//g' $PERFORMANCE_TEST_REPORT + BODY_CONTENT=`cat $PERFORMANCE_TEST_REPORT` + echo -e "to: ${receiver}\nsubject: Query Performace Report ${today}, commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \ + (cat - && uuencode $PERFORMANCE_TEST_REPORT performance-test-report-$today.log) | \ + ssmtp "${receiver}" && echo "Report Sent!" 
+} + + +stopTaosd +buildTDengine +runQueryPerfTest + +echoInfo "Send Report" +sendReport +echoInfo "End of Test" diff --git a/tests/pytest/alter/db_update_options.py b/tests/pytest/alter/db_update_options.py new file mode 100644 index 0000000000..224e0f25b0 --- /dev/null +++ b/tests/pytest/alter/db_update_options.py @@ -0,0 +1,71 @@ + +# -*- coding: utf-8 -*- + +import random +import string +import subprocess +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + def run(self): + tdLog.debug("check database") + tdSql.prepare() + + # check default update value + sql = "create database if not exists db" + tdSql.execute(sql) + tdSql.query('show databases') + tdSql.checkRows(1) + tdSql.checkData(0,16,0) + + sql = "alter database db update 1" + + # check update value + tdSql.execute(sql) + tdSql.query('show databases') + tdSql.checkRows(1) + tdSql.checkData(0,16,1) + + + sql = "alter database db update 0" + tdSql.execute(sql) + tdSql.query('show databases') + tdSql.checkRows(1) + tdSql.checkData(0,16,0) + + sql = "alter database db update -1" + tdSql.error(sql) + + sql = "alter database db update 100" + tdSql.error(sql) + + tdSql.query('show databases') + tdSql.checkRows(1) + tdSql.checkData(0,16,0) + + tdSql.execute('drop database db') + tdSql.error('create database db update 100') + tdSql.error('create database db update -1') + + tdSql.execute('create database db update 1') + + tdSql.query('show databases') + tdSql.checkRows(1) + tdSql.checkData(0,16,1) + + tdSql.execute('drop database db') + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/crash_gen/crash_gen.py b/tests/pytest/crash_gen/crash_gen_main.py similarity index 90% rename from tests/pytest/crash_gen/crash_gen.py rename to tests/pytest/crash_gen/crash_gen_main.py index 8d2b0080bc..8d68457ec8 100755 --- a/tests/pytest/crash_gen/crash_gen.py +++ b/tests/pytest/crash_gen/crash_gen_main.py @@ -38,9 +38,9 @@ import resource from guppy import hpy import gc -from .service_manager import ServiceManager, TdeInstance -from .misc import Logging, Status, CrashGenError, Dice, Helper, Progress -from .db import DbConn, MyTDSql, DbConnNative, DbManager +from crash_gen.service_manager import ServiceManager, TdeInstance +from crash_gen.misc import Logging, Status, CrashGenError, Dice, Helper, Progress +from crash_gen.db import DbConn, MyTDSql, DbConnNative, DbManager import taos import requests @@ -243,7 +243,7 @@ class WorkerThread: class ThreadCoordinator: - WORKER_THREAD_TIMEOUT = 180 # one minute + WORKER_THREAD_TIMEOUT = 120 # Normal: 120 def __init__(self, pool: ThreadPool, dbManager: DbManager): self._curStep = -1 # first step is 0 @@ -388,9 +388,9 @@ class ThreadCoordinator: self._syncAtBarrier() # For now just cross the barrier Progress.emit(Progress.END_THREAD_STEP) except threading.BrokenBarrierError as err: - Logging.info("Main loop aborted, caused by worker thread time-out") + Logging.info("Main loop aborted, caused by worker thread(s) time-out") self._execStats.registerFailure("Aborted due to worker thread timeout") - print("\n\nWorker Thread time-out detected, important thread info:") + print("\n\nWorker Thread time-out detected, TAOS related threads are:") ts = ThreadStacks() ts.print(filterInternal=True) 
workerTimeout = True @@ -435,7 +435,7 @@ class ThreadCoordinator: Logging.debug("\r\n\n--> Main thread ready to finish up...") Logging.debug("Main thread joining all threads") self._pool.joinAll() # Get all threads to finish - Logging.info("\nAll worker threads finished") + Logging.info(". . . All worker threads finished") # No CR/LF before self._execStats.endExec() def cleanup(self): # free resources @@ -1072,17 +1072,18 @@ class Database: t3 = datetime.datetime(2012, 1, 1) # default "keep" is 10 years t4 = datetime.datetime.fromtimestamp( t3.timestamp() + elSec2) # see explanation above - Logging.info("Setting up TICKS to start from: {}".format(t4)) + Logging.debug("Setting up TICKS to start from: {}".format(t4)) return t4 @classmethod def getNextTick(cls): with cls._clsLock: # prevent duplicate tick - if cls._lastLaggingTick==0: + if cls._lastLaggingTick==0 or cls._lastTick==0 : # not initialized # 10k at 1/20 chance, should be enough to avoid overlaps - cls._lastLaggingTick = cls.setupLastTick() + datetime.timedelta(0, -10000) - if cls._lastTick==0: # should be quite a bit into the future - cls._lastTick = cls.setupLastTick() + tick = cls.setupLastTick() + cls._lastTick = tick + cls._lastLaggingTick = tick + datetime.timedelta(0, -10000) + # if : # should be quite a bit into the future if Dice.throw(20) == 0: # 1 in 20 chance, return lagging tick cls._lastLaggingTick += datetime.timedelta(0, 1) # Go back in time 100 seconds @@ -1177,6 +1178,8 @@ class Task(): instead. But a task is always associated with a DB ''' taskSn = 100 + _lock = threading.Lock() + _tableLocks: Dict[str, threading.Lock] = {} @classmethod def allocTaskNum(cls): @@ -1198,6 +1201,8 @@ class Task(): self._execStats = execStats self._db = db # A task is always associated/for a specific DB + + def isSuccess(self): return self._err is None @@ -1237,6 +1242,7 @@ class Task(): 0x0B, # Unable to establish connection, more details in TD-1648 0x200, # invalid SQL, TODO: re-examine with TD-934 0x20F, # query terminated, possibly due to vnoding being dropped, see TD-1776 + 0x213, # "Disconnected from service", result of "kill connection ???" 0x217, # "db not selected", client side defined error code # 0x218, # "Table does not exist" client side defined error code 0x360, # Table already exists @@ -1318,7 +1324,7 @@ class Task(): self._err = err self._aborted = True except Exception as e: - self.logInfo("Non-TAOS exception encountered") + Logging.info("Non-TAOS exception encountered with: {}".format(self.__class__.__name__)) self._err = e self._aborted = True traceback.print_exc() @@ -1351,6 +1357,24 @@ class Task(): def getQueryResult(self, wt: WorkerThread): # execute an SQL on the worker thread return wt.getQueryResult() + def lockTable(self, ftName): # full table name + # print(" <<" + ftName + '_', end="", flush=True) + with Task._lock: + if not ftName in Task._tableLocks: + Task._tableLocks[ftName] = threading.Lock() + + Task._tableLocks[ftName].acquire() + + def unlockTable(self, ftName): + # print('_' + ftName + ">> ", end="", flush=True) + with Task._lock: + if not ftName in self._tableLocks: + raise RuntimeError("Corrupt state, no such lock") + lock = Task._tableLocks[ftName] + if not lock.locked(): + raise RuntimeError("Corrupte state, already unlocked") + lock.release() + class ExecutionStats: def __init__(self): @@ -1461,7 +1485,7 @@ class StateTransitionTask(Task): _baseTableNumber = None - _endState = None + _endState = None # TODO: no longter used? 
@classmethod def getInfo(cls): # each sub class should supply their own information @@ -1486,7 +1510,7 @@ class StateTransitionTask(Task): @classmethod def getRegTableName(cls, i): - if ( StateTransitionTask._baseTableNumber is None): + if ( StateTransitionTask._baseTableNumber is None): # Set it one time StateTransitionTask._baseTableNumber = Dice.throw( 999) if gConfig.dynamic_db_table_names else 0 return "reg_table_{}".format(StateTransitionTask._baseTableNumber + i) @@ -1544,8 +1568,11 @@ class TaskCreateSuperTable(StateTransitionTask): sTable = self._db.getFixedSuperTable() # type: TdSuperTable # wt.execSql("use db") # should always be in place + sTable.create(wt.getDbConn(), self._db.getName(), - {'ts':'timestamp', 'speed':'int'}, {'b':'binary(200)', 'f':'float'}) + {'ts':'timestamp', 'speed':'int'}, {'b':'binary(200)', 'f':'float'}, + dropIfExists = True + ) # self.execWtSql(wt,"create table db.{} (ts timestamp, speed int) tags (b binary(200), f float) ".format(tblName)) # No need to create the regular tables, INSERT will do that # automatically @@ -1558,14 +1585,41 @@ class TdSuperTable: def getName(self): return self._stName + def drop(self, dbc, dbName, skipCheck = False): + if self.exists(dbc, dbName) : # if myself exists + fullTableName = dbName + '.' + self._stName + dbc.execute("DROP TABLE {}".format(fullTableName)) + else: + if not skipCheck: + raise CrashGenError("Cannot drop non-existant super table: {}".format(self._stName)) + + def exists(self, dbc, dbName): + dbc.execute("USE " + dbName) + return dbc.existsSuperTable(self._stName) + # TODO: odd semantic, create() method is usually static? - def create(self, dbc, dbName, cols: dict, tags: dict): + def create(self, dbc, dbName, cols: dict, tags: dict, + dropIfExists = False + ): + '''Creating a super table''' - sql = "CREATE TABLE {}.{} ({}) TAGS ({})".format( - dbName, - self._stName, - ",".join(['%s %s'%(k,v) for (k,v) in cols.items()]), - ",".join(['%s %s'%(k,v) for (k,v) in tags.items()]) + dbc.execute("USE " + dbName) + fullTableName = dbName + '.' + self._stName + if dbc.existsSuperTable(self._stName): + if dropIfExists: + dbc.execute("DROP TABLE {}".format(fullTableName)) + else: # error + raise CrashGenError("Cannot create super table, already exists: {}".format(self._stName)) + + # Now let's create + sql = "CREATE TABLE {} ({})".format( + fullTableName, + ",".join(['%s %s'%(k,v) for (k,v) in cols.items()])) + if tags is None : + sql += " TAGS (dummy int) " + else: + sql += " TAGS ({})".format( + ",".join(['%s %s'%(k,v) for (k,v) in tags.items()]) ) dbc.execute(sql) @@ -1583,14 +1637,25 @@ class TdSuperTable: def hasRegTables(self, dbc: DbConn, dbName: str): return dbc.query("SELECT * FROM {}.{}".format(dbName, self._stName)) > 0 - def ensureTable(self, dbc: DbConn, dbName: str, regTableName: str): + def ensureTable(self, task: Task, dbc: DbConn, dbName: str, regTableName: str): sql = "select tbname from {}.{} where tbname in ('{}')".format(dbName, self._stName, regTableName) if dbc.query(sql) >= 1 : # reg table exists already return - sql = "CREATE TABLE {}.{} USING {}.{} tags ({})".format( - dbName, regTableName, dbName, self._stName, self._getTagStrForSql(dbc, dbName) - ) - dbc.execute(sql) + + # acquire a lock first, so as to be able to *verify*. More details in TD-1471 + fullTableName = dbName + '.' 
+ regTableName + if task is not None: # optional lock + task.lockTable(fullTableName) + Progress.emit(Progress.CREATE_TABLE_ATTEMPT) # ATTEMPT to create a new table + # print("(" + fullTableName[-3:] + ")", end="", flush=True) + try: + sql = "CREATE TABLE {} USING {}.{} tags ({})".format( + fullTableName, dbName, self._stName, self._getTagStrForSql(dbc, dbName) + ) + dbc.execute(sql) + finally: + if task is not None: + task.unlockTable(fullTableName) # no matter what def _getTagStrForSql(self, dbc, dbName: str) : tags = self._getTags(dbc, dbName) @@ -1809,7 +1874,7 @@ class TaskRestartService(StateTransitionTask): with self._classLock: if self._isRunning: - print("Skipping restart task, another running already") + Logging.info("Skipping restart task, another running already") return self._isRunning = True @@ -1847,13 +1912,88 @@ class TaskAddData(StateTransitionTask): def canBeginFrom(cls, state: AnyState): return state.canAddData() + def _addDataInBatch(self, db, dbc, regTableName, te: TaskExecutor): + numRecords = self.LARGE_NUMBER_OF_RECORDS if gConfig.larger_data else self.SMALL_NUMBER_OF_RECORDS + fullTableName = db.getName() + '.' + regTableName + + sql = "insert into {} values ".format(fullTableName) + for j in range(numRecords): # number of records per table + nextInt = db.getNextInt() + nextTick = db.getNextTick() + sql += "('{}', {});".format(nextTick, nextInt) + dbc.execute(sql) + + def _addData(self, db, dbc, regTableName, te: TaskExecutor): # implied: NOT in batches + numRecords = self.LARGE_NUMBER_OF_RECORDS if gConfig.larger_data else self.SMALL_NUMBER_OF_RECORDS + + for j in range(numRecords): # number of records per table + nextInt = db.getNextInt() + nextTick = db.getNextTick() + if gConfig.record_ops: + self.prepToRecordOps() + self.fAddLogReady.write("Ready to write {} to {}\n".format(nextInt, regTableName)) + self.fAddLogReady.flush() + os.fsync(self.fAddLogReady) + + # TODO: too ugly trying to lock the table reliably, refactor... + fullTableName = db.getName() + '.' + regTableName + if gConfig.verify_data: + self.lockTable(fullTableName) + # print("_w" + str(nextInt % 100), end="", flush=True) # Trace what was written + + try: + sql = "insert into {} values ('{}', {});".format( # removed: tags ('{}', {}) + fullTableName, + # ds.getFixedSuperTableName(), + # ds.getNextBinary(), ds.getNextFloat(), + nextTick, nextInt) + dbc.execute(sql) + except: # Any exception at all + if gConfig.verify_data: + self.unlockTable(fullTableName) + raise + + # Now read it back and verify, we might encounter an error if table is dropped + if gConfig.verify_data: # only if command line asks for it + try: + readBack = dbc.queryScalar("SELECT speed from {}.{} WHERE ts='{}'". 
+ format(db.getName(), regTableName, nextTick)) + if readBack != nextInt : + raise taos.error.ProgrammingError( + "Failed to read back same data, wrote: {}, read: {}" + .format(nextInt, readBack), 0x999) + except taos.error.ProgrammingError as err: + errno = Helper.convertErrno(err.errno) + if errno in [0x991, 0x992] : # not a single result + raise taos.error.ProgrammingError( + "Failed to read back same data for tick: {}, wrote: {}, read: {}" + .format(nextTick, nextInt, "Empty Result" if errno==0x991 else "Multiple Result"), + errno) + elif errno in [0x218, 0x362]: # table doesn't exist + # do nothing + dummy = 0 + else: + # Re-throw otherwise + raise + finally: + self.unlockTable(fullTableName) # Unlock the table no matter what + + # Successfully wrote the data into the DB, let's record it somehow + te.recordDataMark(nextInt) + + if gConfig.record_ops: + self.fAddLogDone.write("Wrote {} to {}\n".format(nextInt, regTableName)) + self.fAddLogDone.flush() + os.fsync(self.fAddLogDone) + def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): # ds = self._dbManager # Quite DANGEROUS here, may result in multi-thread client access db = self._db dbc = wt.getDbConn() - tblSeq = list(range( - self.LARGE_NUMBER_OF_TABLES if gConfig.larger_data else self.SMALL_NUMBER_OF_TABLES)) - random.shuffle(tblSeq) + numTables = self.LARGE_NUMBER_OF_TABLES if gConfig.larger_data else self.SMALL_NUMBER_OF_TABLES + numRecords = self.LARGE_NUMBER_OF_RECORDS if gConfig.larger_data else self.SMALL_NUMBER_OF_RECORDS + tblSeq = list(range(numTables )) + random.shuffle(tblSeq) # now we have random sequence for i in tblSeq: if (i in self.activeTable): # wow already active print("x", end="", flush=True) # concurrent insertion @@ -1861,60 +2001,20 @@ class TaskAddData(StateTransitionTask): self.activeTable.add(i) # marking it active sTable = db.getFixedSuperTable() - regTableName = self.getRegTableName(i) # "db.reg_table_{}".format(i) - sTable.ensureTable(wt.getDbConn(), db.getName(), regTableName) # Ensure the table exists + regTableName = self.getRegTableName(i) # "db.reg_table_{}".format(i) + fullTableName = db.getName() + '.' + regTableName + # self._lockTable(fullTableName) # "create table" below. Stop it if the table is "locked" + sTable.ensureTable(self, wt.getDbConn(), db.getName(), regTableName) # Ensure the table exists + # self._unlockTable(fullTableName) - for j in range(self.LARGE_NUMBER_OF_RECORDS if gConfig.larger_data else self.SMALL_NUMBER_OF_RECORDS): # number of records per table - nextInt = db.getNextInt() - nextTick = db.getNextTick() - if gConfig.record_ops: - self.prepToRecordOps() - self.fAddLogReady.write("Ready to write {} to {}\n".format(nextInt, regTableName)) - self.fAddLogReady.flush() - os.fsync(self.fAddLogReady) - sql = "insert into {}.{} values ('{}', {});".format( # removed: tags ('{}', {}) - db.getName(), - regTableName, - # ds.getFixedSuperTableName(), - # ds.getNextBinary(), ds.getNextFloat(), - nextTick, nextInt) - dbc.execute(sql) - # Successfully wrote the data into the DB, let's record it - # somehow - te.recordDataMark(nextInt) - if gConfig.record_ops: - self.fAddLogDone.write( - "Wrote {} to {}\n".format( - nextInt, regTableName)) - self.fAddLogDone.flush() - os.fsync(self.fAddLogDone) - - # Now read it back and verify, we might encounter an error if table is dropped - if gConfig.verify_data: # only if command line asks for it - try: - readBack = dbc.queryScalar("SELECT speed from {}.{} WHERE ts= '{}'". 
- format(db.getName(), regTableName, nextTick)) - if readBack != nextInt : - raise taos.error.ProgrammingError( - "Failed to read back same data, wrote: {}, read: {}" - .format(nextInt, readBack), 0x999) - except taos.error.ProgrammingError as err: - errno = Helper.convertErrno(err.errno) - if errno in [0x991, 0x992] : # not a single result - raise taos.error.ProgrammingError( - "Failed to read back same data for tick: {}, wrote: {}, read: {}" - .format(nextTick, nextInt, "Empty Result" if errno==0x991 else "Multiple Result"), - errno) - # Re-throw no matter what - raise - + if Dice.throw(1) == 0: # 1 in 2 chance + self._addData(db, dbc, regTableName, te) + else: + self._addDataInBatch(db, dbc, regTableName, te) self.activeTable.discard(i) # not raising an error, unlike remove - - - class ThreadStacks: # stack info for all threads def __init__(self): self._allStacks = {} @@ -1936,17 +2036,19 @@ class ThreadStacks: # stack info for all threads '__init__']: # the thread that extracted the stack continue # ignore # Now print - print("\n<----- Thread Info for ID: {}".format(thNid)) + print("\n<----- Thread Info for LWP/ID: {} (Execution stopped at Bottom Frame) <-----".format(thNid)) + stackFrame = 0 for frame in stack: # print(frame) - print("File {filename}, line {lineno}, in {name}".format( - filename=frame.filename, lineno=frame.lineno, name=frame.name)) + print("[{sf}] File {filename}, line {lineno}, in {name}".format( + sf=stackFrame, filename=frame.filename, lineno=frame.lineno, name=frame.name)) print(" {}".format(frame.line)) - print("-----> End of Thread Info\n") + stackFrame += 1 + print("-----> End of Thread Info ----->\n") class ClientManager: def __init__(self): - print("Starting service manager") + Logging.info("Starting service manager") # signal.signal(signal.SIGTERM, self.sigIntHandler) # signal.signal(signal.SIGINT, self.sigIntHandler) @@ -2048,7 +2150,7 @@ class ClientManager: thPool = ThreadPool(gConfig.num_threads, gConfig.max_steps) self.tc = ThreadCoordinator(thPool, dbManager) - print("Starting client instance to: {}".format(tInst)) + Logging.info("Starting client instance: {}".format(tInst)) self.tc.run() # print("exec stats: {}".format(self.tc.getExecStats())) # print("TC failed = {}".format(self.tc.isFailed())) diff --git a/tests/pytest/crash_gen/db.py b/tests/pytest/crash_gen/db.py index 43c855647c..2a4b362f82 100644 --- a/tests/pytest/crash_gen/db.py +++ b/tests/pytest/crash_gen/db.py @@ -95,6 +95,11 @@ class DbConn: # print("dbs = {}, str = {}, ret2={}, type2={}".format(dbs, dbName,ret2, type(dbName))) return dbName in dbs # TODO: super weird type mangling seen, once here + def existsSuperTable(self, stName): + self.query("show stables") + sts = [v[0] for v in self.getQueryResult()] + return stName in sts + def hasTables(self): return self.query("show tables") > 0 @@ -240,6 +245,7 @@ class MyTDSql: def _execInternal(self, sql): startTime = time.time() + # Logging.debug("Executing SQL: " + sql) ret = self._cursor.execute(sql) # print("\nSQL success: {}".format(sql)) queryTime = time.time() - startTime diff --git a/tests/pytest/crash_gen/misc.py b/tests/pytest/crash_gen/misc.py index 34a33c6af6..2d2ce99d95 100644 --- a/tests/pytest/crash_gen/misc.py +++ b/tests/pytest/crash_gen/misc.py @@ -27,7 +27,7 @@ class LoggingFilter(logging.Filter): class MyLoggingAdapter(logging.LoggerAdapter): def process(self, msg, kwargs): - return "[{}] {}".format(threading.get_ident() % 10000, msg), kwargs + return "[{:04d}] {}".format(threading.get_ident() % 10000, msg), kwargs # 
return '[%s] %s' % (self.extra['connid'], msg), kwargs @@ -51,7 +51,7 @@ class Logging: _logger.addHandler(ch) # Logging adapter, to be used as a logger - print("setting logger variable") + # print("setting logger variable") # global logger cls.logger = MyLoggingAdapter(_logger, []) @@ -166,6 +166,9 @@ class Progress: SERVICE_RECONNECT_START = 4 SERVICE_RECONNECT_SUCCESS = 5 SERVICE_RECONNECT_FAILURE = 6 + SERVICE_START_NAP = 7 + CREATE_TABLE_ATTEMPT = 8 + tokens = { STEP_BOUNDARY: '.', BEGIN_THREAD_STEP: '[', @@ -174,6 +177,8 @@ class Progress: SERVICE_RECONNECT_START: '', SERVICE_RECONNECT_FAILURE: '.xr>', + SERVICE_START_NAP: '_zz', + CREATE_TABLE_ATTEMPT: '_c', } @classmethod diff --git a/tests/pytest/crash_gen/service_manager.py b/tests/pytest/crash_gen/service_manager.py index 196e9d944a..d249abc439 100644 --- a/tests/pytest/crash_gen/service_manager.py +++ b/tests/pytest/crash_gen/service_manager.py @@ -47,6 +47,17 @@ class TdeInstance(): .format(selfPath, projPath)) return buildPath + @classmethod + def prepareGcovEnv(cls, env): + # Ref: https://gcc.gnu.org/onlinedocs/gcc/Cross-profiling.html + bPath = cls._getBuildPath() # build PATH + numSegments = len(bPath.split('/')) - 1 # "/x/TDengine/build" should yield 3 + numSegments = numSegments - 1 # DEBUG only + env['GCOV_PREFIX'] = bPath + '/svc_gcov' + env['GCOV_PREFIX_STRIP'] = str(numSegments) # Strip every element, plus, ENV needs strings + Logging.info("Preparing GCOV environement to strip {} elements and use path: {}".format( + numSegments, env['GCOV_PREFIX'] )) + def __init__(self, subdir='test', tInstNum=0, port=6030, fepPort=6030): self._buildDir = self._getBuildPath() self._subdir = '/' + subdir # TODO: tolerate "/" @@ -217,6 +228,11 @@ class TdeSubProcess: # raise CrashGenError("Empty instance not allowed in TdeSubProcess") # self._tInst = tInst # Default create at ServiceManagerThread + def __repr__(self): + if self.subProcess is None: + return '[TdeSubProc: Empty]' + return '[TdeSubProc: pid = {}]'.format(self.getPid()) + def getStdOut(self): return self.subProcess.stdout @@ -235,17 +251,30 @@ class TdeSubProcess: # Sanity check if self.subProcess: # already there raise RuntimeError("Corrupt process state") - + + # Prepare environment variables for coverage information + # Ref: https://stackoverflow.com/questions/2231227/python-subprocess-popen-with-a-modified-environment + myEnv = os.environ.copy() + TdeInstance.prepareGcovEnv(myEnv) + + # print(myEnv) + # print(myEnv.items()) + # print("Starting TDengine via Shell: {}".format(cmdLineStr)) + + useShell = True self.subProcess = subprocess.Popen( - cmdLine, - shell=False, + ' '.join(cmdLine) if useShell else cmdLine, + shell=useShell, # svcCmdSingle, shell=True, # capture core dump? stdout=subprocess.PIPE, stderr=subprocess.PIPE, # bufsize=1, # not supported in binary mode - close_fds=ON_POSIX + close_fds=ON_POSIX, + env=myEnv ) # had text=True, which interferred with reading EOF + STOP_SIGNAL = signal.SIGKILL # What signal to use (in kill) to stop a taosd process? + def stop(self): """ Stop a sub process, and try to return a meaningful return code. 
@@ -267,7 +296,7 @@ class TdeSubProcess: SIGUSR2 12 """ if not self.subProcess: - print("Sub process already stopped") + Logging.error("Sub process already stopped") return # -1 retCode = self.subProcess.poll() # ret -N means killed with signal N, otherwise it's from exit(N) @@ -278,20 +307,25 @@ class TdeSubProcess: return retCode # process still alive, let's interrupt it - print("Terminate running process, send SIG_INT and wait...") - # sub process should end, then IPC queue should end, causing IO thread to end - # sig = signal.SIGINT - sig = signal.SIGKILL - self.subProcess.send_signal(sig) # SIGNINT or SIGKILL + Logging.info("Terminate running process, send SIG_{} and wait...".format(self.STOP_SIGNAL)) + # sub process should end, then IPC queue should end, causing IO thread to end + topSubProc = psutil.Process(self.subProcess.pid) + for child in topSubProc.children(recursive=True): # or parent.children() for recursive=False + child.send_signal(self.STOP_SIGNAL) + time.sleep(0.2) # 200 ms + # topSubProc.send_signal(sig) # now kill the main sub process (likely the Shell) + + self.subProcess.send_signal(self.STOP_SIGNAL) # main sub process (likely the Shell) self.subProcess.wait(20) retCode = self.subProcess.returncode # should always be there # May throw subprocess.TimeoutExpired exception above, therefore # The process is guranteed to have ended by now self.subProcess = None if retCode != 0: # != (- signal.SIGINT): - Logging.error("TSP.stop(): Failed to stop sub proc properly w/ SIG {}, retCode={}".format(sig, retCode)) + Logging.error("TSP.stop(): Failed to stop sub proc properly w/ SIG {}, retCode={}".format( + self.STOP_SIGNAL, retCode)) else: - Logging.info("TSP.stop(): sub proc successfully terminated with SIG {}".format(sig)) + Logging.info("TSP.stop(): sub proc successfully terminated with SIG {}".format(self.STOP_SIGNAL)) return - retCode class ServiceManager: @@ -439,7 +473,7 @@ class ServiceManager: time.sleep(self.PAUSE_BETWEEN_IPC_CHECK) # pause, before next round # raise CrashGenError("dummy") - print("Service Manager Thread (with subprocess) ended, main thread exiting...") + Logging.info("Service Manager Thread (with subprocess) ended, main thread exiting...") def _getFirstInstance(self): return self._tInsts[0] @@ -452,7 +486,7 @@ class ServiceManager: # Find if there's already a taosd service, and then kill it for proc in psutil.process_iter(): if proc.name() == 'taosd': - print("Killing an existing TAOSD process in 2 seconds... press CTRL-C to interrupt") + Logging.info("Killing an existing TAOSD process in 2 seconds... 
press CTRL-C to interrupt") time.sleep(2.0) proc.kill() # print("Process: {}".format(proc.name())) @@ -559,7 +593,8 @@ class ServiceManagerThread: for i in range(0, 100): time.sleep(1.0) # self.procIpcBatch() # don't pump message during start up - print("_zz_", end="", flush=True) + Progress.emit(Progress.SERVICE_START_NAP) + # print("_zz_", end="", flush=True) if self._status.isRunning(): Logging.info("[] TDengine service READY to process requests") Logging.info("[] TAOS service started: {}".format(self)) @@ -595,12 +630,12 @@ class ServiceManagerThread: def stop(self): # can be called from both main thread or signal handler - print("Terminating TDengine service running as the sub process...") + Logging.info("Terminating TDengine service running as the sub process...") if self.getStatus().isStopped(): - print("Service already stopped") + Logging.info("Service already stopped") return if self.getStatus().isStopping(): - print("Service is already being stopped") + Logging.info("Service is already being stopped") return # Linux will send Control-C generated SIGINT to the TDengine process # already, ref: @@ -616,10 +651,10 @@ class ServiceManagerThread: if retCode == signal.SIGSEGV : # SGV Logging.error("[[--ERROR--]]: TDengine service SEGV fault (check core file!)") except subprocess.TimeoutExpired as err: - print("Time out waiting for TDengine service process to exit") + Logging.info("Time out waiting for TDengine service process to exit") else: if self._tdeSubProcess.isRunning(): # still running, should now never happen - print("FAILED to stop sub process, it is still running... pid = {}".format( + Logging.error("FAILED to stop sub process, it is still running... pid = {}".format( self._tdeSubProcess.getPid())) else: self._tdeSubProcess = None # not running any more @@ -683,9 +718,9 @@ class ServiceManagerThread: return # we are done with THIS BATCH else: # got line, printing out if forceOutput: - Logging.info(line) + Logging.info('[TAOSD] ' + line) else: - Logging.debug(line) + Logging.debug('[TAOSD] ' + line) print(">", end="", flush=True) _ProgressBars = ["--", "//", "||", "\\\\"] @@ -728,11 +763,11 @@ class ServiceManagerThread: # queue.put(line) # meaning sub process must have died - Logging.info("\nEnd of stream detected for TDengine STDOUT: {}".format(self)) + Logging.info("EOF for TDengine STDOUT: {}".format(self)) out.close() def svcErrorReader(self, err: IO, queue): for line in iter(err.readline, b''): - print("\nTDengine Service (taosd) ERROR (from stderr): {}".format(line)) - Logging.info("\nEnd of stream detected for TDengine STDERR: {}".format(self)) + Logging.info("TDengine STDERR: {}".format(line)) + Logging.info("EOF for TDengine STDERR: {}".format(self)) err.close() \ No newline at end of file diff --git a/tests/pytest/crash_gen_bootstrap.py b/tests/pytest/crash_gen_bootstrap.py index a3417d21a8..fd12284b9d 100644 --- a/tests/pytest/crash_gen_bootstrap.py +++ b/tests/pytest/crash_gen_bootstrap.py @@ -11,7 +11,7 @@ ################################################################### import sys -from crash_gen.crash_gen import MainExec +from crash_gen.crash_gen_main import MainExec if __name__ == "__main__": diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index b14321a4ef..a48dbdc480 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -161,7 +161,7 @@ python3 ./test.py -f stream/metric_1.py python3 ./test.py -f stream/new.py python3 ./test.py -f stream/stream1.py python3 ./test.py -f stream/stream2.py -python3 ./test.py -f 
stream/parser.py +#python3 ./test.py -f stream/parser.py python3 ./test.py -f stream/history.py #alter table @@ -207,3 +207,20 @@ python3 test.py -f tools/taosdemo.py python3 test.py -f subscribe/singlemeter.py #python3 test.py -f subscribe/stability.py python3 test.py -f subscribe/supertable.py + + +# update +python3 ./test.py -f update/allow_update.py +python3 ./test.py -f update/allow_update-0.py +python3 ./test.py -f update/append_commit_data.py +python3 ./test.py -f update/append_commit_last-0.py +python3 ./test.py -f update/append_commit_last.py +python3 ./test.py -f update/merge_commit_data.py +python3 ./test.py -f update/merge_commit_data-0.py +python3 ./test.py -f update/merge_commit_data2.py +python3 ./test.py -f update/merge_commit_data2_update0.py +python3 ./test.py -f update/merge_commit_last-0.py +python3 ./test.py -f update/merge_commit_last.py + +# wal +python3 ./test.py -f wal/addOldWalTest.py \ No newline at end of file diff --git a/tests/pytest/functions/function_percentile.py b/tests/pytest/functions/function_percentile.py index aaeb94372e..e63d65f2e6 100644 --- a/tests/pytest/functions/function_percentile.py +++ b/tests/pytest/functions/function_percentile.py @@ -130,8 +130,19 @@ class TDTestCase: tdSql.query("select percentile(col6, 100) from test") tdSql.checkData(0, 0, np.percentile(floatData, 100)) tdSql.query("select apercentile(col6, 100) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) + print("apercentile result: %s" % tdSql.getData(0, 0)) + tdSql.execute("create table meters (ts timestamp, voltage int) tags(loc nchar(20))") + tdSql.execute("create table t0 using meters tags('beijing')") + tdSql.execute("create table t1 using meters tags('shanghai')") + for i in range(self.rowNum): + tdSql.execute("insert into t0 values(%d, %d)" % (self.ts + i, i + 1)) + tdSql.execute("insert into t1 values(%d, %d)" % (self.ts + i, i + 1)) + + tdSql.error("select percentile(voltage, 20) from meters") + tdSql.query("select apercentile(voltage, 20) from meters") + print("apercentile result: %s" % tdSql.getData(0, 0)) + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/handle_crash_gen_val_log.sh b/tests/pytest/handle_crash_gen_val_log.sh index 1a4c12a16c..2d48de65c9 100755 --- a/tests/pytest/handle_crash_gen_val_log.sh +++ b/tests/pytest/handle_crash_gen_val_log.sh @@ -5,7 +5,9 @@ GREEN='\033[1;32m' GREEN_DARK='\033[0;32m' GREEN_UNDERLINE='\033[4;32m' NC='\033[0m' - +nohup /root/TDinternal/debug/build/bin/taosd -c /root/TDinternal/community/sim/dnode1/cfg >/dev/null & +./crash_gen.sh --valgrind -p -t 10 -s 100 -b 4 +pidof taosd|xargs kill grep 'start to execute\|ERROR SUMMARY' valgrind.err|grep -v 'grep'|uniq|tee crash_gen_mem_err.log for memError in `grep 'ERROR SUMMARY' crash_gen_mem_err.log | awk '{print $4}'` diff --git a/tests/pytest/insert/ningsiInsert.py b/tests/pytest/insert/ningsiInsert.py new file mode 100644 index 0000000000..bcad2b03ed --- /dev/null +++ b/tests/pytest/insert/ningsiInsert.py @@ -0,0 +1,88 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import requests, json +import threading +import string +import random +import time + +class RestfulInsert: + def init(self): + self.header = {'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='} + self.url = "http://ningsi60:6041/rest/sql" + self.ts = 1104508800000 + self.numOfThreads = 10 + self.numOfTables = 3000 + self.dbName = 'netmonitortaos' + self.stbName = 'devinfomt' + self.prefix = 'dev' + + def get_random_string(self, length): + letters = string.ascii_lowercase + result_str = ''.join(random.choice(letters) for i in range(length)) + return result_str + + def createTables(self, threadID): + print("create table: thread %d started" % threadID) + tablesPerThread = int (self.numOfTables / self.numOfThreads) + for i in range(tablesPerThread): + data = "create table '%s'.dev_%d using '%s'.'%s' tags('%s', '%s')" % (self.dbName, i + threadID * tablesPerThread, self.dbName, self.stbName, self.get_random_string(25), self.get_random_string(25)) + response = requests.post(self.url, data, headers = self.header) + if response.status_code != 200: + print(response.content) + + def insertData(self, threadID): + print("insert data: thread %d started" % threadID) + tablesPerThread = int (self.numOfTables / self.numOfThreads) + base_ts = self.ts + while True: + i = 0 + for i in range(tablesPerThread): + data = "insert into %s.dev_%d values(%d, '%s', '%s', %d, %d, %d)" % (self.dbName, i + threadID * tablesPerThread, base_ts, self.get_random_string(25), self.get_random_string(30), random.randint(1, 10000), random.randint(1, 10000), random.randint(1, 10000)) + response = requests.post(self.url, data, headers = self.header) + if response.status_code != 200: + print(response.content) + + time.sleep(30) + base_ts = base_ts + 1 + + def run(self): + data = "create database if not exists %s keep 7300" % self.dbName + requests.post(self.url, data, headers = self.header) + + data = "create table '%s'.'%s' (timeid timestamp, devdesc binary(50), devname binary(50), cpu bigint, temp bigint, ram bigint) tags(devid binary(50), modelid binary(50))" % (self.dbName, self.stbName) + requests.post(self.url, data, headers = self.header) + + threads = [] + for i in range(self.numOfThreads): + thread = threading.Thread(target=self.createTables, args=(i,)) + thread.start() + threads.append(thread) + + for i in range(self.numOfThreads): + threads[i].join() + + threads = [] + for i in range(self.numOfThreads): + thread = threading.Thread(target=self.insertData, args=(i,)) + thread.start() + threads.append(thread) + + for i in range(self.numOfThreads): + threads[i].join() + +ri = RestfulInsert() +ri.init() +ri.run() diff --git a/tests/pytest/query/bug1874.py b/tests/pytest/query/bug1874.py new file mode 100644 index 0000000000..7177484870 --- /dev/null +++ b/tests/pytest/query/bug1874.py @@ -0,0 +1,59 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + print("==========step1") + print("create table && insert data") + + tdSql.execute("create table join_mt0 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(12))") + tdSql.execute("create table join_mt1 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(12))") + stable=0 + insertRows = 1000 + tbnum = 3 + t0 = 1604298064000 + tdLog.info("insert %d rows" % (insertRows)) + for i in range(tbnum): + tdSql.execute("create table join_tb%d using join_mt%d tags(%d,'abc')" %(i,stable,i)) + for j in range(insertRows): + ret = tdSql.execute( + "insert into join_tb%d values (%d , %d,%d,%d,%d,%d,%d,%d, '%s','%s')" % + (i,t0+i,i%100,i%100,i%100,i%100,i%100,i%100,i%100,'binary'+str(i%100),'nchar'+str(i%100))) + stable=stable+1 + for i in range(tbnum): + tdSql.execute("create table join_1_tb%d using join_mt%d tags(%d,'abc')" %(i,stable,i)) + for j in range(insertRows): + ret = tdSql.execute( + "insert into join_tb%d values (%d , %d,%d,%d,%d,%d,%d,%d, '%s','%s')" % + (i,t0+i,i%100,i%100,i%100,i%100,i%100,i%100,i%100,'binary'+str(i%100),'nchar'+str(i%100))) + print("==========step2") + print("join query ") + tdLog.info("select count(join_mt0.c1), first(join_mt0.c1) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts interval(10a) group by join_mt0.t1, join_mt0.t2, join_mt1.t1 order by join_mt0.ts desc, join_mt1.ts asc limit 10;") + tdSql.error("select count(join_mt0.c1), first(join_mt0.c1) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts interval(10a) group by join_mt0.t1, join_mt0.t2, join_mt1.t1 order by join_mt0.ts desc, join_mt1.ts asc limit 10;") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/query/bug1875.py b/tests/pytest/query/bug1875.py new file mode 100644 index 0000000000..12e22a7a25 --- /dev/null +++ b/tests/pytest/query/bug1875.py @@ -0,0 +1,60 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + print("==========step1") + print("create table && insert data") + + tdSql.execute("create table join_mt0 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(12))") + tdSql.execute("create table join_mt1 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(12))") + stable=0 + insertRows = 1000 + tbnum = 3 + t0 = 1604298064000 + tdLog.info("insert %d rows" % (insertRows)) + for i in range(tbnum): + tdSql.execute("create table join_tb%d using join_mt%d tags(%d,'abc')" %(i,stable,i)) + for j in range(insertRows): + ret = tdSql.execute( + "insert into join_tb%d values (%d , %d,%d,%d,%d,%d,%d,%d, '%s','%s')" % + (i,t0+i,i%100,i%100,i%100,i%100,i%100,i%100,i%100,'binary'+str(i%100),'nchar'+str(i%100))) + stable=stable+1 + for i in range(tbnum): + tdSql.execute("create table join_1_tb%d using join_mt%d tags(%d,'abc')" %(i,stable,i)) + for j in range(insertRows): + ret = tdSql.execute( + "insert into join_tb%d values (%d , %d,%d,%d,%d,%d,%d,%d, '%s','%s')" % + (i,t0+i,i%100,i%100,i%100,i%100,i%100,i%100,i%100,'binary'+str(i%100),'nchar'+str(i%100))) + print("==========step2") + print("join query ") + tdLog.info("select count(join_mt0.c1), first(join_mt0.c1) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts interval(10a) group by join_mt0.t1, join_mt0.t2, join_mt1.t1 order by join_mt0.ts desc, join_mt1.ts asc limit 10;") + tdSql.error("select count(join_mt0.c1), first(join_mt0.c1), first(join_mt1.c9) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts interval(10a) group by join_mt0.t1, join_mt0.t2 order by join_mt0.t1 desc") + tdSql.error("select count(join_mt0.c1), first(join_mt0.c1), first(join_mt1.c9) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts interval(10a) group by join_mt0.t1, join_mt0.t2 order by join_mt0.t1 desc limit 3;") + tdSql.error("select count(join_mt0.c1), first(join_mt0.c1), first(join_mt1.c9) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts interval(10a) group by join_mt0.t1, join_mt0.t2 order by join_mt0.t1 desc slimit 3;") + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/query/bug1876.py b/tests/pytest/query/bug1876.py new file mode 100644 index 0000000000..a1cc4b6eb2 --- /dev/null +++ b/tests/pytest/query/bug1876.py @@ -0,0 +1,58 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + print("==========step1") + print("create table && insert data") + + tdSql.execute("create table join_mt0 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(12))") + tdSql.execute("create table join_mt1 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(12))") + stable=0 + insertRows = 1000 + tbnum = 3 + t0 = 1604298064000 + tdLog.info("insert %d rows" % (insertRows)) + for i in range(tbnum): + tdSql.execute("create table join_tb%d using join_mt%d tags(%d,'abc')" %(i,stable,i)) + for j in range(insertRows): + ret = tdSql.execute( + "insert into join_tb%d values (%d , %d,%d,%d,%d,%d,%d,%d, '%s','%s')" % + (i,t0+i,i%100,i%100,i%100,i%100,i%100,i%100,i%100,'binary'+str(i%100),'nchar'+str(i%100))) + stable=stable+1 + for i in range(tbnum): + tdSql.execute("create table join_1_tb%d using join_mt%d tags(%d,'abc')" %(i,stable,i)) + for j in range(insertRows): + ret = tdSql.execute( + "insert into join_tb%d values (%d , %d,%d,%d,%d,%d,%d,%d, '%s','%s')" % + (i,t0+i,i%100,i%100,i%100,i%100,i%100,i%100,i%100,'binary'+str(i%100),'nchar'+str(i%100))) + print("==========step2") + print("join query ") + tdLog.info("select count(join_mt0.c1), first(join_mt0.c1) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts interval(10a) group by join_mt0.t1, join_mt0.t2, join_mt1.t1 order by join_mt0.ts desc, join_mt1.ts asc limit 10;") + tdSql.error("select count(join_mt0.c1), first(join_mt0.c1)-first(join_mt1.c1), first(join_mt1.c9) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts;") + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/query/kill_query.py b/tests/pytest/query/kill_query.py new file mode 100644 index 0000000000..8975eea268 --- /dev/null +++ b/tests/pytest/query/kill_query.py @@ -0,0 +1,82 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
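A note on the fixture shared by the three regression tests above: both insert loops build the row timestamp from the table index i rather than the inner index j, so each child table effectively holds a single distinct timestamp, and the second loop creates join_1_tb%d under join_mt1 but then writes into join_tb%d, so join_mt1 never receives rows of its own. If the intent was to give join_mt1 the same data as join_mt0, the population could look like the sketch below; the helper name and both corrections are assumptions, not something the patch itself states.

# Sketch only: assumes the second loop was meant to fill join_mt1's child tables
# (join_1_tb%d) and to advance the timestamp with the inner index j.
def build_join_mt1_inserts(tbnum=3, insert_rows=1000, t0=1604298064000):
    for i in range(tbnum):
        yield "create table join_1_tb%d using join_mt1 tags(%d,'abc')" % (i, i)
        for j in range(insert_rows):
            yield ("insert into join_1_tb%d values (%d, %d,%d,%d,%d,%d,%d,%d, '%s','%s')" %
                   (i, t0 + j, i % 100, i % 100, i % 100, i % 100, i % 100, i % 100, i % 100,
                    'binary' + str(i % 100), 'nchar' + str(i % 100)))

# Usage inside a test body would then be:
#     for sql in build_join_mt1_inserts():
#         tdSql.execute(sql)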
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +from util.dnodes import tdDnodes +import os +import threading +import time + + +class TDTestCase: + """ + kill query + """ + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + + def query(self): + conn = taos.connect(host='127.0.0.1', user='root', password='taosdata', config='/etc/config') + cursor = conn.cursor() + while True: + cursor.execute('show queries;') + print('show queries!') + temp = cursor.fetchall() + if temp: + print(temp[0][0]) + cursor.execute('kill query %s ;' % temp[0][0]) + print('kill query success') + break + time.sleep(0.5) + cursor.close() + conn.close() + + def run(self): + tdSql.prepare() + + print("==============step1") + os.system('yes | sudo taosdemo -n 100') + print('insert into test.meters 10000000 rows') + + + t1 = threading.Thread(target=self.query) + t1.setDaemon(True) + t1.start() + + print("==============step2") + tdSql.execute('use test;') + try: + print('============begin select * from 10000000 rows') + tdSql.query('select * from test.meters;') + # print(tdSql.queryResult) + except Exception as e: + if not "ProgrammingError('Query terminated'" in str(e): + raise Exception('fail') + + print('success') + print('kill query success') + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) + diff --git a/tests/pytest/query/queryLike.py b/tests/pytest/query/queryLike.py new file mode 100644 index 0000000000..3c3b030f8f --- /dev/null +++ b/tests/pytest/query/queryLike.py @@ -0,0 +1,45 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
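kill_query.py above relies on a small polling pattern: a daemon thread repeatedly runs `show queries`, takes the first query id from the result set, and issues `kill query <id>` while the main thread is blocked in a large select. A standalone sketch of that helper follows; the function name, the timeout, and the keyword-argument passthrough to taos.connect are additions of this sketch, not part of the test.

import time
import taos

def kill_first_running_query(poll_interval=0.5, timeout=30, **connect_args):
    # connect_args are passed straight to taos.connect, e.g. host/user/password/config.
    conn = taos.connect(**connect_args)
    cursor = conn.cursor()
    deadline = time.time() + timeout
    killed_id = None
    try:
        while time.time() < deadline:
            cursor.execute('show queries;')
            rows = cursor.fetchall()
            if rows:
                killed_id = rows[0][0]          # first column holds the query id
                cursor.execute('kill query %s ;' % killed_id)
                break
            time.sleep(poll_interval)
    finally:
        cursor.close()
        conn.close()
    return killed_id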
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor())
+
+    def run(self):
+        tdSql.prepare()
+
+        tdSql.execute("create table cars(ts timestamp, c nchar(2)) tags(t1 nchar(2))")
+        tdSql.execute("insert into car0 using cars tags('aa') values(now, 'bb');")
+        tdSql.query("select count(*) from cars where t1 like '%50 90 30 04 00 00%'")
+        tdSql.checkRows(0)
+
+        tdSql.execute("create table test_cars(ts timestamp, c nchar(2)) tags(t1 nchar(20))")
+        tdSql.execute("insert into car1 using test_cars tags('150 90 30 04 00 002') values(now, 'bb');")
+        tdSql.query("select * from test_cars where t1 like '%50 90 30 04 00 00%'")
+        tdSql.checkRows(1)
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/query/query_performance.py b/tests/pytest/query/query_performance.py
new file mode 100644
index 0000000000..c31569ac13
--- /dev/null
+++ b/tests/pytest/query/query_performance.py
@@ -0,0 +1,219 @@
+import time
+import taos
+import csv
+import numpy as np
+import random
+import os
+import requests
+import json
+import sys
+
+"""
+Third-party libraries required: taos, requests, numpy.
+The taosd service must already be running on this machine.
+See the example at the bottom for usage.
+"""
+
+
+
+class Ding:
+    """
+    Send messages to DingTalk.
+    urls: a list of DingTalk group webhook tokens; several groups may be targeted, and the robot must be whitelisted (or otherwise allowed) in advance
+    at_mobiles: a list of phone numbers of the people to @
+    msg: the str to send
+    """
+    def __init__(self, url_list, at_mobiles):
+        self.urls = url_list
+        self.at_mobiles = at_mobiles
+
+    def send_message(self, msg):
+        data1 = {
+            "msgtype": "text",
+            "text": {
+                "content": msg
+            },
+            "at": {
+                "atMobiles": self.at_mobiles,
+                "isAtAll": False
+            }
+        }
+
+        header = {'Content-Type': 'application/json; charset=utf-8'}
+
+        for url in self.urls:
+            requests.post(url=url, data=json.dumps(data1), headers=header)
+
+
+
+
+class TDConn:
+    def __init__(self, config:dict):
+        self.host = config['host']
+        self.user = config['user']
+        self.password = config['password']
+        self.config = config['config']
+        self.conn = None
+        self.cursor = None
+
+    def connect(self):
+        conn = taos.connect(host=self.host, user=self.user, password=self.password, config=self.config)
+        cursor = conn.cursor()
+        self.conn = conn
+        self.cursor = cursor
+        print('connect ...')
+        return self.cursor
+
+    def close(self):
+        self.cursor.close()
+        self.conn.close()
+        print('close ...')
+
+
+class Tool:
+    """
+    Possibly useful helpers.
+    """
+    @staticmethod
+    def str_gen(num):
+        return ''.join(random.sample('abcdefghijklmnopqrstuvwxyz', num))
+
+    @staticmethod
+    def float_gen(n, m):
+        return random.uniform(n, m)
+
+    @staticmethod
+    def int_gen(n, m):
+        return random.randint(n, m)
+
+class Demo:
+    def __init__(self, engine):
+        self.engine = engine['engine'](engine['config'])
+        self.cursor = self.engine.connect()
+
+
+    def date_gen(self, db, number_per_table, type_of_cols, num_of_cols_per_record, num_of_tables):
+        """
+        : all of these are currently taosdemo parameters
+        :return:
+        """
+        sql = 'yes | sudo taosdemo -d {db} -n {number_per_table} -b {type_of_cols} -l {num_of_cols_per_record} ' \
+              '-t {num_of_tables}'.format(db=db, number_per_table=number_per_table, type_of_cols=type_of_cols,
+                                          num_of_cols_per_record=num_of_cols_per_record, num_of_tables=num_of_tables)
+        os.system(sql)
+        print('insert data completed')
+
+
+    # def main(self, db, circle, csv_name, case_func, result_csv, nums, ding_flag):
+    def main(self, every_num_per_table, result_csv, all_result_csv, values):
+        db = values['db_name']
+        number_per_table = every_num_per_table
+        type_of_cols = values['col_type']
+        num_of_cols_per_record = values['col_num']
+        num_of_tables = values['table_num']
+        self.date_gen(db=db, number_per_table=number_per_table, type_of_cols=type_of_cols,
+                      num_of_cols_per_record=num_of_cols_per_record, num_of_tables=num_of_tables)
+
+        circle = values['circle']
+        # print(every_num_per_table, result_csv, values)
+        csv_name = result_csv
+        case_func = values['sql_func']
+        nums = num_of_tables * number_per_table
+        ding_flag = values['ding_flag']
+
+        _data = []
+        f = open(csv_name,'w',encoding='utf-8')
+        f1 = open(all_result_csv,'a',encoding='utf-8')
+        csv_writer = csv.writer(f)
+        csv_writer1 = csv.writer(f1)
+        csv_writer.writerow(["number", "elapse", 'sql'])
+        self.cursor.execute('use {db};'.format(db=db))
+
+
+        for i in range(circle):
+            self.cursor.execute('reset query cache;')
+            sql = case_func()
+            start = time.time()
+            self.cursor.execute(sql)
+            self.cursor.fetchall()
+            end = time.time()
+            _data.append(end-start)
+            elapse = '%.4f' %(end -start)
+            print(sql, i, elapse, '\n')
+            csv_writer.writerow([i+1, elapse, sql])
+
+            # time.sleep(1)
+        _list = [nums, np.mean(_data)]
+        _str = 'total rows: %s , tables: %s , rows per table: %s , column type: %s \n' % \
+               (nums, num_of_tables, number_per_table, type_of_cols)
+        # print('avg : ', np.mean(_data), '\n')
+        _str += 'mean : %.4f s\n' % np.mean(_data)
+        for each in (50, 80, 90, 95):
+            _list.append(np.percentile(_data,each))
+            _str += ' %dth percentile : %.4f s\n' % (each , np.percentile(_data,each))
+
+        print(_str)
+        if ding_flag:
+            ding = Ding(values['ding_config']['urls'], values['ding_config']['at_mobiles'])
+            ding.send_message(_str)
+        csv_writer1.writerow(_list)
+        f.close()
+        f1.close()
+        self.engine.close()
+
+
+def run(engine, test_cases: dict, result_dir):
+    for each_case, values in test_cases.items():
+        for every_num_per_table in values['number_per_table']:
+            result_csv = result_dir + '{case}_table{table_num}_{number_per_table}.csv'.\
+                format(case=each_case, table_num=values['table_num'], number_per_table=every_num_per_table)
+            all_result_csv = result_dir + '{case_all}_result.csv'.format(case_all=each_case)
+            d = Demo(engine)
+            # print(each_case, result_csv)
+            d.main(every_num_per_table, result_csv, all_result_csv, values)
+
+
+
+if __name__ == '__main__':
+    """
+    Test cases are added in test_cases.
+    result_dir: directory where the reports are written; it receives the result of every run and the aggregated statistics of each case. The directory must be writable by the executing user.
+    case1, case2 : names of the concrete cases
+    engine: the database engine; currently only taosd. The taosd service must be running when it is used.
+    table_num: number of tables created when generating the data
+    circle: number of test iterations; the average is taken over them
+    number_per_table: expects a list; each value in the list is tested in turn
+    col_num: number of columns per table
+    col_type: data type of the columns in the table
+    db_name: name of the db used for the generated data; test is used by default
+    sql_func: the SQL-generating function of the current case; define it yourself
+    ding_flag: set it to a truthy value if the results should be pushed to DingTalk
+    ding_config: only meaningful when ding_flag is truthy (it may be left empty when ding_flag is false); urls takes a list of the webhook tokens of the target groups,
+    which must be whitelisted in advance, and at_mobiles takes a list of the phone numbers of the people to @ in those groups
+    """
+    engine_dict = {
+        'taosd': {'engine': TDConn, 'config':
+            {'host': '127.0.0.1', 'user': 'root', 'password': 'taosdata', 'config':'/etc/taos'}}
+    }
+
+    def case1():
+        return 'select * from meters where f1 = {n};'.format(n=random.randint(1,30))
+
+    def case2():
+        return 'select * from meters where f1 = %.4f;' %random.uniform(1,30)
+
+
+    result_dir = '/usr/local/demo/benchmarktestdata/'
+    test_cases = {
+        'case1': {'engine':'taosd', 'table_num': 10, 'circle': 100, 'number_per_table':[10, 100], 'col_num': 5,
+                  'col_type': 'INT', 'db_name': 'test', 'sql_func': case1, 'ding_flag': True,
+                  'ding_config':
+                      {'urls': [r'https://oapi.dingtalk.com/robot/send?access_token=xxxxxxx0cd93'],
+                       'at_mobiles':[17000000000,],}},
+        'case2': {'engine':'taosd', 'table_num': 10, 'circle': 50, 'number_per_table':[10, 100], 'col_num': 5,
+                  'col_type': 'FLOAT', 'db_name': 'test', 'sql_func': case2, 'ding_flag': False,
+                  'ding_config': None
+                  }
+    }
+
+    run(engine_dict['taosd'], test_cases, result_dir)
diff --git a/tests/pytest/update/allow_update-0.py b/tests/pytest/update/allow_update-0.py
new file mode 100644
index 0000000000..69e23883f3
--- /dev/null
+++ b/tests/pytest/update/allow_update-0.py
@@ -0,0 +1,170 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
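The harness in query_performance.py times each query with time.time(), then reduces the list of elapsed seconds to a mean plus the 50th/80th/90th/95th percentiles with numpy before optionally pushing the text to DingTalk. That reporting step, isolated into a self-contained sketch (the function name is illustrative), is:

import numpy as np

def summarize_latencies(elapsed_seconds, percentiles=(50, 80, 90, 95)):
    # Build the same kind of summary string the harness prints and sends to DingTalk.
    lines = ['mean : %.4f s' % np.mean(elapsed_seconds)]
    for p in percentiles:
        lines.append('%dth percentile : %.4f s' % (p, np.percentile(elapsed_seconds, p)))
    return '\n'.join(lines)

# Example:
#   print(summarize_latencies([0.012, 0.015, 0.011, 0.045, 0.013]))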
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.numOfRecords = 10 + self.ts = 1604295582000 + + def restartTaosd(self): + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.execute("use udb") + + def run(self): + tdSql.prepare() + startTs = self.ts + + print("==============step1") + tdSql.execute("create database udb update 0") + tdSql.execute("use udb") + tdSql.execute("create table t (ts timestamp, a int)") + tdSql.execute("insert into t values (%d, 1)" % (startTs)) + tdSql.execute("insert into t values (%d, 1)" % (startTs - 3)) + tdSql.execute("insert into t values (%d, 1)" % (startTs + 3)) + + tdSql.query("select * from t") + tdSql.checkRows(3) + + tdSql.query("select a from t") + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, 1) + tdSql.checkData(2, 0, 1) + + print("==============step2") + tdSql.execute("insert into t values (%d, 2)" % (startTs)) + tdSql.execute("insert into t values (%d, 2)" % (startTs - 3)) + tdSql.execute("insert into t values (%d, 2)" % (startTs + 3)) + + tdSql.query("select * from t") + tdSql.checkRows(3) + + tdSql.query("select a from t") + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, 1) + tdSql.checkData(2, 0, 1) + + print("==============step3") + tdSql.execute("insert into t values (%d, 3)" % (startTs - 4)) + tdSql.execute("insert into t values (%d, 3)" % (startTs - 2)) + tdSql.execute("insert into t values (%d, 3)" % (startTs + 2)) + tdSql.execute("insert into t values (%d, 3)" % (startTs + 4)) + + tdSql.query("select * from t") + tdSql.checkRows(7) + + tdSql.query("select a from t") + tdSql.checkData(0, 0, 3) + tdSql.checkData(1, 0, 1) + tdSql.checkData(2, 0, 3) + tdSql.checkData(3, 0, 1) + tdSql.checkData(4, 0, 3) + tdSql.checkData(5, 0, 1) + tdSql.checkData(6, 0, 3) + + print("==============step4") + tdSql.execute("insert into t values (%d, 4)" % (startTs - 4)) + tdSql.execute("insert into t values (%d, 4)" % (startTs - 2)) + tdSql.execute("insert into t values (%d, 4)" % (startTs + 2)) + tdSql.execute("insert into t values (%d, 4)" % (startTs + 4)) + + tdSql.query("select * from t") + tdSql.checkRows(7) + + tdSql.query("select a from t") + tdSql.checkData(0, 0, 3) + tdSql.checkData(1, 0, 1) + tdSql.checkData(2, 0, 3) + tdSql.checkData(3, 0, 1) + tdSql.checkData(4, 0, 3) + tdSql.checkData(5, 0, 1) + tdSql.checkData(6, 0, 3) + + print("==============step5") + tdSql.execute("insert into t values (%d, 5)" % (startTs - 1)) + tdSql.execute("insert into t values (%d, 5)" % (startTs + 1)) + + tdSql.query("select * from t") + tdSql.checkRows(9) + + tdSql.query("select a from t") + tdSql.checkData(0, 0, 3) + tdSql.checkData(1, 0, 1) + tdSql.checkData(2, 0, 3) + tdSql.checkData(3, 0, 5) + tdSql.checkData(4, 0, 1) + tdSql.checkData(5, 0, 5) + tdSql.checkData(6, 0, 3) + tdSql.checkData(7, 0, 1) + tdSql.checkData(8, 0, 3) + + print("==============step6") + tdSql.execute("insert into t values (%d, 6)" % (startTs - 4)) + tdSql.execute("insert into t values (%d, 6)" % (startTs - 3)) + tdSql.execute("insert into t values (%d, 6)" % (startTs - 2)) + 
tdSql.execute("insert into t values (%d, 6)" % (startTs - 1)) + tdSql.execute("insert into t values (%d, 6)" % (startTs)) + tdSql.execute("insert into t values (%d, 6)" % (startTs + 1)) + tdSql.execute("insert into t values (%d, 6)" % (startTs + 2)) + tdSql.execute("insert into t values (%d, 6)" % (startTs + 3)) + tdSql.execute("insert into t values (%d, 6)" % (startTs + 4)) + + tdSql.query("select * from t") + tdSql.checkRows(9) + + tdSql.query("select a from t") + tdSql.checkData(0, 0, 3) + tdSql.checkData(1, 0, 1) + tdSql.checkData(2, 0, 3) + tdSql.checkData(3, 0, 5) + tdSql.checkData(4, 0, 1) + tdSql.checkData(5, 0, 5) + tdSql.checkData(6, 0, 3) + tdSql.checkData(7, 0, 1) + tdSql.checkData(8, 0, 3) + + # restart taosd to commit, and check + self.restartTaosd(); + + tdSql.query("select * from t") + tdSql.checkRows(9) + + tdSql.query("select a from t") + tdSql.checkData(0, 0, 3) + tdSql.checkData(1, 0, 1) + tdSql.checkData(2, 0, 3) + tdSql.checkData(3, 0, 5) + tdSql.checkData(4, 0, 1) + tdSql.checkData(5, 0, 5) + tdSql.checkData(6, 0, 3) + tdSql.checkData(7, 0, 1) + tdSql.checkData(8, 0, 3) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/update/allow_update.py b/tests/pytest/update/allow_update.py new file mode 100644 index 0000000000..fa122ff5cf --- /dev/null +++ b/tests/pytest/update/allow_update.py @@ -0,0 +1,266 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.numOfRecords = 10 + self.ts = 1604295582000 + + def restartTaosd(self): + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.execute("use udb") + + def run(self): + tdSql.prepare() + startTs = self.ts + + tdSql.execute("create database udb update 1") + tdSql.execute("use udb") + tdSql.execute("create table t (ts timestamp, a int)") + + print("==============step1") + tdSql.execute("insert into t values (%d, 1)" % (startTs)) + tdSql.execute("insert into t values (%d, 1)" % (startTs - 3)) + tdSql.execute("insert into t values (%d, 1)" % (startTs + 3)) + + tdSql.query("select * from t") + tdSql.checkRows(3) + + tdSql.query("select a from t") + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, 1) + tdSql.checkData(2, 0, 1) + + print("==============step2") + tdSql.execute("insert into t values (%d, 2)" % (startTs)) + tdSql.execute("insert into t values (%d, 2)" % (startTs - 3)) + tdSql.execute("insert into t values (%d, 2)" % (startTs + 3)) + + tdSql.query("select * from t") + tdSql.checkRows(3) + + tdSql.query("select a from t") + tdSql.checkData(0, 0, 2) + tdSql.checkData(1, 0, 2) + tdSql.checkData(2, 0, 2) + + print("==============step3") + tdSql.execute("insert into t values (%d, 3)" % (startTs - 4)) + tdSql.execute("insert into t values (%d, 3)" % (startTs - 
2)) + tdSql.execute("insert into t values (%d, 3)" % (startTs + 2)) + tdSql.execute("insert into t values (%d, 3)" % (startTs + 4)) + + tdSql.query("select * from t") + tdSql.checkRows(7) + + tdSql.query("select a from t") + tdSql.checkData(0, 0, 3) + tdSql.checkData(1, 0, 2) + tdSql.checkData(2, 0, 3) + tdSql.checkData(3, 0, 2) + tdSql.checkData(4, 0, 3) + tdSql.checkData(5, 0, 2) + tdSql.checkData(6, 0, 3) + + print("==============step4") + tdSql.execute("insert into t values (%d, 4)" % (startTs - 4)) + tdSql.execute("insert into t values (%d, 4)" % (startTs - 2)) + tdSql.execute("insert into t values (%d, 4)" % (startTs + 2)) + tdSql.execute("insert into t values (%d, 4)" % (startTs + 4)) + + tdSql.query("select * from t") + tdSql.checkRows(7) + + tdSql.query("select a from t") + tdSql.checkData(0, 0, 4) + tdSql.checkData(1, 0, 2) + tdSql.checkData(2, 0, 4) + tdSql.checkData(3, 0, 2) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 2) + tdSql.checkData(6, 0, 4) + + print("==============step5") + tdSql.execute("insert into t values (%d, 5)" % (startTs - 1)) + tdSql.execute("insert into t values (%d, 5)" % (startTs + 1)) + + tdSql.query("select * from t") + tdSql.checkRows(9) + + tdSql.query("select a from t") + tdSql.checkData(0, 0, 4) + tdSql.checkData(1, 0, 2) + tdSql.checkData(2, 0, 4) + tdSql.checkData(3, 0, 5) + tdSql.checkData(4, 0, 2) + tdSql.checkData(5, 0, 5) + tdSql.checkData(6, 0, 4) + tdSql.checkData(7, 0, 2) + tdSql.checkData(8, 0, 4) + + print("==============step6") + tdSql.execute("insert into t values (%d, 6)" % (startTs - 4)) + tdSql.execute("insert into t values (%d, 6)" % (startTs - 3)) + tdSql.execute("insert into t values (%d, 6)" % (startTs - 2)) + tdSql.execute("insert into t values (%d, 6)" % (startTs - 1)) + tdSql.execute("insert into t values (%d, 6)" % (startTs)) + tdSql.execute("insert into t values (%d, 6)" % (startTs + 1)) + tdSql.execute("insert into t values (%d, 6)" % (startTs + 2)) + tdSql.execute("insert into t values (%d, 6)" % (startTs + 3)) + tdSql.execute("insert into t values (%d, 6)" % (startTs + 4)) + + tdSql.query("select * from t") + tdSql.checkRows(9) + + tdSql.query("select a from t") + tdSql.checkData(0, 0, 6) + tdSql.checkData(1, 0, 6) + tdSql.checkData(2, 0, 6) + tdSql.checkData(3, 0, 6) + tdSql.checkData(4, 0, 6) + tdSql.checkData(5, 0, 6) + tdSql.checkData(6, 0, 6) + tdSql.checkData(7, 0, 6) + tdSql.checkData(8, 0, 6) + + # restart taosd to commit, and check + self.restartTaosd(); + + tdSql.query("select * from t") + tdSql.checkRows(9) + + tdSql.query("select a from t") + tdSql.checkData(0, 0, 6) + tdSql.checkData(1, 0, 6) + tdSql.checkData(2, 0, 6) + tdSql.checkData(3, 0, 6) + tdSql.checkData(4, 0, 6) + tdSql.checkData(5, 0, 6) + tdSql.checkData(6, 0, 6) + tdSql.checkData(7, 0, 6) + tdSql.checkData(8, 0, 6) + + tdSql.execute("create table subt (ts timestamp, a int, b float, c binary(16), d bool)") + + print("==============step7") + tdSql.execute("insert into subt (ts, a, c) values (%d, 1, 'c+0')" % (startTs)) + tdSql.execute("insert into subt (ts, a, c) values (%d, 1, 'c-3')" % (startTs - 3)) + tdSql.execute("insert into subt (ts, a, c) values (%d, 1, 'c+3')" % (startTs + 3)) + + tdSql.query("select * from subt") + tdSql.checkRows(3) + + tdSql.query("select a,b,c,d from subt") + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, 1) + tdSql.checkData(2, 0, 1) + tdSql.checkData(0, 1, None) + tdSql.checkData(1, 1, None) + tdSql.checkData(2, 1, None) + tdSql.checkData(0, 2, 'c-3') + tdSql.checkData(1, 2, 'c+0') + tdSql.checkData(2, 2, 
'c+3') + tdSql.checkData(0, 3, None) + tdSql.checkData(1, 3, None) + tdSql.checkData(2, 3, None) + + print("==============step8") + tdSql.execute("insert into subt (ts, b, d) values (%d, 2.0, true)" % (startTs)) + tdSql.execute("insert into subt (ts, b, d) values (%d, 2.0, true)" % (startTs - 3)) + tdSql.execute("insert into subt (ts, b, d) values (%d, 2.0, false)" % (startTs + 3)) + + tdSql.query("select * from subt") + tdSql.checkRows(3) + + tdSql.query("select a,b,c,d from subt") + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(0, 1, 2.0) + tdSql.checkData(1, 1, 2.0) + tdSql.checkData(2, 1, 2.0) + tdSql.checkData(0, 2, None) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, None) + tdSql.checkData(0, 3, 1) + tdSql.checkData(1, 3, 1) + tdSql.checkData(2, 3, 0) + + # restart taosd to commit, and check + self.restartTaosd(); + + tdSql.query("select * from subt") + tdSql.checkRows(3) + + tdSql.query("select a,b,c,d from subt") + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(0, 1, 2.0) + tdSql.checkData(1, 1, 2.0) + tdSql.checkData(2, 1, 2.0) + tdSql.checkData(0, 2, None) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, None) + tdSql.checkData(0, 3, 1) + tdSql.checkData(1, 3, 1) + tdSql.checkData(2, 3, 0) + + + + tdSql.execute("create table ct (ts timestamp, a int, b float, c binary(128))") + + print("==============step9") + insertRows = 20000 + for i in range(0, insertRows): + tdSql.execute("insert into ct values (%d , %d, %d, 'aabbccddeeffgghhiijjkkllmmoonn112233445566778899xxyyzz')" % (startTs + i, i, i)) + + tdSql.query("select * from ct") + tdSql.checkRows(insertRows) + + for i in range(0, insertRows): + tdSql.execute("insert into ct values (%d , %d, %d, 'aabbccddeeffgghhiijjkkllmmoonn112233445566778899xxyyzz')" % (startTs + i, i+insertRows, i+insertRows)) + + tdSql.query("select * from ct") + tdSql.checkRows(insertRows) + + tdSql.query("select a,b from ct limit 3") + tdSql.checkData(0, 0, insertRows+0) + tdSql.checkData(1, 0, insertRows+1) + tdSql.checkData(2, 0, insertRows+2) + + tdSql.checkData(0, 1, insertRows+0) + tdSql.checkData(1, 1, insertRows+1) + tdSql.checkData(2, 1, insertRows+2) + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/update/append_commit_data-0.py b/tests/pytest/update/append_commit_data-0.py new file mode 100644 index 0000000000..b844a50a08 --- /dev/null +++ b/tests/pytest/update/append_commit_data-0.py @@ -0,0 +1,84 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
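allow_update-0.py and allow_update.py above run the same write pattern against databases created with update 0 and update 1: with update 0 a second insert at an existing timestamp is ignored, while with update 1 it replaces the stored row (which is why the partial-column inserts in steps 7 and 8 leave the unspecified columns NULL). A condensed sketch of that contrast, assuming the same util helpers and illustrative database names, is:

# Sketch only: db names are illustrative; tdSql is the helper used throughout these tests.
ts = 1604295582000

tdSql.execute("create database db_no_update update 0")
tdSql.execute("create database db_update update 1")
for db in ("db_no_update", "db_update"):
    tdSql.execute("use %s" % db)
    tdSql.execute("create table t (ts timestamp, a int)")
    tdSql.execute("insert into t values (%d, 1)" % ts)   # first write
    tdSql.execute("insert into t values (%d, 2)" % ts)   # duplicate timestamp

tdSql.execute("use db_no_update")
tdSql.query("select a from t")
tdSql.checkData(0, 0, 1)    # update 0: the duplicate was ignored

tdSql.execute("use db_update")
tdSql.query("select a from t")
tdSql.checkData(0, 0, 2)    # update 1: the duplicate overwrote the row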
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + print("==========step1") + print("create table && insert data") + s = 'reset query cache' + tdSql.execute(s) + s = 'drop database if exists db' + tdSql.execute(s) + s = 'create database db' + tdSql.execute(s) + s = 'use db' + tdSql.execute(s) + ret = tdSql.execute('create table t1 (ts timestamp, a int)') + + insertRows = 200 + t0 = 1604298064000 + tdLog.info("insert %d rows" % (insertRows)) + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t1 values (%d , 1)' % + (t0+i)) + print("==========step2") + print("restart to commit ") + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from db.t1") + tdSql.checkRows(insertRows) + for k in range(0,100): + tdLog.info("insert %d rows" % (insertRows)) + for i in range (0,insertRows): + ret = tdSql.execute( + 'insert into db.t1 values(%d,1)' % + (t0+k*200+i) + ) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from db.t1") + tdSql.checkRows(insertRows+200*k) + print("==========step2") + print("insert into another table ") + s = 'use db' + tdSql.execute(s) + ret = tdSql.execute('create table t2 (ts timestamp, a int)') + insertRows = 20000 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t2 values (%d, 1)' % + (t0+i)) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t2") + tdSql.checkRows(insertRows) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/update/append_commit_data.py b/tests/pytest/update/append_commit_data.py new file mode 100644 index 0000000000..3169b748e0 --- /dev/null +++ b/tests/pytest/update/append_commit_data.py @@ -0,0 +1,84 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + print("==========step1") + print("create table && insert data") + s = 'reset query cache' + tdSql.execute(s) + s = 'drop database if exists db' + tdSql.execute(s) + s = 'create database db update 1' + tdSql.execute(s) + s = 'use db' + tdSql.execute(s) + ret = tdSql.execute('create table t1 (ts timestamp, a int)') + + insertRows = 200 + t0 = 1604298064000 + tdLog.info("insert %d rows" % (insertRows)) + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t1 values (%d , 1)' % + (t0+i)) + print("==========step2") + print("restart to commit ") + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from db.t1") + tdSql.checkRows(insertRows) + for k in range(0,100): + tdLog.info("insert %d rows" % (insertRows)) + for i in range (0,insertRows): + ret = tdSql.execute( + 'insert into db.t1 values(%d,1)' % + (t0+k*200+i) + ) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from db.t1") + tdSql.checkRows(insertRows+200*k) + print("==========step2") + print("insert into another table ") + s = 'use db' + tdSql.execute(s) + ret = tdSql.execute('create table t2 (ts timestamp, a int)') + insertRows = 20000 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t2 values (%d, 1)' % + (t0+i)) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t2") + tdSql.checkRows(insertRows) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/update/append_commit_last-0.py b/tests/pytest/update/append_commit_last-0.py new file mode 100644 index 0000000000..c884207f2b --- /dev/null +++ b/tests/pytest/update/append_commit_last-0.py @@ -0,0 +1,90 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
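Both append_commit_data scripts repeat the same sequence after every batch of writes: stop and start dnode 1 to force a commit, then verify the row count again from disk. A helper in the spirit of the restartTaosd methods used elsewhere in this patch could fold that into one call; the helper name is an assumption.

# Sketch only: assumes the tdSql / tdDnodes helpers imported at the top of these tests.
def restart_and_check_rows(db, table, expected_rows):
    # Restarting dnode 1 forces the in-memory data to be committed; the count must survive it.
    tdDnodes.stop(1)
    tdDnodes.start(1)
    tdSql.execute("use %s" % db)
    tdSql.query("select * from %s" % table)
    tdSql.checkRows(expected_rows)

# e.g. inside the k-loop above: restart_and_check_rows("db", "t1", insertRows + 200 * k)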
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.ts = 1604298064000 + + def restartTaosd(self): + tdDnodes.stop(1) + tdDnodes.startWithoutSleep(1) + tdSql.execute("use db") + + def run(self): + tdSql.prepare() + + print("==============step1") + tdSql.execute("create table t1 (ts timestamp, a int)") + + for i in range(10): + tdSql.execute("insert into t1 values(%d, 1)" % (self.ts + i)) + self.restartTaosd() + tdSql.query("select * from t1") + tdSql.checkRows(i + 1) + tdSql.query("select sum(a) from t1") + tdSql.checkData(0, 0, i + 1) + + print("==============step2") + tdSql.execute("create table t2 (ts timestamp, a int)") + tdSql.execute("insert into t2 values(%d, 1)" % self.ts) + self.restartTaosd() + tdSql.query("select * from t2") + tdSql.checkRows(1) + tdSql.checkData(0, 1, 1) + + for i in range(1, 151): + tdSql.execute("insert into t2 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t2") + tdSql.checkRows(151) + tdSql.query("select sum(a) from t2") + tdSql.checkData(0, 0, 151) + + + print("==============step3") + tdSql.execute("create table t3 (ts timestamp, a int)") + tdSql.execute("insert into t3 values(%d, 1)" % self.ts) + self.restartTaosd() + tdSql.query("select * from t3") + tdSql.checkRows(1) + tdSql.checkData(0, 1, 1) + + for i in range(8): + for j in range(1, 11): + tdSql.execute("insert into t3 values(%d, 1)" % (self.ts + i * 10 + j)) + + self.restartTaosd() + tdSql.query("select * from t3") + tdSql.checkRows(81) + tdSql.query("select sum(a) from t3") + tdSql.checkData(0, 0, 81) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/update/append_commit_last.py b/tests/pytest/update/append_commit_last.py new file mode 100644 index 0000000000..013983f940 --- /dev/null +++ b/tests/pytest/update/append_commit_last.py @@ -0,0 +1,85 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.ts = 1604298064000 + + def restartTaosd(self): + tdDnodes.stop(1) + tdDnodes.startWithoutSleep(1) + tdSql.execute("use udb") + + def run(self): + tdSql.prepare() + + print("==============step1") + tdSql.execute("create database udb update 1") + tdSql.execute("use udb") + tdSql.execute("create table t1 (ts timestamp, a int)") + + for i in range(10): + tdSql.execute("insert into t1 values(%d, 1)" % (self.ts + i)) + self.restartTaosd() + tdSql.query("select * from t1") + tdSql.checkRows(i + 1) + + + print("==============step2") + tdSql.execute("create table t2 (ts timestamp, a int)") + tdSql.execute("insert into t2 values(%d, 1)" % self.ts) + self.restartTaosd() + tdSql.query("select * from t2") + tdSql.checkRows(1) + + for i in range(1, 151): + tdSql.execute("insert into t2 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t2") + tdSql.checkRows(151) + + + print("==============step3") + tdSql.execute("create table t3 (ts timestamp, a int)") + tdSql.execute("insert into t3 values(%d, 1)" % self.ts) + self.restartTaosd() + tdSql.query("select * from t3") + tdSql.checkRows(1) + + for i in range(8): + for j in range(1, 11): + tdSql.execute("insert into t3 values(%d, 1)" % (self.ts + i * 10 + j)) + + self.restartTaosd() + tdSql.query("select * from t3") + tdSql.checkRows(81) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/update/merge_commit_data-0.py b/tests/pytest/update/merge_commit_data-0.py new file mode 100644 index 0000000000..14d435f7f2 --- /dev/null +++ b/tests/pytest/update/merge_commit_data-0.py @@ -0,0 +1,351 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + print("==========step1") + print("UPDATE THE WHOLE DATA BLOCK REPEATEDLY") + s = 'reset query cache' + tdSql.execute(s) + s = 'drop database if exists db' + tdSql.execute(s) + s = 'create database db days 30' + tdSql.execute(s) + s = 'use db' + tdSql.execute(s) + ret = tdSql.execute('create table t1 (ts timestamp, a int)') + + insertRows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % (insertRows)) + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t1 values (%d , 1)' % + (t0 + i)) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t1") + tdSql.checkRows(insertRows) + + for k in range(0,10): + for i in range (0,insertRows): + ret = tdSql.execute( + 'insert into t1 values(%d,1)' % + (t0+i) + ) + tdSql.query("select * from t1") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t1") + tdSql.checkRows(insertRows) + print("==========step2") + print("PREPEND DATA ") + ret = tdSql.execute('create table t2 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t2 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t2") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t2") + tdSql.checkRows(insertRows) + for i in range(-100,0): + ret = tdSql.execute( + 'insert into t2 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t2") + tdSql.checkRows(insertRows+100) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t2") + tdSql.checkRows(insertRows+100) + print("==========step3") + print("PREPEND MASSIVE DATA ") + ret = tdSql.execute('create table t3 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t3 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t3") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t3") + tdSql.checkRows(insertRows) + for i in range(-6000,0): + ret = tdSql.execute( + 'insert into t3 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t3") + tdSql.checkRows(insertRows+6000) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t3") + tdSql.checkRows(insertRows+6000) + print("==========step4") + print("APPEND DATA") + ret = tdSql.execute('create table t4 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t4 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t4") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t4") + tdSql.checkRows(insertRows) + for i in range(0,100): + ret = tdSql.execute( + 'insert into t4 values (%d , 1)' % + (t0+200+i)) + tdSql.query("select * from t4") + tdSql.checkRows(insertRows+100) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t4") + tdSql.checkRows(insertRows+100) + print("==========step5") + 
print("APPEND DATA") + ret = tdSql.execute('create table t5 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t5 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t5") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t5") + tdSql.checkRows(insertRows) + for i in range(0,6000): + ret = tdSql.execute( + 'insert into t5 values (%d , 1)' % + (t0+200+i)) + tdSql.query("select * from t5") + tdSql.checkRows(insertRows+6000) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t5") + tdSql.checkRows(insertRows+6000) + print("==========step6") + print("UPDATE BLOCK IN TWO STEP") + ret = tdSql.execute('create table t6 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t6 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t6") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t6") + tdSql.checkRows(insertRows) + for i in range(0,100): + ret = tdSql.execute( + 'insert into t6 values (%d , 2)' % + (t0+i)) + tdSql.query("select * from t6") + tdSql.checkRows(insertRows) + tdSql.query("select sum(a) from t6") + tdSql.checkData(0,0,'200') + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t6") + tdSql.checkRows(insertRows) + tdSql.query("select sum(a) from t6") + tdSql.checkData(0,0,'200') + for i in range(0,200): + ret = tdSql.execute( + 'insert into t6 values (%d , 2)' % + (t0+i)) + tdSql.query("select * from t6") + tdSql.checkRows(insertRows) + tdSql.query("select sum(a) from t6") + tdSql.checkData(0,0,'200') + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t6") + tdSql.checkRows(insertRows) + tdSql.query("select sum(a) from t6") + tdSql.checkData(0,0,'200') + print("==========step7") + print("UPDATE LAST HALF AND INSERT LITTLE DATA") + ret = tdSql.execute('create table t7 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t7 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t7") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t7") + tdSql.checkRows(insertRows) + for i in range(100,300): + ret = tdSql.execute( + 'insert into t7 values (%d , 2)' % + (t0+i)) + tdSql.query("select * from t7") + tdSql.checkRows(300) + tdSql.query("select sum(a) from t7") + tdSql.checkData(0,0,'400') + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t7") + tdSql.checkRows(300) + tdSql.query("select sum(a) from t7") + tdSql.checkData(0,0,'400') + print("==========step8") + print("UPDATE LAST HALF AND INSERT MASSIVE DATA") + ret = tdSql.execute('create table t8 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t8 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t8") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t8") + tdSql.checkRows(insertRows) + for i in range(6000): + ret = tdSql.execute( + 'insert into t8 values (%d , 2)' % + (t0+i)) + tdSql.query("select * from t8") + tdSql.checkRows(6000) + tdSql.query("select sum(a) from t8") + tdSql.checkData(0,0,'11800') + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t8") + tdSql.checkRows(6000) + tdSql.query("select sum(a) from t8") + tdSql.checkData(0,0,'11800') + print("==========step9") + print("UPDATE FIRST HALF AND 
PREPEND LITTLE DATA") + ret = tdSql.execute('create table t9 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t9 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t9") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t9") + tdSql.checkRows(insertRows) + for i in range(-100,100): + ret = tdSql.execute( + 'insert into t9 values (%d , 2)' % + (t0+i)) + tdSql.query("select * from t9") + tdSql.checkRows(300) + tdSql.query("select sum(a) from t9") + tdSql.checkData(0,0,'400') + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t9") + tdSql.checkRows(300) + tdSql.query("select sum(a) from t9") + tdSql.checkData(0,0,'400') + print("==========step10") + print("UPDATE FIRST HALF AND PREPEND MASSIVE DATA") + ret = tdSql.execute('create table t10 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t10 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t10") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t10") + tdSql.checkRows(insertRows) + for i in range(-6000,100): + ret = tdSql.execute( + 'insert into t10 values (%d , 2)' % + (t0+i)) + tdSql.query("select * from t10") + tdSql.checkRows(6200) + tdSql.query("select sum(a) from t10") + tdSql.checkData(0,0,'12200') + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t10") + tdSql.checkRows(6200) + tdSql.query("select sum(a) from t10") + tdSql.checkData(0,0,'12200') + print("==========step11") + print("UPDATE FIRST HALF AND APPEND MASSIVE DATA") + ret = tdSql.execute('create table t11 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t11 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t11") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t11") + tdSql.checkRows(insertRows) + for i in range(100): + ret = tdSql.execute( + 'insert into t11 values (%d , 2)' % + (t0+i)) + for i in range(200,6000): + ret = tdSql.execute( + 'insert into t11 values (%d , 2)' % + (t0+i)) + tdSql.query("select * from t11") + tdSql.checkRows(6000) + tdSql.query("select sum(a) from t11") + tdSql.checkData(0,0,'11800') + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t11") + tdSql.checkRows(6000) + tdSql.query("select sum(a) from t11") + tdSql.checkData(0,0,'11800') + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/update/merge_commit_data.py b/tests/pytest/update/merge_commit_data.py new file mode 100644 index 0000000000..4fb6765361 --- /dev/null +++ b/tests/pytest/update/merge_commit_data.py @@ -0,0 +1,351 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + print("==========step1") + print("UPDATE THE WHOLE DATA BLOCK REPEATEDLY") + s = 'reset query cache' + tdSql.execute(s) + s = 'drop database if exists db' + tdSql.execute(s) + s = 'create database db update 1 days 30' + tdSql.execute(s) + s = 'use db' + tdSql.execute(s) + ret = tdSql.execute('create table t1 (ts timestamp, a int)') + + insertRows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % (insertRows)) + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t1 values (%d , 1)' % + (t0 + i)) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t1") + tdSql.checkRows(insertRows) + + for k in range(0,10): + for i in range (0,insertRows): + ret = tdSql.execute( + 'insert into t1 values(%d,1)' % + (t0+i) + ) + tdSql.query("select * from t1") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t1") + tdSql.checkRows(insertRows) + print("==========step2") + print("PREPEND DATA ") + ret = tdSql.execute('create table t2 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t2 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t2") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t2") + tdSql.checkRows(insertRows) + for i in range(-100,0): + ret = tdSql.execute( + 'insert into t2 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t2") + tdSql.checkRows(insertRows+100) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t2") + tdSql.checkRows(insertRows+100) + print("==========step3") + print("PREPEND MASSIVE DATA ") + ret = tdSql.execute('create table t3 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t3 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t3") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t3") + tdSql.checkRows(insertRows) + for i in range(-6000,0): + ret = tdSql.execute( + 'insert into t3 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t3") + tdSql.checkRows(insertRows+6000) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t3") + tdSql.checkRows(insertRows+6000) + print("==========step4") + print("APPEND DATA") + ret = tdSql.execute('create table t4 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t4 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t4") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t4") + tdSql.checkRows(insertRows) + for i in range(0,100): + ret = tdSql.execute( + 'insert into t4 values (%d , 1)' % + (t0+200+i)) + tdSql.query("select * from t4") + tdSql.checkRows(insertRows+100) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t4") + tdSql.checkRows(insertRows+100) + 
print("==========step5") + print("APPEND DATA") + ret = tdSql.execute('create table t5 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t5 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t5") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t5") + tdSql.checkRows(insertRows) + for i in range(0,6000): + ret = tdSql.execute( + 'insert into t5 values (%d , 1)' % + (t0+200+i)) + tdSql.query("select * from t5") + tdSql.checkRows(insertRows+6000) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t5") + tdSql.checkRows(insertRows+6000) + print("==========step6") + print("UPDATE BLOCK IN TWO STEP") + ret = tdSql.execute('create table t6 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t6 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t6") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t6") + tdSql.checkRows(insertRows) + for i in range(0,100): + ret = tdSql.execute( + 'insert into t6 values (%d , 2)' % + (t0+i)) + tdSql.query("select * from t6") + tdSql.checkRows(insertRows) + tdSql.query("select sum(a) from t6") + tdSql.checkData(0,0,'300') + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t6") + tdSql.checkRows(insertRows) + tdSql.query("select sum(a) from t6") + tdSql.checkData(0,0,'300') + for i in range(0,200): + ret = tdSql.execute( + 'insert into t6 values (%d , 2)' % + (t0+i)) + tdSql.query("select * from t6") + tdSql.checkRows(insertRows) + tdSql.query("select sum(a) from t6") + tdSql.checkData(0,0,'400') + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t6") + tdSql.checkRows(insertRows) + tdSql.query("select sum(a) from t6") + tdSql.checkData(0,0,'400') + print("==========step7") + print("UPDATE LAST HALF AND INSERT LITTLE DATA") + ret = tdSql.execute('create table t7 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t7 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t7") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t7") + tdSql.checkRows(insertRows) + for i in range(100,300): + ret = tdSql.execute( + 'insert into t7 values (%d , 2)' % + (t0+i)) + tdSql.query("select * from t7") + tdSql.checkRows(300) + tdSql.query("select sum(a) from t7") + tdSql.checkData(0,0,'500') + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t7") + tdSql.checkRows(300) + tdSql.query("select sum(a) from t7") + tdSql.checkData(0,0,'500') + print("==========step8") + print("UPDATE LAST HALF AND INSERT MASSIVE DATA") + ret = tdSql.execute('create table t8 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t8 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t8") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t8") + tdSql.checkRows(insertRows) + for i in range(6000): + ret = tdSql.execute( + 'insert into t8 values (%d , 2)' % + (t0+i)) + tdSql.query("select * from t8") + tdSql.checkRows(6000) + tdSql.query("select sum(a) from t8") + tdSql.checkData(0,0,'12000') + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t8") + tdSql.checkRows(6000) + tdSql.query("select sum(a) from t8") + tdSql.checkData(0,0,'12000') + print("==========step9") + 
print("UPDATE FIRST HALF AND PREPEND LITTLE DATA") + ret = tdSql.execute('create table t9 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t9 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t9") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t9") + tdSql.checkRows(insertRows) + for i in range(-100,100): + ret = tdSql.execute( + 'insert into t9 values (%d , 2)' % + (t0+i)) + tdSql.query("select * from t9") + tdSql.checkRows(300) + tdSql.query("select sum(a) from t9") + tdSql.checkData(0,0,'500') + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t9") + tdSql.checkRows(300) + tdSql.query("select sum(a) from t9") + tdSql.checkData(0,0,'500') + print("==========step10") + print("UPDATE FIRST HALF AND PREPEND MASSIVE DATA") + ret = tdSql.execute('create table t10 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t10 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t10") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t10") + tdSql.checkRows(insertRows) + for i in range(-6000,100): + ret = tdSql.execute( + 'insert into t10 values (%d , 2)' % + (t0+i)) + tdSql.query("select * from t10") + tdSql.checkRows(6200) + tdSql.query("select sum(a) from t10") + tdSql.checkData(0,0,'12300') + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t10") + tdSql.checkRows(6200) + tdSql.query("select sum(a) from t10") + tdSql.checkData(0,0,'12300') + print("==========step11") + print("UPDATE FIRST HALF AND APPEND MASSIVE DATA") + ret = tdSql.execute('create table t11 (ts timestamp, a int)') + insertRows = 200 + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into t11 values (%d , 1)' % + (t0+i)) + tdSql.query("select * from t11") + tdSql.checkRows(insertRows) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t11") + tdSql.checkRows(insertRows) + for i in range(100): + ret = tdSql.execute( + 'insert into t11 values (%d , 2)' % + (t0+i)) + for i in range(200,6000): + ret = tdSql.execute( + 'insert into t11 values (%d , 2)' % + (t0+i)) + tdSql.query("select * from t11") + tdSql.checkRows(6000) + tdSql.query("select sum(a) from t11") + tdSql.checkData(0,0,'11900') + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("select * from t11") + tdSql.checkRows(6000) + tdSql.query("select sum(a) from t11") + tdSql.checkData(0,0,'11900') + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/update/merge_commit_data2.py b/tests/pytest/update/merge_commit_data2.py new file mode 100644 index 0000000000..3f0fc718ad --- /dev/null +++ b/tests/pytest/update/merge_commit_data2.py @@ -0,0 +1,352 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import time + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + + def restart_taosd(self,db): + tdDnodes.stop(1) + tdDnodes.startWithoutSleep(1) + tdSql.execute("use %s;" % db) + + def date_to_timestamp_microseconds(self, date): + datetime_obj = datetime.strptime(date, "%Y-%m-%d %H:%M:%S.%f") + obj_stamp = int(time.mktime(datetime_obj.timetuple()) * 1000.0 + datetime_obj.microsecond / 1000.0) + return obj_stamp + + def timestamp_microseconds_to_date(self, timestamp): + d = datetime.datetime.fromtimestamp(timestamp/1000) + str1 = d.strftime("%Y-%m-%d %H:%M:%S.%f") + return str1 + + + + def run(self): + print("==========step1") + print("create table && insert data") + sql = 'reset query cache' + tdSql.execute(sql) + sql = 'drop database if exists db' + tdSql.execute(sql) + sql = 'create database db update 1 days 30;' + tdSql.execute(sql) + sql = 'use db;' + tdSql.execute(sql) + tdSql.execute('create table t1 (ts timestamp, a int)') + + + print("==================================1 start") + insert_rows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % insert_rows) + for i in range(insert_rows): + tdSql.execute('insert into t1 values (%d , 1)' %(t0+i)) + print("==========step2") + print("restart to commit ") + self.restart_taosd('db') + + print('check query result after restart') + tdSql.query('select * from db.t1;') + for i in range(insert_rows): + tdSql.checkData(i, 1, 1) + + print("==========step3") + print('insert data') + for i in range(insert_rows): + tdSql.execute('insert into t1 values (%d , 1)' %(t0+i+5000)) + print('check query result before restart') + tdSql.query('select * from db.t1;') + for i in range(insert_rows, insert_rows*2): + tdSql.checkData(i, 1, 1) + + self.restart_taosd('db') + print('check query result after restart') + tdSql.query('select * from db.t1;') + for i in range(insert_rows, insert_rows*2): + tdSql.checkData(i, 1, 1) + + print("==========step4") + print('insert data') + for i in range(insert_rows): + tdSql.execute('insert into t1 values (%d , 2)' %(t0+i)) + for i in range(insert_rows): + tdSql.execute('insert into t1 values (%d , 1)' %(t0+i+5000)) + + print('check query result before restart') + tdSql.query('select * from db.t1;') + print(tdSql.queryResult) + for i in range(insert_rows): + tdSql.checkData(i, 1, 2) + for i in range(insert_rows, insert_rows*2): + tdSql.checkData(i, 1, 1) + + print('check query result after restart') + self.restart_taosd('db') + tdSql.query('select * from db.t1;') + # print(tdSql.queryResult) + for i in range(insert_rows): + tdSql.checkData(i, 1, 2) + for i in range(insert_rows, insert_rows*2): + tdSql.checkData(i, 1, 1) + + print("==================================2 start") + print("==========step1") + print("create table && insert data") + tdSql.execute('create table t2 (ts timestamp, a int)') + insert_rows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % insert_rows) + for i in range(insert_rows): + tdSql.execute('insert into t2 values (%d , 1)' %(t0+i)) + print('restart to commit') + self.restart_taosd('db') + for i in 
range(insert_rows): + tdSql.execute('insert into t2 values (%d , 1)' %(t0+i+5000)) + print('restart to commit') + self.restart_taosd('db') + + for k in range(10): + for i in range(10): + tdSql.execute('insert into t2 values (%d , 1)' %(t0 + 200 + k * 10 + i)) + print('insert into t2 values (%d , 1)' %(t0 + 200 + k * 10 + i)) + + + print("==========step2") + print('check query result before restart') + tdSql.query('select * from db.t2;') + for i in range(insert_rows*2+100): + tdSql.checkData(i, 1, 1) + # print(tdSql.queryResult) + print('restart to commit') + self.restart_taosd('db') + print('check query result after restart') + tdSql.query('select * from db.t2;') + for i in range(insert_rows*2+100): + tdSql.checkData(i, 1, 1) + + + print("==================================3 start") + print("==========step1") + print("create table && insert data") + tdSql.execute('create table t3 (ts timestamp, a int)') + insert_rows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % insert_rows) + for i in range(insert_rows): + tdSql.execute('insert into t3 values (%d , 1)' %(t0+i)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(insert_rows): + tdSql.execute('insert into t3 values (%d , 1)' %(t0+i+5000)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(5200): + tdSql.execute('insert into t3 values (%d , 2)' %(t0+i)) + + print("==========step2") + print('check query result before restart') + tdSql.query('select * from db.t3;') + for i in range(5200): + tdSql.checkData(i, 1, 2) + # print(tdSql.queryResult) + print('restart to commit') + self.restart_taosd('db') + print('check query result after restart') + tdSql.query('select * from db.t3;') + for i in range(5200): + tdSql.checkData(i, 1, 2) + + print("==================================4 start") + print("==========step1") + print("create table && insert data") + tdSql.execute('create table t4 (ts timestamp, a int)') + insert_rows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % insert_rows) + for i in range(insert_rows): + tdSql.execute('insert into t4 values (%d , 1)' %(t0+i)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(insert_rows): + tdSql.execute('insert into t4 values (%d , 1)' %(t0+i+5000)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(100): + tdSql.execute('insert into t4 values (%d , 2)' %(t0+i)) + + for i in range(200, 5000): + tdSql.execute('insert into t4 values (%d , 2)' %(t0+i)) + + for i in range(100): + tdSql.execute('insert into t4 values (%d , 1)' %(t0+i+5000)) + + print('check query result before restart') + tdSql.query('select * from db.t4;') + for i in range(100): + tdSql.checkData(i, 1, 2) + for i in range(100, 200): + tdSql.checkData(i, 1, 1) + for i in range(200, 5000): + tdSql.checkData(i, 1, 2) + for i in range(5000, 5200): + tdSql.checkData(i, 1, 1) + + print('check query result after restart') + self.restart_taosd('db') + tdSql.query('select * from db.t4;') + for i in range(100): + tdSql.checkData(i, 1, 2) + for i in range(100, 200): + tdSql.checkData(i, 1, 1) + for i in range(200, 5000): + tdSql.checkData(i, 1, 2) + for i in range(5000, 5200): + tdSql.checkData(i, 1, 1) + + print("==================================5 start") + print("==========step1") + print("create table && insert data") + tdSql.execute('create table t5 (ts timestamp, a int)') + insert_rows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % insert_rows) + for i in range(insert_rows): + tdSql.execute('insert into t5 values (%d 
, 1)' %(t0+i)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(insert_rows): + tdSql.execute('insert into t5 values (%d , 1)' %(t0+i+5000)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(100, 200): + tdSql.execute('insert into t5 values (%d , 2)' %(t0+i)) + + for i in range(200, 5000): + tdSql.execute('insert into t5 values (%d , 2)' %(t0+i)) + + for i in range(100, 200): + tdSql.execute('insert into t5 values (%d , 2)' %(t0+i+5000)) + + print('check query result before restart') + tdSql.query('select * from db.t5;') + for i in range(100): + tdSql.checkData(i, 1, 1) + for i in range(100, 5000): + tdSql.checkData(i, 1, 2) + for i in range(5000, 5100): + tdSql.checkData(i, 1, 1) + for i in range(5100, 5200): + tdSql.checkData(i, 1, 2) + + print('check query result after restart') + self.restart_taosd('db') + tdSql.query('select * from db.t5;') + for i in range(100): + tdSql.checkData(i, 1, 1) + for i in range(100, 5000): + tdSql.checkData(i, 1, 2) + for i in range(5000, 5100): + tdSql.checkData(i, 1, 1) + for i in range(5100, 5200): + tdSql.checkData(i, 1, 2) + + print("==================================6 start") + print("==========step1") + print("create table && insert data") + tdSql.execute('create table t6 (ts timestamp, a int)') + insert_rows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % insert_rows) + for i in range(insert_rows): + tdSql.execute('insert into t6 values (%d , 1)' %(t0+i)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(insert_rows): + tdSql.execute('insert into t6 values (%d , 1)' %(t0+i+5000)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(-1000, 10000): + tdSql.execute('insert into t6 values (%d , 2)' %(t0+i)) + + print('check query result before restart') + tdSql.query('select * from db.t6;') + tdSql.checkRows(11000) + for i in range(11000): + tdSql.checkData(i, 1, 2) + + print('check query result after restart') + self.restart_taosd('db') + tdSql.query('select * from db.t6;') + tdSql.checkRows(11000) + for i in range(11000): + tdSql.checkData(i, 1, 2) + + + print("==================================7 start") + print("==========step1") + print("create table && insert data") + tdSql.execute('create table t7 (ts timestamp, a int)') + insert_rows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % insert_rows) + for i in range(insert_rows): + tdSql.execute('insert into t7 values (%d , 1)' %(t0+i)) + + for i in range(insert_rows): + tdSql.execute('insert into t7 values (%d , 1)' %(t0+i+5000)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(-1000, 10000): + tdSql.execute('insert into t7 values (%d , 2)' %(t0+i)) + + print('check query result before restart') + tdSql.query('select * from db.t7;') + tdSql.checkRows(11000) + for i in range(11000): + tdSql.checkData(i, 1, 2) + + print('check query result after restart') + self.restart_taosd('db') + tdSql.query('select * from db.t7;') + tdSql.checkRows(11000) + for i in range(11000): + tdSql.checkData(i, 1, 2) + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/update/merge_commit_data2_update0.py b/tests/pytest/update/merge_commit_data2_update0.py new file mode 100644 index 0000000000..def50e0466 --- /dev/null +++ b/tests/pytest/update/merge_commit_data2_update0.py @@ -0,0 +1,384 @@ 
+################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import time + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + + def restart_taosd(self,db): + tdDnodes.stop(1) + tdDnodes.startWithoutSleep(1) + tdSql.execute("use %s;" % db) + + def date_to_timestamp_microseconds(self, date): + datetime_obj = datetime.strptime(date, "%Y-%m-%d %H:%M:%S.%f") + obj_stamp = int(time.mktime(datetime_obj.timetuple()) * 1000.0 + datetime_obj.microsecond / 1000.0) + return obj_stamp + + def timestamp_microseconds_to_date(self, timestamp): + d = datetime.datetime.fromtimestamp(timestamp/1000) + str1 = d.strftime("%Y-%m-%d %H:%M:%S.%f") + return str1 + + + + def run(self): + print("==========step1") + print("create table && insert data") + sql = 'reset query cache' + tdSql.execute(sql) + sql = 'drop database if exists db' + tdSql.execute(sql) + sql = 'create database db update 0 days 30;' + tdSql.execute(sql) + sql = 'use db;' + tdSql.execute(sql) + tdSql.execute('create table t1 (ts timestamp, a int)') + + + print("==================================1 start") + insert_rows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % insert_rows) + for i in range(insert_rows): + tdSql.execute('insert into t1 values (%d , 1)' %(t0+i)) + print("==========step2") + print("restart to commit ") + self.restart_taosd('db') + + print('check query result after restart') + tdSql.query('select * from db.t1;') + for i in range(insert_rows): + tdSql.checkData(i, 1, 1) + + print("==========step3") + print('insert data') + for i in range(insert_rows): + tdSql.execute('insert into t1 values (%d , 1)' %(t0+i+5000)) + print('check query result before restart') + tdSql.query('select * from db.t1;') + for i in range(insert_rows, insert_rows*2): + tdSql.checkData(i, 1, 1) + + self.restart_taosd('db') + print('check query result after restart') + tdSql.query('select * from db.t1;') + for i in range(insert_rows, insert_rows*2): + tdSql.checkData(i, 1, 1) + + print("==========step4") + print('insert data') + for i in range(insert_rows): + tdSql.execute('insert into t1 values (%d , 2)' %(t0+i)) + for i in range(insert_rows): + tdSql.execute('insert into t1 values (%d , 1)' %(t0+i+5000)) + + print('check query result before restart') + tdSql.query('select * from db.t1;') + print(tdSql.queryResult) + for i in range(insert_rows): + tdSql.checkData(i, 1, 1) + for i in range(insert_rows, insert_rows*2): + tdSql.checkData(i, 1, 1) + + print('check query result after restart') + self.restart_taosd('db') + tdSql.query('select * from db.t1;') + # print(tdSql.queryResult) + for i in range(insert_rows): + tdSql.checkData(i, 1, 1) + for i in range(insert_rows, insert_rows*2): + tdSql.checkData(i, 1, 1) + + print("==================================2 start") + print("==========step1") + print("create table && insert data") + tdSql.execute('create table t2 (ts timestamp, a int)') + insert_rows = 200 + t0 = 
1603152000000 + tdLog.info("insert %d rows" % insert_rows) + for i in range(insert_rows): + tdSql.execute('insert into t2 values (%d , 1)' %(t0+i)) + print('restart to commit') + self.restart_taosd('db') + for i in range(insert_rows): + tdSql.execute('insert into t2 values (%d , 1)' %(t0+i+5000)) + print('restart to commit') + self.restart_taosd('db') + + for k in range(10): + for i in range(10): + tdSql.execute('insert into t2 values (%d , 1)' %(t0 + 200 + k * 10 + i)) + # print('insert into t2 values (%d , 1)' %(t0 + 200 + k * 10 + i)) + + + print("==========step2") + print('check query result before restart') + tdSql.query('select * from db.t2;') + for i in range(insert_rows*2+100): + tdSql.checkData(i, 1, 1) + # print(tdSql.queryResult) + print('restart to commit') + self.restart_taosd('db') + print('check query result after restart') + tdSql.query('select * from db.t2;') + for i in range(insert_rows*2+100): + tdSql.checkData(i, 1, 1) + + + print("==================================3 start") + print("==========step1") + print("create table && insert data") + tdSql.execute('create table t3 (ts timestamp, a int)') + insert_rows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % insert_rows) + for i in range(insert_rows): + tdSql.execute('insert into t3 values (%d , 1)' %(t0+i)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(insert_rows): + tdSql.execute('insert into t3 values (%d , 1)' %(t0+i+5000)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(5200): + tdSql.execute('insert into t3 values (%d , 2)' %(t0+i)) + + print("==========step2") + print('check query result before restart') + tdSql.query('select * from db.t3;') + for i in range(200): + tdSql.checkData(i, 1, 1) + for i in range(200, 5000): + tdSql.checkData(i, 1, 2) + for i in range(5000, 5200): + tdSql.checkData(i, 1, 1) + # print(tdSql.queryResult) + print('restart to commit') + self.restart_taosd('db') + print('check query result after restart') + tdSql.query('select * from db.t3;') + for i in range(200): + tdSql.checkData(i, 1, 1) + for i in range(200, 5000): + tdSql.checkData(i, 1, 2) + for i in range(5000, 5200): + tdSql.checkData(i, 1, 1) + + print("==================================4 start") + print("==========step1") + print("create table && insert data") + tdSql.execute('create table t4 (ts timestamp, a int)') + insert_rows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % insert_rows) + for i in range(insert_rows): + tdSql.execute('insert into t4 values (%d , 1)' %(t0+i)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(insert_rows): + tdSql.execute('insert into t4 values (%d , 1)' %(t0+i+5000)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(100): + tdSql.execute('insert into t4 values (%d , 2)' %(t0+i)) + + for i in range(200, 5000): + tdSql.execute('insert into t4 values (%d , 2)' %(t0+i)) + + for i in range(100): + tdSql.execute('insert into t4 values (%d , 1)' %(t0+i+5000)) + + print('check query result before restart') + tdSql.query('select * from db.t4;') + for i in range(200): + tdSql.checkData(i, 1, 1) + for i in range(200, 5000): + tdSql.checkData(i, 1, 2) + for i in range(5000, 5200): + tdSql.checkData(i, 1, 1) + + print('check query result after restart') + self.restart_taosd('db') + tdSql.query('select * from db.t4;') + for i in range(200): + tdSql.checkData(i, 1, 1) + for i in range(200, 5000): + tdSql.checkData(i, 1, 2) + for i in range(5000, 5200): + tdSql.checkData(i, 1, 1) + # + 
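# The database in this file is created with 'update 0', so the remaining cases expect the same behaviour the checks above verify: + # rows re-inserted at an existing timestamp keep their original value (1), and only rows at + # genuinely new timestamps carry the newly inserted value (2). + 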
print("==================================5 start") + print("==========step1") + print("create table && insert data") + tdSql.execute('create table t5 (ts timestamp, a int)') + insert_rows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % insert_rows) + for i in range(insert_rows): + tdSql.execute('insert into t5 values (%d , 1)' %(t0+i)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(insert_rows): + tdSql.execute('insert into t5 values (%d , 1)' %(t0+i+5000)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(100, 200): + tdSql.execute('insert into t5 values (%d , 2)' %(t0+i)) + + for i in range(200, 5000): + tdSql.execute('insert into t5 values (%d , 2)' %(t0+i)) + + for i in range(100, 200): + tdSql.execute('insert into t5 values (%d , 2)' %(t0+i+5000)) + + print('check query result before restart') + tdSql.query('select * from db.t5;') + for i in range(200): + tdSql.checkData(i, 1, 1) + for i in range(200, 5000): + tdSql.checkData(i, 1, 2) + for i in range(5000, 5200): + tdSql.checkData(i, 1, 1) + + print('check query result after restart') + self.restart_taosd('db') + tdSql.query('select * from db.t5;') + for i in range(200): + tdSql.checkData(i, 1, 1) + for i in range(200, 5000): + tdSql.checkData(i, 1, 2) + for i in range(5000, 5200): + tdSql.checkData(i, 1, 1) + + print("==================================6 start") + print("==========step1") + print("create table && insert data") + tdSql.execute('create table t6 (ts timestamp, a int)') + insert_rows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % insert_rows) + for i in range(insert_rows): + tdSql.execute('insert into t6 values (%d , 1)' %(t0+i)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(insert_rows): + tdSql.execute('insert into t6 values (%d , 1)' %(t0+i+5000)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(-1000, 10000): + tdSql.execute('insert into t6 values (%d , 2)' %(t0+i)) + + print('check query result before restart') + tdSql.query('select * from db.t6;') + tdSql.checkRows(11000) + for i in range(1000): + tdSql.checkData(i, 1, 2) + for i in range(1000,1200): + tdSql.checkData(i, 1, 1) + for i in range(1200,6000): + tdSql.checkData(i, 1, 2) + for i in range(6000,6200): + tdSql.checkData(i, 1, 1) + for i in range(6200, 11000): + tdSql.checkData(i, 1, 2) + + print('check query result after restart') + self.restart_taosd('db') + tdSql.query('select * from db.t6;') + tdSql.checkRows(11000) + for i in range(1000): + tdSql.checkData(i, 1, 2) + for i in range(1000,1200): + tdSql.checkData(i, 1, 1) + for i in range(1200,6000): + tdSql.checkData(i, 1, 2) + for i in range(6000,6200): + tdSql.checkData(i, 1, 1) + for i in range(6200, 11000): + tdSql.checkData(i, 1, 2) + + + print("==================================7 start") + print("==========step1") + print("create table && insert data") + tdSql.execute('create table t7 (ts timestamp, a int)') + insert_rows = 200 + t0 = 1603152000000 + tdLog.info("insert %d rows" % insert_rows) + for i in range(insert_rows): + tdSql.execute('insert into t7 values (%d , 1)' %(t0+i)) + + for i in range(insert_rows): + tdSql.execute('insert into t7 values (%d , 1)' %(t0+i+5000)) + print('restart to commit') + self.restart_taosd('db') + + for i in range(-1000, 10000): + tdSql.execute('insert into t7 values (%d , 2)' %(t0+i)) + + print('check query result before restart') + tdSql.query('select * from db.t7;') + tdSql.checkRows(11000) + for i in range(1000): + 
tdSql.checkData(i, 1, 2) + for i in range(1000,1200): + tdSql.checkData(i, 1, 1) + for i in range(1200,6000): + tdSql.checkData(i, 1, 2) + for i in range(6000,6200): + tdSql.checkData(i, 1, 1) + for i in range(6200, 11000): + tdSql.checkData(i, 1, 2) + + print('check query result after restart') + self.restart_taosd('db') + tdSql.query('select * from db.t7;') + tdSql.checkRows(11000) + for i in range(1000): + tdSql.checkData(i, 1, 2) + for i in range(1000,1200): + tdSql.checkData(i, 1, 1) + for i in range(1200,6000): + tdSql.checkData(i, 1, 2) + for i in range(6000,6200): + tdSql.checkData(i, 1, 1) + for i in range(6200, 11000): + tdSql.checkData(i, 1, 2) + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/update/merge_commit_last-0.py b/tests/pytest/update/merge_commit_last-0.py new file mode 100644 index 0000000000..8a247f3809 --- /dev/null +++ b/tests/pytest/update/merge_commit_last-0.py @@ -0,0 +1,309 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.ts = 1603152000000 + + def restartTaosd(self): + tdDnodes.stop(1) + tdDnodes.startWithoutSleep(1) + tdSql.execute("use db") + + def run(self): + tdSql.prepare() + + print("==============step 1: UPDATE THE LAST RECORD REPEATEDLY") + tdSql.execute("create table t1 (ts timestamp, a int)") + + for i in range(5): + tdSql.execute("insert into t1 values(%d, %d)" % (self.ts, i)) + self.restartTaosd() + tdSql.query("select * from t1") + tdSql.checkRows(1) + tdSql.checkData(0, 1, 0) + + print("==============step 2: UPDATE THE WHOLE LAST BLOCK") + tdSql.execute("create table t2 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t2 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t2") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t2") + tdSql.checkData(0, 0, 50) + + for i in range(50): + tdSql.execute("insert into t2 values(%d, 2)" % (self.ts + i)) + tdSql.query("select * from t2") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t2") + tdSql.checkData(0, 0, 50) + + self.restartTaosd() + tdSql.query("select * from t2") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t2") + tdSql.checkData(0, 0, 50) + + print("==============step 3: UPDATE PART OF THE LAST BLOCK") + tdSql.execute("create table t3 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t3 values(%d, 1)" % (self.ts + i)) + self.restartTaosd() + tdSql.query("select * from t3") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t3") + tdSql.checkData(0, 0, 50) + + for i in range(25): + tdSql.execute("insert into t3 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t3") + tdSql.checkRows(50) + 
tdSql.query("select sum(a) from t3") + tdSql.checkData(0, 0, 50) + + self.restartTaosd() + tdSql.query("select * from t3") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t3") + tdSql.checkData(0, 0, 50) + + print("==============step 4: UPDATE AND INSERT APPEND AT END OF DATA") + tdSql.execute("create table t4 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t4 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t4") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t4") + tdSql.checkData(0, 0, 50) + + for i in range(25): + tdSql.execute("insert into t4 values(%d, 2)" % (self.ts + i)) + + for i in range(50, 60): + tdSql.execute("insert into t4 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t4") + tdSql.checkRows(60) + tdSql.query("select sum(a) from t4") + tdSql.checkData(0, 0, 70) + + self.restartTaosd() + tdSql.query("select * from t4") + tdSql.checkRows(60) + tdSql.query("select sum(a) from t4") + tdSql.checkData(0, 0, 70) + + print("==============step 5: UPDATE AND INSERT PREPEND SOME DATA") + tdSql.execute("create table t5 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t5 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t5") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t5") + tdSql.checkData(0, 0, 50) + + for i in range(-10, 0): + tdSql.execute("insert into t5 values(%d, 2)" % (self.ts + i)) + + for i in range(25): + tdSql.execute("insert into t5 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t5") + tdSql.checkRows(60) + tdSql.query("select sum(a) from t5") + tdSql.checkData(0, 0, 70) + + self.restartTaosd() + tdSql.query("select * from t5") + tdSql.checkRows(60) + tdSql.query("select sum(a) from t5") + tdSql.checkData(0, 0, 70) + + for i in range(-10, 0): + tdSql.execute("insert into t5 values(%d, 3)" % (self.ts + i)) + + for i in range(25, 50): + tdSql.execute("insert into t5 values(%d, 3)" % (self.ts + i)) + + tdSql.query("select * from t5") + tdSql.checkRows(60) + tdSql.query("select sum(a) from t5") + tdSql.checkData(0, 0, 70) + + self.restartTaosd() + tdSql.query("select * from t5") + tdSql.checkRows(60) + tdSql.query("select sum(a) from t5") + tdSql.checkData(0, 0, 70) + + + print("==============step 6: INSERT AHEAD A LOT OF DATA") + tdSql.execute("create table t6 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t6 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t6") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t6") + tdSql.checkData(0, 0, 50) + + for i in range(-1000, 0): + tdSql.execute("insert into t6 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t6") + tdSql.checkRows(1050) + tdSql.query("select sum(a) from t6") + tdSql.checkData(0, 0, 2050) + + self.restartTaosd() + tdSql.query("select * from t6") + tdSql.checkRows(1050) + tdSql.query("select sum(a) from t6") + tdSql.checkData(0, 0, 2050) + + print("==============step 7: INSERT AHEAD A LOT AND UPDATE") + tdSql.execute("create table t7 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t7 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t7") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t7") + tdSql.checkData(0, 0, 50) + + for i in range(-1000, 25): + tdSql.execute("insert into t7 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t7") + tdSql.checkRows(1050) + 
tdSql.query("select sum(a) from t7") + tdSql.checkData(0, 0, 2050) + + self.restartTaosd() + tdSql.query("select * from t7") + tdSql.checkRows(1050) + tdSql.query("select sum(a) from t7") + tdSql.checkData(0, 0, 2050) + + print("==============step 8: INSERT AFTER A LOT AND UPDATE") + tdSql.execute("create table t8 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t8 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t8") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t8") + tdSql.checkData(0, 0, 50) + + for i in range(25, 6000): + tdSql.execute("insert into t8 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t8") + tdSql.checkRows(6000) + tdSql.query("select sum(a) from t8") + tdSql.checkData(0, 0, 11950) + + self.restartTaosd() + tdSql.query("select * from t8") + tdSql.checkRows(6000) + tdSql.query("select sum(a) from t8") + tdSql.checkData(0, 0, 11950) + + print("==============step 9: UPDATE ONLY MIDDLE") + tdSql.execute("create table t9 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t9 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t9") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t9") + tdSql.checkData(0, 0, 50) + + for i in range(20, 30): + tdSql.execute("insert into t9 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t9") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t9") + tdSql.checkData(0, 0, 50) + + self.restartTaosd() + tdSql.query("select * from t9") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t9") + tdSql.checkData(0, 0, 50) + + print("==============step 10: A LOT OF DATA COVER THE WHOLE BLOCK") + tdSql.execute("create table t10 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t10 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t10") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t10") + tdSql.checkData(0, 0, 50) + + for i in range(-4000, 4000): + tdSql.execute("insert into t10 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t10") + tdSql.checkRows(8000) + tdSql.query("select sum(a) from t10") + tdSql.checkData(0, 0, 15950) + + self.restartTaosd() + tdSql.query("select * from t10") + tdSql.checkRows(8000) + tdSql.query("select sum(a) from t10") + tdSql.checkData(0, 0, 15950) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/update/merge_commit_last.py b/tests/pytest/update/merge_commit_last.py new file mode 100644 index 0000000000..183cca0a1e --- /dev/null +++ b/tests/pytest/update/merge_commit_last.py @@ -0,0 +1,321 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.ts = 1603152000000 + + def restartTaosd(self): + tdDnodes.stop(1) + tdDnodes.startWithoutSleep(1) + tdSql.execute("use udb") + + def run(self): + tdSql.prepare() + + tdSql.execute("create database udb update 1 days 30") + tdSql.execute("use udb") + + print("==============step 1: UPDATE THE LAST RECORD REPEATEDLY") + tdSql.execute("create table t1 (ts timestamp, a int)") + + for i in range(5): + tdSql.execute("insert into t1 values(%d, %d)" % (self.ts, i)) + self.restartTaosd() + tdSql.query("select * from t1") + tdSql.checkRows(1) + tdSql.checkData(0, 1, i) + + print("==============step 2: UPDATE THE WHOLE LAST BLOCK") + tdSql.execute("create table t2 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t2 values(%d, 1)" % (self.ts + i)) + self.restartTaosd() + tdSql.query("select * from t2") + tdSql.checkRows(50) + for i in range(50): + tdSql.checkData(i, 1, 1) + + for i in range(50): + tdSql.execute("insert into t2 values(%d, 2)" % (self.ts + i)) + tdSql.query("select * from t2") + for i in range(50): + tdSql.checkData(i, 1, 2) + + self.restartTaosd() + tdSql.query("select * from t2") + tdSql.checkRows(50) + for i in range(50): + tdSql.checkData(i, 1, 2) + + print("==============step 3: UPDATE PART OF THE LAST BLOCK") + tdSql.execute("create table t3 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t3 values(%d, 1)" % (self.ts + i)) + self.restartTaosd() + tdSql.query("select * from t3") + tdSql.checkRows(50) + for i in range(50): + tdSql.checkData(i, 1, 1) + + for i in range(25): + tdSql.execute("insert into t3 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t3") + for i in range(25): + tdSql.checkData(i, 1, 2) + for i in range(25, 50): + tdSql.checkData(i, 1, 1) + + self.restartTaosd() + tdSql.query("select * from t3") + tdSql.checkRows(50) + for i in range(25): + tdSql.checkData(i, 1, 2) + for i in range(25, 50): + tdSql.checkData(i, 1, 1) + + print("==============step 4: UPDATE AND INSERT APPEND AT END OF DATA") + tdSql.execute("create table t4 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t4 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t4") + tdSql.checkRows(50) + for i in range(50): + tdSql.checkData(i, 1, 1) + + for i in range(25): + tdSql.execute("insert into t4 values(%d, 2)" % (self.ts + i)) + + for i in range(50, 60): + tdSql.execute("insert into t4 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t4") + tdSql.checkRows(60) + for i in range(25): + tdSql.checkData(i, 1, 2) + for i in range(25, 50): + tdSql.checkData(i, 1, 1) + for i in range(50, 60): + tdSql.checkData(i, 1, 2) + + self.restartTaosd() + tdSql.query("select * from t4") + tdSql.checkRows(60) + for i in range(25): + tdSql.checkData(i, 1, 2) + for i in range(25, 50): + tdSql.checkData(i, 1, 1) + for i in range(50, 60): + tdSql.checkData(i, 1, 2) + + print("==============step 5: UPDATE AND INSERT PREPEND SOME DATA") + 
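# Step 5 prepends 10 rows and updates the first 25 of the 50 committed rows to value 2, so sum(a) becomes + # 35*2 + 25*1 = 95; it then overwrites the prepended rows and rows 25-49 with value 3, giving + # sum(a) = 10*3 + 25*2 + 25*3 = 155. Both sums are checked below before and after a restart. + 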
tdSql.execute("create table t5 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t5 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t5") + tdSql.checkRows(50) + for i in range(50): + tdSql.checkData(i, 1, 1) + + for i in range(-10, 0): + tdSql.execute("insert into t5 values(%d, 2)" % (self.ts + i)) + + for i in range(25): + tdSql.execute("insert into t5 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t5") + tdSql.checkRows(60) + tdSql.query("select sum(a) from t5") + tdSql.checkData(0, 0, 95) + + self.restartTaosd() + tdSql.query("select * from t5") + tdSql.checkRows(60) + tdSql.query("select sum(a) from t5") + tdSql.checkData(0, 0, 95) + + for i in range(-10, 0): + tdSql.execute("insert into t5 values(%d, 3)" % (self.ts + i)) + + for i in range(25, 50): + tdSql.execute("insert into t5 values(%d, 3)" % (self.ts + i)) + + tdSql.query("select * from t5") + tdSql.checkRows(60) + tdSql.query("select sum(a) from t5") + tdSql.checkData(0, 0, 155) + + self.restartTaosd() + tdSql.query("select * from t5") + tdSql.checkRows(60) + tdSql.query("select sum(a) from t5") + tdSql.checkData(0, 0, 155) + + + print("==============step 6: INSERT AHEAD A LOT OF DATA") + tdSql.execute("create table t6 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t6 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t6") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t6") + tdSql.checkData(0, 0, 50) + + for i in range(-1000, 0): + tdSql.execute("insert into t6 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t6") + tdSql.checkRows(1050) + tdSql.query("select sum(a) from t6") + tdSql.checkData(0, 0, 2050) + + self.restartTaosd() + tdSql.query("select * from t6") + tdSql.checkRows(1050) + tdSql.query("select sum(a) from t6") + tdSql.checkData(0, 0, 2050) + + print("==============step 7: INSERT AHEAD A LOT AND UPDATE") + tdSql.execute("create table t7 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t7 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t7") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t7") + tdSql.checkData(0, 0, 50) + + for i in range(-1000, 25): + tdSql.execute("insert into t7 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t7") + tdSql.checkRows(1050) + tdSql.query("select sum(a) from t7") + tdSql.checkData(0, 0, 2075) + + self.restartTaosd() + tdSql.query("select * from t7") + tdSql.checkRows(1050) + tdSql.query("select sum(a) from t7") + tdSql.checkData(0, 0, 2075) + + print("==============step 8: INSERT AFTER A LOT AND UPDATE") + tdSql.execute("create table t8 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t8 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t8") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t8") + tdSql.checkData(0, 0, 50) + + for i in range(25, 6000): + tdSql.execute("insert into t8 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t8") + tdSql.checkRows(6000) + tdSql.query("select sum(a) from t8") + tdSql.checkData(0, 0, 11975) + + self.restartTaosd() + tdSql.query("select * from t8") + tdSql.checkRows(6000) + tdSql.query("select sum(a) from t8") + tdSql.checkData(0, 0, 11975) + + print("==============step 9: UPDATE ONLY MIDDLE") + tdSql.execute("create table t9 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t9 values(%d, 
1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t9") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t9") + tdSql.checkData(0, 0, 50) + + for i in range(20, 30): + tdSql.execute("insert into t9 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t9") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t9") + tdSql.checkData(0, 0, 60) + + self.restartTaosd() + tdSql.query("select * from t9") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t9") + tdSql.checkData(0, 0, 60) + + print("==============step 10: A LOT OF DATA COVER THE WHOLE BLOCK") + tdSql.execute("create table t10 (ts timestamp, a int)") + + for i in range(50): + tdSql.execute("insert into t10 values(%d, 1)" % (self.ts + i)) + + self.restartTaosd() + tdSql.query("select * from t10") + tdSql.checkRows(50) + tdSql.query("select sum(a) from t10") + tdSql.checkData(0, 0, 50) + + for i in range(-4000, 4000): + tdSql.execute("insert into t10 values(%d, 2)" % (self.ts + i)) + + tdSql.query("select * from t10") + tdSql.checkRows(8000) + tdSql.query("select sum(a) from t10") + tdSql.checkData(0, 0, 16000) + + self.restartTaosd() + tdSql.query("select * from t10") + tdSql.checkRows(8000) + tdSql.query("select sum(a) from t10") + tdSql.checkData(0, 0, 16000) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/wal/addOldWalTest.py b/tests/pytest/wal/addOldWalTest.py new file mode 100644 index 0000000000..2f4dcd5ce8 --- /dev/null +++ b/tests/pytest/wal/addOldWalTest.py @@ -0,0 +1,70 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import taos +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def createOldDir(self): + oldDir = tdDnodes.getDnodesRootDir() + "dnode1/data/vnode/vnode2/wal/old" + os.system("sudo mkdir -p %s" % oldDir) + + def createOldDirAndAddWal(self): + oldDir = tdDnodes.getDnodesRootDir() + "dnode1/data/vnode/vnode2/wal/old" + os.system("sudo echo 'test' >> %s/wal" % oldDir) + + + def run(self): + tdSql.prepare() + + tdSql.execute("create table t1(ts timestamp, a int)") + tdSql.execute("insert into t1 values(now, 1)") + + # create old dir only + self.createOldDir() + os.system("sudo kill -9 $(pgrep taosd)") + tdDnodes.start(1) + + tdSql.execute("use db") + tdSql.query("select * from t1") + tdSql.checkRows(1) + tdSql.checkData(0, 1, 1) + + # create old dir and add wal under old dir + self.createOldDir() + self.createOldDirAndAddWal() + os.system("sudo kill -9 $(pgrep taosd)") + tdDnodes.start(1) + + tdSql.query("select * from t1") + tdSql.checkRows(1) + tdSql.checkData(0, 1, 1) + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/script/general/http/restful_full.sim b/tests/script/general/http/restful_full.sim index 8d2f1a7c00..94ecb59f75 100644 --- a/tests/script/general/http/restful_full.sim +++ b/tests/script/general/http/restful_full.sim @@ -81,7 +81,7 @@ print =============== step2 - no db #11 system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:7111/rest/sql print 11-> $system_content -if $system_content != @{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep1,keep2,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","precision","status"],"data":[],"rows":0}@ then +if $system_content != @{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep1,keep2,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","precision","update","status"],"data":[],"rows":0}@ then return -1 endi diff --git a/tests/script/general/parser/join_multivnode.sim b/tests/script/general/parser/join_multivnode.sim index c5fcf575ae..76230f79f0 100644 --- a/tests/script/general/parser/join_multivnode.sim +++ b/tests/script/general/parser/join_multivnode.sim @@ -134,66 +134,8 @@ sql select join_mt0.ts, join_mt1.t1, join_mt0.t1, join_mt1.tbname, join_mt0.tbna #1970-01-01 08:01:40.800 | 10 | 45.000000000 | 0 | true | false | 0 | #1970-01-01 08:01:40.790 | 10 | 945.000000000 | 90 | true | true | 0 | -sql select count(join_mt0.c1), sum(join_mt1.c2), first(join_mt0.c5), last(join_mt1.c7), first(join_mt1.c7) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts interval(10a) group by join_mt0.t1 order by join_mt0.ts desc limit 20 offset 19; -if $rows != 20 then - return -1 -endi +sql_error select count(join_mt0.c1), sum(join_mt1.c2), 
first(join_mt0.c5), last(join_mt1.c7), first(join_mt1.c7) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts interval(10a) group by join_mt0.t1 order by join_mt0.ts desc limit 20 offset 19; -if $data00 != @70-01-01 08:01:40.800@ then - return -1 -endi - -if $data01 != 10 then - return -1 -endi - -if $data02 != 45.000000000 then - return -1 -endi - -if $data03 != 0 then - return -1 -endi - -if $data04 != 1 then - return -1 -endi - -if $data05 != 0 then - return -1 -endi - -if $data06 != 0 then - return -1 -endi - -if $data10 != @70-01-01 08:01:40.790@ then - return -1 -endi - -if $data11 != 10 then - return -1 -endi - -if $data12 != 945.000000000 then - return -1 -endi - -if $data13 != 90 then - return -1 -endi - -if $data14 != 1 then - return -1 -endi - -if $data15 != 1 then - return -1 -endi - -if $data16 != 0 then - return -1 -endi sql select count(join_mt0.c1), sum(join_mt0.c2)/count(*), avg(c2), first(join_mt0.c5), last(c7) from join_mt0 interval(10a) group by join_mt0.t1 order by join_mt0.ts desc; if $rows != 100 then @@ -261,59 +203,9 @@ if $data16 != 2 then endi # this function will cause shell crash -sql select count(join_mt0.c1), first(join_mt0.c1) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts interval(10a) group by join_mt0.t1 order by join_mt0.ts desc; -if $rows != 100 then - return -1 -endi +sql_error select count(join_mt0.c1), first(join_mt0.c1) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts interval(10a) group by join_mt0.t1 order by join_mt0.ts desc; -if $data00 != @70-01-01 08:01:40.990@ then - return -1 -endi - -if $data01 != 10 then - return -1 -endi - -if $data02 != 90 then - return -1 -endi - -if $data03 != 0 then - return -1 -endi - -if $data11 != 10 then - return -1 -endi - -if $data12 != 80 then - return -1 -endi - -if $data13 != 0 then - return -1 -endi - -sql select last(join_mt1.c7), first(join_mt1.c7) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts interval(10m) group by join_mt0.t1 order by join_mt0.ts asc; -if $rows != 1 then - return -1 -endi - -if $data00 != @70-01-01 08:00:00.000@ then - return -1 -endi - -if $data01 != 1 then - return -1 -endi - -if $data02 != 0 then - return -1 -endi - -if $data03 != 0 then - return -1 -endi +sql_error select last(join_mt1.c7), first(join_mt1.c7) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts interval(10m) group by join_mt0.t1 order by join_mt0.ts asc; sql_error select count(join_mt0.c1), first(join_mt0.c1)-last(join_mt1.c1), first(join_mt1.c9) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts;", NULL); sql select count(join_mt0.c1), first(join_mt0.c1)/count(*), first(join_mt1.c9) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts;", NULL); diff --git a/tests/script/general/parser/stream_on_sys.sim b/tests/script/general/parser/stream_on_sys.sim index 5507b4db48..845a484488 100644 --- a/tests/script/general/parser/stream_on_sys.sim +++ b/tests/script/general/parser/stream_on_sys.sim @@ -22,12 +22,12 @@ $i = 0 sql use $db -sql create table cpustrm as select count(*), avg(cpu_taosd), max(cpu_taosd), min(cpu_taosd), avg(cpu_system), max(cpu_cores), min(cpu_cores), last(cpu_cores) from log.dn1 interval(4s) sliding(2s) -sql create table memstrm as select count(*), avg(mem_taosd), max(mem_taosd), min(mem_taosd), avg(mem_system), first(mem_total), last(mem_total) from log.dn1 interval(4s) sliding(2s) 
-sql create table diskstrm as select count(*), avg(disk_used), last(disk_used), avg(disk_total), first(disk_total) from log.dn1 interval(4s) sliding(2s) -sql create table bandstrm as select count(*), avg(band_speed), last(band_speed) from log.dn1 interval(4s) sliding(2s) -sql create table reqstrm as select count(*), avg(req_http), last(req_http), avg(req_select), last(req_select), avg(req_insert), last(req_insert) from log.dn1 interval(4s) sliding(2s) -sql create table iostrm as select count(*), avg(io_read), last(io_read), avg(io_write), last(io_write) from log.dn1 interval(4s) sliding(2s) +sql create table cpustrm as select count(*), avg(cpu_taosd), max(cpu_taosd), min(cpu_taosd), avg(cpu_system), max(cpu_cores), min(cpu_cores), last(cpu_cores) from log.dn1 interval(4s) +sql create table memstrm as select count(*), avg(mem_taosd), max(mem_taosd), min(mem_taosd), avg(mem_system), first(mem_total), last(mem_total) from log.dn1 interval(4s) +sql create table diskstrm as select count(*), avg(disk_used), last(disk_used), avg(disk_total), first(disk_total) from log.dn1 interval(4s) +sql create table bandstrm as select count(*), avg(band_speed), last(band_speed) from log.dn1 interval(4s) +sql create table reqstrm as select count(*), avg(req_http), last(req_http), avg(req_select), last(req_select), avg(req_insert), last(req_insert) from log.dn1 interval(4s) +sql create table iostrm as select count(*), avg(io_read), last(io_read), avg(io_write), last(io_write) from log.dn1 interval(4s) sleep 120000 sql select * from cpustrm if $rows <= 0 then diff --git a/tests/script/general/wal/kill.sim b/tests/script/general/wal/kill.sim new file mode 100644 index 0000000000..7f103874a5 --- /dev/null +++ b/tests/script/general/wal/kill.sim @@ -0,0 +1,77 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 + +print ============== deploy +system sh/exec.sh -n dnode1 -s start +sleep 3001 +sql connect + +sql create database d1 +sql use d1 + +sql create table t1 (ts timestamp, i int) +sql insert into t1 values(now, 1); + +print =============== step3 +sleep 3000 +sql select * from t1; +print rows: $rows +if $rows != 1 then + return -1 +endi +system sh/exec.sh -n dnode1 -s stop -x SIGKILL +sleep 3000 + +print =============== step4 +system sh/exec.sh -n dnode1 -s start -x SIGKILL +sleep 3000 +sql select * from t1; +print rows: $rows +if $rows != 1 then + return -1 +endi +system sh/exec.sh -n dnode1 -s stop -x SIGKILL +sleep 3000 + +print =============== step5 +system sh/exec.sh -n dnode1 -s start -x SIGKILL +sleep 3000 +sql select * from t1; +print rows: $rows +if $rows != 1 then + return -1 +endi +system sh/exec.sh -n dnode1 -s stop -x SIGKILL +sleep 3000 + +print =============== step6 +system sh/exec.sh -n dnode1 -s start -x SIGKILL +sleep 3000 +sql select * from t1; +print rows: $rows +if $rows != 1 then + return -1 +endi +system sh/exec.sh -n dnode1 -s stop -x SIGKILL +sleep 3000 + +print =============== step7 +system sh/exec.sh -n dnode1 -s start -x SIGKILL +sleep 3000 +sql select * from t1; +print rows: $rows +if $rows != 1 then + return -1 +endi +system sh/exec.sh -n dnode1 -s stop -x SIGKILL +sleep 3000 + +print =============== step8 +system sh/exec.sh -n dnode1 -s start -x SIGKILL +sleep 3000 +sql select * from t1; +print rows: $rows +if $rows != 1 then + return -1 +endi +system sh/exec.sh -n dnode1 -s stop -x SIGKILL diff --git a/tests/script/general/wal/sync.sim b/tests/script/general/wal/sync.sim new file mode 100644 index 0000000000..abaf22f919 --- /dev/null +++ 
b/tests/script/general/wal/sync.sim @@ -0,0 +1,124 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 + +system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3 +system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3 +system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3 + +system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4 +system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 4 +system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 4 + +system sh/cfg.sh -n dnode1 -c http -v 1 +system sh/cfg.sh -n dnode2 -c http -v 1 +system sh/cfg.sh -n dnode3 -c http -v 1 + +system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 20000 +system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 20000 +system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 20000 + +system sh/cfg.sh -n dnode1 -c replica -v 3 +system sh/cfg.sh -n dnode2 -c replica -v 3 +system sh/cfg.sh -n dnode3 -c replica -v 3 + +system sh/cfg.sh -n dnode1 -c maxSQLLength -v 940032 +system sh/cfg.sh -n dnode2 -c maxSQLLength -v 940032 +system sh/cfg.sh -n dnode3 -c maxSQLLength -v 940032 + +print ============== deploy + +system sh/exec.sh -n dnode1 -s start +sleep 5001 +sql connect + +sql create dnode $hostname2 +sql create dnode $hostname3 +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start + +print =============== step1 +$x = 0 +show1: + $x = $x + 1 + sleep 2000 + if $x == 5 then + return -1 + endi +sql show mnodes -x show1 +$mnode1Role = $data2_1 +print mnode1Role $mnode1Role +$mnode2Role = $data2_2 +print mnode2Role $mnode2Role +$mnode3Role = $data2_3 +print mnode3Role $mnode3Role + +if $mnode1Role != master then + goto show1 +endi +if $mnode2Role != slave then + goto show1 +endi +if $mnode3Role != slave then + goto show1 +endi + +print =============== step2 +sql create database d1 replica 3 +sql use d1 + +sql create table table_rest (ts timestamp, i int) +print sql length is 870KB +restful d1 table_rest 1591072800 30000 +restful d1 table_rest 1591172800 30000 +restful d1 table_rest 1591272800 30000 +restful d1 table_rest 1591372800 30000 +restful d1 table_rest 1591472800 30000 +restful d1 table_rest 1591572800 30000 +restful d1 table_rest 1591672800 30000 +restful d1 table_rest 1591772800 30000 +restful d1 table_rest 1591872800 30000 +restful d1 table_rest 1591972800 30000 + +sql select * from table_rest; +print rows: $rows +if $rows != 300000 then + return -1 +endi + +print =============== step3 +system sh/exec.sh -n dnode1 -s stop -x SIGINT +sleep 5000 +sql select * from table_rest; +print rows: $rows +if $rows != 300000 then + return -1 +endi +system sh/exec.sh -n dnode1 -s start -x SIGINT +sleep 5000 + +print =============== step4 +system sh/exec.sh -n dnode2 -s stop -x SIGINT +sleep 5000 +sql select * from table_rest; +print rows: $rows +if $rows != 300000 then + return -1 +endi +system sh/exec.sh -n dnode2 -s start -x SIGINT +sleep 5000 + +print =============== step5 +system sh/exec.sh -n dnode3 -s stop -x SIGINT +sleep 5000 +sql select * from table_rest; +print rows: $rows +if $rows != 300000 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 1b2fe37c71..2a84172da9 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -236,6 +236,9 @@ cd ../../../debug; make ./test.sh -f general/vector/table_query.sim 
./test.sh -f general/vector/table_time.sim +./test.sh -f general/wal/sync.sim +./test.sh -f general/wal/kill.sim + ./test.sh -f unique/account/account_create.sim ./test.sh -f unique/account/account_delete.sim ./test.sh -f unique/account/account_len.sim diff --git a/tests/test-all.sh b/tests/test-all.sh index f4e992eb5a..214360f3b8 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -26,7 +26,12 @@ function runPyCaseOneByOne { while read -r line; do if [[ $line =~ ^python.* ]]; then if [[ $line != *sleep* ]]; then - case=`echo $line|awk '{print $NF}'` + + if [[ $line =~ '-r' ]];then + case=`echo $line|awk '{print $4}'` + else + case=`echo $line|awk '{print $NF}'` + fi start_time=`date +%s` $line > /dev/null 2>&1 && \ echo -e "${GREEN}$case success${NC}" | tee -a pytest-out.log || \ diff --git a/tests/tsim/inc/sim.h b/tests/tsim/inc/sim.h index 58e58a442c..01e5016557 100644 --- a/tests/tsim/inc/sim.h +++ b/tests/tsim/inc/sim.h @@ -100,7 +100,7 @@ typedef struct _cmd_t { int16_t cmdno; int16_t nlen; char name[MAX_SIM_CMD_NAME_LEN]; - bool (*parseCmd)(char *, struct _cmd_t *, int); + bool (*parseCmd)(char *, struct _cmd_t *, int32_t); bool (*executeCmd)(struct _script_t *script, char *option); struct _cmd_t *next; } SCommand; @@ -111,7 +111,7 @@ typedef struct { int16_t errorJump; // sql jump flag, while '-x' exist in sql cmd, this flag // will be SQL_JUMP_TRUE, otherwise is SQL_JUMP_FALSE */ int16_t lineNum; // correspodning line number in original file - int optionOffset; // relative option offset + int32_t optionOffset;// relative option offset } SCmdLine; typedef struct _var_t { @@ -121,59 +121,56 @@ typedef struct _var_t { } SVariable; typedef struct _script_t { - int type; - bool killed; - - void *taos; - char rows[12]; // number of rows data retrieved - char data[MAX_QUERY_ROW_NUM][MAX_QUERY_COL_NUM] - [MAX_QUERY_VALUE_LEN]; // query results - char system_exit_code[12]; - char system_ret_content[MAX_SYSTEM_RESULT_LEN]; - - int varLen; - int linePos; // current cmd position - int numOfLines; // number of lines in the script - int bgScriptLen; - char fileName[MAX_FILE_NAME_LEN]; // script file name - char error[MAX_ERROR_LEN]; - char *optionBuffer; + int32_t type; + bool killed; + void * taos; + char rows[12]; // number of rows data retrieved + char data[MAX_QUERY_ROW_NUM][MAX_QUERY_COL_NUM][MAX_QUERY_VALUE_LEN]; // query results + char system_exit_code[12]; + char system_ret_content[MAX_SYSTEM_RESULT_LEN]; + int32_t varLen; + int32_t linePos; // current cmd position + int32_t numOfLines; // number of lines in the script + int32_t bgScriptLen; + char fileName[MAX_FILE_NAME_LEN]; // script file name + char error[MAX_ERROR_LEN]; + char * optionBuffer; SCmdLine *lines; // command list SVariable variables[MAX_VAR_LEN]; + pthread_t bgPid; + char auth[128]; struct _script_t *bgScripts[MAX_BACKGROUND_SCRIPT_NUM]; - char auth[128]; } SScript; extern SScript *simScriptList[MAX_MAIN_SCRIPT_NUM]; extern SCommand simCmdList[]; -extern int simScriptPos; -extern int simScriptSucced; -extern int simDebugFlag; -extern char tsScriptDir[]; -extern bool simAsyncQuery; +extern int32_t simScriptPos; +extern int32_t simScriptSucced; +extern int32_t simDebugFlag; +extern char tsScriptDir[]; +extern bool simAsyncQuery; SScript *simParseScript(char *fileName); - SScript *simProcessCallOver(SScript *script); -void *simExecuteScript(void *script); -void simInitsimCmdList(); -bool simSystemInit(); -void simSystemCleanUp(); -char *simGetVariable(SScript *script, char *varName, int varLen); -bool 
simExecuteExpCmd(SScript *script, char *option); -bool simExecuteTestCmd(SScript *script, char *option); -bool simExecuteGotoCmd(SScript *script, char *option); -bool simExecuteRunCmd(SScript *script, char *option); -bool simExecuteRunBackCmd(SScript *script, char *option); -bool simExecuteSystemCmd(SScript *script, char *option); -bool simExecuteSystemContentCmd(SScript *script, char *option); -bool simExecutePrintCmd(SScript *script, char *option); -bool simExecuteSleepCmd(SScript *script, char *option); -bool simExecuteReturnCmd(SScript *script, char *option); -bool simExecuteSqlCmd(SScript *script, char *option); -bool simExecuteSqlErrorCmd(SScript *script, char *rest); -bool simExecuteSqlSlowCmd(SScript *script, char *option); -bool simExecuteRestfulCmd(SScript *script, char *rest); -void simVisuallizeOption(SScript *script, char *src, char *dst); +void * simExecuteScript(void *script); +void simInitsimCmdList(); +bool simSystemInit(); +void simSystemCleanUp(); +char * simGetVariable(SScript *script, char *varName, int32_t varLen); +bool simExecuteExpCmd(SScript *script, char *option); +bool simExecuteTestCmd(SScript *script, char *option); +bool simExecuteGotoCmd(SScript *script, char *option); +bool simExecuteRunCmd(SScript *script, char *option); +bool simExecuteRunBackCmd(SScript *script, char *option); +bool simExecuteSystemCmd(SScript *script, char *option); +bool simExecuteSystemContentCmd(SScript *script, char *option); +bool simExecutePrintCmd(SScript *script, char *option); +bool simExecuteSleepCmd(SScript *script, char *option); +bool simExecuteReturnCmd(SScript *script, char *option); +bool simExecuteSqlCmd(SScript *script, char *option); +bool simExecuteSqlErrorCmd(SScript *script, char *rest); +bool simExecuteSqlSlowCmd(SScript *script, char *option); +bool simExecuteRestfulCmd(SScript *script, char *rest); +void simVisuallizeOption(SScript *script, char *src, char *dst); #endif \ No newline at end of file diff --git a/tests/tsim/inc/simParse.h b/tests/tsim/inc/simParse.h index d3f92add71..ef7d8e5ce7 100644 --- a/tests/tsim/inc/simParse.h +++ b/tests/tsim/inc/simParse.h @@ -50,6 +50,6 @@ typedef struct { char sexpLen[MAX_NUM_BLOCK]; /*switch expression length */ } SBlock; -bool simParseExpression(char *token, int lineNum); +bool simParseExpression(char *token, int32_t lineNum); #endif \ No newline at end of file diff --git a/tests/tsim/src/simExe.c b/tests/tsim/src/simExe.c index 7f786dfaa9..2db750cdd3 100644 --- a/tests/tsim/src/simExe.c +++ b/tests/tsim/src/simExe.c @@ -13,6 +13,7 @@ * along with this program. If not, see . 
*/ +#define _DEFAULT_SOURCE #include "os.h" #include "sim.h" #include "taos.h" @@ -38,30 +39,28 @@ void simLogSql(char *sql, bool useSharp) { } else { fprintf(fp, "%s;\n", sql); } - + fflush(fp); } char *simParseArbitratorName(char *varName); char *simParseHostName(char *varName); -char *simGetVariable(SScript *script, char *varName, int varLen) { +char *simGetVariable(SScript *script, char *varName, int32_t varLen) { if (strncmp(varName, "hostname", 8) == 0) { return simParseHostName(varName); } if (strncmp(varName, "arbitrator", 10) == 0) { - return simParseArbitratorName(varName); + return simParseArbitratorName(varName); } if (strncmp(varName, "error", varLen) == 0) return script->error; if (strncmp(varName, "rows", varLen) == 0) return script->rows; - if (strncmp(varName, "system_exit", varLen) == 0) - return script->system_exit_code; + if (strncmp(varName, "system_exit", varLen) == 0) return script->system_exit_code; - if (strncmp(varName, "system_content", varLen) == 0) - return script->system_ret_content; + if (strncmp(varName, "system_content", varLen) == 0) return script->system_ret_content; // variable like data2_192.168.0.1 if (strncmp(varName, "data", 4) == 0) { @@ -70,16 +69,16 @@ char *simGetVariable(SScript *script, char *varName, int varLen) { } if (varName[5] == '_') { - int col = varName[4] - '0'; + int32_t col = varName[4] - '0'; if (col < 0 || col >= MAX_QUERY_COL_NUM) { return "null"; } - char *keyName; - int keyLen; + char * keyName; + int32_t keyLen; paGetToken(varName + 6, &keyName, &keyLen); - for (int i = 0; i < MAX_QUERY_ROW_NUM; ++i) { + for (int32_t i = 0; i < MAX_QUERY_ROW_NUM; ++i) { if (strncmp(keyName, script->data[i][0], keyLen) == 0) { simDebug("script:%s, keyName:%s, keyValue:%s", script->fileName, script->data[i][0], script->data[i][col]); return script->data[i][col]; @@ -87,16 +86,16 @@ char *simGetVariable(SScript *script, char *varName, int varLen) { } return "null"; } else if (varName[6] == '_') { - int col = (varName[4] - '0') * 10 + (varName[5] - '0'); + int32_t col = (varName[4] - '0') * 10 + (varName[5] - '0'); if (col < 0 || col >= MAX_QUERY_COL_NUM) { return "null"; } - char *keyName; - int keyLen; + char * keyName; + int32_t keyLen; paGetToken(varName + 7, &keyName, &keyLen); - for (int i = 0; i < MAX_QUERY_ROW_NUM; ++i) { + for (int32_t i = 0; i < MAX_QUERY_ROW_NUM; ++i) { if (strncmp(keyName, script->data[i][0], keyLen) == 0) { simTrace("script:%s, keyName:%s, keyValue:%s", script->fileName, script->data[i][0], script->data[i][col]); return script->data[i][col]; @@ -104,8 +103,8 @@ char *simGetVariable(SScript *script, char *varName, int varLen) { } return "null"; } else { - int row = varName[4] - '0'; - int col = varName[5] - '0'; + int32_t row = varName[4] - '0'; + int32_t col = varName[5] - '0'; if (row < 0 || row >= MAX_QUERY_ROW_NUM) { return "null"; } @@ -118,7 +117,7 @@ char *simGetVariable(SScript *script, char *varName, int varLen) { } } - for (int i = 0; i < script->varLen; ++i) { + for (int32_t i = 0; i < script->varLen; ++i) { SVariable *var = &script->variables[i]; if (var->varNameLen != varLen) { continue; @@ -144,11 +143,11 @@ char *simGetVariable(SScript *script, char *varName, int varLen) { return var->varValue; } -int simExecuteExpression(SScript *script, char *exp) { - char *op1, *op2, *var1, *var2, *var3, *rest; - int op1Len, op2Len, var1Len, var2Len, var3Len, val0, val1; - char t0[512], t1[512], t2[512], t3[1024]; - int result; +int32_t simExecuteExpression(SScript *script, char *exp) { + char * op1, *op2, *var1, 
*var2, *var3, *rest; + int32_t op1Len, op2Len, var1Len, var2Len, var3Len, val0, val1; + char t0[512], t1[512], t2[512], t3[1024]; + int32_t result; rest = paGetToken(exp, &var1, &var1Len); rest = paGetToken(rest, &op1, &op1Len); @@ -234,7 +233,7 @@ bool simExecuteExpCmd(SScript *script, char *option) { } bool simExecuteTestCmd(SScript *script, char *option) { - int result; + int32_t result; result = simExecuteExpression(script, option); if (result >= 0) @@ -285,13 +284,12 @@ bool simExecuteRunBackCmd(SScript *script, char *option) { sprintf(script->error, "lineNum:%d. parse file:%s error", script->lines[script->linePos].lineNum, fileName); return false; } - simInfo("script:%s, start to execute in background", newScript->fileName); newScript->type = SIM_SCRIPT_TYPE_BACKGROUND; script->bgScripts[script->bgScriptLen++] = newScript; + simInfo("script:%s, start to execute in background,", newScript->fileName); - pthread_t pid; - if (pthread_create(&pid, NULL, simExecuteScript, (void *)newScript) != 0) { + if (pthread_create(&newScript->bgPid, NULL, simExecuteScript, (void *)newScript) != 0) { sprintf(script->error, "lineNum:%d. create background thread failed", script->lines[script->linePos].lineNum); return false; } @@ -307,13 +305,13 @@ bool simExecuteSystemCmd(SScript *script, char *option) { simVisuallizeOption(script, option, buf + strlen(buf)); simLogSql(buf, true); - int code = system(buf); - int repeatTimes = 0; + int32_t code = system(buf); + int32_t repeatTimes = 0; while (code < 0) { - simError("script:%s, failed to execute %s , code %d, errno:%d %s, repeatTimes:%d", - script->fileName, buf, code, errno, strerror(errno), repeatTimes); + simError("script:%s, failed to execute %s , code %d, errno:%d %s, repeatTimes:%d", script->fileName, buf, code, + errno, strerror(errno), repeatTimes); taosMsleep(1000); -#ifdef LINUX +#ifdef LINUX signal(SIGCHLD, SIG_DFL); #endif if (repeatTimes++ >= 10) { @@ -368,8 +366,8 @@ bool simExecutePrintCmd(SScript *script, char *rest) { } bool simExecuteSleepCmd(SScript *script, char *option) { - int delta; - char buf[1024]; + int32_t delta; + char buf[1024]; simVisuallizeOption(script, option, buf); option = buf; @@ -395,7 +393,7 @@ bool simExecuteReturnCmd(SScript *script, char *option) { simVisuallizeOption(script, option, buf); option = buf; - int ret = 1; + int32_t ret = 1; if (option && option[0] != 0) ret = atoi(option); if (ret < 0) { @@ -411,8 +409,8 @@ bool simExecuteReturnCmd(SScript *script, char *option) { } void simVisuallizeOption(SScript *script, char *src, char *dst) { - char *var, *token, *value; - int dstLen, srcLen, tokenLen; + char * var, *token, *value; + int32_t dstLen, srcLen, tokenLen; dst[0] = 0, dstLen = 0; @@ -420,14 +418,14 @@ void simVisuallizeOption(SScript *script, char *src, char *dst) { var = strchr(src, '$'); if (var == NULL) break; if (var && ((var - src - 1) > 0) && *(var - 1) == '\\') { - srcLen = (int)(var - src - 1); + srcLen = (int32_t)(var - src - 1); memcpy(dst + dstLen, src, srcLen); dstLen += srcLen; src = var; break; } - srcLen = (int)(var - src); + srcLen = (int32_t)(var - src); memcpy(dst + dstLen, src, srcLen); dstLen += srcLen; @@ -435,13 +433,13 @@ void simVisuallizeOption(SScript *script, char *src, char *dst) { value = simGetVariable(script, token, tokenLen); strcpy(dst + dstLen, value); - dstLen += (int)strlen(value); + dstLen += (int32_t)strlen(value); } strcpy(dst + dstLen, src); } -void simCloseRestFulConnect(SScript *script) { +void simCloseRestFulConnect(SScript *script) { memset(script->auth, 0, 
sizeof(script->auth)); } @@ -465,7 +463,7 @@ void simCloseTaosdConnect(SScript *script) { // {"status":"succ","code":0,"desc":"/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"} // {"status":"succ","head":["affected_rows"],"data":[[1]],"rows":1} // {"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10]],"rows":10} -int simParseHttpCommandResult(SScript *script, char *command) { +int32_t simParseHttpCommandResult(SScript *script, char *command) { cJSON* root = cJSON_Parse(command); if (root == NULL) { simError("script:%s, failed to parse json, response:%s", script->fileName, command); @@ -492,14 +490,15 @@ int simParseHttpCommandResult(SScript *script, char *command) { cJSON_Delete(root); return -1; } - int retcode = (int)code->valueint; + int32_t retcode = (int32_t)code->valueint; if (retcode != 1017) { - simError("script:%s, json:status:%s not equal to succ, response:%s", script->fileName, status->valuestring, command); + simError("script:%s, json:status:%s not equal to succ, response:%s", script->fileName, status->valuestring, + command); cJSON_Delete(root); return retcode; } else { simDebug("script:%s, json:status:%s not equal to succ, but code is %d, response:%s", script->fileName, - status->valuestring, retcode, command); + status->valuestring, retcode, command); cJSON_Delete(root); return 0; } @@ -524,27 +523,27 @@ int simParseHttpCommandResult(SScript *script, char *command) { return -1; } - int rowsize = cJSON_GetArraySize(data); + int32_t rowsize = cJSON_GetArraySize(data); if (rowsize < 0) { simError("script:%s, failed to parse json:data, data size %d, response:%s", script->fileName, rowsize, command); cJSON_Delete(root); return -1; } - int rowIndex = 0; + int32_t rowIndex = 0; sprintf(script->rows, "%d", rowsize); - for (int r = 0; r < rowsize; ++r) { + for (int32_t r = 0; r < rowsize; ++r) { cJSON *row = cJSON_GetArrayItem(data, r); if (row == NULL) continue; if (rowIndex++ >= 10) break; - int colsize = cJSON_GetArraySize(row); + int32_t colsize = cJSON_GetArraySize(row); if (colsize < 0) { break; } colsize = MIN(10, colsize); - for (int c = 0; c < colsize; ++c) { + for (int32_t c = 0; c < colsize; ++c) { cJSON *col = cJSON_GetArrayItem(row, c); if (col->valuestring != NULL) { strcpy(script->data[r][c], col->valuestring); @@ -561,7 +560,7 @@ int simParseHttpCommandResult(SScript *script, char *command) { return 0; } -int simExecuteRestFulCommand(SScript *script, char *command) { +int32_t simExecuteRestFulCommand(SScript *script, char *command) { char buf[5000] = {0}; sprintf(buf, "%s 2>/dev/null", command); @@ -571,13 +570,13 @@ int simExecuteRestFulCommand(SScript *script, char *command) { return -1; } - int mallocSize = 2000; - int alreadyReadSize = 0; - char* content = malloc(mallocSize); + int32_t mallocSize = 2000; + int32_t alreadyReadSize = 0; + char * content = malloc(mallocSize); while (!feof(fp)) { - int availSize = mallocSize - alreadyReadSize; - int len = (int)fread(content + alreadyReadSize, 1, availSize, fp); + int32_t availSize = mallocSize - alreadyReadSize; + int32_t len = (int32_t)fread(content + alreadyReadSize, 1, availSize, fp); if (len >= availSize) { alreadyReadSize += len; mallocSize *= 2; @@ -595,10 +594,11 @@ bool simCreateRestFulConnect(SScript 
*script, char *user, char *pass) { sprintf(command, "curl 127.0.0.1:6041/rest/login/%s/%s", user, pass); bool success = false; - for (int attempt = 0; attempt < 10; ++attempt) { + for (int32_t attempt = 0; attempt < 10; ++attempt) { success = simExecuteRestFulCommand(script, command) == 0; if (!success) { - simDebug("script:%s, user:%s connect taosd failed:%s, attempt:%d", script->fileName, user, taos_errstr(NULL), attempt); + simDebug("script:%s, user:%s connect taosd failed:%s, attempt:%d", script->fileName, user, taos_errstr(NULL), + attempt); taosMsleep(1000); } else { simDebug("script:%s, user:%s connect taosd successed, attempt:%d", script->fileName, user, attempt); @@ -607,7 +607,8 @@ bool simCreateRestFulConnect(SScript *script, char *user, char *pass) { } if (!success) { - sprintf(script->error, "lineNum:%d. connect taosd failed:%s", script->lines[script->linePos].lineNum, taos_errstr(NULL)); + sprintf(script->error, "lineNum:%d. connect taosd failed:%s", script->lines[script->linePos].lineNum, + taos_errstr(NULL)); return false; } @@ -619,10 +620,11 @@ bool simCreateNativeConnect(SScript *script, char *user, char *pass) { simCloseTaosdConnect(script); void *taos = NULL; taosMsleep(2000); - for (int attempt = 0; attempt < 10; ++attempt) { + for (int32_t attempt = 0; attempt < 10; ++attempt) { taos = taos_connect(NULL, user, pass, NULL, tsDnodeShellPort); if (taos == NULL) { - simDebug("script:%s, user:%s connect taosd failed:%s, attempt:%d", script->fileName, user, taos_errstr(NULL), attempt); + simDebug("script:%s, user:%s connect taosd failed:%s, attempt:%d", script->fileName, user, taos_errstr(NULL), + attempt); taosMsleep(1000); } else { simDebug("script:%s, user:%s connect taosd successed, attempt:%d", script->fileName, user, attempt); @@ -631,7 +633,8 @@ bool simCreateNativeConnect(SScript *script, char *user, char *pass) { } if (taos == NULL) { - sprintf(script->error, "lineNum:%d. connect taosd failed:%s", script->lines[script->linePos].lineNum, taos_errstr(NULL)); + sprintf(script->error, "lineNum:%d. 
connect taosd failed:%s", script->lines[script->linePos].lineNum, + taos_errstr(NULL)); return false; } @@ -642,9 +645,9 @@ bool simCreateNativeConnect(SScript *script, char *user, char *pass) { } bool simCreateTaosdConnect(SScript *script, char *rest) { - char *user = TSDB_DEFAULT_USER; - char *token; - int tokenLen; + char * user = TSDB_DEFAULT_USER; + char * token; + int32_t tokenLen; rest = paGetToken(rest, &token, &tokenLen); rest = paGetToken(rest, &token, &tokenLen); if (tokenLen != 0) { @@ -659,26 +662,27 @@ bool simCreateTaosdConnect(SScript *script, char *rest) { } bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) { - char timeStr[30] = {0}; - time_t tt; + char timeStr[30] = {0}; + time_t tt; struct tm *tp; - SCmdLine *line = &script->lines[script->linePos]; - int ret = -1; + SCmdLine * line = &script->lines[script->linePos]; + int32_t ret = -1; - TAOS_RES* pSql = NULL; - - for (int attempt = 0; attempt < 10; ++attempt) { + TAOS_RES *pSql = NULL; + + for (int32_t attempt = 0; attempt < 10; ++attempt) { simLogSql(rest, false); pSql = taos_query(script->taos, rest); ret = taos_errno(pSql); - + if (ret == TSDB_CODE_MND_TABLE_ALREADY_EXIST || ret == TSDB_CODE_MND_DB_ALREADY_EXIST) { - simDebug("script:%s, taos:%p, %s success, ret:%d:%s", script->fileName, script->taos, rest, ret & 0XFFFF, tstrerror(ret)); + simDebug("script:%s, taos:%p, %s success, ret:%d:%s", script->fileName, script->taos, rest, ret & 0XFFFF, + tstrerror(ret)); ret = 0; break; } else if (ret != 0) { - simDebug("script:%s, taos:%p, %s failed, ret:%d:%s, error:%s", - script->fileName, script->taos, rest, ret & 0XFFFF, tstrerror(ret), taos_errstr(pSql)); + simDebug("script:%s, taos:%p, %s failed, ret:%d:%s, error:%s", script->fileName, script->taos, rest, ret & 0XFFFF, + tstrerror(ret), taos_errstr(pSql)); if (line->errorJump == SQL_JUMP_TRUE) { script->linePos = line->jump; @@ -689,7 +693,7 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) { } else { break; } - + taos_free_result(pSql); } @@ -698,8 +702,8 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) { return false; } - int numOfRows = 0; - int num_fields = taos_field_count(pSql); + int32_t numOfRows = 0; + int32_t num_fields = taos_field_count(pSql); if (num_fields != 0) { if (pSql == NULL) { simDebug("script:%s, taos:%p, %s failed, result is null", script->fileName, script->taos, rest); @@ -717,9 +721,9 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) { while ((row = taos_fetch_row(pSql))) { if (numOfRows < MAX_QUERY_ROW_NUM) { TAOS_FIELD *fields = taos_fetch_fields(pSql); - int* length = taos_fetch_lengths(pSql); - - for (int i = 0; i < num_fields; i++) { + int32_t * length = taos_fetch_lengths(pSql); + + for (int32_t i = 0; i < num_fields; i++) { char *value = NULL; if (i < MAX_QUERY_COL_NUM) { value = script->data[numOfRows][i]; @@ -735,8 +739,7 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) { switch (fields[i].type) { case TSDB_DATA_TYPE_BOOL: - sprintf(value, "%s", - ((((int)(*((char *)row[i]))) == 1) ? "1" : "0")); + sprintf(value, "%s", ((((int32_t)(*((char *)row[i]))) == 1) ? 
"1" : "0")); break; case TSDB_DATA_TYPE_TINYINT: sprintf(value, "%d", *((int8_t *)row[i])); @@ -779,9 +782,8 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) { tp = localtime(&tt); strftime(timeStr, 64, "%y-%m-%d %H:%M:%S", tp); - sprintf(value, "%s.%03d", timeStr, - (int)(*((int64_t *)row[i]) % 1000)); - + sprintf(value, "%s.%03d", timeStr, (int32_t)(*((int64_t *)row[i]) % 1000)); + break; default: break; @@ -814,17 +816,16 @@ bool simExecuteRestFulSqlCommand(SScript *script, char *rest) { char command[4096]; sprintf(command, "curl -H 'Authorization: Taosd %s' -d \"%s\" 127.0.0.1:6041/rest/sql", script->auth, rest); - int ret = -1; - for (int attempt = 0; attempt < 10; ++attempt) { + int32_t ret = -1; + for (int32_t attempt = 0; attempt < 10; ++attempt) { ret = simExecuteRestFulCommand(script, command); - if (ret == TSDB_CODE_MND_TABLE_ALREADY_EXIST || - ret == TSDB_CODE_MND_DB_ALREADY_EXIST) { - simDebug("script:%s, taos:%p, %s success, ret:%d:%s", script->fileName, script->taos, rest, ret & 0XFFFF, tstrerror(ret)); + if (ret == TSDB_CODE_MND_TABLE_ALREADY_EXIST || ret == TSDB_CODE_MND_DB_ALREADY_EXIST) { + simDebug("script:%s, taos:%p, %s success, ret:%d:%s", script->fileName, script->taos, rest, ret & 0XFFFF, + tstrerror(ret)); ret = 0; break; } else if (ret != 0) { - simDebug("script:%s, taos:%p, %s failed, ret:%d", - script->fileName, script->taos, rest, ret); + simDebug("script:%s, taos:%p, %s failed, ret:%d", script->fileName, script->taos, rest, ret); if (line->errorJump == SQL_JUMP_TRUE) { script->linePos = line->jump; @@ -854,8 +855,8 @@ bool simExecuteSqlImpCmd(SScript *script, char *rest, bool isSlow) { simDebug("script:%s, exec:%s", script->fileName, rest); strcpy(script->rows, "-1"); - for (int row = 0; row < MAX_QUERY_ROW_NUM; ++row) { - for (int col = 0; col < MAX_QUERY_COL_NUM; ++col) { + for (int32_t row = 0; row < MAX_QUERY_ROW_NUM; ++row) { + for (int32_t col = 0; col < MAX_QUERY_COL_NUM; ++col) { strcpy(script->data[row][col], "null"); } } @@ -903,23 +904,23 @@ bool simExecuteSqlSlowCmd(SScript *script, char *rest) { bool simExecuteRestfulCmd(SScript *script, char *rest) { FILE *fp = NULL; - char filename[256]; - sprintf(filename, "%s/tmp.sql", tsScriptDir); + char filename[256]; + sprintf(filename, "%s/tmp.sql", tsScriptDir); fp = fopen(filename, "w"); if (fp == NULL) { fprintf(stderr, "ERROR: failed to open file: %s\n", filename); return false; } - char db[64] = {0}; - char tb[64] = {0}; - char gzip[32] = {0}; + char db[64] = {0}; + char tb[64] = {0}; + char gzip[32] = {0}; int32_t ts; int32_t times; sscanf(rest, "%s %s %d %d %s", db, tb, &ts, ×, gzip); - + fprintf(fp, "insert into %s.%s values ", db, tb); - for (int i = 0; i < times; ++i) { + for (int32_t i = 0; i < times; ++i) { fprintf(fp, "(%d000, %d)", ts + i, ts); } fprintf(fp, " \n"); @@ -951,8 +952,8 @@ bool simExecuteSqlErrorCmd(SScript *script, char *rest) { simDebug("script:%s, exec:%s", script->fileName, rest); strcpy(script->rows, "-1"); - for (int row = 0; row < MAX_QUERY_ROW_NUM; ++row) { - for (int col = 0; col < MAX_QUERY_COL_NUM; ++col) { + for (int32_t row = 0; row < MAX_QUERY_ROW_NUM; ++row) { + for (int32_t col = 0; col < MAX_QUERY_COL_NUM; ++col) { strcpy(script->data[row][col], "null"); } } @@ -981,27 +982,27 @@ bool simExecuteSqlErrorCmd(SScript *script, char *rest) { return true; } - int ret; - TAOS_RES* pSql = NULL; + int32_t ret; + TAOS_RES *pSql = NULL; if (simAsyncQuery) { char command[4096]; sprintf(command, "curl -H 'Authorization: Taosd %s' -d '%s' 
127.0.0.1:6041/rest/sql", script->auth, rest); ret = simExecuteRestFulCommand(script, command); - } - else { + } else { pSql = taos_query(script->taos, rest); ret = taos_errno(pSql); taos_free_result(pSql); } if (ret != TSDB_CODE_SUCCESS) { - simDebug("script:%s, taos:%p, %s execute, expect failed, so success, ret:%d:%s", - script->fileName, script->taos, rest, ret & 0XFFFF, tstrerror(ret)); + simDebug("script:%s, taos:%p, %s execute, expect failed, so success, ret:%d:%s", script->fileName, script->taos, + rest, ret & 0XFFFF, tstrerror(ret)); script->linePos++; return true; } - - sprintf(script->error, "lineNum:%d. sql:%s expect failed, but success, ret:%d:%s", line->lineNum, rest, ret & 0XFFFF, tstrerror(ret)); + + sprintf(script->error, "lineNum:%d. sql:%s expect failed, but success, ret:%d:%s", line->lineNum, rest, ret & 0XFFFF, + tstrerror(ret)); return false; } diff --git a/tests/tsim/src/simMain.c b/tests/tsim/src/simMain.c index ef1a488f60..33fd24dd58 100644 --- a/tests/tsim/src/simMain.c +++ b/tests/tsim/src/simMain.c @@ -13,6 +13,7 @@ * along with this program. If not, see . */ +#define _DEFAULT_SOURCE #include "os.h" #include "tglobal.h" #include "sim.h" @@ -20,15 +21,15 @@ bool simAsyncQuery = false; -void simHandleSignal(int signo) { +void simHandleSignal(int32_t signo) { simSystemCleanUp(); exit(1); } -int main(int argc, char *argv[]) { +int32_t main(int32_t argc, char *argv[]) { char scriptFile[MAX_FILE_NAME_LEN] = "sim_main_test.sim"; - for (int i = 1; i < argc; ++i) { + for (int32_t i = 1; i < argc; ++i) { if (strcmp(argv[i], "-c") == 0 && i < argc - 1) { tstrncpy(configDir, argv[++i], MAX_FILE_NAME_LEN); } else if (strcmp(argv[i], "-f") == 0 && i < argc - 1) { @@ -37,8 +38,7 @@ int main(int argc, char *argv[]) { simAsyncQuery = true; } else { printf("usage: %s [options] \n", argv[0]); - printf(" [-c config]: config directory, default is: %s\n", - configDir); + printf(" [-c config]: config directory, default is: %s\n", configDir); printf(" [-f script]: script filename\n"); exit(0); } diff --git a/tests/tsim/src/simParse.c b/tests/tsim/src/simParse.c index 2e6121304f..b909f5bd8f 100644 --- a/tests/tsim/src/simParse.c +++ b/tests/tsim/src/simParse.c @@ -57,6 +57,7 @@ * */ +#define _DEFAULT_SOURCE #include "os.h" #include "sim.h" #include "simParse.h" @@ -64,16 +65,16 @@ #undef TAOS_MEM_CHECK static SCommand *cmdHashList[MAX_NUM_CMD]; -static SCmdLine cmdLine[MAX_CMD_LINES]; -static char parseErr[MAX_ERROR_LEN]; -static char optionBuffer[MAX_OPTION_BUFFER]; -static int numOfLines, optionOffset; -static SLabel label, dest; -static SBlock block; +static SCmdLine cmdLine[MAX_CMD_LINES]; +static char parseErr[MAX_ERROR_LEN]; +static char optionBuffer[MAX_OPTION_BUFFER]; +static int32_t numOfLines, optionOffset; +static SLabel label, dest; +static SBlock block; -int simHashCmd(char *token, int tokenLen) { - int i; - int hash = 0; +int32_t simHashCmd(char *token, int32_t tokenLen) { + int32_t i; + int32_t hash = 0; for (i = 0; i < tokenLen; ++i) hash += token[i]; @@ -82,8 +83,8 @@ int simHashCmd(char *token, int tokenLen) { return hash; } -SCommand *simCheckCmd(char *token, int tokenLen) { - int hash; +SCommand *simCheckCmd(char *token, int32_t tokenLen) { + int32_t hash; SCommand *node; hash = simHashCmd(token, tokenLen); @@ -102,10 +103,10 @@ SCommand *simCheckCmd(char *token, int tokenLen) { } void simAddCmdIntoHash(SCommand *pCmd) { - int hash; + int32_t hash; SCommand *node; - hash = simHashCmd(pCmd->name, (int)strlen(pCmd->name)); + hash = simHashCmd(pCmd->name, 
(int32_t)strlen(pCmd->name)); node = cmdHashList[hash]; pCmd->next = node; cmdHashList[hash] = pCmd; @@ -122,7 +123,7 @@ void simResetParser() { } SScript *simBuildScriptObj(char *fileName) { - int i, destPos; + int32_t i, destPos; /* process labels */ @@ -176,11 +177,11 @@ SScript *simBuildScriptObj(char *fileName) { } SScript *simParseScript(char *fileName) { - FILE *fd; - int tokenLen, lineNum = 0; - char buffer[MAX_LINE_LEN], name[128], *token, *rest; + FILE * fd; + int32_t tokenLen, lineNum = 0; + char buffer[MAX_LINE_LEN], name[128], *token, *rest; SCommand *pCmd; - SScript *script; + SScript * script; if ((fileName[0] == '.') || (fileName[0] == '/')) { strcpy(name, fileName); @@ -199,12 +200,13 @@ SScript *simParseScript(char *fileName) { if (fgets(buffer, sizeof(buffer), fd) == NULL) continue; lineNum++; - int cmdlen = (int)strlen(buffer); - if (buffer[cmdlen - 1] == '\r' || buffer[cmdlen - 1] == '\n') + int32_t cmdlen = (int32_t)strlen(buffer); + if (buffer[cmdlen - 1] == '\r' || buffer[cmdlen - 1] == '\n') { buffer[cmdlen - 1] = 0; + } rest = buffer; - for (int i = 0; i < cmdlen; ++i) { + for (int32_t i = 0; i < cmdlen; ++i) { if (buffer[i] == '\r' || buffer[i] == '\n') { buffer[i] = ' '; } @@ -249,9 +251,9 @@ SScript *simParseScript(char *fileName) { return script; } -int simCheckExpression(char *exp) { - char *op1, *op2, *op, *rest; - int op1Len, op2Len, opLen; +int32_t simCheckExpression(char *exp) { + char * op1, *op2, *op, *rest; + int32_t op1Len, op2Len, opLen; rest = paGetToken(exp, &op1, &op1Len); if (op1Len == 0) { @@ -282,8 +284,7 @@ int simCheckExpression(char *exp) { return -1; } } else if (opLen == 2) { - if (op[1] != '=' || - (op[0] != '=' && op[0] != '<' && op[0] != '>' && op[0] != '!')) { + if (op[1] != '=' || (op[0] != '=' && op[0] != '<' && op[0] != '>' && op[0] != '!')) { sprintf(parseErr, "left side of assignment must be variable"); return -1; } @@ -294,10 +295,10 @@ int simCheckExpression(char *exp) { rest = paGetToken(rest, &op, &opLen); - if (opLen == 0) return (int)(rest - exp); + if (opLen == 0) return (int32_t)(rest - exp); /* if it is key word "then" */ - if (strncmp(op, "then", 4) == 0) return (int)(op - exp); + if (strncmp(op, "then", 4) == 0) return (int32_t)(op - exp); rest = paGetToken(rest, &op2, &op2Len); if (op2Len == 0) { @@ -310,16 +311,15 @@ int simCheckExpression(char *exp) { return -1; } - if (op[0] == '+' || op[0] == '-' || op[0] == '*' || op[0] == '/' || - op[0] == '.') { - return (int)(rest - exp); + if (op[0] == '+' || op[0] == '-' || op[0] == '*' || op[0] == '/' || op[0] == '.') { + return (int32_t)(rest - exp); } return -1; } -bool simParseExpression(char *token, int lineNum) { - int expLen; +bool simParseExpression(char *token, int32_t lineNum) { + int32_t expLen; expLen = simCheckExpression(token); if (expLen <= 0) return -1; @@ -335,9 +335,9 @@ bool simParseExpression(char *token, int lineNum) { return true; } -bool simParseIfCmd(char *rest, SCommand *pCmd, int lineNum) { - char *ret; - int expLen; +bool simParseIfCmd(char *rest, SCommand *pCmd, int32_t lineNum) { + char * ret; + int32_t expLen; expLen = simCheckExpression(rest); @@ -364,8 +364,8 @@ bool simParseIfCmd(char *rest, SCommand *pCmd, int lineNum) { return true; } -bool simParseElifCmd(char *rest, SCommand *pCmd, int lineNum) { - int expLen; +bool simParseElifCmd(char *rest, SCommand *pCmd, int32_t lineNum) { + int32_t expLen; expLen = simCheckExpression(rest); @@ -382,8 +382,7 @@ bool simParseElifCmd(char *rest, SCommand *pCmd, int lineNum) { } cmdLine[numOfLines].cmdno 
= SIM_CMD_GOTO; - block.jump[block.top - 1][(uint8_t)block.numJump[block.top - 1]] = - &(cmdLine[numOfLines].jump); + block.jump[block.top - 1][(uint8_t)block.numJump[block.top - 1]] = &(cmdLine[numOfLines].jump); block.numJump[block.top - 1]++; numOfLines++; @@ -402,7 +401,7 @@ bool simParseElifCmd(char *rest, SCommand *pCmd, int lineNum) { return true; } -bool simParseElseCmd(char *rest, SCommand *pCmd, int lineNum) { +bool simParseElseCmd(char *rest, SCommand *pCmd, int32_t lineNum) { if (block.top < 1) { sprintf(parseErr, "no matching if"); return false; @@ -414,8 +413,7 @@ bool simParseElseCmd(char *rest, SCommand *pCmd, int lineNum) { } cmdLine[numOfLines].cmdno = SIM_CMD_GOTO; - block.jump[block.top - 1][(uint8_t)block.numJump[block.top - 1]] = - &(cmdLine[numOfLines].jump); + block.jump[block.top - 1][(uint8_t)block.numJump[block.top - 1]] = &(cmdLine[numOfLines].jump); block.numJump[block.top - 1]++; numOfLines++; @@ -426,8 +424,8 @@ bool simParseElseCmd(char *rest, SCommand *pCmd, int lineNum) { return true; } -bool simParseEndiCmd(char *rest, SCommand *pCmd, int lineNum) { - int i; +bool simParseEndiCmd(char *rest, SCommand *pCmd, int32_t lineNum) { + int32_t i; if (block.top < 1) { sprintf(parseErr, "no matching if"); @@ -441,8 +439,9 @@ bool simParseEndiCmd(char *rest, SCommand *pCmd, int lineNum) { if (block.pos[block.top - 1]) *(block.pos[block.top - 1]) = numOfLines; - for (i = 0; i < block.numJump[block.top - 1]; ++i) + for (i = 0; i < block.numJump[block.top - 1]; ++i) { *(block.jump[block.top - 1][i]) = numOfLines; + } block.numJump[block.top - 1] = 0; block.top--; @@ -450,8 +449,8 @@ bool simParseEndiCmd(char *rest, SCommand *pCmd, int lineNum) { return true; } -bool simParseWhileCmd(char *rest, SCommand *pCmd, int lineNum) { - int expLen; +bool simParseWhileCmd(char *rest, SCommand *pCmd, int32_t lineNum) { + int32_t expLen; expLen = simCheckExpression(rest); @@ -473,8 +472,8 @@ bool simParseWhileCmd(char *rest, SCommand *pCmd, int lineNum) { return true; } -bool simParseEndwCmd(char *rest, SCommand *pCmd, int lineNum) { - int i; +bool simParseEndwCmd(char *rest, SCommand *pCmd, int32_t lineNum) { + int32_t i; if (block.top < 1) { sprintf(parseErr, "no matching while"); @@ -493,17 +492,18 @@ bool simParseEndwCmd(char *rest, SCommand *pCmd, int lineNum) { *(block.pos[block.top - 1]) = numOfLines; - for (i = 0; i < block.numJump[block.top - 1]; ++i) + for (i = 0; i < block.numJump[block.top - 1]; ++i) { *(block.jump[block.top - 1][i]) = numOfLines; + } block.top--; return true; } -bool simParseSwitchCmd(char *rest, SCommand *pCmd, int lineNum) { - char *token; - int tokenLen; +bool simParseSwitchCmd(char *rest, SCommand *pCmd, int32_t lineNum) { + char * token; + int32_t tokenLen; rest = paGetToken(rest, &token, &tokenLen); if (tokenLen == 0) { @@ -524,9 +524,9 @@ bool simParseSwitchCmd(char *rest, SCommand *pCmd, int lineNum) { return true; } -bool simParseCaseCmd(char *rest, SCommand *pCmd, int lineNum) { - char *token; - int tokenLen; +bool simParseCaseCmd(char *rest, SCommand *pCmd, int32_t lineNum) { + char * token; + int32_t tokenLen; rest = paGetToken(rest, &token, &tokenLen); if (tokenLen == 0) { @@ -544,16 +544,16 @@ bool simParseCaseCmd(char *rest, SCommand *pCmd, int lineNum) { return false; } - if (block.pos[block.top - 1] != NULL) + if (block.pos[block.top - 1] != NULL) { *(block.pos[block.top - 1]) = numOfLines; + } block.pos[block.top - 1] = &(cmdLine[numOfLines].jump); cmdLine[numOfLines].cmdno = SIM_CMD_TEST; cmdLine[numOfLines].lineNum = lineNum; 
cmdLine[numOfLines].optionOffset = optionOffset; - memcpy(optionBuffer + optionOffset, block.sexp[block.top - 1], - block.sexpLen[block.top - 1]); + memcpy(optionBuffer + optionOffset, block.sexp[block.top - 1], block.sexpLen[block.top - 1]); optionOffset += block.sexpLen[block.top - 1]; *(optionBuffer + optionOffset++) = ' '; *(optionBuffer + optionOffset++) = '='; @@ -567,20 +567,18 @@ bool simParseCaseCmd(char *rest, SCommand *pCmd, int lineNum) { return true; } -bool simParseBreakCmd(char *rest, SCommand *pCmd, int lineNum) { +bool simParseBreakCmd(char *rest, SCommand *pCmd, int32_t lineNum) { if (block.top < 1) { sprintf(parseErr, "no blcok exists"); return false; } - if (block.type[block.top - 1] != BLOCK_SWITCH && - block.type[block.top - 1] != BLOCK_WHILE) { + if (block.type[block.top - 1] != BLOCK_SWITCH && block.type[block.top - 1] != BLOCK_WHILE) { sprintf(parseErr, "not in switch or while block"); return false; } - block.jump[block.top - 1][(uint8_t)block.numJump[block.top - 1]] = - &(cmdLine[numOfLines].jump); + block.jump[block.top - 1][(uint8_t)block.numJump[block.top - 1]] = &(cmdLine[numOfLines].jump); block.numJump[block.top - 1]++; cmdLine[numOfLines].cmdno = SIM_CMD_GOTO; @@ -590,7 +588,7 @@ bool simParseBreakCmd(char *rest, SCommand *pCmd, int lineNum) { return true; } -bool simParseDefaultCmd(char *rest, SCommand *pCmd, int lineNum) { +bool simParseDefaultCmd(char *rest, SCommand *pCmd, int32_t lineNum) { if (block.top < 1) { sprintf(parseErr, "no matching switch"); return false; @@ -601,14 +599,15 @@ bool simParseDefaultCmd(char *rest, SCommand *pCmd, int lineNum) { return false; } - if (block.pos[block.top - 1] != NULL) + if (block.pos[block.top - 1] != NULL) { *(block.pos[block.top - 1]) = numOfLines; + } return true; } -bool simParseEndsCmd(char *rest, SCommand *pCmd, int lineNum) { - int i; +bool simParseEndsCmd(char *rest, SCommand *pCmd, int32_t lineNum) { + int32_t i; if (block.top < 1) { sprintf(parseErr, "no matching switch"); @@ -620,8 +619,9 @@ bool simParseEndsCmd(char *rest, SCommand *pCmd, int lineNum) { return false; } - for (i = 0; i < block.numJump[block.top - 1]; ++i) + for (i = 0; i < block.numJump[block.top - 1]; ++i) { *(block.jump[block.top - 1][i]) = numOfLines; + } block.numJump[block.top - 1] = 0; block.top--; @@ -629,7 +629,7 @@ bool simParseEndsCmd(char *rest, SCommand *pCmd, int lineNum) { return true; } -bool simParseContinueCmd(char *rest, SCommand *pCmd, int lineNum) { +bool simParseContinueCmd(char *rest, SCommand *pCmd, int32_t lineNum) { if (block.top < 1) { sprintf(parseErr, "no matching while"); return false; @@ -648,14 +648,14 @@ bool simParseContinueCmd(char *rest, SCommand *pCmd, int lineNum) { return true; } -bool simParsePrintCmd(char *rest, SCommand *pCmd, int lineNum) { - int expLen; +bool simParsePrintCmd(char *rest, SCommand *pCmd, int32_t lineNum) { + int32_t expLen; rest++; cmdLine[numOfLines].cmdno = SIM_CMD_PRINT; cmdLine[numOfLines].lineNum = lineNum; cmdLine[numOfLines].optionOffset = optionOffset; - expLen = (int)strlen(rest); + expLen = (int32_t)strlen(rest); memcpy(optionBuffer + optionOffset, rest, expLen); optionOffset += expLen + 1; *(optionBuffer + optionOffset - 1) = 0; @@ -665,8 +665,8 @@ bool simParsePrintCmd(char *rest, SCommand *pCmd, int lineNum) { } void simCheckSqlOption(char *rest) { - int valueLen; - char *value, *xpos; + int32_t valueLen; + char * value, *xpos; xpos = strstr(rest, " -x"); // need a blank if (xpos) { @@ -682,15 +682,15 @@ void simCheckSqlOption(char *rest) { } } -bool 
simParseSqlCmd(char *rest, SCommand *pCmd, int lineNum) { - int expLen; +bool simParseSqlCmd(char *rest, SCommand *pCmd, int32_t lineNum) { + int32_t expLen; rest++; simCheckSqlOption(rest); cmdLine[numOfLines].cmdno = SIM_CMD_SQL; cmdLine[numOfLines].lineNum = lineNum; cmdLine[numOfLines].optionOffset = optionOffset; - expLen = (int)strlen(rest); + expLen = (int32_t)strlen(rest); memcpy(optionBuffer + optionOffset, rest, expLen); optionOffset += expLen + 1; *(optionBuffer + optionOffset - 1) = 0; @@ -699,14 +699,14 @@ bool simParseSqlCmd(char *rest, SCommand *pCmd, int lineNum) { return true; } -bool simParseSqlErrorCmd(char *rest, SCommand *pCmd, int lineNum) { - int expLen; +bool simParseSqlErrorCmd(char *rest, SCommand *pCmd, int32_t lineNum) { + int32_t expLen; rest++; cmdLine[numOfLines].cmdno = SIM_CMD_SQL_ERROR; cmdLine[numOfLines].lineNum = lineNum; cmdLine[numOfLines].optionOffset = optionOffset; - expLen = (int)strlen(rest); + expLen = (int32_t)strlen(rest); memcpy(optionBuffer + optionOffset, rest, expLen); optionOffset += expLen + 1; *(optionBuffer + optionOffset - 1) = 0; @@ -715,26 +715,26 @@ bool simParseSqlErrorCmd(char *rest, SCommand *pCmd, int lineNum) { return true; } -bool simParseSqlSlowCmd(char *rest, SCommand *pCmd, int lineNum) { +bool simParseSqlSlowCmd(char *rest, SCommand *pCmd, int32_t lineNum) { simParseSqlCmd(rest, pCmd, lineNum); cmdLine[numOfLines - 1].cmdno = SIM_CMD_SQL_SLOW; return true; } -bool simParseRestfulCmd(char *rest, SCommand *pCmd, int lineNum) { +bool simParseRestfulCmd(char *rest, SCommand *pCmd, int32_t lineNum) { simParseSqlCmd(rest, pCmd, lineNum); cmdLine[numOfLines - 1].cmdno = SIM_CMD_RESTFUL; return true; } -bool simParseSystemCmd(char *rest, SCommand *pCmd, int lineNum) { - int expLen; +bool simParseSystemCmd(char *rest, SCommand *pCmd, int32_t lineNum) { + int32_t expLen; rest++; cmdLine[numOfLines].cmdno = SIM_CMD_SYSTEM; cmdLine[numOfLines].lineNum = lineNum; cmdLine[numOfLines].optionOffset = optionOffset; - expLen = (int)strlen(rest); + expLen = (int32_t)strlen(rest); memcpy(optionBuffer + optionOffset, rest, expLen); optionOffset += expLen + 1; *(optionBuffer + optionOffset - 1) = 0; @@ -743,15 +743,15 @@ bool simParseSystemCmd(char *rest, SCommand *pCmd, int lineNum) { return true; } -bool simParseSystemContentCmd(char *rest, SCommand *pCmd, int lineNum) { +bool simParseSystemContentCmd(char *rest, SCommand *pCmd, int32_t lineNum) { simParseSystemCmd(rest, pCmd, lineNum); cmdLine[numOfLines - 1].cmdno = SIM_CMD_SYSTEM_CONTENT; return true; } -bool simParseSleepCmd(char *rest, SCommand *pCmd, int lineNum) { - char *token; - int tokenLen; +bool simParseSleepCmd(char *rest, SCommand *pCmd, int32_t lineNum) { + char * token; + int32_t tokenLen; cmdLine[numOfLines].cmdno = SIM_CMD_SLEEP; cmdLine[numOfLines].lineNum = lineNum; @@ -768,9 +768,9 @@ bool simParseSleepCmd(char *rest, SCommand *pCmd, int lineNum) { return true; } -bool simParseReturnCmd(char *rest, SCommand *pCmd, int lineNum) { - char *token; - int tokenLen; +bool simParseReturnCmd(char *rest, SCommand *pCmd, int32_t lineNum) { + char * token; + int32_t tokenLen; cmdLine[numOfLines].cmdno = SIM_CMD_RETURN; cmdLine[numOfLines].lineNum = lineNum; @@ -787,9 +787,9 @@ bool simParseReturnCmd(char *rest, SCommand *pCmd, int lineNum) { return true; } -bool simParseGotoCmd(char *rest, SCommand *pCmd, int lineNum) { - char *token; - int tokenLen; +bool simParseGotoCmd(char *rest, SCommand *pCmd, int32_t lineNum) { + char * token; + int32_t tokenLen; rest = paGetToken(rest, 
&token, &tokenLen); @@ -810,9 +810,9 @@ bool simParseGotoCmd(char *rest, SCommand *pCmd, int lineNum) { return true; } -bool simParseRunCmd(char *rest, SCommand *pCmd, int lineNum) { - char *token; - int tokenLen; +bool simParseRunCmd(char *rest, SCommand *pCmd, int32_t lineNum) { + char * token; + int32_t tokenLen; rest = paGetToken(rest, &token, &tokenLen); @@ -832,14 +832,14 @@ bool simParseRunCmd(char *rest, SCommand *pCmd, int lineNum) { return true; } -bool simParseRunBackCmd(char *rest, SCommand *pCmd, int lineNum) { +bool simParseRunBackCmd(char *rest, SCommand *pCmd, int32_t lineNum) { simParseRunCmd(rest, pCmd, lineNum); cmdLine[numOfLines - 1].cmdno = SIM_CMD_RUN_BACK; return true; } void simInitsimCmdList() { - int cmdno; + int32_t cmdno; memset(simCmdList, 0, SIM_CMD_END * sizeof(SCommand)); /* internal command */ diff --git a/tests/tsim/src/simSystem.c b/tests/tsim/src/simSystem.c index 17df7f306a..693ade7b35 100644 --- a/tests/tsim/src/simSystem.c +++ b/tests/tsim/src/simSystem.c @@ -13,6 +13,7 @@ * along with this program. If not, see . */ +#define _DEFAULT_SOURCE #include "os.h" #include "sim.h" #include "taos.h" @@ -24,11 +25,11 @@ SScript *simScriptList[MAX_MAIN_SCRIPT_NUM]; SCommand simCmdList[SIM_CMD_END]; -int simScriptPos = -1; -int simScriptSucced = 0; -int simDebugFlag = 135; -void simCloseTaosdConnect(SScript *script); -char simHostName[128]; +int32_t simScriptPos = -1; +int32_t simScriptSucced = 0; +int32_t simDebugFlag = 135; +void simCloseTaosdConnect(SScript *script); +char simHostName[128]; char *simParseArbitratorName(char *varName) { static char hostName[140]; @@ -39,8 +40,8 @@ char *simParseArbitratorName(char *varName) { char *simParseHostName(char *varName) { static char hostName[140]; - int index = atoi(varName + 8); - int port = 7100; + int32_t index = atoi(varName + 8); + int32_t port = 7100; switch (index) { case 1: port = 7100; @@ -70,9 +71,9 @@ char *simParseHostName(char *varName) { port = 7900; break; } - + sprintf(hostName, "'%s:%d'", simHostName, port); - //simInfo("hostName:%s", hostName); + // simInfo("hostName:%s", hostName); return hostName; } @@ -88,39 +89,45 @@ void simSystemCleanUp() {} void simFreeScript(SScript *script) { if (script->type == SIM_SCRIPT_TYPE_MAIN) { - for (int i = 0; i < script->bgScriptLen; ++i) { + simInfo("script:%s, background script num:%d, stop them", script->fileName, script->bgScriptLen); + + for (int32_t i = 0; i < script->bgScriptLen; ++i) { SScript *bgScript = script->bgScripts[i]; + simInfo("script:%s, set stop flag", script->fileName); bgScript->killed = true; + if (taosCheckPthreadValid(bgScript->bgPid)) { + pthread_join(bgScript->bgPid, NULL); + } } } + simDebug("script:%s, is freed", script->fileName); taos_close(script->taos); - taosTFree(script->lines); - taosTFree(script->optionBuffer); - taosTFree(script); + tfree(script->lines); + tfree(script->optionBuffer); + tfree(script); } SScript *simProcessCallOver(SScript *script) { if (script->type == SIM_SCRIPT_TYPE_MAIN) { if (script->killed) { - simInfo("script:" FAILED_PREFIX "%s" FAILED_POSTFIX ", " FAILED_PREFIX - "failed" FAILED_POSTFIX ", error:%s", - script->fileName, script->error); + simInfo("script:" FAILED_PREFIX "%s" FAILED_POSTFIX ", " FAILED_PREFIX "failed" FAILED_POSTFIX ", error:%s", + script->fileName, script->error); exit(-1); } else { - simInfo("script:" SUCCESS_PREFIX "%s" SUCCESS_POSTFIX ", " SUCCESS_PREFIX - "success" SUCCESS_POSTFIX, - script->fileName); + simInfo("script:" SUCCESS_PREFIX "%s" SUCCESS_POSTFIX ", " SUCCESS_PREFIX 
"success" SUCCESS_POSTFIX, + script->fileName); simCloseTaosdConnect(script); simScriptSucced++; simScriptPos--; + + simFreeScript(script); if (simScriptPos == -1) { simInfo("----------------------------------------------------------------------"); simInfo("Simulation Test Done, " SUCCESS_PREFIX "%d" SUCCESS_POSTFIX " Passed:\n", simScriptSucced); exit(0); } - simFreeScript(script); return simScriptList[simScriptPos]; } } else { @@ -143,11 +150,11 @@ void *simExecuteScript(void *inputScript) { if (script == NULL) break; } else { SCmdLine *line = &script->lines[script->linePos]; - char *option = script->optionBuffer + line->optionOffset; + char * option = script->optionBuffer + line->optionOffset; simDebug("script:%s, line:%d with option \"%s\"", script->fileName, line->lineNum, option); SCommand *cmd = &simCmdList[line->cmdno]; - int ret = (*(cmd->executeCmd))(script, option); + int32_t ret = (*(cmd->executeCmd))(script, option); if (!ret) { script->killed = true; } From 645514c4a683f736751ab003d2c34e4d052062b6 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 12 Nov 2020 11:03:51 +0800 Subject: [PATCH 14/52] [TD-225] --- src/client/src/tscSQLParser.c | 8 +- src/query/src/qExecutor.c | 86 ++++++++++++---- src/query/src/sql.c | 6 +- src/rpc/src/rpcTcp.c | 2 +- tests/script/general/parser/sliding.sim | 131 ++++++++++++++++-------- 5 files changed, 163 insertions(+), 70 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index b3b77e576c..17ff4997b5 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -820,7 +820,7 @@ int32_t parseSlidingClause(SSqlObj* pSql, SQueryInfo* pQueryInfo, SQuerySQL* pQu const char* msg1 = "sliding value no larger than the interval value"; const char* msg2 = "sliding value can not less than 1% of interval value"; const char* msg3 = "does not support sliding when interval is natural month/year"; - const char* msg4 = "sliding not support yet in ordinary query"; +// const char* msg4 = "sliding not support yet in ordinary query"; const static int32_t INTERVAL_SLIDING_FACTOR = 100; SSqlCmd* pCmd = &pSql->cmd; @@ -856,9 +856,9 @@ int32_t parseSlidingClause(SSqlObj* pSql, SQueryInfo* pQueryInfo, SQuerySQL* pQu return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - if (pQueryInfo->interval.sliding != pQueryInfo->interval.interval && pSql->pStream == NULL) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); - } +// if (pQueryInfo->interval.sliding != pQueryInfo->interval.interval && pSql->pStream == NULL) { +// return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); +// } return TSDB_CODE_SUCCESS; } diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index f3bedb141d..17c8955352 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -170,8 +170,6 @@ static void getNextTimeWindow(SQuery* pQuery, STimeWindow* tw) { tw->ekey -= 1; } -#define GET_NEXT_TIMEWINDOW(_q, tw) getNextTimeWindow((_q), (tw)) - #define SET_STABLE_QUERY_OVER(_q) ((_q)->tableIndex = (int32_t)((_q)->tableqinfoGroupInfo.numOfTables)) #define IS_STASBLE_QUERY_OVER(_q) ((_q)->tableIndex >= (int32_t)((_q)->tableqinfoGroupInfo.numOfTables)) @@ -827,7 +825,7 @@ static int32_t getNextQualifiedWindow(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow TSKEY *primaryKeys, __block_search_fn_t searchFn, int32_t prevPosition) { SQuery *pQuery = pRuntimeEnv->pQuery; - GET_NEXT_TIMEWINDOW(pQuery, pNext); + getNextTimeWindow(pQuery, pNext); // next time window is not in current block if ((pNext->skey > 
pDataBlockInfo->window.ekey && QUERY_IS_ASC_QUERY(pQuery)) || @@ -1342,9 +1340,9 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS int32_t index = pWindowResInfo->curIndex; while (1) { - GET_NEXT_TIMEWINDOW(pQuery, &nextWin); + getNextTimeWindow(pQuery, &nextWin); if ((nextWin.skey > pQuery->window.ekey && QUERY_IS_ASC_QUERY(pQuery)) || - (nextWin.skey < pQuery->window.ekey && !QUERY_IS_ASC_QUERY(pQuery))) { + (nextWin.ekey < pQuery->window.ekey && !QUERY_IS_ASC_QUERY(pQuery))) { break; } @@ -2202,7 +2200,7 @@ static bool overlapWithTimeWindow(SQuery* pQuery, SDataBlockInfo* pBlockInfo) { } while(1) { - GET_NEXT_TIMEWINDOW(pQuery, &w); + getNextTimeWindow(pQuery, &w); if (w.skey > pBlockInfo->window.ekey) { break; } @@ -2221,7 +2219,7 @@ static bool overlapWithTimeWindow(SQuery* pQuery, SDataBlockInfo* pBlockInfo) { } while(1) { - GET_NEXT_TIMEWINDOW(pQuery, &w); + getNextTimeWindow(pQuery, &w); if (w.ekey < pBlockInfo->window.skey) { break; } @@ -2536,7 +2534,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { setQueryStatus(pQuery, QUERY_COMPLETED); } - if (QUERY_IS_INTERVAL_QUERY(pQuery) && IS_MASTER_SCAN(pRuntimeEnv)) { + if (QUERY_IS_INTERVAL_QUERY(pQuery) && (IS_MASTER_SCAN(pRuntimeEnv)|| pRuntimeEnv->scanFlag == REPEAT_SCAN)) { if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) { closeAllTimeWindow(&pRuntimeEnv->windowResInfo); pRuntimeEnv->windowResInfo.curIndex = pRuntimeEnv->windowResInfo.size - 1; // point to the last time window @@ -4354,14 +4352,17 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) { STimeWindow win = getActiveTimeWindow(pWindowResInfo, pWindowResInfo->prevSKey, pQuery); while (pQuery->limit.offset > 0) { + STimeWindow tw = win; + if ((win.ekey <= blockInfo.window.ekey && QUERY_IS_ASC_QUERY(pQuery)) || (win.ekey >= blockInfo.window.skey && !QUERY_IS_ASC_QUERY(pQuery))) { pQuery->limit.offset -= 1; pWindowResInfo->prevSKey = win.skey; - } - STimeWindow tw = win; - GET_NEXT_TIMEWINDOW(pQuery, &tw); + getNextTimeWindow(pQuery, &tw); + } else { // current window does not ended in current data block, try next data block + getNextTimeWindow(pQuery, &tw); + } if (pQuery->limit.offset == 0) { if ((tw.skey <= blockInfo.window.ekey && QUERY_IS_ASC_QUERY(pQuery)) || @@ -4414,16 +4415,60 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) { SArray * pDataBlock = tsdbRetrieveDataBlock(pRuntimeEnv->pQueryHandle, NULL); SColumnInfoData *pColInfoData = taosArrayGet(pDataBlock, 0); - tw = win; - int32_t startPos = - getNextQualifiedWindow(pRuntimeEnv, &tw, &blockInfo, pColInfoData->pData, binarySearchForKey, -1); - assert(startPos >= 0); + if ((win.ekey > blockInfo.window.ekey && QUERY_IS_ASC_QUERY(pQuery)) || + (win.ekey < blockInfo.window.skey && !QUERY_IS_ASC_QUERY(pQuery))) { + pQuery->limit.offset -= 1; + } - // set the abort info - pQuery->pos = startPos; - pTableQueryInfo->lastKey = ((TSKEY *)pColInfoData->pData)[startPos]; - pWindowResInfo->prevSKey = tw.skey; - win = tw; + if (pQuery->limit.offset == 0) { + if ((tw.skey <= blockInfo.window.ekey && QUERY_IS_ASC_QUERY(pQuery)) || + (tw.ekey >= blockInfo.window.skey && !QUERY_IS_ASC_QUERY(pQuery))) { + // load the data block and check data remaining in current data block + // TODO optimize performance + SArray * pDataBlock = tsdbRetrieveDataBlock(pRuntimeEnv->pQueryHandle, NULL); + SColumnInfoData *pColInfoData = taosArrayGet(pDataBlock, 0); + + tw = win; + int32_t startPos = + getNextQualifiedWindow(pRuntimeEnv, 
&tw, &blockInfo, pColInfoData->pData, binarySearchForKey, -1); + assert(startPos >= 0); + + // set the abort info + pQuery->pos = startPos; + + // reset the query start timestamp + pTableQueryInfo->win.skey = ((TSKEY *)pColInfoData->pData)[startPos]; + pQuery->window.skey = pTableQueryInfo->win.skey; + *start = pTableQueryInfo->win.skey; + + pWindowResInfo->prevSKey = tw.skey; + int32_t index = pRuntimeEnv->windowResInfo.curIndex; + + int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, NULL, binarySearchForKey, pDataBlock); + pRuntimeEnv->windowResInfo.curIndex = index; // restore the window index + + qDebug("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, numOfRes:%d, lastKey:%"PRId64, + GET_QINFO_ADDR(pRuntimeEnv), blockInfo.window.skey, blockInfo.window.ekey, blockInfo.rows, numOfRes, pQuery->current->lastKey); + + return true; + } else { // do nothing + *start = tw.skey; + pQuery->window.skey = tw.skey; + pWindowResInfo->prevSKey = tw.skey; + return true; + } + } else { + tw = win; + int32_t startPos = + getNextQualifiedWindow(pRuntimeEnv, &tw, &blockInfo, pColInfoData->pData, binarySearchForKey, -1); + assert(startPos >= 0); + + // set the abort info + pQuery->pos = startPos; + pTableQueryInfo->lastKey = ((TSKEY *)pColInfoData->pData)[startPos]; + pWindowResInfo->prevSKey = tw.skey; + win = tw; + } } else { break; // offset is not 0, and next time window begins or ends in the next block. } @@ -6926,7 +6971,6 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co if (IS_QUERY_KILLED(pQInfo) || Q_STATUS_EQUAL(pQuery->status, QUERY_OVER)) { // here current thread hold the refcount, so it is safe to free tsdbQueryHandle. -// doFreeQueryHandle(pQInfo); *continueExec = false; (*pRsp)->completed = 1; // notify no more result to client } else { diff --git a/src/query/src/sql.c b/src/query/src/sql.c index 9b8eb8fd41..da2c56ee9e 100644 --- a/src/query/src/sql.c +++ b/src/query/src/sql.c @@ -2628,13 +2628,13 @@ static void yy_reduce( {yymsp[1].minor.yy216.limit = -1; yymsp[1].minor.yy216.offset = 0;} break; case 177: /* limit_opt ::= LIMIT signed */ -{printf("aa1, %d\n", yymsp[0].minor.yy207); yymsp[-1].minor.yy216.limit = yymsp[0].minor.yy207; yymsp[-1].minor.yy216.offset = 0;} +{yymsp[-1].minor.yy216.limit = yymsp[0].minor.yy207; yymsp[-1].minor.yy216.offset = 0;} break; case 178: /* limit_opt ::= LIMIT signed OFFSET signed */ -{printf("aa2\n, %d\n", yymsp[-2].minor.yy207); yymsp[-3].minor.yy216.limit = yymsp[-2].minor.yy207; yymsp[-3].minor.yy216.offset = yymsp[0].minor.yy207;} +{yymsp[-3].minor.yy216.limit = yymsp[-2].minor.yy207; yymsp[-3].minor.yy216.offset = yymsp[0].minor.yy207;} break; case 179: /* limit_opt ::= LIMIT signed COMMA signed */ -{printf("aa3\n, %d\n", yymsp[-2].minor.yy207); yymsp[-3].minor.yy216.limit = yymsp[0].minor.yy207; yymsp[-3].minor.yy216.offset = yymsp[-2].minor.yy207;} +{yymsp[-3].minor.yy216.limit = yymsp[0].minor.yy207; yymsp[-3].minor.yy216.offset = yymsp[-2].minor.yy207;} break; case 181: /* slimit_opt ::= SLIMIT signed */ {yymsp[-1].minor.yy216.limit = yymsp[0].minor.yy207; yymsp[-1].minor.yy216.offset = 0;} diff --git a/src/rpc/src/rpcTcp.c b/src/rpc/src/rpcTcp.c index 7b32d3416d..bbabb5d47b 100644 --- a/src/rpc/src/rpcTcp.c +++ b/src/rpc/src/rpcTcp.c @@ -373,8 +373,8 @@ void *taosOpenTcpClientConnection(void *shandle, void *thandle, uint32_t ip, uin tDebug("%s %p TCP connection to 0x%x:%hu is created, localPort:%hu FD:%p numOfFds:%d", pThreadObj->label, thandle, ip, port, localPort, 
pFdObj, pThreadObj->numOfFds); } else { - taosCloseSocket(fd); tError("%s failed to malloc client FdObj(%s)", pThreadObj->label, strerror(errno)); + taosCloseSocket(fd); } return pFdObj; diff --git a/tests/script/general/parser/sliding.sim b/tests/script/general/parser/sliding.sim index ec0e31311a..b9fd66334d 100644 --- a/tests/script/general/parser/sliding.sim +++ b/tests/script/general/parser/sliding.sim @@ -4,6 +4,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/cfg.sh -n dnode1 -c debugFlag -v 135 system sh/cfg.sh -n dnode1 -c rpcDebugFlag -v 135 +system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 4 system sh/exec.sh -n dnode1 -s start sleep 1000 sql connect @@ -28,7 +29,7 @@ $mt = $mtPrefix . $i sql drop database if exists $db -x step1 step1: -sql create database if not exists $db maxtables 4 keep 36500 +sql create database if not exists $db keep 36500 sql use $db sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(12)) @@ -169,19 +170,20 @@ if $data94 != 90 then endi sql select count(c2),last(c4) from sliding_tb0 interval(30s) sliding(10s) order by ts asc; -if $row != 30 then +if $row != 32 then return -1 endi -if $data00 != @00-01-01 00:00:00.000@ then +if $data00 != @99-12-31 23:59:40.000@ then + print expect 12-31 23:59:40.000, actual: $data00 return -1 endi -if $data01 != 1000 then +if $data01 != 334 then return -1 endi -if $data02 != 99 then +if $data02 != 33 then return -1 endi @@ -304,11 +306,11 @@ if $data13 != 9.810708435 then endi sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc limit 100 offset 1; -if $row != 14 then +if $row != 15 then return -1 endi -if $data00 != @00-01-01 00:00:20.000@ then +if $data00 != @00-01-01 00:00:00.000@ then return -1 endi @@ -316,7 +318,7 @@ if $data01 != 1000 then return -1 endi -if $data02 != 66 then +if $data02 != 99 then return -1 endi @@ -324,7 +326,7 @@ if $data03 != 28.866070048 then return -1 endi -if $data90 != @00-01-01 00:03:20.000@ then +if $data90 != @00-01-01 00:03:00.000@ then return -1 endi @@ -332,15 +334,70 @@ if $data91 != 1000 then return -1 endi -if $data92 != 66 then +if $data92 != 99 then + return -1 +endi + +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc limit 100 offset 5; +if $row != 11 then + return -1 +endi + +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc limit 100 offset 6; +if $row != 10 then + return -1 +endi + +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc limit 100 offset 7; +if $row != 9 then + return -1 +endi + +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc limit 100 offset 8; +if $row != 8 then + return -1 +endi + +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc limit 100 offset 9; +if $row != 7 then + return -1 +endi + +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc limit 100 offset 10; +if $row != 6 then + return -1 +endi + +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc limit 100 offset 11; +if $row != 5 then + return -1 +endi + +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc 
limit 100 offset 12; +if $row != 4 then + return -1 +endi + +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc limit 100 offset 13; +if $row != 3 then return -1 endi sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc limit 100 offset 14; +if $row != 2 then + return -1 +endi + +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc limit 100 offset 15; if $row != 1 then return -1 endi +sql select count(c2),last(c4),stddev(c3) from sliding_tb0 interval(30s) sliding(20s) order by ts asc limit 100 offset 16; +if $row != 0 then + return -1 +endi + sql select count(c2),last(c4),stddev(c3),spread(c3) from sliding_tb0 where c2 = 0 interval(30s) order by ts desc; if $row != 10 then return -1 @@ -364,16 +421,26 @@ if $data03 != 0.000000000 then endi sql select count(c2),last(c4),stddev(c3),spread(c3) from sliding_tb0 where c2 = 0 interval(30s) sliding(20s) order by ts desc limit 1 offset 15; +if $row != 1 then + return -1 +endi + +sql select count(c2),last(c4),stddev(c3),spread(c3) from sliding_tb0 where c2 = 0 interval(30s) sliding(20s) order by ts desc limit 1 offset 16; if $row != 0 then return -1 endi -sql select count(*),stddev(c1),count(c1),first(c2),last(c3) from sliding_tb0 where ts>'2000-1-1 00:00:00' and ts<'2000-1-1 00:00:01.002' and c2 >= 0 interval(30s) sliding(10a) order by ts asc limit 1000; -if $row != 100 then +sql select count(c2), first(c3),stddev(c4) from sliding_tb0 interval(10a) order by ts desc limit 10 offset 2; +if $data00 != @00-01-01 00:04:59.910@ then + return -1 +endi + +sql select count(*),stddev(c1),count(c1),first(c2),last(c3) from sliding_tb0 where ts>'2000-1-1 00:00:00' and ts<'2000-1-1 00:00:01.002' and c2 >= 0 interval(30s) sliding(10s) order by ts asc limit 1000; +if $row != 3 then return -1 endi -if $data00 != @00-01-01 00:00:00.000@ then +if $data00 != @99-12-31 23:59:40.000@ then return -1 endi @@ -385,7 +452,7 @@ if $data05 != 33 then return -1 endi -if $data10 != @00-01-01 00:00:00.010@ then +if $data10 != @99-12-31 23:59:50.000@ then return -1 endi @@ -397,48 +464,28 @@ if $data15 != 33 then return -1 endi -if $data95 != 33 then +if $data25 != 33 then return -1 endi -sql select count(*),stddev(c1),count(c1),first(c2),last(c3) from sliding_tb0 where ts>'2000-1-1 00:00:00' and ts<'2000-1-1 00:00:01.002' and c2 >= 0 interval(30s) sliding(10a) order by ts desc limit 1000; -if $row != 100 then +sql select count(*),stddev(c1),count(c1),first(c2),last(c3) from sliding_tb0 where ts>'2000-1-1 00:00:00' and ts<'2000-1-1 00:00:01.002' and c2 >= 0 interval(30s) sliding(10s) order by ts desc limit 1000; +if $row != 1 then return -1 endi -if $data00 != @00-01-01 00:00:00.990@ then +if $data00 != @99-12-31 23:59:40.000@ then return -1 endi -if $data01 != 1 then +if $data01 != 33 then return -1 endi -if $data02 != 0.000000000 then +if $data02 != 9.521904571 then return -1 endi -if $data03 != 1 then - return -1 -endi - -if $data90 != @00-01-01 00:00:00.900@ then - return -1 -endi - -if $data91 != 4 then - return -1 -endi - -if $data92 != 1.118033989 then - return -1 -endi - -if $data93 != 4 then - return -1 -endi - -if $data94 != 30.00000 then +if $data03 != 33 then return -1 endi @@ -457,5 +504,7 @@ sql_error select sum(c1) from sliding_tb0 interval(0) sliding(0); sql_error select sum(c1) from sliding_tb0 interval(0m) sliding(0m); sql_error select sum(c1) from sliding_tb0 interval(m) sliding(m); sql_error select sum(c1) from 
sliding_tb0 sliding(4m); +sql_error select count(*) from sliding_tb0 interval(1s) sliding(10s); +sql_error select count(*) from sliding_tb0 interval(10s) sliding(10a); system sh/exec.sh -n dnode1 -s stop -x SIGINT From 0f63a29b58b2116210299023cf473a1c74dfcd76 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Nov 2020 11:44:28 +0800 Subject: [PATCH 15/52] [TD-225] --- src/query/src/qParserImpl.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/query/src/qParserImpl.c b/src/query/src/qParserImpl.c index 7ca2e1369a..560882d33c 100644 --- a/src/query/src/qParserImpl.c +++ b/src/query/src/qParserImpl.c @@ -355,7 +355,8 @@ SArray *tVariantListInsert(SArray *pList, tVariant *pVar, uint8_t sortOrder, int return tVariantListAppend(NULL, pVar, sortOrder); } - tVariantListItem item = {0}; + tVariantListItem item; + item.pVar = *pVar; item.sortOrder = sortOrder; From 067ee44bdb9e7a98d4b8a437890e64c2de041ac3 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Nov 2020 11:48:53 +0800 Subject: [PATCH 16/52] [TD-225] --- src/util/src/tarray.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/util/src/tarray.c b/src/util/src/tarray.c index 36b2bb9e17..bec2fac7df 100644 --- a/src/util/src/tarray.c +++ b/src/util/src/tarray.c @@ -199,7 +199,8 @@ void taosArrayDestroyEx(SArray* pArray, void (*fp)(void*)) { } if (fp == NULL) { - return taosArrayDestroy(pArray); + taosArrayDestroy(pArray); + return; } for(int32_t i = 0; i < pArray->size; ++i) { From ff928289dfb2e215e5f9556633c80e54113dc149 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Fri, 13 Nov 2020 12:04:21 +0800 Subject: [PATCH 17/52] [TD-2081]: fix alter blocks problem --- src/tsdb/src/tsdbMain.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index 2ded3b668b..a18fa6d0ef 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -294,6 +294,7 @@ STsdbCfg *tsdbGetCfg(const TSDB_REPO_T *repo) { int32_t tsdbConfigRepo(TSDB_REPO_T *repo, STsdbCfg *pCfg) { // TODO: think about multithread cases STsdbRepo *pRepo = (STsdbRepo *)repo; + STsdbCfg config = pRepo->config; STsdbCfg * pRCfg = &pRepo->config; if (tsdbCheckAndSetDefaultCfg(pCfg) < 0) return -1; @@ -308,22 +309,25 @@ int32_t tsdbConfigRepo(TSDB_REPO_T *repo, STsdbCfg *pCfg) { bool configChanged = false; if (pRCfg->compression != pCfg->compression) { tsdbAlterCompression(pRepo, pCfg->compression); + config.compression = pCfg->compression; configChanged = true; } if (pRCfg->keep != pCfg->keep) { if (tsdbAlterKeep(pRepo, pCfg->keep) < 0) { tsdbError("vgId:%d failed to configure repo when alter keep since %s", REPO_ID(pRepo), tstrerror(terrno)); + config.keep = pCfg->keep; return -1; } configChanged = true; } if (pRCfg->totalBlocks != pCfg->totalBlocks) { tsdbAlterCacheTotalBlocks(pRepo, pCfg->totalBlocks); + config.totalBlocks = pCfg->totalBlocks; configChanged = true; } if (configChanged) { - if (tsdbSaveConfig(pRepo->rootDir, &pRepo->config) < 0) { + if (tsdbSaveConfig(pRepo->rootDir, &config) < 0) { tsdbError("vgId:%d failed to configure repository while save config since %s", REPO_ID(pRepo), tstrerror(terrno)); return -1; } From 2da4b6743eee01313a1fea52cb97d6aeca0b3308 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Nov 2020 13:07:04 +0800 Subject: [PATCH 18/52] [TD-225] --- src/client/src/tscFunctionImpl.c | 16 ++++++++-------- src/inc/ttype.h | 14 +++++++------- src/query/src/qFill.c | 4 ++-- 3 files changed, 17 insertions(+), 17 deletions(-) diff 
--git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c index 7df911da65..f5a27311f2 100644 --- a/src/client/src/tscFunctionImpl.c +++ b/src/client/src/tscFunctionImpl.c @@ -2481,7 +2481,7 @@ static void percentile_function(SQLFunctionCtx *pCtx) { } double v = 0; - GET_TYPED_DATA(v, pCtx->inputType, data); + GET_TYPED_DATA(v, double, pCtx->inputType, data); if (v < GET_DOUBLE_VAL(&pInfo->minval)) { SET_DOUBLE_VAL(&pInfo->minval, v); @@ -2525,7 +2525,7 @@ static void percentile_function_f(SQLFunctionCtx *pCtx, int32_t index) { if (pInfo->stage == 0) { double v = 0; - GET_TYPED_DATA(v, pCtx->inputType, pData); + GET_TYPED_DATA(v, double, pCtx->inputType, pData); if (v < GET_DOUBLE_VAL(&pInfo->minval)) { SET_DOUBLE_VAL(&pInfo->minval, v); @@ -2616,7 +2616,7 @@ static void apercentile_function(SQLFunctionCtx *pCtx) { notNullElems += 1; double v = 0; - GET_TYPED_DATA(v, pCtx->inputType, data); + GET_TYPED_DATA(v, double, pCtx->inputType, data); tHistogramAdd(&pInfo->pHisto, v); } @@ -2641,7 +2641,7 @@ static void apercentile_function_f(SQLFunctionCtx *pCtx, int32_t index) { SAPercentileInfo *pInfo = getAPerctInfo(pCtx); double v = 0; - GET_TYPED_DATA(v, pCtx->inputType, pData); + GET_TYPED_DATA(v, double, pCtx->inputType, pData); tHistogramAdd(&pInfo->pHisto, v); @@ -4064,7 +4064,7 @@ static void rate_function(SQLFunctionCtx *pCtx) { notNullElems++; int64_t v = 0; - GET_TYPED_DATA(v, pCtx->inputType, pData); + GET_TYPED_DATA(v, int64_t, pCtx->inputType, pData); if ((INT64_MIN == pRateInfo->firstValue) || (INT64_MIN == pRateInfo->firstKey)) { pRateInfo->firstValue = v; @@ -4114,7 +4114,7 @@ static void rate_function_f(SQLFunctionCtx *pCtx, int32_t index) { TSKEY *primaryKey = pCtx->ptsList; int64_t v = 0; - GET_TYPED_DATA(v, pCtx->inputType, pData); + GET_TYPED_DATA(v, int64_t, pCtx->inputType, pData); if ((INT64_MIN == pRateInfo->firstValue) || (INT64_MIN == pRateInfo->firstKey)) { pRateInfo->firstValue = v; @@ -4241,7 +4241,7 @@ static void irate_function(SQLFunctionCtx *pCtx) { notNullElems++; int64_t v = 0; - GET_TYPED_DATA(v, pCtx->inputType, pData); + GET_TYPED_DATA(v, int64_t, pCtx->inputType, pData); // TODO: calc once if only call this function once ???? 
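    // GET_TYPED_DATA(v, int64_t, pCtx->inputType, pData) switches on the column type,
    // reads the value from pData and casts it to int64_t; the explicit destination type
    // is the new second macro argument (see the updated GET_TYPED_DATA in src/inc/ttype.h
    // later in this patch).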
if ((INT64_MIN == pRateInfo->lastKey) || (INT64_MIN == pRateInfo->lastValue)) { @@ -4286,7 +4286,7 @@ static void irate_function_f(SQLFunctionCtx *pCtx, int32_t index) { TSKEY *primaryKey = pCtx->ptsList; int64_t v = 0; - GET_TYPED_DATA(v, pCtx->inputType, pData); + GET_TYPED_DATA(v, int64_t, pCtx->inputType, pData); pRateInfo->firstKey = pRateInfo->lastKey; pRateInfo->firstValue = pRateInfo->lastValue; diff --git a/src/inc/ttype.h b/src/inc/ttype.h index caa7c58f40..7d5779c43f 100644 --- a/src/inc/ttype.h +++ b/src/inc/ttype.h @@ -7,25 +7,25 @@ extern "C" { #include "taosdef.h" -#define GET_TYPED_DATA(_v, _type, _data) \ +#define GET_TYPED_DATA(_v, _finalType, _type, _data) \ switch (_type) { \ case TSDB_DATA_TYPE_TINYINT: \ - (_v) = GET_INT8_VAL(_data); \ + (_v) = (_finalType)GET_INT8_VAL(_data); \ break; \ case TSDB_DATA_TYPE_SMALLINT: \ - (_v) = GET_INT16_VAL(_data); \ + (_v) = (_finalType)GET_INT16_VAL(_data); \ break; \ case TSDB_DATA_TYPE_BIGINT: \ - (_v) = (GET_INT64_VAL(_data)); \ + (_v) = (_finalType)(GET_INT64_VAL(_data)); \ break; \ case TSDB_DATA_TYPE_FLOAT: \ - (_v) = GET_FLOAT_VAL(_data); \ + (_v) = (_finalType)GET_FLOAT_VAL(_data); \ break; \ case TSDB_DATA_TYPE_DOUBLE: \ - (_v) = GET_DOUBLE_VAL(_data); \ + (_v) = (_finalType)GET_DOUBLE_VAL(_data); \ break; \ default: \ - (_v) = GET_INT32_VAL(_data); \ + (_v) = (_finalType)GET_INT32_VAL(_data); \ break; \ }; diff --git a/src/query/src/qFill.c b/src/query/src/qFill.c index 8560bef35d..8a5efafced 100644 --- a/src/query/src/qFill.c +++ b/src/query/src/qFill.c @@ -237,8 +237,8 @@ int32_t taosGetLinearInterpolationVal(int32_t type, SPoint* point1, SPoint* poin double v1 = -1; double v2 = -1; - GET_TYPED_DATA(v1, type, point1->val); - GET_TYPED_DATA(v2, type, point2->val); + GET_TYPED_DATA(v1, double, type, point1->val); + GET_TYPED_DATA(v2, double, type, point2->val); double r = DO_INTERPOLATION(v1, v2, point1->key, point2->key, point->key); From 4cea4359405791ab97dd28a3dd6b36f4153692ba Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Nov 2020 13:17:56 +0800 Subject: [PATCH 19/52] [TD-225] --- src/query/src/qFill.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/query/src/qFill.c b/src/query/src/qFill.c index 8a5efafced..1764289219 100644 --- a/src/query/src/qFill.c +++ b/src/query/src/qFill.c @@ -494,7 +494,7 @@ int64_t taosFillResultDataBlock(SFillInfo* pFillInfo, tFilePage** output, int32_ if (remain == 0) { fillExternalResults(pFillInfo, output, numOfRes); } else { - fillResultImpl(pFillInfo, output, numOfRes); + fillResultImpl(pFillInfo, output, (int32_t) numOfRes); assert(numOfRes == pFillInfo->numOfCurrent); } From 8801d354e153fd87f45b173d60467d45f5849fad Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Nov 2020 13:34:28 +0800 Subject: [PATCH 20/52] [TD-225] --- src/query/src/qParserImpl.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/query/src/qParserImpl.c b/src/query/src/qParserImpl.c index 560882d33c..e594f8d956 100644 --- a/src/query/src/qParserImpl.c +++ b/src/query/src/qParserImpl.c @@ -388,8 +388,6 @@ void tSQLSetColumnInfo(TAOS_FIELD *pField, SStrToken *pName, TAOS_FIELD *pType) void tSQLSetColumnType(TAOS_FIELD *pField, SStrToken *type) { pField->type = -1; - int32_t LENGTH_SIZE_OF_STR = 2; // in case of nchar and binary, there two bytes to keep the length of binary|nchar. 
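  // The length prefix for binary/nchar columns is now taken from the shared
  // VARSTR_HEADER_SIZE macro instead of this local constant, and the computed byte
  // width is cast to int16_t before being stored in pField->bytes (see the changed
  // assignments further down in this hunk).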
- for (int8_t i = 0; i < tListLen(tDataTypeDesc); ++i) { if ((strncasecmp(type->z, tDataTypeDesc[i].aName, tDataTypeDesc[i].nameLen) == 0) && (type->n == tDataTypeDesc[i].nameLen)) { @@ -405,14 +403,14 @@ void tSQLSetColumnType(TAOS_FIELD *pField, SStrToken *type) { if (type->type == 0) { pField->bytes = 0; } else { - pField->bytes = -(int32_t)type->type * TSDB_NCHAR_SIZE + LENGTH_SIZE_OF_STR; + pField->bytes = (int16_t)(-(int32_t)type->type * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE); } } else if (i == TSDB_DATA_TYPE_BINARY) { /* for binary, the TOKENTYPE is the length of binary */ if (type->type == 0) { pField->bytes = 0; } else { - pField->bytes = -(int32_t) type->type + LENGTH_SIZE_OF_STR; + pField->bytes = (int16_t) (-(int32_t) type->type + VARSTR_HEADER_SIZE); } } break; From e59ee37c850b54962ad97d918570416f12bbaba6 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Nov 2020 13:36:42 +0800 Subject: [PATCH 21/52] [TD-1955]: fix bugs in refresh threads --- src/util/src/tcache.c | 4 +- tests/examples/c/asyncdemo.c | 14 +++++- tests/pytest/fulltest.sh | 2 +- tests/pytest/tools/insert.json | 50 +++++++++++++++++++ tests/pytest/tools/lowa.py | 66 ++++++++++++++++++++++++++ tests/script/general/wal/maxtables.sim | 46 ++++++++++++++++++ tests/script/jenkins/basic.txt | 1 + 7 files changed, 178 insertions(+), 5 deletions(-) create mode 100644 tests/pytest/tools/insert.json create mode 100644 tests/pytest/tools/lowa.py create mode 100644 tests/script/general/wal/maxtables.sim diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 9bdaf73700..c3579b23f2 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -658,8 +658,7 @@ void* taosCacheTimedRefresh(void *handle) { int64_t count = 0; while(1) { - taosMsleep(500); - + usleep(500*1000); // check if current cache object will be deleted every 500ms. 
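    // The wait between scans is still 500 ms; only the sleep primitive changed from
    // taosMsleep(500) to usleep(500*1000). PATCH 24/52 later in this series makes
    // usleep() Linux-only and restores taosMsleep() on other platforms.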
if (pCacheObj->deleting) { uDebug("%s refresh threads quit", pCacheObj->name); @@ -677,6 +676,7 @@ void* taosCacheTimedRefresh(void *handle) { continue; } + uDebug("%s refresh thread timed scan", pCacheObj->name); pCacheObj->statistics.refreshCount++; // refresh data in hash table diff --git a/tests/examples/c/asyncdemo.c b/tests/examples/c/asyncdemo.c index 225c4f7541..c6cc89b31d 100644 --- a/tests/examples/c/asyncdemo.c +++ b/tests/examples/c/asyncdemo.c @@ -68,6 +68,7 @@ static void queryDB(TAOS *taos, char *command) { fprintf(stderr, "Failed to run %s, reason: %s\n", command, taos_errstr(pSql)); taos_free_result(pSql); taos_close(taos); + taos_cleanup(); exit(EXIT_FAILURE); } @@ -176,6 +177,7 @@ void taos_error(TAOS *con) { fprintf(stderr, "TDengine error: %s\n", taos_errstr(con)); taos_close(con); + taos_cleanup(); exit(1); } @@ -211,6 +213,8 @@ void taos_insert_call_back(void *param, TAOS_RES *tres, int code) printf("%lld mseconds to insert %d data points\n", (et - st) / 1000, points*numOfTables); } } + + taos_free_result(tres); } void taos_retrieve_call_back(void *param, TAOS_RES *tres, int numOfRows) @@ -222,7 +226,7 @@ void taos_retrieve_call_back(void *param, TAOS_RES *tres, int numOfRows) for (int i = 0; iname, numOfRows); - taos_free_result(tres); + //taos_free_result(tres); printf("%d rows data retrieved from %s\n", pTable->rowsRetrieved, pTable->name); tablesProcessed++; @@ -246,6 +250,8 @@ void taos_retrieve_call_back(void *param, TAOS_RES *tres, int numOfRows) printf("%lld mseconds to query %d data rows\n", (et - st) / 1000, points * numOfTables); } } + + taos_free_result(tres); } void taos_select_call_back(void *param, TAOS_RES *tres, int code) @@ -261,6 +267,10 @@ void taos_select_call_back(void *param, TAOS_RES *tres, int code) } else { printf("%s select failed, code:%d\n", pTable->name, code); + taos_free_result(tres); + taos_cleanup(); exit(1); } + + taos_free_result(tres); } diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index a48dbdc480..15cadf38e7 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -154,7 +154,7 @@ python3 ./test.py -f query/queryConnection.py python3 ./test.py -f query/queryCountCSVData.py python3 ./test.py -f query/natualInterval.py python3 ./test.py -f query/bug1471.py -python3 ./test.py -f query/dataLossTest.py +#python3 ./test.py -f query/dataLossTest.py #stream python3 ./test.py -f stream/metric_1.py diff --git a/tests/pytest/tools/insert.json b/tests/pytest/tools/insert.json new file mode 100644 index 0000000000..c3fa78076b --- /dev/null +++ b/tests/pytest/tools/insert.json @@ -0,0 +1,50 @@ +{ + "filetype":"insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 1, + "databases": [{ + "dbinfo": { + "name": "db01", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "update": 0, + "maxtablesPerVnode": 1000 + }, + "super_tables": [{ + "name": "stb01", + "childtable_count": 100, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "data_source": "rand", + "insert_mode": "taosc", + "insert_rate": 0, + "insert_rows": 1000, + "timestamp_step": 1000, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "/home/data/sample.csv", + "tags_file": "", + "columns": [{ + "type": "SMALLINT" + }, { + "type": "BOOL" + }, { + "type": "BINARY", + "len": 6 + }], + "tags": [{ + "type": "INT" + },{ + "type": "BINARY", + "len": 4 + }] + }] + }] +} diff --git 
a/tests/pytest/tools/lowa.py b/tests/pytest/tools/lowa.py new file mode 100644 index 0000000000..523229dd46 --- /dev/null +++ b/tests/pytest/tools/lowa.py @@ -0,0 +1,66 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.numberOfTables = 10000 + self.numberOfRecords = 100 + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + tdSql.prepare() + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + os.system("yes | %slowa -f tools/insert.json" % binPath) + + tdSql.execute("use db01") + tdSql.query("select count(*) from stb01") + tdSql.checkData(0, 0, 100000) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/script/general/wal/maxtables.sim b/tests/script/general/wal/maxtables.sim new file mode 100644 index 0000000000..e504c7e92e --- /dev/null +++ b/tests/script/general/wal/maxtables.sim @@ -0,0 +1,46 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 100 +system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 1 +system sh/cfg.sh -n dnode1 -c tableIncStepPerVnode -v 2 + + +print ============== deploy +system sh/exec.sh -n dnode1 -s start +sleep 3001 +sql connect + +sql create database d1 +sql use d1 +sql create table st (ts timestamp, tbcol int) TAGS(tgcol int) + +$i = 0 +while $i < 100 + $tb = t . 
$i + sql create table $tb using st tags( $i ) + sql insert into $tb values (now , $i ) + $i = $i + 1 +endw + +sql_error sql create table tt (ts timestamp, i int) + +print =============== step3 +sql select * from st; +if $rows != 100 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4 +sleep 3000 + +print =============== step4 +system sh/exec.sh -n dnode1 -s start +sleep 3000 + +sql select * from st; +if $rows != 100 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 2a84172da9..daf92679bd 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -238,6 +238,7 @@ cd ../../../debug; make ./test.sh -f general/wal/sync.sim ./test.sh -f general/wal/kill.sim +./test.sh -f general/wal/maxtables.sim ./test.sh -f unique/account/account_create.sim ./test.sh -f unique/account/account_delete.sim From 29545fa5601e30f7bb82d589f647bffeeab6632f Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Nov 2020 13:46:33 +0800 Subject: [PATCH 22/52] [TD-225] --- src/query/src/qParserImpl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/query/src/qParserImpl.c b/src/query/src/qParserImpl.c index e594f8d956..8b3f3550b8 100644 --- a/src/query/src/qParserImpl.c +++ b/src/query/src/qParserImpl.c @@ -317,7 +317,7 @@ SArray *tVariantListAppendToken(SArray *pList, SStrToken *pToken, uint8_t order) } if (pToken) { - tVariantListItem item = {0}; + tVariantListItem item; tVariantCreate(&item.pVar, pToken); item.sortOrder = order; From e2125102c806f35cf1784f4183ffb5aea72ec08e Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Nov 2020 14:01:36 +0800 Subject: [PATCH 23/52] [TD-225] --- src/query/src/qParserImpl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/query/src/qParserImpl.c b/src/query/src/qParserImpl.c index 8b3f3550b8..9edcdb0083 100644 --- a/src/query/src/qParserImpl.c +++ b/src/query/src/qParserImpl.c @@ -342,7 +342,7 @@ SArray *tVariantListAppend(SArray *pList, tVariant *pVar, uint8_t sortOrder) { * * Otherwise, the original pointer may be lost, which causes memory leak. */ - tVariantListItem item = {0}; + tVariantListItem item; item.pVar = *pVar; item.sortOrder = sortOrder; From b3d582bb45a11075dbe129c59c7a00535f40095f Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Nov 2020 15:04:16 +0800 Subject: [PATCH 24/52] [TD-225] --- src/util/src/tcache.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index c3579b23f2..2571f11ba4 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -658,7 +658,12 @@ void* taosCacheTimedRefresh(void *handle) { int64_t count = 0; while(1) { +#if defined LINUX usleep(500*1000); +#else + taosMsleep(500); +#endif + // check if current cache object will be deleted every 500ms. 
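    // usleep() is POSIX-specific, so the Linux build sleeps with usleep(500*1000) while
    // other builds fall back to taosMsleep(500); both variants pause roughly 500 ms
    // between refresh scans.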
if (pCacheObj->deleting) { uDebug("%s refresh threads quit", pCacheObj->name); From 68078a6cab5970e4858b384d81cd16162bef3b74 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 13 Nov 2020 08:12:11 +0000 Subject: [PATCH 25/52] TD-2086 --- src/sync/src/syncMain.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index 8d90315c48..11fddfa3b5 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -498,7 +498,7 @@ static SSyncPeer *syncAddPeer(SSyncNode *pNode, const SNodeInfo *pInfo) { int32_t ret = strcmp(pPeer->fqdn, tsNodeFqdn); if (pPeer->nodeId == 0 || (ret > 0) || (ret == 0 && pPeer->port > tsSyncPort)) { int32_t checkMs = 100 + (pNode->vgId * 10) % 100; - if (pNode->vgId > 1) checkMs = tsStatusInterval * 2000 + checkMs; + if (pNode->vgId > 1) checkMs = tsStatusInterval * 1000 + checkMs; sDebug("%s, start to check peer connection after %d ms", pPeer->id, checkMs); taosTmrReset(syncCheckPeerConnection, checkMs, pPeer, tsSyncTmrCtrl, &pPeer->timer); } @@ -575,6 +575,15 @@ static void syncChooseMaster(SSyncNode *pNode) { if (index == pNode->selfIndex) { sInfo("vgId:%d, start to work as master", pNode->vgId); nodeRole = TAOS_SYNC_ROLE_MASTER; + + for (int32_t i = 0; i < pNode->replica; ++i) { + pPeer = pNode->peerInfo[i]; + if (pPeer->version == nodeVersion) { + pPeer->role = TAOS_SYNC_ROLE_SLAVE; + sInfo("%s, it shall work as slave", pPeer->id); + } + } + syncResetFlowCtrl(pNode); (*pNode->notifyRole)(pNode->ahandle, nodeRole); } else { @@ -1209,7 +1218,7 @@ static int32_t syncForwardToPeerImpl(SSyncNode *pNode, void *data, void *mhandle int32_t fwdLen; int32_t code = 0; - if (nodeRole == TAOS_SYNC_ROLE_SLAVE && pWalHead->version != nodeVersion + 1) { + if (nodeRole == TAOS_SYNC_ROLE_SLAVE && pWalHead->version > nodeVersion + 1) { sError("vgId:%d, received ver:%" PRIu64 ", inconsistent with last ver:%" PRIu64 ", restart connection", pNode->vgId, pWalHead->version, nodeVersion); for (int32_t i = 0; i < pNode->replica; ++i) { From 141cc3d1888cd9dfba4eb5250c714d93822059da Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 13 Nov 2020 08:23:32 +0000 Subject: [PATCH 26/52] TD-1948 --- src/dnode/src/dnodeVWrite.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/dnode/src/dnodeVWrite.c b/src/dnode/src/dnodeVWrite.c index e0345eb1f6..bd807556e0 100644 --- a/src/dnode/src/dnodeVWrite.c +++ b/src/dnode/src/dnodeVWrite.c @@ -212,7 +212,7 @@ static void *dnodeProcessVWriteQueue(void *param) { pWrite->code = vnodeProcessWrite(pVnode, pWrite->pHead, qtype, &pWrite->rspRet); if (pWrite->code <= 0) pWrite->processedCount = 1; - if (pWrite->pHead->msgType != TSDB_MSG_TYPE_SUBMIT) forceFsync = true; + if (pWrite->code == 0 && pWrite->pHead->msgType != TSDB_MSG_TYPE_SUBMIT) forceFsync = true; dTrace("msg:%p is processed in vwrite queue, result:%s", pWrite, tstrerror(pWrite->code)); } From 4e42b8cf5e49dfffebbf28c284cf2d1d3038f7ae Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 13 Nov 2020 08:46:02 +0000 Subject: [PATCH 27/52] TD-1948 --- src/sync/src/syncMain.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index 11fddfa3b5..ae177fb938 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -580,6 +580,7 @@ static void syncChooseMaster(SSyncNode *pNode) { pPeer = pNode->peerInfo[i]; if (pPeer->version == nodeVersion) { pPeer->role = TAOS_SYNC_ROLE_SLAVE; + pPeer->sstatus = TAOS_SYNC_STATUS_CACHE; 
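        // a peer whose data version already matches the new master is marked as a slave
        // and its sync status is reset to TAOS_SYNC_STATUS_CACHE.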
sInfo("%s, it shall work as slave", pPeer->id); } } From 150d6c8f3a2e1fc722201b27890e2225c4fd7b73 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 13 Nov 2020 17:06:42 +0800 Subject: [PATCH 28/52] compile error in windows --- src/client/src/tscSQLParser.c | 8 ++++---- src/client/src/tscUtil.c | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 17ff4997b5..c13de00136 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -2830,7 +2830,7 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd) pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES); } - pQueryInfo->groupbyExpr.numOfGroupCols = taosArrayGetSize(pList); + pQueryInfo->groupbyExpr.numOfGroupCols = (int16_t)taosArrayGetSize(pList); if (pQueryInfo->groupbyExpr.numOfGroupCols > TSDB_MAX_TAGS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } @@ -4471,7 +4471,7 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery } int32_t startPos = 1; - int32_t numOfFillVal = num - 1; + int32_t numOfFillVal = (int32_t)(num - 1); /* for point interpolation query, we do not have the timestamp column */ if (tscIsPointInterpQuery(pQueryInfo)) { @@ -4481,7 +4481,7 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery numOfFillVal = (int32_t)size; } } else { - numOfFillVal = (num > (int32_t)size) ? (int32_t)size : num; + numOfFillVal = (int16_t)((num > (int32_t)size) ? (int32_t)size : num); } int32_t j = 1; @@ -6078,7 +6078,7 @@ int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSqlInfo* p tscFieldInfoAppend(&pQueryInfo->fieldsInfo, p); } - pCmd->count = numOfTags; + pCmd->count =(int32_t) numOfTags; } return TSDB_CODE_SUCCESS; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index c329571e06..4f04a82549 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -71,7 +71,7 @@ void tsSetSTableQueryCond(STagCond* pTagCond, uint64_t uid, SBufferWriter* bw) { } bool tscQueryTags(SQueryInfo* pQueryInfo) { - int32_t numOfCols = tscSqlExprNumOfExprs(pQueryInfo); + int32_t numOfCols = (int32_t)(pQueryInfo); for (int32_t i = 0; i < numOfCols; ++i) { SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); int32_t functId = pExpr->functionId; From 3a9b5392134dfa7bc9f6d96a3ef4ac8dc1896e51 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 13 Nov 2020 09:44:22 +0000 Subject: [PATCH 29/52] TD-1948 --- src/sync/src/syncMain.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index 8d90315c48..766dbaf174 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -1209,13 +1209,16 @@ static int32_t syncForwardToPeerImpl(SSyncNode *pNode, void *data, void *mhandle int32_t fwdLen; int32_t code = 0; - if (nodeRole == TAOS_SYNC_ROLE_SLAVE && pWalHead->version != nodeVersion + 1) { - sError("vgId:%d, received ver:%" PRIu64 ", inconsistent with last ver:%" PRIu64 ", restart connection", pNode->vgId, - pWalHead->version, nodeVersion); - for (int32_t i = 0; i < pNode->replica; ++i) { - pPeer = pNode->peerInfo[i]; - syncRestartConnection(pPeer); + if (pWalHead->version > nodeVersion + 1) { + sError("vgId:%d, hver:%" PRIu64 ", inconsistent with ver:%" PRIu64, pNode->vgId, pWalHead->version, nodeVersion); + if (nodeRole == TAOS_SYNC_ROLE_SLAVE) { + sInfo("vgId:%d, restart connection", pNode->vgId); + for (int32_t i = 0; i < 
pNode->replica; ++i) { + pPeer = pNode->peerInfo[i]; + syncRestartConnection(pPeer); + } } + return TSDB_CODE_SYN_INVALID_VERSION; } From d5cd0ae9cb3210f5a8fd5e5520c19cb873cbe220 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 13 Nov 2020 10:03:24 +0000 Subject: [PATCH 30/52] TD-1948 --- src/sync/src/syncMain.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index adc420f376..44f66bc70c 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -576,6 +576,7 @@ static void syncChooseMaster(SSyncNode *pNode) { sInfo("vgId:%d, start to work as master", pNode->vgId); nodeRole = TAOS_SYNC_ROLE_MASTER; +#if 0 for (int32_t i = 0; i < pNode->replica; ++i) { pPeer = pNode->peerInfo[i]; if (pPeer->version == nodeVersion) { @@ -584,7 +585,7 @@ static void syncChooseMaster(SSyncNode *pNode) { sInfo("%s, it shall work as slave", pPeer->id); } } - +#endif syncResetFlowCtrl(pNode); (*pNode->notifyRole)(pNode->ahandle, nodeRole); } else { From 98837549f389798dcf9c8f96d22f9d08a0b33d97 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Fri, 13 Nov 2020 10:54:43 +0000 Subject: [PATCH 31/52] simple change --- src/tsdb/inc/tsdbMain.h | 13 ++++++ src/tsdb/src/tsdbMain.c | 74 --------------------------------- src/tsdb/src/tsdbMemTable.c | 83 +++++++++++++++++++++++++++++++++++++ 3 files changed, 96 insertions(+), 74 deletions(-) diff --git a/src/tsdb/inc/tsdbMain.h b/src/tsdb/inc/tsdbMain.h index 0962e7c2cb..6d5f106d20 100644 --- a/src/tsdb/inc/tsdbMain.h +++ b/src/tsdb/inc/tsdbMain.h @@ -208,6 +208,18 @@ typedef struct { } SFileGroupIter; // ------------------ tsdbMain.c +typedef struct { + int32_t totalLen; + int32_t len; + SDataRow row; +} SSubmitBlkIter; + +typedef struct { + int32_t totalLen; + int32_t len; + void * pMsg; +} SSubmitMsgIter; + typedef struct { int8_t state; @@ -430,6 +442,7 @@ void tsdbCloseBufPool(STsdbRepo* pRepo); SListNode* tsdbAllocBufBlockFromPool(STsdbRepo* pRepo); // ------------------ tsdbMemTable.c +int tsdbInsertDataToTable(STsdbRepo* pRepo, SSubmitBlk* pBlock, TSKEY now, int32_t* affectedrows); int tsdbUpdateRowInMem(STsdbRepo* pRepo, SDataRow row, STable* pTable); int tsdbRefMemTable(STsdbRepo* pRepo, SMemTable* pMemTable); int tsdbUnRefMemTable(STsdbRepo* pRepo, SMemTable* pMemTable); diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index 2ded3b668b..3bf5c830bd 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -32,18 +32,6 @@ #define TSDB_DEFAULT_COMPRESSION TWO_STAGE_COMP #define IS_VALID_COMPRESSION(compression) (((compression) >= NO_COMPRESSION) && ((compression) <= TWO_STAGE_COMP)) -typedef struct { - int32_t totalLen; - int32_t len; - SDataRow row; -} SSubmitBlkIter; - -typedef struct { - int32_t totalLen; - int32_t len; - void * pMsg; -} SSubmitMsgIter; - static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg); static int32_t tsdbSetRepoEnv(char *rootDir, STsdbCfg *pCfg); static int32_t tsdbUnsetRepoEnv(char *rootDir); @@ -53,11 +41,8 @@ static char * tsdbGetCfgFname(char *rootDir); static STsdbRepo * tsdbNewRepo(char *rootDir, STsdbAppH *pAppH, STsdbCfg *pCfg); static void tsdbFreeRepo(STsdbRepo *pRepo); static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter); -static int32_t tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, TSKEY now, int32_t *affectedrows); static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock); -static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter); static int 
tsdbRestoreInfo(STsdbRepo *pRepo); -static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter); static void tsdbAlterCompression(STsdbRepo *pRepo, int8_t compression); static int tsdbAlterKeep(STsdbRepo *pRepo, int32_t keep); static int tsdbAlterCacheTotalBlocks(STsdbRepo *pRepo, int totalBlocks); @@ -748,43 +733,6 @@ static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter) { return 0; } -static int32_t tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, TSKEY now, int32_t *affectedrows) { - STsdbMeta *pMeta = pRepo->tsdbMeta; - int64_t points = 0; - - ASSERT(pBlock->tid < pMeta->maxTables); - STable *pTable = pMeta->tables[pBlock->tid]; - ASSERT(pTable != NULL && TABLE_UID(pTable) == pBlock->uid); - - SSubmitBlkIter blkIter = {0}; - SDataRow row = NULL; - - TSKEY minKey = now - tsMsPerDay[pRepo->config.precision] * pRepo->config.keep; - TSKEY maxKey = now + tsMsPerDay[pRepo->config.precision] * pRepo->config.daysPerFile; - - tsdbInitSubmitBlkIter(pBlock, &blkIter); - while ((row = tsdbGetSubmitBlkNext(&blkIter)) != NULL) { - if (dataRowKey(row) < minKey || dataRowKey(row) > maxKey) { - tsdbError("vgId:%d table %s tid %d uid %" PRIu64 " timestamp is out of range! now %" PRId64 " minKey %" PRId64 - " maxKey %" PRId64, - REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), now, minKey, maxKey); - terrno = TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE; - return -1; - } - - if (tsdbUpdateRowInMem(pRepo, row, pTable) < 0) return -1; - - (*affectedrows)++; - points++; - } - - STSchema *pSchema = tsdbGetTableSchemaByVersion(pTable, pBlock->sversion); - pRepo->stat.pointsWritten += points * schemaNCols(pSchema); - pRepo->stat.totalStorage += points * schemaVLen(pSchema); - - return 0; -} - static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock) { if (pIter->len == 0) { pIter->len += TSDB_SUBMIT_MSG_HEAD_SIZE; @@ -804,20 +752,6 @@ static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock) { return 0; } -static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter) { - SDataRow row = pIter->row; - if (row == NULL) return NULL; - - pIter->len += dataRowLen(row); - if (pIter->len >= pIter->totalLen) { - pIter->row = NULL; - } else { - pIter->row = (char *)row + dataRowLen(row); - } - - return row; -} - static int tsdbRestoreInfo(STsdbRepo *pRepo) { STsdbMeta * pMeta = pRepo->tsdbMeta; STsdbFileH *pFileH = pRepo->tsdbFileH; @@ -851,14 +785,6 @@ _err: return -1; } -static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter) { - if (pBlock->dataLen <= 0) return -1; - pIter->totalLen = pBlock->dataLen; - pIter->len = 0; - pIter->row = (SDataRow)(pBlock->data+pBlock->schemaLen); - return 0; -} - static void tsdbAlterCompression(STsdbRepo *pRepo, int8_t compression) { int8_t ocompression = pRepo->config.compression; pRepo->config.compression = compression; diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index 021c152260..ff64496f27 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -32,8 +32,55 @@ static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo); static void tsdbDestroyCommitIters(SCommitIter *iters, int maxTables); static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables); static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, SDataRow row); +static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter); +static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter); + +static 
FORCE_INLINE int tsdbCheckRowRange(STsdbRepo *pRepo, STable *pTable, SDataRow row, TSKEY minKey, TSKEY maxKey, + TSKEY now); // ---------------- INTERNAL FUNCTIONS ---------------- +int tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, TSKEY now, int32_t *affectedrows) { + STsdbMeta * pMeta = pRepo->tsdbMeta; + int64_t points = 0; + STable * pTable = NULL; + SSubmitBlkIter blkIter = {0}; + SDataRow row = NULL; + TSKEY minKey = 0; + TSKEY maxKey = 0; + void ** pData = NULL; + // int rowCounter = 0; + + ASSERT(pBlock->tid < pMeta->maxTables); + pTable = pMeta->tables[pBlock->tid]; + ASSERT(pTable != NULL && TABLE_UID(pTable) == pBlock->uid); + + minKey = now - tsMsPerDay[pRepo->config.precision] * pRepo->config.keep; + maxKey = now + tsMsPerDay[pRepo->config.precision] * pRepo->config.daysPerFile; + + pData = (void **)calloc(pBlock->numOfRows, sizeof(void *)); + if (pData == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + return -1; + } + + tsdbInitSubmitBlkIter(pBlock, &blkIter); + + while ((row = tsdbGetSubmitBlkNext(&blkIter)) != NULL) { + if (tsdbCheckRowRange(pRepo, pTable, row, minKey, maxKey, now) < 0) return -1; + + if (tsdbUpdateRowInMem(pRepo, row, pTable) < 0) return -1; + + (*affectedrows)++; + points++; + } + + STSchema *pSchema = tsdbGetTableSchemaByVersion(pTable, pBlock->sversion); + pRepo->stat.pointsWritten += points * schemaNCols(pSchema); + pRepo->stat.totalStorage += points * schemaVLen(pSchema); + + return 0; +} + int tsdbUpdateRowInMem(STsdbRepo *pRepo, SDataRow row, STable *pTable) { STsdbCfg * pCfg = &pRepo->config; STsdbMeta * pMeta = pRepo->tsdbMeta; @@ -847,5 +894,41 @@ static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema * tdAppendDataRowToDataCol(row, *ppSchema, pCols); } + return 0; +} + +static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter) { + if (pBlock->dataLen <= 0) return -1; + pIter->totalLen = pBlock->dataLen; + pIter->len = 0; + pIter->row = (SDataRow)(pBlock->data+pBlock->schemaLen); + return 0; +} + +static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter) { + SDataRow row = pIter->row; + if (row == NULL) return NULL; + + pIter->len += dataRowLen(row); + if (pIter->len >= pIter->totalLen) { + pIter->row = NULL; + } else { + pIter->row = (char *)row + dataRowLen(row); + } + + return row; +} + +static FORCE_INLINE int tsdbCheckRowRange(STsdbRepo *pRepo, STable *pTable, SDataRow row, TSKEY minKey, TSKEY maxKey, + TSKEY now) { + if (dataRowKey(row) < minKey || dataRowKey(row) > maxKey) { + tsdbError("vgId:%d table %s tid %d uid %" PRIu64 " timestamp is out of range! 
now %" PRId64 " minKey %" PRId64 + " maxKey %" PRId64 " row key %" PRId64, + REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), now, minKey, maxKey, + dataRowKey(row)); + terrno = TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE; + return -1; + } + return 0; } \ No newline at end of file From 66f104bbfa09298ac1cf8e3f8517ee854141bdf6 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 13 Nov 2020 11:08:59 +0000 Subject: [PATCH 32/52] TD-1982 definite lost while forward msg --- src/dnode/src/dnodeVWrite.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/dnode/src/dnodeVWrite.c b/src/dnode/src/dnodeVWrite.c index bd807556e0..9d1d16e51e 100644 --- a/src/dnode/src/dnodeVWrite.c +++ b/src/dnode/src/dnodeVWrite.c @@ -225,11 +225,13 @@ static void *dnodeProcessVWriteQueue(void *param) { taosGetQitem(pWorker->qall, &qtype, (void **)&pWrite); if (qtype == TAOS_QTYPE_RPC) { dnodeSendRpcVWriteRsp(pVnode, pWrite, pWrite->code); - } else if (qtype == TAOS_QTYPE_FWD) { - vnodeConfirmForward(pVnode, pWrite->pHead->version, 0); - taosFreeQitem(pWrite); - vnodeRelease(pVnode); } else { + if (qtype == TAOS_QTYPE_FWD) { + vnodeConfirmForward(pVnode, pWrite->pHead->version, 0); + } + if (pWrite->rspRet.rsp) { + rpcFreeCont(pWrite->rspRet.rsp); + } taosFreeQitem(pWrite); vnodeRelease(pVnode); } From 98e3431eaf58fe1de7f5d989bc149501a5dc691c Mon Sep 17 00:00:00 2001 From: Jeff Tao Date: Fri, 13 Nov 2020 14:07:59 +0000 Subject: [PATCH 33/52] taosAcquireRef return a pointer, not an integer --- src/sync/src/syncMain.c | 2 +- src/util/tests/trefTest.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index 8d90315c48..4b51c770e7 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -1097,7 +1097,7 @@ static void syncProcessBrokenLink(void *param) { SSyncPeer *pPeer = param; SSyncNode *pNode = pPeer->pSyncNode; - if (taosAcquireRef(tsSyncRefId, pNode->rid) < 0) return; + if (taosAcquireRef(tsSyncRefId, pNode->rid) == NULL) return; pthread_mutex_lock(&(pNode->mutex)); sDebug("%s, TCP link is broken(%s)", pPeer->id, strerror(errno)); diff --git a/src/util/tests/trefTest.c b/src/util/tests/trefTest.c index 6887b24abd..454860410b 100644 --- a/src/util/tests/trefTest.c +++ b/src/util/tests/trefTest.c @@ -77,8 +77,8 @@ void *acquireRelease(void *param) { printf("a"); id = random() % pSpace->refNum; - code = taosAcquireRef(pSpace->rsetId, pSpace->p[id]); - if (code >= 0) { + void *p = taosAcquireRef(pSpace->rsetId, pSpace->p[id]); + if (p) { usleep(id % 5 + 1); taosReleaseRef(pSpace->rsetId, pSpace->p[id]); } From 4c56bf4e26ddc33a730b98c9743881b975a83d83 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Sat, 14 Nov 2020 10:09:04 +0800 Subject: [PATCH 34/52] TD-2087 --- src/vnode/src/vnodeMain.c | 15 +++++++++++++-- src/wal/src/walMgmt.c | 8 -------- src/wal/src/walWrite.c | 4 ++++ 3 files changed, 17 insertions(+), 10 deletions(-) diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index 021c392c52..3dd1bae37f 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -315,7 +315,13 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) { pVnode->version = walGetVersion(pVnode->wal); } - tsdbSyncCommit(pVnode->tsdb); + code = tsdbSyncCommit(pVnode->tsdb); + if (code != 0) { + vError("vgId:%d, failed to commit after restore from wal since %s", pVnode->vgId, tstrerror(code)); + vnodeCleanUp(pVnode); + return code; + } + walRemoveAllOldFiles(pVnode->wal); 
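   // old WAL files are removed only after the sync commit above has succeeded; walRenew()
   // then starts a fresh WAL file for subsequent writes.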
walRenew(pVnode->wal); @@ -412,6 +418,7 @@ void vnodeRelease(void *pVnodeRaw) { } if (pVnode->wal) { + walRemoveAllOldFiles(pVnode->wal); walClose(pVnode->wal); pVnode->wal = NULL; } @@ -589,7 +596,11 @@ static int vnodeProcessTsdbStatus(void *arg, int status) { if (status == TSDB_STATUS_COMMIT_START) { pVnode->fversion = pVnode->version; vDebug("vgId:%d, start commit, fver:%" PRIu64 " vver:%" PRIu64, pVnode->vgId, pVnode->fversion, pVnode->version); - return walRenew(pVnode->wal); + if (pVnode->status == TAOS_VN_STATUS_INIT) { + return 0; + } else { + return walRenew(pVnode->wal); + } } if (status == TSDB_STATUS_COMMIT_OVER) { diff --git a/src/wal/src/walMgmt.c b/src/wal/src/walMgmt.c index fb49f38217..36c190be3e 100644 --- a/src/wal/src/walMgmt.c +++ b/src/wal/src/walMgmt.c @@ -124,15 +124,7 @@ void walClose(void *handle) { SWal *pWal = handle; pthread_mutex_lock(&pWal->mutex); - taosClose(pWal->fd); - - if (pWal->keep != TAOS_WAL_KEEP) { - walRemoveAllOldFiles(pWal); - } else { - wDebug("vgId:%d, wal:%p file:%s, it is closed and kept", pWal->vgId, pWal, pWal->name); - } - pthread_mutex_unlock(&pWal->mutex); taosRemoveRef(tsWal.refId, pWal->rid); } diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c index 48021eecfc..72464d4309 100644 --- a/src/wal/src/walWrite.c +++ b/src/wal/src/walWrite.c @@ -67,6 +67,7 @@ void walRemoveOneOldFile(void *handle) { SWal *pWal = handle; if (pWal == NULL) return; if (pWal->keep == TAOS_WAL_KEEP) return; + if (pWal->fd <= 0) return; pthread_mutex_lock(&pWal->mutex); @@ -91,6 +92,8 @@ void walRemoveAllOldFiles(void *handle) { SWal * pWal = handle; int64_t fileId = -1; + + pthread_mutex_lock(&pWal->mutex); while (walGetNextFile(pWal, &fileId) >= 0) { snprintf(pWal->name, sizeof(pWal->name), "%s/%s%" PRId64, pWal->path, WAL_PREFIX, fileId); @@ -100,6 +103,7 @@ void walRemoveAllOldFiles(void *handle) { wInfo("vgId:%d, wal:%p file:%s, it is removed", pWal->vgId, pWal, pWal->name); } } + pthread_mutex_unlock(&pWal->mutex); } int32_t walWrite(void *handle, SWalHead *pHead) { From 5a207951ec641212d08571bd52b49296aecb3d41 Mon Sep 17 00:00:00 2001 From: zyyang Date: Sat, 14 Nov 2020 10:33:46 +0800 Subject: [PATCH 35/52] change version --- cmake/version.inc | 2 +- snap/snapcraft.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmake/version.inc b/cmake/version.inc index a248f76f48..741f76da43 100644 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -4,7 +4,7 @@ PROJECT(TDengine) IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "2.0.6.0") + SET(TD_VER_NUMBER "2.0.7.0") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index 233b7a15b4..1738ff7ec8 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -1,6 +1,6 @@ name: tdengine base: core18 -version: '2.0.6.0' +version: '2.0.7.0' icon: snap/gui/t-dengine.svg summary: an open-source big data platform designed and optimized for IoT. 
description: | From 6b0b783aaff83027613ad8836703cd5137ec4b50 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 14 Nov 2020 11:06:42 +0800 Subject: [PATCH 36/52] [TD-225] --- src/client/src/tscUtil.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 4f04a82549..080ef9f2d2 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -71,7 +71,7 @@ void tsSetSTableQueryCond(STagCond* pTagCond, uint64_t uid, SBufferWriter* bw) { } bool tscQueryTags(SQueryInfo* pQueryInfo) { - int32_t numOfCols = (int32_t)(pQueryInfo); + int32_t numOfCols = (int32_t) tscSqlExprNumOfExprs(pQueryInfo); for (int32_t i = 0; i < numOfCols; ++i) { SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); int32_t functId = pExpr->functionId; From 21388a3b0f8a728060afa7d568f1bdfbef75e875 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 14 Nov 2020 11:50:50 +0800 Subject: [PATCH 37/52] [TD-225] --- src/client/src/tscServer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 8549fa09bb..d8c920daa7 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -892,7 +892,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int32_t msgLen = (int32_t)(pMsg - pCmd->payload); - tscDebug("%p msg built success,len:%d bytes", pSql, msgLen); + tscDebug("%p msg built success, len:%d bytes", pSql, msgLen); pCmd->payloadLen = msgLen; pSql->cmd.msgType = TSDB_MSG_TYPE_QUERY; From c8832d65fbbc4736e46e7fc90a7a8470e8289aed Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 14 Nov 2020 13:09:38 +0800 Subject: [PATCH 38/52] [TD-225] --- src/inc/taosmsg.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index 743616e08e..25b77d6845 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -481,15 +481,15 @@ typedef struct { int64_t limit; int64_t offset; uint32_t queryType; // denote another query process - int16_t numOfOutput; // final output columns numbers + int16_t numOfOutput; // final output columns numbers int16_t tagNameRelType; // relation of tag criteria and tbname criteria - int16_t fillType; // interpolate type - uint64_t fillVal; // default value array list - int32_t tsOffset; // offset value in current msg body, NOTE: ts list is compressed - int32_t tsLen; // total length of ts comp block - int32_t tsNumOfBlocks; // ts comp block numbers - int32_t tsOrder; // ts comp block order - int32_t numOfTags; // number of tags columns involved + int16_t fillType; // interpolate type + uint64_t fillVal; // default value array list + int32_t tsOffset; // offset value in current msg body, NOTE: ts list is compressed + int32_t tsLen; // total length of ts comp block + int32_t tsNumOfBlocks; // ts comp block numbers + int32_t tsOrder; // ts comp block order + int32_t numOfTags; // number of tags columns involved SColumnInfo colList[]; } SQueryTableMsg; From 03653225b4efadb5f74911a98e398ddbb2369437 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Sat, 14 Nov 2020 15:03:24 +0800 Subject: [PATCH 39/52] [TD-1888]add test case --- tests/pytest/fulltest.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 15cadf38e7..3a9aa00617 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -155,6 +155,9 @@ python3 ./test.py -f query/queryCountCSVData.py python3 ./test.py -f query/natualInterval.py python3 ./test.py -f 
query/bug1471.py #python3 ./test.py -f query/dataLossTest.py +python3 ./test.py -f query/bug1874.py +python3 ./test.py -f query/bug1875.py +python3 ./test.py -f query/bug1876.py #stream python3 ./test.py -f stream/metric_1.py From a3580fd06fce6dd9a04efbac78001c4977f4a898 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Sat, 14 Nov 2020 08:51:21 +0000 Subject: [PATCH 40/52] implement TD-2088 --- src/tsdb/inc/tsdbMain.h | 2 - src/tsdb/src/tsdbMain.c | 202 --------------- src/tsdb/src/tsdbMemTable.c | 494 ++++++++++++++++++++++++++---------- src/util/inc/tskiplist.h | 1 + src/util/src/tskiplist.c | 168 +++++++++--- 5 files changed, 486 insertions(+), 381 deletions(-) diff --git a/src/tsdb/inc/tsdbMain.h b/src/tsdb/inc/tsdbMain.h index 6d5f106d20..a905f9803f 100644 --- a/src/tsdb/inc/tsdbMain.h +++ b/src/tsdb/inc/tsdbMain.h @@ -442,8 +442,6 @@ void tsdbCloseBufPool(STsdbRepo* pRepo); SListNode* tsdbAllocBufBlockFromPool(STsdbRepo* pRepo); // ------------------ tsdbMemTable.c -int tsdbInsertDataToTable(STsdbRepo* pRepo, SSubmitBlk* pBlock, TSKEY now, int32_t* affectedrows); -int tsdbUpdateRowInMem(STsdbRepo* pRepo, SDataRow row, STable* pTable); int tsdbRefMemTable(STsdbRepo* pRepo, SMemTable* pMemTable); int tsdbUnRefMemTable(STsdbRepo* pRepo, SMemTable* pMemTable); int tsdbTakeMemSnapshot(STsdbRepo* pRepo, SMemTable** pMem, SMemTable** pIMem); diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index 94830e6070..3a3a824afa 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -40,8 +40,6 @@ static int tsdbLoadConfig(char *rootDir, STsdbCfg *pCfg); static char * tsdbGetCfgFname(char *rootDir); static STsdbRepo * tsdbNewRepo(char *rootDir, STsdbAppH *pAppH, STsdbCfg *pCfg); static void tsdbFreeRepo(STsdbRepo *pRepo); -static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter); -static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock); static int tsdbRestoreInfo(STsdbRepo *pRepo); static void tsdbAlterCompression(STsdbRepo *pRepo, int8_t compression); static int tsdbAlterKeep(STsdbRepo *pRepo, int32_t keep); @@ -49,8 +47,6 @@ static int tsdbAlterCacheTotalBlocks(STsdbRepo *pRepo, int totalBlocks); static int keyFGroupCompFunc(const void *key, const void *fgroup); static int tsdbEncodeCfg(void **buf, STsdbCfg *pCfg); static void * tsdbDecodeCfg(void *buf, STsdbCfg *pCfg); -static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable); -static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg); static void tsdbStartStream(STsdbRepo *pRepo); static void tsdbStopStream(STsdbRepo *pRepo); @@ -162,40 +158,6 @@ void tsdbCloseRepo(TSDB_REPO_T *repo, int toCommit) { tsdbDebug("vgId:%d repository is closed", vgId); } -int32_t tsdbInsertData(TSDB_REPO_T *repo, SSubmitMsg *pMsg, SShellSubmitRspMsg *pRsp) { - STsdbRepo * pRepo = (STsdbRepo *)repo; - SSubmitMsgIter msgIter = {0}; - - if (tsdbScanAndConvertSubmitMsg(pRepo, pMsg) < 0) { - if (terrno != TSDB_CODE_TDB_TABLE_RECONFIGURE) { - tsdbError("vgId:%d failed to insert data since %s", REPO_ID(pRepo), tstrerror(terrno)); - } - return -1; - } - - if (tsdbInitSubmitMsgIter(pMsg, &msgIter) < 0) { - tsdbError("vgId:%d failed to insert data since %s", REPO_ID(pRepo), tstrerror(terrno)); - return -1; - } - - SSubmitBlk *pBlock = NULL; - int32_t affectedrows = 0; - - TSKEY now = taosGetTimestamp(pRepo->config.precision); - while (true) { - tsdbGetSubmitMsgNext(&msgIter, &pBlock); - if (pBlock == NULL) break; - if (tsdbInsertDataToTable(pRepo, pBlock, now, 
&affectedrows) < 0) { - return -1; - } - } - - if (pRsp != NULL) pRsp->affectedRows = htonl(affectedrows); - - if (tsdbCheckCommit(pRepo) < 0) return -1; - return 0; -} - uint32_t tsdbGetFileInfo(TSDB_REPO_T *repo, char *name, uint32_t *index, uint32_t eindex, int64_t *size) { STsdbRepo *pRepo = (STsdbRepo *)repo; // STsdbMeta *pMeta = pRepo->tsdbMeta; @@ -720,42 +682,6 @@ static void tsdbFreeRepo(STsdbRepo *pRepo) { } } -static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter) { - if (pMsg == NULL) { - terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP; - return -1; - } - - pIter->totalLen = pMsg->length; - pIter->len = 0; - pIter->pMsg = pMsg; - if (pMsg->length <= TSDB_SUBMIT_MSG_HEAD_SIZE) { - terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP; - return -1; - } - - return 0; -} - -static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock) { - if (pIter->len == 0) { - pIter->len += TSDB_SUBMIT_MSG_HEAD_SIZE; - } else { - SSubmitBlk *pSubmitBlk = (SSubmitBlk *)POINTER_SHIFT(pIter->pMsg, pIter->len); - pIter->len += (sizeof(SSubmitBlk) + pSubmitBlk->dataLen + pSubmitBlk->schemaLen); - } - - if (pIter->len > pIter->totalLen) { - terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP; - *pPBlock = NULL; - return -1; - } - - *pPBlock = (pIter->len == pIter->totalLen) ? NULL : (SSubmitBlk *)POINTER_SHIFT(pIter->pMsg, pIter->len); - - return 0; -} - static int tsdbRestoreInfo(STsdbRepo *pRepo) { STsdbMeta * pMeta = pRepo->tsdbMeta; STsdbFileH *pFileH = pRepo->tsdbFileH; @@ -885,134 +811,6 @@ static void *tsdbDecodeCfg(void *buf, STsdbCfg *pCfg) { return buf; } -static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable) { - ASSERT(pTable != NULL); - - STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1); - int sversion = schemaVersion(pSchema); - - if (pBlock->sversion == sversion) { - return 0; - } else { - if (TABLE_TYPE(pTable) == TSDB_STREAM_TABLE) { // stream table is not allowed to change schema - terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION; - return -1; - } - } - - if (pBlock->sversion > sversion) { // may need to update table schema - if (pBlock->schemaLen > 0) { - tsdbDebug( - "vgId:%d table %s tid %d uid %" PRIu64 " schema version %d is out of data, client version %d, update...", - REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), sversion, pBlock->sversion); - ASSERT(pBlock->schemaLen % sizeof(STColumn) == 0); - int numOfCols = pBlock->schemaLen / sizeof(STColumn); - STColumn *pTCol = (STColumn *)pBlock->data; - - STSchemaBuilder schemaBuilder = {0}; - if (tdInitTSchemaBuilder(&schemaBuilder, pBlock->sversion) < 0) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - tsdbError("vgId:%d failed to update schema of table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), - tstrerror(terrno)); - return -1; - } - - for (int i = 0; i < numOfCols; i++) { - if (tdAddColToSchema(&schemaBuilder, pTCol[i].type, htons(pTCol[i].colId), htons(pTCol[i].bytes)) < 0) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - tsdbError("vgId:%d failed to update schema of table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), - tstrerror(terrno)); - tdDestroyTSchemaBuilder(&schemaBuilder); - return -1; - } - } - - STSchema *pNSchema = tdGetSchemaFromBuilder(&schemaBuilder); - if (pNSchema == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - tdDestroyTSchemaBuilder(&schemaBuilder); - return -1; - } - - tdDestroyTSchemaBuilder(&schemaBuilder); - tsdbUpdateTableSchema(pRepo, pTable, pNSchema, true); - } else { - tsdbDebug( - "vgId:%d table 
%s tid %d uid %" PRIu64 " schema version %d is out of data, client version %d, reconfigure...", - REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), sversion, pBlock->sversion); - terrno = TSDB_CODE_TDB_TABLE_RECONFIGURE; - return -1; - } - } else { - ASSERT(pBlock->sversion >= 0); - if (tsdbGetTableSchemaImpl(pTable, false, false, pBlock->sversion) == NULL) { - tsdbError("vgId:%d invalid submit schema version %d to table %s tid %d from client", REPO_ID(pRepo), - pBlock->sversion, TABLE_CHAR_NAME(pTable), TABLE_TID(pTable)); - } - terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION; - return -1; - } - - return 0; -} - -static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg) { - ASSERT(pMsg != NULL); - STsdbMeta * pMeta = pRepo->tsdbMeta; - SSubmitMsgIter msgIter = {0}; - SSubmitBlk * pBlock = NULL; - - terrno = TSDB_CODE_SUCCESS; - pMsg->length = htonl(pMsg->length); - pMsg->numOfBlocks = htonl(pMsg->numOfBlocks); - - if (tsdbInitSubmitMsgIter(pMsg, &msgIter) < 0) return -1; - while (true) { - if (tsdbGetSubmitMsgNext(&msgIter, &pBlock) < 0) return -1; - if (pBlock == NULL) break; - - pBlock->uid = htobe64(pBlock->uid); - pBlock->tid = htonl(pBlock->tid); - pBlock->sversion = htonl(pBlock->sversion); - pBlock->dataLen = htonl(pBlock->dataLen); - pBlock->schemaLen = htonl(pBlock->schemaLen); - pBlock->numOfRows = htons(pBlock->numOfRows); - - if (pBlock->tid <= 0 || pBlock->tid >= pMeta->maxTables) { - tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pRepo), pBlock->uid, - pBlock->tid); - terrno = TSDB_CODE_TDB_INVALID_TABLE_ID; - return -1; - } - - STable *pTable = pMeta->tables[pBlock->tid]; - if (pTable == NULL || TABLE_UID(pTable) != pBlock->uid) { - tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pRepo), pBlock->uid, - pBlock->tid); - terrno = TSDB_CODE_TDB_INVALID_TABLE_ID; - return -1; - } - - if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) { - tsdbError("vgId:%d invalid action trying to insert a super table %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable)); - terrno = TSDB_CODE_TDB_INVALID_ACTION; - return -1; - } - - // Check schema version and update schema if needed - if (tsdbCheckTableSchema(pRepo, pBlock, pTable) < 0) { - if (terrno == TSDB_CODE_TDB_TABLE_RECONFIGURE) { - continue; - } else { - return -1; - } - } - } - - if (terrno != TSDB_CODE_SUCCESS) return -1; - return 0; -} - static int tsdbAlterCacheTotalBlocks(STsdbRepo *pRepo, int totalBlocks) { // TODO // STsdbCache *pCache = pRepo->tsdbCache; diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index ff64496f27..a790974810 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -34,148 +34,46 @@ static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables); static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, SDataRow row); static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter); static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter); +static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg); +static int tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, int32_t *affectedrows); +static int tsdbCopyRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable, void **ppRow); +static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter); +static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock); +static int tsdbCheckTableSchema(STsdbRepo *pRepo, 
SSubmitBlk *pBlock, STable *pTable); +static int tsdbInsertDataToTableImpl(STsdbRepo *pRepo, STable *pTable, void **rows, int rowCounter); static FORCE_INLINE int tsdbCheckRowRange(STsdbRepo *pRepo, STable *pTable, SDataRow row, TSKEY minKey, TSKEY maxKey, TSKEY now); +int32_t tsdbInsertData(TSDB_REPO_T *repo, SSubmitMsg *pMsg, SShellSubmitRspMsg *pRsp) { + STsdbRepo * pRepo = (STsdbRepo *)repo; + SSubmitMsgIter msgIter = {0}; + SSubmitBlk * pBlock = NULL; + int32_t affectedrows = 0; + + if (tsdbScanAndConvertSubmitMsg(pRepo, pMsg) < 0) { + if (terrno != TSDB_CODE_TDB_TABLE_RECONFIGURE) { + tsdbError("vgId:%d failed to insert data since %s", REPO_ID(pRepo), tstrerror(terrno)); + } + return -1; + } + + tsdbInitSubmitMsgIter(pMsg, &msgIter); + while (true) { + tsdbGetSubmitMsgNext(&msgIter, &pBlock); + if (pBlock == NULL) break; + if (tsdbInsertDataToTable(pRepo, pBlock, &affectedrows) < 0) { + return -1; + } + } + + if (pRsp != NULL) pRsp->affectedRows = htonl(affectedrows); + + if (tsdbCheckCommit(pRepo) < 0) return -1; + return 0; +} + // ---------------- INTERNAL FUNCTIONS ---------------- -int tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, TSKEY now, int32_t *affectedrows) { - STsdbMeta * pMeta = pRepo->tsdbMeta; - int64_t points = 0; - STable * pTable = NULL; - SSubmitBlkIter blkIter = {0}; - SDataRow row = NULL; - TSKEY minKey = 0; - TSKEY maxKey = 0; - void ** pData = NULL; - // int rowCounter = 0; - - ASSERT(pBlock->tid < pMeta->maxTables); - pTable = pMeta->tables[pBlock->tid]; - ASSERT(pTable != NULL && TABLE_UID(pTable) == pBlock->uid); - - minKey = now - tsMsPerDay[pRepo->config.precision] * pRepo->config.keep; - maxKey = now + tsMsPerDay[pRepo->config.precision] * pRepo->config.daysPerFile; - - pData = (void **)calloc(pBlock->numOfRows, sizeof(void *)); - if (pData == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - return -1; - } - - tsdbInitSubmitBlkIter(pBlock, &blkIter); - - while ((row = tsdbGetSubmitBlkNext(&blkIter)) != NULL) { - if (tsdbCheckRowRange(pRepo, pTable, row, minKey, maxKey, now) < 0) return -1; - - if (tsdbUpdateRowInMem(pRepo, row, pTable) < 0) return -1; - - (*affectedrows)++; - points++; - } - - STSchema *pSchema = tsdbGetTableSchemaByVersion(pTable, pBlock->sversion); - pRepo->stat.pointsWritten += points * schemaNCols(pSchema); - pRepo->stat.totalStorage += points * schemaVLen(pSchema); - - return 0; -} - -int tsdbUpdateRowInMem(STsdbRepo *pRepo, SDataRow row, STable *pTable) { - STsdbCfg * pCfg = &pRepo->config; - STsdbMeta * pMeta = pRepo->tsdbMeta; - TKEY tkey = dataRowTKey(row); - TSKEY key = dataRowKey(row); - SMemTable * pMemTable = pRepo->mem; - STableData *pTableData = NULL; - bool isRowDelete = TKEY_IS_DELETED(tkey); - - if (isRowDelete) { - if (!pCfg->update) { - tsdbWarn("vgId:%d vnode is not allowed to update but try to delete a data row", REPO_ID(pRepo)); - terrno = TSDB_CODE_TDB_INVALID_ACTION; - return -1; - } - - if (key > TABLE_LASTKEY(pTable)) { - tsdbTrace("vgId:%d skip to delete row key %" PRId64 " which is larger than table lastKey %" PRId64, - REPO_ID(pRepo), key, TABLE_LASTKEY(pTable)); - return 0; - } - } - - void *pRow = tsdbAllocBytes(pRepo, dataRowLen(row)); - if (pRow == NULL) { - tsdbError("vgId:%d failed to insert row with key %" PRId64 " to table %s while allocate %d bytes since %s", - REPO_ID(pRepo), key, TABLE_CHAR_NAME(pTable), dataRowLen(row), tstrerror(terrno)); - return -1; - } - - dataRowCpy(pRow, row); - - // Operations above may change pRepo->mem, retake those values - ASSERT(pRepo->mem != NULL); - 
pMemTable = pRepo->mem; - - if (TABLE_TID(pTable) >= pMemTable->maxTables) { - if (tsdbAdjustMemMaxTables(pMemTable, pMeta->maxTables) < 0) { - tsdbFreeBytes(pRepo, pRow, dataRowLen(row)); - return -1; - } - } - pTableData = pMemTable->tData[TABLE_TID(pTable)]; - - if (pTableData == NULL || pTableData->uid != TABLE_UID(pTable)) { - if (pTableData != NULL) { - taosWLockLatch(&(pMemTable->latch)); - pMemTable->tData[TABLE_TID(pTable)] = NULL; - tsdbFreeTableData(pTableData); - taosWUnLockLatch(&(pMemTable->latch)); - } - - pTableData = tsdbNewTableData(pCfg, pTable); - if (pTableData == NULL) { - tsdbError("vgId:%d failed to insert row with key %" PRId64 - " to table %s while create new table data object since %s", - REPO_ID(pRepo), key, TABLE_CHAR_NAME(pTable), tstrerror(terrno)); - tsdbFreeBytes(pRepo, (void *)pRow, dataRowLen(row)); - return -1; - } - - pRepo->mem->tData[TABLE_TID(pTable)] = pTableData; - } - - ASSERT((pTableData != NULL) && pTableData->uid == TABLE_UID(pTable)); - - int64_t oldSize = SL_SIZE(pTableData->pData); - if (tSkipListPut(pTableData->pData, pRow) == NULL) { - tsdbFreeBytes(pRepo, (void *)pRow, dataRowLen(row)); - } else { - int64_t deltaSize = SL_SIZE(pTableData->pData) - oldSize; - if (isRowDelete) { - if (TABLE_LASTKEY(pTable) == key) { - // TODO: need to update table last key here (may from file) - } - } else { - if (TABLE_LASTKEY(pTable) < key) TABLE_LASTKEY(pTable) = key; - } - - if (pMemTable->keyFirst > key) pMemTable->keyFirst = key; - if (pMemTable->keyLast < key) pMemTable->keyLast = key; - pMemTable->numOfRows += deltaSize; - - if (pTableData->keyFirst > key) pTableData->keyFirst = key; - if (pTableData->keyLast < key) pTableData->keyLast = key; - pTableData->numOfRows += deltaSize; - } - - tsdbTrace("vgId:%d a row is %s table %s tid %d uid %" PRIu64 " key %" PRIu64, REPO_ID(pRepo), - isRowDelete ? 
"deleted from" : "updated in", TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), - key); - - return 0; -} - int tsdbRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) { if (pMemTable == NULL) return 0; int ref = T_REF_INC(pMemTable); @@ -930,5 +828,327 @@ static FORCE_INLINE int tsdbCheckRowRange(STsdbRepo *pRepo, STable *pTable, SDat return -1; } + return 0; +} + +static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg) { + ASSERT(pMsg != NULL); + STsdbMeta * pMeta = pRepo->tsdbMeta; + SSubmitMsgIter msgIter = {0}; + SSubmitBlk * pBlock = NULL; + SSubmitBlkIter blkIter = {0}; + SDataRow row = NULL; + TSKEY now = taosGetTimestamp(pRepo->config.precision); + TSKEY minKey = now - tsMsPerDay[pRepo->config.precision] * pRepo->config.keep; + TSKEY maxKey = now + tsMsPerDay[pRepo->config.precision] * pRepo->config.daysPerFile; + + terrno = TSDB_CODE_SUCCESS; + pMsg->length = htonl(pMsg->length); + pMsg->numOfBlocks = htonl(pMsg->numOfBlocks); + + if (tsdbInitSubmitMsgIter(pMsg, &msgIter) < 0) return -1; + while (true) { + if (tsdbGetSubmitMsgNext(&msgIter, &pBlock) < 0) return -1; + if (pBlock == NULL) break; + + pBlock->uid = htobe64(pBlock->uid); + pBlock->tid = htonl(pBlock->tid); + pBlock->sversion = htonl(pBlock->sversion); + pBlock->dataLen = htonl(pBlock->dataLen); + pBlock->schemaLen = htonl(pBlock->schemaLen); + pBlock->numOfRows = htons(pBlock->numOfRows); + + if (pBlock->tid <= 0 || pBlock->tid >= pMeta->maxTables) { + tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pRepo), pBlock->uid, + pBlock->tid); + terrno = TSDB_CODE_TDB_INVALID_TABLE_ID; + return -1; + } + + STable *pTable = pMeta->tables[pBlock->tid]; + if (pTable == NULL || TABLE_UID(pTable) != pBlock->uid) { + tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pRepo), pBlock->uid, + pBlock->tid); + terrno = TSDB_CODE_TDB_INVALID_TABLE_ID; + return -1; + } + + if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) { + tsdbError("vgId:%d invalid action trying to insert a super table %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable)); + terrno = TSDB_CODE_TDB_INVALID_ACTION; + return -1; + } + + // Check schema version and update schema if needed + if (tsdbCheckTableSchema(pRepo, pBlock, pTable) < 0) { + if (terrno == TSDB_CODE_TDB_TABLE_RECONFIGURE) { + continue; + } else { + return -1; + } + } + + tsdbInitSubmitBlkIter(pBlock, &blkIter); + while ((row = tsdbGetSubmitBlkNext(&blkIter)) != NULL) { + if (tsdbCheckRowRange(pRepo, pTable, row, minKey, maxKey, now) < 0) { + return -1; + } + } + } + + if (terrno != TSDB_CODE_SUCCESS) return -1; + return 0; +} + +static int tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, int32_t *affectedrows) { + STsdbMeta * pMeta = pRepo->tsdbMeta; + int64_t points = 0; + STable * pTable = NULL; + SSubmitBlkIter blkIter = {0}; + SDataRow row = NULL; + void ** rows = NULL; + int rowCounter = 0; + + ASSERT(pBlock->tid < pMeta->maxTables); + pTable = pMeta->tables[pBlock->tid]; + ASSERT(pTable != NULL && TABLE_UID(pTable) == pBlock->uid); + + rows = (void **)calloc(pBlock->numOfRows, sizeof(void *)); + if (rows == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + return -1; + } + + tsdbInitSubmitBlkIter(pBlock, &blkIter); + while ((row = tsdbGetSubmitBlkNext(&blkIter)) != NULL) { + if (tsdbCopyRowToMem(pRepo, row, pTable, &(rows[rowCounter])) < 0) { + free(rows); + return -1; + } + + (*affectedrows)++; + points++; + + if (rows[rowCounter] != NULL) { + rowCounter++; + } + } + + if 
(tsdbInsertDataToTableImpl(pRepo, pTable, rows, rowCounter) < 0) { + free(rows); + return -1; + } + + STSchema *pSchema = tsdbGetTableSchemaByVersion(pTable, pBlock->sversion); + pRepo->stat.pointsWritten += points * schemaNCols(pSchema); + pRepo->stat.totalStorage += points * schemaVLen(pSchema); + + free(rows); + return 0; +} + +static int tsdbCopyRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable, void **ppRow) { + STsdbCfg * pCfg = &pRepo->config; + TKEY tkey = dataRowTKey(row); + TSKEY key = dataRowKey(row); + bool isRowDelete = TKEY_IS_DELETED(tkey); + + if (isRowDelete) { + if (!pCfg->update) { + tsdbWarn("vgId:%d vnode is not allowed to update but try to delete a data row", REPO_ID(pRepo)); + terrno = TSDB_CODE_TDB_INVALID_ACTION; + return -1; + } + + if (key > TABLE_LASTKEY(pTable)) { + tsdbTrace("vgId:%d skip to delete row key %" PRId64 " which is larger than table lastKey %" PRId64, + REPO_ID(pRepo), key, TABLE_LASTKEY(pTable)); + return 0; + } + } + + void *pRow = tsdbAllocBytes(pRepo, dataRowLen(row)); + if (pRow == NULL) { + tsdbError("vgId:%d failed to insert row with key %" PRId64 " to table %s while allocate %d bytes since %s", + REPO_ID(pRepo), key, TABLE_CHAR_NAME(pTable), dataRowLen(row), tstrerror(terrno)); + return -1; + } + + dataRowCpy(pRow, row); + ppRow[0] = pRow; + + tsdbTrace("vgId:%d a row is %s table %s tid %d uid %" PRIu64 " key %" PRIu64, REPO_ID(pRepo), + isRowDelete ? "deleted from" : "updated in", TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), + key); + + return 0; +} + +static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter) { + if (pMsg == NULL) { + terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP; + return -1; + } + + pIter->totalLen = pMsg->length; + pIter->len = 0; + pIter->pMsg = pMsg; + if (pMsg->length <= TSDB_SUBMIT_MSG_HEAD_SIZE) { + terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP; + return -1; + } + + return 0; +} + +static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock) { + if (pIter->len == 0) { + pIter->len += TSDB_SUBMIT_MSG_HEAD_SIZE; + } else { + SSubmitBlk *pSubmitBlk = (SSubmitBlk *)POINTER_SHIFT(pIter->pMsg, pIter->len); + pIter->len += (sizeof(SSubmitBlk) + pSubmitBlk->dataLen + pSubmitBlk->schemaLen); + } + + if (pIter->len > pIter->totalLen) { + terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP; + *pPBlock = NULL; + return -1; + } + + *pPBlock = (pIter->len == pIter->totalLen) ? 
NULL : (SSubmitBlk *)POINTER_SHIFT(pIter->pMsg, pIter->len); + + return 0; +} + +static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable) { + ASSERT(pTable != NULL); + + STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1); + int sversion = schemaVersion(pSchema); + + if (pBlock->sversion == sversion) { + return 0; + } else { + if (TABLE_TYPE(pTable) == TSDB_STREAM_TABLE) { // stream table is not allowed to change schema + terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION; + return -1; + } + } + + if (pBlock->sversion > sversion) { // may need to update table schema + if (pBlock->schemaLen > 0) { + tsdbDebug( + "vgId:%d table %s tid %d uid %" PRIu64 " schema version %d is out of data, client version %d, update...", + REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), sversion, pBlock->sversion); + ASSERT(pBlock->schemaLen % sizeof(STColumn) == 0); + int numOfCols = pBlock->schemaLen / sizeof(STColumn); + STColumn *pTCol = (STColumn *)pBlock->data; + + STSchemaBuilder schemaBuilder = {0}; + if (tdInitTSchemaBuilder(&schemaBuilder, pBlock->sversion) < 0) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + tsdbError("vgId:%d failed to update schema of table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), + tstrerror(terrno)); + return -1; + } + + for (int i = 0; i < numOfCols; i++) { + if (tdAddColToSchema(&schemaBuilder, pTCol[i].type, htons(pTCol[i].colId), htons(pTCol[i].bytes)) < 0) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + tsdbError("vgId:%d failed to update schema of table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), + tstrerror(terrno)); + tdDestroyTSchemaBuilder(&schemaBuilder); + return -1; + } + } + + STSchema *pNSchema = tdGetSchemaFromBuilder(&schemaBuilder); + if (pNSchema == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + tdDestroyTSchemaBuilder(&schemaBuilder); + return -1; + } + + tdDestroyTSchemaBuilder(&schemaBuilder); + tsdbUpdateTableSchema(pRepo, pTable, pNSchema, true); + } else { + tsdbDebug( + "vgId:%d table %s tid %d uid %" PRIu64 " schema version %d is out of data, client version %d, reconfigure...", + REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), sversion, pBlock->sversion); + terrno = TSDB_CODE_TDB_TABLE_RECONFIGURE; + return -1; + } + } else { + ASSERT(pBlock->sversion >= 0); + if (tsdbGetTableSchemaImpl(pTable, false, false, pBlock->sversion) == NULL) { + tsdbError("vgId:%d invalid submit schema version %d to table %s tid %d from client", REPO_ID(pRepo), + pBlock->sversion, TABLE_CHAR_NAME(pTable), TABLE_TID(pTable)); + } + terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION; + return -1; + } + + return 0; +} + +static int tsdbInsertDataToTableImpl(STsdbRepo *pRepo, STable *pTable, void **rows, int rowCounter) { + if (rowCounter < 1) return 0; + + SMemTable * pMemTable = NULL; + STableData *pTableData = NULL; + STsdbMeta * pMeta = pRepo->tsdbMeta; + STsdbCfg * pCfg = &(pRepo->config); + + ASSERT(pRepo->mem != NULL); + pMemTable = pRepo->mem; + + if (TABLE_TID(pTable) >= pMemTable->maxTables) { + if (tsdbAdjustMemMaxTables(pMemTable, pMeta->maxTables) < 0) { + for (int i = rowCounter - 1; i >= 0; i--) { + tsdbFreeBytes(pRepo, rows[i], dataRowLen(rows[i])); + } + return -1; + } + } + pTableData = pMemTable->tData[TABLE_TID(pTable)]; + + if (pTableData == NULL || pTableData->uid != TABLE_UID(pTable)) { + if (pTableData != NULL) { + taosWLockLatch(&(pMemTable->latch)); + pMemTable->tData[TABLE_TID(pTable)] = NULL; + tsdbFreeTableData(pTableData); + 
taosWUnLockLatch(&(pMemTable->latch)); + } + + pTableData = tsdbNewTableData(pCfg, pTable); + if (pTableData == NULL) { + tsdbError("vgId:%d failed to insert data to table %s uid %" PRId64 " tid %d since %s", REPO_ID(pRepo), + TABLE_CHAR_NAME(pTable), TABLE_UID(pTable), TABLE_TID(pTable), tstrerror(terrno)); + for (int i = rowCounter - 1; i >= 0; i--) { + tsdbFreeBytes(pRepo, rows[i], dataRowLen(rows[i])); + } + return -1; + } + + pRepo->mem->tData[TABLE_TID(pTable)] = pTableData; + } + + ASSERT((pTableData != NULL) && pTableData->uid == TABLE_UID(pTable)); + + int64_t osize = SL_SIZE(pTableData->pData); + tSkipListPutBatch(pTableData->pData, rows, rowCounter); + int64_t dsize = SL_SIZE(pTableData->pData) - osize; + + if (pMemTable->keyFirst > dataRowKey(rows[0])) pMemTable->keyFirst = dataRowKey(rows[0]); + if (pMemTable->keyLast < dataRowKey(rows[rowCounter - 1])) pMemTable->keyLast = dataRowKey(rows[rowCounter - 1]); + pMemTable->numOfRows += dsize; + + if (pTableData->keyFirst > dataRowKey(rows[0])) pTableData->keyFirst = dataRowKey(rows[0]); + if (pTableData->keyLast < dataRowKey(rows[rowCounter - 1])) pTableData->keyLast = dataRowKey(rows[rowCounter - 1]); + pTableData->numOfRows += dsize; + + // TODO: impl delete row thing + if (TABLE_LASTKEY(pTable) < dataRowKey(rows[rowCounter-1])) TABLE_LASTKEY(pTable) = dataRowKey(rows[rowCounter-1]); + return 0; } \ No newline at end of file diff --git a/src/util/inc/tskiplist.h b/src/util/inc/tskiplist.h index 65b6482520..2c4d1a86ef 100644 --- a/src/util/inc/tskiplist.h +++ b/src/util/inc/tskiplist.h @@ -131,6 +131,7 @@ SSkipList *tSkipListCreate(uint8_t maxLevel, uint8_t keyType, uint16_t keyLen, _ __sl_key_fn_t fn); void tSkipListDestroy(SSkipList *pSkipList); SSkipListNode * tSkipListPut(SSkipList *pSkipList, void *pData); +void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata); SArray * tSkipListGet(SSkipList *pSkipList, SSkipListKey pKey); void tSkipListPrint(SSkipList *pSkipList, int16_t nlevel); SSkipListIterator *tSkipListCreateIter(SSkipList *pSkipList); diff --git a/src/util/src/tskiplist.c b/src/util/src/tskiplist.c index 5ecc8ce119..06d9287eaf 100644 --- a/src/util/src/tskiplist.c +++ b/src/util/src/tskiplist.c @@ -24,10 +24,12 @@ static SSkipListNode * getPriorNode(SSkipList *pSkipList, const char *val, in static void tSkipListRemoveNodeImpl(SSkipList *pSkipList, SSkipListNode *pNode); static void tSkipListCorrectLevel(SSkipList *pSkipList); static SSkipListIterator *doCreateSkipListIterator(SSkipList *pSkipList, int32_t order); -static void tSkipListDoInsert(SSkipList *pSkipList, SSkipListNode **backward, SSkipListNode *pNode); -static bool tSkipListGetPosToPut(SSkipList *pSkipList, SSkipListNode **backward, void *pData); -static SSkipListNode * tSkipListNewNode(uint8_t level); +static void tSkipListDoInsert(SSkipList *pSkipList, SSkipListNode **direction, SSkipListNode *pNode, bool isForward); +static bool tSkipListGetPosToPut(SSkipList *pSkipList, SSkipListNode **backward, void *pData); +static SSkipListNode *tSkipListNewNode(uint8_t level); #define tSkipListFreeNode(n) tfree((n)) +static SSkipListNode *tSkipListPutImpl(SSkipList *pSkipList, void *pData, SSkipListNode **direction, bool isForward, + bool hasDup); static FORCE_INLINE int tSkipListWLock(SSkipList *pSkipList); static FORCE_INLINE int tSkipListRLock(SSkipList *pSkipList); @@ -109,32 +111,87 @@ SSkipListNode *tSkipListPut(SSkipList *pSkipList, void *pData) { if (pSkipList == NULL || pData == NULL) return NULL; SSkipListNode 
*backward[MAX_SKIP_LIST_LEVEL] = {0}; - uint8_t dupMode = SL_DUP_MODE(pSkipList); SSkipListNode *pNode = NULL; tSkipListWLock(pSkipList); bool hasDup = tSkipListGetPosToPut(pSkipList, backward, pData); - - if (hasDup && (dupMode == SL_DISCARD_DUP_KEY || dupMode == SL_UPDATE_DUP_KEY)) { - if (dupMode == SL_UPDATE_DUP_KEY) { - pNode = SL_NODE_GET_BACKWARD_POINTER(backward[0], 0); - atomic_store_ptr(&(pNode->pData), pData); - } - } else { - pNode = tSkipListNewNode(getSkipListRandLevel(pSkipList)); - if (pNode != NULL) { - pNode->pData = pData; - - tSkipListDoInsert(pSkipList, backward, pNode); - } - } + pNode = tSkipListPutImpl(pSkipList, pData, backward, false, hasDup); tSkipListUnlock(pSkipList); return pNode; } +// Put a batch of data into skiplist. The batch of data must be in ascending order +void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata) { + SSkipListNode *backward[MAX_SKIP_LIST_LEVEL] = {0}; + SSkipListNode *forward[MAX_SKIP_LIST_LEVEL] = {0}; + bool hasDup = false; + char * pKey = NULL; + char * pDataKey = NULL; + int compare = 0; + + tSkipListWLock(pSkipList); + + // backward to put the first data + hasDup = tSkipListGetPosToPut(pSkipList, backward, ppData[0]); + tSkipListPutImpl(pSkipList, ppData[0], backward, false, hasDup); + + for (int level = 0; level < pSkipList->maxLevel - 1; level++) { + forward[level] = SL_NODE_GET_BACKWARD_POINTER(backward[level], level); + } + + // forward to put the rest of data + for (int idata = 1; idata < ndata; idata++) { + pDataKey = pSkipList->keyFn(ppData[idata]); + + // Compare max key + pKey = SL_GET_MAX_KEY(pSkipList); + compare = pSkipList->comparFn(pDataKey, pKey); + if (compare > 0) { + for (int i = 0; i < pSkipList->maxLevel; i++) { + forward[i] = SL_NODE_GET_BACKWARD_POINTER(pSkipList->pTail, i); + } + + hasDup = false; + } else { + SSkipListNode *px = pSkipList->pHead; + for (int i = pSkipList->maxLevel - 1; i >= 0; --i) { + if (i < pSkipList->level) { + // set new px + if (forward[i] != pSkipList->pHead) { + if (px == pSkipList->pHead || + pSkipList->comparFn(SL_GET_NODE_KEY(pSkipList, forward[i]), SL_GET_NODE_KEY(pSkipList, px)) > 0) { + px = forward[i]; + } + } + + SSkipListNode *p = SL_NODE_GET_FORWARD_POINTER(px, i); + while (p != pSkipList->pTail) { + pKey = SL_GET_NODE_KEY(pSkipList, p); + + compare = pSkipList->comparFn(pKey, pDataKey); + if (compare >= 0) { + if (compare == 0) hasDup = true; + break; + } else { + px = p; + p = SL_NODE_GET_FORWARD_POINTER(px, i); + } + } + } + + forward[i] = px; + } + } + + tSkipListPutImpl(pSkipList, ppData[idata], forward, true, hasDup); + } + + tSkipListUnlock(pSkipList); +} + uint32_t tSkipListRemove(SSkipList *pSkipList, SSkipListKey key) { uint32_t count = 0; @@ -310,22 +367,25 @@ void tSkipListPrint(SSkipList *pSkipList, int16_t nlevel) { } } -static void tSkipListDoInsert(SSkipList *pSkipList, SSkipListNode **backward, SSkipListNode *pNode) { +static void tSkipListDoInsert(SSkipList *pSkipList, SSkipListNode **direction, SSkipListNode *pNode, bool isForward) { for (int32_t i = 0; i < pNode->level; ++i) { - if (i >= pSkipList->level) { - SL_NODE_GET_FORWARD_POINTER(pNode, i) = pSkipList->pTail; - SL_NODE_GET_BACKWARD_POINTER(pNode, i) = pSkipList->pHead; - SL_NODE_GET_FORWARD_POINTER(pSkipList->pHead, i) = pNode; - SL_NODE_GET_BACKWARD_POINTER(pSkipList->pTail, i) = pNode; + SSkipListNode *x = direction[i]; + if (isForward) { + SL_NODE_GET_BACKWARD_POINTER(pNode, i) = x; + + SSkipListNode *next = SL_NODE_GET_FORWARD_POINTER(x, i); + SL_NODE_GET_BACKWARD_POINTER(next, 
i) = pNode; + + SL_NODE_GET_FORWARD_POINTER(pNode, i) = next; + SL_NODE_GET_FORWARD_POINTER(x, i) = pNode; } else { - SSkipListNode *x = backward[i]; SL_NODE_GET_FORWARD_POINTER(pNode, i) = x; SSkipListNode *prev = SL_NODE_GET_BACKWARD_POINTER(x, i); SL_NODE_GET_FORWARD_POINTER(prev, i) = pNode; - SL_NODE_GET_BACKWARD_POINTER(x, i) = pNode; SL_NODE_GET_BACKWARD_POINTER(pNode, i) = prev; + SL_NODE_GET_BACKWARD_POINTER(x, i) = pNode; } } @@ -377,7 +437,7 @@ static bool tSkipListGetPosToPut(SSkipList *pSkipList, SSkipListNode **backward, char * pDataKey = pSkipList->keyFn(pData); if (pSkipList->size == 0) { - for (int i = 0; i < pSkipList->level; i++) { + for (int i = 0; i < pSkipList->maxLevel; i++) { backward[i] = pSkipList->pTail; } } else { @@ -387,7 +447,7 @@ static bool tSkipListGetPosToPut(SSkipList *pSkipList, SSkipListNode **backward, pKey = SL_GET_MAX_KEY(pSkipList); compare = pSkipList->comparFn(pDataKey, pKey); if (compare >= 0) { - for (int i = 0; i < pSkipList->level; i++) { + for (int i = 0; i < pSkipList->maxLevel; i++) { backward[i] = pSkipList->pTail; } @@ -398,7 +458,7 @@ static bool tSkipListGetPosToPut(SSkipList *pSkipList, SSkipListNode **backward, pKey = SL_GET_MIN_KEY(pSkipList); compare = pSkipList->comparFn(pDataKey, pKey); if (compare < 0) { - for (int i = 0; i < pSkipList->level; i++) { + for (int i = 0; i < pSkipList->maxLevel; i++) { backward[i] = SL_NODE_GET_FORWARD_POINTER(pSkipList->pHead, i); } @@ -406,18 +466,20 @@ static bool tSkipListGetPosToPut(SSkipList *pSkipList, SSkipListNode **backward, } SSkipListNode *px = pSkipList->pTail; - for (int i = pSkipList->level - 1; i >= 0; --i) { - SSkipListNode *p = SL_NODE_GET_BACKWARD_POINTER(px, i); - while (p != pSkipList->pHead) { - pKey = SL_GET_NODE_KEY(pSkipList, p); + for (int i = pSkipList->maxLevel - 1; i >= 0; --i) { + if (i < pSkipList->level) { + SSkipListNode *p = SL_NODE_GET_BACKWARD_POINTER(px, i); + while (p != pSkipList->pHead) { + pKey = SL_GET_NODE_KEY(pSkipList, p); - compare = pSkipList->comparFn(pKey, pDataKey); - if (compare <= 0) { - if (compare == 0 && !hasDupKey) hasDupKey = true; - break; - } else { - px = p; - p = SL_NODE_GET_BACKWARD_POINTER(px, i); + compare = pSkipList->comparFn(pKey, pDataKey); + if (compare <= 0) { + if (compare == 0 && !hasDupKey) hasDupKey = true; + break; + } else { + px = p; + p = SL_NODE_GET_BACKWARD_POINTER(px, i); + } } } @@ -579,6 +641,32 @@ static SSkipListNode *tSkipListNewNode(uint8_t level) { return pNode; } +static SSkipListNode *tSkipListPutImpl(SSkipList *pSkipList, void *pData, SSkipListNode **direction, bool isForward, + bool hasDup) { + uint8_t dupMode = SL_DUP_MODE(pSkipList); + SSkipListNode *pNode = NULL; + + if (hasDup && (dupMode == SL_DISCARD_DUP_KEY || dupMode == SL_UPDATE_DUP_KEY)) { + if (dupMode == SL_UPDATE_DUP_KEY) { + if (isForward) { + pNode = SL_NODE_GET_FORWARD_POINTER(direction[0], 0); + } else { + pNode = SL_NODE_GET_BACKWARD_POINTER(direction[0], 0); + } + atomic_store_ptr(&(pNode->pData), pData); + } + } else { + pNode = tSkipListNewNode(getSkipListRandLevel(pSkipList)); + if (pNode != NULL) { + pNode->pData = pData; + + tSkipListDoInsert(pSkipList, direction, pNode, isForward); + } + } + + return pNode; +} + // static int32_t tSkipListEndParQuery(SSkipList *pSkipList, SSkipListNode *pStartNode, SSkipListKey *pEndKey, // int32_t cond, SSkipListNode ***pRes) { // pthread_rwlock_rdlock(&pSkipList->lock); From f94431100d806f2333880642c67a3f62c7d19895 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Sat, 14 Nov 2020 09:07:54 +0000 
Subject: [PATCH 41/52] log --- src/util/src/tref.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/util/src/tref.c b/src/util/src/tref.c index 760d1c0eb4..4c1a87c960 100644 --- a/src/util/src/tref.c +++ b/src/util/src/tref.c @@ -426,11 +426,11 @@ static int taosDecRefCount(int rsetId, int64_t rid, int remove) { (*pSet->fp)(pNode->p); - uTrace("rsetId:%d p:%p rid:%" PRId64 "is removed, count:%d, free mem: %p", rsetId, pNode->p, rid, pSet->count, pNode); + uTrace("rsetId:%d p:%p rid:%" PRId64 " is removed, count:%d, free mem: %p", rsetId, pNode->p, rid, pSet->count, pNode); free(pNode); released = 1; } else { - uTrace("rsetId:%d p:%p rid:%" PRId64 "is released, count:%d", rsetId, pNode->p, rid, pNode->count); + uTrace("rsetId:%d p:%p rid:%" PRId64 " is released, count:%d", rsetId, pNode->p, rid, pNode->count); } } else { uTrace("rsetId:%d rid:%" PRId64 " is not there, failed to release/remove", rsetId, rid); From 72491dc5b79b590facfc1259866f894613826d43 Mon Sep 17 00:00:00 2001 From: Jeff Tao Date: Sat, 14 Nov 2020 13:17:28 +0000 Subject: [PATCH 42/52] return reference ID via parameter instead of return value --- src/client/src/tscServer.c | 6 +----- src/client/src/tscSystem.c | 2 ++ src/dnode/src/dnodeMain.c | 2 ++ src/dnode/src/dnodePeer.c | 4 ++-- src/inc/trpc.h | 4 +++- src/rpc/src/rpcMain.c | 18 ++++++++---------- src/rpc/test/rclient.c | 2 +- src/sync/test/syncClient.c | 2 +- 8 files changed, 20 insertions(+), 20 deletions(-) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index d8c920daa7..dc55e3f159 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -241,11 +241,7 @@ int tscSendMsgToServer(SSqlObj *pSql) { .code = 0 }; - // NOTE: the rpc context should be acquired before sending data to server. - // Otherwise, the pSql object may have been released already during the response function, which is - // processMsgFromServer function. In the meanwhile, the assignment of the rpc context to sql object will absolutely - // cause crash. 
- pSql->rpcRid = rpcSendRequest(pObj->pDnodeConn, &pSql->epSet, &rpcMsg); + rpcSendRequest(pObj->pDnodeConn, &pSql->epSet, &rpcMsg, &pSql->rpcRid); return TSDB_CODE_SUCCESS; } diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index 01046a2840..9e9a00550a 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -105,6 +105,7 @@ void taos_init_imp(void) { taosReadGlobalCfg(); taosCheckGlobalCfg(); + rpcInit(); tscDebug("starting to initialize TAOS client ..."); tscDebug("Local End Point is:%s", tsLocalEp); } @@ -179,6 +180,7 @@ void taos_cleanup(void) { taosCloseRef(tscRefId); taosCleanupKeywordsTable(); taosCloseLog(); + if (tscEmbedded == 0) rpcCleanup(); m = tscTmr; if (m != NULL && atomic_val_compare_exchange_ptr(&tscTmr, m, 0) == m) { diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index f4c0ee565e..130be0af20 100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -20,6 +20,7 @@ #include "tconfig.h" #include "tglobal.h" #include "twal.h" +#include "trpc.h" #include "dnode.h" #include "dnodeInt.h" #include "dnodeMgmt.h" @@ -54,6 +55,7 @@ typedef struct { } SDnodeComponent; static const SDnodeComponent tsDnodeComponents[] = { + {"rpc", rpcInit, rpcCleanup}, {"storage", dnodeInitStorage, dnodeCleanupStorage}, {"dnodecfg", dnodeInitCfg, dnodeCleanupCfg}, {"dnodeeps", dnodeInitEps, dnodeCleanupEps}, diff --git a/src/dnode/src/dnodePeer.c b/src/dnode/src/dnodePeer.c index afa712a965..fe8b7442e0 100644 --- a/src/dnode/src/dnodePeer.c +++ b/src/dnode/src/dnodePeer.c @@ -169,7 +169,7 @@ void dnodeAddClientRspHandle(uint8_t msgType, void (*fp)(SRpcMsg *rpcMsg)) { } void dnodeSendMsgToDnode(SRpcEpSet *epSet, SRpcMsg *rpcMsg) { - rpcSendRequest(tsClientRpc, epSet, rpcMsg); + rpcSendRequest(tsClientRpc, epSet, rpcMsg, NULL); } void dnodeSendMsgToMnodeRecv(SRpcMsg *rpcMsg, SRpcMsg *rpcRsp) { @@ -180,4 +180,4 @@ void dnodeSendMsgToMnodeRecv(SRpcMsg *rpcMsg, SRpcMsg *rpcRsp) { void dnodeSendMsgToDnodeRecv(SRpcMsg *rpcMsg, SRpcMsg *rpcRsp, SRpcEpSet *epSet) { rpcSendRecv(tsClientRpc, epSet, rpcMsg, rpcRsp); -} \ No newline at end of file +} diff --git a/src/inc/trpc.h b/src/inc/trpc.h index e430a43807..0ce2e3da14 100644 --- a/src/inc/trpc.h +++ b/src/inc/trpc.h @@ -78,12 +78,14 @@ typedef struct SRpcInit { int (*afp)(char *tableId, char *spi, char *encrypt, char *secret, char *ckey); } SRpcInit; +int32_t rpcInit(); +void rpcCleanup(); void *rpcOpen(const SRpcInit *pRpc); void rpcClose(void *); void *rpcMallocCont(int contLen); void rpcFreeCont(void *pCont); void *rpcReallocCont(void *ptr, int contLen); -int64_t rpcSendRequest(void *thandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg); +void rpcSendRequest(void *thandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg, int64_t *rid); void rpcSendResponse(const SRpcMsg *pMsg); void rpcSendRedirectRsp(void *pConn, const SRpcEpSet *pEpSet); int rpcGetConnInfo(void *thandle, SRpcConnInfo *pInfo); diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index f963eeb68d..acceaf9d7a 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -135,7 +135,7 @@ int tsRpcOverhead; static int tsRpcRefId = -1; static int32_t tsRpcNum = 0; -static pthread_once_t tsRpcInit = PTHREAD_ONCE_INIT; +//static pthread_once_t tsRpcInit = PTHREAD_ONCE_INIT; // server:0 client:1 tcp:2 udp:0 #define RPC_CONN_UDPS 0 @@ -221,13 +221,15 @@ static void rpcFree(void *p) { free(p); } -void rpcInit(void) { +int32_t rpcInit(void) { tsProgressTimer = tsRpcTimer/2; tsRpcMaxRetry = tsRpcMaxTime * 1000/tsProgressTimer; 
tsRpcHeadSize = RPC_MSG_OVERHEAD; tsRpcOverhead = sizeof(SRpcReqContext); tsRpcRefId = taosOpenRef(200, rpcFree); + + return 0; } void rpcCleanup(void) { @@ -238,7 +240,7 @@ void rpcCleanup(void) { void *rpcOpen(const SRpcInit *pInit) { SRpcInfo *pRpc; - pthread_once(&tsRpcInit, rpcInit); + //pthread_once(&tsRpcInit, rpcInit); pRpc = (SRpcInfo *)calloc(1, sizeof(SRpcInfo)); if (pRpc == NULL) return NULL; @@ -379,7 +381,7 @@ void *rpcReallocCont(void *ptr, int contLen) { return start + sizeof(SRpcReqContext) + sizeof(SRpcHead); } -int64_t rpcSendRequest(void *shandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg) { +void rpcSendRequest(void *shandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg, int64_t *pRid) { SRpcInfo *pRpc = (SRpcInfo *)shandle; SRpcReqContext *pContext; @@ -405,14 +407,10 @@ int64_t rpcSendRequest(void *shandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg) { || type == TSDB_MSG_TYPE_CM_SHOW ) pContext->connType = RPC_CONN_TCPC; - // set the handle to pContext, so app can cancel the request - if (pMsg->handle) *((void **)pMsg->handle) = pContext; - pContext->rid = taosAddRef(tsRpcRefId, pContext); + if (pRid) *pRid = pContext->rid; rpcSendReqToServer(pRpc, pContext); - - return pContext->rid; } void rpcSendResponse(const SRpcMsg *pRsp) { @@ -528,7 +526,7 @@ void rpcSendRecv(void *shandle, SRpcEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg *pRsp) pContext->pRsp = pRsp; pContext->pSet = pEpSet; - rpcSendRequest(shandle, pEpSet, pMsg); + rpcSendRequest(shandle, pEpSet, pMsg, NULL); tsem_wait(&sem); tsem_destroy(&sem); diff --git a/src/rpc/test/rclient.c b/src/rpc/test/rclient.c index 7a963e9ce4..5721525ade 100644 --- a/src/rpc/test/rclient.c +++ b/src/rpc/test/rclient.c @@ -57,7 +57,7 @@ static void *sendRequest(void *param) { rpcMsg.ahandle = pInfo; rpcMsg.msgType = 1; tDebug("thread:%d, send request, contLen:%d num:%d", pInfo->index, pInfo->msgSize, pInfo->num); - rpcSendRequest(pInfo->pRpc, &pInfo->epSet, &rpcMsg); + rpcSendRequest(pInfo->pRpc, &pInfo->epSet, &rpcMsg, NULL); if ( pInfo->num % 20000 == 0 ) tInfo("thread:%d, %d requests have been sent", pInfo->index, pInfo->num); tsem_wait(&pInfo->rspSem); diff --git a/src/sync/test/syncClient.c b/src/sync/test/syncClient.c index 23264dc8a0..23ea54ee0c 100644 --- a/src/sync/test/syncClient.c +++ b/src/sync/test/syncClient.c @@ -57,7 +57,7 @@ void *sendRequest(void *param) { rpcMsg.ahandle = pInfo; rpcMsg.msgType = 1; uDebug("thread:%d, send request, contLen:%d num:%d", pInfo->index, pInfo->msgSize, pInfo->num); - rpcSendRequest(pInfo->pRpc, &pInfo->epSet, &rpcMsg); + rpcSendRequest(pInfo->pRpc, &pInfo->epSet, &rpcMsg, NULL); if (pInfo->num % 20000 == 0) { uInfo("thread:%d, %d requests have been sent", pInfo->index, pInfo->num); } From dfb0921c1f392d80851aa927bf1c28092dd4680c Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Sat, 14 Nov 2020 21:40:55 +0800 Subject: [PATCH 43/52] fix coredump --- src/util/src/tskiplist.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/util/src/tskiplist.c b/src/util/src/tskiplist.c index 06d9287eaf..4ae13bd7e5 100644 --- a/src/util/src/tskiplist.c +++ b/src/util/src/tskiplist.c @@ -138,7 +138,7 @@ void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata) { hasDup = tSkipListGetPosToPut(pSkipList, backward, ppData[0]); tSkipListPutImpl(pSkipList, ppData[0], backward, false, hasDup); - for (int level = 0; level < pSkipList->maxLevel - 1; level++) { + for (int level = 0; level < pSkipList->maxLevel; level++) { forward[level] = SL_NODE_GET_BACKWARD_POINTER(backward[level], level); } 
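
For readers following the skiplist changes in the patches above: the batch API added here trades the per-row lock/search of tSkipListPut() for a single locked position walk, at the cost of a precondition that the rows are already sorted. Below is a minimal caller-side sketch, not part of the patches themselves; skiplist creation and the key/comparator setup are omitted because their full signatures are not reproduced in this series, and copyRow() is a hypothetical stand-in for the staging step that tsdbCopyRowToMem() performs.

#include <stdlib.h>
#include "tskiplist.h"

/* hypothetical: stage one row into list-owned memory; may return NULL to skip the row */
extern void *copyRow(void *srcRow);

static int putRowsBatched(SSkipList *pList, void **srcRows, int nrows) {
  if (nrows <= 0) return 0;

  void **rows = (void **)calloc(nrows, sizeof(void *));
  if (rows == NULL) return -1;

  int count = 0;
  for (int i = 0; i < nrows; i++) {
    void *staged = copyRow(srcRows[i]);
    if (staged != NULL) rows[count++] = staged;  /* keep only the rows that were actually staged */
  }

  /* Precondition of tSkipListPutBatch(): rows[0..count-1] must be in ascending key order. */
  if (count > 0) tSkipListPutBatch(pList, rows, count);

  free(rows);
  return 0;
}

The same shape appears in tsdbInsertDataToTable()/tsdbInsertDataToTableImpl() above: stage every row first, then insert the whole ascending batch under one write lock instead of locking once per row.
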
From 79cf4d60270705a11edb4d001e6a7926a83bddda Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Sun, 15 Nov 2020 11:37:17 +0800 Subject: [PATCH 44/52] TD-1924 --- src/sync/src/syncMain.c | 1 + src/sync/src/syncRetrieve.c | 2 ++ src/util/src/tsocket.c | 4 ++-- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index 93739ca3d1..302f08bdb9 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -578,6 +578,7 @@ static void syncChooseMaster(SSyncNode *pNode) { #if 0 for (int32_t i = 0; i < pNode->replica; ++i) { + if (i == index) continue; pPeer = pNode->peerInfo[i]; if (pPeer->version == nodeVersion) { pPeer->role = TAOS_SYNC_ROLE_SLAVE; diff --git a/src/sync/src/syncRetrieve.c b/src/sync/src/syncRetrieve.c index 968f5becad..21151f1199 100644 --- a/src/sync/src/syncRetrieve.c +++ b/src/sync/src/syncRetrieve.c @@ -182,6 +182,8 @@ static int32_t syncReadOneWalRecord(int32_t sfd, SWalHead *pHead, uint32_t *pEve return 0; } + assert(pHead->len <= TSDB_MAX_WAL_SIZE); + ret = read(sfd, pHead->cont, pHead->len); if (ret < 0) return -1; diff --git a/src/util/src/tsocket.c b/src/util/src/tsocket.c index 1003bc6178..11932ac03a 100644 --- a/src/util/src/tsocket.c +++ b/src/util/src/tsocket.c @@ -107,7 +107,7 @@ int32_t taosWriteMsg(SOCKET fd, void *buf, int32_t nbytes) { while (nleft > 0) { nwritten = (int32_t)taosWriteSocket(fd, (char *)ptr, (size_t)nleft); if (nwritten <= 0) { - if (errno == EINTR) + if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK) continue; else return -1; @@ -133,7 +133,7 @@ int32_t taosReadMsg(SOCKET fd, void *buf, int32_t nbytes) { if (nread == 0) { break; } else if (nread < 0) { - if (errno == EINTR) { + if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK) { continue; } else { return -1; From d06dc6ff6d4d0194b7910653669892a0583a1f38 Mon Sep 17 00:00:00 2001 From: zjd1988 <942002795@qq.com> Date: Mon, 16 Nov 2020 09:13:10 +0800 Subject: [PATCH 45/52] current codes not work as expedted --- src/util/src/tlog.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c index 4157d88c45..ad3a922304 100644 --- a/src/util/src/tlog.c +++ b/src/util/src/tlog.c @@ -287,17 +287,17 @@ static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum) { tsLogObj.fileNum = maxFileNum; taosGetLogFileName(fn); + if (strlen(fn) < LOG_FILE_NAME_LEN + 50 - 2) { strcpy(name, fn); strcat(name, ".0"); } + bool log0Exist = stat(name, &logstat0) >= 0; if (strlen(fn) < LOG_FILE_NAME_LEN + 50 - 2) { strcpy(name, fn); strcat(name, ".1"); } - - bool log0Exist = stat(name, &logstat0) >= 0; bool log1Exist = stat(name, &logstat1) >= 0; // if none of the log files exist, open 0, if both exists, open the old one From aba0e02db03e133258f447115c737a8708363456 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 16 Nov 2020 03:24:39 +0000 Subject: [PATCH 46/52] TD-2111 --- src/sync/src/taosTcpPool.c | 2 +- src/vnode/inc/vnodeInt.h | 2 +- src/vnode/src/vnodeCfg.c | 4 ++-- src/wal/inc/walInt.h | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/sync/src/taosTcpPool.c b/src/sync/src/taosTcpPool.c index 3024d7d4e3..eade0222be 100644 --- a/src/sync/src/taosTcpPool.c +++ b/src/sync/src/taosTcpPool.c @@ -327,5 +327,5 @@ static void taosStopPoolThread(SThreadObj *pThread) { } pthread_join(thread, NULL); - if (fd >= 0) taosClose(fd); + taosClose(fd); } diff --git a/src/vnode/inc/vnodeInt.h b/src/vnode/inc/vnodeInt.h index 
317d4904cb..9f1c98774b 100644 --- a/src/vnode/inc/vnodeInt.h +++ b/src/vnode/inc/vnodeInt.h @@ -58,7 +58,7 @@ typedef struct { char *rootDir; tsem_t sem; int8_t dropped; - char db[TSDB_DB_NAME_LEN]; + char db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN]; } SVnodeObj; void vnodeInitWriteFp(void); diff --git a/src/vnode/src/vnodeCfg.c b/src/vnode/src/vnodeCfg.c index e2e57a7566..2d56157328 100644 --- a/src/vnode/src/vnodeCfg.c +++ b/src/vnode/src/vnodeCfg.c @@ -25,7 +25,7 @@ #include "vnodeCfg.h" static void vnodeLoadCfg(SVnodeObj *pVnode, SCreateVnodeMsg* vnodeMsg) { - strcpy(pVnode->db, vnodeMsg->db); + tstrncpy(pVnode->db, vnodeMsg->db, sizeof(pVnode->db)); pVnode->cfgVersion = vnodeMsg->cfg.cfgVersion; pVnode->tsdbCfg.cacheBlockSize = vnodeMsg->cfg.cacheBlockSize; pVnode->tsdbCfg.totalBlocks = vnodeMsg->cfg.totalBlocks; @@ -97,7 +97,7 @@ int32_t vnodeReadCfg(SVnodeObj *pVnode) { vError("vgId:%d, failed to read %s, db not found", pVnode->vgId, file); goto PARSE_VCFG_ERROR; } - strcpy(vnodeMsg.db, db->valuestring); + tstrncpy(vnodeMsg.db, db->valuestring, sizeof(vnodeMsg.db)); cJSON *cfgVersion = cJSON_GetObjectItem(root, "cfgVersion"); if (!cfgVersion || cfgVersion->type != cJSON_Number) { diff --git a/src/wal/inc/walInt.h b/src/wal/inc/walInt.h index 36311c8f5d..b0edabfbd8 100644 --- a/src/wal/inc/walInt.h +++ b/src/wal/inc/walInt.h @@ -37,7 +37,7 @@ extern int32_t wDebugFlag; #define WAL_MAX_SIZE (TSDB_MAX_WAL_SIZE + sizeof(SWalHead) + 16) #define WAL_SIGNATURE ((uint32_t)(0xFAFBFDFE)) #define WAL_PATH_LEN (TSDB_FILENAME_LEN + 12) -#define WAL_FILE_LEN (TSDB_FILENAME_LEN + 32) +#define WAL_FILE_LEN (WAL_PATH_LEN + 32) #define WAL_FILE_NUM 3 typedef struct { From 73431c12d631d6015d7c19d4f6753441221bc03b Mon Sep 17 00:00:00 2001 From: Hui Li Date: Mon, 16 Nov 2020 12:39:09 +0800 Subject: [PATCH 47/52] [TD-1751]add script for geting client ip add and new connection time from taosdlog file --- packaging/tools/get_client.sh | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100755 packaging/tools/get_client.sh diff --git a/packaging/tools/get_client.sh b/packaging/tools/get_client.sh new file mode 100755 index 0000000000..0d34ecb311 --- /dev/null +++ b/packaging/tools/get_client.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# + +log_dir=$1 +result_file=$2 + +if [ ! -n "$1" ];then + echo "Pleas input the director of taosdlog." + echo "usage: ./get_client.sh " + exit 1 +else + log_dir=$1 +fi + +if [ ! 
-n "$2" ];then + result_file=clientInfo.txt +else + result_file=$2 +fi + +grep "new TCP connection" ${log_dir}/taosdlog.* | sed -e "s/0x.* from / /"|sed -e "s/,.*$//"|sed -e "s/:[0-9]*$//"|sort -r|uniq -f 2|sort -k 3 -r|uniq -f 2 > ${result_file} From 71e99dced0e17b50c1b6c382dabd247df37ec2d2 Mon Sep 17 00:00:00 2001 From: Hui Li Date: Mon, 16 Nov 2020 12:42:00 +0800 Subject: [PATCH 48/52] [TD-2101] release taosdump for install package --- packaging/deb/makedeb.sh | 2 +- packaging/rpm/tdengine.spec | 2 +- packaging/tools/makeclient.sh | 3 +- packaging/tools/makeclient_power.sh | 4 +- packaging/tools/makepkg.sh | 3 +- packaging/tools/makepkg_power.sh | 2 + src/kit/taosdump/CMakeLists.txt | 16 + src/kit/taosdump/taosdump.c | 2246 +++++++++++++++++++++++++++ src/kit/taosdump/taosdump.sh | 48 + 9 files changed, 2319 insertions(+), 7 deletions(-) create mode 100644 src/kit/taosdump/CMakeLists.txt create mode 100644 src/kit/taosdump/taosdump.c create mode 100755 src/kit/taosdump/taosdump.sh diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh index 516d91258a..edc7de9692 100755 --- a/packaging/deb/makedeb.sh +++ b/packaging/deb/makedeb.sh @@ -48,7 +48,7 @@ cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_pat cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script cp ${compile_dir}/../packaging/tools/preun.sh ${pkg_dir}${install_home_path}/script cp ${compile_dir}/build/bin/taosdemo ${pkg_dir}${install_home_path}/bin -#cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path}/bin +cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec index 19d932e074..8c23ab802d 100644 --- a/packaging/rpm/tdengine.spec +++ b/packaging/rpm/tdengine.spec @@ -58,7 +58,7 @@ cp %{_compiledir}/../packaging/tools/preun.sh %{buildroot}%{homepath}/scri cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/taosdemo %{buildroot}%{homepath}/bin -#cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin +cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh index 4f4c6dee1d..ee79a56040 100755 --- a/packaging/tools/makeclient.sh +++ b/packaging/tools/makeclient.sh @@ -45,8 +45,7 @@ if [ "$osType" != "Darwin" ]; then strip ${build_dir}/bin/taos bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh" else - #bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${script_dir}/remove_client.sh ${script_dir}/set_core.sh" - bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${script_dir}/remove_client.sh ${script_dir}/set_core.sh" + bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${script_dir}/remove_client.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh" fi lib_files="${build_dir}/lib/libtaos.so.${version}" else diff --git a/packaging/tools/makeclient_power.sh 
b/packaging/tools/makeclient_power.sh index 7e8bef0dff..fdb3e0e5cc 100755 --- a/packaging/tools/makeclient_power.sh +++ b/packaging/tools/makeclient_power.sh @@ -76,8 +76,10 @@ if [ "$osType" != "Darwin" ]; then else cp ${build_dir}/bin/taos ${install_dir}/bin/power cp ${script_dir}/remove_power.sh ${install_dir}/bin - cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo + cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo + cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump cp ${script_dir}/set_core.sh ${install_dir}/bin + cp ${script_dir}/get_client.sh ${install_dir}/bin fi else cp ${bin_files} ${install_dir}/bin diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index 2be60709aa..5ae5cbbcdc 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -36,8 +36,7 @@ if [ "$pagMode" == "lite" ]; then strip ${build_dir}/bin/taos bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh" else - #bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh ${script_dir}/set_core.sh" - bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh ${script_dir}/set_core.sh" + bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh" fi lib_files="${build_dir}/lib/libtaos.so.${version}" diff --git a/packaging/tools/makepkg_power.sh b/packaging/tools/makepkg_power.sh index 0ffcb63e3f..1384948469 100755 --- a/packaging/tools/makepkg_power.sh +++ b/packaging/tools/makepkg_power.sh @@ -77,8 +77,10 @@ else cp ${build_dir}/bin/taosd ${install_dir}/bin/powerd cp ${script_dir}/remove_power.sh ${install_dir}/bin cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo + cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump cp ${build_dir}/bin/tarbitrator ${install_dir}/bin cp ${script_dir}/set_core.sh ${install_dir}/bin + cp ${script_dir}/get_client.sh ${install_dir}/bin fi chmod a+x ${install_dir}/bin/* || : diff --git a/src/kit/taosdump/CMakeLists.txt b/src/kit/taosdump/CMakeLists.txt new file mode 100644 index 0000000000..dcdbd48615 --- /dev/null +++ b/src/kit/taosdump/CMakeLists.txt @@ -0,0 +1,16 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +PROJECT(TDengine) + +INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) +INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc) +INCLUDE_DIRECTORIES(inc) +AUX_SOURCE_DIRECTORY(. SRC) + +IF (TD_LINUX) + ADD_EXECUTABLE(taosdump ${SRC}) + IF (TD_SOMODE_STATIC) + TARGET_LINK_LIBRARIES(taosdump taos_static) + ELSE () + TARGET_LINK_LIBRARIES(taosdump taos) + ENDIF () +ENDIF () diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c new file mode 100644 index 0000000000..5d6b141a9b --- /dev/null +++ b/src/kit/taosdump/taosdump.c @@ -0,0 +1,2246 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include "os.h" +#include "taos.h" +#include "taosdef.h" +#include "taosmsg.h" +#include "tglobal.h" +#include "tsclient.h" +#include "tsdb.h" +#include "tutil.h" + +#define COMMAND_SIZE 65536 +//#define DEFAULT_DUMP_FILE "taosdump.sql" + +int converStringToReadable(char *str, int size, char *buf, int bufsize); +int convertNCharToReadable(char *str, int size, char *buf, int bufsize); +void taosDumpCharset(FILE *fp); +void taosLoadFileCharset(FILE *fp, char *fcharset); + +typedef struct { + short bytes; + int8_t type; +} SOColInfo; + +// -------------------------- SHOW DATABASE INTERFACE----------------------- +enum _show_db_index { + TSDB_SHOW_DB_NAME_INDEX, + TSDB_SHOW_DB_CREATED_TIME_INDEX, + TSDB_SHOW_DB_VGROUPS_INDEX, + TSDB_SHOW_DB_NTABLES_INDEX, + TSDB_SHOW_DB_REPLICA_INDEX, + TSDB_SHOW_DB_DAYS_INDEX, + TSDB_SHOW_DB_KEEP_INDEX, + TSDB_SHOW_DB_TABLES_INDEX, + TSDB_SHOW_DB_ROWS_INDEX, + TSDB_SHOW_DB_CACHE_INDEX, + TSDB_SHOW_DB_ABLOCKS_INDEX, + TSDB_SHOW_DB_TBLOCKS_INDEX, + TSDB_SHOW_DB_CTIME_INDEX, + TSDB_SHOW_DB_CLOG_INDEX, + TSDB_SHOW_DB_COMP_INDEX, + TSDB_MAX_SHOW_DB +}; + +// -----------------------------------------SHOW TABLES CONFIGURE ------------------------------------- +enum _show_tables_index { + TSDB_SHOW_TABLES_NAME_INDEX, + TSDB_SHOW_TABLES_CREATED_TIME_INDEX, + TSDB_SHOW_TABLES_COLUMNS_INDEX, + TSDB_SHOW_TABLES_METRIC_INDEX, + TSDB_MAX_SHOW_TABLES +}; + +// ---------------------------------- DESCRIBE METRIC CONFIGURE ------------------------------ +enum _describe_table_index { + TSDB_DESCRIBE_METRIC_FIELD_INDEX, + TSDB_DESCRIBE_METRIC_TYPE_INDEX, + TSDB_DESCRIBE_METRIC_LENGTH_INDEX, + TSDB_DESCRIBE_METRIC_NOTE_INDEX, + TSDB_MAX_DESCRIBE_METRIC +}; + +typedef struct { + char field[TSDB_COL_NAME_LEN + 1]; + char type[16]; + int length; + char note[128]; +} SColDes; + +typedef struct { + char name[TSDB_COL_NAME_LEN + 1]; + SColDes cols[]; +} STableDef; + +extern char version[]; + +typedef struct { + char name[TSDB_DB_NAME_LEN + 1]; + int32_t replica; + int32_t days; + int32_t keep; + int32_t tables; + int32_t rows; + int32_t cache; + int32_t ablocks; + int32_t tblocks; + int32_t ctime; + int32_t clog; + int32_t comp; +} SDbInfo; + +typedef struct { + char name[TSDB_TABLE_NAME_LEN + 1]; + char metric[TSDB_TABLE_NAME_LEN + 1]; +} STableRecord; + +typedef struct { + bool isMetric; + STableRecord tableRecord; +} STableRecordInfo; + +typedef struct { + pthread_t threadID; + int32_t threadIndex; + int32_t totalThreads; + char dbName[TSDB_TABLE_NAME_LEN + 1]; + void *taosCon; +} SThreadParaObj; + +static int64_t totalDumpOutRows = 0; + +SDbInfo **dbInfos = NULL; + +const char *argp_program_version = version; +const char *argp_program_bug_address = ""; + +/* Program documentation. */ +static char doc[] = ""; +/* "Argp example #4 -- a program with somewhat more complicated\ */ +/* options\ */ +/* \vThis part of the documentation comes *after* the options;\ */ +/* note that the text is automatically filled, but it's possible\ */ +/* to force a line-break, e.g.\n<-- here."; */ + +/* A description of the arguments we accept. */ +static char args_doc[] = "dbname [tbname ...]\n--databases dbname ...\n--all-databases\n-i inpath\n-o outpath"; + +/* Keys for options without short-options. */ +#define OPT_ABORT 1 /* –abort */ + +/* The options we understand. 
*/ +static struct argp_option options[] = { + // connection option + {"host", 'h', "HOST", 0, "Server host dumping data from. Default is localhost.", 0}, + {"user", 'u', "USER", 0, "User name used to connect to server. Default is root.", 0}, + #ifdef _TD_POWER_ + {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is powerdb.", 0}, + #else + {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is taosdata.", 0}, + #endif + {"port", 'P', "PORT", 0, "Port to connect", 0}, + {"cversion", 'v', "CVERION", 0, "client version", 0}, + {"mysqlFlag", 'q', "MYSQLFLAG", 0, "mysqlFlag, Default is 0", 0}, + // input/output file + {"outpath", 'o', "OUTPATH", 0, "Output file path.", 1}, + {"inpath", 'i', "INPATH", 0, "Input file path.", 1}, + #ifdef _TD_POWER_ + {"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/power/taos.cfg.", 1}, + #else + {"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/taos/taos.cfg.", 1}, + #endif + {"encode", 'e', "ENCODE", 0, "Input file encoding.", 1}, + // dump unit options + {"all-databases", 'A', 0, 0, "Dump all databases.", 2}, + {"databases", 'B', 0, 0, "Dump assigned databases", 2}, + // dump format options + {"schemaonly", 's', 0, 0, "Only dump schema.", 3}, + {"with-property", 'M', 0, 0, "Dump schema with properties.", 3}, + {"start-time", 'S', "START_TIME", 0, "Start time to dump.", 3}, + {"end-time", 'E', "END_TIME", 0, "End time to dump.", 3}, + {"data-batch", 'N', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3}, + {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3}, + {"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3}, + {"allow-sys", 'a', 0, 0, "Allow to dump sys database", 3}, + {0}}; + +/* Used by main to communicate with parse_opt. */ +struct arguments { + // connection option + char *host; + char *user; + char *password; + uint16_t port; + char cversion[12]; + uint16_t mysqlFlag; + // output file + char outpath[TSDB_FILENAME_LEN+1]; + char inpath[TSDB_FILENAME_LEN+1]; + char *encode; + // dump unit option + bool all_databases; + bool databases; + // dump format option + bool schemaonly; + bool with_property; + int64_t start_time; + int64_t end_time; + int32_t data_batch; + int32_t table_batch; // num of table which will be dump into one output file. + bool allow_sys; + // other options + int32_t thread_num; + int abort; + char **arg_list; + int arg_list_len; + bool isDumpIn; +}; + +/* Parse a single option. */ +static error_t parse_opt(int key, char *arg, struct argp_state *state) { + /* Get the input argument from argp_parse, which we + know is a pointer to our arguments structure. 
*/ + struct arguments *arguments = state->input; + wordexp_t full_path; + + switch (key) { + // connection option + case 'a': + arguments->allow_sys = true; + break; + case 'h': + arguments->host = arg; + break; + case 'u': + arguments->user = arg; + break; + case 'p': + arguments->password = arg; + break; + case 'P': + arguments->port = atoi(arg); + break; + case 'q': + arguments->mysqlFlag = atoi(arg); + break; + case 'v': + if (wordexp(arg, &full_path, 0) != 0) { + fprintf(stderr, "Invalid client vesion %s\n", arg); + return -1; + } + tstrncpy(arguments->cversion, full_path.we_wordv[0], 11); + wordfree(&full_path); + break; + // output file path + case 'o': + if (wordexp(arg, &full_path, 0) != 0) { + fprintf(stderr, "Invalid path %s\n", arg); + return -1; + } + tstrncpy(arguments->outpath, full_path.we_wordv[0], TSDB_FILENAME_LEN); + wordfree(&full_path); + break; + case 'i': + arguments->isDumpIn = true; + if (wordexp(arg, &full_path, 0) != 0) { + fprintf(stderr, "Invalid path %s\n", arg); + return -1; + } + tstrncpy(arguments->inpath, full_path.we_wordv[0], TSDB_FILENAME_LEN); + wordfree(&full_path); + break; + case 'c': + if (wordexp(arg, &full_path, 0) != 0) { + fprintf(stderr, "Invalid path %s\n", arg); + return -1; + } + tstrncpy(configDir, full_path.we_wordv[0], TSDB_FILENAME_LEN); + wordfree(&full_path); + break; + case 'e': + arguments->encode = arg; + break; + // dump unit option + case 'A': + arguments->all_databases = true; + break; + case 'B': + arguments->databases = true; + break; + // dump format option + case 's': + arguments->schemaonly = true; + break; + case 'M': + arguments->with_property = true; + break; + case 'S': + // parse time here. + arguments->start_time = atol(arg); + break; + case 'E': + arguments->end_time = atol(arg); + break; + case 'N': + arguments->data_batch = atoi(arg); + break; + case 't': + arguments->table_batch = atoi(arg); + break; + case 'T': + arguments->thread_num = atoi(arg); + break; + case OPT_ABORT: + arguments->abort = 1; + break; + case ARGP_KEY_ARG: + arguments->arg_list = &state->argv[state->next - 1]; + arguments->arg_list_len = state->argc - state->next + 1; + state->next = state->argc; + break; + + default: + return ARGP_ERR_UNKNOWN; + } + return 0; +} + +/* Our argp parser. 
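For readers unfamiliar with GNU argp, the options table, the parse_opt callback and argp_parse() cooperate as in the minimal, self-contained sketch below. The demo_* names are invented for illustration and are not part of taosdump; the pattern of passing the arguments struct through state->input is the same one the code above uses.

    #include <argp.h>
    #include <stdlib.h>

    struct demo_args { const char *host; int port; };

    static struct argp_option demo_options[] = {
      {"host", 'h', "HOST", 0, "Server host.", 0},
      {"port", 'P', "PORT", 0, "Server port.", 0},
      {0}};

    static error_t demo_parse(int key, char *arg, struct argp_state *state) {
      struct demo_args *a = state->input;  /* state->input carries the caller's struct */
      switch (key) {
        case 'h': a->host = arg;       break;
        case 'P': a->port = atoi(arg); break;
        default:  return ARGP_ERR_UNKNOWN;
      }
      return 0;
    }

    static struct argp demo_argp = {demo_options, demo_parse, "HOST ...", "argp demo"};

    int demo_main(int argc, char **argv) {
      struct demo_args a = {"localhost", 6030};       /* defaults before parsing */
      argp_parse(&demo_argp, argc, argv, 0, 0, &a);   /* fills a.host / a.port from the command line */
      return 0;
    }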
*/ +static struct argp argp = {options, parse_opt, args_doc, doc}; + +int taosDumpOut(struct arguments *arguments); +int taosDumpIn(struct arguments *arguments); +void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp); +int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *taosCon); +int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon); +void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp); +void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp); +int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon); +int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon); +int taosCheckParam(struct arguments *arguments); +void taosFreeDbInfos(); +static void taosStartDumpOutWorkThreads(struct arguments* args, int32_t numOfThread, char *dbName); + +struct arguments tsArguments = { + // connection option + NULL, + "root", + #ifdef _TD_POWER_ + "powerdb", + #else + "taosdata", + #endif + 0, + "", + 0, + // outpath and inpath + "", + "", + NULL, + // dump unit option + false, + false, + // dump format option + false, + false, + 0, + INT64_MAX, + 1, + 1, + false, + // other options + 5, + 0, + NULL, + 0, + false +}; + +int queryDB(TAOS *taos, char *command) { + TAOS_RES *pSql = NULL; + int32_t code = -1; + + pSql = taos_query(taos, command); + code = taos_errno(pSql); + if (code) { + fprintf(stderr, "sql error: %s, reason:%s\n", command, taos_errstr(pSql)); + } + taos_free_result(pSql); + return code; +} + +int main(int argc, char *argv[]) { + + /* Parse our arguments; every option seen by parse_opt will be + reflected in arguments. */ + argp_parse(&argp, argc, argv, 0, 0, &tsArguments); + + if (tsArguments.abort) { + #ifndef _ALPINE + error(10, 0, "ABORTED"); + #else + abort(); + #endif + } + + printf("====== arguments config ======\n"); + { + printf("host: %s\n", tsArguments.host); + printf("user: %s\n", tsArguments.user); + printf("password: %s\n", tsArguments.password); + printf("port: %u\n", tsArguments.port); + printf("cversion: %s\n", tsArguments.cversion); + printf("mysqlFlag: %d\n", tsArguments.mysqlFlag); + printf("outpath: %s\n", tsArguments.outpath); + printf("inpath: %s\n", tsArguments.inpath); + printf("encode: %s\n", tsArguments.encode); + printf("all_databases: %d\n", tsArguments.all_databases); + printf("databases: %d\n", tsArguments.databases); + printf("schemaonly: %d\n", tsArguments.schemaonly); + printf("with_property: %d\n", tsArguments.with_property); + printf("start_time: %" PRId64 "\n", tsArguments.start_time); + printf("end_time: %" PRId64 "\n", tsArguments.end_time); + printf("data_batch: %d\n", tsArguments.data_batch); + printf("table_batch: %d\n", tsArguments.table_batch); + printf("allow_sys: %d\n", tsArguments.allow_sys); + printf("abort: %d\n", tsArguments.abort); + printf("isDumpIn: %d\n", tsArguments.isDumpIn); + printf("arg_list_len: %d\n", tsArguments.arg_list_len); + + for (int32_t i = 0; i < tsArguments.arg_list_len; i++) { + printf("arg_list[%d]: %s\n", i, tsArguments.arg_list[i]); + } + } + printf("==============================\n"); + + if (tsArguments.cversion[0] != 0){ + tstrncpy(version, tsArguments.cversion, 11); + } + + if (taosCheckParam(&tsArguments) < 0) { + exit(EXIT_FAILURE); + } + + if (tsArguments.isDumpIn) { + if (taosDumpIn(&tsArguments) < 0) return -1; + } else { + if (taosDumpOut(&tsArguments) < 0) return -1; + } + + return 0; +} + +void taosFreeDbInfos() { + 
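The tsArguments block above uses positional initialization, so reading it requires remembering the exact field order of struct arguments. As a hedged alternative sketch (not what the patch does), C99 designated initializers keep the defaults self-documenting and tolerant of field reordering; demo_defaults is an invented name and every field left unnamed starts as 0/NULL/false, matching the current defaults.

    static struct arguments demo_defaults = {
        .user        = "root",
    #ifdef _TD_POWER_
        .password    = "powerdb",
    #else
        .password    = "taosdata",
    #endif
        .end_time    = INT64_MAX,
        .data_batch  = 1,
        .table_batch = 1,
        .thread_num  = 5,
        /* host, port, cversion, outpath, inpath, flags, ... default to 0/NULL/false/"" */
    };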
if (dbInfos == NULL) return; + for (int i = 0; i < 128; i++) tfree(dbInfos[i]); + tfree(dbInfos); +} + +// check table is normal table or super table +int taosGetTableRecordInfo(char *table, STableRecordInfo *pTableRecordInfo, TAOS *taosCon) { + TAOS_ROW row = NULL; + bool isSet = false; + TAOS_RES *result = NULL; + + memset(pTableRecordInfo, 0, sizeof(STableRecordInfo)); + + char* tempCommand = (char *)malloc(COMMAND_SIZE); + if (tempCommand == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return -1; + } + + sprintf(tempCommand, "show tables like %s", table); + + result = taos_query(taosCon, tempCommand); + int32_t code = taos_errno(result); + + if (code != 0) { + fprintf(stderr, "failed to run command %s\n", tempCommand); + free(tempCommand); + taos_free_result(result); + return -1; + } + + TAOS_FIELD *fields = taos_fetch_fields(result); + + while ((row = taos_fetch_row(result)) != NULL) { + isSet = true; + pTableRecordInfo->isMetric = false; + strncpy(pTableRecordInfo->tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], + fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); + strncpy(pTableRecordInfo->tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], + fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes); + break; + } + + taos_free_result(result); + result = NULL; + + if (isSet) { + free(tempCommand); + return 0; + } + + sprintf(tempCommand, "show stables like %s", table); + + result = taos_query(taosCon, tempCommand); + code = taos_errno(result); + + if (code != 0) { + fprintf(stderr, "failed to run command %s\n", tempCommand); + free(tempCommand); + taos_free_result(result); + return -1; + } + + while ((row = taos_fetch_row(result)) != NULL) { + isSet = true; + pTableRecordInfo->isMetric = true; + tstrncpy(pTableRecordInfo->tableRecord.metric, table, TSDB_TABLE_NAME_LEN); + break; + } + + taos_free_result(result); + result = NULL; + + if (isSet) { + free(tempCommand); + return 0; + } + fprintf(stderr, "invalid table/metric %s\n", table); + free(tempCommand); + return -1; +} + + +int32_t taosSaveAllNormalTableToTempFile(TAOS *taosCon, char*meter, char* metric, int* fd) { + STableRecord tableRecord; + + if (-1 == *fd) { + *fd = open(".tables.tmp.0", O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (*fd == -1) { + fprintf(stderr, "failed to open temp file: .tables.tmp.0\n"); + return -1; + } + } + + memset(&tableRecord, 0, sizeof(STableRecord)); + tstrncpy(tableRecord.name, meter, TSDB_TABLE_NAME_LEN); + tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN); + + taosWrite(*fd, &tableRecord, sizeof(STableRecord)); + return 0; +} + + +int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct arguments *arguments, int32_t* totalNumOfThread) { + TAOS_ROW row; + int fd = -1; + STableRecord tableRecord; + + char* tmpCommand = (char *)malloc(COMMAND_SIZE); + if (tmpCommand == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return -1; + } + + sprintf(tmpCommand, "select tbname from %s", metric); + + TAOS_RES *result = taos_query(taosCon, tmpCommand); + int32_t code = taos_errno(result); + if (code != 0) { + fprintf(stderr, "failed to run command %s\n", tmpCommand); + free(tmpCommand); + taos_free_result(result); + return -1; + } + + TAOS_FIELD *fields = taos_fetch_fields(result); + + int32_t numOfTable = 0; + int32_t numOfThread = *totalNumOfThread; + char tmpFileName[TSDB_FILENAME_LEN + 1]; + while ((row = taos_fetch_row(result)) != NULL) { + if (0 == numOfTable) { + memset(tmpFileName, 0, TSDB_FILENAME_LEN); + 
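taosSaveAllNormalTableToTempFile() above and the loop below share one convention: each .tables.tmp.N file is a flat sequence of fixed-size STableRecord structs, appended with write() and later replayed with read() until a short read. A minimal sketch of that round trip, with demo_* names used only for illustration and error handling trimmed:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    static int demo_append_record(int fd, const char *name, const char *metric) {
      STableRecord rec;
      memset(&rec, 0, sizeof(rec));
      strncpy(rec.name, name, sizeof(rec.name) - 1);
      if (metric != NULL) strncpy(rec.metric, metric, sizeof(rec.metric) - 1);
      return (write(fd, &rec, sizeof(rec)) == (ssize_t)sizeof(rec)) ? 0 : -1;
    }

    static void demo_replay_records(int fd) {
      STableRecord rec;
      lseek(fd, 0, SEEK_SET);
      while (read(fd, &rec, sizeof(rec)) == (ssize_t)sizeof(rec)) {
        /* one record == one table to dump; rec.metric is non-empty for sub-tables */
      }
    }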
sprintf(tmpFileName, ".tables.tmp.%d", numOfThread); + fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (fd == -1) { + fprintf(stderr, "failed to open temp file: %s\n", tmpFileName); + taos_free_result(result); + for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) { + sprintf(tmpFileName, ".tables.tmp.%d", loopCnt); + (void)remove(tmpFileName); + } + free(tmpCommand); + return -1; + } + + numOfThread++; + } + + memset(&tableRecord, 0, sizeof(STableRecord)); + tstrncpy(tableRecord.name, (char *)row[0], fields[0].bytes); + tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN); + + taosWrite(fd, &tableRecord, sizeof(STableRecord)); + + numOfTable++; + + if (numOfTable >= arguments->table_batch) { + numOfTable = 0; + close(fd); + fd = -1; + } + } + + if (fd >= 0) { + close(fd); + fd = -1; + } + + taos_free_result(result); + + *totalNumOfThread = numOfThread; + + free(tmpCommand); + + return 0; +} + +int taosDumpOut(struct arguments *arguments) { + TAOS *taos = NULL; + TAOS_RES *result = NULL; + char *command = NULL; + + TAOS_ROW row; + FILE *fp = NULL; + int32_t count = 0; + STableRecordInfo tableRecordInfo; + + char tmpBuf[TSDB_FILENAME_LEN+9] = {0}; + if (arguments->outpath[0] != 0) { + sprintf(tmpBuf, "%s/dbs.sql", arguments->outpath); + } else { + sprintf(tmpBuf, "dbs.sql"); + } + + fp = fopen(tmpBuf, "w"); + if (fp == NULL) { + fprintf(stderr, "failed to open file %s\n", tmpBuf); + return -1; + } + + dbInfos = (SDbInfo **)calloc(128, sizeof(SDbInfo *)); + if (dbInfos == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + goto _exit_failure; + } + + command = (char *)malloc(COMMAND_SIZE); + if (command == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + goto _exit_failure; + } + + /* Connect to server */ + taos = taos_connect(arguments->host, arguments->user, arguments->password, NULL, arguments->port); + if (taos == NULL) { + fprintf(stderr, "failed to connect to TDengine server\n"); + goto _exit_failure; + } + + /* --------------------------------- Main Code -------------------------------- */ + /* if (arguments->databases || arguments->all_databases) { // dump part of databases or all databases */ + /* */ + taosDumpCharset(fp); + + sprintf(command, "show databases"); + result = taos_query(taos, command); + int32_t code = taos_errno(result); + + if (code != 0) { + fprintf(stderr, "failed to run command: %s, reason: %s\n", command, taos_errstr(taos)); + goto _exit_failure; + } + + TAOS_FIELD *fields = taos_fetch_fields(result); + + while ((row = taos_fetch_row(result)) != NULL) { + // sys database name : 'monitor', but subsequent version changed to 'log' + if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "monitor", fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0 && + (!arguments->allow_sys)) + continue; + + if (arguments->databases) { // input multi dbs + for (int i = 0; arguments->arg_list[i]; i++) { + if (strncasecmp(arguments->arg_list[i], (char *)row[TSDB_SHOW_DB_NAME_INDEX], + fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) + goto _dump_db_point; + } + continue; + } else if (!arguments->all_databases) { // only input one db + if (strncasecmp(arguments->arg_list[0], (char *)row[TSDB_SHOW_DB_NAME_INDEX], + fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) + goto _dump_db_point; + else + continue; + } + + _dump_db_point: + + dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo)); + if (dbInfos[count] == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + goto _exit_failure; + } + + strncpy(dbInfos[count]->name, (char 
*)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes); + #if 0 + dbInfos[count]->replica = (int)(*((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX])); + dbInfos[count]->days = (int)(*((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX])); + dbInfos[count]->keep = *((int *)row[TSDB_SHOW_DB_KEEP_INDEX]); + dbInfos[count]->tables = *((int *)row[TSDB_SHOW_DB_TABLES_INDEX]); + dbInfos[count]->rows = *((int *)row[TSDB_SHOW_DB_ROWS_INDEX]); + dbInfos[count]->cache = *((int *)row[TSDB_SHOW_DB_CACHE_INDEX]); + dbInfos[count]->ablocks = *((int *)row[TSDB_SHOW_DB_ABLOCKS_INDEX]); + dbInfos[count]->tblocks = (int)(*((int16_t *)row[TSDB_SHOW_DB_TBLOCKS_INDEX])); + dbInfos[count]->ctime = *((int *)row[TSDB_SHOW_DB_CTIME_INDEX]); + dbInfos[count]->clog = (int)(*((int8_t *)row[TSDB_SHOW_DB_CLOG_INDEX])); + dbInfos[count]->comp = (int)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); +#endif + + count++; + + if (arguments->databases) { + if (count > arguments->arg_list_len) break; + + } else if (!arguments->all_databases) { + if (count >= 1) break; + } + } + + if (count == 0) { + fprintf(stderr, "No databases valid to dump\n"); + goto _exit_failure; + } + + if (arguments->databases || arguments->all_databases) { // case: taosdump --databases dbx dby ... OR taosdump --all-databases + for (int i = 0; i < count; i++) { + taosDumpDb(dbInfos[i], arguments, fp, taos); + } + } else { + if (arguments->arg_list_len == 1) { // case: taosdump + taosDumpDb(dbInfos[0], arguments, fp, taos); + } else { // case: taosdump tablex tabley ... + taosDumpCreateDbClause(dbInfos[0], arguments->with_property, fp); + + sprintf(command, "use %s", dbInfos[0]->name); + + result = taos_query(taos, command); + int32_t code = taos_errno(result); + if (code != 0) { + fprintf(stderr, "invalid database %s\n", dbInfos[0]->name); + goto _exit_failure; + } + + fprintf(fp, "USE %s;\n\n", dbInfos[0]->name); + + int32_t totalNumOfThread = 1; // 0: all normal talbe into .tables.tmp.0 + int normalTblFd = -1; + int32_t retCode; + for (int i = 1; arguments->arg_list[i]; i++) { + if (taosGetTableRecordInfo(arguments->arg_list[i], &tableRecordInfo, taos) < 0) { + fprintf(stderr, "input the invalide table %s\n", arguments->arg_list[i]); + continue; + } + + if (tableRecordInfo.isMetric) { // dump all table of this metric + (void)taosDumpStable(tableRecordInfo.tableRecord.metric, fp, taos); + retCode = taosSaveTableOfMetricToTempFile(taos, tableRecordInfo.tableRecord.metric, arguments, &totalNumOfThread); + } else { + if (tableRecordInfo.tableRecord.metric[0] != '\0') { // dump this sub table and it's metric + (void)taosDumpStable(tableRecordInfo.tableRecord.metric, fp, taos); + } + retCode = taosSaveAllNormalTableToTempFile(taos, tableRecordInfo.tableRecord.name, tableRecordInfo.tableRecord.metric, &normalTblFd); + } + + if (retCode < 0) { + if (-1 != normalTblFd){ + taosClose(normalTblFd); + } + goto _clean_tmp_file; + } + } + + if (-1 != normalTblFd){ + taosClose(normalTblFd); + } + + // start multi threads to dumpout + taosStartDumpOutWorkThreads(arguments, totalNumOfThread, dbInfos[0]->name); + + char tmpFileName[TSDB_FILENAME_LEN + 1]; + _clean_tmp_file: + for (int loopCnt = 0; loopCnt < totalNumOfThread; loopCnt++) { + sprintf(tmpFileName, ".tables.tmp.%d", loopCnt); + remove(tmpFileName); + } + } + } + + /* Close the handle and return */ + fclose(fp); + taos_close(taos); + taos_free_result(result); + tfree(command); + taosFreeDbInfos(); + fprintf(stderr, "dump out rows: %" PRId64 "\n", totalDumpOutRows); + return 0; + +_exit_failure: + fclose(fp); + 
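taosDumpOut() and the helpers around it format every statement with sprintf() into fixed COMMAND_SIZE buffers ("show tables like %s", "select tbname from %s", and so on), which trusts table and database names to stay short. As a hedged hardening sketch, not what the patch does, snprintf() with a truncation check keeps each call site a single line; demo_format_cmd is an invented helper name.

    #include <stdio.h>

    /* Returns 0 on success, -1 if the formatted command would not fit in buf. */
    static int demo_format_cmd(char *buf, size_t cap, const char *fmt, const char *arg) {
      int n = snprintf(buf, cap, fmt, arg);
      if (n < 0 || (size_t)n >= cap) {
        fprintf(stderr, "command too long for buffer of %zu bytes\n", cap);
        return -1;
      }
      return 0;
    }

    /* usage sketch:
     *   if (demo_format_cmd(tempCommand, COMMAND_SIZE, "show tables like %s", table) < 0) return -1;
     */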
taos_close(taos); + taos_free_result(result); + tfree(command); + taosFreeDbInfos(); + fprintf(stderr, "dump out rows: %" PRId64 "\n", totalDumpOutRows); + return -1; +} + +int taosGetTableDes(char *table, STableDef *tableDes, TAOS* taosCon, bool isSuperTable) { + TAOS_ROW row = NULL; + TAOS_RES *tmpResult = NULL; + int count = 0; + + char* tempCommand = (char *)malloc(COMMAND_SIZE); + if (tempCommand == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return -1; + } + + char* tbuf = (char *)malloc(COMMAND_SIZE); + if (tbuf == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + free(tempCommand); + return -1; + } + + sprintf(tempCommand, "describe %s", table); + + tmpResult = taos_query(taosCon, tempCommand); + int32_t code = taos_errno(tmpResult); + if (code != 0) { + fprintf(stderr, "failed to run command %s\n", tempCommand); + free(tempCommand); + free(tbuf); + taos_free_result(tmpResult); + return -1; + } + + TAOS_FIELD *fields = taos_fetch_fields(tmpResult); + + tstrncpy(tableDes->name, table, TSDB_COL_NAME_LEN); + + while ((row = taos_fetch_row(tmpResult)) != NULL) { + strncpy(tableDes->cols[count].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], + fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); + strncpy(tableDes->cols[count].type, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes); + tableDes->cols[count].length = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); + strncpy(tableDes->cols[count].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], + fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes); + + count++; + } + + taos_free_result(tmpResult); + tmpResult = NULL; + + if (isSuperTable) { + free(tempCommand); + free(tbuf); + return count; + } + + // if chidl-table have tag, using select tagName from table to get tagValue + for (int i = 0 ; i < count; i++) { + if (strcmp(tableDes->cols[i].note, "TAG") != 0) continue; + + + sprintf(tempCommand, "select %s from %s", tableDes->cols[i].field, table); + + tmpResult = taos_query(taosCon, tempCommand); + code = taos_errno(tmpResult); + if (code != 0) { + fprintf(stderr, "failed to run command %s\n", tempCommand); + free(tempCommand); + free(tbuf); + taos_free_result(tmpResult); + return -1; + } + + fields = taos_fetch_fields(tmpResult); + + row = taos_fetch_row(tmpResult); + if (NULL == row) { + fprintf(stderr, " fetch failed to run command %s\n", tempCommand); + free(tempCommand); + free(tbuf); + taos_free_result(tmpResult); + return -1; + } + + if (row[0] == NULL) { + sprintf(tableDes->cols[i].note, "%s", "NULL"); + taos_free_result(tmpResult); + tmpResult = NULL; + continue; + } + + int32_t* length = taos_fetch_lengths(tmpResult); + + //int32_t* length = taos_fetch_lengths(tmpResult); + switch (fields[0].type) { + case TSDB_DATA_TYPE_BOOL: + sprintf(tableDes->cols[i].note, "%d", ((((int32_t)(*((char *)row[0]))) == 1) ? 
1 : 0)); + break; + case TSDB_DATA_TYPE_TINYINT: + sprintf(tableDes->cols[i].note, "%d", *((int8_t *)row[0])); + break; + case TSDB_DATA_TYPE_SMALLINT: + sprintf(tableDes->cols[i].note, "%d", *((int16_t *)row[0])); + break; + case TSDB_DATA_TYPE_INT: + sprintf(tableDes->cols[i].note, "%d", *((int32_t *)row[0])); + break; + case TSDB_DATA_TYPE_BIGINT: + sprintf(tableDes->cols[i].note, "%" PRId64 "", *((int64_t *)row[0])); + break; + case TSDB_DATA_TYPE_FLOAT: + sprintf(tableDes->cols[i].note, "%f", GET_FLOAT_VAL(row[0])); + break; + case TSDB_DATA_TYPE_DOUBLE: + sprintf(tableDes->cols[i].note, "%f", GET_DOUBLE_VAL(row[0])); + break; + case TSDB_DATA_TYPE_BINARY: + memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note)); + tableDes->cols[i].note[0] = '\''; + converStringToReadable((char *)row[0], length[0], tbuf, COMMAND_SIZE); + char* pstr = stpcpy(&(tableDes->cols[i].note[1]), tbuf); + *(pstr++) = '\''; + break; + case TSDB_DATA_TYPE_NCHAR: + memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note)); + convertNCharToReadable((char *)row[0], length[0], tbuf, COMMAND_SIZE); + sprintf(tableDes->cols[i].note, "\'%s\'", tbuf); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + sprintf(tableDes->cols[i].note, "%" PRId64 "", *(int64_t *)row[0]); + #if 0 + if (!arguments->mysqlFlag) { + sprintf(tableDes->cols[i].note, "%" PRId64 "", *(int64_t *)row[0]); + } else { + char buf[64] = "\0"; + int64_t ts = *((int64_t *)row[0]); + time_t tt = (time_t)(ts / 1000); + struct tm *ptm = localtime(&tt); + strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm); + sprintf(tableDes->cols[i].note, "\'%s.%03d\'", buf, (int)(ts % 1000)); + } + #endif + break; + default: + break; + } + + taos_free_result(tmpResult); + tmpResult = NULL; + } + + free(tempCommand); + free(tbuf); + + return count; +} + +int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon) { + int count = 0; + + STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); + + if (metric != NULL && metric[0] != '\0') { // dump table schema which is created by using super table + /* + count = taosGetTableDes(metric, tableDes, taosCon); + + if (count < 0) { + free(tableDes); + return -1; + } + + taosDumpCreateTableClause(tableDes, count, fp); + + memset(tableDes, 0, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); + */ + + count = taosGetTableDes(table, tableDes, taosCon, false); + + if (count < 0) { + free(tableDes); + return -1; + } + + // create child-table using super-table + taosDumpCreateMTableClause(tableDes, metric, count, fp); + + } else { // dump table definition + count = taosGetTableDes(table, tableDes, taosCon, false); + + if (count < 0) { + free(tableDes); + return -1; + } + + // create normal-table or super-table + taosDumpCreateTableClause(tableDes, count, fp); + } + + free(tableDes); + + return taosDumpTableData(fp, table, arguments, taosCon); +} + +void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) { + + char* tmpCommand = (char *)malloc(COMMAND_SIZE); + if (tmpCommand == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return; + } + + char *pstr = tmpCommand; + + pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s", dbInfo->name); + if (isDumpProperty) { + pstr += sprintf(pstr, + " REPLICA %d DAYS %d KEEP %d TABLES %d ROWS %d CACHE %d ABLOCKS %d TBLOCKS %d CTIME %d CLOG %d COMP %d", + dbInfo->replica, dbInfo->days, dbInfo->keep, dbInfo->tables, dbInfo->rows, dbInfo->cache, + dbInfo->ablocks, 
dbInfo->tblocks, dbInfo->ctime, dbInfo->clog, dbInfo->comp); + } + + pstr += sprintf(pstr, ";"); + + fprintf(fp, "%s\n\n", tmpCommand); + free(tmpCommand); +} + +void* taosDumpOutWorkThreadFp(void *arg) +{ + SThreadParaObj *pThread = (SThreadParaObj*)arg; + STableRecord tableRecord; + int fd; + + char tmpFileName[TSDB_FILENAME_LEN*4] = {0}; + sprintf(tmpFileName, ".tables.tmp.%d", pThread->threadIndex); + fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (fd == -1) { + fprintf(stderr, "taosDumpTableFp() failed to open temp file: %s\n", tmpFileName); + return NULL; + } + + FILE *fp = NULL; + memset(tmpFileName, 0, TSDB_FILENAME_LEN + 128); + + if (tsArguments.outpath[0] != 0) { + sprintf(tmpFileName, "%s/%s.tables.%d.sql", tsArguments.outpath, pThread->dbName, pThread->threadIndex); + } else { + sprintf(tmpFileName, "%s.tables.%d.sql", pThread->dbName, pThread->threadIndex); + } + + fp = fopen(tmpFileName, "w"); + if (fp == NULL) { + fprintf(stderr, "failed to open file %s\n", tmpFileName); + close(fd); + return NULL; + } + + memset(tmpFileName, 0, TSDB_FILENAME_LEN); + sprintf(tmpFileName, "use %s", pThread->dbName); + + TAOS_RES* tmpResult = taos_query(pThread->taosCon, tmpFileName); + int32_t code = taos_errno(tmpResult); + if (code != 0) { + fprintf(stderr, "invalid database %s\n", pThread->dbName); + taos_free_result(tmpResult); + fclose(fp); + close(fd); + return NULL; + } + + fprintf(fp, "USE %s;\n\n", pThread->dbName); + while (1) { + ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord)); + if (readLen <= 0) break; + taosDumpTable(tableRecord.name, tableRecord.metric, &tsArguments, fp, pThread->taosCon); + } + + taos_free_result(tmpResult); + close(fd); + fclose(fp); + + return NULL; +} + +static void taosStartDumpOutWorkThreads(struct arguments* args, int32_t numOfThread, char *dbName) +{ + pthread_attr_t thattr; + SThreadParaObj *threadObj = (SThreadParaObj *)calloc(numOfThread, sizeof(SThreadParaObj)); + for (int t = 0; t < numOfThread; ++t) { + SThreadParaObj *pThread = threadObj + t; + pThread->threadIndex = t; + pThread->totalThreads = numOfThread; + tstrncpy(pThread->dbName, dbName, TSDB_TABLE_NAME_LEN); + pThread->taosCon = taos_connect(args->host, args->user, args->password, NULL, args->port); + + if (pThread->taosCon == NULL) { + fprintf(stderr, "ERROR: thread:%d failed connect to TDengine, error:%s\n", pThread->threadIndex, taos_errstr(pThread->taosCon)); + exit(0); + } + + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + + if (pthread_create(&(pThread->threadID), &thattr, taosDumpOutWorkThreadFp, (void*)pThread) != 0) { + fprintf(stderr, "ERROR: thread:%d failed to start\n", pThread->threadIndex); + exit(0); + } + } + + for (int32_t t = 0; t < numOfThread; ++t) { + pthread_join(threadObj[t].threadID, NULL); + } + + for (int32_t t = 0; t < numOfThread; ++t) { + taos_close(threadObj[t].taosCon); + } + free(threadObj); +} + + + +int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon) { + int count = 0; + + STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); + if (NULL == tableDes) { + fprintf(stderr, "failed to allocate memory\n"); + exit(-1); + } + + count = taosGetTableDes(table, tableDes, taosCon, true); + + if (count < 0) { + free(tableDes); + fprintf(stderr, "failed to get stable schema\n"); + exit(-1); + } + + taosDumpCreateTableClause(tableDes, count, fp); + + free(tableDes); + return 0; +} + + +int32_t 
taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp) +{ + TAOS_ROW row; + int fd = -1; + STableRecord tableRecord; + + char* tmpCommand = (char *)malloc(COMMAND_SIZE); + if (tmpCommand == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + exit(-1); + } + + sprintf(tmpCommand, "use %s", dbName); + + TAOS_RES* tmpResult = taos_query(taosCon, tmpCommand); + int32_t code = taos_errno(tmpResult); + if (code != 0) { + fprintf(stderr, "invalid database %s, error: %s\n", dbName, taos_errstr(taosCon)); + free(tmpCommand); + taos_free_result(tmpResult); + exit(-1); + } + + taos_free_result(tmpResult); + + sprintf(tmpCommand, "show stables"); + + tmpResult = taos_query(taosCon, tmpCommand); + code = taos_errno(tmpResult); + if (code != 0) { + fprintf(stderr, "failed to run command %s, error: %s\n", tmpCommand, taos_errstr(taosCon)); + free(tmpCommand); + taos_free_result(tmpResult); + exit(-1); + } + + TAOS_FIELD *fields = taos_fetch_fields(tmpResult); + + char tmpFileName[TSDB_FILENAME_LEN + 1]; + memset(tmpFileName, 0, TSDB_FILENAME_LEN); + sprintf(tmpFileName, ".stables.tmp"); + fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (fd == -1) { + fprintf(stderr, "failed to open temp file: %s\n", tmpFileName); + taos_free_result(tmpResult); + free(tmpCommand); + (void)remove(".stables.tmp"); + exit(-1); + } + + while ((row = taos_fetch_row(tmpResult)) != NULL) { + memset(&tableRecord, 0, sizeof(STableRecord)); + strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); + taosWrite(fd, &tableRecord, sizeof(STableRecord)); + } + + taos_free_result(tmpResult); + (void)lseek(fd, 0, SEEK_SET); + + while (1) { + ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord)); + if (readLen <= 0) break; + + (void)taosDumpStable(tableRecord.name, fp, taosCon); + } + + close(fd); + (void)remove(".stables.tmp"); + + free(tmpCommand); + return 0; +} + + +int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *taosCon) { + TAOS_ROW row; + int fd = -1; + STableRecord tableRecord; + + taosDumpCreateDbClause(dbInfo, arguments->with_property, fp); + + char* tmpCommand = (char *)malloc(COMMAND_SIZE); + if (tmpCommand == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return -1; + } + + sprintf(tmpCommand, "use %s", dbInfo->name); + + TAOS_RES* tmpResult = taos_query(taosCon, tmpCommand); + int32_t code = taos_errno(tmpResult); + if (code != 0) { + fprintf(stderr, "invalid database %s\n", dbInfo->name); + free(tmpCommand); + taos_free_result(tmpResult); + return -1; + } + taos_free_result(tmpResult); + + fprintf(fp, "USE %s;\n\n", dbInfo->name); + + (void)taosDumpCreateSuperTableClause(taosCon, dbInfo->name, fp); + + sprintf(tmpCommand, "show tables"); + + tmpResult = taos_query(taosCon, tmpCommand); + code = taos_errno(tmpResult); + if (code != 0) { + fprintf(stderr, "failed to run command %s\n", tmpCommand); + free(tmpCommand); + taos_free_result(tmpResult); + return -1; + } + + TAOS_FIELD *fields = taos_fetch_fields(tmpResult); + + int32_t numOfTable = 0; + int32_t numOfThread = 0; + char tmpFileName[TSDB_FILENAME_LEN + 1]; + while ((row = taos_fetch_row(tmpResult)) != NULL) { + if (0 == numOfTable) { + memset(tmpFileName, 0, TSDB_FILENAME_LEN); + sprintf(tmpFileName, ".tables.tmp.%d", numOfThread); + fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (fd == -1) { + fprintf(stderr, "failed to open temp file: %s\n", tmpFileName); + 
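taosStartDumpOutWorkThreads() above joins its workers, but taosDumpOutWorkThreadFp() can only report problems to stderr and return NULL, and connection failures call exit(0), which reports success to the shell. A hedged sketch of one way to surface per-thread failures is shown below: an extra status field in the thread argument, checked after pthread_join(). The demo_* names are illustrative and not part of the patch.

    #include <pthread.h>

    typedef struct {
      pthread_t threadID;
      int32_t   threadIndex;
      int32_t   status;       /* 0 = ok, negative = failed; set by the worker */
    } SDemoThreadArg;

    static void *demo_worker(void *arg) {
      SDemoThreadArg *p = (SDemoThreadArg *)arg;
      p->status = 0;
      /* ... dump work; on any error set p->status = -1 and return ... */
      return NULL;
    }

    static int demo_run_workers(SDemoThreadArg *workers, int n) {
      int failed = 0;
      for (int t = 0; t < n; ++t) {
        pthread_create(&workers[t].threadID, NULL, demo_worker, &workers[t]);
      }
      for (int t = 0; t < n; ++t) {
        pthread_join(workers[t].threadID, NULL);
        if (workers[t].status != 0) failed++;
      }
      return (failed == 0) ? 0 : -1;   /* caller can then exit(EXIT_FAILURE) instead of exit(0) */
    }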
taos_free_result(tmpResult); + for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) { + sprintf(tmpFileName, ".tables.tmp.%d", loopCnt); + (void)remove(tmpFileName); + } + free(tmpCommand); + return -1; + } + + numOfThread++; + } + + memset(&tableRecord, 0, sizeof(STableRecord)); + tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); + tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes); + + taosWrite(fd, &tableRecord, sizeof(STableRecord)); + + numOfTable++; + + if (numOfTable >= arguments->table_batch) { + numOfTable = 0; + close(fd); + fd = -1; + } + } + + if (fd >= 0) { + close(fd); + fd = -1; + } + + taos_free_result(tmpResult); + + // start multi threads to dumpout + taosStartDumpOutWorkThreads(arguments, numOfThread, dbInfo->name); + for (int loopCnt = 0; loopCnt < numOfThread; loopCnt++) { + sprintf(tmpFileName, ".tables.tmp.%d", loopCnt); + (void)remove(tmpFileName); + } + + free(tmpCommand); + + return 0; +} + +void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp) { + int counter = 0; + int count_temp = 0; + + char* tmpBuf = (char *)malloc(COMMAND_SIZE); + if (tmpBuf == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return; + } + + char* pstr = tmpBuf; + + pstr += sprintf(tmpBuf, "CREATE TABLE IF NOT EXISTS %s", tableDes->name); + + for (; counter < numOfCols; counter++) { + if (tableDes->cols[counter].note[0] != '\0') break; + + if (counter == 0) { + pstr += sprintf(pstr, " (%s %s", tableDes->cols[counter].field, tableDes->cols[counter].type); + } else { + pstr += sprintf(pstr, ", %s %s", tableDes->cols[counter].field, tableDes->cols[counter].type); + } + + if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || + strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { + pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length); + } + } + + count_temp = counter; + + for (; counter < numOfCols; counter++) { + if (counter == count_temp) { + pstr += sprintf(pstr, ") TAGS (%s %s", tableDes->cols[counter].field, tableDes->cols[counter].type); + } else { + pstr += sprintf(pstr, ", %s %s", tableDes->cols[counter].field, tableDes->cols[counter].type); + } + + if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || + strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { + pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length); + } + } + + pstr += sprintf(pstr, ");"); + + fprintf(fp, "%s\n", tmpBuf); + + free(tmpBuf); +} + +void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp) { + int counter = 0; + int count_temp = 0; + + char* tmpBuf = (char *)malloc(COMMAND_SIZE); + if (tmpBuf == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return; + } + + char *pstr = NULL; + pstr = tmpBuf; + + pstr += sprintf(tmpBuf, "CREATE TABLE IF NOT EXISTS %s USING %s TAGS (", tableDes->name, metric); + + for (; counter < numOfCols; counter++) { + if (tableDes->cols[counter].note[0] != '\0') break; + } + + assert(counter < numOfCols); + count_temp = counter; + + for (; counter < numOfCols; counter++) { + if (counter != count_temp) { + if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || + strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { + //pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].note); + pstr += sprintf(pstr, ", %s", tableDes->cols[counter].note); + } else { + pstr += sprintf(pstr, ", %s", tableDes->cols[counter].note); + } + 
} else { + if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || + strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { + //pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].note); + pstr += sprintf(pstr, "%s", tableDes->cols[counter].note); + } else { + pstr += sprintf(pstr, "%s", tableDes->cols[counter].note); + } + /* pstr += sprintf(pstr, "%s", tableDes->cols[counter].note); */ + } + + /* if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || strcasecmp(tableDes->cols[counter].type, "nchar") + * == 0) { */ + /* pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length); */ + /* } */ + } + + pstr += sprintf(pstr, ");"); + + fprintf(fp, "%s\n", tmpBuf); + free(tmpBuf); +} + +int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon) { + /* char temp[MAX_COMMAND_SIZE] = "\0"; */ + int64_t totalRows = 0; + int count = 0; + char *pstr = NULL; + TAOS_ROW row = NULL; + int numFields = 0; + char *tbuf = NULL; + + char* tmpCommand = (char *)calloc(1, COMMAND_SIZE); + if (tmpCommand == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return -1; + } + + char* tmpBuffer = (char *)calloc(1, COMMAND_SIZE); + if (tmpBuffer == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + free(tmpCommand); + return -1; + } + + pstr = tmpBuffer; + + if (arguments->schemaonly) { + free(tmpCommand); + free(tmpBuffer); + return 0; + } + + sprintf(tmpCommand, + "select * from %s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc", + tbname, + arguments->start_time, + arguments->end_time); + + TAOS_RES* tmpResult = taos_query(taosCon, tmpCommand); + int32_t code = taos_errno(tmpResult); + if (code != 0) { + fprintf(stderr, "failed to run command %s, reason: %s\n", tmpCommand, taos_errstr(taosCon)); + free(tmpCommand); + free(tmpBuffer); + taos_free_result(tmpResult); + return -1; + } + + numFields = taos_field_count(tmpResult); + assert(numFields > 0); + TAOS_FIELD *fields = taos_fetch_fields(tmpResult); + tbuf = (char *)malloc(COMMAND_SIZE); + if (tbuf == NULL) { + fprintf(stderr, "No enough memory\n"); + free(tmpCommand); + free(tmpBuffer); + taos_free_result(tmpResult); + return -1; + } + + char sqlStr[8] = "\0"; + if (arguments->mysqlFlag) { + sprintf(sqlStr, "INSERT"); + } else { + sprintf(sqlStr, "IMPORT"); + } + + int rowFlag = 0; + count = 0; + while ((row = taos_fetch_row(tmpResult)) != NULL) { + pstr = tmpBuffer; + + int32_t* length = taos_fetch_lengths(tmpResult); // act len + + if (count == 0) { + pstr += sprintf(pstr, "%s INTO %s VALUES (", sqlStr, tbname); + } else { + if (arguments->mysqlFlag) { + if (0 == rowFlag) { + pstr += sprintf(pstr, "("); + rowFlag++; + } else { + pstr += sprintf(pstr, ", ("); + } + } else { + pstr += sprintf(pstr, "("); + } + } + + for (int col = 0; col < numFields; col++) { + if (col != 0) pstr += sprintf(pstr, ", "); + + if (row[col] == NULL) { + pstr += sprintf(pstr, "NULL"); + continue; + } + + switch (fields[col].type) { + case TSDB_DATA_TYPE_BOOL: + pstr += sprintf(pstr, "%d", ((((int32_t)(*((char *)row[col]))) == 1) ? 
1 : 0)); + break; + case TSDB_DATA_TYPE_TINYINT: + pstr += sprintf(pstr, "%d", *((int8_t *)row[col])); + break; + case TSDB_DATA_TYPE_SMALLINT: + pstr += sprintf(pstr, "%d", *((int16_t *)row[col])); + break; + case TSDB_DATA_TYPE_INT: + pstr += sprintf(pstr, "%d", *((int32_t *)row[col])); + break; + case TSDB_DATA_TYPE_BIGINT: + pstr += sprintf(pstr, "%" PRId64 "", *((int64_t *)row[col])); + break; + case TSDB_DATA_TYPE_FLOAT: + pstr += sprintf(pstr, "%f", GET_FLOAT_VAL(row[col])); + break; + case TSDB_DATA_TYPE_DOUBLE: + pstr += sprintf(pstr, "%f", GET_DOUBLE_VAL(row[col])); + break; + case TSDB_DATA_TYPE_BINARY: + *(pstr++) = '\''; + converStringToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE); + pstr = stpcpy(pstr, tbuf); + *(pstr++) = '\''; + break; + case TSDB_DATA_TYPE_NCHAR: + convertNCharToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE); + pstr += sprintf(pstr, "\'%s\'", tbuf); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + if (!arguments->mysqlFlag) { + pstr += sprintf(pstr, "%" PRId64 "", *(int64_t *)row[col]); + } else { + char buf[64] = "\0"; + int64_t ts = *((int64_t *)row[col]); + time_t tt = (time_t)(ts / 1000); + struct tm *ptm = localtime(&tt); + strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm); + pstr += sprintf(pstr, "\'%s.%03d\'", buf, (int)(ts % 1000)); + } + break; + default: + break; + } + } + + pstr += sprintf(pstr, ") "); + + totalRows++; + count++; + fprintf(fp, "%s", tmpBuffer); + + if (count >= arguments->data_batch) { + fprintf(fp, ";\n"); + count = 0; + } //else { + //fprintf(fp, "\\\n"); + //} + } + + atomic_add_fetch_64(&totalDumpOutRows, totalRows); + + fprintf(fp, "\n"); + + if (tbuf) { + free(tbuf); + } + + taos_free_result(tmpResult); + tmpResult = NULL; + free(tmpCommand); + free(tmpBuffer); + return 0; +} + +int taosCheckParam(struct arguments *arguments) { + if (arguments->all_databases && arguments->databases) { + fprintf(stderr, "conflict option --all-databases and --databases\n"); + return -1; + } + + if (arguments->start_time > arguments->end_time) { + fprintf(stderr, "start time is larger than end time\n"); + return -1; + } + + if (arguments->arg_list_len == 0) { + if ((!arguments->all_databases) && (!arguments->isDumpIn)) { + fprintf(stderr, "taosdump requires parameters\n"); + return -1; + } + } +/* + if (arguments->isDumpIn && (strcmp(arguments->outpath, DEFAULT_DUMP_FILE) != 0)) { + fprintf(stderr, "duplicate parameter input and output file path\n"); + return -1; + } +*/ + if (!arguments->isDumpIn && arguments->encode != NULL) { + fprintf(stderr, "invalid option in dump out\n"); + return -1; + } + + if (arguments->table_batch <= 0) { + fprintf(stderr, "invalid option in dump out\n"); + return -1; + } + + return 0; +} + +bool isEmptyCommand(char *cmd) { + char *pchar = cmd; + + while (*pchar != '\0') { + if (*pchar != ' ') return false; + pchar++; + } + + return true; +} + +void taosReplaceCtrlChar(char *str) { + _Bool ctrlOn = false; + char *pstr = NULL; + + for (pstr = str; *str != '\0'; ++str) { + if (ctrlOn) { + switch (*str) { + case 'n': + *pstr = '\n'; + pstr++; + break; + case 'r': + *pstr = '\r'; + pstr++; + break; + case 't': + *pstr = '\t'; + pstr++; + break; + case '\\': + *pstr = '\\'; + pstr++; + break; + case '\'': + *pstr = '\''; + pstr++; + break; + default: + break; + } + ctrlOn = false; + } else { + if (*str == '\\') { + ctrlOn = true; + } else { + *pstr = *str; + pstr++; + } + } + } + + *pstr = '\0'; +} + +char *ascii_literal_list[] = { + "\\x00", "\\x01", "\\x02", "\\x03", "\\x04", "\\x05", "\\x06", 
"\\x07", "\\x08", "\\t", "\\n", "\\x0b", "\\x0c", + "\\r", "\\x0e", "\\x0f", "\\x10", "\\x11", "\\x12", "\\x13", "\\x14", "\\x15", "\\x16", "\\x17", "\\x18", "\\x19", + "\\x1a", "\\x1b", "\\x1c", "\\x1d", "\\x1e", "\\x1f", " ", "!", "\\\"", "#", "$", "%", "&", + "\\'", "(", ")", "*", "+", ",", "-", ".", "/", "0", "1", "2", "3", + "4", "5", "6", "7", "8", "9", ":", ";", "<", "=", ">", "?", "@", + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "[", "\\\\", "]", "^", "_", "`", "a", "b", "c", "d", "e", "f", "g", + "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", + "u", "v", "w", "x", "y", "z", "{", "|", "}", "~", "\\x7f", "\\x80", "\\x81", + "\\x82", "\\x83", "\\x84", "\\x85", "\\x86", "\\x87", "\\x88", "\\x89", "\\x8a", "\\x8b", "\\x8c", "\\x8d", "\\x8e", + "\\x8f", "\\x90", "\\x91", "\\x92", "\\x93", "\\x94", "\\x95", "\\x96", "\\x97", "\\x98", "\\x99", "\\x9a", "\\x9b", + "\\x9c", "\\x9d", "\\x9e", "\\x9f", "\\xa0", "\\xa1", "\\xa2", "\\xa3", "\\xa4", "\\xa5", "\\xa6", "\\xa7", "\\xa8", + "\\xa9", "\\xaa", "\\xab", "\\xac", "\\xad", "\\xae", "\\xaf", "\\xb0", "\\xb1", "\\xb2", "\\xb3", "\\xb4", "\\xb5", + "\\xb6", "\\xb7", "\\xb8", "\\xb9", "\\xba", "\\xbb", "\\xbc", "\\xbd", "\\xbe", "\\xbf", "\\xc0", "\\xc1", "\\xc2", + "\\xc3", "\\xc4", "\\xc5", "\\xc6", "\\xc7", "\\xc8", "\\xc9", "\\xca", "\\xcb", "\\xcc", "\\xcd", "\\xce", "\\xcf", + "\\xd0", "\\xd1", "\\xd2", "\\xd3", "\\xd4", "\\xd5", "\\xd6", "\\xd7", "\\xd8", "\\xd9", "\\xda", "\\xdb", "\\xdc", + "\\xdd", "\\xde", "\\xdf", "\\xe0", "\\xe1", "\\xe2", "\\xe3", "\\xe4", "\\xe5", "\\xe6", "\\xe7", "\\xe8", "\\xe9", + "\\xea", "\\xeb", "\\xec", "\\xed", "\\xee", "\\xef", "\\xf0", "\\xf1", "\\xf2", "\\xf3", "\\xf4", "\\xf5", "\\xf6", + "\\xf7", "\\xf8", "\\xf9", "\\xfa", "\\xfb", "\\xfc", "\\xfd", "\\xfe", "\\xff"}; + +int converStringToReadable(char *str, int size, char *buf, int bufsize) { + char *pstr = str; + char *pbuf = buf; + while (size > 0) { + if (*pstr == '\0') break; + pbuf = stpcpy(pbuf, ascii_literal_list[((uint8_t)(*pstr))]); + pstr++; + size--; + } + *pbuf = '\0'; + return 0; +} + +int convertNCharToReadable(char *str, int size, char *buf, int bufsize) { + char *pstr = str; + char *pbuf = buf; + // TODO + wchar_t wc; + while (size > 0) { + if (*pstr == '\0') break; + int byte_width = mbtowc(&wc, pstr, MB_CUR_MAX); + if (byte_width < 0) { + fprintf(stderr, "mbtowc() return fail.\n"); + exit(-1); + } + + if ((int)wc < 256) { + pbuf = stpcpy(pbuf, ascii_literal_list[(int)wc]); + } else { + memcpy(pbuf, pstr, byte_width); + pbuf += byte_width; + } + pstr += byte_width; + } + + *pbuf = '\0'; + + return 0; +} + +void taosDumpCharset(FILE *fp) { + char charsetline[256]; + + (void)fseek(fp, 0, SEEK_SET); + sprintf(charsetline, "#!%s\n", tsCharset); + (void)fwrite(charsetline, strlen(charsetline), 1, fp); +} + +void taosLoadFileCharset(FILE *fp, char *fcharset) { + char * line = NULL; + size_t line_size = 0; + + (void)fseek(fp, 0, SEEK_SET); + ssize_t size = getline(&line, &line_size, fp); + if (size <= 2) { + goto _exit_no_charset; + } + + if (strncmp(line, "#!", 2) != 0) { + goto _exit_no_charset; + } + if (line[size - 1] == '\n') { + line[size - 1] = '\0'; + size--; + } + strcpy(fcharset, line + 2); + + tfree(line); + return; + +_exit_no_charset: + (void)fseek(fp, 0, SEEK_SET); + *fcharset = '\0'; + tfree(line); + return; +} + +// ======== dumpIn support multi threads functions ================================// + +static char 
**tsDumpInSqlFiles = NULL; +static int32_t tsSqlFileNum = 0; +static char tsDbSqlFile[TSDB_FILENAME_LEN] = {0}; +static char tsfCharset[64] = {0}; +static int taosGetFilesNum(const char *directoryName, const char *prefix) +{ + char cmd[1024] = { 0 }; + sprintf(cmd, "ls %s/*.%s | wc -l ", directoryName, prefix); + + FILE *fp = popen(cmd, "r"); + if (fp == NULL) { + fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno)); + exit(0); + } + + int fileNum = 0; + if (fscanf(fp, "%d", &fileNum) != 1) { + fprintf(stderr, "ERROR: failed to execute:%s, parse result error\n", cmd); + exit(0); + } + + if (fileNum <= 0) { + fprintf(stderr, "ERROR: directory:%s is empry\n", directoryName); + exit(0); + } + + pclose(fp); + return fileNum; +} + +static void taosParseDirectory(const char *directoryName, const char *prefix, char **fileArray, int totalFiles) +{ + char cmd[1024] = { 0 }; + sprintf(cmd, "ls %s/*.%s | sort", directoryName, prefix); + + FILE *fp = popen(cmd, "r"); + if (fp == NULL) { + fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno)); + exit(0); + } + + int fileNum = 0; + while (fscanf(fp, "%128s", fileArray[fileNum++])) { + if (strcmp(fileArray[fileNum-1], tsDbSqlFile) == 0) { + fileNum--; + } + if (fileNum >= totalFiles) { + break; + } + } + + if (fileNum != totalFiles) { + fprintf(stderr, "ERROR: directory:%s changed while read\n", directoryName); + pclose(fp); + exit(0); + } + + pclose(fp); +} + +static void taosCheckTablesSQLFile(const char *directoryName) +{ + char cmd[1024] = { 0 }; + sprintf(cmd, "ls %s/dbs.sql", directoryName); + + FILE *fp = popen(cmd, "r"); + if (fp == NULL) { + fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno)); + exit(0); + } + + while (fscanf(fp, "%128s", tsDbSqlFile)) { + break; + } + + pclose(fp); +} + +static void taosMallocSQLFiles() +{ + tsDumpInSqlFiles = (char**)calloc(tsSqlFileNum, sizeof(char*)); + for (int i = 0; i < tsSqlFileNum; i++) { + tsDumpInSqlFiles[i] = calloc(1, TSDB_FILENAME_LEN); + } +} + +static void taosFreeSQLFiles() +{ + for (int i = 0; i < tsSqlFileNum; i++) { + tfree(tsDumpInSqlFiles[i]); + } + tfree(tsDumpInSqlFiles); +} + +static void taosGetDirectoryFileList(char *inputDir) +{ + struct stat fileStat; + if (stat(inputDir, &fileStat) < 0) { + fprintf(stderr, "ERROR: %s not exist\n", inputDir); + exit(0); + } + + if (fileStat.st_mode & S_IFDIR) { + taosCheckTablesSQLFile(inputDir); + tsSqlFileNum = taosGetFilesNum(inputDir, "sql"); + int totalSQLFileNum = tsSqlFileNum; + if (tsDbSqlFile[0] != 0) { + tsSqlFileNum--; + } + taosMallocSQLFiles(); + taosParseDirectory(inputDir, "sql", tsDumpInSqlFiles, tsSqlFileNum); + fprintf(stdout, "\nstart to dispose %d files in %s\n", totalSQLFileNum, inputDir); + } + else { + fprintf(stderr, "ERROR: %s is not a directory\n", inputDir); + exit(0); + } +} + +static FILE* taosOpenDumpInFile(char *fptr) { + wordexp_t full_path; + + if (wordexp(fptr, &full_path, 0) != 0) { + fprintf(stderr, "ERROR: illegal file name: %s\n", fptr); + return NULL; + } + + char *fname = full_path.we_wordv[0]; + + FILE *f = fopen(fname, "r"); + if (f == NULL) { + fprintf(stderr, "ERROR: failed to open file %s\n", fname); + wordfree(&full_path); + return NULL; + } + + wordfree(&full_path); + + return f; +} + +int taosDumpInOneFile_old(TAOS * taos, FILE* fp, char* fcharset, char* encode) { + char *command = NULL; + char *lcommand = NULL; + int tsize = 0; + char *line = NULL; + _Bool isRun = true; + size_t line_size = 0; + char *pstr = NULL; + 
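taosGetFilesNum() and taosParseDirectory() above shell out to ls via popen(), which depends on shell globbing and fixed-width fscanf reads of file names. A hedged alternative sketch using opendir()/readdir() to count the *.sql files directly; demo_count_files_with_suffix is an invented name and is not part of the patch.

    #include <dirent.h>
    #include <stdio.h>
    #include <string.h>

    static int demo_count_files_with_suffix(const char *dir, const char *suffix) {
      DIR *dp = opendir(dir);
      if (dp == NULL) {
        fprintf(stderr, "failed to open directory %s\n", dir);
        return -1;
      }

      int    count = 0;
      size_t slen  = strlen(suffix);
      struct dirent *ent;
      while ((ent = readdir(dp)) != NULL) {
        size_t nlen = strlen(ent->d_name);
        if (nlen > slen && strcmp(ent->d_name + nlen - slen, suffix) == 0) {
          count++;   /* e.g. suffix ".sql" matches dbs.sql and the per-thread .tables.N.sql files */
        }
      }
      closedir(dp);
      return count;
    }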
char *lstr = NULL; + size_t inbytesleft = 0; + size_t outbytesleft = COMMAND_SIZE; + char *tcommand = NULL; + char *charsetOfFile = NULL; + iconv_t cd = (iconv_t)(-1); + + command = (char *)malloc(COMMAND_SIZE); + lcommand = (char *)malloc(COMMAND_SIZE); + if (command == NULL || lcommand == NULL) { + fprintf(stderr, "failed to connect to allocate memory\n"); + goto _dumpin_exit_failure; + } + + // Resolve locale + if (*fcharset != '\0') { + charsetOfFile = fcharset; + } else { + charsetOfFile = encode; + } + + if (charsetOfFile != NULL && strcasecmp(tsCharset, charsetOfFile) != 0) { + cd = iconv_open(tsCharset, charsetOfFile); + if (cd == ((iconv_t)(-1))) { + fprintf(stderr, "Failed to open iconv handle\n"); + goto _dumpin_exit_failure; + } + } + + pstr = command; + int64_t linenu = 0; + while (1) { + ssize_t size = getline(&line, &line_size, fp); + linenu++; + if (size <= 0) break; + if (size == 1) { + if (pstr != command) { + inbytesleft = pstr - command; + memset(lcommand, 0, COMMAND_SIZE); + pstr = command; + lstr = lcommand; + outbytesleft = COMMAND_SIZE; + if (cd != ((iconv_t)(-1))) { + iconv(cd, &pstr, &inbytesleft, &lstr, &outbytesleft); + tcommand = lcommand; + } else { + tcommand = command; + } + + taosReplaceCtrlChar(tcommand); + + if (queryDB(taos, tcommand) != 0) { + fprintf(stderr, "error sql: linenu: %" PRId64 " failed\n", linenu); + exit(0); + } + + pstr = command; + pstr[0] = '\0'; + tsize = 0; + isRun = true; + } + + continue; + } + + /* if (line[0] == '-' && line[1] == '-') continue; */ + + line[size - 1] = 0; + + if (tsize + size - 1 > COMMAND_SIZE) { + fprintf(stderr, "command is too long\n"); + goto _dumpin_exit_failure; + } + + if (line[size - 2] == '\\') { + line[size - 2] = ' '; + isRun = false; + } else { + isRun = true; + } + + memcpy(pstr, line, size - 1); + pstr += (size - 1); + *pstr = '\0'; + + if (!isRun) continue; + + if (command != pstr && !isEmptyCommand(command)) { + inbytesleft = pstr - command; + memset(lcommand, 0, COMMAND_SIZE); + pstr = command; + lstr = lcommand; + outbytesleft = COMMAND_SIZE; + if (cd != ((iconv_t)(-1))) { + iconv(cd, &pstr, &inbytesleft, &lstr, &outbytesleft); + tcommand = lcommand; + } else { + tcommand = command; + } + taosReplaceCtrlChar(tcommand); + if (queryDB(taos, tcommand) != 0) { + fprintf(stderr, "error sql: linenu:%" PRId64 " failed\n", linenu); + exit(0); + } + } + + pstr = command; + *pstr = '\0'; + tsize = 0; + } + + if (pstr != command) { + inbytesleft = pstr - command; + memset(lcommand, 0, COMMAND_SIZE); + pstr = command; + lstr = lcommand; + outbytesleft = COMMAND_SIZE; + if (cd != ((iconv_t)(-1))) { + iconv(cd, &pstr, &inbytesleft, &lstr, &outbytesleft); + tcommand = lcommand; + } else { + tcommand = command; + } + taosReplaceCtrlChar(lcommand); + if (queryDB(taos, tcommand) != 0) + fprintf(stderr, "error sql: linenu:%" PRId64 " failed \n", linenu); + } + + if (cd != ((iconv_t)(-1))) iconv_close(cd); + tfree(line); + tfree(command); + tfree(lcommand); + taos_close(taos); + fclose(fp); + return 0; + +_dumpin_exit_failure: + if (cd != ((iconv_t)(-1))) iconv_close(cd); + tfree(command); + tfree(lcommand); + taos_close(taos); + fclose(fp); + return -1; +} + +int taosDumpInOneFile(TAOS * taos, FILE* fp, char* fcharset, char* encode, char* fileName) { + int read_len = 0; + char * cmd = NULL; + size_t cmd_len = 0; + char * line = NULL; + size_t line_len = 0; + + cmd = (char *)malloc(COMMAND_SIZE); + if (cmd == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + return -1; + } + + int lineNo = 0; + while 
((read_len = getline(&line, &line_len, fp)) != -1) { + ++lineNo; + if (read_len >= COMMAND_SIZE) continue; + line[--read_len] = '\0'; + + //if (read_len == 0 || isCommentLine(line)) { // line starts with # + if (read_len == 0 ) { + continue; + } + + if (line[read_len - 1] == '\\') { + line[read_len - 1] = ' '; + memcpy(cmd + cmd_len, line, read_len); + cmd_len += read_len; + continue; + } + + memcpy(cmd + cmd_len, line, read_len); + cmd[read_len + cmd_len]= '\0'; + if (queryDB(taos, cmd)) { + fprintf(stderr, "error sql: linenu:%d, file:%s\n", lineNo, fileName); + } + + memset(cmd, 0, COMMAND_SIZE); + cmd_len = 0; + } + + tfree(cmd); + tfree(line); + fclose(fp); + return 0; +} + +void* taosDumpInWorkThreadFp(void *arg) +{ + SThreadParaObj *pThread = (SThreadParaObj*)arg; + for (int32_t f = 0; f < tsSqlFileNum; ++f) { + if (f % pThread->totalThreads == pThread->threadIndex) { + char *SQLFileName = tsDumpInSqlFiles[f]; + FILE* fp = taosOpenDumpInFile(SQLFileName); + if (NULL == fp) { + continue; + } + fprintf(stderr, "Success Open input file: %s\n", SQLFileName); + taosDumpInOneFile(pThread->taosCon, fp, tsfCharset, tsArguments.encode, SQLFileName); + } + } + + return NULL; +} + +static void taosStartDumpInWorkThreads(struct arguments *args) +{ + pthread_attr_t thattr; + SThreadParaObj *pThread; + int32_t totalThreads = args->thread_num; + + if (totalThreads > tsSqlFileNum) { + totalThreads = tsSqlFileNum; + } + + SThreadParaObj *threadObj = (SThreadParaObj *)calloc(totalThreads, sizeof(SThreadParaObj)); + for (int32_t t = 0; t < totalThreads; ++t) { + pThread = threadObj + t; + pThread->threadIndex = t; + pThread->totalThreads = totalThreads; + pThread->taosCon = taos_connect(args->host, args->user, args->password, NULL, args->port); + if (pThread->taosCon == NULL) { + fprintf(stderr, "ERROR: thread:%d failed connect to TDengine, error:%s\n", pThread->threadIndex, taos_errstr(pThread->taosCon)); + exit(0); + } + + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + + if (pthread_create(&(pThread->threadID), &thattr, taosDumpInWorkThreadFp, (void*)pThread) != 0) { + fprintf(stderr, "ERROR: thread:%d failed to start\n", pThread->threadIndex); + exit(0); + } + } + + for (int t = 0; t < totalThreads; ++t) { + pthread_join(threadObj[t].threadID, NULL); + } + + for (int t = 0; t < totalThreads; ++t) { + taos_close(threadObj[t].taosCon); + } + free(threadObj); +} + + +int taosDumpIn(struct arguments *arguments) { + assert(arguments->isDumpIn); + + TAOS *taos = NULL; + FILE *fp = NULL; + + taos = taos_connect(arguments->host, arguments->user, arguments->password, NULL, arguments->port); + if (taos == NULL) { + fprintf(stderr, "failed to connect to TDengine server\n"); + return -1; + } + + taosGetDirectoryFileList(arguments->inpath); + + if (tsDbSqlFile[0] != 0) { + fp = taosOpenDumpInFile(tsDbSqlFile); + if (NULL == fp) { + fprintf(stderr, "failed to open input file %s\n", tsDbSqlFile); + return -1; + } + fprintf(stderr, "Success Open input file: %s\n", tsDbSqlFile); + + taosLoadFileCharset(fp, tsfCharset); + + taosDumpInOneFile(taos, fp, tsfCharset, arguments->encode, tsDbSqlFile); + } + + taosStartDumpInWorkThreads(arguments); + + taos_close(taos); + taosFreeSQLFiles(); + return 0; +} + + diff --git a/src/kit/taosdump/taosdump.sh b/src/kit/taosdump/taosdump.sh new file mode 100755 index 0000000000..6d32c090db --- /dev/null +++ b/src/kit/taosdump/taosdump.sh @@ -0,0 +1,48 @@ +taos1_6="/root/mnt/work/test/td1.6/build/bin/taos" 
+taosdump1_6="/root/mnt/work/test/td1.6/build/bin/taosdump" +taoscfg1_6="/root/mnt/work/test/td1.6/test/cfg" + +taos2_0="/root/mnt/work/test/td2.0/build/bin/taos" +taosdump2_0="/root/mnt/work/test/td2.0/build/bin/taosdump" +taoscfg2_0="/root/mnt/work/test/td2.0/test/cfg" + +data_dir="/root/mnt/work/test/td1.6/output" +table_list="/root/mnt/work/test/td1.6/tables" + +DBNAME="test" +NTABLES=$(wc -l ${table_list} | awk '{print $1;}') +NTABLES_PER_DUMP=101 + +mkdir -p ${data_dir} +i=0 +round=0 +command="${taosdump1_6} -c ${taoscfg1_6} -o ${data_dir} -N 100 -T 20 ${DBNAME}" +while IFS= read -r line +do + i=$((i+1)) + + command="${command} ${line}" + + if [[ "$i" -eq ${NTABLES_PER_DUMP} ]]; then + round=$((round+1)) + echo "Starting round ${round} dump out..." + rm -f ${data_dir}/* + ${command} + echo "Starting round ${round} dump in..." + ${taosdump2_0} -c ${taoscfg2_0} -i ${data_dir} + + # Reset variables + # command="${taosdump1_6} -c ${taoscfg1_6} -o ${data_dir} -N 100 ${DBNAME}" + command="${taosdump1_6} -c ${taoscfg1_6} -o ${data_dir} -N 100 -T 20 ${DBNAME}" + i=0 + fi +done < "${table_list}" + +if [[ ${i} -ne "0" ]]; then + round=$((round+1)) + echo "Starting round ${round} dump out..." + rm -f ${data_dir}/* + ${command} + echo "Starting round ${round} dump in..." + ${taosdump2_0} -c ${taoscfg2_0} -i ${data_dir} +fi From 5d61ea87f06e615c77b37184bd8124a8d0383c8b Mon Sep 17 00:00:00 2001 From: Hui Li Date: Mon, 16 Nov 2020 12:42:24 +0800 Subject: [PATCH 49/52] [TD-2101] release taosdump for install package --- src/kit/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/src/kit/CMakeLists.txt b/src/kit/CMakeLists.txt index 77db79e220..66e8cf7398 100644 --- a/src/kit/CMakeLists.txt +++ b/src/kit/CMakeLists.txt @@ -3,3 +3,4 @@ PROJECT(TDengine) ADD_SUBDIRECTORY(shell) ADD_SUBDIRECTORY(taosdemo) +ADD_SUBDIRECTORY(taosdump) From 176d247efcba6f256e3906f093de32dac86697ca Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Mon, 16 Nov 2020 14:11:26 +0800 Subject: [PATCH 50/52] resolve some static scan error --- src/tsdb/src/tsdbFile.c | 2 +- src/tsdb/src/tsdbMemTable.c | 107 ++++++++++++++++++++++-------------- src/tsdb/src/tsdbRWHelper.c | 2 +- 3 files changed, 67 insertions(+), 44 deletions(-) diff --git a/src/tsdb/src/tsdbFile.c b/src/tsdb/src/tsdbFile.c index 1efde49ed0..58ae2c3ae1 100644 --- a/src/tsdb/src/tsdbFile.c +++ b/src/tsdb/src/tsdbFile.c @@ -516,7 +516,7 @@ void tsdbGetFileInfoImpl(char *fname, uint32_t *magic, int64_t *size) { SFile file; SFile * pFile = &file; - strncpy(pFile->fname, fname, TSDB_FILENAME_LEN); + strncpy(pFile->fname, fname, TSDB_FILENAME_LEN - 1); pFile->fd = -1; if (tsdbOpenFile(pFile, O_RDONLY) < 0) goto _err; diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index 7f7d47a886..9f9c52222e 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -18,7 +18,6 @@ #define TSDB_DATA_SKIPLIST_LEVEL 5 -static void tsdbFreeBytes(STsdbRepo *pRepo, void *ptr, int bytes); static SMemTable * tsdbNewMemTable(STsdbRepo *pRepo); static void tsdbFreeMemTable(SMemTable *pMemTable); static STableData *tsdbNewTableData(STsdbCfg *pCfg, STable *pTable); @@ -41,6 +40,7 @@ static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIte static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock); static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable); static int tsdbInsertDataToTableImpl(STsdbRepo *pRepo, STable *pTable, void **rows, int rowCounter); +static void 
tsdbFreeRows(STsdbRepo *pRepo, void **rows, int rowCounter); static FORCE_INLINE int tsdbCheckRowRange(STsdbRepo *pRepo, STable *pTable, SDataRow row, TSKEY minKey, TSKEY maxKey, TSKEY now); @@ -97,7 +97,7 @@ int tsdbUnRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) { } int code = pthread_cond_signal(&pBufPool->poolNotEmpty); if (code != 0) { - tsdbUnlockRepo(pRepo); + if (tsdbUnlockRepo(pRepo) < 0) return -1; tsdbError("vgId:%d failed to signal pool not empty since %s", REPO_ID(pRepo), strerror(code)); terrno = TAOS_SYSTEM_ERROR(code); return -1; @@ -134,6 +134,8 @@ int tsdbTakeMemSnapshot(STsdbRepo *pRepo, SMemTable **pMem, SMemTable **pIMem) { } void tsdbUnTakeMemSnapShot(STsdbRepo *pRepo, SMemTable *pMem, SMemTable *pIMem) { + tsdbDebug("vgId:%d untake memory snapshot, pMem %p pIMem %p", REPO_ID(pRepo), pMem, pIMem); + if (pMem != NULL) { taosRUnLockLatch(&(pMem->latch)); tsdbUnRefMemTable(pRepo, pMem); @@ -142,8 +144,6 @@ void tsdbUnTakeMemSnapShot(STsdbRepo *pRepo, SMemTable *pMem, SMemTable *pIMem) if (pIMem != NULL) { tsdbUnRefMemTable(pRepo, pIMem); } - - tsdbDebug("vgId:%d untake memory snapshot, pMem %p pIMem %p", REPO_ID(pRepo), pMem, pIMem); } void *tsdbAllocBytes(STsdbRepo *pRepo, int bytes) { @@ -175,6 +175,10 @@ void *tsdbAllocBytes(STsdbRepo *pRepo, int bytes) { ASSERT(pRepo->mem->extraBuffList != NULL); SListNode *pNode = (SListNode *)malloc(sizeof(SListNode) + bytes); if (pNode == NULL) { + if (listNEles(pRepo->mem->extraBuffList) == 0) { + tdListFree(pRepo->mem->extraBuffList); + pRepo->mem->extraBuffList = NULL; + } terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; return NULL; } @@ -205,18 +209,18 @@ void *tsdbAllocBytes(STsdbRepo *pRepo, int bytes) { } int tsdbAsyncCommit(STsdbRepo *pRepo) { + if (pRepo->mem == NULL) return 0; + SMemTable *pIMem = pRepo->imem; - if (pRepo->mem != NULL) { - sem_wait(&(pRepo->readyToCommit)); + sem_wait(&(pRepo->readyToCommit)); - if (pRepo->appH.notifyStatus) pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_START); - if (tsdbLockRepo(pRepo) < 0) return -1; - pRepo->imem = pRepo->mem; - pRepo->mem = NULL; - tsdbScheduleCommit(pRepo); - if (tsdbUnlockRepo(pRepo) < 0) return -1; - } + if (pRepo->appH.notifyStatus) pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_START); + if (tsdbLockRepo(pRepo) < 0) return -1; + pRepo->imem = pRepo->mem; + pRepo->mem = NULL; + tsdbScheduleCommit(pRepo); + if (tsdbUnlockRepo(pRepo) < 0) return -1; if (tsdbUnRefMemTable(pRepo, pIMem) < 0) return -1; @@ -414,25 +418,6 @@ _exit: } // ---------------- LOCAL FUNCTIONS ---------------- -static void tsdbFreeBytes(STsdbRepo *pRepo, void *ptr, int bytes) { - ASSERT(pRepo->mem != NULL); - if (pRepo->mem->extraBuffList == NULL) { - STsdbBufBlock *pBufBlock = tsdbGetCurrBufBlock(pRepo); - ASSERT(pBufBlock != NULL); - pBufBlock->offset -= bytes; - pBufBlock->remain += bytes; - ASSERT(ptr == POINTER_SHIFT(pBufBlock->data, pBufBlock->offset)); - tsdbTrace("vgId:%d free %d bytes to TSDB buffer pool, nBlocks %d offset %d remain %d", REPO_ID(pRepo), bytes, - listNEles(pRepo->mem->bufBlockList), pBufBlock->offset, pBufBlock->remain); - } else { - SListNode *pNode = (SListNode *)POINTER_SHIFT(ptr, -(int)(sizeof(SListNode))); - ASSERT(listTail(pRepo->mem->extraBuffList) == pNode); - tdListPopNode(pRepo->mem->extraBuffList, pNode); - free(pNode); - tsdbTrace("vgId:%d free %d bytes to SYSTEM buffer pool", REPO_ID(pRepo), bytes); - } -} - static SMemTable* tsdbNewMemTable(STsdbRepo *pRepo) { STsdbMeta *pMeta = pRepo->tsdbMeta; @@ -922,8 +907,8 @@ static int 
tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, int32_t * tsdbInitSubmitBlkIter(pBlock, &blkIter); while ((row = tsdbGetSubmitBlkNext(&blkIter)) != NULL) { if (tsdbCopyRowToMem(pRepo, row, pTable, &(rows[rowCounter])) < 0) { - free(rows); - return -1; + tsdbFreeRows(pRepo, rows, rowCounter); + goto _err; } (*affectedrows)++; @@ -935,8 +920,7 @@ static int tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, int32_t * } if (tsdbInsertDataToTableImpl(pRepo, pTable, rows, rowCounter) < 0) { - free(rows); - return -1; + goto _err; } STSchema *pSchema = tsdbGetTableSchemaByVersion(pTable, pBlock->sversion); @@ -945,6 +929,10 @@ static int tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, int32_t * free(rows); return 0; + +_err: + free(rows); + return -1; } static int tsdbCopyRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable, void **ppRow) { @@ -1104,9 +1092,7 @@ static int tsdbInsertDataToTableImpl(STsdbRepo *pRepo, STable *pTable, void **ro if (TABLE_TID(pTable) >= pMemTable->maxTables) { if (tsdbAdjustMemMaxTables(pMemTable, pMeta->maxTables) < 0) { - for (int i = rowCounter - 1; i >= 0; i--) { - tsdbFreeBytes(pRepo, rows[i], dataRowLen(rows[i])); - } + tsdbFreeRows(pRepo, rows, rowCounter); return -1; } } @@ -1124,9 +1110,7 @@ static int tsdbInsertDataToTableImpl(STsdbRepo *pRepo, STable *pTable, void **ro if (pTableData == NULL) { tsdbError("vgId:%d failed to insert data to table %s uid %" PRId64 " tid %d since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_UID(pTable), TABLE_TID(pTable), tstrerror(terrno)); - for (int i = rowCounter - 1; i >= 0; i--) { - tsdbFreeBytes(pRepo, rows[i], dataRowLen(rows[i])); - } + tsdbFreeRows(pRepo, rows, rowCounter); return -1; } @@ -1151,4 +1135,43 @@ static int tsdbInsertDataToTableImpl(STsdbRepo *pRepo, STable *pTable, void **ro if (TABLE_LASTKEY(pTable) < dataRowKey(rows[rowCounter-1])) TABLE_LASTKEY(pTable) = dataRowKey(rows[rowCounter-1]); return 0; +} + +static void tsdbFreeRows(STsdbRepo *pRepo, void **rows, int rowCounter) { + ASSERT(pRepo->mem != NULL); + STsdbBufPool *pBufPool = pRepo->pPool; + + for (int i = rowCounter - 1; i >= 0; --i) { + SDataRow row = (SDataRow)rows[i]; + int bytes = (int)dataRowLen(row); + + if (pRepo->mem->extraBuffList == NULL) { + STsdbBufBlock *pBufBlock = tsdbGetCurrBufBlock(pRepo); + ASSERT(pBufBlock != NULL && pBufBlock->offset >= bytes); + + pBufBlock->offset -= bytes; + pBufBlock->remain += bytes; + ASSERT(row == POINTER_SHIFT(pBufBlock->data, pBufBlock->offset)); + tsdbTrace("vgId:%d free %d bytes to TSDB buffer pool, nBlocks %d offset %d remain %d", REPO_ID(pRepo), bytes, + listNEles(pRepo->mem->bufBlockList), pBufBlock->offset, pBufBlock->remain); + + if (pBufBlock->offset == 0) { // return the block to buffer pool + tsdbLockRepo(pRepo); + SListNode *pNode = tdListPopTail(pRepo->mem->bufBlockList); + tdListPrependNode(pBufPool->bufBlockList, pNode); + tsdbUnlockRepo(pRepo); + } + } else { + ASSERT(listNEles(pRepo->mem->extraBuffList) > 0); + SListNode *pNode = tdListPopTail(pRepo->mem->extraBuffList); + ASSERT(row == pNode->data); + free(pNode); + tsdbTrace("vgId:%d free %d bytes to SYSTEM buffer pool", REPO_ID(pRepo), bytes); + + if (listNEles(pRepo->mem->extraBuffList) == 0) { + tdListFree(pRepo->mem->extraBuffList); + pRepo->mem->extraBuffList = NULL; + } + } + } } \ No newline at end of file diff --git a/src/tsdb/src/tsdbRWHelper.c b/src/tsdb/src/tsdbRWHelper.c index 98f815f78a..5b65b2185a 100644 --- a/src/tsdb/src/tsdbRWHelper.c +++ b/src/tsdb/src/tsdbRWHelper.c @@ 
-1595,7 +1595,7 @@ static int tsdbProcessMergeCommit(SRWHelper *pHelper, SCommitIter *pCommitIter, tblkIdx++; } else if (oBlock.numOfRows + pMergeInfo->rowsInserted - pMergeInfo->rowsDeleteSucceed == 0) { // Delete the block and do some stuff - ASSERT(pMergeInfo->keyFirst == INT64_MAX && pMergeInfo->keyFirst == INT64_MIN); + // ASSERT(pMergeInfo->keyFirst == INT64_MAX && pMergeInfo->keyFirst == INT64_MIN); if (tsdbDeleteSuperBlock(pHelper, tblkIdx) < 0) return -1; *pCommitIter->pIter = slIter; if (oBlock.last && pHelper->hasOldLastBlock) pHelper->hasOldLastBlock = false; From a8eb725de4697caefc8260cfc6360876bbd179ea Mon Sep 17 00:00:00 2001 From: Hui Li Date: Mon, 16 Nov 2020 14:54:27 +0800 Subject: [PATCH 51/52] [NONE] --- src/kit/shell/src/shellLinux.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c index 57f0f65be4..04f5824d8d 100644 --- a/src/kit/shell/src/shellLinux.c +++ b/src/kit/shell/src/shellLinux.c @@ -46,7 +46,7 @@ static struct argp_option options[] = { {"thread", 'T', "THREADNUM", 0, "Number of threads when using multi-thread to import data."}, {"database", 'd', "DATABASE", 0, "Database to use when connecting to the server."}, {"timezone", 't', "TIMEZONE", 0, "Time zone of the shell, default is local."}, - {"netrole", 'n', "NETROLE", 0, "Net role when network connectivity test, default is NULL, valid option: client | server."}, + {"netrole", 'n', "NETROLE", 0, "Net role when network connectivity test, default is NULL, options: client|clients|server."}, {"endport", 'e', "ENDPORT", 0, "Net test end port, default is 6042."}, {"pktlen", 'l', "PKTLEN", 0, "Packet length used for net test, default is 1000 bytes."}, {0}}; From a803f240457de99b563facb1953bb8a1d622ed99 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Mon, 16 Nov 2020 16:00:57 +0800 Subject: [PATCH 52/52] [TD-2066]: pass error code when commit failed --- src/inc/tsdb.h | 2 +- src/tsdb/src/tsdbCommit.c | 337 ++++++++++++++++++++++++++++++++++++ src/tsdb/src/tsdbFile.c | 3 +- src/tsdb/src/tsdbMemTable.c | 299 +------------------------------- src/util/src/tkvstore.c | 2 + src/vnode/src/vnodeMain.c | 8 +- 6 files changed, 349 insertions(+), 302 deletions(-) create mode 100644 src/tsdb/src/tsdbCommit.c diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index a913deea37..474214be7e 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -46,7 +46,7 @@ extern "C" { typedef struct { void *appH; void *cqH; - int (*notifyStatus)(void *, int status); + int (*notifyStatus)(void *, int status, int eno); int (*eventCallBack)(void *); void *(*cqCreateFunc)(void *handle, uint64_t uid, int sid, char *sqlStr, STSchema *pSchema); void (*cqDropFunc)(void *handle); diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c new file mode 100644 index 0000000000..0112f40ffd --- /dev/null +++ b/src/tsdb/src/tsdbCommit.c @@ -0,0 +1,337 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ +#include "tsdbMain.h" + +static int tsdbCommitTSData(STsdbRepo *pRepo); +static int tsdbCommitMeta(STsdbRepo *pRepo); +static void tsdbEndCommit(STsdbRepo *pRepo, int eno); +static int tsdbHasDataToCommit(SCommitIter *iters, int nIters, TSKEY minKey, TSKEY maxKey); +static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHelper *pHelper, SDataCols *pDataCols); +static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo); +static void tsdbDestroyCommitIters(SCommitIter *iters, int maxTables); + +void *tsdbCommitData(STsdbRepo *pRepo) { + SMemTable * pMem = pRepo->imem; + + tsdbInfo("vgId:%d start to commit! keyFirst %" PRId64 " keyLast %" PRId64 " numOfRows %" PRId64 " meta rows: %d", + REPO_ID(pRepo), pMem->keyFirst, pMem->keyLast, pMem->numOfRows, listNEles(pMem->actList)); + + // Commit to update meta file + if (tsdbCommitMeta(pRepo) < 0) { + tsdbError("vgId:%d error occurs while committing META data since %s", REPO_ID(pRepo), tstrerror(terrno)); + goto _err; + } + + // Create the iterator to read from cache + if (tsdbCommitTSData(pRepo) < 0) { + tsdbError("vgId:%d error occurs while committing TS data since %s", REPO_ID(pRepo), tstrerror(terrno)); + goto _err; + } + + tsdbFitRetention(pRepo); + + tsdbInfo("vgId:%d commit over, succeed", REPO_ID(pRepo)); + tsdbEndCommit(pRepo, TSDB_CODE_SUCCESS); + + return NULL; + +_err: + ASSERT(terrno != TSDB_CODE_SUCCESS); + tsdbInfo("vgId:%d commit over, failed", REPO_ID(pRepo)); + tsdbEndCommit(pRepo, terrno); + + return NULL; +} + +static int tsdbCommitTSData(STsdbRepo *pRepo) { + SMemTable * pMem = pRepo->imem; + SDataCols * pDataCols = NULL; + STsdbMeta * pMeta = pRepo->tsdbMeta; + SCommitIter *iters = NULL; + SRWHelper whelper = {0}; + STsdbCfg * pCfg = &(pRepo->config); + + if (pMem->numOfRows <= 0) return 0; + + iters = tsdbCreateCommitIters(pRepo); + if (iters == NULL) { + tsdbError("vgId:%d failed to create commit iterator since %s", REPO_ID(pRepo), tstrerror(terrno)); + goto _err; + } + + if (tsdbInitWriteHelper(&whelper, pRepo) < 0) { + tsdbError("vgId:%d failed to init write helper since %s", REPO_ID(pRepo), tstrerror(terrno)); + goto _err; + } + + if ((pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pCfg->maxRowsPerFileBlock)) == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + tsdbError("vgId:%d failed to init data cols with maxRowBytes %d maxCols %d maxRowsPerFileBlock %d since %s", + REPO_ID(pRepo), pMeta->maxCols, pMeta->maxRowBytes, pCfg->maxRowsPerFileBlock, tstrerror(terrno)); + goto _err; + } + + int sfid = (int)(TSDB_KEY_FILEID(pMem->keyFirst, pCfg->daysPerFile, pCfg->precision)); + int efid = (int)(TSDB_KEY_FILEID(pMem->keyLast, pCfg->daysPerFile, pCfg->precision)); + + // Loop to commit to each file + for (int fid = sfid; fid <= efid; fid++) { + if (tsdbCommitToFile(pRepo, fid, iters, &whelper, pDataCols) < 0) { + tsdbError("vgId:%d failed to commit to file %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno)); + goto _err; + } + } + + tdFreeDataCols(pDataCols); + tsdbDestroyCommitIters(iters, pMem->maxTables); + tsdbDestroyHelper(&whelper); + + return 0; + +_err: + tdFreeDataCols(pDataCols); + tsdbDestroyCommitIters(iters, pMem->maxTables); + tsdbDestroyHelper(&whelper); + + return -1; +} + +static int tsdbCommitMeta(STsdbRepo *pRepo) { + SMemTable *pMem = pRepo->imem; + STsdbMeta *pMeta = pRepo->tsdbMeta; + SActObj * pAct = NULL; + SActCont * pCont = NULL; + + if (listNEles(pMem->actList) <= 0) return 0; + + if (tdKVStoreStartCommit(pMeta->pStore) < 0) { + tsdbError("vgId:%d failed 
to commit data while start commit meta since %s", REPO_ID(pRepo), tstrerror(terrno)); + goto _err; + } + + SListNode *pNode = NULL; + + while ((pNode = tdListPopHead(pMem->actList)) != NULL) { + pAct = (SActObj *)pNode->data; + if (pAct->act == TSDB_UPDATE_META) { + pCont = (SActCont *)POINTER_SHIFT(pAct, sizeof(SActObj)); + if (tdUpdateKVStoreRecord(pMeta->pStore, pAct->uid, (void *)(pCont->cont), pCont->len) < 0) { + tsdbError("vgId:%d failed to update meta with uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid, + tstrerror(terrno)); + tdKVStoreEndCommit(pMeta->pStore); + goto _err; + } + } else if (pAct->act == TSDB_DROP_META) { + if (tdDropKVStoreRecord(pMeta->pStore, pAct->uid) < 0) { + tsdbError("vgId:%d failed to drop meta with uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid, + tstrerror(terrno)); + tdKVStoreEndCommit(pMeta->pStore); + goto _err; + } + } else { + ASSERT(false); + } + } + + if (tdKVStoreEndCommit(pMeta->pStore) < 0) { + tsdbError("vgId:%d failed to commit data while end commit meta since %s", REPO_ID(pRepo), tstrerror(terrno)); + goto _err; + } + + return 0; + +_err: + return -1; +} + +static void tsdbEndCommit(STsdbRepo *pRepo, int eno) { + if (pRepo->appH.notifyStatus) pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_OVER, eno); + sem_post(&(pRepo->readyToCommit)); +} + +static int tsdbHasDataToCommit(SCommitIter *iters, int nIters, TSKEY minKey, TSKEY maxKey) { + for (int i = 0; i < nIters; i++) { + TSKEY nextKey = tsdbNextIterKey((iters + i)->pIter); + if (nextKey != TSDB_DATA_TIMESTAMP_NULL && (nextKey >= minKey && nextKey <= maxKey)) return 1; + } + return 0; +} + +static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHelper *pHelper, SDataCols *pDataCols) { + char * dataDir = NULL; + STsdbCfg * pCfg = &pRepo->config; + STsdbFileH *pFileH = pRepo->tsdbFileH; + SFileGroup *pGroup = NULL; + SMemTable * pMem = pRepo->imem; + bool newLast = false; + + TSKEY minKey = 0, maxKey = 0; + tsdbGetFidKeyRange(pCfg->daysPerFile, pCfg->precision, fid, &minKey, &maxKey); + + // Check if there are data to commit to this file + int hasDataToCommit = tsdbHasDataToCommit(iters, pMem->maxTables, minKey, maxKey); + if (!hasDataToCommit) { + tsdbDebug("vgId:%d no data to commit to file %d", REPO_ID(pRepo), fid); + return 0; + } + + // Create and open files for commit + dataDir = tsdbGetDataDirName(pRepo->rootDir); + if (dataDir == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + return -1; + } + + if ((pGroup = tsdbCreateFGroupIfNeed(pRepo, dataDir, fid)) == NULL) { + tsdbError("vgId:%d failed to create file group %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno)); + goto _err; + } + + // Open files for write/read + if (tsdbSetAndOpenHelperFile(pHelper, pGroup) < 0) { + tsdbError("vgId:%d failed to set helper file since %s", REPO_ID(pRepo), tstrerror(terrno)); + goto _err; + } + + newLast = TSDB_NLAST_FILE_OPENED(pHelper); + + if (tsdbLoadCompIdx(pHelper, NULL) < 0) { + tsdbError("vgId:%d failed to load SCompIdx part since %s", REPO_ID(pRepo), tstrerror(terrno)); + goto _err; + } + + // Loop to commit data in each table + for (int tid = 1; tid < pMem->maxTables; tid++) { + SCommitIter *pIter = iters + tid; + if (pIter->pTable == NULL) continue; + + taosRLockLatch(&(pIter->pTable->latch)); + + if (tsdbSetHelperTable(pHelper, pIter->pTable, pRepo) < 0) goto _err; + + if (pIter->pIter != NULL) { + if (tdInitDataCols(pDataCols, tsdbGetTableSchemaImpl(pIter->pTable, false, false, -1)) < 0) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + goto 
_err; + } + + if (tsdbCommitTableData(pHelper, pIter, pDataCols, maxKey) < 0) { + taosRUnLockLatch(&(pIter->pTable->latch)); + tsdbError("vgId:%d failed to write data of table %s tid %d uid %" PRIu64 " since %s", REPO_ID(pRepo), + TABLE_CHAR_NAME(pIter->pTable), TABLE_TID(pIter->pTable), TABLE_UID(pIter->pTable), + tstrerror(terrno)); + goto _err; + } + } + + taosRUnLockLatch(&(pIter->pTable->latch)); + + // Move the last block to the new .l file if neccessary + if (tsdbMoveLastBlockIfNeccessary(pHelper) < 0) { + tsdbError("vgId:%d, failed to move last block, since %s", REPO_ID(pRepo), tstrerror(terrno)); + goto _err; + } + + // Write the SCompBlock part + if (tsdbWriteCompInfo(pHelper) < 0) { + tsdbError("vgId:%d, failed to write compInfo part since %s", REPO_ID(pRepo), tstrerror(terrno)); + goto _err; + } + } + + if (tsdbWriteCompIdx(pHelper) < 0) { + tsdbError("vgId:%d failed to write compIdx part to file %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno)); + goto _err; + } + + tfree(dataDir); + tsdbCloseHelperFile(pHelper, 0, pGroup); + + pthread_rwlock_wrlock(&(pFileH->fhlock)); + + (void)rename(helperNewHeadF(pHelper)->fname, helperHeadF(pHelper)->fname); + pGroup->files[TSDB_FILE_TYPE_HEAD].info = helperNewHeadF(pHelper)->info; + + if (newLast) { + (void)rename(helperNewLastF(pHelper)->fname, helperLastF(pHelper)->fname); + pGroup->files[TSDB_FILE_TYPE_LAST].info = helperNewLastF(pHelper)->info; + } else { + pGroup->files[TSDB_FILE_TYPE_LAST].info = helperLastF(pHelper)->info; + } + + pGroup->files[TSDB_FILE_TYPE_DATA].info = helperDataF(pHelper)->info; + + pthread_rwlock_unlock(&(pFileH->fhlock)); + + return 0; + +_err: + tfree(dataDir); + tsdbCloseHelperFile(pHelper, 1, NULL); + return -1; +} + +static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo) { + SMemTable *pMem = pRepo->imem; + STsdbMeta *pMeta = pRepo->tsdbMeta; + + SCommitIter *iters = (SCommitIter *)calloc(pMem->maxTables, sizeof(SCommitIter)); + if (iters == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + return NULL; + } + + if (tsdbRLockRepoMeta(pRepo) < 0) goto _err; + + // reference all tables + for (int i = 0; i < pMem->maxTables; i++) { + if (pMeta->tables[i] != NULL) { + tsdbRefTable(pMeta->tables[i]); + iters[i].pTable = pMeta->tables[i]; + } + } + + if (tsdbUnlockRepoMeta(pRepo) < 0) goto _err; + + for (int i = 0; i < pMem->maxTables; i++) { + if ((iters[i].pTable != NULL) && (pMem->tData[i] != NULL) && (TABLE_UID(iters[i].pTable) == pMem->tData[i]->uid)) { + if ((iters[i].pIter = tSkipListCreateIter(pMem->tData[i]->pData)) == NULL) { + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + goto _err; + } + + tSkipListIterNext(iters[i].pIter); + } + } + + return iters; + +_err: + tsdbDestroyCommitIters(iters, pMem->maxTables); + return NULL; +} + +static void tsdbDestroyCommitIters(SCommitIter *iters, int maxTables) { + if (iters == NULL) return; + + for (int i = 1; i < maxTables; i++) { + if (iters[i].pTable != NULL) { + tsdbUnRefTable(iters[i].pTable); + tSkipListDestroyIter(iters[i].pIter); + } + } + + free(iters); +} diff --git a/src/tsdb/src/tsdbFile.c b/src/tsdb/src/tsdbFile.c index 58ae2c3ae1..03c50d42f7 100644 --- a/src/tsdb/src/tsdbFile.c +++ b/src/tsdb/src/tsdbFile.c @@ -256,7 +256,8 @@ SFileGroup *tsdbCreateFGroupIfNeed(STsdbRepo *pRepo, char *dataDir, int fid) { pFileH->pFGroup[pFileH->nFGroups++] = fGroup; qsort((void *)(pFileH->pFGroup), pFileH->nFGroups, sizeof(SFileGroup), compFGroup); pthread_rwlock_unlock(&pFileH->fhlock); - return tsdbSearchFGroup(pFileH, fid, TD_EQ); + pGroup = 
tsdbSearchFGroup(pFileH, fid, TD_EQ); + ASSERT(pGroup != NULL); } return pGroup; diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index 9f9c52222e..6d7835438c 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -23,12 +23,6 @@ static void tsdbFreeMemTable(SMemTable *pMemTable); static STableData *tsdbNewTableData(STsdbCfg *pCfg, STable *pTable); static void tsdbFreeTableData(STableData *pTableData); static char * tsdbGetTsTupleKey(const void *data); -static int tsdbCommitMeta(STsdbRepo *pRepo); -static void tsdbEndCommit(STsdbRepo *pRepo); -static int tsdbHasDataToCommit(SCommitIter *iters, int nIters, TSKEY minKey, TSKEY maxKey); -static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHelper *pHelper, SDataCols *pDataCols); -static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo); -static void tsdbDestroyCommitIters(SCommitIter *iters, int maxTables); static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables); static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, SDataRow row); static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter); @@ -215,7 +209,7 @@ int tsdbAsyncCommit(STsdbRepo *pRepo) { sem_wait(&(pRepo->readyToCommit)); - if (pRepo->appH.notifyStatus) pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_START); + if (pRepo->appH.notifyStatus) pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_START, TSDB_CODE_SUCCESS); if (tsdbLockRepo(pRepo) < 0) return -1; pRepo->imem = pRepo->mem; pRepo->mem = NULL; @@ -355,68 +349,6 @@ int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey return 0; } -void *tsdbCommitData(STsdbRepo *pRepo) { - SMemTable * pMem = pRepo->imem; - STsdbCfg * pCfg = &pRepo->config; - SDataCols * pDataCols = NULL; - STsdbMeta * pMeta = pRepo->tsdbMeta; - SCommitIter *iters = NULL; - SRWHelper whelper = {0}; - ASSERT(pMem != NULL); - - tsdbInfo("vgId:%d start to commit! 
keyFirst %" PRId64 " keyLast %" PRId64 " numOfRows %" PRId64, REPO_ID(pRepo), - pMem->keyFirst, pMem->keyLast, pMem->numOfRows); - - // Create the iterator to read from cache - if (pMem->numOfRows > 0) { - iters = tsdbCreateCommitIters(pRepo); - if (iters == NULL) { - tsdbError("vgId:%d failed to create commit iterator since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _exit; - } - - if (tsdbInitWriteHelper(&whelper, pRepo) < 0) { - tsdbError("vgId:%d failed to init write helper since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _exit; - } - - if ((pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pCfg->maxRowsPerFileBlock)) == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - tsdbError("vgId:%d failed to init data cols with maxRowBytes %d maxCols %d maxRowsPerFileBlock %d since %s", - REPO_ID(pRepo), pMeta->maxCols, pMeta->maxRowBytes, pCfg->maxRowsPerFileBlock, tstrerror(terrno)); - goto _exit; - } - - int sfid = (int)(TSDB_KEY_FILEID(pMem->keyFirst, pCfg->daysPerFile, pCfg->precision)); - int efid = (int)(TSDB_KEY_FILEID(pMem->keyLast, pCfg->daysPerFile, pCfg->precision)); - - // Loop to commit to each file - for (int fid = sfid; fid <= efid; fid++) { - if (tsdbCommitToFile(pRepo, fid, iters, &whelper, pDataCols) < 0) { - tsdbError("vgId:%d failed to commit to file %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno)); - goto _exit; - } - } - } - - // Commit to update meta file - if (tsdbCommitMeta(pRepo) < 0) { - tsdbError("vgId:%d failed to commit data while committing meta data since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _exit; - } - - tsdbFitRetention(pRepo); - -_exit: - tdFreeDataCols(pDataCols); - tsdbDestroyCommitIters(iters, pMem->maxTables); - tsdbDestroyHelper(&whelper); - tsdbInfo("vgId:%d commit over", pRepo->config.tsdbId); - tsdbEndCommit(pRepo); - - return NULL; -} - // ---------------- LOCAL FUNCTIONS ---------------- static SMemTable* tsdbNewMemTable(STsdbRepo *pRepo) { STsdbMeta *pMeta = pRepo->tsdbMeta; @@ -508,240 +440,11 @@ static void tsdbFreeTableData(STableData *pTableData) { static char *tsdbGetTsTupleKey(const void *data) { return dataRowTuple((SDataRow)data); } - -static int tsdbCommitMeta(STsdbRepo *pRepo) { - SMemTable *pMem = pRepo->imem; - STsdbMeta *pMeta = pRepo->tsdbMeta; - SActObj * pAct = NULL; - SActCont * pCont = NULL; - - if (listNEles(pMem->actList) > 0) { - if (tdKVStoreStartCommit(pMeta->pStore) < 0) { - tsdbError("vgId:%d failed to commit data while start commit meta since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _err; - } - - SListNode *pNode = NULL; - - while ((pNode = tdListPopHead(pMem->actList)) != NULL) { - pAct = (SActObj *)pNode->data; - if (pAct->act == TSDB_UPDATE_META) { - pCont = (SActCont *)POINTER_SHIFT(pAct, sizeof(SActObj)); - if (tdUpdateKVStoreRecord(pMeta->pStore, pAct->uid, (void *)(pCont->cont), pCont->len) < 0) { - tsdbError("vgId:%d failed to update meta with uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid, - tstrerror(terrno)); - tdKVStoreEndCommit(pMeta->pStore); - goto _err; - } - } else if (pAct->act == TSDB_DROP_META) { - if (tdDropKVStoreRecord(pMeta->pStore, pAct->uid) < 0) { - tsdbError("vgId:%d failed to drop meta with uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid, - tstrerror(terrno)); - tdKVStoreEndCommit(pMeta->pStore); - goto _err; - } - } else { - ASSERT(false); - } - } - - if (tdKVStoreEndCommit(pMeta->pStore) < 0) { - tsdbError("vgId:%d failed to commit data while end commit meta since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _err; - } - } - - return 0; 
- -_err: - return -1; -} - -static void tsdbEndCommit(STsdbRepo *pRepo) { - if (pRepo->appH.notifyStatus) pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_OVER); - sem_post(&(pRepo->readyToCommit)); -} - -static int tsdbHasDataToCommit(SCommitIter *iters, int nIters, TSKEY minKey, TSKEY maxKey) { - for (int i = 0; i < nIters; i++) { - TSKEY nextKey = tsdbNextIterKey((iters + i)->pIter); - if (nextKey != TSDB_DATA_TIMESTAMP_NULL && (nextKey >= minKey && nextKey <= maxKey)) return 1; - } - return 0; -} - void tsdbGetFidKeyRange(int daysPerFile, int8_t precision, int fileId, TSKEY *minKey, TSKEY *maxKey) { *minKey = fileId * daysPerFile * tsMsPerDay[precision]; *maxKey = *minKey + daysPerFile * tsMsPerDay[precision] - 1; } -static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHelper *pHelper, SDataCols *pDataCols) { - char * dataDir = NULL; - STsdbCfg * pCfg = &pRepo->config; - STsdbFileH *pFileH = pRepo->tsdbFileH; - SFileGroup *pGroup = NULL; - SMemTable * pMem = pRepo->imem; - bool newLast = false; - - TSKEY minKey = 0, maxKey = 0; - tsdbGetFidKeyRange(pCfg->daysPerFile, pCfg->precision, fid, &minKey, &maxKey); - - // Check if there are data to commit to this file - int hasDataToCommit = tsdbHasDataToCommit(iters, pMem->maxTables, minKey, maxKey); - if (!hasDataToCommit) { - tsdbDebug("vgId:%d no data to commit to file %d", REPO_ID(pRepo), fid); - return 0; - } - - // Create and open files for commit - dataDir = tsdbGetDataDirName(pRepo->rootDir); - if (dataDir == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - return -1; - } - - if ((pGroup = tsdbCreateFGroupIfNeed(pRepo, dataDir, fid)) == NULL) { - tsdbError("vgId:%d failed to create file group %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno)); - goto _err; - } - - // Open files for write/read - if (tsdbSetAndOpenHelperFile(pHelper, pGroup) < 0) { - tsdbError("vgId:%d failed to set helper file since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _err; - } - - newLast = TSDB_NLAST_FILE_OPENED(pHelper); - - if (tsdbLoadCompIdx(pHelper, NULL) < 0) { - tsdbError("vgId:%d failed to load SCompIdx part since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _err; - } - - // Loop to commit data in each table - for (int tid = 1; tid < pMem->maxTables; tid++) { - SCommitIter *pIter = iters + tid; - if (pIter->pTable == NULL) continue; - - taosRLockLatch(&(pIter->pTable->latch)); - - if (tsdbSetHelperTable(pHelper, pIter->pTable, pRepo) < 0) goto _err; - - if (pIter->pIter != NULL) { - if (tdInitDataCols(pDataCols, tsdbGetTableSchemaImpl(pIter->pTable, false, false, -1)) < 0) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - goto _err; - } - - if (tsdbCommitTableData(pHelper, pIter, pDataCols, maxKey) < 0) { - taosRUnLockLatch(&(pIter->pTable->latch)); - tsdbError("vgId:%d failed to write data of table %s tid %d uid %" PRIu64 " since %s", REPO_ID(pRepo), - TABLE_CHAR_NAME(pIter->pTable), TABLE_TID(pIter->pTable), TABLE_UID(pIter->pTable), - tstrerror(terrno)); - goto _err; - } - } - - taosRUnLockLatch(&(pIter->pTable->latch)); - - // Move the last block to the new .l file if neccessary - if (tsdbMoveLastBlockIfNeccessary(pHelper) < 0) { - tsdbError("vgId:%d, failed to move last block, since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _err; - } - - // Write the SCompBlock part - if (tsdbWriteCompInfo(pHelper) < 0) { - tsdbError("vgId:%d, failed to write compInfo part since %s", REPO_ID(pRepo), tstrerror(terrno)); - goto _err; - } - } - - if (tsdbWriteCompIdx(pHelper) < 0) { - tsdbError("vgId:%d failed to 
write compIdx part to file %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno)); - goto _err; - } - - tfree(dataDir); - tsdbCloseHelperFile(pHelper, 0, pGroup); - - pthread_rwlock_wrlock(&(pFileH->fhlock)); - - (void)rename(helperNewHeadF(pHelper)->fname, helperHeadF(pHelper)->fname); - pGroup->files[TSDB_FILE_TYPE_HEAD].info = helperNewHeadF(pHelper)->info; - - if (newLast) { - (void)rename(helperNewLastF(pHelper)->fname, helperLastF(pHelper)->fname); - pGroup->files[TSDB_FILE_TYPE_LAST].info = helperNewLastF(pHelper)->info; - } else { - pGroup->files[TSDB_FILE_TYPE_LAST].info = helperLastF(pHelper)->info; - } - - pGroup->files[TSDB_FILE_TYPE_DATA].info = helperDataF(pHelper)->info; - - pthread_rwlock_unlock(&(pFileH->fhlock)); - - return 0; - -_err: - tfree(dataDir); - tsdbCloseHelperFile(pHelper, 1, NULL); - return -1; -} - -static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo) { - SMemTable *pMem = pRepo->imem; - STsdbMeta *pMeta = pRepo->tsdbMeta; - - SCommitIter *iters = (SCommitIter *)calloc(pMem->maxTables, sizeof(SCommitIter)); - if (iters == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - return NULL; - } - - if (tsdbRLockRepoMeta(pRepo) < 0) goto _err; - - // reference all tables - for (int i = 0; i < pMem->maxTables; i++) { - if (pMeta->tables[i] != NULL) { - tsdbRefTable(pMeta->tables[i]); - iters[i].pTable = pMeta->tables[i]; - } - } - - if (tsdbUnlockRepoMeta(pRepo) < 0) goto _err; - - for (int i = 0; i < pMem->maxTables; i++) { - if ((iters[i].pTable != NULL) && (pMem->tData[i] != NULL) && (TABLE_UID(iters[i].pTable) == pMem->tData[i]->uid)) { - if ((iters[i].pIter = tSkipListCreateIter(pMem->tData[i]->pData)) == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - goto _err; - } - - tSkipListIterNext(iters[i].pIter); - } - } - - return iters; - -_err: - tsdbDestroyCommitIters(iters, pMem->maxTables); - return NULL; -} - -static void tsdbDestroyCommitIters(SCommitIter *iters, int maxTables) { - if (iters == NULL) return; - - for (int i = 1; i < maxTables; i++) { - if (iters[i].pTable != NULL) { - tsdbUnRefTable(iters[i].pTable); - tSkipListDestroyIter(iters[i].pIter); - } - } - - free(iters); -} - static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables) { ASSERT(pMemTable->maxTables < maxTables); diff --git a/src/util/src/tkvstore.c b/src/util/src/tkvstore.c index 101dd4a6f4..31641ac9a7 100644 --- a/src/util/src/tkvstore.c +++ b/src/util/src/tkvstore.c @@ -236,6 +236,7 @@ int tdUpdateKVStoreRecord(SKVStore *pStore, uint64_t uid, void *cont, int contLe rInfo.offset = lseek(pStore->fd, 0, SEEK_CUR); if (rInfo.offset < 0) { uError("failed to lseek file %s since %s", pStore->fname, strerror(errno)); + terrno = TAOS_SYSTEM_ERROR(errno); return -1; } @@ -254,6 +255,7 @@ int tdUpdateKVStoreRecord(SKVStore *pStore, uint64_t uid, void *cont, int contLe if (taosWrite(pStore->fd, cont, contLen) < contLen) { uError("failed to write %d bytes to file %s since %s", contLen, pStore->fname, strerror(errno)); + terrno = TAOS_SYSTEM_ERROR(errno); return -1; } diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index 3dd1bae37f..7922476898 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -30,7 +30,7 @@ static SHashObj*tsVnodesHash; static void vnodeCleanUp(SVnodeObj *pVnode); -static int vnodeProcessTsdbStatus(void *arg, int status); +static int vnodeProcessTsdbStatus(void *arg, int status, int eno); static uint32_t vnodeGetFileInfo(void *ahandle, char *name, uint32_t *index, uint32_t eindex, int64_t *size, uint64_t *fversion); static 
int vnodeGetWalInfo(void *ahandle, char *fileName, int64_t *fileId); static void vnodeNotifyRole(void *ahandle, int8_t role); @@ -590,9 +590,13 @@ static void vnodeCleanUp(SVnodeObj *pVnode) { } // TODO: this is a simple implement -static int vnodeProcessTsdbStatus(void *arg, int status) { +static int vnodeProcessTsdbStatus(void *arg, int status, int eno) { SVnodeObj *pVnode = arg; + if (eno != TSDB_CODE_SUCCESS) { + // TODO: deal with the error here + } + if (status == TSDB_STATUS_COMMIT_START) { pVnode->fversion = pVnode->version; vDebug("vgId:%d, start commit, fver:%" PRIu64 " vver:%" PRIu64, pVnode->vgId, pVnode->fversion, pVnode->version);
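The hunk above threads the commit error code (eno) into the vnode's status callback but leaves its handling as a TODO. As a rough sketch only, under the assumption that the owner of the handle wants to remember a failed flush, a callback of the same (handle, status, eno) shape could look like the following; every name here (DemoVnode, demoNotifyStatus, the DEMO_* constants) is invented for illustration and is not a TDengine API.

#include <stdio.h>

/* Hypothetical stand-ins for the two status values used by the patch. */
enum { DEMO_COMMIT_START = 1, DEMO_COMMIT_OVER = 2 };

typedef struct {
  int       vgId;
  long long version;       /* last version handed to the committer     */
  long long fversion;      /* version believed to be durable on disk   */
  int       lastCommitErr; /* first non-zero eno seen, 0 while healthy */
} DemoVnode;

/* Same (handle, status, eno) shape as the callback in the hunk above. */
static int demoNotifyStatus(void *arg, int status, int eno) {
  DemoVnode *v = (DemoVnode *)arg;

  if (status == DEMO_COMMIT_START) {
    v->fversion = v->version;   /* snapshot the version being flushed */
  } else if (status == DEMO_COMMIT_OVER) {
    if (eno != 0) {
      v->lastCommitErr = eno;   /* keep the failure around for a retry decision */
      fprintf(stderr, "vgId:%d commit failed, eno:%d\n", v->vgId, eno);
    } else {
      v->lastCommitErr = 0;     /* the flush really reached disk */
    }
  }
  return 0;
}

int main(void) {
  DemoVnode v = {1, 100, 0, 0};
  demoNotifyStatus(&v, DEMO_COMMIT_START, 0);
  demoNotifyStatus(&v, DEMO_COMMIT_OVER, -1); /* simulate a failed commit */
  return v.lastCommitErr == 0 ? 0 : 1;
}

The design point the extra parameter makes is that TSDB_STATUS_COMMIT_OVER alone no longer implies success: the receiver is expected to inspect eno instead of assuming the in-memory snapshot reached disk.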