From b74601b11402a342ccefd8785dad9b5aef0cea76 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Thu, 29 Jul 2021 13:45:54 +0800 Subject: [PATCH 001/185] [TD-5538]: add force-keep-file option --- src/dnode/src/dnodeSystem.c | 2 ++ src/tsdb/src/tsdbFS.c | 40 +++++++++++++++++++++++++++++++++++++ src/util/inc/tconfig.h | 1 + 3 files changed, 43 insertions(+) diff --git a/src/dnode/src/dnodeSystem.c b/src/dnode/src/dnodeSystem.c index e49b3eba99..c3e4dae206 100644 --- a/src/dnode/src/dnodeSystem.c +++ b/src/dnode/src/dnodeSystem.c @@ -42,6 +42,8 @@ int32_t main(int32_t argc, char *argv[]) { } } else if (strcmp(argv[i], "-C") == 0) { dump_config = 1; + } else if (strcmp(argv[i], "--force-keep-file") == 0) { + tsdbForceKeepFile = true; } else if (strcmp(argv[i], "-V") == 0) { #ifdef _ACCT char *versionStr = "enterprise"; diff --git a/src/tsdb/src/tsdbFS.c b/src/tsdb/src/tsdbFS.c index c9f087a5cf..bd70585dd7 100644 --- a/src/tsdb/src/tsdbFS.c +++ b/src/tsdb/src/tsdbFS.c @@ -35,6 +35,8 @@ static int tsdbRestoreCurrent(STsdbRepo *pRepo); static int tsdbComparTFILE(const void *arg1, const void *arg2); static void tsdbScanAndTryFixDFilesHeader(STsdbRepo *pRepo); +// For backward compatibility +bool tsdbForceKeepFile = false; // ================== CURRENT file header info static int tsdbEncodeFSHeader(void **buf, SFSHeader *pHeader) { int tlen = 0; @@ -982,6 +984,26 @@ static int tsdbRestoreMeta(STsdbRepo *pRepo) { return -1; } + if (tsdbForceKeepFile) { + struct stat tfstat; + + // Get real file size + if (fstat(pfs->cstatus->pmf->fd, &tfstat) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + tsdbCloseMFile(pfs->cstatus->pmf); + tfsClosedir(tdir); + regfree(®ex); + return -1; + } + + if (pfs->cstatus->pmf->info.size != tfstat.st_size) { + int64_t tfsize = pfs->cstatus->pmf->info.size; + pfs->cstatus->pmf->info.size = tfstat.st_size; + tsdbInfo("vgId:%d file %s header size is changed from %" PRId64 " to %" PRId64, REPO_ID(pRepo), + TSDB_FILE_FULL_NAME(pfs->cstatus->pmf), tfsize, pfs->cstatus->pmf->info.size); + } + } + tsdbCloseMFile(pfs->cstatus->pmf); } } else if (code == REG_NOMATCH) { @@ -1141,6 +1163,24 @@ static int tsdbRestoreDFileSet(STsdbRepo *pRepo) { return -1; } + if (tsdbForceKeepFile) { + struct stat tfstat; + + // Get real file size + if (fstat(pDFile->fd, &tfstat) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + taosArrayDestroy(fArray); + return -1; + } + + if (pDFile->info.size != tfstat.st_size) { + int64_t tfsize = pDFile->info.size; + pDFile->info.size = tfstat.st_size; + tsdbInfo("vgId:%d file %s header size is changed from %" PRId64 " to %" PRId64, REPO_ID(pRepo), + TSDB_FILE_FULL_NAME(pDFile), tfsize, pDFile->info.size); + } + } + tsdbCloseDFile(pDFile); index++; } diff --git a/src/util/inc/tconfig.h b/src/util/inc/tconfig.h index fdb2595fd8..d6e0f4ca46 100644 --- a/src/util/inc/tconfig.h +++ b/src/util/inc/tconfig.h @@ -77,6 +77,7 @@ typedef struct { extern SGlobalCfg tsGlobalConfig[]; extern int32_t tsGlobalConfigNum; extern char * tsCfgStatusStr[]; +extern bool tsdbForceKeepFile; void taosReadGlobalLogCfg(); bool taosReadGlobalCfg(); From 57794a070e7599fe644ede4960c56e9c58a9e97b Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Tue, 3 Aug 2021 15:29:55 +0800 Subject: [PATCH 002/185] [TD-5622]: generate SMemRow from source --- src/client/inc/tsclient.h | 514 ++++++++++++++++++++++++++- src/client/src/tscParseInsert.c | 600 ++++++++------------------------ src/client/src/tscPrepare.c | 2 +- src/client/src/tscUtil.c | 140 +------- src/common/inc/tdataformat.h | 90 ++++- 
src/common/src/tdataformat.c | 3 +- 6 files changed, 733 insertions(+), 616 deletions(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 904f5d4503..c63c84df94 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -43,6 +43,8 @@ struct SSqlInfo; typedef void (*__async_cb_func_t)(void *param, TAOS_RES *tres, int32_t numOfRows); +#define __5221_BRANCH__ + typedef struct SNewVgroupInfo { int32_t vgId; int8_t inUse; @@ -84,9 +86,14 @@ typedef struct SParamInfo { } SParamInfo; typedef struct SBoundColumn { - bool hasVal; // denote if current column has bound or not - int32_t offset; // all column offset value + int32_t offset; // all column offset value + int32_t toffset; // first part offset for SDataRow TODO: get offset from STSchema on future + uint8_t valStat; // denote if current column bound or not(0 means has val, 1 means no val) } SBoundColumn; +typedef enum { + VAL_STAT_HAS = 0x0, // 0 means has val + VAL_STAT_NONE = 0x01, // 1 means no val +} EValStat; typedef struct { uint16_t schemaColIdx; @@ -99,32 +106,108 @@ typedef enum _COL_ORDER_STATUS { ORDER_STATUS_ORDERED = 1, ORDER_STATUS_DISORDERED = 2, } EOrderStatus; - typedef struct SParsedDataColInfo { int16_t numOfCols; int16_t numOfBound; - int32_t * boundedColumns; // bounded column idx according to schema + uint16_t flen; // TODO: get from STSchema + uint16_t allNullLen; // TODO: get from STSchema + uint16_t extendedVarLen; + int32_t * boundedColumns; // bound column idx according to schema SBoundColumn * cols; SBoundIdxInfo *colIdxInfo; - int8_t orderStatus; // bounded columns: + int8_t orderStatus; // bound columns } SParsedDataColInfo; -#define IS_DATA_COL_ORDERED(s) ((s) == (int8_t)ORDER_STATUS_ORDERED) +#define IS_DATA_COL_ORDERED(spd) ((spd->orderStatus) == (int8_t)ORDER_STATUS_ORDERED) typedef struct { - SSchema * pSchema; - int16_t sversion; - int32_t flen; - uint16_t nCols; - void * buf; - void * pDataBlock; - SSubmitBlk *pSubmitBlk; + int32_t dataLen; // len of SDataRow + int32_t kvLen; // len of SKVRow +} SMemRowInfo; +typedef struct { + uint8_t memRowType; + uint8_t compareStat; // 0 unknown, 1 need compare, 2 no need + TDRowTLenT dataRowInitLen; + TDRowTLenT kvRowInitLen; + SMemRowInfo *rowInfo; } SMemRowBuilder; -typedef struct { - TDRowLenT allNullLen; -} SMemRowHelper; +typedef enum { + ROW_COMPARE_UNKNOWN = 0, + ROW_COMPARE_NEED = 1, + ROW_COMPARE_NO_NEED = 2, +} ERowCompareStat; +int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec); + +int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint32_t nCols, uint32_t nBoundCols, + int32_t allNullLen); +void destroyMemRowBuilder(SMemRowBuilder *pBuilder); + +/** + * @brief + * + * @param memRowType + * @param spd + * @param idx the absolute bound index of columns + * @return FORCE_INLINE + */ +static FORCE_INLINE void tscGetMemRowAppendInfo(SSchema *pSchema, uint8_t memRowType, SParsedDataColInfo *spd, + int32_t idx, int32_t *toffset, int16_t *colId) { + int32_t schemaIdx = 0; + if (IS_DATA_COL_ORDERED(spd)) { + schemaIdx = spd->boundedColumns[idx]; + if (isDataRowT(memRowType)) { + *toffset = (spd->cols + schemaIdx)->toffset; // the offset of firstPart + } else { + *toffset = idx * sizeof(SColIdx); // the offset of SColIdx + } + } else { + ASSERT(idx == (spd->colIdxInfo + idx)->boundIdx); + schemaIdx = (spd->colIdxInfo + idx)->schemaColIdx; + if (isDataRowT(memRowType)) { + *toffset = (spd->cols + schemaIdx)->toffset; + } else { + *toffset = ((spd->colIdxInfo + 
idx)->finalIdx) * sizeof(SColIdx); + } + } + *colId = pSchema[schemaIdx].colId; +} + +/** + * @brief Applicable to consume by multi-columns + * + * @param row + * @param value + * @param isCopyVarData In some scenario, the varVal is copied to row directly before calling tdAppend***ColVal() + * @param colId + * @param colType + * @param idx index in SSchema + * @param pBuilder + * @param spd + * @return FORCE_INLINE + */ +static FORCE_INLINE void tscAppendMemRowColVal(SMemRow row, const void *value, bool isCopyVarData, int16_t colId, + int8_t colType, int32_t toffset, SMemRowBuilder *pBuilder, + int32_t rowNum) { + tdAppendMemRowColVal(row, value, isCopyVarData, colId, colType, toffset); + // TODO: When nBoundCols/nCols > 0.5, + if (pBuilder->compareStat == ROW_COMPARE_NEED) { + SMemRowInfo *pRowInfo = pBuilder->rowInfo + rowNum; + tdGetColAppendDeltaLen(value, colType, &pRowInfo->dataLen, &pRowInfo->kvLen); + } +} + +// Applicable to consume by one row +static FORCE_INLINE void tscAppendMemRowColValEx(SMemRow row, const void *value, bool isCopyVarData, int16_t colId, + int8_t colType, int32_t toffset, int32_t *dataLen, int32_t *kvLen, + uint8_t compareStat) { + tdAppendMemRowColVal(row, value, isCopyVarData, colId, colType, toffset); + // TODO: When nBoundCols/nCols > 0.5, + if (compareStat == ROW_COMPARE_NEED) { + tdGetColAppendDeltaLen(value, colType, dataLen, kvLen); + } +} typedef struct STableDataBlocks { SName tableName; int8_t tsSource; // where does the UNIX timestamp come from, server or client @@ -146,7 +229,7 @@ typedef struct STableDataBlocks { uint32_t numOfAllocedParams; uint32_t numOfParams; SParamInfo * params; - SMemRowHelper rowHelper; + SMemRowBuilder rowBuilder; } STableDataBlocks; typedef struct { @@ -435,8 +518,401 @@ int16_t getNewResColId(SSqlCmd* pCmd); int32_t schemaIdxCompar(const void *lhs, const void *rhs); int32_t boundIdxCompar(const void *lhs, const void *rhs); -int initSMemRowHelper(SMemRowHelper *pHelper, SSchema *pSSchema, uint16_t nCols, uint16_t allNullColsLen); -int32_t getExtendedRowSize(STableComInfo *tinfo); +static FORCE_INLINE int32_t getExtendedRowSize(STableDataBlocks *pBlock) { +#ifdef __5221_BRANCH__ + ASSERT(pBlock->rowSize == pBlock->pTableMeta->tableInfo.rowSize); +#endif + return pBlock->pTableMeta->tableInfo.rowSize + TD_MEM_ROW_DATA_HEAD_SIZE + pBlock->boundColumnInfo.extendedVarLen; +} + +static FORCE_INLINE void checkAndConvertMemRow(SMemRow row, int32_t dataLen, int32_t kvLen) { + if (isDataRow(row)) { + if (kvLen < (dataLen * KVRatioConvert)) { + memRowSetConvert(row); + } + } else if (kvLen > dataLen) { + memRowSetConvert(row); + } +} + +static FORCE_INLINE void initSMemRow(SMemRow row, uint8_t memRowType, STableDataBlocks *pBlock, int16_t nBoundCols) { + memRowSetType(row, memRowType); + if (isDataRowT(memRowType)) { + dataRowSetVersion(memRowDataBody(row), pBlock->pTableMeta->sversion); + dataRowSetLen(memRowDataBody(row), (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + pBlock->boundColumnInfo.flen)); + } else { + ASSERT(nBoundCols > 0); + memRowSetKvVersion(row, pBlock->pTableMeta->sversion); + kvRowSetNCols(memRowKvBody(row), nBoundCols); + kvRowSetLen(memRowKvBody(row), (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nBoundCols)); + } +} +/** + * TODO: Move to tdataformat.h and refactor when STSchema available. 
+ * - fetch flen and toffset from STSChema and remove param spd + */ +static FORCE_INLINE void convertToSDataRow(SMemRow dest, SMemRow src, SSchema *pSchema, int nCols, + SParsedDataColInfo *spd) { + ASSERT(isKvRow(src)); + + SDataRow dataRow = memRowDataBody(dest); + SKVRow kvRow = memRowKvBody(src); + + memRowSetType(dest, SMEM_ROW_DATA); + dataRowSetVersion(dataRow, memRowKvVersion(src)); + dataRowSetLen(dataRow, (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + spd->flen)); + + int32_t kvIdx = 0; + for (int i = 0; i < nCols; ++i) { + SSchema *schema = pSchema + i; + void * val = tdGetKVRowValOfColEx(kvRow, schema->colId, &kvIdx); + tdAppendDataColVal(dataRow, val != NULL ? val : getNullValue(schema->type), true, schema->type, + (spd->cols + i)->toffset); + } +} + +// TODO: Move to tdataformat.h and refactor when STSchema available. +static FORCE_INLINE void convertToSKVRow(SMemRow dest, SMemRow src, SSchema *pSchema, int nCols, int nBoundCols, + SParsedDataColInfo *spd) { + ASSERT(isDataRow(src)); + + SDataRow dataRow = memRowKvBody(src); + SKVRow kvRow = memRowDataBody(dest); + + memRowSetType(dest, SMEM_ROW_KV); + memRowSetKvVersion(kvRow, dataRowVersion(dataRow)); + kvRowSetNCols(kvRow, nBoundCols); + kvRowSetLen(kvRow, (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nBoundCols)); + + int32_t toffset = 0, kvOffset = 0; + for (int i = 0; i < nCols; ++i) { + SSchema *schema = pSchema + i; + if ((spd->cols + i)->valStat == VAL_STAT_HAS) { + toffset = (spd->cols + i)->toffset; + void *val = tdGetRowDataOfCol(dataRow, schema->type, toffset + TD_DATA_ROW_HEAD_SIZE); + tdAppendKvColVal(kvRow, val, true, schema->colId, schema->type, kvOffset); + kvOffset += sizeof(SColIdx); + } + } +} + +// TODO: Move to tdataformat.h and refactor when STSchema available. +static FORCE_INLINE void convertSMemRow(SMemRow dest, SMemRow src, STableDataBlocks *pBlock) { + STableMeta * pTableMeta = pBlock->pTableMeta; + STableComInfo tinfo = tscGetTableInfo(pTableMeta); + SSchema * pSchema = tscGetTableSchema(pTableMeta); + SParsedDataColInfo *spd = &pBlock->boundColumnInfo; + + ASSERT(dest != src); + + if (isDataRow(src)) { + // TODO: Can we use pBlock -> numOfParam directly? 
+ ASSERT(spd->numOfBound > 0); + convertToSKVRow(dest, src, pSchema, tinfo.numOfColumns, spd->numOfBound, spd); + } else { + convertToSDataRow(dest, src, pSchema, tinfo.numOfColumns, spd); + } +} + +static bool isNullStr(SStrToken *pToken) { + return (pToken->type == TK_NULL) || ((pToken->type == TK_STRING) && (pToken->n != 0) && + (strncasecmp(TSDB_DATA_NULL_STR_L, pToken->z, pToken->n) == 0)); +} + +static FORCE_INLINE int32_t tscToDouble(SStrToken *pToken, double *value, char **endPtr) { + errno = 0; + *value = strtold(pToken->z, endPtr); + + // not a valid integer number, return error + if ((*endPtr - pToken->z) != pToken->n) { + return TK_ILLEGAL; + } + + return pToken->type; +} + +static uint8_t TRUE_VALUE = (uint8_t)TSDB_TRUE; +static uint8_t FALSE_VALUE = (uint8_t)TSDB_FALSE; + +static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pToken, SMemRow row, char *msg, char **str, + bool primaryKey, int16_t timePrec, int32_t toffset, int16_t colId, + int32_t *dataLen, int32_t *kvLen, uint8_t compareStat) { + int64_t iv; + int32_t ret; + char * endptr = NULL; + + if (IS_NUMERIC_TYPE(pSchema->type) && pToken->n == 0) { + return tscInvalidOperationMsg(msg, "invalid numeric data", pToken->z); + } + + switch (pSchema->type) { + case TSDB_DATA_TYPE_BOOL: { // bool + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + if ((pToken->type == TK_BOOL || pToken->type == TK_STRING) && (pToken->n != 0)) { + if (strncmp(pToken->z, "true", pToken->n) == 0) { + tscAppendMemRowColValEx(row, &TRUE_VALUE, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } else if (strncmp(pToken->z, "false", pToken->n) == 0) { + tscAppendMemRowColValEx(row, &FALSE_VALUE, true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + return tscSQLSyntaxErrMsg(msg, "invalid bool data", pToken->z); + } + } else if (pToken->type == TK_INTEGER) { + iv = strtoll(pToken->z, NULL, 10); + tscAppendMemRowColValEx(row, ((iv == 0) ? &FALSE_VALUE : &TRUE_VALUE), true, colId, pSchema->type, toffset, + dataLen, kvLen, compareStat); + } else if (pToken->type == TK_FLOAT) { + double dv = strtod(pToken->z, NULL); + tscAppendMemRowColValEx(row, ((dv == 0) ? 
&FALSE_VALUE : &TRUE_VALUE), true, colId, pSchema->type, toffset, + dataLen, kvLen, compareStat); + } else { + return tscInvalidOperationMsg(msg, "invalid bool data", pToken->z); + } + } + break; + } + + case TSDB_DATA_TYPE_TINYINT: + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid tinyint data", pToken->z); + } else if (!IS_VALID_TINYINT(iv)) { + return tscInvalidOperationMsg(msg, "data overflow", pToken->z); + } + + uint8_t tmpVal = (uint8_t)iv; + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + + break; + + case TSDB_DATA_TYPE_UTINYINT: + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid unsigned tinyint data", pToken->z); + } else if (!IS_VALID_UTINYINT(iv)) { + return tscInvalidOperationMsg(msg, "unsigned tinyint data overflow", pToken->z); + } + + uint8_t tmpVal = (uint8_t)iv; + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + + break; + + case TSDB_DATA_TYPE_SMALLINT: + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid smallint data", pToken->z); + } else if (!IS_VALID_SMALLINT(iv)) { + return tscInvalidOperationMsg(msg, "smallint data overflow", pToken->z); + } + + int16_t tmpVal = (int16_t)iv; + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + + break; + + case TSDB_DATA_TYPE_USMALLINT: + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid unsigned smallint data", pToken->z); + } else if (!IS_VALID_USMALLINT(iv)) { + return tscInvalidOperationMsg(msg, "unsigned smallint data overflow", pToken->z); + } + + uint16_t tmpVal = (uint16_t)iv; + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + + break; + + case TSDB_DATA_TYPE_INT: + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid int data", pToken->z); + } else if (!IS_VALID_INT(iv)) { + return tscInvalidOperationMsg(msg, "int data overflow", pToken->z); + } + + int32_t tmpVal = (int32_t)iv; + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + + break; + + case TSDB_DATA_TYPE_UINT: + if (isNullStr(pToken)) { + 
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid unsigned int data", pToken->z); + } else if (!IS_VALID_UINT(iv)) { + return tscInvalidOperationMsg(msg, "unsigned int data overflow", pToken->z); + } + + uint32_t tmpVal = (uint32_t)iv; + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + + break; + + case TSDB_DATA_TYPE_BIGINT: + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid bigint data", pToken->z); + } else if (!IS_VALID_BIGINT(iv)) { + return tscInvalidOperationMsg(msg, "bigint data overflow", pToken->z); + } + + tscAppendMemRowColValEx(row, &iv, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + break; + + case TSDB_DATA_TYPE_UBIGINT: + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid unsigned bigint data", pToken->z); + } else if (!IS_VALID_UBIGINT((uint64_t)iv)) { + return tscInvalidOperationMsg(msg, "unsigned bigint data overflow", pToken->z); + } + + uint64_t tmpVal = (uint64_t)iv; + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + break; + + case TSDB_DATA_TYPE_FLOAT: + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + double dv; + if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { + return tscInvalidOperationMsg(msg, "illegal float data", pToken->z); + } + + if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || dv > FLT_MAX || dv < -FLT_MAX || isinf(dv) || + isnan(dv)) { + return tscInvalidOperationMsg(msg, "illegal float data", pToken->z); + } + + float tmpVal = (float)dv; + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + break; + + case TSDB_DATA_TYPE_DOUBLE: + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + double dv; + if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { + return tscInvalidOperationMsg(msg, "illegal double data", pToken->z); + } + + if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || isinf(dv) || isnan(dv)) { + return tscInvalidOperationMsg(msg, "illegal double data", pToken->z); + } + + tscAppendMemRowColValEx(row, &dv, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + break; + + case TSDB_DATA_TYPE_BINARY: + // binary data cannot be null-terminated char string, otherwise the last char of the string is lost + if (pToken->type == TK_NULL) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { // too long values will return 
invalid sql, not be truncated automatically + if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) { // todo refactor + return tscInvalidOperationMsg(msg, "string data overflow", pToken->z); + } + // STR_WITH_SIZE_TO_VARSTR(payload, pToken->z, pToken->n); + char *rowEnd = memRowEnd(row); + STR_WITH_SIZE_TO_VARSTR(rowEnd, pToken->z, pToken->n); + tscAppendMemRowColValEx(row, rowEnd, false, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + break; + + case TSDB_DATA_TYPE_NCHAR: + if (pToken->type == TK_NULL) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + // if the converted output len is over than pColumnModel->bytes, return error: 'Argument list too long' + int32_t output = 0; + char * rowEnd = memRowEnd(row); + if (!taosMbsToUcs4(pToken->z, pToken->n, (char *)varDataVal(rowEnd), pSchema->bytes - VARSTR_HEADER_SIZE, + &output)) { + char buf[512] = {0}; + snprintf(buf, tListLen(buf), "%s", strerror(errno)); + return tscInvalidOperationMsg(msg, buf, pToken->z); + } + varDataSetLen(rowEnd, output); + tscAppendMemRowColValEx(row, rowEnd, false, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + break; + + case TSDB_DATA_TYPE_TIMESTAMP: { + if (pToken->type == TK_NULL) { + if (primaryKey) { + // When building SKVRow primaryKey, we should not skip even with NULL value. + int64_t tmpVal = 0; + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } else { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } + } else { + int64_t tmpVal; + if (tsParseTime(pToken, &tmpVal, str, msg, timePrec) != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid timestamp", pToken->z); + } + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + + break; + } + } + + return TSDB_CODE_SUCCESS; +} #ifdef __cplusplus } diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 8ecc58eb55..9cc2eef662 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -38,45 +38,65 @@ enum { TSDB_USE_CLI_TS = 1, }; -static uint8_t TRUE_VALUE = (uint8_t)TSDB_TRUE; -static uint8_t FALSE_VALUE = (uint8_t)TSDB_FALSE; - static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t *numOfRows); static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDataColInfo *pColInfo, SSchema *pSchema, char *str, char **end); - -int32_t getExtendedRowSize(STableComInfo *tinfo) { - return tinfo->rowSize + PAYLOAD_HEADER_LEN + PAYLOAD_COL_HEAD_LEN * tinfo->numOfColumns; -} -int initSMemRowHelper(SMemRowHelper *pHelper, SSchema *pSSchema, uint16_t nCols, uint16_t allNullColsLen) { - pHelper->allNullLen = allNullColsLen; // TODO: get allNullColsLen when creating or altering table meta - if (pHelper->allNullLen == 0) { - for (uint16_t i = 0; i < nCols; ++i) { - uint8_t type = pSSchema[i].type; - int32_t typeLen = TYPE_BYTES[type]; - pHelper->allNullLen += typeLen; - if (TSDB_DATA_TYPE_BINARY == type) { - pHelper->allNullLen += (VARSTR_HEADER_SIZE + CHAR_BYTES); - } else if (TSDB_DATA_TYPE_NCHAR == type) { - int len = VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE; - pHelper->allNullLen += len; - } +int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint32_t nCols, uint32_t nBoundCols, + int32_t allNullLen) { +#ifdef 
__5221_BRANCH__ + ASSERT(nRows >= 0 && nCols > 0 && (nBoundCols <= nCols)); +#endif + if (nRows > 0) { + // already init(bind multiple rows by single column) + if (pBuilder->compareStat == ROW_COMPARE_NEED && (pBuilder->rowInfo != NULL)) { + return TSDB_CODE_SUCCESS; } } - return 0; -} -static int32_t tscToDouble(SStrToken *pToken, double *value, char **endPtr) { - errno = 0; - *value = strtold(pToken->z, endPtr); - - // not a valid integer number, return error - if ((*endPtr - pToken->z) != pToken->n) { - return TK_ILLEGAL; + + if (nBoundCols == 0) { // file input + pBuilder->memRowType = SMEM_ROW_DATA; + pBuilder->compareStat = ROW_COMPARE_NO_NEED; + return TSDB_CODE_SUCCESS; + } else { + float boundRatio = ((float)nBoundCols / (float)nCols); + + if (boundRatio < KVRatioKV) { + pBuilder->memRowType = SMEM_ROW_KV; + pBuilder->compareStat = ROW_COMPARE_NO_NEED; + return TSDB_CODE_SUCCESS; + } else if (boundRatio > KVRatioData) { + pBuilder->memRowType = SMEM_ROW_DATA; + pBuilder->compareStat = ROW_COMPARE_NO_NEED; + return TSDB_CODE_SUCCESS; + } + pBuilder->compareStat = ROW_COMPARE_NEED; + + if (boundRatio < KVRatioPredict) { + pBuilder->memRowType = SMEM_ROW_KV; + } else { + pBuilder->memRowType = SMEM_ROW_DATA; + } } - return pToken->type; + pBuilder->dataRowInitLen = TD_MEM_ROW_DATA_HEAD_SIZE + allNullLen; + pBuilder->kvRowInitLen = TD_MEM_ROW_KV_HEAD_SIZE + nBoundCols * sizeof(SColIdx); + + if (nRows > 0) { + pBuilder->rowInfo = tcalloc(nRows, sizeof(SMemRowInfo)); + if (pBuilder->rowInfo == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + for (int i = 0; i < nRows; ++i) { + (pBuilder->rowInfo + i)->dataLen = pBuilder->dataRowInitLen; + (pBuilder->rowInfo + i)->kvLen = pBuilder->kvRowInitLen; + } + } + + return TSDB_CODE_SUCCESS; } + int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec) { int32_t index = 0; SStrToken sToken; @@ -147,10 +167,6 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1 return TSDB_CODE_SUCCESS; } -static bool isNullStr(SStrToken* pToken) { - return (pToken->type == TK_NULL) || ((pToken->type == TK_STRING) && (pToken->n != 0) && - (strncasecmp(TSDB_DATA_NULL_STR_L, pToken->z, pToken->n) == 0)); -} int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, char *msg, char **str, bool primaryKey, int16_t timePrec) { int64_t iv; @@ -401,342 +417,6 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha return TSDB_CODE_SUCCESS; } -static FORCE_INLINE TDRowLenT tsSetPayloadColValue(char *payloadStart, char *payload, int16_t columnId, - uint8_t columnType, const void *value, uint16_t valueLen, TDRowTLenT tOffset) { - payloadColSetId(payload, columnId); - payloadColSetType(payload, columnType); - memcpy(POINTER_SHIFT(payloadStart,tOffset), value, valueLen); - return valueLen; -} - -static int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pToken, char *payloadStart, char *primaryKeyStart, - char *payload, char *msg, char **str, bool primaryKey, int16_t timePrec, - TDRowTLenT tOffset, TDRowLenT *sizeAppend, TDRowLenT *dataRowColDeltaLen, - TDRowLenT *kvRowColLen) { - int64_t iv; - int32_t ret; - char * endptr = NULL; - - if (IS_NUMERIC_TYPE(pSchema->type) && pToken->n == 0) { - return tscInvalidOperationMsg(msg, "invalid numeric data", pToken->z); - } - - switch (pSchema->type) { - case TSDB_DATA_TYPE_BOOL: { // bool - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - 
getNullValue(TSDB_DATA_TYPE_BOOL), TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset); - } else { - if ((pToken->type == TK_BOOL || pToken->type == TK_STRING) && (pToken->n != 0)) { - if (strncmp(pToken->z, "true", pToken->n) == 0) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &TRUE_VALUE, - TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BOOL]); - } else if (strncmp(pToken->z, "false", pToken->n) == 0) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &FALSE_VALUE, - TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BOOL]); - } else { - return tscSQLSyntaxErrMsg(msg, "invalid bool data", pToken->z); - } - } else if (pToken->type == TK_INTEGER) { - iv = strtoll(pToken->z, NULL, 10); - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - ((iv == 0) ? &FALSE_VALUE : &TRUE_VALUE), TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BOOL]); - } else if (pToken->type == TK_FLOAT) { - double dv = strtod(pToken->z, NULL); - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - ((dv == 0) ? &FALSE_VALUE : &TRUE_VALUE), TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BOOL]); - } else { - return tscInvalidOperationMsg(msg, "invalid bool data", pToken->z); - } - } - break; - } - - case TSDB_DATA_TYPE_TINYINT: - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_TINYINT), TYPE_BYTES[TSDB_DATA_TYPE_TINYINT], tOffset); - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid tinyint data", pToken->z); - } else if (!IS_VALID_TINYINT(iv)) { - return tscInvalidOperationMsg(msg, "data overflow", pToken->z); - } - - uint8_t tmpVal = (uint8_t)iv; - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, - TYPE_BYTES[TSDB_DATA_TYPE_TINYINT], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_TINYINT]); - } - - break; - - case TSDB_DATA_TYPE_UTINYINT: - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_UTINYINT), TYPE_BYTES[TSDB_DATA_TYPE_UTINYINT], tOffset); - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid unsigned tinyint data", pToken->z); - } else if (!IS_VALID_UTINYINT(iv)) { - return tscInvalidOperationMsg(msg, "unsigned tinyint data overflow", pToken->z); - } - - uint8_t tmpVal = (uint8_t)iv; - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, - TYPE_BYTES[TSDB_DATA_TYPE_UTINYINT], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_UTINYINT]); - } - - break; - - case TSDB_DATA_TYPE_SMALLINT: - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_SMALLINT), TYPE_BYTES[TSDB_DATA_TYPE_SMALLINT], tOffset); - } else { - ret = 
tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid smallint data", pToken->z); - } else if (!IS_VALID_SMALLINT(iv)) { - return tscInvalidOperationMsg(msg, "smallint data overflow", pToken->z); - } - - int16_t tmpVal = (int16_t)iv; - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, - TYPE_BYTES[TSDB_DATA_TYPE_SMALLINT], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_SMALLINT]); - } - - break; - - case TSDB_DATA_TYPE_USMALLINT: - if (isNullStr(pToken)) { - *sizeAppend = - tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_USMALLINT), TYPE_BYTES[TSDB_DATA_TYPE_USMALLINT], tOffset); - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid unsigned smallint data", pToken->z); - } else if (!IS_VALID_USMALLINT(iv)) { - return tscInvalidOperationMsg(msg, "unsigned smallint data overflow", pToken->z); - } - - uint16_t tmpVal = (uint16_t)iv; - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, - TYPE_BYTES[TSDB_DATA_TYPE_USMALLINT], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_USMALLINT]); - } - - break; - - case TSDB_DATA_TYPE_INT: - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_INT), TYPE_BYTES[TSDB_DATA_TYPE_INT], tOffset); - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid int data", pToken->z); - } else if (!IS_VALID_INT(iv)) { - return tscInvalidOperationMsg(msg, "int data overflow", pToken->z); - } - - int32_t tmpVal = (int32_t)iv; - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, - TYPE_BYTES[TSDB_DATA_TYPE_INT], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_INT]); - } - - break; - - case TSDB_DATA_TYPE_UINT: - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_UINT), TYPE_BYTES[TSDB_DATA_TYPE_UINT], tOffset); - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid unsigned int data", pToken->z); - } else if (!IS_VALID_UINT(iv)) { - return tscInvalidOperationMsg(msg, "unsigned int data overflow", pToken->z); - } - - uint32_t tmpVal = (uint32_t)iv; - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, - TYPE_BYTES[TSDB_DATA_TYPE_UINT], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_UINT]); - } - - break; - - case TSDB_DATA_TYPE_BIGINT: - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_BIGINT), TYPE_BYTES[TSDB_DATA_TYPE_BIGINT], tOffset); - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid bigint data", pToken->z); - } else if (!IS_VALID_BIGINT(iv)) { - return tscInvalidOperationMsg(msg, "bigint data 
overflow", pToken->z); - } - - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &iv, - TYPE_BYTES[TSDB_DATA_TYPE_BIGINT], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BIGINT]); - } - break; - - case TSDB_DATA_TYPE_UBIGINT: - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_UBIGINT), TYPE_BYTES[TSDB_DATA_TYPE_UBIGINT], tOffset); - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid unsigned bigint data", pToken->z); - } else if (!IS_VALID_UBIGINT((uint64_t)iv)) { - return tscInvalidOperationMsg(msg, "unsigned bigint data overflow", pToken->z); - } - - uint64_t tmpVal = (uint64_t)iv; - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, - TYPE_BYTES[TSDB_DATA_TYPE_UBIGINT], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_UBIGINT]); - } - break; - - case TSDB_DATA_TYPE_FLOAT: - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_FLOAT), TYPE_BYTES[TSDB_DATA_TYPE_FLOAT], tOffset); - } else { - double dv; - if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { - return tscInvalidOperationMsg(msg, "illegal float data", pToken->z); - } - - if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || dv > FLT_MAX || dv < -FLT_MAX || isinf(dv) || - isnan(dv)) { - return tscInvalidOperationMsg(msg, "illegal float data", pToken->z); - } - - float tmpVal = (float)dv; - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, - TYPE_BYTES[TSDB_DATA_TYPE_FLOAT], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_FLOAT]); - } - break; - - case TSDB_DATA_TYPE_DOUBLE: - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_DOUBLE), TYPE_BYTES[TSDB_DATA_TYPE_DOUBLE], tOffset); - } else { - double dv; - if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { - return tscInvalidOperationMsg(msg, "illegal double data", pToken->z); - } - - if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || isinf(dv) || isnan(dv)) { - return tscInvalidOperationMsg(msg, "illegal double data", pToken->z); - } - - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &dv, - TYPE_BYTES[TSDB_DATA_TYPE_DOUBLE], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_DOUBLE]); - } - break; - - case TSDB_DATA_TYPE_BINARY: - // binary data cannot be null-terminated char string, otherwise the last char of the string is lost - if (pToken->type == TK_NULL) { - payloadColSetId(payload, pSchema->colId); - payloadColSetType(payload, pSchema->type); - memcpy(POINTER_SHIFT(payloadStart, tOffset), getNullValue(TSDB_DATA_TYPE_BINARY), VARSTR_HEADER_SIZE + CHAR_BYTES); - *sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + CHAR_BYTES); - } else { // too long values will return invalid sql, not be truncated automatically - if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) { // todo refactor - return tscInvalidOperationMsg(msg, "string data overflow", pToken->z); - } - // STR_WITH_SIZE_TO_VARSTR(payload, pToken->z, pToken->n); - - payloadColSetId(payload, 
pSchema->colId); - payloadColSetType(payload, pSchema->type); - varDataSetLen(POINTER_SHIFT(payloadStart,tOffset), pToken->n); - memcpy(varDataVal(POINTER_SHIFT(payloadStart,tOffset)), pToken->z, pToken->n); - *sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + pToken->n); - *dataRowColDeltaLen += (TDRowLenT)(pToken->n - CHAR_BYTES); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + VARSTR_HEADER_SIZE + pToken->n); - } - - break; - - case TSDB_DATA_TYPE_NCHAR: - if (pToken->type == TK_NULL) { - payloadColSetId(payload, pSchema->colId); - payloadColSetType(payload, pSchema->type); - memcpy(POINTER_SHIFT(payloadStart,tOffset), getNullValue(TSDB_DATA_TYPE_NCHAR), VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE); - *sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE); - } else { - // if the converted output len is over than pColumnModel->bytes, return error: 'Argument list too long' - int32_t output = 0; - payloadColSetId(payload, pSchema->colId); - payloadColSetType(payload, pSchema->type); - if (!taosMbsToUcs4(pToken->z, pToken->n, varDataVal(POINTER_SHIFT(payloadStart,tOffset)), - pSchema->bytes - VARSTR_HEADER_SIZE, &output)) { - char buf[512] = {0}; - snprintf(buf, tListLen(buf), "%s", strerror(errno)); - return tscInvalidOperationMsg(msg, buf, pToken->z); - } - - varDataSetLen(POINTER_SHIFT(payloadStart,tOffset), output); - - *sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + output); - *dataRowColDeltaLen += (TDRowLenT)(output - sizeof(uint32_t)); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + VARSTR_HEADER_SIZE + output); - } - break; - - case TSDB_DATA_TYPE_TIMESTAMP: { - if (pToken->type == TK_NULL) { - if (primaryKey) { - // When building SKVRow primaryKey, we should not skip even with NULL value. - int64_t tmpVal = 0; - *sizeAppend = tsSetPayloadColValue(payloadStart, primaryKeyStart, pSchema->colId, pSchema->type, &tmpVal, - TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP]); - } else { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_TIMESTAMP), - TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], tOffset); - } - } else { - int64_t tmpVal; - if (tsParseTime(pToken, &tmpVal, str, msg, timePrec) != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid timestamp", pToken->z); - } - - *sizeAppend = tsSetPayloadColValue(payloadStart, primaryKey ? primaryKeyStart : payload, pSchema->colId, - pSchema->type, &tmpVal, TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP]); - } - - break; - } - } - - return TSDB_CODE_SUCCESS; -} - /* * The server time/client time should not be mixed up in one sql string * Do not employ sort operation is not involved if server time is used. 
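The tsParseOneRow() hunk below switches row parsing onto the SMemRowBuilder added in tsclient.h: each parsed row is written directly as either a dense data row or a sparse KV row, and checkAndConvertMemRow() flips the encoding when the other form would be smaller. The decision is a plain size comparison: a data row reserves one fixed-width slot per schema column, while a KV row pays an index entry plus the value for each bound column only. A minimal, self-contained sketch of that comparison follows; the layouts, header sizes and demo* names are illustrative stand-ins, not the real SDataRow/SKVRow definitions.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layouts, for illustration only:
 *  - "data row": fixed header + one fixed-width slot per schema column
 *  - "kv row":   fixed header + one (colId, offset) index entry + value
 *                per *bound* column                                        */
static int32_t demoDataRowSize(const int32_t *colBytes, int nCols, int32_t head) {
  int32_t len = head;
  for (int i = 0; i < nCols; ++i) len += colBytes[i];
  return len;
}

static int32_t demoKvRowSize(const int32_t *colBytes, const int *bound, int nBound,
                             int32_t head, int32_t idxEntry) {
  int32_t len = head + nBound * idxEntry;
  for (int i = 0; i < nBound; ++i) len += colBytes[bound[i]];
  return len;
}

int main(void) {
  /* toy 6-column schema: timestamp(8), four ints(4), one 64-byte binary */
  int32_t colBytes[] = {8, 4, 4, 4, 4, 64};
  int     bound[]    = {0, 1};               /* INSERT binds only ts and c1 */
  int32_t dense = demoDataRowSize(colBytes, 6, /*head*/ 4);
  int32_t kv    = demoKvRowSize(colBytes, bound, 2, /*head*/ 6, /*idxEntry*/ 4);
  printf("data row: %d bytes, kv row: %d bytes -> keep the %s encoding\n",
         (int)dense, (int)kv, kv < dense ? "kv" : "data");
  return 0;
}

With only two of six columns bound the KV encoding is far smaller; as the bound ratio grows the dense row wins, which is what the KVRatio* thresholds in initMemRowBuilder() approximate up front before falling back to the per-row comparison driven by ROW_COMPARE_NEED.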
@@ -778,31 +458,31 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i int32_t index = 0; SStrToken sToken = {0}; - SMemRowHelper *pHelper = &pDataBlocks->rowHelper; - char * payload = pDataBlocks->pData + pDataBlocks->size; + char *row = pDataBlocks->pData + pDataBlocks->size; // skip the SSubmitBlk header SParsedDataColInfo *spd = &pDataBlocks->boundColumnInfo; - SSchema * schema = tscGetTableSchema(pDataBlocks->pTableMeta); + STableMeta * pTableMeta = pDataBlocks->pTableMeta; + SSchema * schema = tscGetTableSchema(pTableMeta); + SMemRowBuilder * pBuilder = &pDataBlocks->rowBuilder; + int32_t dataLen = pBuilder->dataRowInitLen; + int32_t kvLen = pBuilder->kvRowInitLen; + bool isParseBindParam = false; + // void * rowBody = memRowDataBody(row); - TDRowTLenT dataRowLen = pHelper->allNullLen; - TDRowTLenT kvRowLen = TD_MEM_ROW_KV_VER_SIZE; - TDRowTLenT payloadValOffset = 0; - TDRowLenT colValOffset = 0; - ASSERT(dataRowLen > 0); + initSMemRow(row, pBuilder->memRowType, pDataBlocks, spd->numOfBound); - payloadSetNCols(payload, spd->numOfBound); - payloadValOffset = payloadValuesOffset(payload); // rely on payloadNCols - // payloadSetTLen(payload, payloadValOffset); - - char *kvPrimaryKeyStart = payload + PAYLOAD_HEADER_LEN; // primaryKey in 1st column tuple - char *kvStart = kvPrimaryKeyStart + PAYLOAD_COL_HEAD_LEN; // the column tuple behind the primaryKey + // FPAppendColVal fpAppendColVal = tscAppendDataRowColValEx; + // if (isKvRowT(pBuilder->memRowType)) { + // rowBody = memRowKvBody(row); + // fpAppendColVal = tscAppendKvRowColValEx; + // } // 1. set the parsed value from sql string for (int i = 0; i < spd->numOfBound; ++i) { // the start position in data block buffer of current value in sql int32_t colIndex = spd->boundedColumns[i]; - char *start = payload + spd->cols[colIndex].offset; + char *start = row + spd->cols[colIndex].offset; SSchema *pSchema = &schema[colIndex]; // get colId here @@ -811,6 +491,9 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i *str += index; if (sToken.type == TK_QUESTION) { + if (!isParseBindParam) { + isParseBindParam = true; + } if (pInsertParam->insertType != TSDB_QUERY_TYPE_STMT_INSERT) { return tscSQLSyntaxErrMsg(pInsertParam->msg, "? only allowed in binding insertion", *str); } @@ -861,54 +544,43 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i sToken.n -= 2 + cnt; } - bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX); - TDRowLenT dataRowDeltaColLen = 0; // When combine the data as SDataRow, the delta len between all NULL columns. - TDRowLenT kvRowColLen = 0; - TDRowLenT colValAppended = 0; + bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX); + int32_t toffset = -1; + int16_t colId = -1; + // TODO: Can we put colId from param? 
+ tscGetMemRowAppendInfo(schema, pBuilder->memRowType, spd, i, &toffset, &colId); - if (!IS_DATA_COL_ORDERED(spd->orderStatus)) { - ASSERT(spd->colIdxInfo != NULL); - if(!isPrimaryKey) { - kvStart = POINTER_SHIFT(kvPrimaryKeyStart, spd->colIdxInfo[i].finalIdx * PAYLOAD_COL_HEAD_LEN); - } else { - ASSERT(spd->colIdxInfo[i].finalIdx == 0); - } - } - // the primary key locates in 1st column - int32_t ret = tsParseOneColumnKV(pSchema, &sToken, payload, kvPrimaryKeyStart, kvStart, pInsertParam->msg, str, - isPrimaryKey, timePrec, payloadValOffset + colValOffset, &colValAppended, - &dataRowDeltaColLen, &kvRowColLen); + int32_t ret = tsParseOneColumnKV(pSchema, &sToken, row, pInsertParam->msg, str, isPrimaryKey, timePrec, toffset, + colId, &dataLen, &kvLen, pBuilder->compareStat); if (ret != TSDB_CODE_SUCCESS) { return ret; } - if (isPrimaryKey) { - if (tsCheckTimestamp(pDataBlocks, payloadValues(payload)) != TSDB_CODE_SUCCESS) { - tscInvalidOperationMsg(pInsertParam->msg, "client time/server time can not be mixed up", sToken.z); - return TSDB_CODE_TSC_INVALID_TIME_STAMP; - } - payloadColSetOffset(kvPrimaryKeyStart, colValOffset); - } else { - payloadColSetOffset(kvStart, colValOffset); - if (IS_DATA_COL_ORDERED(spd->orderStatus)) { - kvStart += PAYLOAD_COL_HEAD_LEN; // move to next column + if (isPrimaryKey && tsCheckTimestamp(pDataBlocks, memRowTuple(row)) != TSDB_CODE_SUCCESS) { + tscInvalidOperationMsg(pInsertParam->msg, "client time/server time can not be mixed up", sToken.z); + return TSDB_CODE_TSC_INVALID_TIME_STAMP; + } + } + + if (!isParseBindParam) { + // 2. check and set convert flag + if (pBuilder->compareStat == ROW_COMPARE_NEED) { + checkAndConvertMemRow(row, dataLen, kvLen); + } + + // 3. set the null value for the columns that do not assign values + if ((spd->numOfBound < spd->numOfCols) && isDataRow(row) && !isNeedConvertRow(row)) { + SDataRow dataRow = memRowDataBody(row); + for (int32_t i = 0; i < spd->numOfCols; ++i) { + if (spd->cols[i].valStat == VAL_STAT_NONE) { + tdAppendDataColVal(dataRow, getNullValue(schema[i].type), true, schema[i].type, spd->cols[i].toffset); + } } } - - colValOffset += colValAppended; - kvRowLen += kvRowColLen; - dataRowLen += dataRowDeltaColLen; } - if (kvRowLen < dataRowLen) { - payloadSetType(payload, SMEM_ROW_KV); - } else { - payloadSetType(payload, SMEM_ROW_DATA); - } + *len = getExtendedRowSize(pDataBlocks); - *len = (int32_t)(payloadValOffset + colValOffset); - payloadSetTLen(payload, *len); - return TSDB_CODE_SUCCESS; } @@ -958,11 +630,13 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn int32_t precision = tinfo.precision; - int32_t extendedRowSize = getExtendedRowSize(&tinfo); - - initSMemRowHelper(&pDataBlock->rowHelper, tscGetTableSchema(pDataBlock->pTableMeta), - tscGetNumOfColumns(pDataBlock->pTableMeta), 0); + int32_t extendedRowSize = getExtendedRowSize(pDataBlock); + if (TSDB_CODE_SUCCESS != + (code = initMemRowBuilder(&pDataBlock->rowBuilder, 0, tinfo.numOfColumns, pDataBlock->boundColumnInfo.numOfBound, + pDataBlock->boundColumnInfo.allNullLen))) { + return code; + } while (1) { index = 0; sToken = tStrGetToken(*str, &index, false); @@ -1013,19 +687,37 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32_t numOfCols) { pColInfo->numOfCols = numOfCols; pColInfo->numOfBound = numOfCols; - pColInfo->orderStatus = ORDER_STATUS_ORDERED; + pColInfo->orderStatus = ORDER_STATUS_ORDERED; // 
default is ORDERED for non-bound mode pColInfo->boundedColumns = calloc(pColInfo->numOfCols, sizeof(int32_t)); pColInfo->cols = calloc(pColInfo->numOfCols, sizeof(SBoundColumn)); pColInfo->colIdxInfo = NULL; + pColInfo->flen = 0; + pColInfo->allNullLen = 0; + int32_t nVar = 0; for (int32_t i = 0; i < pColInfo->numOfCols; ++i) { + uint8_t type = pSchema[i].type; if (i > 0) { pColInfo->cols[i].offset = pSchema[i - 1].bytes + pColInfo->cols[i - 1].offset; + pColInfo->cols[i].toffset = pColInfo->flen; + } + pColInfo->flen += TYPE_BYTES[type]; + switch (type) { + case TSDB_DATA_TYPE_BINARY: + pColInfo->allNullLen += (VARSTR_HEADER_SIZE + CHAR_BYTES); + ++nVar; + break; + case TSDB_DATA_TYPE_NCHAR: + pColInfo->allNullLen += (VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE); + ++nVar; + break; + default: + break; } - - pColInfo->cols[i].hasVal = true; pColInfo->boundedColumns[i] = i; } + pColInfo->allNullLen += pColInfo->flen; + pColInfo->extendedVarLen = (uint16_t)(nVar * sizeof(VarDataOffsetT)); } int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows) { @@ -1125,35 +817,29 @@ int tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf, SBlockKeyInfo *pBlk if (dataBuf->tsSource == TSDB_USE_SERVER_TS) { assert(dataBuf->ordered); } - // allocate memory + // allocate memory size_t nAlloc = nRows * sizeof(SBlockKeyTuple); if (pBlkKeyInfo->pKeyTuple == NULL || pBlkKeyInfo->maxBytesAlloc < nAlloc) { size_t nRealAlloc = nAlloc + 10 * sizeof(SBlockKeyTuple); char * tmp = trealloc(pBlkKeyInfo->pKeyTuple, nRealAlloc); if (tmp == NULL) { - return TSDB_CODE_TSC_OUT_OF_MEMORY; + return TSDB_CODE_TSC_OUT_OF_MEMORY; } pBlkKeyInfo->pKeyTuple = (SBlockKeyTuple *)tmp; pBlkKeyInfo->maxBytesAlloc = (int32_t)nRealAlloc; } memset(pBlkKeyInfo->pKeyTuple, 0, nAlloc); + int32_t extendedRowSize = getExtendedRowSize(dataBuf); SBlockKeyTuple *pBlkKeyTuple = pBlkKeyInfo->pKeyTuple; char * pBlockData = pBlocks->data; - TDRowTLenT totolPayloadTLen = 0; - TDRowTLenT payloadTLen = 0; int n = 0; while (n < nRows) { - pBlkKeyTuple->skey = payloadTSKey(pBlockData); + pBlkKeyTuple->skey = memRowKey(pBlockData); pBlkKeyTuple->payloadAddr = pBlockData; - payloadTLen = payloadTLen(pBlockData); -#if 0 - ASSERT(payloadNCols(pBlockData) <= 4096); - ASSERT(payloadTLen(pBlockData) < 65536); -#endif - totolPayloadTLen += payloadTLen; + // next loop - pBlockData += payloadTLen; + pBlockData += extendedRowSize; ++pBlkKeyTuple; ++n; } @@ -1170,7 +856,6 @@ int tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf, SBlockKeyInfo *pBlk TSKEY tj = (pBlkKeyTuple + j)->skey; if (ti == tj) { - totolPayloadTLen -= payloadTLen(pBlkKeyTuple + j); ++j; continue; } @@ -1186,17 +871,15 @@ int tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf, SBlockKeyInfo *pBlk pBlocks->numOfRows = i + 1; } - dataBuf->size = sizeof(SSubmitBlk) + totolPayloadTLen; + dataBuf->size = sizeof(SSubmitBlk) + pBlocks->numOfRows * extendedRowSize; dataBuf->prevTS = INT64_MIN; return 0; } -static int32_t doParseInsertStatement(SInsertStatementParam *pInsertParam, char **str, STableDataBlocks* dataBuf, int32_t *totalNum) { - STableComInfo tinfo = tscGetTableInfo(dataBuf->pTableMeta); - +static int32_t doParseInsertStatement(SInsertStatementParam *pInsertParam, char **str, STableDataBlocks* dataBuf, int32_t *totalNum) { int32_t maxNumOfRows; - int32_t code = tscAllocateMemIfNeed(dataBuf, getExtendedRowSize(&tinfo), &maxNumOfRows); + int32_t code = tscAllocateMemIfNeed(dataBuf, getExtendedRowSize(dataBuf), &maxNumOfRows); if (TSDB_CODE_SUCCESS != 
code) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -1534,7 +1217,7 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat pColInfo->numOfBound = 0; memset(pColInfo->boundedColumns, 0, sizeof(int32_t) * nCols); for (int32_t i = 0; i < nCols; ++i) { - pColInfo->cols[i].hasVal = false; + pColInfo->cols[i].valStat = VAL_STAT_NONE; } int32_t code = TSDB_CODE_SUCCESS; @@ -1573,12 +1256,12 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat int32_t nScanned = 0, t = lastColIdx + 1; while (t < nCols) { if (strncmp(sToken.z, pSchema[t].name, sToken.n) == 0 && strlen(pSchema[t].name) == sToken.n) { - if (pColInfo->cols[t].hasVal == true) { + if (pColInfo->cols[t].valStat == VAL_STAT_HAS) { code = tscInvalidOperationMsg(pInsertParam->msg, "duplicated column name", sToken.z); goto _clean; } - pColInfo->cols[t].hasVal = true; + pColInfo->cols[t].valStat = VAL_STAT_HAS; pColInfo->boundedColumns[pColInfo->numOfBound] = t; ++pColInfo->numOfBound; findColumnIndex = true; @@ -1596,12 +1279,12 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat int32_t nRemain = nCols - nScanned; while (t < nRemain) { if (strncmp(sToken.z, pSchema[t].name, sToken.n) == 0 && strlen(pSchema[t].name) == sToken.n) { - if (pColInfo->cols[t].hasVal == true) { + if (pColInfo->cols[t].valStat == VAL_STAT_HAS) { code = tscInvalidOperationMsg(pInsertParam->msg, "duplicated column name", sToken.z); goto _clean; } - pColInfo->cols[t].hasVal = true; + pColInfo->cols[t].valStat = VAL_STAT_HAS; pColInfo->boundedColumns[pColInfo->numOfBound] = t; ++pColInfo->numOfBound; findColumnIndex = true; @@ -1836,7 +1519,7 @@ int tsParseInsertSql(SSqlObj *pSql) { goto _clean; } - if (dataBuf->boundColumnInfo.cols[0].hasVal == false) { + if (dataBuf->boundColumnInfo.cols[0].valStat == VAL_STAT_NONE) { code = tscInvalidOperationMsg(pInsertParam->msg, "primary timestamp column can not be null", NULL); goto _clean; } @@ -2047,15 +1730,18 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow goto _error; } - tscAllocateMemIfNeed(pTableDataBlock, getExtendedRowSize(&tinfo), &maxRows); + tscAllocateMemIfNeed(pTableDataBlock, getExtendedRowSize(pTableDataBlock), &maxRows); tokenBuf = calloc(1, TSDB_MAX_BYTES_PER_ROW); if (tokenBuf == NULL) { code = TSDB_CODE_TSC_OUT_OF_MEMORY; goto _error; } - initSMemRowHelper(&pTableDataBlock->rowHelper, tscGetTableSchema(pTableDataBlock->pTableMeta), - tscGetNumOfColumns(pTableDataBlock->pTableMeta), 0); + if (TSDB_CODE_SUCCESS != + (ret = initMemRowBuilder(&pTableDataBlock->rowBuilder, 0, tinfo.numOfColumns, pTableDataBlock->numOfParams, + pTableDataBlock->boundColumnInfo.allNullLen))) { + goto _error; + } while ((readLen = tgetline(&line, &n, fp)) != -1) { if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 2c2a299549..40664241c1 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -299,7 +299,7 @@ static int fillColumnsNull(STableDataBlocks* pBlock, int32_t rowNum) { SSchema *schema = (SSchema*)pBlock->pTableMeta->schema; for (int32_t i = 0; i < spd->numOfCols; ++i) { - if (!spd->cols[i].hasVal) { // current column do not have any value to insert, set it to null + if (spd->cols[i].valStat == VAL_STAT_NONE) { // current column do not have any value to insert, set it to null for (int32_t n = 0; n < rowNum; ++n) { char *ptr = pBlock->pData + sizeof(SSubmitBlk) + pBlock->rowSize * n + 
offset; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index a82e452d0b..9c840d59a8 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1775,101 +1775,6 @@ int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, i return TSDB_CODE_SUCCESS; } -static SMemRow tdGenMemRowFromBuilder(SMemRowBuilder* pBuilder) { - SSchema* pSchema = pBuilder->pSchema; - char* p = (char*)pBuilder->buf; - int toffset = 0; - uint16_t nCols = pBuilder->nCols; - - uint8_t memRowType = payloadType(p); - uint16_t nColsBound = payloadNCols(p); - if (pBuilder->nCols <= 0 || nColsBound <= 0) { - return NULL; - } - char* pVals = POINTER_SHIFT(p, payloadValuesOffset(p)); - SMemRow* memRow = (SMemRow)pBuilder->pDataBlock; - memRowSetType(memRow, memRowType); - - // ----------------- Raw payload structure for row: - /* |<------------ Head ------------->|<----------- body of column data tuple ------------------->| - * | |<----------------- flen ------------->|<--- value part --->| - * |SMemRowType| dataTLen | nCols | colId | colType | offset | ... | value |...|...|... | - * +-----------+----------+----------+--------------------------------------|--------------------| - * | uint8_t | uint32_t | uint16_t | int16_t | uint8_t | uint16_t | ... |.......|...|...|... | - * +-----------+----------+----------+--------------------------------------+--------------------| - * 1. offset in column data tuple starts from the value part in case of uint16_t overflow. - * 2. dataTLen: total length including the header and body. - */ - - if (memRowType == SMEM_ROW_DATA) { - SDataRow trow = (SDataRow)memRowDataBody(memRow); - dataRowSetLen(trow, (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + pBuilder->flen)); - dataRowSetVersion(trow, pBuilder->sversion); - - p = (char*)payloadBody(pBuilder->buf); - uint16_t i = 0, j = 0; - while (j < nCols) { - if (i >= nColsBound) { - break; - } - int16_t colId = payloadColId(p); - if (colId == pSchema[j].colId) { - // ASSERT(payloadColType(p) == pSchema[j].type); - tdAppendColVal(trow, POINTER_SHIFT(pVals, payloadColOffset(p)), pSchema[j].type, toffset); - toffset += TYPE_BYTES[pSchema[j].type]; - p = payloadNextCol(p); - ++i; - ++j; - } else if (colId < pSchema[j].colId) { - p = payloadNextCol(p); - ++i; - } else { - tdAppendColVal(trow, getNullValue(pSchema[j].type), pSchema[j].type, toffset); - toffset += TYPE_BYTES[pSchema[j].type]; - ++j; - } - } - - while (j < nCols) { - tdAppendColVal(trow, getNullValue(pSchema[j].type), pSchema[j].type, toffset); - toffset += TYPE_BYTES[pSchema[j].type]; - ++j; - } - - #if 0 // no need anymore - while (i < nColsBound) { - p = payloadNextCol(p); - ++i; - } - #endif - - } else if (memRowType == SMEM_ROW_KV) { - SKVRow kvRow = (SKVRow)memRowKvBody(memRow); - kvRowSetLen(kvRow, (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nColsBound)); - kvRowSetNCols(kvRow, nColsBound); - memRowSetKvVersion(memRow, pBuilder->sversion); - - p = (char*)payloadBody(pBuilder->buf); - int i = 0; - while (i < nColsBound) { - int16_t colId = payloadColId(p); - uint8_t colType = payloadColType(p); - tdAppendKvColVal(kvRow, POINTER_SHIFT(pVals,payloadColOffset(p)), colId, colType, &toffset); - //toffset += sizeof(SColIdx); - p = payloadNextCol(p); - ++i; - } - - } else { - ASSERT(0); - } - int32_t rowTLen = memRowTLen(memRow); - pBuilder->pDataBlock = (char*)pBuilder->pDataBlock + rowTLen; // next row - pBuilder->pSubmitBlk->dataLen += rowTLen; - - return memRow; -} - // Erase the empty space reserved for binary data static int 
trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SInsertStatementParam* insertParam, SBlockKeyTuple* blkKeyTuple) { @@ -1931,18 +1836,20 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SI pBlock->dataLen += memRowTLen(memRow); } } else { - SMemRowBuilder rowBuilder; - rowBuilder.pSchema = pSchema; - rowBuilder.sversion = pTableMeta->sversion; - rowBuilder.flen = flen; - rowBuilder.nCols = tinfo.numOfColumns; - rowBuilder.pDataBlock = pDataBlock; - rowBuilder.pSubmitBlk = pBlock; - rowBuilder.buf = p; - for (int32_t i = 0; i < numOfRows; ++i) { - rowBuilder.buf = (blkKeyTuple + i)->payloadAddr; - tdGenMemRowFromBuilder(&rowBuilder); + char* payload = (blkKeyTuple + i)->payloadAddr; + if (isNeedConvertRow(payload)) { + convertSMemRow(pDataBlock, payload, pTableDataBlock); + TDRowTLenT rowTLen = memRowTLen(pDataBlock); + pDataBlock = POINTER_SHIFT(pDataBlock, rowTLen); + pBlock->dataLen += rowTLen; + ASSERT(rowTLen < memRowTLen(payload)); + } else { + TDRowTLenT rowTLen = memRowTLen(payload); + memcpy(pDataBlock, payload, rowTLen); + pDataBlock = POINTER_SHIFT(pDataBlock, rowTLen); + pBlock->dataLen += rowTLen; + } } } @@ -1953,18 +1860,6 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SI return len; } -static int32_t getRowExpandSize(STableMeta* pTableMeta) { - int32_t result = TD_MEM_ROW_DATA_HEAD_SIZE; - int32_t columns = tscGetNumOfColumns(pTableMeta); - SSchema* pSchema = tscGetTableSchema(pTableMeta); - for(int32_t i = 0; i < columns; i++) { - if (IS_VAR_DATA_TYPE((pSchema + i)->type)) { - result += TYPE_BYTES[TSDB_DATA_TYPE_BINARY]; - } - } - return result; -} - static void extractTableNameList(SInsertStatementParam *pInsertParam, bool freeBlockMap) { pInsertParam->numOfTables = (int32_t) taosHashGetSize(pInsertParam->pTableBlockHashList); if (pInsertParam->pTableNameList == NULL) { @@ -2003,7 +1898,6 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData; if (pBlocks->numOfRows > 0) { // the maximum expanded size in byte when a row-wise data is converted to SDataRow format - int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta); STableDataBlocks* dataBuf = NULL; int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE, @@ -2016,7 +1910,8 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl return ret; } - int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta); + int64_t destSize = + dataBuf->size + pOneTableBlock->size + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta); if (dataBuf->nAllocSize < destSize) { dataBuf->nAllocSize = (uint32_t)(destSize * 1.5); @@ -2060,7 +1955,8 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl pBlocks->numOfRows, pBlocks->sversion, blkKeyInfo.pKeyTuple->skey, pLastKeyTuple->skey); } - int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta); + int32_t len = pBlocks->numOfRows * getExtendedRowSize(pOneTableBlock) + + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta); pBlocks->tid = htonl(pBlocks->tid); pBlocks->uid = htobe64(pBlocks->uid); @@ -4977,4 +4873,4 @@ void tscRemoveTableMetaBuf(STableMetaInfo* pTableMetaInfo, uint64_t id) { 
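
Note: the trimDataBlock hunk above replaces the old payload-to-SMemRow rebuild with a per-row branch: rows flagged for conversion are re-encoded in place, everything else is copied verbatim. A condensed sketch of that loop, pulled out for readability — the helper name copyOrConvertRows is mine, the raw-payload branch and error handling are omitted, and it assumes the tscUtil.c/tsclient.h declarations rather than compiling on its own:

    /* Sketch only: the shape of the per-row pass in the hunk above. */
    static void copyOrConvertRows(char *dst, SSubmitBlk *pBlock, STableDataBlocks *pTableDataBlock,
                                  SBlockKeyTuple *blkKeyTuple, int32_t numOfRows) {
      for (int32_t i = 0; i < numOfRows; ++i) {
        char *payload = (blkKeyTuple + i)->payloadAddr;
        if (isNeedConvertRow(payload)) {
          /* row was flagged earlier, while it was built: the other encoding is smaller */
          convertSMemRow(dst, payload, pTableDataBlock);
        } else {
          memcpy(dst, payload, memRowTLen(payload));
        }
        TDRowTLenT rowTLen = memRowTLen(dst);   /* length of what actually landed in dst */
        dst = POINTER_SHIFT(dst, rowTLen);
        pBlock->dataLen += rowTLen;
      }
    }
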
taosHashRemove(tscTableMetaMap, fname, len); tscDebug("0x%"PRIx64" remove table meta %s, numOfRemain:%d", id, fname, (int32_t) taosHashGetSize(tscTableMetaMap)); -} \ No newline at end of file +} diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index 47bd8a72b2..0bde3937f3 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -186,6 +186,7 @@ typedef void *SDataRow; #define TD_DATA_ROW_HEAD_SIZE (sizeof(uint16_t) + sizeof(int16_t)) #define dataRowLen(r) (*(TDRowLenT *)(r)) // 0~65535 +#define dataRowEnd(r) POINTER_SHIFT(r, dataRowLen(r)) #define dataRowVersion(r) (*(int16_t *)POINTER_SHIFT(r, sizeof(int16_t))) #define dataRowTuple(r) POINTER_SHIFT(r, TD_DATA_ROW_HEAD_SIZE) #define dataRowTKey(r) (*(TKEY *)(dataRowTuple(r))) @@ -201,14 +202,18 @@ void tdFreeDataRow(SDataRow row); void tdInitDataRow(SDataRow row, STSchema *pSchema); SDataRow tdDataRowDup(SDataRow row); + // offset here not include dataRow header length -static FORCE_INLINE int tdAppendColVal(SDataRow row, const void *value, int8_t type, int32_t offset) { +static FORCE_INLINE int tdAppendDataColVal(SDataRow row, const void *value, bool isCopyVarData, int8_t type, + int32_t offset) { ASSERT(value != NULL); int32_t toffset = offset + TD_DATA_ROW_HEAD_SIZE; if (IS_VAR_DATA_TYPE(type)) { *(VarDataOffsetT *)POINTER_SHIFT(row, toffset) = dataRowLen(row); - memcpy(POINTER_SHIFT(row, dataRowLen(row)), value, varDataTLen(value)); + if (isCopyVarData) { + memcpy(POINTER_SHIFT(row, dataRowLen(row)), value, varDataTLen(value)); + } dataRowLen(row) += varDataTLen(value); } else { if (offset == 0) { @@ -223,6 +228,12 @@ static FORCE_INLINE int tdAppendColVal(SDataRow row, const void *value, int8_t t return 0; } + +// offset here not include dataRow header length +static FORCE_INLINE int tdAppendColVal(SDataRow row, const void *value, int8_t type, int32_t offset) { + return tdAppendDataColVal(row, value, true, type, offset); +} + // NOTE: offset here including the header size static FORCE_INLINE void *tdGetRowDataOfCol(SDataRow row, int8_t type, int32_t offset) { if (IS_VAR_DATA_TYPE(type)) { @@ -475,9 +486,10 @@ static FORCE_INLINE void *tdGetKVRowIdxOfCol(SKVRow row, int16_t colId) { } // offset here not include kvRow header length -static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t colId, int8_t type, int32_t *offset) { +static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, bool isCopyValData, int16_t colId, int8_t type, + int32_t offset) { ASSERT(value != NULL); - int32_t toffset = *offset + TD_KV_ROW_HEAD_SIZE; + int32_t toffset = offset + TD_KV_ROW_HEAD_SIZE; SColIdx *pColIdx = (SColIdx *)POINTER_SHIFT(row, toffset); char * ptr = (char *)POINTER_SHIFT(row, kvRowLen(row)); @@ -485,10 +497,12 @@ static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t pColIdx->offset = kvRowLen(row); // offset of pColIdx including the TD_KV_ROW_HEAD_SIZE if (IS_VAR_DATA_TYPE(type)) { - memcpy(ptr, value, varDataTLen(value)); + if (isCopyValData) { + memcpy(ptr, value, varDataTLen(value)); + } kvRowLen(row) += varDataTLen(value); } else { - if (*offset == 0) { + if (offset == 0) { ASSERT(type == TSDB_DATA_TYPE_TIMESTAMP); TKEY tvalue = tdGetTKEY(*(TSKEY *)value); memcpy(ptr, (void *)(&tvalue), TYPE_BYTES[type]); @@ -497,7 +511,6 @@ static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t } kvRowLen(row) += TYPE_BYTES[type]; } - *offset += sizeof(SColIdx); return 0; } @@ -592,12 +605,24 @@ typedef void *SMemRow; 
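
Note: the tdataformat.h hunk that follows repurposes the SMemRow flag byte — the lowest bit keeps the row encoding (SMEM_ROW_DATA / SMEM_ROW_KV) and the highest bit (SMEM_ROW_CONVERT) marks a row whose other encoding turned out smaller, to be re-encoded later in trimDataBlock. A standalone sketch (not TDengine code) of how the masks compose and how the per-row dataLen/kvLen bookkeeping can drive the decision; the 0.9 threshold stands in for the KVRatioConvert constant defined below and is an assumption about how the comparison is wired, since these patches only show the constants and the flag:

    #include <stdint.h>
    #include <stdio.h>

    #define ROW_DATA    0x00U   /* SDataRow body follows the flag byte */
    #define ROW_KV      0x01U   /* SKVRow body follows                 */
    #define ROW_CONVERT 0x80U   /* re-encode this row before sending   */

    #define rowType(f)        ((f) & 0x01U)
    #define rowSetConvert(f)  ((uint8_t)(((f) & 0x7FU) | ROW_CONVERT))
    #define rowNeedConvert(f) (((f) & 0x80U) == ROW_CONVERT)

    int main(void) {
      uint8_t flag = ROW_DATA;             /* row built as an SDataRow            */
      int32_t dataLen = 512, kvLen = 180;  /* tracked while columns are appended  */

      if ((double)kvLen < (double)dataLen * 0.9) {  /* assumed threshold */
        flag = rowSetConvert(flag);
      }
      printf("type=%u convert=%d\n", rowType(flag), rowNeedConvert(flag));
      return 0;
    }

Keeping the type in the low bit and the convert mark in the high bit lets both be stored in the single leading byte an SMemRow already carries, so no per-row space is added for the decision.
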
#define TD_MEM_ROW_DATA_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_DATA_ROW_HEAD_SIZE) #define TD_MEM_ROW_KV_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_MEM_ROW_KV_VER_SIZE + TD_KV_ROW_HEAD_SIZE) -#define SMEM_ROW_DATA 0U // SDataRow -#define SMEM_ROW_KV 1U // SKVRow +#define SMEM_ROW_DATA 0x0U // SDataRow +#define SMEM_ROW_KV 0x01U // SKVRow +#define SMEM_ROW_CONVERT 0x80U // SMemRow convert flag -#define memRowType(r) (*(uint8_t *)(r)) +#define KVRatioKV (0.2f) // all bool +#define KVRatioPredict (0.4f) +#define KVRatioData (0.75f) // all bigint +#define KVRatioConvert (0.9f) + +#define memRowType(r) ((*(uint8_t *)(r)) & 0x01) + +#define memRowSetType(r, t) ((*(uint8_t *)(r)) = (((*(uint8_t *)(r)) & 0xFE) | (t))) // lowest bit +#define memRowSetConvert(r) ((*(uint8_t *)(r)) = (((*(uint8_t *)(r)) & 0x7F) | SMEM_ROW_CONVERT)) // highest bit +#define isDataRowT(t) (SMEM_ROW_DATA == (((uint8_t)(t)) & 0x01)) #define isDataRow(r) (SMEM_ROW_DATA == memRowType(r)) +#define isKvRowT(t) (SMEM_ROW_KV == (((uint8_t)(t)) & 0x01)) #define isKvRow(r) (SMEM_ROW_KV == memRowType(r)) +#define isNeedConvertRow(r) ((((*(uint8_t *)(r)) & 0x7F) == SMEM_ROW_CONVERT)) #define memRowDataBody(r) POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE) // section after flag #define memRowKvBody(r) \ @@ -614,6 +639,14 @@ typedef void *SMemRow; #define memRowLen(r) (isDataRow(r) ? memRowDataLen(r) : memRowKvLen(r)) #define memRowTLen(r) (isDataRow(r) ? memRowDataTLen(r) : memRowKvTLen(r)) // using uint32_t/int32_t to store the TLen +static FORCE_INLINE char *memRowEnd(SMemRow row) { + if (isDataRow(row)) { + return (char *)dataRowEnd(memRowDataBody(row)); + } else { + return (char *)kvRowEnd(memRowKvBody(row)); + } +} + #define memRowDataVersion(r) dataRowVersion(memRowDataBody(r)) #define memRowKvVersion(r) (*(int16_t *)POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE)) #define memRowVersion(r) (isDataRow(r) ? memRowDataVersion(r) : memRowKvVersion(r)) // schema version @@ -631,7 +664,6 @@ typedef void *SMemRow; } \ } while (0) -#define memRowSetType(r, t) (memRowType(r) = (t)) #define memRowSetLen(r, l) (isDataRow(r) ? memRowDataLen(r) = (l) : memRowKvLen(r) = (l)) #define memRowSetVersion(r, v) (isDataRow(r) ? dataRowSetVersion(memRowDataBody(r), v) : memRowSetKvVersion(r, v)) #define memRowCpy(dst, r) memcpy((dst), (r), memRowTLen(r)) @@ -664,12 +696,12 @@ static FORCE_INLINE void *tdGetMemRowDataOfColEx(void *row, int16_t colId, int8_ } } -static FORCE_INLINE int tdAppendMemColVal(SMemRow row, const void *value, int16_t colId, int8_t type, int32_t offset, - int32_t *kvOffset) { +static FORCE_INLINE int tdAppendMemRowColVal(SMemRow row, const void *value, bool isCopyVarData, int16_t colId, + int8_t type, int32_t offset) { if (isDataRow(row)) { - tdAppendColVal(memRowDataBody(row), value, type, offset); + tdAppendDataColVal(memRowDataBody(row), value, isCopyVarData, type, offset); } else { - tdAppendKvColVal(memRowKvBody(row), value, colId, type, kvOffset); + tdAppendKvColVal(memRowKvBody(row), value, isCopyVarData, colId, type, offset); } return 0; } @@ -691,6 +723,30 @@ static FORCE_INLINE int32_t tdGetColAppendLen(uint8_t rowType, const void *value return len; } +/** + * 1. calculate the delta of AllNullLen for SDataRow. + * 2. calculate the real len for SKVRow. 
+ */ +static FORCE_INLINE void tdGetColAppendDeltaLen(const void *value, int8_t colType, int32_t *dataLen, int32_t *kvLen) { + switch (colType) { + case TSDB_DATA_TYPE_BINARY: { + int32_t varLen = varDataLen(value); + *dataLen += (varLen - CHAR_BYTES); + *kvLen += (varLen + sizeof(SColIdx)); + break; + } + case TSDB_DATA_TYPE_NCHAR: { + int32_t varLen = varDataLen(value); + *dataLen += (varLen - TSDB_NCHAR_SIZE); + *kvLen += (varLen + sizeof(SColIdx)); + break; + } + default: { + *kvLen += (TYPE_BYTES[colType] + sizeof(SColIdx)); + break; + } + } +} typedef struct { int16_t colId; @@ -706,7 +762,7 @@ static FORCE_INLINE void setSColInfo(SColInfo* colInfo, int16_t colId, uint8_t c SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSchema1, STSchema *pSchema2); - +#if 0 // ----------------- Raw payload structure for row: /* |<------------ Head ------------->|<----------- body of column data tuple ------------------->| * | |<----------------- flen ------------->|<--- value part --->| @@ -752,6 +808,8 @@ SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSch static FORCE_INLINE char *payloadNextCol(char *pCol) { return (char *)POINTER_SHIFT(pCol, PAYLOAD_COL_HEAD_LEN); } +#endif + #ifdef __cplusplus } #endif diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index 8ef3d083c7..271a6cd6ee 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -854,7 +854,8 @@ SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSch int16_t k; for (k = 0; k < nKvNCols; ++k) { SColInfo *pColInfo = taosArrayGet(stashRow, k); - tdAppendKvColVal(kvRow, pColInfo->colVal, pColInfo->colId, pColInfo->colType, &toffset); + tdAppendKvColVal(kvRow, pColInfo->colVal, true, pColInfo->colId, pColInfo->colType, toffset); + toffset += sizeof(SColIdx); } ASSERT(kvLen == memRowTLen(tRow)); } From f0b2063f6b51d0df06fedbf65644760136147c53 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Tue, 3 Aug 2021 17:49:12 +0800 Subject: [PATCH 003/185] bug fix --- src/client/inc/tsclient.h | 7 +++---- src/common/inc/tdataformat.h | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index c63c84df94..cb67fa336b 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -554,9 +554,8 @@ static FORCE_INLINE void initSMemRow(SMemRow row, uint8_t memRowType, STableData static FORCE_INLINE void convertToSDataRow(SMemRow dest, SMemRow src, SSchema *pSchema, int nCols, SParsedDataColInfo *spd) { ASSERT(isKvRow(src)); - - SDataRow dataRow = memRowDataBody(dest); SKVRow kvRow = memRowKvBody(src); + SDataRow dataRow = memRowDataBody(dest); memRowSetType(dest, SMEM_ROW_DATA); dataRowSetVersion(dataRow, memRowKvVersion(src)); @@ -576,8 +575,8 @@ static FORCE_INLINE void convertToSKVRow(SMemRow dest, SMemRow src, SSchema *pSc SParsedDataColInfo *spd) { ASSERT(isDataRow(src)); - SDataRow dataRow = memRowKvBody(src); - SKVRow kvRow = memRowDataBody(dest); + SDataRow dataRow = memRowDataBody(src); + SKVRow kvRow = memRowKvBody(dest); memRowSetType(dest, SMEM_ROW_KV); memRowSetKvVersion(kvRow, dataRowVersion(dataRow)); diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index 0bde3937f3..627a8c72c4 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -622,7 +622,7 @@ typedef void *SMemRow; #define isDataRow(r) (SMEM_ROW_DATA == memRowType(r)) #define isKvRowT(t) (SMEM_ROW_KV == (((uint8_t)(t)) & 0x01)) #define 
isKvRow(r) (SMEM_ROW_KV == memRowType(r)) -#define isNeedConvertRow(r) ((((*(uint8_t *)(r)) & 0x7F) == SMEM_ROW_CONVERT)) +#define isNeedConvertRow(r) ((((*(uint8_t *)(r)) & 0x80) == SMEM_ROW_CONVERT)) #define memRowDataBody(r) POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE) // section after flag #define memRowKvBody(r) \ From e200a7af6d7dc70e7b2fb41ac7ccd8a0250e7489 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Tue, 3 Aug 2021 17:52:39 +0800 Subject: [PATCH 004/185] code optimization --- src/client/inc/tsclient.h | 6 +----- src/client/src/tscParseInsert.c | 3 --- 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index cb67fa336b..07acb20cbe 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -43,8 +43,6 @@ struct SSqlInfo; typedef void (*__async_cb_func_t)(void *param, TAOS_RES *tres, int32_t numOfRows); -#define __5221_BRANCH__ - typedef struct SNewVgroupInfo { int32_t vgId; int8_t inUse; @@ -519,10 +517,8 @@ int16_t getNewResColId(SSqlCmd* pCmd); int32_t schemaIdxCompar(const void *lhs, const void *rhs); int32_t boundIdxCompar(const void *lhs, const void *rhs); static FORCE_INLINE int32_t getExtendedRowSize(STableDataBlocks *pBlock) { -#ifdef __5221_BRANCH__ ASSERT(pBlock->rowSize == pBlock->pTableMeta->tableInfo.rowSize); -#endif - return pBlock->pTableMeta->tableInfo.rowSize + TD_MEM_ROW_DATA_HEAD_SIZE + pBlock->boundColumnInfo.extendedVarLen; + return pBlock->rowSize + TD_MEM_ROW_DATA_HEAD_SIZE + pBlock->boundColumnInfo.extendedVarLen; } static FORCE_INLINE void checkAndConvertMemRow(SMemRow row, int32_t dataLen, int32_t kvLen) { diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 9cc2eef662..6da26577db 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -43,9 +43,7 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat char *str, char **end); int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint32_t nCols, uint32_t nBoundCols, int32_t allNullLen) { -#ifdef __5221_BRANCH__ ASSERT(nRows >= 0 && nCols > 0 && (nBoundCols <= nCols)); -#endif if (nRows > 0) { // already init(bind multiple rows by single column) if (pBuilder->compareStat == ROW_COMPARE_NEED && (pBuilder->rowInfo != NULL)) { @@ -96,7 +94,6 @@ int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint3 return TSDB_CODE_SUCCESS; } - int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec) { int32_t index = 0; SStrToken sToken; From 108473a7af57fa6cbfd734e410ebfe8c979bf97e Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Wed, 4 Aug 2021 04:00:53 +0800 Subject: [PATCH 005/185] remove ASSERT --- src/common/inc/tdataformat.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index 627a8c72c4..04e5e66c1c 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -614,7 +614,7 @@ typedef void *SMemRow; #define KVRatioData (0.75f) // all bigint #define KVRatioConvert (0.9f) -#define memRowType(r) ((*(uint8_t *)(r)) & 0x01) +#define memRowType(r) ((*(uint8_t *)(r)) & 0x01U) #define memRowSetType(r, t) ((*(uint8_t *)(r)) = (((*(uint8_t *)(r)) & 0xFE) | (t))) // lowest bit #define memRowSetConvert(r) ((*(uint8_t *)(r)) = (((*(uint8_t *)(r)) & 0x7F) | SMEM_ROW_CONVERT)) // highest bit From 366a6b24ce6ed77c796cfe07acc4131b6eb55619 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Wed, 4 Aug 2021 04:04:19 +0800 Subject: 
[PATCH 006/185] remove ASSERT --- src/client/src/tscUtil.c | 1 - src/common/inc/tdataformat.h | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 5972252d67..2a6271d1d1 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1843,7 +1843,6 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SI TDRowTLenT rowTLen = memRowTLen(pDataBlock); pDataBlock = POINTER_SHIFT(pDataBlock, rowTLen); pBlock->dataLen += rowTLen; - ASSERT(rowTLen < memRowTLen(payload)); } else { TDRowTLenT rowTLen = memRowTLen(payload); memcpy(pDataBlock, payload, rowTLen); diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index 04e5e66c1c..627a8c72c4 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -614,7 +614,7 @@ typedef void *SMemRow; #define KVRatioData (0.75f) // all bigint #define KVRatioConvert (0.9f) -#define memRowType(r) ((*(uint8_t *)(r)) & 0x01U) +#define memRowType(r) ((*(uint8_t *)(r)) & 0x01) #define memRowSetType(r, t) ((*(uint8_t *)(r)) = (((*(uint8_t *)(r)) & 0xFE) | (t))) // lowest bit #define memRowSetConvert(r) ((*(uint8_t *)(r)) = (((*(uint8_t *)(r)) & 0x7F) | SMEM_ROW_CONVERT)) // highest bit From 9f8af9a1ea94833d2d3e0d61a0b81ace74e04229 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Wed, 4 Aug 2021 04:22:37 +0800 Subject: [PATCH 007/185] remove duplicated () --- src/common/inc/tdataformat.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index 627a8c72c4..4590f144a1 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -622,7 +622,7 @@ typedef void *SMemRow; #define isDataRow(r) (SMEM_ROW_DATA == memRowType(r)) #define isKvRowT(t) (SMEM_ROW_KV == (((uint8_t)(t)) & 0x01)) #define isKvRow(r) (SMEM_ROW_KV == memRowType(r)) -#define isNeedConvertRow(r) ((((*(uint8_t *)(r)) & 0x80) == SMEM_ROW_CONVERT)) +#define isNeedConvertRow(r) (((*(uint8_t *)(r)) & 0x80) == SMEM_ROW_CONVERT) #define memRowDataBody(r) POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE) // section after flag #define memRowKvBody(r) \ From 049a4e15821747846a05c56d6c99cc6ed891b59d Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Wed, 4 Aug 2021 07:12:54 +0800 Subject: [PATCH 008/185] bug fix --- src/client/src/tscUtil.c | 20 +++++++++++++++++--- src/common/inc/tdataformat.h | 2 +- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 2a6271d1d1..82b40c94c9 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1859,6 +1859,18 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SI return len; } +static int32_t getRowExpandSize(STableMeta* pTableMeta) { + int32_t result = TD_MEM_ROW_DATA_HEAD_SIZE; + int32_t columns = tscGetNumOfColumns(pTableMeta); + SSchema* pSchema = tscGetTableSchema(pTableMeta); + for (int32_t i = 0; i < columns; i++) { + if (IS_VAR_DATA_TYPE((pSchema + i)->type)) { + result += TYPE_BYTES[TSDB_DATA_TYPE_BINARY]; + } + } + return result; +} + static void extractTableNameList(SInsertStatementParam *pInsertParam, bool freeBlockMap) { pInsertParam->numOfTables = (int32_t) taosHashGetSize(pInsertParam->pTableBlockHashList); if (pInsertParam->pTableNameList == NULL) { @@ -1897,6 +1909,7 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData; if (pBlocks->numOfRows > 0) { 
// the maximum expanded size in byte when a row-wise data is converted to SDataRow format + int32_t expandSize = isRawPayload ? getRowExpandSize(pOneTableBlock->pTableMeta) : 0; STableDataBlocks* dataBuf = NULL; int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE, @@ -1909,8 +1922,8 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl return ret; } - int64_t destSize = - dataBuf->size + pOneTableBlock->size + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta); + int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize + + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta); if (dataBuf->nAllocSize < destSize) { dataBuf->nAllocSize = (uint32_t)(destSize * 1.5); @@ -1954,7 +1967,8 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl pBlocks->numOfRows, pBlocks->sversion, blkKeyInfo.pKeyTuple->skey, pLastKeyTuple->skey); } - int32_t len = pBlocks->numOfRows * getExtendedRowSize(pOneTableBlock) + + int32_t len = pBlocks->numOfRows * + (isRawPayload ? (pOneTableBlock->rowSize + expandSize) : getExtendedRowSize(pOneTableBlock)) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta); pBlocks->tid = htonl(pBlocks->tid); diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index 4590f144a1..3b1f89f7d6 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -616,7 +616,7 @@ typedef void *SMemRow; #define memRowType(r) ((*(uint8_t *)(r)) & 0x01) -#define memRowSetType(r, t) ((*(uint8_t *)(r)) = (((*(uint8_t *)(r)) & 0xFE) | (t))) // lowest bit +#define memRowSetType(r, t) ((*(uint8_t *)(r)) = (t)) // set the total byte in case of dirty memory #define memRowSetConvert(r) ((*(uint8_t *)(r)) = (((*(uint8_t *)(r)) & 0x7F) | SMEM_ROW_CONVERT)) // highest bit #define isDataRowT(t) (SMEM_ROW_DATA == (((uint8_t)(t)) & 0x01)) #define isDataRow(r) (SMEM_ROW_DATA == memRowType(r)) From fd8521a35d97ed6ceeade732f0a01334786aca71 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Wed, 4 Aug 2021 13:03:40 +0800 Subject: [PATCH 009/185] make Jenkins happy --- tests/pytest/fulltest.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 9a0212d652..1604813a1f 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -169,7 +169,7 @@ python3 test.py -f tools/taosdemoTestQuery.py # nano support python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py -python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py +# python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertTime_step.py python3 test.py -f tools/taosdumpTestNanoSupport.py From 7bc7424dd0b51c99bd186beaab554864adf4651c Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Thu, 5 Aug 2021 07:05:21 +0800 Subject: [PATCH 010/185] code optimization --- src/client/src/tscParseInsert.c | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index bf786e3817..b7a2320b07 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -463,16 +463,9 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i int32_t dataLen = pBuilder->dataRowInitLen; 
int32_t kvLen = pBuilder->kvRowInitLen; bool isParseBindParam = false; - // void * rowBody = memRowDataBody(row); initSMemRow(row, pBuilder->memRowType, pDataBlocks, spd->numOfBound); - // FPAppendColVal fpAppendColVal = tscAppendDataRowColValEx; - // if (isKvRowT(pBuilder->memRowType)) { - // rowBody = memRowKvBody(row); - // fpAppendColVal = tscAppendKvRowColValEx; - // } - // 1. set the parsed value from sql string for (int i = 0; i < spd->numOfBound; ++i) { // the start position in data block buffer of current value in sql @@ -543,7 +536,6 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX); int32_t toffset = -1; int16_t colId = -1; - // TODO: Can we put colId from param? tscGetMemRowAppendInfo(schema, pBuilder->memRowType, spd, i, &toffset, &colId); int32_t ret = tsParseOneColumnKV(pSchema, &sToken, row, pInsertParam->msg, str, isPrimaryKey, timePrec, toffset, @@ -552,9 +544,12 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i return ret; } - if (isPrimaryKey && tsCheckTimestamp(pDataBlocks, memRowTuple(row)) != TSDB_CODE_SUCCESS) { - tscInvalidOperationMsg(pInsertParam->msg, "client time/server time can not be mixed up", sToken.z); - return TSDB_CODE_TSC_INVALID_TIME_STAMP; + if (isPrimaryKey) { + TSKEY tsKey = memRowKey(row); + if (tsCheckTimestamp(pDataBlocks, (const char *)&tsKey) != TSDB_CODE_SUCCESS) { + tscInvalidOperationMsg(pInsertParam->msg, "client time/server time can not be mixed up", sToken.z); + return TSDB_CODE_TSC_INVALID_TIME_STAMP; + } } } From 9e2188b1961c696ba83f1921bcfff10db912ee7c Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Thu, 5 Aug 2021 10:25:29 +0800 Subject: [PATCH 011/185] small improvements, fix potential bug for inserting --- src/tsdb/src/tsdbMemTable.c | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index 8bb2d1c44e..4809f6303f 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -702,11 +702,12 @@ static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg) { } //row1 has higher priority -static SMemRow tsdbInsertDupKeyMerge(SMemRow row1, SMemRow row2, STsdbRepo* pRepo, STSchema **ppSchema1, STSchema **ppSchema2, STable* pTable, int32_t* pAffectedRows, int64_t* pPoints, SMemRow* pLastRow) { +static SMemRow tsdbInsertDupKeyMerge(SMemRow row1, SMemRow row2, STsdbRepo* pRepo, + STSchema **ppSchema1, STSchema **ppSchema2, + STable* pTable, int32_t* pPoints, SMemRow* pLastRow) { //for compatiblity, duplicate key inserted when update=0 should be also calculated as affected rows! 
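
Note: the tsdbSetupSkipListHookFns hunk further down moves the per-insert output pointers (points counter, last row) out of the one-time hook construction and refreshes them on every call, since the handler itself stays cached on the skip list across insert batches. A standalone sketch of that pattern with simplified stand-in types (SavedFunc and SkipList here are not the real tGenericSavedFunc/SSkipList, and allocation is assumed to succeed in the demo):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {
      void *args[8];               /* stand-in for the saved-func arg slots */
    } SavedFunc;

    typedef struct {
      SavedFunc *insertHandleFn;   /* cached on the list across inserts     */
    } SkipList;

    static void setupHook(SkipList *sl, void *table, int32_t *points, void **lastRow) {
      if (sl->insertHandleFn == NULL) {        /* created once, on first use        */
        sl->insertHandleFn = calloc(1, sizeof(SavedFunc));
        sl->insertHandleFn->args[5] = table;   /* stable argument, set at creation  */
      }
      sl->insertHandleFn->args[6] = points;    /* per-batch outputs: refresh always */
      sl->insertHandleFn->args[7] = lastRow;
    }

    int main(void) {
      SkipList sl = {0};
      int32_t points1 = 0, points2 = 0;
      void *last1 = NULL, *last2 = NULL;
      setupHook(&sl, "demo_table", &points1, &last1);  /* first insert batch            */
      setupHook(&sl, "demo_table", &points2, &last2);  /* second batch repoints slots   */
      printf("args[6] -> %p (points2 at %p)\n", sl.insertHandleFn->args[6], (void *)&points2);
      free(sl.insertHandleFn);
      return 0;
    }
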
if(row1 == NULL && row2 == NULL && pRepo->config.update == TD_ROW_DISCARD_UPDATE) { - (*pAffectedRows)++; (*pPoints)++; return NULL; } @@ -715,7 +716,6 @@ static SMemRow tsdbInsertDupKeyMerge(SMemRow row1, SMemRow row2, STsdbRepo* pRep void* pMem = tsdbAllocBytes(pRepo, memRowTLen(row1)); if(pMem == NULL) return NULL; memRowCpy(pMem, row1); - (*pAffectedRows)++; (*pPoints)++; *pLastRow = pMem; return pMem; @@ -750,7 +750,6 @@ static SMemRow tsdbInsertDupKeyMerge(SMemRow row1, SMemRow row2, STsdbRepo* pRep if(pMem == NULL) return NULL; memRowCpy(pMem, tmp); - (*pAffectedRows)++; (*pPoints)++; *pLastRow = pMem; @@ -758,10 +757,10 @@ static SMemRow tsdbInsertDupKeyMerge(SMemRow row1, SMemRow row2, STsdbRepo* pRep } static void* tsdbInsertDupKeyMergePacked(void** args) { - return tsdbInsertDupKeyMerge(args[0], args[1], args[2], (STSchema**)&args[3], (STSchema**)&args[4], args[5], args[6], args[7], args[8]); + return tsdbInsertDupKeyMerge(args[0], args[1], args[2], (STSchema**)&args[3], (STSchema**)&args[4], args[5], args[6], args[7]); } -static void tsdbSetupSkipListHookFns(SSkipList* pSkipList, STsdbRepo *pRepo, STable *pTable, int32_t* pAffectedRows, int64_t* pPoints, SMemRow* pLastRow) { +static void tsdbSetupSkipListHookFns(SSkipList* pSkipList, STsdbRepo *pRepo, STable *pTable, int32_t* pPoints, SMemRow* pLastRow) { if(pSkipList->insertHandleFn == NULL) { tGenericSavedFunc *dupHandleSavedFunc = genericSavedFuncInit((GenericVaFunc)&tsdbInsertDupKeyMergePacked, 9); @@ -769,17 +768,16 @@ static void tsdbSetupSkipListHookFns(SSkipList* pSkipList, STsdbRepo *pRepo, STa dupHandleSavedFunc->args[3] = NULL; dupHandleSavedFunc->args[4] = NULL; dupHandleSavedFunc->args[5] = pTable; - dupHandleSavedFunc->args[6] = pAffectedRows; - dupHandleSavedFunc->args[7] = pPoints; - dupHandleSavedFunc->args[8] = pLastRow; pSkipList->insertHandleFn = dupHandleSavedFunc; } + pSkipList->insertHandleFn->args[6] = pPoints; + pSkipList->insertHandleFn->args[7] = pLastRow; } static int tsdbInsertDataToTable(STsdbRepo* pRepo, SSubmitBlk* pBlock, int32_t *pAffectedRows) { STsdbMeta *pMeta = pRepo->tsdbMeta; - int64_t points = 0; + int32_t points = 0; STable *pTable = NULL; SSubmitBlkIter blkIter = {0}; SMemTable *pMemTable = NULL; @@ -830,9 +828,10 @@ static int tsdbInsertDataToTable(STsdbRepo* pRepo, SSubmitBlk* pBlock, int32_t * SMemRow lastRow = NULL; int64_t osize = SL_SIZE(pTableData->pData); - tsdbSetupSkipListHookFns(pTableData->pData, pRepo, pTable, pAffectedRows, &points, &lastRow); + tsdbSetupSkipListHookFns(pTableData->pData, pRepo, pTable, &points, &lastRow); tSkipListPutBatchByIter(pTableData->pData, &blkIter, (iter_next_fn_t)tsdbGetSubmitBlkNext); int64_t dsize = SL_SIZE(pTableData->pData) - osize; + (*pAffectedRows) += points; if(lastRow != NULL) { From c3413fd830c94e300fbbfec7e7ebfe55d7d46792 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Thu, 5 Aug 2021 10:34:24 +0800 Subject: [PATCH 012/185] fix realloc usage in tdataformat --- src/common/src/tdataformat.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index 3f0ab7f93e..70911c88a6 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -357,8 +357,9 @@ int tdInitDataCols(SDataCols *pCols, STSchema *pSchema) { int oldMaxCols = pCols->maxCols; if (schemaNCols(pSchema) > oldMaxCols) { pCols->maxCols = schemaNCols(pSchema); - pCols->cols = (SDataCol *)realloc(pCols->cols, sizeof(SDataCol) * pCols->maxCols); - if (pCols->cols == NULL) return -1; + 
void* ptr = (SDataCol *)realloc(pCols->cols, sizeof(SDataCol) * pCols->maxCols); + if (ptr == NULL) return -1; + pCols->cols = ptr; for(i = oldMaxCols; i < pCols->maxCols; i++) { pCols->cols[i].pData = NULL; pCols->cols[i].dataOff = NULL; From e11ae1f46e7cee40f1f795d8587fdb70ec4da622 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Thu, 5 Aug 2021 10:52:26 +0800 Subject: [PATCH 013/185] memory alloc may fail --- src/common/inc/tdataformat.h | 4 ++-- src/common/src/tdataformat.c | 26 +++++++++++++++++--------- src/tsdb/src/tsdbCommit.c | 2 ++ src/tsdb/src/tsdbMemTable.c | 1 - src/tsdb/src/tsdbReadImpl.c | 3 +++ 5 files changed, 24 insertions(+), 12 deletions(-) diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index fb6bab0cf2..f49c80ad86 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -328,11 +328,11 @@ static FORCE_INLINE void dataColReset(SDataCol *pDataCol) { pDataCol->len = 0; } int tdAllocMemForCol(SDataCol *pCol, int maxPoints); void dataColInit(SDataCol *pDataCol, STColumn *pCol, int maxPoints); -void dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints); +int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints); void dataColSetOffset(SDataCol *pCol, int nEle); bool isNEleNull(SDataCol *pCol, int nEle); -void dataColSetNEleNull(SDataCol *pCol, int nEle, int maxPoints); +int dataColSetNEleNull(SDataCol *pCol, int nEle, int maxPoints); // Get the data pointer from a column-wised data static FORCE_INLINE const void *tdGetColDataOfRow(SDataCol *pCol, int row) { diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index 70911c88a6..42854704a5 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -22,7 +22,6 @@ static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2, int limit2, int tRows, bool forceSetNull); -//TODO: change caller to use return val int tdAllocMemForCol(SDataCol *pCol, int maxPoints) { int spaceNeeded = pCol->bytes * maxPoints; if(IS_VAR_DATA_TYPE(pCol->type)) { @@ -31,7 +30,7 @@ int tdAllocMemForCol(SDataCol *pCol, int maxPoints) { if(pCol->spaceSize < spaceNeeded) { void* ptr = realloc(pCol->pData, spaceNeeded); if(ptr == NULL) { - uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)pCol->spaceSize, + uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)spaceNeeded, strerror(errno)); return -1; } else { @@ -239,20 +238,22 @@ void dataColInit(SDataCol *pDataCol, STColumn *pCol, int maxPoints) { pDataCol->len = 0; } // value from timestamp should be TKEY here instead of TSKEY -void dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints) { +int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints) { ASSERT(pCol != NULL && value != NULL); if (isAllRowsNull(pCol)) { if (isNull(value, pCol->type)) { // all null value yet, just return - return; + return 0; } if (numOfRows > 0) { // Find the first not null value, fill all previouse values as NULL - dataColSetNEleNull(pCol, numOfRows, maxPoints); + if(dataColSetNEleNull(pCol, numOfRows, maxPoints) < 0) + return -1; } else { - tdAllocMemForCol(pCol, maxPoints); + if(tdAllocMemForCol(pCol, maxPoints) < 0) + return -1; } } @@ -268,6 +269,7 @@ void dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxP memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, pCol->bytes); pCol->len += pCol->bytes; } + return -1; 
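
Note: patches 012 and 013 converge on the safe realloc idiom shown above — park the result in a temporary, check it, and only then overwrite the owning pointer, so a failed reallocation cannot leak the old block by clobbering its only reference. A minimal standalone helper in that style:

    #include <stdlib.h>

    /* Grow *buf to at least `needed` bytes; returns 0 on success, -1 on failure.
     * On failure *buf is left untouched and still owns the old allocation. */
    static int growBuffer(char **buf, size_t *cap, size_t needed) {
      if (needed <= *cap) return 0;
      void *ptr = realloc(*buf, needed);   /* never assign straight into *buf */
      if (ptr == NULL) return -1;
      *buf = ptr;
      *cap = needed;
      return 0;
    }

The HTTP parser hunks at the end of this excerpt handle the same failure differently, by freeing the old buffer before overwriting the pointer; either way the old allocation is not silently lost.
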
} bool isNEleNull(SDataCol *pCol, int nEle) { @@ -290,8 +292,10 @@ static FORCE_INLINE void dataColSetNullAt(SDataCol *pCol, int index) { } } -void dataColSetNEleNull(SDataCol *pCol, int nEle, int maxPoints) { - tdAllocMemForCol(pCol, maxPoints); +int dataColSetNEleNull(SDataCol *pCol, int nEle, int maxPoints) { + if(tdAllocMemForCol(pCol, maxPoints)){ + return -1; + } if (IS_VAR_DATA_TYPE(pCol->type)) { pCol->len = 0; @@ -302,6 +306,7 @@ void dataColSetNEleNull(SDataCol *pCol, int nEle, int maxPoints) { setNullN(pCol->pData, pCol->type, pCol->bytes, nEle); pCol->len = TYPE_BYTES[pCol->type] * nEle; } + return 0; } void dataColSetOffset(SDataCol *pCol, int nEle) { @@ -414,7 +419,10 @@ SDataCols *tdDupDataCols(SDataCols *pDataCols, bool keepData) { if (keepData) { if (pDataCols->cols[i].len > 0) { - tdAllocMemForCol(&pRet->cols[i], pRet->maxPoints); + if(tdAllocMemForCol(&pRet->cols[i], pRet->maxPoints) < 0) { + tdFreeDataCols(pRet); + return NULL; + } pRet->cols[i].len = pDataCols->cols[i].len; memcpy(pRet->cols[i].pData, pDataCols->cols[i].pData, pDataCols->cols[i].len); if (IS_VAR_DATA_TYPE(pRet->cols[i].type)) { diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index 6c98283189..9d6b0a3594 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -1277,6 +1277,7 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt if (key1 < key2) { for (int i = 0; i < pDataCols->numOfCols; i++) { + //TODO: dataColAppendVal may fail dataColAppendVal(pTarget->cols + i, tdGetColDataOfRow(pDataCols->cols + i, *iter), pTarget->numOfRows, pTarget->maxPoints); } @@ -1308,6 +1309,7 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt ASSERT(!isRowDel); for (int i = 0; i < pDataCols->numOfCols; i++) { + //TODO: dataColAppendVal may fail dataColAppendVal(pTarget->cols + i, tdGetColDataOfRow(pDataCols->cols + i, *iter), pTarget->numOfRows, pTarget->maxPoints); } diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index 4809f6303f..e766d97a97 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -751,7 +751,6 @@ static SMemRow tsdbInsertDupKeyMerge(SMemRow row1, SMemRow row2, STsdbRepo* pRep memRowCpy(pMem, tmp); (*pPoints)++; - *pLastRow = pMem; return pMem; } diff --git a/src/tsdb/src/tsdbReadImpl.c b/src/tsdb/src/tsdbReadImpl.c index 711c32535b..c98f03b7de 100644 --- a/src/tsdb/src/tsdbReadImpl.c +++ b/src/tsdb/src/tsdbReadImpl.c @@ -463,6 +463,7 @@ static int tsdbLoadBlockDataImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDat SDataCol *pDataCol = &(pDataCols->cols[dcol]); if (dcol != 0 && ccol >= pBlockData->numOfCols) { // Set current column as NULL and forward + // TODO: dataColSetNEleNull may fail dataColSetNEleNull(pDataCol, pBlock->numOfRows, pDataCols->maxPoints); dcol++; continue; @@ -503,6 +504,7 @@ static int tsdbLoadBlockDataImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDat ccol++; } else { // Set current column as NULL and forward + // TODO: dataColSetNEleNull may fail dataColSetNEleNull(pDataCol, pBlock->numOfRows, pDataCols->maxPoints); dcol++; } @@ -608,6 +610,7 @@ static int tsdbLoadBlockDataColsImpl(SReadH *pReadh, SBlock *pBlock, SDataCols * } if (pBlockCol == NULL) { + // TODO: dataColSetNEleNull may fail dataColSetNEleNull(pDataCol, pBlock->numOfRows, pDataCols->maxPoints); continue; } From 78855c440cd6df73380122c764a8dc00e753f5d4 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Thu, 5 Aug 2021 13:53:20 +0800 Subject: [PATCH 014/185] code optimization --- 
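
Patch 016 below replaces the fixed STSchema* schema[TSDB_MAX_TABLE_SCHEMAS] slots in STable with a growable array kept in ascending schema-version order, so the latest schema is simply the last element and a specific version is found by binary search (taosArrayGetLast / taosArraySearch in the diff). A standalone sketch of that layout with simplified stand-in types — SchemaArray here is not the real SArray:

    #include <stdlib.h>

    typedef struct { int version; /* columns ... */ } Schema;

    typedef struct {
      Schema **pp;   /* schema pointers, sorted by version, ascending */
      size_t   n;
    } SchemaArray;

    static int cmpVersion(const void *key, const void *elem) {
      int v = *(const int *)key;
      const Schema *s = *(Schema *const *)elem;
      return (v > s->version) - (v < s->version);
    }

    /* version < 0 means "latest", mirroring the lookup in the diff below */
    static Schema *getSchema(const SchemaArray *a, int version) {
      if (a->n == 0) return NULL;
      if (version < 0) return a->pp[a->n - 1];
      Schema **hit = bsearch(&version, a->pp, a->n, sizeof(Schema *), cmpVersion);
      return hit ? *hit : NULL;
    }
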
src/client/inc/tsclient.h | 4 +--- src/client/src/tscUtil.c | 7 ++++--- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 07acb20cbe..4ead7d4180 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -189,7 +189,6 @@ static FORCE_INLINE void tscAppendMemRowColVal(SMemRow row, const void *value, b int8_t colType, int32_t toffset, SMemRowBuilder *pBuilder, int32_t rowNum) { tdAppendMemRowColVal(row, value, isCopyVarData, colId, colType, toffset); - // TODO: When nBoundCols/nCols > 0.5, if (pBuilder->compareStat == ROW_COMPARE_NEED) { SMemRowInfo *pRowInfo = pBuilder->rowInfo + rowNum; tdGetColAppendDeltaLen(value, colType, &pRowInfo->dataLen, &pRowInfo->kvLen); @@ -201,7 +200,6 @@ static FORCE_INLINE void tscAppendMemRowColValEx(SMemRow row, const void *value, int8_t colType, int32_t toffset, int32_t *dataLen, int32_t *kvLen, uint8_t compareStat) { tdAppendMemRowColVal(row, value, isCopyVarData, colId, colType, toffset); - // TODO: When nBoundCols/nCols > 0.5, if (compareStat == ROW_COMPARE_NEED) { tdGetColAppendDeltaLen(value, colType, dataLen, kvLen); } @@ -581,8 +579,8 @@ static FORCE_INLINE void convertToSKVRow(SMemRow dest, SMemRow src, SSchema *pSc int32_t toffset = 0, kvOffset = 0; for (int i = 0; i < nCols; ++i) { - SSchema *schema = pSchema + i; if ((spd->cols + i)->valStat == VAL_STAT_HAS) { + SSchema *schema = pSchema + i; toffset = (spd->cols + i)->toffset; void *val = tdGetRowDataOfCol(dataRow, schema->type, toffset + TD_DATA_ROW_HEAD_SIZE); tdAppendKvColVal(kvRow, val, true, schema->colId, schema->type, kvOffset); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 6db272002d..3c35795b0d 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1807,10 +1807,11 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SI int32_t schemaSize = sizeof(STColumn) * numOfCols; pBlock->schemaLen = schemaSize; } else { - for (int32_t j = 0; j < tinfo.numOfColumns; ++j) { - flen += TYPE_BYTES[pSchema[j].type]; + if (IS_RAW_PAYLOAD(insertParam->payloadType)) { + for (int32_t j = 0; j < tinfo.numOfColumns; ++j) { + flen += TYPE_BYTES[pSchema[j].type]; + } } - pBlock->schemaLen = 0; } From 1cfb6b78e006a36cecca7b8e9b8e0eca86041d96 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Thu, 5 Aug 2021 13:57:05 +0800 Subject: [PATCH 015/185] uncomment the nano subscribe case --- tests/pytest/fulltest.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 1604813a1f..9a0212d652 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -169,7 +169,7 @@ python3 test.py -f tools/taosdemoTestQuery.py # nano support python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py -# python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertTime_step.py python3 test.py -f tools/taosdumpTestNanoSupport.py From 22a2a6602858803cd32553e971fecc3052766ece Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Thu, 5 Aug 2021 14:29:15 +0800 Subject: [PATCH 016/185] [TD-5812] : fix possible coredump change schema --- src/tsdb/inc/tsdbMeta.h | 8 ++--- src/tsdb/src/tsdbMeta.c | 77 ++++++++++++++++++++++++++++------------- 2 files changed, 55 insertions(+), 30 
deletions(-) diff --git a/src/tsdb/inc/tsdbMeta.h b/src/tsdb/inc/tsdbMeta.h index 9a8de01f71..29d2b6595d 100644 --- a/src/tsdb/inc/tsdbMeta.h +++ b/src/tsdb/inc/tsdbMeta.h @@ -24,8 +24,7 @@ typedef struct STable { tstr* name; // NOTE: there a flexible string here uint64_t suid; struct STable* pSuper; // super table pointer - uint8_t numOfSchemas; - STSchema* schema[TSDB_MAX_TABLE_SCHEMAS]; + SArray* schema; STSchema* tagSchema; SKVRow tagVal; SSkipList* pIndex; // For TSDB_SUPER_TABLE, it is the skiplist index @@ -107,10 +106,9 @@ static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock, if (lock) TSDB_RLOCK_TABLE(pDTable); if (_version < 0) { // get the latest version of schema - pTSchema = pDTable->schema[pDTable->numOfSchemas - 1]; + pTSchema = *(STSchema **)taosArrayGetLast(pTable->schema); } else { // get the schema with version - void* ptr = taosbsearch(&_version, pDTable->schema, pDTable->numOfSchemas, sizeof(STSchema*), - tsdbCompareSchemaVersion, TD_EQ); + void* ptr = taosArraySearch(pTable->schema, &_version, tsdbCompareSchemaVersion, TD_EQ); if (ptr == NULL) { terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION; goto _exit; diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index 619b32b3d9..3fb5739641 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -44,6 +44,8 @@ static int tsdbRemoveTableFromStore(STsdbRepo *pRepo, STable *pTable); static int tsdbRmTableFromMeta(STsdbRepo *pRepo, STable *pTable); static int tsdbAdjustMetaTables(STsdbRepo *pRepo, int tid); static int tsdbCheckTableTagVal(SKVRow *pKVRow, STSchema *pSchema); +static int tsdbAddSchema(STable *pTable, STSchema *pSchema); +static void tsdbFreeTableSchema(STable *pTable); // ------------------ OUTER FUNCTIONS ------------------ int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) { @@ -723,17 +725,10 @@ void tsdbUpdateTableSchema(STsdbRepo *pRepo, STable *pTable, STSchema *pSchema, STsdbMeta *pMeta = pRepo->tsdbMeta; STable *pCTable = (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) ? 
pTable->pSuper : pTable; - ASSERT(schemaVersion(pSchema) > schemaVersion(pCTable->schema[pCTable->numOfSchemas - 1])); + ASSERT(schemaVersion(pSchema) > schemaVersion(*(STSchema **)taosArrayGetLast(pTable->schema))); TSDB_WLOCK_TABLE(pCTable); - if (pCTable->numOfSchemas < TSDB_MAX_TABLE_SCHEMAS) { - pCTable->schema[pCTable->numOfSchemas++] = pSchema; - } else { - ASSERT(pCTable->numOfSchemas == TSDB_MAX_TABLE_SCHEMAS); - tdFreeSchema(pCTable->schema[0]); - memmove(pCTable->schema, pCTable->schema + 1, sizeof(STSchema *) * (TSDB_MAX_TABLE_SCHEMAS - 1)); - pCTable->schema[pCTable->numOfSchemas - 1] = pSchema; - } + tsdbAddSchema(pCTable, pSchema); if (schemaNCols(pSchema) > pMeta->maxCols) pMeta->maxCols = schemaNCols(pSchema); if (schemaTLen(pSchema) > pMeta->maxRowBytes) pMeta->maxRowBytes = schemaTLen(pSchema); @@ -829,9 +824,7 @@ static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper, STable *pST TABLE_TID(pTable) = -1; TABLE_SUID(pTable) = -1; pTable->pSuper = NULL; - pTable->numOfSchemas = 1; - pTable->schema[0] = tdDupSchema(pCfg->schema); - if (pTable->schema[0] == NULL) { + if (tsdbAddSchema(pTable, tdDupSchema(pCfg->schema)) < 0) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; goto _err; } @@ -842,7 +835,8 @@ static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper, STable *pST } pTable->tagVal = NULL; STColumn *pCol = schemaColAt(pTable->tagSchema, DEFAULT_TAG_INDEX_COLUMN); - pTable->pIndex = tSkipListCreate(TSDB_SUPER_TABLE_SL_LEVEL, colType(pCol), (uint8_t)(colBytes(pCol)), NULL, SL_ALLOW_DUP_KEY, getTagIndexKey); + pTable->pIndex = tSkipListCreate(TSDB_SUPER_TABLE_SL_LEVEL, colType(pCol), (uint8_t)(colBytes(pCol)), NULL, + SL_ALLOW_DUP_KEY, getTagIndexKey); if (pTable->pIndex == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; goto _err; @@ -871,9 +865,7 @@ static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper, STable *pST } } else { TABLE_SUID(pTable) = -1; - pTable->numOfSchemas = 1; - pTable->schema[0] = tdDupSchema(pCfg->schema); - if (pTable->schema[0] == NULL) { + if (tsdbAddSchema(pTable, tdDupSchema(pCfg->schema)) < 0) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; goto _err; } @@ -907,9 +899,7 @@ static void tsdbFreeTable(STable *pTable) { TABLE_UID(pTable)); tfree(TABLE_NAME(pTable)); if (TABLE_TYPE(pTable) != TSDB_CHILD_TABLE) { - for (int i = 0; i < TSDB_MAX_TABLE_SCHEMAS; i++) { - tdFreeSchema(pTable->schema[i]); - } + tsdbFreeTableSchema(pTable); if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) { tdFreeSchema(pTable->tagSchema); @@ -1261,9 +1251,10 @@ static int tsdbEncodeTable(void **buf, STable *pTable) { tlen += taosEncodeFixedU64(buf, TABLE_SUID(pTable)); tlen += tdEncodeKVRow(buf, pTable->tagVal); } else { - tlen += taosEncodeFixedU8(buf, pTable->numOfSchemas); - for (int i = 0; i < pTable->numOfSchemas; i++) { - tlen += tdEncodeSchema(buf, pTable->schema[i]); + tlen += taosEncodeFixedU8(buf, taosArrayGetSize(pTable->schema)); + for (int i = 0; i < taosArrayGetSize(pTable->schema); i++) { + STSchema *pSchema = taosArrayGetP(pTable->schema, i); + tlen += tdEncodeSchema(buf, pSchema); } if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) { @@ -1294,9 +1285,12 @@ static void *tsdbDecodeTable(void *buf, STable **pRTable) { buf = taosDecodeFixedU64(buf, &TABLE_SUID(pTable)); buf = tdDecodeKVRow(buf, &(pTable->tagVal)); } else { - buf = taosDecodeFixedU8(buf, &(pTable->numOfSchemas)); - for (int i = 0; i < pTable->numOfSchemas; i++) { - buf = tdDecodeSchema(buf, &(pTable->schema[i])); + uint8_t nSchemas; + buf = taosDecodeFixedU8(buf, &nSchemas); + for 
(int i = 0; i < nSchemas; i++) { + STSchema *pSchema; + buf = tdDecodeSchema(buf, &pSchema); + tsdbAddSchema(pTable, pSchema); } if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) { @@ -1458,3 +1452,36 @@ static int tsdbCheckTableTagVal(SKVRow *pKVRow, STSchema *pSchema) { return 0; } + +static int tsdbAddSchema(STable *pTable, STSchema *pSchema) { + ASSERT(TABLE_TYPE(pTable) != TSDB_CHILD_TABLE); + + if (pTable->schema == NULL) { + pTable->schema = taosArrayInit(TSDB_MAX_TABLE_SCHEMAS, sizeof(SSchema *)); + if (pTable->schema == NULL) { + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } + } + + ASSERT(taosArrayGetSize(pTable->schema) == 0 || + schemaVersion(pSchema) > schemaVersion(*(STSchema **)taosArrayGetLast(pTable->schema))); + + if (taosArrayPush(pTable->schema, &pSchema) == NULL) { + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } + + return 0; +} + +static void tsdbFreeTableSchema(STable *pTable) { + ASSERT(pTable != NULL); + + if (pTable->schema) { + for (size_t i = 0; i < taosArrayGetSize(pTable->schema); i++) { + STSchema *pSchema = taosArrayGetP(pTable->schema, i); + tdFreeSchema(pSchema); + } + } +} \ No newline at end of file From cbcf773962ad5b189f53739529f158b37c481aee Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Thu, 5 Aug 2021 15:05:14 +0800 Subject: [PATCH 017/185] fix compile error --- src/tsdb/src/tsdbMeta.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index 3fb5739641..fca1b17ba9 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -1251,7 +1251,7 @@ static int tsdbEncodeTable(void **buf, STable *pTable) { tlen += taosEncodeFixedU64(buf, TABLE_SUID(pTable)); tlen += tdEncodeKVRow(buf, pTable->tagVal); } else { - tlen += taosEncodeFixedU8(buf, taosArrayGetSize(pTable->schema)); + tlen += taosEncodeFixedU8(buf, (uint8_t)taosArrayGetSize(pTable->schema)); for (int i = 0; i < taosArrayGetSize(pTable->schema); i++) { STSchema *pSchema = taosArrayGetP(pTable->schema, i); tlen += tdEncodeSchema(buf, pSchema); From d648f557538c7e527fade2c56b7c04d0855a08cd Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Thu, 5 Aug 2021 16:57:50 +0800 Subject: [PATCH 018/185] fix memory leak --- src/tsdb/src/tsdbMeta.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index fca1b17ba9..b2395f8417 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -1483,5 +1483,7 @@ static void tsdbFreeTableSchema(STable *pTable) { STSchema *pSchema = taosArrayGetP(pTable->schema, i); tdFreeSchema(pSchema); } + + taosArrayDestroy(pTable->schema); } } \ No newline at end of file From 907c87018130836b7b0ba91c8ab522e950fd0e43 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Thu, 5 Aug 2021 17:28:10 +0800 Subject: [PATCH 019/185] fix coredump --- src/tsdb/inc/tsdbMeta.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tsdb/inc/tsdbMeta.h b/src/tsdb/inc/tsdbMeta.h index 29d2b6595d..51801c843c 100644 --- a/src/tsdb/inc/tsdbMeta.h +++ b/src/tsdb/inc/tsdbMeta.h @@ -106,9 +106,9 @@ static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock, if (lock) TSDB_RLOCK_TABLE(pDTable); if (_version < 0) { // get the latest version of schema - pTSchema = *(STSchema **)taosArrayGetLast(pTable->schema); + pTSchema = *(STSchema **)taosArrayGetLast(pDTable->schema); } else { // get the schema with version - void* ptr = taosArraySearch(pTable->schema, &_version, tsdbCompareSchemaVersion, TD_EQ); + void* ptr = 
taosArraySearch(pDTable->schema, &_version, tsdbCompareSchemaVersion, TD_EQ); if (ptr == NULL) { terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION; goto _exit; From 1d881042d5410ae013c29234a70f6323c553f26a Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Thu, 5 Aug 2021 17:32:39 +0800 Subject: [PATCH 020/185] fix another possible coredump --- src/tsdb/src/tsdbMeta.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index b2395f8417..fa125dfa5f 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -725,7 +725,7 @@ void tsdbUpdateTableSchema(STsdbRepo *pRepo, STable *pTable, STSchema *pSchema, STsdbMeta *pMeta = pRepo->tsdbMeta; STable *pCTable = (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) ? pTable->pSuper : pTable; - ASSERT(schemaVersion(pSchema) > schemaVersion(*(STSchema **)taosArrayGetLast(pTable->schema))); + ASSERT(schemaVersion(pSchema) > schemaVersion(*(STSchema **)taosArrayGetLast(pCTable->schema))); TSDB_WLOCK_TABLE(pCTable); tsdbAddSchema(pCTable, pSchema); From 3c566750aa5e16c75a82ec81130dd8be21a32663 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Fri, 6 Aug 2021 10:23:39 +0800 Subject: [PATCH 021/185] [TD-5765]:check max length when alter tag value --- src/client/src/tscSQLParser.c | 5 + tests/pytest/fulltest.sh | 1 + tests/pytest/tag_lite/TestModifyTag.py | 125 +++++++++++++++++++++++++ 3 files changed, 131 insertions(+) create mode 100644 tests/pytest/tag_lite/TestModifyTag.py diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 7adc0812ae..8d89b7c3cc 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5884,6 +5884,11 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { tVariantListItem* pItem = taosArrayGet(pVarList, 1); SSchema* pTagsSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex.columnIndex); + + if (IS_VAR_DATA_TYPE(pTagsSchema->type) && (pItem->pVar.nLen > pTagsSchema->bytes * TSDB_NCHAR_SIZE)) { + return invalidOperationMsg(pMsg, msg14); + } + pAlterSQL->tagData.data = calloc(1, pTagsSchema->bytes * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE); if (tVariantDump(&pItem->pVar, pAlterSQL->tagData.data, pTagsSchema->type, true) != TSDB_CODE_SUCCESS) { diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 783ee98da3..3cbc80b69d 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -76,6 +76,7 @@ python3 ./test.py -f tag_lite/set.py python3 ./test.py -f tag_lite/smallint.py python3 ./test.py -f tag_lite/tinyint.py python3 ./test.py -f tag_lite/timestamp.py +python3 ./test.py -f tag_lite/TestModifyTag.py #python3 ./test.py -f dbmgmt/database-name-boundary.py python3 test.py -f dbmgmt/nanoSecondCheck.py diff --git a/tests/pytest/tag_lite/TestModifyTag.py b/tests/pytest/tag_lite/TestModifyTag.py new file mode 100644 index 0000000000..acf63695f6 --- /dev/null +++ b/tests/pytest/tag_lite/TestModifyTag.py @@ -0,0 +1,125 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1625068800000000000 # this is timestamp "2021-07-01 00:00:00" + self.numberOfTables = 10 + self.numberOfRecords = 100 + + def checkCommunity(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + return False + else: + return True + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosdump" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + + + def run(self): + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + # basic test for alter tags + tdSql.execute("create database tagdb ") + tdSql.execute(" use tagdb") + tdSql.execute("create table st (ts timestamp , a int) tags (tg1 binary(20), tg2 binary(20), tg3 binary(20))") + tdSql.execute("insert into t using st (tg3, tg2, tg1) tags ('tg3', 'tg2', 'tg1') values (now, 1)") + tdSql.execute("alter table t set tag tg1='newtg1'") + res = tdSql.getResult("select tg1,tg2,tg3 from t") + + if res == [('newtg1', 'tg2', 'tg3')]: + tdLog.info(" alter tag check has pass!") + else: + tdLog.info(" alter tag failed , please check !") + + tdSql.error("alter stable st modify tag tg2 binary(2)") + tdSql.execute("alter stable st modify tag tg2 binary(30) ") + tdSql.execute("alter table t set tag tg2 = 'abcdefghijklmnopqrstuvwxyz1234'") + res = tdSql.getResult("select tg1,tg2,tg3 from t") + if res == [('newtg1', 'abcdefghijklmnopqrstuvwxyz1234', 'tg3')]: + tdLog.info(" alter tag check has pass!") + else: + tdLog.info(" alter tag failed , please check !") + + # test boundary about tags + tdSql.execute("create stable stb1 (ts timestamp , a int) tags (tg1 binary(16374))") + tdSql.error("create stable stb1 (ts timestamp , a int) tags (tg1 binary(16375))") + bound_sql = "create stable stb2 (ts timestamp , a int) tags (tg1 binary(10)," + for i in range(127): + bound_sql+="tag"+str(i)+" binary(10)," + sql1 = bound_sql[:-1]+")" + tdSql.execute(sql1) + sql2 = bound_sql[:-1]+"tag127 binary(10))" + tdSql.error(sql2) + tdSql.execute("create stable stb3 (ts timestamp , a int) tags (tg1 nchar(4093))") + tdSql.error("create stable stb3 (ts timestamp , a int) tags (tg1 nchar(4094))") + tdSql.execute("create stable stb4 (ts timestamp , a int) tags (tg1 nchar(4093),tag2 binary(8))") + tdSql.error("create stable stb4 (ts timestamp , a int) tags (tg1 nchar(4093),tag2 binary(9))") + tdSql.execute("create stable stb5 (ts timestamp , a int) tags (tg1 nchar(4093),tag2 binary(4),tag3 binary(2))") + tdSql.error("create stable stb5 (ts timestamp , a int) tags (tg1 nchar(4093),tag2 
binary(4),tag3 binary(3))") + + tdSql.execute("create table stt (ts timestamp , a binary(100)) tags (tg1 binary(20), tg2 binary(20), tg3 binary(20))") + tdSql.execute("insert into tt using stt (tg3, tg2, tg1) tags ('tg3', 'tg2', 'tg1') values (now, 1)") + tags = "t"*16337 + sql3 = "alter table tt set tag tg1=" +"'"+tags+"'" + tdSql.error(sql3) + tdSql.execute("alter stable stt modify tag tg1 binary(16337)") + tdSql.execute(sql3) + res = tdSql.getResult("select tg1,tg2,tg3 from tt") + if res == [(tags, 'tg2', 'tg3')]: + tdLog.info(" alter tag check has pass!") + else: + tdLog.info(" alter tag failed , please check !") + + os.system("rm -rf ./tag_lite/TestModifyTag.py.sql") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file From c9e176dffa7e7980574411e8fdead7b680bcc511 Mon Sep 17 00:00:00 2001 From: xywang Date: Fri, 6 Aug 2021 10:54:06 +0800 Subject: [PATCH 022/185] [TD-5784]: fixed potential memory leak bugs in HTTP module --- src/plugins/http/src/httpParser.c | 42 +++++++++++++++++++++++++------ src/plugins/http/src/httpUtil.c | 16 +++++++++--- 2 files changed, 46 insertions(+), 12 deletions(-) diff --git a/src/plugins/http/src/httpParser.c b/src/plugins/http/src/httpParser.c index ba88a2b9cd..02f21037b8 100644 --- a/src/plugins/http/src/httpParser.c +++ b/src/plugins/http/src/httpParser.c @@ -101,13 +101,17 @@ char *httpGetStatusDesc(int32_t statusCode) { } static void httpCleanupString(HttpString *str) { - free(str->str); - str->str = NULL; - str->pos = 0; - str->size = 0; + if (str->str) { + free(str->str); + str->str = NULL; + str->pos = 0; + str->size = 0; + } } static int32_t httpAppendString(HttpString *str, const char *s, int32_t len) { + char *new_str = NULL; + if (str->size == 0) { str->pos = 0; str->size = len + 1; @@ -115,7 +119,16 @@ static int32_t httpAppendString(HttpString *str, const char *s, int32_t len) { } else if (str->pos + len + 1 >= str->size) { str->size += len; str->size *= 4; - str->str = realloc(str->str, str->size); + + new_str = realloc(str->str, str->size); + if (new_str == NULL && str->str) { + // if str->str was not NULL originally, + // the old allocated memory was left unchanged, + // see man 3 realloc + free(str->str); + } + + str->str = new_str; } else { } @@ -317,7 +330,7 @@ static int32_t httpOnParseHeaderField(HttpParser *parser, const char *key, const static int32_t httpOnBody(HttpParser *parser, const char *chunk, int32_t len) { HttpContext *pContext = parser->pContext; - HttpString * buf = &parser->body; + HttpString *buf = &parser->body; if (parser->parseCode != TSDB_CODE_SUCCESS) return -1; if (buf->size <= 0) { @@ -326,6 +339,7 @@ static int32_t httpOnBody(HttpParser *parser, const char *chunk, int32_t len) { } int32_t newSize = buf->pos + len + 1; + char *newStr = NULL; if (newSize >= buf->size) { if (buf->size >= HTTP_BUFFER_SIZE) { httpError("context:%p, fd:%d, failed parse body, exceeding buffer size %d", pContext, pContext->fd, buf->size); @@ -336,7 +350,12 @@ static int32_t httpOnBody(HttpParser *parser, const char *chunk, int32_t len) { newSize = MAX(newSize, HTTP_BUFFER_INIT); newSize *= 4; newSize = MIN(newSize, HTTP_BUFFER_SIZE); - buf->str = realloc(buf->str, newSize); + newStr = realloc(buf->str, newSize); + if (newStr == NULL && buf->str) { + free(buf->str); + } + + buf->str = newStr; buf->size = newSize; if (buf->str == NULL) { @@ -374,13 +393,20 @@ static HTTP_PARSER_STATE 
httpTopStack(HttpParser *parser) { static int32_t httpPushStack(HttpParser *parser, HTTP_PARSER_STATE state) { HttpStack *stack = &parser->stacks; + int8_t *newStacks = NULL; if (stack->size == 0) { stack->pos = 0; stack->size = 32; stack->stacks = malloc(stack->size * sizeof(int8_t)); } else if (stack->pos + 1 > stack->size) { stack->size *= 2; - stack->stacks = realloc(stack->stacks, stack->size * sizeof(int8_t)); + + newStacks = realloc(stack->stacks, stack->size * sizeof(int8_t)); + if (newStacks == NULL && stack->stacks) { + free(stack->stacks); + } + + stack->stacks = newStacks; } else { } diff --git a/src/plugins/http/src/httpUtil.c b/src/plugins/http/src/httpUtil.c index a8031d3fd8..27b95a4934 100644 --- a/src/plugins/http/src/httpUtil.c +++ b/src/plugins/http/src/httpUtil.c @@ -188,13 +188,17 @@ bool httpMallocMultiCmds(HttpContext *pContext, int32_t cmdSize, int32_t bufferS bool httpReMallocMultiCmdsSize(HttpContext *pContext, int32_t cmdSize) { HttpSqlCmds *multiCmds = pContext->multiCmds; - if (cmdSize > HTTP_MAX_CMD_SIZE) { + if (cmdSize <= 0 && cmdSize > HTTP_MAX_CMD_SIZE) { httpError("context:%p, fd:%d, user:%s, mulitcmd size:%d large then %d", pContext, pContext->fd, pContext->user, cmdSize, HTTP_MAX_CMD_SIZE); return false; } - multiCmds->cmds = (HttpSqlCmd *)realloc(multiCmds->cmds, (size_t)cmdSize * sizeof(HttpSqlCmd)); + HttpSqlCmd *new_cmds = (HttpSqlCmd *)realloc(multiCmds->cmds, (size_t)cmdSize * sizeof(HttpSqlCmd)); + if (new_cmds == NULL && multiCmds->cmds) { + free(multiCmds->cmds); + } + multiCmds->cmds = new_cmds; if (multiCmds->cmds == NULL) { httpError("context:%p, fd:%d, user:%s, malloc cmds:%d error", pContext, pContext->fd, pContext->user, cmdSize); return false; @@ -208,13 +212,17 @@ bool httpReMallocMultiCmdsSize(HttpContext *pContext, int32_t cmdSize) { bool httpReMallocMultiCmdsBuffer(HttpContext *pContext, int32_t bufferSize) { HttpSqlCmds *multiCmds = pContext->multiCmds; - if (bufferSize > HTTP_MAX_BUFFER_SIZE) { + if (bufferSize <= 0 || bufferSize > HTTP_MAX_BUFFER_SIZE) { httpError("context:%p, fd:%d, user:%s, mulitcmd buffer size:%d large then %d", pContext, pContext->fd, pContext->user, bufferSize, HTTP_MAX_BUFFER_SIZE); return false; } - multiCmds->buffer = (char *)realloc(multiCmds->buffer, (size_t)bufferSize); + char *new_buffer = (char *)realloc(multiCmds->buffer, (size_t)bufferSize); + if (new_buffer == NULL && multiCmds->buffer) { + free(multiCmds->buffer); + } + multiCmds->buffer = new_buffer; if (multiCmds->buffer == NULL) { httpError("context:%p, fd:%d, user:%s, malloc buffer:%d error", pContext, pContext->fd, pContext->user, bufferSize); return false; From 1cf81a85947a7621c0233a9426683c8e425a91d5 Mon Sep 17 00:00:00 2001 From: xywang Date: Fri, 6 Aug 2021 11:48:20 +0800 Subject: [PATCH 023/185] [TD-5784]: fixed some potential memory leak bugs --- src/plugins/http/inc/httpParser.h | 67 +++++++- src/plugins/http/src/httpParser.c | 257 +++++++++++++++++------------- src/plugins/http/src/httpUtil.c | 21 ++- 3 files changed, 231 insertions(+), 114 deletions(-) diff --git a/src/plugins/http/inc/httpParser.h b/src/plugins/http/inc/httpParser.h index 85ba843716..f7b55958c8 100644 --- a/src/plugins/http/inc/httpParser.h +++ b/src/plugins/http/inc/httpParser.h @@ -17,7 +17,71 @@ #define HTTP_PARSER_H #include "httpGzip.h" -#define HTTP_MAX_URL 5 // http url stack size +#define HTTP_MAX_URL 6 // http url stack size + +#define HTTP_CODE_CONTINUE 100 +#define HTTP_CODE_SWITCHING_PROTOCOL 101 +#define HTTP_CODE_PROCESSING 102 +#define 
HTTP_CODE_EARLY_HINTS 103 +#define HTTP_CODE_OK 200 +#define HTTP_CODE_CREATED 201 +#define HTTP_CODE_ACCEPTED 202 +#define HTTP_CODE_NON_AUTHORITATIVE_INFO 203 +#define HTTP_CODE_NO_CONTENT 204 +#define HTTP_CODE_RESET_CONTENT 205 +#define HTTP_CODE_PARTIAL_CONTENT 206 +#define HTTP_CODE_MULTI_STATUS 207 +#define HTTP_CODE_ALREADY_REPORTED 208 +#define HTTP_CODE_IM_USED 226 +#define HTTP_CODE_MULTIPLE_CHOICE 300 +#define HTTP_CODE_MOVED_PERMANENTLY 301 +#define HTTP_CODE_FOUND 302 +#define HTTP_CODE_SEE_OTHER 303 +#define HTTP_CODE_NOT_MODIFIED 304 +#define HTTP_CODE_USE_PROXY 305 +#define HTTP_CODE_UNUSED 306 +#define HTTP_CODE_TEMPORARY_REDIRECT 307 +#define HTTP_CODE_PERMANENT_REDIRECT 308 +#define HTTP_CODE_BAD_REQUEST 400 +#define HTTP_CODE_UNAUTHORIZED 401 +#define HTTP_CODE_PAYMENT_REQUIRED 402 +#define HTTP_CODE_FORBIDDEN 403 +#define HTTP_CODE_NOT_FOUND 404 +#define HTTP_CODE_METHOD_NOT_ALLOWED 405 +#define HTTP_CODE_NOT_ACCEPTABLE 406 +#define HTTP_CODE_PROXY_AUTH_REQUIRED 407 +#define HTTP_CODE_REQUEST_TIMEOUT 408 +#define HTTP_CODE_CONFLICT 409 +#define HTTP_CODE_GONE 410 +#define HTTP_CODE_LENGTH_REQUIRED 411 +#define HTTP_CODE_PRECONDITION_FAILED 412 +#define HTTP_CODE_PAYLOAD_TOO_LARGE 413 +#define HTTP_CODE_URI_TOO_LARGE 414 +#define HTTP_CODE_UNSUPPORTED_MEDIA_TYPE 415 +#define HTTP_CODE_RANGE_NOT_SATISFIABLE 416 +#define HTTP_CODE_EXPECTATION_FAILED 417 +#define HTTP_CODE_IM_A_TEAPOT 418 +#define HTTP_CODE_MISDIRECTED_REQUEST 421 +#define HTTP_CODE_UNPROCESSABLE_ENTITY 422 +#define HTTP_CODE_LOCKED 423 +#define HTTP_CODE_FAILED_DEPENDENCY 424 +#define HTTP_CODE_TOO_EARLY 425 +#define HTTP_CODE_UPGRADE_REQUIRED 426 +#define HTTP_CODE_PRECONDITION_REQUIRED 428 +#define HTTP_CODE_TOO_MANY_REQUESTS 429 +#define HTTP_CODE_REQ_HDR_FIELDS_TOO_LARGE 431 +#define HTTP_CODE_UNAVAIL_4_LEGAL_REASONS 451 +#define HTTP_CODE_INTERNAL_SERVER_ERROR 500 +#define HTTP_CODE_NOT_IMPLEMENTED 501 +#define HTTP_CODE_BAD_GATEWAY 502 +#define HTTP_CODE_SERVICE_UNAVAILABLE 503 +#define HTTP_CODE_GATEWAY_TIMEOUT 504 +#define HTTP_CODE_HTTP_VER_NOT_SUPPORTED 505 +#define HTTP_CODE_VARIANT_ALSO_NEGOTIATES 506 +#define HTTP_CODE_INSUFFICIENT_STORAGE 507 +#define HTTP_CODE_LOOP_DETECTED 508 +#define HTTP_CODE_NOT_EXTENDED 510 +#define HTTP_CODE_NETWORK_AUTH_REQUIRED 511 typedef enum HTTP_PARSER_STATE { HTTP_PARSER_BEGIN, @@ -36,6 +100,7 @@ typedef enum HTTP_PARSER_STATE { HTTP_PARSER_CHUNK, HTTP_PARSER_END, HTTP_PARSER_ERROR, + HTTP_PARSER_OPTIONAL_SP } HTTP_PARSER_STATE; typedef enum HTTP_AUTH_TYPE { diff --git a/src/plugins/http/src/httpParser.c b/src/plugins/http/src/httpParser.c index 18cea56cfe..02f21037b8 100644 --- a/src/plugins/http/src/httpParser.c +++ b/src/plugins/http/src/httpParser.c @@ -25,70 +25,70 @@ static void httpOnData(ehttp_gzip_t *gzip, void *arg, const char *buf, int32_t len); static HttpStatus httpStatusCodes[] = { - {100, "Continue"}, - {101, "Switching Protocol"}, - {102, "Processing (WebDAV)"}, - {103, "Early Hints"}, - {200, "OK"}, - {201, "Created"}, - {202, "Accepted"}, - {203, "Non-Authoritative Information"}, - {204, "No Content"}, - {205, "Reset Content"}, - {206, "Partial Content"}, - {207, "Multi-Status (WebDAV)"}, - {208, "Already Reported (WebDAV)"}, - {226, "IM Used (HTTP Delta encoding)"}, - {300, "Multiple Choice"}, - {301, "Moved Permanently"}, - {302, "Found"}, - {303, "See Other"}, - {304, "Not Modified"}, - {305, "Use Proxy"}, - {306, "unused"}, - {307, "Temporary Redirect"}, - {308, "Permanent Redirect"}, - {400, "Bad Request"}, - {401, "Unauthorized"}, - 
{402, "Payment Required"}, - {403, "Forbidden"}, - {404, "Not Found"}, - {405, "Method Not Allowed"}, - {406, "Not Acceptable"}, - {407, "Proxy Authentication Required"}, - {408, "Request Timeout"}, - {409, "Conflict"}, - {410, "Gone"}, - {411, "Length Required"}, - {412, "Precondition Failed"}, - {413, "Payload Too Large"}, - {414, "URI Too Long"}, - {415, "Unsupported Media Type"}, - {416, "Range Not Satisfiable"}, - {417, "Expectation Failed"}, - {418, "I'm a teapot"}, - {421, "Misdirected Request"}, - {422, "Unprocessable Entity (WebDAV)"}, - {423, "Locked (WebDAV)"}, - {424, "Failed Dependency (WebDAV)"}, - {425, "Too Early"}, - {426, "Upgrade Required"}, - {428, "Precondition Required"}, - {429, "Too Many Requests"}, - {431, "Request Header Fields Too Large"}, - {451, "Unavailable For Legal Reasons"}, - {500, "Internal Server Error"}, - {501, "Not Implemented"}, - {502, "Bad Gateway"}, - {503, "Service Unavailable"}, - {504, "Gateway Timeout"}, - {505, "HTTP Version Not Supported"}, - {506, "Variant Also Negotiates"}, - {507, "Insufficient Storage"}, - {508, "Loop Detected (WebDAV)"}, - {510, "Not Extended"}, - {511, "Network Authentication Required"}, - {0, NULL} + {HTTP_CODE_CONTINUE, "Continue"}, + {HTTP_CODE_SWITCHING_PROTOCOL, "Switching Protocol"}, + {HTTP_CODE_PROCESSING, "Processing (WebDAV)"}, + {HTTP_CODE_EARLY_HINTS, "Early Hints"}, + {HTTP_CODE_OK, "OK"}, + {HTTP_CODE_CREATED, "Created"}, + {HTTP_CODE_ACCEPTED, "Accepted"}, + {HTTP_CODE_NON_AUTHORITATIVE_INFO, "Non-Authoritative Information"}, + {HTTP_CODE_NO_CONTENT, "No Content"}, + {HTTP_CODE_RESET_CONTENT, "Reset Content"}, + {HTTP_CODE_PARTIAL_CONTENT, "Partial Content"}, + {HTTP_CODE_MULTI_STATUS, "Multi-Status (WebDAV)"}, + {HTTP_CODE_ALREADY_REPORTED, "Already Reported (WebDAV)"}, + {HTTP_CODE_IM_USED, "IM Used (HTTP Delta encoding)"}, + {HTTP_CODE_MULTIPLE_CHOICE, "Multiple Choice"}, + {HTTP_CODE_MOVED_PERMANENTLY, "Moved Permanently"}, + {HTTP_CODE_FOUND, "Found"}, + {HTTP_CODE_SEE_OTHER, "See Other"}, + {HTTP_CODE_NOT_MODIFIED, "Not Modified"}, + {HTTP_CODE_USE_PROXY, "Use Proxy"}, + {HTTP_CODE_UNUSED, "unused"}, + {HTTP_CODE_TEMPORARY_REDIRECT, "Temporary Redirect"}, + {HTTP_CODE_PERMANENT_REDIRECT, "Permanent Redirect"}, + {HTTP_CODE_BAD_REQUEST, "Bad Request"}, + {HTTP_CODE_UNAUTHORIZED, "Unauthorized"}, + {HTTP_CODE_PAYMENT_REQUIRED, "Payment Required"}, + {HTTP_CODE_FORBIDDEN, "Forbidden"}, + {HTTP_CODE_NOT_FOUND, "Not Found"}, + {HTTP_CODE_METHOD_NOT_ALLOWED, "Method Not Allowed"}, + {HTTP_CODE_NOT_ACCEPTABLE, "Not Acceptable"}, + {HTTP_CODE_PROXY_AUTH_REQUIRED, "Proxy Authentication Required"}, + {HTTP_CODE_REQUEST_TIMEOUT, "Request Timeout"}, + {HTTP_CODE_CONFLICT, "Conflict"}, + {HTTP_CODE_GONE, "Gone"}, + {HTTP_CODE_LENGTH_REQUIRED, "Length Required"}, + {HTTP_CODE_PRECONDITION_FAILED, "Precondition Failed"}, + {HTTP_CODE_PAYLOAD_TOO_LARGE, "Payload Too Large"}, + {HTTP_CODE_URI_TOO_LARGE, "URI Too Long"}, + {HTTP_CODE_UNSUPPORTED_MEDIA_TYPE, "Unsupported Media Type"}, + {HTTP_CODE_RANGE_NOT_SATISFIABLE, "Range Not Satisfiable"}, + {HTTP_CODE_EXPECTATION_FAILED, "Expectation Failed"}, + {HTTP_CODE_IM_A_TEAPOT, "I'm a teapot"}, + {HTTP_CODE_MISDIRECTED_REQUEST, "Misdirected Request"}, + {HTTP_CODE_UNPROCESSABLE_ENTITY, "Unprocessable Entity (WebDAV)"}, + {HTTP_CODE_LOCKED, "Locked (WebDAV)"}, + {HTTP_CODE_FAILED_DEPENDENCY, "Failed Dependency (WebDAV)"}, + {HTTP_CODE_TOO_EARLY, "Too Early"}, + {HTTP_CODE_UPGRADE_REQUIRED, "Upgrade Required"}, + {HTTP_CODE_PRECONDITION_REQUIRED, "Precondition 
Required"}, + {HTTP_CODE_TOO_MANY_REQUESTS, "Too Many Requests"}, + {HTTP_CODE_REQ_HDR_FIELDS_TOO_LARGE,"Request Header Fields Too Large"}, + {HTTP_CODE_UNAVAIL_4_LEGAL_REASONS, "Unavailable For Legal Reasons"}, + {HTTP_CODE_INTERNAL_SERVER_ERROR, "Internal Server Error"}, + {HTTP_CODE_NOT_IMPLEMENTED, "Not Implemented"}, + {HTTP_CODE_BAD_GATEWAY, "Bad Gateway"}, + {HTTP_CODE_SERVICE_UNAVAILABLE, "Service Unavailable"}, + {HTTP_CODE_GATEWAY_TIMEOUT, "Gateway Timeout"}, + {HTTP_CODE_HTTP_VER_NOT_SUPPORTED, "HTTP Version Not Supported"}, + {HTTP_CODE_VARIANT_ALSO_NEGOTIATES, "Variant Also Negotiates"}, + {HTTP_CODE_INSUFFICIENT_STORAGE, "Insufficient Storage"}, + {HTTP_CODE_LOOP_DETECTED, "Loop Detected (WebDAV)"}, + {HTTP_CODE_NOT_EXTENDED, "Not Extended"}, + {HTTP_CODE_NETWORK_AUTH_REQUIRED, "Network Authentication Required"}, + {0, NULL} }; char *httpGetStatusDesc(int32_t statusCode) { @@ -101,13 +101,17 @@ char *httpGetStatusDesc(int32_t statusCode) { } static void httpCleanupString(HttpString *str) { - free(str->str); - str->str = NULL; - str->pos = 0; - str->size = 0; + if (str->str) { + free(str->str); + str->str = NULL; + str->pos = 0; + str->size = 0; + } } static int32_t httpAppendString(HttpString *str, const char *s, int32_t len) { + char *new_str = NULL; + if (str->size == 0) { str->pos = 0; str->size = len + 1; @@ -115,7 +119,16 @@ static int32_t httpAppendString(HttpString *str, const char *s, int32_t len) { } else if (str->pos + len + 1 >= str->size) { str->size += len; str->size *= 4; - str->str = realloc(str->str, str->size); + + new_str = realloc(str->str, str->size); + if (new_str == NULL && str->str) { + // if str->str was not NULL originally, + // the old allocated memory was left unchanged, + // see man 3 realloc + free(str->str); + } + + str->str = new_str; } else { } @@ -163,9 +176,9 @@ static int32_t httpOnRequestLine(HttpParser *pParser, char *method, char *target // parse decode method for (int32_t i = 0; i < tsHttpServer.methodScannerLen; i++) { - HttpDecodeMethod *method = tsHttpServer.methodScanner[i]; - if (strcmp(method->module, pParser->path[0].str) == 0) { - pContext->decodeMethod = method; + HttpDecodeMethod *_method = tsHttpServer.methodScanner[i]; + if (strcmp(_method->module, pParser->path[0].str) == 0) { + pContext->decodeMethod = _method; break; } } @@ -317,7 +330,7 @@ static int32_t httpOnParseHeaderField(HttpParser *parser, const char *key, const static int32_t httpOnBody(HttpParser *parser, const char *chunk, int32_t len) { HttpContext *pContext = parser->pContext; - HttpString * buf = &parser->body; + HttpString *buf = &parser->body; if (parser->parseCode != TSDB_CODE_SUCCESS) return -1; if (buf->size <= 0) { @@ -326,6 +339,7 @@ static int32_t httpOnBody(HttpParser *parser, const char *chunk, int32_t len) { } int32_t newSize = buf->pos + len + 1; + char *newStr = NULL; if (newSize >= buf->size) { if (buf->size >= HTTP_BUFFER_SIZE) { httpError("context:%p, fd:%d, failed parse body, exceeding buffer size %d", pContext, pContext->fd, buf->size); @@ -336,7 +350,12 @@ static int32_t httpOnBody(HttpParser *parser, const char *chunk, int32_t len) { newSize = MAX(newSize, HTTP_BUFFER_INIT); newSize *= 4; newSize = MIN(newSize, HTTP_BUFFER_SIZE); - buf->str = realloc(buf->str, newSize); + newStr = realloc(buf->str, newSize); + if (newStr == NULL && buf->str) { + free(buf->str); + } + + buf->str = newStr; buf->size = newSize; if (buf->str == NULL) { @@ -374,13 +393,20 @@ static HTTP_PARSER_STATE httpTopStack(HttpParser *parser) { static int32_t 
httpPushStack(HttpParser *parser, HTTP_PARSER_STATE state) { HttpStack *stack = &parser->stacks; + int8_t *newStacks = NULL; if (stack->size == 0) { stack->pos = 0; stack->size = 32; stack->stacks = malloc(stack->size * sizeof(int8_t)); } else if (stack->pos + 1 > stack->size) { stack->size *= 2; - stack->stacks = realloc(stack->stacks, stack->size * sizeof(int8_t)); + + newStacks = realloc(stack->stacks, stack->size * sizeof(int8_t)); + if (newStacks == NULL && stack->stacks) { + free(stack->stacks); + } + + stack->stacks = newStacks; } else { } @@ -552,7 +578,7 @@ static int32_t httpParserOnBegin(HttpParser *parser, HTTP_PARSER_STATE state, co if (httpAppendString(&parser->str, &c, 1)) { httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c); ok = -1; - httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_METHOD_FAILED); + httpOnError(parser, HTTP_CODE_INSUFFICIENT_STORAGE, TSDB_CODE_HTTP_PARSE_METHOD_FAILED); break; } httpPopStack(parser); @@ -561,7 +587,7 @@ static int32_t httpParserOnBegin(HttpParser *parser, HTTP_PARSER_STATE state, co } httpError("context:%p, fd:%d, parser state:%d, unexpected char:[%c]%02x", pContext, pContext->fd, state, c, c); ok = -1; - httpOnError(parser, 400, TSDB_CODE_HTTP_PARSE_METHOD_FAILED); + httpOnError(parser, HTTP_CODE_BAD_REQUEST, TSDB_CODE_HTTP_PARSE_METHOD_FAILED); } while (0); return ok; } @@ -599,7 +625,7 @@ static int32_t httpParserOnRquestOrResponse(HttpParser *parser, HTTP_PARSER_STAT httpError("context:%p, fd:%d, parser state:%d, unexpected char:[%c]%02x", pContext, pContext->fd, state, c, c); ok = -1; - httpOnError(parser, 400, TSDB_CODE_HTTP_PARSE_METHOD_FAILED); + httpOnError(parser, HTTP_CODE_BAD_REQUEST, TSDB_CODE_HTTP_PARSE_METHOD_FAILED); } while (0); return ok; } @@ -612,7 +638,7 @@ static int32_t httpParserOnMethod(HttpParser *parser, HTTP_PARSER_STATE state, c if (httpAppendString(&parser->str, &c, 1)) { httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c); ok = -1; - httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_METHOD_FAILED); + httpOnError(parser, HTTP_CODE_INSUFFICIENT_STORAGE, TSDB_CODE_HTTP_PARSE_METHOD_FAILED); break; } break; @@ -621,7 +647,7 @@ static int32_t httpParserOnMethod(HttpParser *parser, HTTP_PARSER_STATE state, c if (!parser->method) { httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c); ok = -1; - httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_METHOD_FAILED); + httpOnError(parser, HTTP_CODE_INSUFFICIENT_STORAGE, TSDB_CODE_HTTP_PARSE_METHOD_FAILED); break; } else { httpTrace("context:%p, fd:%d, httpMethod:%s", pContext, pContext->fd, parser->method); @@ -641,7 +667,7 @@ static int32_t httpParserOnTarget(HttpParser *parser, HTTP_PARSER_STATE state, c if (httpAppendString(&parser->str, &c, 1)) { httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c); ok = -1; - httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_TARGET_FAILED); + httpOnError(parser, HTTP_CODE_INSUFFICIENT_STORAGE, TSDB_CODE_HTTP_PARSE_TARGET_FAILED); break; } break; @@ -650,7 +676,7 @@ static int32_t httpParserOnTarget(HttpParser *parser, HTTP_PARSER_STATE state, c if (!parser->target) { httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c); ok = -1; - httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_TARGET_FAILED); + httpOnError(parser, HTTP_CODE_INSUFFICIENT_STORAGE, TSDB_CODE_HTTP_PARSE_TARGET_FAILED); break; } 
httpClearString(&parser->str); @@ -670,13 +696,13 @@ static int32_t httpParserOnVersion(HttpParser *parser, HTTP_PARSER_STATE state, if (prefix[parser->str.pos] != c) { httpError("context:%p, fd:%d, parser state:%d, unexpected char:[%c]%02x", pContext, pContext->fd, state, c, c); ok = -1; - httpOnError(parser, 400, TSDB_CODE_HTTP_PARSE_VERSION_FAILED); + httpOnError(parser, HTTP_CODE_BAD_REQUEST, TSDB_CODE_HTTP_PARSE_VERSION_FAILED); break; } if (httpAppendString(&parser->str, &c, 1)) { httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c); ok = -1; - httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_VERSION_FAILED); + httpOnError(parser, HTTP_CODE_INSUFFICIENT_STORAGE, TSDB_CODE_HTTP_PARSE_VERSION_FAILED); break; } break; @@ -685,14 +711,14 @@ static int32_t httpParserOnVersion(HttpParser *parser, HTTP_PARSER_STATE state, if (c != '0' && c != '1' && c != '2') { httpError("context:%p, fd:%d, parser state:%d, unexpected char:[%c]%02x", pContext, pContext->fd, state, c, c); ok = -1; - httpOnError(parser, 400, TSDB_CODE_HTTP_PARSE_VERSION_FAILED); + httpOnError(parser, HTTP_CODE_BAD_REQUEST, TSDB_CODE_HTTP_PARSE_VERSION_FAILED); break; } if (httpAppendString(&parser->str, &c, 1)) { httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c); ok = -1; - httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_VERSION_FAILED); + httpOnError(parser, HTTP_CODE_INSUFFICIENT_STORAGE, TSDB_CODE_HTTP_PARSE_VERSION_FAILED); break; } @@ -709,7 +735,7 @@ static int32_t httpParserOnVersion(HttpParser *parser, HTTP_PARSER_STATE state, if (!parser->version) { httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c); ok = -1; - httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_VERSION_FAILED); + httpOnError(parser, HTTP_CODE_INSUFFICIENT_STORAGE, TSDB_CODE_HTTP_PARSE_VERSION_FAILED); break; } @@ -739,11 +765,20 @@ static int32_t httpParserOnSp(HttpParser *parser, HTTP_PARSER_STATE state, const } httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c); ok = -1; - httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_SP_FAILED); + httpOnError(parser, HTTP_CODE_INSUFFICIENT_STORAGE, TSDB_CODE_HTTP_PARSE_SP_FAILED); } while (0); return ok; } +static int32_t httpParserOnOptionalSp(HttpParser *parser, HTTP_PARSER_STATE state, const char c, int32_t *again) { + int32_t ok = 0; + if (c != ' ') { + *again = 1; + httpPopStack(parser); + } + return ok; +} + static int32_t httpParserOnStatusCode(HttpParser *parser, HTTP_PARSER_STATE state, const char c, int32_t *again) { HttpContext *pContext = parser->pContext; int32_t ok = 0; @@ -752,7 +787,7 @@ static int32_t httpParserOnStatusCode(HttpParser *parser, HTTP_PARSER_STATE stat if (httpAppendString(&parser->str, &c, 1)) { httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c); ok = -1; - httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_STATUS_FAILED); + httpOnError(parser, HTTP_CODE_INSUFFICIENT_STORAGE, TSDB_CODE_HTTP_PARSE_STATUS_FAILED); break; } if (parser->str.pos < 3) break; @@ -764,7 +799,7 @@ static int32_t httpParserOnStatusCode(HttpParser *parser, HTTP_PARSER_STATE stat } httpError("context:%p, fd:%d, parser state:%d, unexpected char:[%c]%02x", pContext, pContext->fd, state, c, c); ok = -1; - httpOnError(parser, 400, TSDB_CODE_HTTP_PARSE_STATUS_FAILED); + httpOnError(parser, HTTP_CODE_BAD_REQUEST, TSDB_CODE_HTTP_PARSE_STATUS_FAILED); } while (0); 
return ok; } @@ -778,7 +813,7 @@ static int32_t httpParserOnReasonPhrase(HttpParser *parser, HTTP_PARSER_STATE st if (!parser->reasonPhrase) { httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c); ok = -1; - httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_PHRASE_FAILED); + httpOnError(parser, HTTP_CODE_INSUFFICIENT_STORAGE, TSDB_CODE_HTTP_PARSE_PHRASE_FAILED); break; } ok = httpOnStatusLine(parser, parser->statusCode, parser->reasonPhrase); @@ -790,7 +825,7 @@ static int32_t httpParserOnReasonPhrase(HttpParser *parser, HTTP_PARSER_STATE st if (httpAppendString(&parser->str, &c, 1)) { httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c); ok = -1; - httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_PHRASE_FAILED); + httpOnError(parser, HTTP_CODE_INSUFFICIENT_STORAGE, TSDB_CODE_HTTP_PARSE_PHRASE_FAILED); break; } } while (0); @@ -802,7 +837,7 @@ static int32_t httpParserPostProcess(HttpParser *parser) { if (parser->gzip) { if (ehttp_gzip_finish(parser->gzip)) { httpError("context:%p, fd:%d, gzip failed", pContext, pContext->fd); - httpOnError(parser, 507, TSDB_CODE_HTTP_FINISH_GZIP_FAILED); + httpOnError(parser, HTTP_CODE_INSUFFICIENT_STORAGE, TSDB_CODE_HTTP_FINISH_GZIP_FAILED); return -1; } } @@ -819,13 +854,13 @@ static int32_t httpParserOnCrlf(HttpParser *parser, HTTP_PARSER_STATE state, con if (s[parser->str.pos] != c) { httpError("context:%p, fd:%d, parser state:%d, unexpected char:[%c]%02x", pContext, pContext->fd, state, c, c); ok = -1; - httpOnError(parser, 400, TSDB_CODE_HTTP_PARSE_CRLF_FAILED); + httpOnError(parser, HTTP_CODE_BAD_REQUEST, TSDB_CODE_HTTP_PARSE_CRLF_FAILED); break; } if (httpAppendString(&parser->str, &c, 1)) { httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c); ok = -1; - httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_CRLF_FAILED); + httpOnError(parser, HTTP_CODE_INSUFFICIENT_STORAGE, TSDB_CODE_HTTP_PARSE_CRLF_FAILED); break; } if (parser->str.pos == len) { @@ -862,18 +897,18 @@ static int32_t httpParserOnHeader(HttpParser *parser, HTTP_PARSER_STATE state, c if (httpAppendString(&parser->str, &c, 1)) { httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c); ok = -1; - httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_HEADER_FAILED); + httpOnError(parser, HTTP_CODE_INSUFFICIENT_STORAGE, TSDB_CODE_HTTP_PARSE_HEADER_FAILED); break; } httpPushStack(parser, HTTP_PARSER_CRLF); httpPushStack(parser, HTTP_PARSER_HEADER_VAL); - httpPushStack(parser, HTTP_PARSER_SP); + httpPushStack(parser, HTTP_PARSER_OPTIONAL_SP); httpPushStack(parser, HTTP_PARSER_HEADER_KEY); break; } httpError("context:%p, fd:%d, parser state:%d, unexpected char:[%c]%02x", pContext, pContext->fd, state, c, c); ok = -1; - httpOnError(parser, 400, TSDB_CODE_HTTP_PARSE_HEADER_FAILED); + httpOnError(parser, HTTP_CODE_BAD_REQUEST, TSDB_CODE_HTTP_PARSE_HEADER_FAILED); } while (0); return ok; } @@ -886,7 +921,7 @@ static int32_t httpParserOnHeaderKey(HttpParser *parser, HTTP_PARSER_STATE state if (httpAppendString(&parser->str, &c, 1)) { httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c); ok = -1; - httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_HEADER_KEY_FAILED); + httpOnError(parser, HTTP_CODE_INSUFFICIENT_STORAGE, TSDB_CODE_HTTP_PARSE_HEADER_KEY_FAILED); break; } break; @@ -896,7 +931,7 @@ static int32_t httpParserOnHeaderKey(HttpParser *parser, HTTP_PARSER_STATE 
state if (!parser->key) { httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c); ok = -1; - httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_HEADER_KEY_FAILED); + httpOnError(parser, HTTP_CODE_INSUFFICIENT_STORAGE, TSDB_CODE_HTTP_PARSE_HEADER_KEY_FAILED); break; } httpClearString(&parser->str); @@ -905,7 +940,7 @@ static int32_t httpParserOnHeaderKey(HttpParser *parser, HTTP_PARSER_STATE state } httpError("context:%p, fd:%d, parser state:%d, unexpected char:[%c]%02x", pContext, pContext->fd, state, c, c); ok = -1; - httpOnError(parser, 400, TSDB_CODE_HTTP_PARSE_HEADER_KEY_FAILED); + httpOnError(parser, HTTP_CODE_BAD_REQUEST, TSDB_CODE_HTTP_PARSE_HEADER_KEY_FAILED); } while (0); return ok; } @@ -919,7 +954,7 @@ static int32_t httpParserOnHeaderVal(HttpParser *parser, HTTP_PARSER_STATE state httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c); ok = -1; parser->parseCode = TSDB_CODE_HTTP_PARSE_HEADER_VAL_FAILED; - httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_HEADER_VAL_FAILED); + httpOnError(parser, HTTP_CODE_INSUFFICIENT_STORAGE, TSDB_CODE_HTTP_PARSE_HEADER_VAL_FAILED); break; } break; @@ -948,7 +983,7 @@ static int32_t httpParserOnChunkSize(HttpParser *parser, HTTP_PARSER_STATE state if (httpAppendString(&parser->str, &c, 1)) { httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c); ok = -1; - httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_CHUNK_SIZE_FAILED); + httpOnError(parser, HTTP_CODE_INSUFFICIENT_STORAGE, TSDB_CODE_HTTP_PARSE_CHUNK_SIZE_FAILED); break; } break; @@ -982,7 +1017,7 @@ static int32_t httpParserOnChunkSize(HttpParser *parser, HTTP_PARSER_STATE state } httpError("context:%p, fd:%d, parser state:%d, unexpected char:[%c]%02x", pContext, pContext->fd, state, c, c); ok = -1; - httpOnError(parser, 400, TSDB_CODE_HTTP_PARSE_CHUNK_SIZE_FAILED); + httpOnError(parser, HTTP_CODE_BAD_REQUEST, TSDB_CODE_HTTP_PARSE_CHUNK_SIZE_FAILED); } while (0); return ok; } @@ -994,7 +1029,7 @@ static int32_t httpParserOnChunk(HttpParser *parser, HTTP_PARSER_STATE state, co if (httpAppendString(&parser->str, &c, 1)) { httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c); ok = -1; - httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_CHUNK_FAILED); + httpOnError(parser, HTTP_CODE_INSUFFICIENT_STORAGE, TSDB_CODE_HTTP_PARSE_CHUNK_FAILED); break; } ++parser->receivedSize; @@ -1005,7 +1040,7 @@ static int32_t httpParserOnChunk(HttpParser *parser, HTTP_PARSER_STATE state, co if (ehttp_gzip_write(parser->gzip, parser->str.str, parser->str.pos)) { httpError("context:%p, fd:%d, gzip failed", pContext, pContext->fd); ok = -1; - httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_CHUNK_FAILED); + httpOnError(parser, HTTP_CODE_INSUFFICIENT_STORAGE, TSDB_CODE_HTTP_PARSE_CHUNK_FAILED); break; } } else { @@ -1027,7 +1062,7 @@ static int32_t httpParserOnEnd(HttpParser *parser, HTTP_PARSER_STATE state, cons do { ok = -1; httpError("context:%p, fd:%d, parser state:%d, unexpected char:[%c]%02x", pContext, pContext->fd, state, c, c); - httpOnError(parser, 507, TSDB_CODE_HTTP_PARSE_END_FAILED); + httpOnError(parser, HTTP_CODE_INSUFFICIENT_STORAGE, TSDB_CODE_HTTP_PARSE_END_FAILED); } while (0); return ok; } @@ -1061,6 +1096,10 @@ static int32_t httpParseChar(HttpParser *parser, const char c, int32_t *again) { ok = httpParserOnSp(parser, state, c, again); break; } + if (state == HTTP_PARSER_OPTIONAL_SP) { + ok = 
httpParserOnOptionalSp(parser, state, c, again); + break; + } if (state == HTTP_PARSER_STATUS_CODE) { ok = httpParserOnStatusCode(parser, state, c, again); break; @@ -1104,7 +1143,7 @@ static int32_t httpParseChar(HttpParser *parser, const char c, int32_t *again) { ok = -1; httpError("context:%p, fd:%d, unknown parse state:%d", pContext, pContext->fd, state); - httpOnError(parser, 500, TSDB_CODE_HTTP_PARSE_INVALID_STATE); + httpOnError(parser, HTTP_CODE_INTERNAL_SERVER_ERROR, TSDB_CODE_HTTP_PARSE_INVALID_STATE); } while (0); if (ok == -1) { @@ -1115,7 +1154,7 @@ static int32_t httpParseChar(HttpParser *parser, const char c, int32_t *again) { if (ok == -2) { ok = -1; httpError("context:%p, fd:%d, failed to parse, invalid state", pContext, pContext->fd); - httpOnError(parser, 500, TSDB_CODE_HTTP_PARSE_ERROR_STATE); + httpOnError(parser, HTTP_CODE_INTERNAL_SERVER_ERROR, TSDB_CODE_HTTP_PARSE_ERROR_STATE); } return ok; diff --git a/src/plugins/http/src/httpUtil.c b/src/plugins/http/src/httpUtil.c index 399a33954d..27b95a4934 100644 --- a/src/plugins/http/src/httpUtil.c +++ b/src/plugins/http/src/httpUtil.c @@ -188,13 +188,17 @@ bool httpMallocMultiCmds(HttpContext *pContext, int32_t cmdSize, int32_t bufferS bool httpReMallocMultiCmdsSize(HttpContext *pContext, int32_t cmdSize) { HttpSqlCmds *multiCmds = pContext->multiCmds; - if (cmdSize > HTTP_MAX_CMD_SIZE) { + if (cmdSize <= 0 && cmdSize > HTTP_MAX_CMD_SIZE) { httpError("context:%p, fd:%d, user:%s, mulitcmd size:%d large then %d", pContext, pContext->fd, pContext->user, cmdSize, HTTP_MAX_CMD_SIZE); return false; } - multiCmds->cmds = (HttpSqlCmd *)realloc(multiCmds->cmds, (size_t)cmdSize * sizeof(HttpSqlCmd)); + HttpSqlCmd *new_cmds = (HttpSqlCmd *)realloc(multiCmds->cmds, (size_t)cmdSize * sizeof(HttpSqlCmd)); + if (new_cmds == NULL && multiCmds->cmds) { + free(multiCmds->cmds); + } + multiCmds->cmds = new_cmds; if (multiCmds->cmds == NULL) { httpError("context:%p, fd:%d, user:%s, malloc cmds:%d error", pContext, pContext->fd, pContext->user, cmdSize); return false; @@ -208,13 +212,17 @@ bool httpReMallocMultiCmdsSize(HttpContext *pContext, int32_t cmdSize) { bool httpReMallocMultiCmdsBuffer(HttpContext *pContext, int32_t bufferSize) { HttpSqlCmds *multiCmds = pContext->multiCmds; - if (bufferSize > HTTP_MAX_BUFFER_SIZE) { + if (bufferSize <= 0 || bufferSize > HTTP_MAX_BUFFER_SIZE) { httpError("context:%p, fd:%d, user:%s, mulitcmd buffer size:%d large then %d", pContext, pContext->fd, pContext->user, bufferSize, HTTP_MAX_BUFFER_SIZE); return false; } - multiCmds->buffer = (char *)realloc(multiCmds->buffer, (size_t)bufferSize); + char *new_buffer = (char *)realloc(multiCmds->buffer, (size_t)bufferSize); + if (new_buffer == NULL && multiCmds->buffer) { + free(multiCmds->buffer); + } + multiCmds->buffer = new_buffer; if (multiCmds->buffer == NULL) { httpError("context:%p, fd:%d, user:%s, malloc buffer:%d error", pContext, pContext->fd, pContext->user, bufferSize); return false; @@ -237,6 +245,11 @@ void httpFreeMultiCmds(HttpContext *pContext) { JsonBuf *httpMallocJsonBuf(HttpContext *pContext) { if (pContext->jsonBuf == NULL) { pContext->jsonBuf = (JsonBuf *)malloc(sizeof(JsonBuf)); + if (pContext->jsonBuf == NULL) { + return NULL; + } + + memset(pContext->jsonBuf, 0, sizeof(JsonBuf)); } if (!pContext->jsonBuf->pContext) { From e59c4eb411580a8a67c46f04bdfae9a3ad1b5976 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Fri, 6 Aug 2021 13:35:03 +0800 Subject: [PATCH 024/185] [TD-5773]add some arm env in Drone --- .drone.yml | 108 
+++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 81 insertions(+), 27 deletions(-) diff --git a/.drone.yml b/.drone.yml index f7ee4e976f..b520f308ba 100644 --- a/.drone.yml +++ b/.drone.yml @@ -25,15 +25,14 @@ steps: - master --- kind: pipeline -name: test_arm64 +name: test_arm64_bionic platform: os: linux arch: arm64 - steps: - name: build - image: gcc + image: arm64v8/ubuntu:bionic commands: - apt-get update - apt-get install -y cmake build-essential @@ -48,9 +47,87 @@ steps: branch: - develop - master + - 2.0 --- kind: pipeline -name: test_arm +name: test_arm64_focal + +platform: + os: linux + arch: arm64 + +steps: +- name: build + image: arm64v8/ubuntu:focal + commands: + - echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections + - apt-get update + - apt-get install -y -qq cmake build-essential + - mkdir debug + - cd debug + - cmake .. -DCPUTYPE=aarch64 > /dev/null + - make + trigger: + event: + - pull_request + when: + branch: + - develop + - master + - 2.0 +--- +kind: pipeline +name: test_arm64_centos7 + +platform: + os: linux + arch: arm64 + +steps: +- name: build + image: arm64v8/centos:7 + commands: + - yum install -y gcc gcc-c++ make cmake git + - mkdir debug + - cd debug + - cmake .. -DCPUTYPE=aarch64 > /dev/null + - make + trigger: + event: + - pull_request + when: + branch: + - develop + - master + - 2.0 +--- +kind: pipeline +name: test_arm64_centos8 + +platform: + os: linux + arch: arm64 + +steps: +- name: build + image: arm64v8/centos:8 + commands: + - dnf install -y gcc gcc-c++ make cmake epel-release git libarchive + - mkdir debug + - cd debug + - cmake .. -DCPUTYPE=aarch64 > /dev/null + - make + trigger: + event: + - pull_request + when: + branch: + - develop + - master + - 2.0 +--- +kind: pipeline +name: test_arm_bionic platform: os: linux @@ -73,7 +150,6 @@ steps: branch: - develop - master - --- kind: pipeline name: build_trusty @@ -174,25 +250,3 @@ steps: - develop - master ---- -kind: pipeline -name: goodbye - -platform: - os: linux - arch: amd64 - -steps: -- name: 64-bit - image: alpine - commands: - - echo 64-bit is good. - when: - branch: - - develop - - master - - -depends_on: -- test_arm64 -- test_amd64 \ No newline at end of file From 90ffa91b2e0ff9c66f38b2c8d3cb5cef683c0cfb Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Fri, 6 Aug 2021 13:45:34 +0800 Subject: [PATCH 025/185] [TD-5773]add some arm env in Drone --- .drone.yml | 114 +++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 80 insertions(+), 34 deletions(-) diff --git a/.drone.yml b/.drone.yml index 3f6c622598..b520f308ba 100644 --- a/.drone.yml +++ b/.drone.yml @@ -23,18 +23,16 @@ steps: branch: - develop - master - - 2.0 --- kind: pipeline -name: test_arm64 +name: test_arm64_bionic platform: os: linux arch: arm64 - steps: - name: build - image: gcc + image: arm64v8/ubuntu:bionic commands: - apt-get update - apt-get install -y cmake build-essential @@ -52,7 +50,84 @@ steps: - 2.0 --- kind: pipeline -name: test_arm +name: test_arm64_focal + +platform: + os: linux + arch: arm64 + +steps: +- name: build + image: arm64v8/ubuntu:focal + commands: + - echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections + - apt-get update + - apt-get install -y -qq cmake build-essential + - mkdir debug + - cd debug + - cmake .. 
-DCPUTYPE=aarch64 > /dev/null + - make + trigger: + event: + - pull_request + when: + branch: + - develop + - master + - 2.0 +--- +kind: pipeline +name: test_arm64_centos7 + +platform: + os: linux + arch: arm64 + +steps: +- name: build + image: arm64v8/centos:7 + commands: + - yum install -y gcc gcc-c++ make cmake git + - mkdir debug + - cd debug + - cmake .. -DCPUTYPE=aarch64 > /dev/null + - make + trigger: + event: + - pull_request + when: + branch: + - develop + - master + - 2.0 +--- +kind: pipeline +name: test_arm64_centos8 + +platform: + os: linux + arch: arm64 + +steps: +- name: build + image: arm64v8/centos:8 + commands: + - dnf install -y gcc gcc-c++ make cmake epel-release git libarchive + - mkdir debug + - cd debug + - cmake .. -DCPUTYPE=aarch64 > /dev/null + - make + trigger: + event: + - pull_request + when: + branch: + - develop + - master + - 2.0 +--- +kind: pipeline +name: test_arm_bionic platform: os: linux @@ -75,8 +150,6 @@ steps: branch: - develop - master - - 2.0 - --- kind: pipeline name: build_trusty @@ -103,7 +176,6 @@ steps: branch: - develop - master - - 2.0 --- kind: pipeline name: build_xenial @@ -129,7 +201,6 @@ steps: branch: - develop - master - - 2.0 --- kind: pipeline @@ -155,7 +226,6 @@ steps: branch: - develop - master - - 2.0 --- kind: pipeline name: build_centos7 @@ -179,28 +249,4 @@ steps: branch: - develop - master - - 2.0 ---- -kind: pipeline -name: goodbye - -platform: - os: linux - arch: amd64 - -steps: -- name: 64-bit - image: alpine - commands: - - echo 64-bit is good. - when: - branch: - - develop - - master - - 2.0 - - -depends_on: -- test_arm64 -- test_amd64 \ No newline at end of file From 0868121415201ee1ec0b66b34a841c372b20bebd Mon Sep 17 00:00:00 2001 From: Linhe Huo Date: Fri, 6 Aug 2021 14:48:50 +0800 Subject: [PATCH 026/185] [TD-5860]: fix grafanaplugin error in grafana 6.x(6.2+) (#7222) --- src/connector/grafanaplugin | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/connector/grafanaplugin b/src/connector/grafanaplugin index 4a4d79099b..a44ec1ca49 160000 --- a/src/connector/grafanaplugin +++ b/src/connector/grafanaplugin @@ -1 +1 @@ -Subproject commit 4a4d79099b076b8ff12d5b4fdbcba54049a6866d +Subproject commit a44ec1ca493ad01b2bf825b6418f69e11f548206 From b6280cee5822640016732d01594b6ac16df47c83 Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Fri, 6 Aug 2021 03:50:27 -0300 Subject: [PATCH 027/185] [TD-5816]: add testcase alter/alterColMultiTimes.py for TD-5816 fix a small-probability bug for insert/schemalessInsert.py --- tests/pytest/alter/alterColMultiTimes.py | 67 ++++++++++++++++++++++++ tests/pytest/fulltest.sh | 1 + tests/pytest/insert/schemalessInsert.py | 2 +- 3 files changed, 69 insertions(+), 1 deletion(-) create mode 100644 tests/pytest/alter/alterColMultiTimes.py diff --git a/tests/pytest/alter/alterColMultiTimes.py b/tests/pytest/alter/alterColMultiTimes.py new file mode 100644 index 0000000000..173ca8158d --- /dev/null +++ b/tests/pytest/alter/alterColMultiTimes.py @@ -0,0 +1,67 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import random +import string +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def genColList(self): + ''' + generate column list + ''' + col_list = list() + for i in range(1, 18): + col_list.append(f'c{i}') + return col_list + + def genIncreaseValue(self, input_value): + ''' + add ', 1' to end of value every loop + ''' + value_list = list(input_value) + value_list.insert(-1, ", 1") + return ''.join(value_list) + + def insertAlter(self): + ''' + after each alter and insert, when execute 'select * from {tbname};' taosd will coredump + ''' + tbname = ''.join(random.choice(string.ascii_letters.lower()) for i in range(7)) + input_value = '(now, 1)' + tdSql.execute(f'create table {tbname} (ts timestamp, c0 int);') + tdSql.execute(f'insert into {tbname} values {input_value};') + for col in self.genColList(): + input_value = self.genIncreaseValue(input_value) + tdSql.execute(f'alter table {tbname} add column {col} int;') + tdSql.execute(f'insert into {tbname} values {input_value};') + tdSql.query(f'select * from {tbname};') + tdSql.checkRows(18) + + def run(self): + tdSql.prepare() + self.insertAlter() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 783ee98da3..d59088574d 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -380,6 +380,7 @@ python3 ./test.py -f query/querySession.py python3 test.py -f alter/alter_create_exception.py python3 ./test.py -f insert/flushwhiledrop.py python3 ./test.py -f insert/schemalessInsert.py +python3 ./test.py -f alter/alterColMultiTimes.py #======================p4-end=============== python3 test.py -f tools/taosdemoAllTest/pytest.py diff --git a/tests/pytest/insert/schemalessInsert.py b/tests/pytest/insert/schemalessInsert.py index 5c93095a1e..88abea477a 100644 --- a/tests/pytest/insert/schemalessInsert.py +++ b/tests/pytest/insert/schemalessInsert.py @@ -705,7 +705,7 @@ class TDTestCase: case no id when stb exist """ self.cleanStb() - input_sql, stb_name = self.genFullTypeSql(t0="f", c0="f") + input_sql, stb_name = self.genFullTypeSql(tb_name="sub_table_0123456", t0="f", c0="f") self.resCmp(input_sql, stb_name) input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, id_noexist_tag=True, t0="f", c0="f") self.resCmp(input_sql, stb_name, condition='where tbname like "t_%"') From 14ab3747e6b31ca03e86cb72ad6d75748a76d8ef Mon Sep 17 00:00:00 2001 From: Linhe Huo Date: Fri, 6 Aug 2021 14:57:38 +0800 Subject: [PATCH 028/185] [TD-5860]: fix grafanaplugin error in grafana 6.x(6.2+) (#7224) --- src/connector/grafanaplugin | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/connector/grafanaplugin b/src/connector/grafanaplugin index 4a4d79099b..a44ec1ca49 160000 --- a/src/connector/grafanaplugin +++ b/src/connector/grafanaplugin @@ -1 +1 @@ -Subproject commit 4a4d79099b076b8ff12d5b4fdbcba54049a6866d +Subproject commit 
a44ec1ca493ad01b2bf825b6418f69e11f548206 From f2814e453cc133f3afde53c11c1a4661d2d62990 Mon Sep 17 00:00:00 2001 From: wpan Date: Fri, 6 Aug 2021 16:56:39 +0800 Subject: [PATCH 029/185] block column name as string --- src/client/src/tscSQLParser.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 7adc0812ae..6df724881e 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -4253,11 +4253,15 @@ static bool isValidExpr(tSqlExpr* pLeft, tSqlExpr* pRight, int32_t optr) { if (pRight == NULL) { return true; } - + if (pLeft->tokenId >= TK_BOOL && pLeft->tokenId <= TK_BINARY && pRight->tokenId >= TK_BOOL && pRight->tokenId <= TK_BINARY) { return false; } + if (pLeft->tokenId >= TK_BOOL && pLeft->tokenId <= TK_BINARY && (optr == TK_NOTNULL || optr == TK_ISNULL)) { + return false; + } + return true; } From 85bf62aa182e0d4deec788749e39670de83a9578 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 6 Aug 2021 18:28:18 +0800 Subject: [PATCH 030/185] Hotfix/sangshuduo/td 5811 taosdemo delay us for master (#7204) * [TD-5811]: taosdemo use us to count delay. to avoid very large number if the delay is 0ms. * fix interlace delay unit. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 2f3f51849c..f327a8a585 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -6319,7 +6319,7 @@ static void printStatPerThread(threadInfo *pThreadInfo) pThreadInfo->totalInsertRows, pThreadInfo->totalAffectedRows, (pThreadInfo->totalDelay)? - (double)(pThreadInfo->totalAffectedRows/(pThreadInfo->totalDelay/1000000.0)): + (double)(pThreadInfo->totalAffectedRows/((double)pThreadInfo->totalDelay/1000000.0)): FLT_MAX); } @@ -6539,7 +6539,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { verbosePrint("[%d] %s() LN%d, buffer=%s\n", pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->buffer); - startTs = taosGetTimestampMs(); + startTs = taosGetTimestampUs(); if (recOfBatch == 0) { errorPrint("[%d] %s() LN%d Failed to insert records of batch %d\n", @@ -6555,10 +6555,10 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { } int64_t affectedRows = execInsert(pThreadInfo, recOfBatch); - endTs = taosGetTimestampMs(); + endTs = taosGetTimestampUs(); uint64_t delay = endTs - startTs; - performancePrint("%s() LN%d, insert execution time is %"PRIu64"ms\n", - __func__, __LINE__, delay); + performancePrint("%s() LN%d, insert execution time is %10.2f ms\n", + __func__, __LINE__, delay / 1000.0); verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n", pThreadInfo->threadID, __func__, __LINE__, affectedRows); @@ -6721,8 +6721,8 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { endTs = taosGetTimestampUs(); uint64_t delay = endTs - startTs; - performancePrint("%s() LN%d, insert execution time is %"PRId64"ms\n", - __func__, __LINE__, delay); + performancePrint("%s() LN%d, insert execution time is %10.f ms\n", + __func__, __LINE__, delay/1000.0); verbosePrint("[%d] %s() LN%d affectedRows=%d\n", pThreadInfo->threadID, __func__, __LINE__, affectedRows); From 0b05fd171e6ee9da131bd14e0b69163ba895b3e6 Mon Sep 17 00:00:00 2001 From: haoranchen Date: Fri, 6 Aug 2021 18:59:27 +0800 Subject: [PATCH 031/185] modify makepkg.sh about lite's package --- packaging/tools/makepkg.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index 1064f0b0e5..e9266ec80d 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -35,7 +35,7 @@ fi if [ "$pagMode" == "lite" ]; then strip ${build_dir}/bin/taosd strip ${build_dir}/bin/taos - bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh" + bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh ${script_dir}/startPre.sh" else bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator\ ${script_dir}/remove.sh ${script_dir}/set_core.sh ${script_dir}/startPre.sh ${script_dir}/taosd-dump-cfg.gdb" From 3aba1df2f3fd8d83bcd2b53f2c9bee4eb35e300e Mon Sep 17 00:00:00 2001 From: wenzhouwww Date: Fri, 6 Aug 2021 19:52:39 +0800 Subject: [PATCH 032/185] fix an test case error ! --- tests/pytest/fulltest.sh | 12 +- .../NanoTestCase/nano_samples.csv | 100 +++++ .../NanoTestCase/nano_sampletags.csv | 100 +++++ .../NanoTestCase/taosdemoInsertMSDB.json | 63 +++ .../NanoTestCase/taosdemoInsertNanoDB.json | 63 +++ .../NanoTestCase/taosdemoInsertUSDB.json | 63 +++ .../taosdemoTestInsertTime_step.py | 115 ++++++ .../taosdemoTestNanoDatabase.json | 88 +++++ .../taosdemoTestNanoDatabaseInsertForSub.json | 84 ++++ .../taosdemoTestNanoDatabaseNow.json | 62 +++ .../taosdemoTestNanoDatabasecsv.json | 84 ++++ .../taosdemoTestSupportNanoInsert.py | 156 ++++++++ .../taosdemoTestSupportNanoQuery.json | 92 +++++ .../taosdemoTestSupportNanoQuery.py | 157 ++++++++ .../taosdemoTestSupportNanoQuerycsv.json | 110 ++++++ .../taosdemoTestSupportNanoSubscribe.json | 32 ++ .../taosdemoTestSupportNanosubscribe.py | 125 ++++++ .../NanoTestCase/taosdumpTestNanoSupport.py | 362 ++++++++++++++++++ 18 files changed, 1862 insertions(+), 6 deletions(-) create mode 100644 tests/pytest/tools/taosdemoAllTest/NanoTestCase/nano_samples.csv create mode 100644 tests/pytest/tools/taosdemoAllTest/NanoTestCase/nano_sampletags.csv create mode 100644 tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertMSDB.json create mode 100644 tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertNanoDB.json create mode 100644 tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertUSDB.json create mode 100644 tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestInsertTime_step.py create mode 100644 tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json create mode 100644 tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json create mode 100644 tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json create mode 100644 tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json create mode 100644 tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py create mode 100644 tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.json create mode 100644 tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.py create mode 100644 tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuerycsv.json create mode 100644 tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoSubscribe.json create mode 100644 tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanosubscribe.py create mode 100644 tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdumpTestNanoSupport.py diff --git a/tests/pytest/fulltest.sh 
b/tests/pytest/fulltest.sh index eb9dbeef79..cccbd8ac8c 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -167,11 +167,11 @@ python3 test.py -f tools/taosdemoTestInterlace.py python3 test.py -f tools/taosdemoTestQuery.py # nano support -#python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py -#python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanoQuery.py -#python3 test.py -f tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py -#python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertTime_step.py -#python3 test.py -f tools/taosdumpTestNanoSupport.py +python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py +python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.py +python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanosubscribe.py +python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestInsertTime_step.py +python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdumpTestNanoSupport.py # update python3 ./test.py -f update/allow_update.py @@ -382,7 +382,7 @@ python3 ./test.py -f insert/flushwhiledrop.py python3 ./test.py -f insert/schemalessInsert.py #======================p4-end=============== -# python3 test.py -f tools/taosdemoAllTest/pytest.py + diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/nano_samples.csv b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/nano_samples.csv new file mode 100644 index 0000000000..5fc779b41b --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/nano_samples.csv @@ -0,0 +1,100 @@ +8.855,"binary_str0" ,1626870128248246976 +8.75,"binary_str1" ,1626870128249060032 +5.44,"binary_str2" ,1626870128249067968 +8.45,"binary_str3" ,1626870128249072064 +4.07,"binary_str4" ,1626870128249075904 +6.97,"binary_str5" ,1626870128249078976 +6.86,"binary_str6" ,1626870128249082048 +1.585,"binary_str7" ,1626870128249085120 +1.4,"binary_str8" ,1626870128249087936 +5.135,"binary_str9" ,1626870128249092032 +3.15,"binary_str10" ,1626870128249095104 +1.765,"binary_str11" ,1626870128249097920 +7.71,"binary_str12" ,1626870128249100992 +3.91,"binary_str13" ,1626870128249104064 +5.615,"binary_str14" ,1626870128249106880 +9.495,"binary_str15" ,1626870128249109952 +3.825,"binary_str16" ,1626870128249113024 +1.94,"binary_str17" ,1626870128249117120 +5.385,"binary_str18" ,1626870128249119936 +7.075,"binary_str19" ,1626870128249123008 +5.715,"binary_str20" ,1626870128249126080 +1.83,"binary_str21" ,1626870128249128896 +6.365,"binary_str22" ,1626870128249131968 +6.55,"binary_str23" ,1626870128249135040 +6.315,"binary_str24" ,1626870128249138112 +3.82,"binary_str25" ,1626870128249140928 +2.455,"binary_str26" ,1626870128249145024 +7.795,"binary_str27" ,1626870128249148096 +2.47,"binary_str28" ,1626870128249150912 +1.37,"binary_str29" ,1626870128249155008 +5.39,"binary_str30" ,1626870128249158080 +5.13,"binary_str31" ,1626870128249160896 +4.09,"binary_str32" ,1626870128249163968 +5.855,"binary_str33" ,1626870128249167040 +0.17,"binary_str34" ,1626870128249170112 +1.955,"binary_str35" ,1626870128249173952 +0.585,"binary_str36" ,1626870128249178048 +0.33,"binary_str37" ,1626870128249181120 +7.925,"binary_str38" ,1626870128249183936 +9.685,"binary_str39" ,1626870128249187008 +2.6,"binary_str40" ,1626870128249191104 +5.705,"binary_str41" ,1626870128249193920 +3.965,"binary_str42" ,1626870128249196992 +4.43,"binary_str43" ,1626870128249200064 +8.73,"binary_str44" ,1626870128249202880 +3.105,"binary_str45" 
,1626870128249205952 +9.39,"binary_str46" ,1626870128249209024 +2.825,"binary_str47" ,1626870128249212096 +9.675,"binary_str48" ,1626870128249214912 +9.99,"binary_str49" ,1626870128249217984 +4.51,"binary_str50" ,1626870128249221056 +4.94,"binary_str51" ,1626870128249223872 +7.72,"binary_str52" ,1626870128249226944 +4.135,"binary_str53" ,1626870128249231040 +2.325,"binary_str54" ,1626870128249234112 +4.585,"binary_str55" ,1626870128249236928 +8.76,"binary_str56" ,1626870128249240000 +4.715,"binary_str57" ,1626870128249243072 +0.56,"binary_str58" ,1626870128249245888 +5.35,"binary_str59" ,1626870128249249984 +5.075,"binary_str60" ,1626870128249253056 +6.665,"binary_str61" ,1626870128249256128 +7.13,"binary_str62" ,1626870128249258944 +2.775,"binary_str63" ,1626870128249262016 +5.775,"binary_str64" ,1626870128249265088 +1.62,"binary_str65" ,1626870128249267904 +1.625,"binary_str66" ,1626870128249270976 +8.15,"binary_str67" ,1626870128249274048 +0.75,"binary_str68" ,1626870128249277120 +3.265,"binary_str69" ,1626870128249280960 +8.585,"binary_str70" ,1626870128249284032 +1.88,"binary_str71" ,1626870128249287104 +8.44,"binary_str72" ,1626870128249289920 +5.12,"binary_str73" ,1626870128249295040 +2.58,"binary_str74" ,1626870128249298112 +9.42,"binary_str75" ,1626870128249300928 +1.765,"binary_str76" ,1626870128249304000 +2.66,"binary_str77" ,1626870128249308096 +1.405,"binary_str78" ,1626870128249310912 +5.595,"binary_str79" ,1626870128249315008 +2.28,"binary_str80" ,1626870128249318080 +9.24,"binary_str81" ,1626870128249320896 +9.03,"binary_str82" ,1626870128249323968 +6.055,"binary_str83" ,1626870128249327040 +1.74,"binary_str84" ,1626870128249330112 +5.77,"binary_str85" ,1626870128249332928 +1.97,"binary_str86" ,1626870128249336000 +0.3,"binary_str87" ,1626870128249339072 +7.145,"binary_str88" ,1626870128249342912 +0.88,"binary_str89" ,1626870128249345984 +8.025,"binary_str90" ,1626870128249349056 +4.81,"binary_str91" ,1626870128249351872 +0.725,"binary_str92" ,1626870128249355968 +3.85,"binary_str93" ,1626870128249359040 +9.455,"binary_str94" ,1626870128249362112 +2.265,"binary_str95" ,1626870128249364928 +3.985,"binary_str96" ,1626870128249368000 +9.375,"binary_str97" ,1626870128249371072 +0.2,"binary_str98" ,1626870128249373888 +6.95,"binary_str99" ,1626870128249377984 diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/nano_sampletags.csv b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/nano_sampletags.csv new file mode 100644 index 0000000000..18fb855d6d --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/nano_sampletags.csv @@ -0,0 +1,100 @@ +"string0",7,8.615 +"string1",4,9.895 +"string2",3,2.92 +"string3",3,5.62 +"string4",7,1.615 +"string5",6,1.45 +"string6",5,7.48 +"string7",7,3.01 +"string8",5,4.76 +"string9",10,7.09 +"string10",2,8.38 +"string11",7,8.65 +"string12",5,5.025 +"string13",10,5.765 +"string14",2,4.57 +"string15",2,1.03 +"string16",7,6.98 +"string17",10,0.23 +"string18",7,5.815 +"string19",1,2.37 +"string20",10,8.865 +"string21",3,1.235 +"string22",2,8.62 +"string23",9,1.045 +"string24",8,4.34 +"string25",1,5.455 +"string26",2,4.475 +"string27",1,6.95 +"string28",2,3.39 +"string29",3,6.79 +"string30",7,9.735 +"string31",1,9.79 +"string32",10,9.955 +"string33",1,5.095 +"string34",3,3.86 +"string35",9,5.105 +"string36",10,4.22 +"string37",1,2.78 +"string38",9,6.345 +"string39",1,0.975 +"string40",5,6.16 +"string41",4,7.735 +"string42",5,6.6 +"string43",8,2.845 +"string44",1,0.655 +"string45",3,2.995 +"string46",9,3.6 +"string47",8,3.47 
+"string48",3,7.98 +"string49",6,2.225 +"string50",9,5.44 +"string51",4,6.335 +"string52",3,2.955 +"string53",1,0.565 +"string54",6,5.575 +"string55",6,9.905 +"string56",9,6.025 +"string57",8,0.94 +"string58",10,0.15 +"string59",8,1.555 +"string60",4,2.28 +"string61",2,8.29 +"string62",9,6.22 +"string63",6,3.35 +"string64",10,6.7 +"string65",3,9.345 +"string66",7,9.815 +"string67",1,5.365 +"string68",10,3.81 +"string69",1,6.405 +"string70",8,2.715 +"string71",3,8.58 +"string72",8,6.34 +"string73",2,7.49 +"string74",4,8.64 +"string75",3,8.995 +"string76",7,3.465 +"string77",1,7.64 +"string78",6,3.65 +"string79",6,1.4 +"string80",6,5.875 +"string81",2,1.22 +"string82",5,7.87 +"string83",9,8.41 +"string84",9,8.9 +"string85",9,3.89 +"string86",2,5.0 +"string87",2,4.495 +"string88",4,2.835 +"string89",3,5.895 +"string90",7,8.41 +"string91",5,5.125 +"string92",7,9.165 +"string93",5,8.315 +"string94",10,7.485 +"string95",7,4.635 +"string96",2,6.015 +"string97",8,0.595 +"string98",3,8.79 +"string99",4,1.72 diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertMSDB.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertMSDB.json new file mode 100644 index 0000000000..8bd5ddbae8 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertMSDB.json @@ -0,0 +1,63 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "testdb3", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 3600, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + }] + }] +} + diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertNanoDB.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertNanoDB.json new file mode 100644 index 0000000000..5408a9841a --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertNanoDB.json @@ -0,0 +1,63 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + 
"result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "testdb1", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ns", + "keep": 3600, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + }] + }] +} + diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertUSDB.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertUSDB.json new file mode 100644 index 0000000000..13eb80f3cf --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertUSDB.json @@ -0,0 +1,63 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "testdb2", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "us", + "keep": 3600, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + }] + }] +} + diff --git 
a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestInsertTime_step.py b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestInsertTime_step.py new file mode 100644 index 0000000000..b248eda6b0 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestInsertTime_step.py @@ -0,0 +1,115 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # insert: create one or mutiple tables per sql and insert multiple rows per sql + + # check the params of taosdemo about time_step is nano + os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoInsertNanoDB.json -y " % binPath) + tdSql.execute("use testdb1") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from tb0_0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000) + tdSql.query("describe stb0") + tdSql.getData(9, 1) + tdSql.checkDataType(9, 1,"TIMESTAMP") + tdSql.query("select last(ts) from stb0") + tdSql.checkData(0, 0,"2021-07-01 00:00:00.000099000") + + # check the params of taosdemo about time_step is us + os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoInsertUSDB.json -y " % binPath) + tdSql.execute("use testdb2") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from tb0_0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000) + tdSql.query("describe stb0") + tdSql.getData(9, 1) + tdSql.checkDataType(9, 1,"TIMESTAMP") + tdSql.query("select last(ts) from stb0") + tdSql.checkData(0, 0,"2021-07-01 00:00:00.099000") + + # check the params of taosdemo about time_step is ms + os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoInsertMSDB.json -y " % binPath) + tdSql.execute("use testdb3") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from 
tb0_0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000) + tdSql.query("describe stb0") + tdSql.checkDataType(9, 1,"TIMESTAMP") + tdSql.query("select last(ts) from stb0") + tdSql.checkData(0, 0,"2021-07-01 00:01:39.000") + + + os.system("rm -rf ./res.txt") + os.system("rm -rf ./*.py.sql") + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json new file mode 100644 index 0000000000..38ac666fac --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "nsdb", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ns", + "keep": 3600, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb1_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 10, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": 
"NCHAR","len":16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json new file mode 100644 index 0000000000..9ef4a0af66 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json @@ -0,0 +1,84 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "subnsdb", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ns", + "keep": 3600, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "samples", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/NanoTestCase/nano_samples.csv", + "tags_file": "./tools/taosdemoAllTest/NanoTestCase/nano_sampletags.csv", + "columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}], + "tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "tb1_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "samples", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 10, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/NanoTestCase/nano_samples.csv", + "tags_file": "./tools/taosdemoAllTest/NanoTestCase/nano_sampletags.csv", + "columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}], + "tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json new file mode 100644 index 0000000000..a09dec21fa --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + 
"databases": [{ + "dbinfo": { + "name": "nsdb2", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ns", + "keep": 3600, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "", + "sample_file": "", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2}, + {"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1}, + {"type": "BOOL"},{"type": "NCHAR","len":16}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json new file mode 100644 index 0000000000..e99c528c6d --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json @@ -0,0 +1,84 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "nsdbcsv", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ns", + "keep": 3600, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "samples", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/NanoTestCase/nano_samples.csv", + "tags_file": "./tools/taosdemoAllTest/NanoTestCase/nano_sampletags.csv", + "columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}], + "tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "tb1_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "samples", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "insert_interval":0, + "max_sql_len": 
1024000, + "disorder_ratio": 10, + "disorder_range": 1000, + "timestamp_step": 10000000, + "start_timestamp": "2021-07-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/NanoTestCase/nano_samples.csv", + "tags_file": "./tools/taosdemoAllTest/NanoTestCase/nano_sampletags.csv", + "columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}], + "tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py new file mode 100644 index 0000000000..8fcb263125 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py @@ -0,0 +1,156 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + + # insert: create one or mutiple tables per sql and insert multiple rows per sql + # insert data from a special timestamp + # check stable stb0 + + os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json -y " % binPath) + tdSql.execute("use nsdb") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from tb0_0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000) + tdSql.query("describe stb0") + tdSql.checkDataType(9, 1,"TIMESTAMP") + tdSql.query("select last(ts) from stb0") + tdSql.checkData(0, 0,"2021-07-01 00:00:00.990000000") + + # check stable stb1 which is insert with disord + + tdSql.query("select count (tbname) from stb1") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from tb1_0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 10000) + # check c8 is an nano timestamp + tdSql.query("describe stb1") + tdSql.checkDataType(9, 1,"TIMESTAMP") + # check insert timestamp_step is nano_second + tdSql.query("select last(ts) from stb1") + tdSql.checkData(0, 
0,"2021-07-01 00:00:00.990000000") + + # insert data from now time + + # check stable stb0 + os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json -y " % binPath) + + tdSql.execute("use nsdb2") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from tb0_0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000) + # check c8 is an nano timestamp + tdSql.query("describe stb0") + tdSql.checkDataType(9,1,"TIMESTAMP") + + # insert by csv files and timetamp is long int , strings in ts and cols + + os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json -y " % binPath) + tdSql.execute("use nsdbcsv") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000) + tdSql.query("describe stb0") + tdSql.checkDataType(3, 1, "TIMESTAMP") + tdSql.query("select count(*) from stb0 where ts > \"2021-07-01 00:00:00.490000000\"") + tdSql.checkData(0, 0, 5000) + tdSql.query("select count(*) from stb0 where ts < 1626918583000000000") + tdSql.checkData(0, 0, 10000) + + os.system("rm -rf ./insert_res.txt") + os.system("rm -rf tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNano*.py.sql") + + # taosdemo test insert with command and parameter , detals show taosdemo --help + os.system("%staosdemo -u root -P taosdata -p 6030 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) + tdSql.query("select count(*) from test.meters") + tdSql.checkData(0, 0, 600) + # check taosdemo -s + + sqls_ls = ['drop database if exists nsdbsql;','create database nsdbsql precision "ns" keep 3600 days 6 update 1;', + 'use nsdbsql;','CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int);', + 'CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);', + 'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);', + 'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 85, 32, 0.76);'] + + with open("./taosdemoTestNanoCreateDB.sql",mode ="a" ) as sql_files: + for sql in sqls_ls: + sql_files.write(sql+"\n") + sql_files.close() + + sleep(10) + + os.system("%staosdemo -s taosdemoTestNanoCreateDB.sql -y " % binPath) + tdSql.query("select count(*) from nsdbsql.meters") + tdSql.checkData(0, 0, 2) + + os.system("rm -rf ./res.txt") + os.system("rm -rf ./*.py.sql") + os.system("rm -rf ./taosdemoTestNanoCreateDB.sql") + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.json new file mode 100644 index 0000000000..fff1017588 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.json @@ -0,0 +1,92 @@ +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "nsdb", + "query_times": 10, + "query_mode": "taosc", + "specified_table_query": { + "query_interval": 1, + "concurrent": 2, + "sqls": [ + { + "sql": "select count(*) from stb0 where ts>\"2021-07-01 
00:01:00.000000000 \" ;", + "result": "./query_res0.txt" + }, + { + "sql": "select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;", + "result": "./query_res1.txt" + }, + { + "sql": "select count(*) from stb0 where ts>now-20d ;", + "result": "./query_res2.txt" + }, + { + "sql": "select max(c10) from stb0;", + "result": "./query_res3.txt" + }, + { + "sql": "select min(c1) from stb0;", + "result": "./query_res4.txt" + }, + { + "sql": "select avg(c1) from stb0;", + "result": "./query_res5.txt" + }, + { + "sql":"select count(*) from stb0 group by tbname;", + "result":"./query_res6.txt" + } + + ] + }, + "super_table_query": { + "stblname": "stb0", + "query_interval": 0, + "threads": 4, + "sqls": [ + { + "sql": "select count(*) from xxxx where ts>\"2021-07-01 00:01:00.000000000 \" ;", + "result": "./query_res_tb0.txt" + }, + { + "sql":"select count(*) from xxxx where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;", + "result": "./query_res_tb1.txt" + }, + { + "sql":"select first(*) from xxxx ;", + "result": "./query_res_tb2.txt" + }, + { + "sql":"select last(*) from xxxx;", + "result": "./query_res_tb3.txt" + + }, + { + "sql":"select last_row(*) from xxxx ;", + "result": "./query_res_tb4.txt" + + }, + { + "sql":"select max(c10) from xxxx ;", + "result": "./query_res_tb5.txt" + + }, + { + "sql":"select min(c1) from xxxx ;", + "result": "./query_res_tb6.txt" + + }, + { + "sql":"select avg(c10) from xxxx ;", + "result": "./query_res_tb7.txt" + + } + + ] + } + } \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.py b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.py new file mode 100644 index 0000000000..6c3e4d6c8a --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.py @@ -0,0 +1,157 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # query: query test for nanoSecond with where and max min groupby order + os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json -y " % binPath) + + tdSql.execute("use nsdb") + + # use where to filter + + tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:00:00.590000000 \" ") + tdSql.checkData(0, 0, 4000) + tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:00:00.000000000\" and ts <=\"2021-07-01 00:00:00.590000000\" ") + tdSql.checkData(0, 0, 5900) + + tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 00:00:00.590000000 \" ;") + tdSql.checkData(0, 0, 40) + tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 00:00:00.000000000\" and ts <=\"2021-07-01 00:00:00.590000000\" ") + tdSql.checkData(0, 0, 59) + + + # select max min avg from special col + tdSql.query("select max(c10) from stb0;") + print("select max(c10) from stb0 : " , tdSql.getData(0, 0)) + + tdSql.query("select max(c10) from tb0_0;") + print("select max(c10) from tb0_0 : " , tdSql.getData(0, 0)) + + + tdSql.query("select min(c1) from stb0;") + print( "select min(c1) from stb0 : " , tdSql.getData(0, 0)) + + tdSql.query("select min(c1) from tb0_0;") + print( "select min(c1) from tb0_0 : " , tdSql.getData(0, 0)) + + tdSql.query("select avg(c1) from stb0;") + print( "select avg(c1) from stb0 : " , tdSql.getData(0, 0)) + + tdSql.query("select avg(c1) from tb0_0;") + print( "select avg(c1) from tb0_0 : " , tdSql.getData(0, 0)) + + tdSql.query("select count(*) from stb0 group by tbname;") + tdSql.checkData(0, 0, 100) + tdSql.checkData(10, 0, 100) + + # query : query above sqls by taosdemo and continuously + + os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.json -y " % binPath) + + + os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json -y " % binPath) + tdSql.execute("use nsdbcsv") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10000) + tdSql.query("describe stb0") + tdSql.checkDataType(3, 1, "TIMESTAMP") + tdSql.query("select count(*) from stb0 where ts >\"2021-07-01 00:00:00.490000000\"") + tdSql.checkData(0, 0, 5000) + tdSql.query("select count(*) from stb0 where ts 162687012800000000') 
+ tdSql.execute('select count(*) from stb0 where c2 < 162687012800000000') + tdSql.execute('select count(*) from stb0 where c2 = 162687012800000000') + tdSql.execute('select count(*) from stb0 where c2 != 162687012800000000') + tdSql.execute('select count(*) from stb0 where c2 <> 162687012800000000') + tdSql.execute('select count(*) from stb0 where c2 > "2021-07-21 20:22:08.248246976"') + tdSql.execute('select count(*) from stb0 where c2 < "2021-07-21 20:22:08.248246976"') + tdSql.execute('select count(*) from stb0 where c2 = "2021-07-21 20:22:08.248246976"') + tdSql.execute('select count(*) from stb0 where c2 != "2021-07-21 20:22:08.248246976"') + tdSql.execute('select count(*) from stb0 where c2 <> "2021-07-21 20:22:08.248246976"') + tdSql.execute('select count(*) from stb0 where ts between "2021-07-01 00:00:00.000000000" and "2021-07-01 00:00:00.990000000"') + tdSql.execute('select count(*) from stb0 where ts between 1625068800000000000 and 1625068801000000000') + tdSql.query('select avg(c0) from stb0 interval(5000000000b)') + tdSql.checkRows(1) + + tdSql.query('select avg(c0) from stb0 interval(100000000b)') + tdSql.checkRows(10) + + tdSql.error('select avg(c0) from stb0 interval(1b)') + tdSql.error('select avg(c0) from stb0 interval(999b)') + + tdSql.query('select avg(c0) from stb0 interval(1000b)') + tdSql.checkRows(100) + + tdSql.query('select avg(c0) from stb0 interval(1u)') + tdSql.checkRows(100) + + tdSql.query('select avg(c0) from stb0 interval(100000000b) sliding (100000000b)') + tdSql.checkRows(10) + + # query : query above sqls by taosdemo and continuously + os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuerycsv.json -y " % binPath) + + os.system("rm -rf ./query_res*.txt*") + os.system("rm -rf tools/taosdemoAllTest/NanoTestCase/*.py.sql") + + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuerycsv.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuerycsv.json new file mode 100644 index 0000000000..2323b0f370 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuerycsv.json @@ -0,0 +1,110 @@ +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "nsdbcsv", + "query_times": 10, + "query_mode": "taosc", + "specified_table_query": { + "query_interval": 1, + "concurrent": 2, + "sqls": [ + { + "sql": "select count(*) from stb0 where ts> \"2021-07-01 00:00:00.490000000\" ;", + "result": "./query_res0.txt" + }, + { + "sql": "select count(*) from stb0 where ts < now -22d-1h-3s ;", + "result": "./query_res1.txt" + }, + { + "sql": "select count(*) from stb0 where ts < 1626918583000000000 ;", + "result": "./query_res2.txt" + }, + { + "sql": "select count(*) from stb0 where c2 <> 162687012800000000;", + "result": "./query_res3.txt" + }, + { + "sql": "select count(*) from stb0 where c2 != \"2021-07-21 20:22:08.248246976\";", + "result": "./query_res4.txt" + }, + { + "sql": "select count(*) from stb0 where ts between \"2021-07-01 00:00:00.000000000\" and \"2021-07-01 00:00:00.990000000\";", + "result": "./query_res5.txt" + }, + { + "sql":"select count(*) from stb0 group by tbname;", + "result":"./query_res6.txt" + }, + { + "sql":"select 
count(*) from stb0 where ts between 1625068800000000000 and 1625068801000000000;", + "result":"./query_res7.txt" + }, + { + "sql":"select avg(c0) from stb0 interval(5000000000b);", + "result":"./query_res8.txt" + }, + { + "sql":"select avg(c0) from stb0 interval(100000000b) sliding (100000000b);", + "result":"./query_res9.txt" + } + + ] + }, + "super_table_query": { + "stblname": "stb0", + "query_interval": 0, + "threads": 4, + "sqls": [ + { + "sql": "select count(*) from xxxx where ts > \"2021-07-01 00:00:00.490000000\" ;", + "result": "./query_res_tb0.txt" + }, + { + "sql":"select count(*) from xxxx where ts between \"2021-07-01 00:00:00.000000000\" and \"2021-07-01 00:00:00.990000000\" ;", + "result": "./query_res_tb1.txt" + }, + { + "sql":"select first(*) from xxxx ;", + "result": "./query_res_tb2.txt" + }, + { + "sql":"select last(*) from xxxx;", + "result": "./query_res_tb3.txt" + + }, + { + "sql":"select last_row(*) from xxxx ;", + "result": "./query_res_tb4.txt" + + }, + { + "sql":"select max(c0) from xxxx ;", + "result": "./query_res_tb5.txt" + + }, + { + "sql":"select min(c0) from xxxx ;", + "result": "./query_res_tb6.txt" + + }, + { + "sql":"select avg(c0) from xxxx ;", + "result": "./query_res_tb7.txt" + + }, + { + "sql":"select avg(c0) from xxxx interval(100000000b) sliding (100000000b) ;", + "result": "./query_res_tb8.txt" + + } + + + ] + } + } \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoSubscribe.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoSubscribe.json new file mode 100644 index 0000000000..1cc834164e --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoSubscribe.json @@ -0,0 +1,32 @@ +{ + "filetype":"subscribe", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "databases": "subnsdb", + "confirm_parameter_prompt": "no", + "specified_table_query": + { + "concurrent":2, + "mode":"sync", + "interval":10000, + "restart":"yes", + "keepProgress":"yes", + "sqls": [ + { + "sql": "select * from stb0 where ts>= \"2021-07-01 00:00:00.000000000\" ;", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb0 where ts < now -2d-1h-3s ;", + "result": "./subscribe_res1.txt" + }, + { + "sql": "select * from stb0 where ts < 1626918583000000000 ;", + "result": "./subscribe_res2.txt" + }] + + } +} diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanosubscribe.py b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanosubscribe.py new file mode 100644 index 0000000000..95c1a731bc --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanosubscribe.py @@ -0,0 +1,125 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import time +from datetime import datetime +import subprocess + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + # get the number of subscriptions + def subTimes(self,filename): + self.filename = filename + command = 'cat %s |wc -l'% filename + times = int(subprocess.getstatusoutput(command)[1]) + return times + + # assert results + def assertCheck(self,filename,subResult,expectResult): + self.filename = filename + self.subResult = subResult + self.expectResult = expectResult + args0 = (filename, subResult, expectResult) + assert subResult == expectResult , "Queryfile:%s ,result is %s != expect: %s" % args0 + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # clear env + os.system("ps -ef |grep 'taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json' |grep -v 'grep' |awk '{print $2}'|xargs kill -9") + os.system("rm -rf ./subscribe_res*") + os.system("rm -rf ./all_subscribe_res*") + + + # insert data + os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json" % binPath) + os.system("nohup %staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoSubscribe.json &" % binPath) + query_pid = int(subprocess.getstatusoutput('ps aux|grep "taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoSubscribe.json" |grep -v "grep"|awk \'{print $2}\'')[1]) + + + # merge result files + sleep(5) + os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt") + os.system("cat subscribe_res1.txt* > all_subscribe_res1.txt") + os.system("cat subscribe_res2.txt* > all_subscribe_res2.txt") + + + # correct subscribeTimes testcase + subTimes0 = self.subTimes("all_subscribe_res0.txt") + self.assertCheck("all_subscribe_res0.txt",subTimes0 ,200) + + subTimes1 = self.subTimes("all_subscribe_res1.txt") + self.assertCheck("all_subscribe_res1.txt",subTimes1 ,200) + + subTimes2 = self.subTimes("all_subscribe_res2.txt") + self.assertCheck("all_subscribe_res2.txt",subTimes2 ,200) + + + # insert extral data + tdSql.execute("use subnsdb") + tdSql.execute("insert into tb0_0 values(now,100.1000,'subtest1',now-1s)") + sleep(15) + + os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt") + subTimes0 = self.subTimes("all_subscribe_res0.txt") + print("pass") + self.assertCheck("all_subscribe_res0.txt",subTimes0 ,202) + + + + # correct data testcase + os.system("kill -9 %d" % query_pid) + sleep(3) + os.system("rm -rf ./subscribe_res*") + os.system("rm -rf 
./all_subscribe*") + os.system("rm -rf ./*.py.sql") + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdumpTestNanoSupport.py b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdumpTestNanoSupport.py new file mode 100644 index 0000000000..ca8832170b --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdumpTestNanoSupport.py @@ -0,0 +1,362 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1625068800000000000 # this is timestamp "2021-07-01 00:00:00" + self.numberOfTables = 10 + self.numberOfRecords = 100 + + def checkCommunity(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + return False + else: + return True + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosdump" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + + + def createdb(self, precision="ns"): + tb_nums = self.numberOfTables + per_tb_rows = self.numberOfRecords + + def build_db(precision, start_time): + tdSql.execute("drop database if exists timedb1") + tdSql.execute( + "create database timedb1 days 10 keep 365 blocks 8 precision "+"\""+precision+"\"") + + tdSql.execute("use timedb1") + tdSql.execute( + "create stable st(ts timestamp, c1 int, c2 nchar(10),c3 timestamp) tags(t1 int, t2 binary(10))") + for tb in range(tb_nums): + tbname = "t"+str(tb) + tdSql.execute("create table " + tbname + + " using st tags(1, 'beijing')") + sql = "insert into " + tbname + " values" + currts = start_time + if precision == "ns": + ts_seed = 1000000000 + elif precision == "us": + ts_seed = 1000000 + else: + ts_seed = 1000 + + for i in range(per_tb_rows): + sql += "(%d, %d, 'nchar%d',%d)" % (currts + i*ts_seed, i % + 100, i % 100, currts + i*100) # currts +1000ms (1000000000ns) + tdSql.execute(sql) + + if precision == "ns": + start_time = 1625068800000000000 + build_db(precision, start_time) + + elif precision == "us": + start_time = 1625068800000000 + build_db(precision, start_time) + + elif precision == "ms": + start_time = 1625068800000 + build_db(precision, start_time) + + else: + print("other time precision not valid , please check! 
") + + + def run(self): + + # clear envs + os.system("rm -rf ./taosdumptest/") + tdSql.execute("drop database if exists dumptmp1") + tdSql.execute("drop database if exists dumptmp2") + tdSql.execute("drop database if exists dumptmp3") + + if not os.path.exists("./taosdumptest/tmp1"): + os.makedirs("./taosdumptest/dumptmp1") + else: + print("path exist!") + + if not os.path.exists("./taosdumptest/dumptmp2"): + os.makedirs("./taosdumptest/dumptmp2") + + if not os.path.exists("./taosdumptest/dumptmp3"): + os.makedirs("./taosdumptest/dumptmp3") + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + # create nano second database + + self.createdb(precision="ns") + + # dump all data + + os.system( + "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath) + + # dump part data with -S -E + os.system( + '%staosdump --databases timedb1 -S 1625068810000000000 -E 1625068860000000000 -C ns -o ./taosdumptest/dumptmp2 ' % + binPath) + os.system( + '%staosdump --databases timedb1 -S 1625068810000000000 -o ./taosdumptest/dumptmp3 ' % + binPath) + + # replace strings to dump in databases + os.system( + "sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`") + os.system( + "sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`") + os.system( + "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`") + + os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath) + os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath) + os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath) + + # dump data and check for taosdump + tdSql.query("select count(*) from dumptmp1.st") + tdSql.checkData(0,0,1000) + + tdSql.query("select count(*) from dumptmp2.st") + tdSql.checkData(0,0,510) + + tdSql.query("select count(*) from dumptmp3.st") + tdSql.checkData(0,0,900) + + # check data + origin_res = tdSql.getResult("select * from timedb1.st") + dump_res = tdSql.getResult("select * from dumptmp1.st") + if origin_res == dump_res: + tdLog.info("test nano second : dump check data pass for all data!" ) + else: + tdLog.info("test nano second : dump check data failed for all data!" ) + + origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000000 and ts <= 1625068860000000000") + dump_res = tdSql.getResult("select * from dumptmp2.st") + if origin_res == dump_res: + tdLog.info(" test nano second : dump check data pass for data! " ) + else: + tdLog.info(" test nano second : dump check data failed for data !" ) + + origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000000 ") + dump_res = tdSql.getResult("select * from dumptmp3.st") + if origin_res == dump_res: + tdLog.info(" test nano second : dump check data pass for data! " ) + else: + tdLog.info(" test nano second : dump check data failed for data !" 
) + + + # us second support test case + + os.system("rm -rf ./taosdumptest/") + tdSql.execute("drop database if exists dumptmp1") + tdSql.execute("drop database if exists dumptmp2") + tdSql.execute("drop database if exists dumptmp3") + + if not os.path.exists("./taosdumptest/tmp1"): + os.makedirs("./taosdumptest/dumptmp1") + else: + print("path exits!") + + if not os.path.exists("./taosdumptest/dumptmp2"): + os.makedirs("./taosdumptest/dumptmp2") + + if not os.path.exists("./taosdumptest/dumptmp3"): + os.makedirs("./taosdumptest/dumptmp3") + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + self.createdb(precision="us") + + os.system( + "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath) + + os.system( + '%staosdump --databases timedb1 -S 1625068810000000 -E 1625068860000000 -C us -o ./taosdumptest/dumptmp2 ' % + binPath) + os.system( + '%staosdump --databases timedb1 -S 1625068810000000 -o ./taosdumptest/dumptmp3 ' % + binPath) + + os.system( + "sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`") + os.system( + "sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`") + os.system( + "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`") + + os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath) + os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath) + os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath) + + + tdSql.query("select count(*) from dumptmp1.st") + tdSql.checkData(0,0,1000) + + tdSql.query("select count(*) from dumptmp2.st") + tdSql.checkData(0,0,510) + + tdSql.query("select count(*) from dumptmp3.st") + tdSql.checkData(0,0,900) + + + origin_res = tdSql.getResult("select * from timedb1.st") + dump_res = tdSql.getResult("select * from dumptmp1.st") + if origin_res == dump_res: + tdLog.info("test us second : dump check data pass for all data!" ) + else: + tdLog.info("test us second : dump check data failed for all data!" ) + + origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000 and ts <= 1625068860000000") + dump_res = tdSql.getResult("select * from dumptmp2.st") + if origin_res == dump_res: + tdLog.info(" test us second : dump check data pass for data! " ) + else: + tdLog.info(" test us second : dump check data failed for data!" ) + + origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000 ") + dump_res = tdSql.getResult("select * from dumptmp3.st") + if origin_res == dump_res: + tdLog.info(" test us second : dump check data pass for data! " ) + else: + tdLog.info(" test us second : dump check data failed for data! 
" ) + + + # ms second support test case + + os.system("rm -rf ./taosdumptest/") + tdSql.execute("drop database if exists dumptmp1") + tdSql.execute("drop database if exists dumptmp2") + tdSql.execute("drop database if exists dumptmp3") + + if not os.path.exists("./taosdumptest/tmp1"): + os.makedirs("./taosdumptest/dumptmp1") + else: + print("path exits!") + + if not os.path.exists("./taosdumptest/dumptmp2"): + os.makedirs("./taosdumptest/dumptmp2") + + if not os.path.exists("./taosdumptest/dumptmp3"): + os.makedirs("./taosdumptest/dumptmp3") + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + self.createdb(precision="ms") + + os.system( + "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath) + + os.system( + '%staosdump --databases timedb1 -S 1625068810000 -E 1625068860000 -C ms -o ./taosdumptest/dumptmp2 ' % + binPath) + os.system( + '%staosdump --databases timedb1 -S 1625068810000 -o ./taosdumptest/dumptmp3 ' % + binPath) + + os.system( + "sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`") + os.system( + "sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`") + os.system( + "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`") + + os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath) + os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath) + os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath) + + + tdSql.query("select count(*) from dumptmp1.st") + tdSql.checkData(0,0,1000) + + tdSql.query("select count(*) from dumptmp2.st") + tdSql.checkData(0,0,510) + + tdSql.query("select count(*) from dumptmp3.st") + tdSql.checkData(0,0,900) + + + origin_res = tdSql.getResult("select * from timedb1.st") + dump_res = tdSql.getResult("select * from dumptmp1.st") + if origin_res == dump_res: + tdLog.info("test ms second : dump check data pass for all data!" ) + else: + tdLog.info("test ms second : dump check data failed for all data!" ) + + origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000 and ts <= 1625068860000") + dump_res = tdSql.getResult("select * from dumptmp2.st") + if origin_res == dump_res: + tdLog.info(" test ms second : dump check data pass for data! " ) + else: + tdLog.info(" test ms second : dump check data failed for data!" ) + + origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000 ") + dump_res = tdSql.getResult("select * from dumptmp3.st") + if origin_res == dump_res: + tdLog.info(" test ms second : dump check data pass for data! " ) + else: + tdLog.info(" test ms second : dump check data failed for data! " ) + + + os.system("rm -rf ./taosdumptest/") + os.system("rm -rf ./dump_result.txt") + os.system("rm -rf *.py.sql") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From e136773cf1a560ccaf4b9229f792f981f20e21a6 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 7 Aug 2021 14:05:34 +0800 Subject: [PATCH 033/185] [TD-5872]: taosdemo stmt performance. 
(#7237) --- src/kit/taosdemo/taosdemo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index f327a8a585..8db149a559 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -5983,7 +5983,7 @@ static int32_t prepareStbStmtBind( int64_t startTime, int32_t recSeq, bool isColumn) { - char *bindBuffer = calloc(1, g_args.len_of_binary); + char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.len_of_binary); if (bindBuffer == NULL) { errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n", __func__, __LINE__, g_args.len_of_binary); From a93326c9799def7472e07ec6bcdcb3db40055697 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 7 Aug 2021 14:05:57 +0800 Subject: [PATCH 034/185] [TD-5872]: taosdemo stmt performance. (#7236) --- src/kit/taosdemo/taosdemo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index dec26ae92d..c895545b81 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -5982,7 +5982,7 @@ static int32_t prepareStbStmtBind( int64_t startTime, int32_t recSeq, bool isColumn) { - char *bindBuffer = calloc(1, g_args.len_of_binary); + char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.len_of_binary); if (bindBuffer == NULL) { errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n", __func__, __LINE__, g_args.len_of_binary); From fcd3b44533662e1ca5917016e92cb519a6d9cee7 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Thu, 5 Aug 2021 13:50:12 +0800 Subject: [PATCH 035/185] refactor datacols --- src/common/inc/tdataformat.h | 16 ++++++------- src/common/src/tdataformat.c | 44 ++++++++++++++++-------------------- src/tsdb/src/tsdbCommit.c | 2 +- src/tsdb/src/tsdbCompact.c | 2 +- src/tsdb/src/tsdbMeta.c | 1 - src/tsdb/src/tsdbRead.c | 6 ++--- src/tsdb/src/tsdbReadImpl.c | 19 +++++++++------- 7 files changed, 42 insertions(+), 48 deletions(-) diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index f49c80ad86..4e2974b090 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -330,9 +330,9 @@ int tdAllocMemForCol(SDataCol *pCol, int maxPoints); void dataColInit(SDataCol *pDataCol, STColumn *pCol, int maxPoints); int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints); void dataColSetOffset(SDataCol *pCol, int nEle); +void dataColSetNEleNull(SDataCol *pCol, int nEle); bool isNEleNull(SDataCol *pCol, int nEle); -int dataColSetNEleNull(SDataCol *pCol, int nEle, int maxPoints); // Get the data pointer from a column-wised data static FORCE_INLINE const void *tdGetColDataOfRow(SDataCol *pCol, int row) { @@ -357,13 +357,11 @@ static FORCE_INLINE int32_t dataColGetNEleLen(SDataCol *pDataCol, int rows) { } typedef struct { - int maxRowSize; - int maxCols; // max number of columns - int maxPoints; // max number of points - - int numOfRows; - int numOfCols; // Total number of cols - int sversion; // TODO: set sversion + int maxCols; // max number of columns + int maxPoints; // max number of points + int numOfRows; + int numOfCols; // Total number of cols + int sversion; // TODO: set sversion SDataCol *cols; } SDataCols; @@ -407,7 +405,7 @@ static FORCE_INLINE TSKEY dataColsKeyLast(SDataCols *pCols) { } } -SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows); +SDataCols *tdNewDataCols(int maxCols, int maxRows); void tdResetDataCols(SDataCols *pCols); int tdInitDataCols(SDataCols *pCols, STSchema *pSchema); 
SDataCols *tdDupDataCols(SDataCols *pCols, bool keepData); diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index 42854704a5..0234a44758 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -247,13 +247,10 @@ int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPo return 0; } + if(tdAllocMemForCol(pCol, maxPoints) < 0) return -1; if (numOfRows > 0) { // Find the first not null value, fill all previouse values as NULL - if(dataColSetNEleNull(pCol, numOfRows, maxPoints) < 0) - return -1; - } else { - if(tdAllocMemForCol(pCol, maxPoints) < 0) - return -1; + dataColSetNEleNull(pCol, numOfRows); } } @@ -269,13 +266,21 @@ int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPo memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, pCol->bytes); pCol->len += pCol->bytes; } - return -1; + return 0; +} + +static FORCE_INLINE const void *tdGetColDataOfRowUnsafe(SDataCol *pCol, int row) { + if (IS_VAR_DATA_TYPE(pCol->type)) { + return POINTER_SHIFT(pCol->pData, pCol->dataOff[row]); + } else { + return POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * row); + } } bool isNEleNull(SDataCol *pCol, int nEle) { if(isAllRowsNull(pCol)) return true; for (int i = 0; i < nEle; i++) { - if (!isNull(tdGetColDataOfRow(pCol, i), pCol->type)) return false; + if (!isNull(tdGetColDataOfRowUnsafe(pCol, i), pCol->type)) return false; } return true; } @@ -292,11 +297,7 @@ static FORCE_INLINE void dataColSetNullAt(SDataCol *pCol, int index) { } } -int dataColSetNEleNull(SDataCol *pCol, int nEle, int maxPoints) { - if(tdAllocMemForCol(pCol, maxPoints)){ - return -1; - } - +void dataColSetNEleNull(SDataCol *pCol, int nEle) { if (IS_VAR_DATA_TYPE(pCol->type)) { pCol->len = 0; for (int i = 0; i < nEle; i++) { @@ -306,7 +307,6 @@ int dataColSetNEleNull(SDataCol *pCol, int nEle, int maxPoints) { setNullN(pCol->pData, pCol->type, pCol->bytes, nEle); pCol->len = TYPE_BYTES[pCol->type] * nEle; } - return 0; } void dataColSetOffset(SDataCol *pCol, int nEle) { @@ -323,7 +323,7 @@ void dataColSetOffset(SDataCol *pCol, int nEle) { } } -SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) { +SDataCols *tdNewDataCols(int maxCols, int maxRows) { SDataCols *pCols = (SDataCols *)calloc(1, sizeof(SDataCols)); if (pCols == NULL) { uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)sizeof(SDataCols), strerror(errno)); @@ -331,6 +331,9 @@ SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) { } pCols->maxPoints = maxRows; + pCols->maxCols = maxCols; + pCols->numOfRows = 0; + pCols->numOfCols = 0; if (maxCols > 0) { pCols->cols = (SDataCol *)calloc(maxCols, sizeof(SDataCol)); @@ -347,13 +350,8 @@ SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) { pCols->cols[i].pData = NULL; pCols->cols[i].dataOff = NULL; } - - pCols->maxCols = maxCols; } - pCols->maxRowSize = maxRowSize; - - return pCols; } @@ -372,10 +370,6 @@ int tdInitDataCols(SDataCols *pCols, STSchema *pSchema) { } } - if (schemaTLen(pSchema) > pCols->maxRowSize) { - pCols->maxRowSize = schemaTLen(pSchema); - } - tdResetDataCols(pCols); pCols->numOfCols = schemaNCols(pSchema); @@ -404,7 +398,7 @@ SDataCols *tdFreeDataCols(SDataCols *pCols) { } SDataCols *tdDupDataCols(SDataCols *pDataCols, bool keepData) { - SDataCols *pRet = tdNewDataCols(pDataCols->maxRowSize, pDataCols->maxCols, pDataCols->maxPoints); + SDataCols *pRet = tdNewDataCols(pDataCols->maxCols, pDataCols->maxPoints); if (pRet == NULL) return NULL; 
pRet->numOfCols = pDataCols->numOfCols; @@ -593,7 +587,7 @@ static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, i if ((key1 > key2) || (key1 == key2 && !TKEY_IS_DELETED(tkey2))) { for (int i = 0; i < src2->numOfCols; i++) { ASSERT(target->cols[i].type == src2->cols[i].type); - if (src2->cols[i].len > 0 && (forceSetNull || (!forceSetNull && !isNull(src2->cols[i].pData, src2->cols[i].type)))) { + if (src2->cols[i].len > 0 && (forceSetNull || (!isNull(src2->cols[i].pData, src2->cols[i].type)))) { dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src2->cols + i, *iter2), target->numOfRows, target->maxPoints); } diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index 9d6b0a3594..3b0db8da6a 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -722,7 +722,7 @@ static int tsdbInitCommitH(SCommitH *pCommith, STsdbRepo *pRepo) { return -1; } - pCommith->pDataCols = tdNewDataCols(0, 0, pCfg->maxRowsPerFileBlock); + pCommith->pDataCols = tdNewDataCols(0, pCfg->maxRowsPerFileBlock); if (pCommith->pDataCols == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; tsdbDestroyCommitH(pCommith); diff --git a/src/tsdb/src/tsdbCompact.c b/src/tsdb/src/tsdbCompact.c index 62f9e41119..5ccb9e90f2 100644 --- a/src/tsdb/src/tsdbCompact.c +++ b/src/tsdb/src/tsdbCompact.c @@ -296,7 +296,7 @@ static int tsdbCompactMeta(STsdbRepo *pRepo) { return -1; } - pComph->pDataCols = tdNewDataCols(0, 0, pCfg->maxRowsPerFileBlock); + pComph->pDataCols = tdNewDataCols(0, pCfg->maxRowsPerFileBlock); if (pComph->pDataCols == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; tsdbDestroyCompactH(pComph); diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index 619b32b3d9..21150c66e2 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -17,7 +17,6 @@ #define TSDB_SUPER_TABLE_SL_LEVEL 5 #define DEFAULT_TAG_INDEX_COLUMN 0 -static int tsdbCompareSchemaVersion(const void *key1, const void *key2); static char * getTagIndexKey(const void *pData); static STable *tsdbNewTable(); static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper, STable *pSTable); diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index c578555df2..716f82d154 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -466,7 +466,7 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC STsdbMeta* pMeta = tsdbGetMeta(tsdb); assert(pMeta != NULL); - pQueryHandle->pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pQueryHandle->pTsdb->config.maxRowsPerFileBlock); + pQueryHandle->pDataCols = tdNewDataCols(pMeta->maxCols, pQueryHandle->pTsdb->config.maxRowsPerFileBlock); if (pQueryHandle->pDataCols == NULL) { tsdbError("%p failed to malloc buf for pDataCols, %"PRIu64, pQueryHandle, pQueryHandle->qId); terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; @@ -1446,7 +1446,7 @@ static int doBinarySearchKey(char* pValue, int num, TSKEY key, int order) { return midPos; } -int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfRows, int32_t start, int32_t end) { +static int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfRows, int32_t start, int32_t end) { char* pData = NULL; int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order)? 
1 : -1; @@ -1481,7 +1481,7 @@ int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t capacity pData = (char*)pColInfo->pData + (capacity - numOfRows - num) * pColInfo->info.bytes; } - if (pColInfo->info.colId == src->colId) { + if (!isAllRowsNull(src) && pColInfo->info.colId == src->colId) { if (pColInfo->info.type != TSDB_DATA_TYPE_BINARY && pColInfo->info.type != TSDB_DATA_TYPE_NCHAR) { memmove(pData, (char*)src->pData + bytes * start, bytes * num); } else { // handle the var-string diff --git a/src/tsdb/src/tsdbReadImpl.c b/src/tsdb/src/tsdbReadImpl.c index c98f03b7de..29f1385616 100644 --- a/src/tsdb/src/tsdbReadImpl.c +++ b/src/tsdb/src/tsdbReadImpl.c @@ -42,14 +42,14 @@ int tsdbInitReadH(SReadH *pReadh, STsdbRepo *pRepo) { return -1; } - pReadh->pDCols[0] = tdNewDataCols(0, 0, pCfg->maxRowsPerFileBlock); + pReadh->pDCols[0] = tdNewDataCols(0, pCfg->maxRowsPerFileBlock); if (pReadh->pDCols[0] == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; tsdbDestroyReadH(pReadh); return -1; } - pReadh->pDCols[1] = tdNewDataCols(0, 0, pCfg->maxRowsPerFileBlock); + pReadh->pDCols[1] = tdNewDataCols(0, pCfg->maxRowsPerFileBlock); if (pReadh->pDCols[1] == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; tsdbDestroyReadH(pReadh); @@ -463,8 +463,9 @@ static int tsdbLoadBlockDataImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDat SDataCol *pDataCol = &(pDataCols->cols[dcol]); if (dcol != 0 && ccol >= pBlockData->numOfCols) { // Set current column as NULL and forward - // TODO: dataColSetNEleNull may fail - dataColSetNEleNull(pDataCol, pBlock->numOfRows, pDataCols->maxPoints); + // TODO: tdAllocMemForCol may fail + tdAllocMemForCol(pDataCol, pDataCols->maxPoints); + dataColSetNEleNull(pDataCol, pBlock->numOfRows); dcol++; continue; } @@ -504,8 +505,9 @@ static int tsdbLoadBlockDataImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDat ccol++; } else { // Set current column as NULL and forward - // TODO: dataColSetNEleNull may fail - dataColSetNEleNull(pDataCol, pBlock->numOfRows, pDataCols->maxPoints); + // TODO: tdAllocMemForCol may fail + tdAllocMemForCol(pDataCol, pDataCols->maxPoints); + dataColSetNEleNull(pDataCol, pBlock->numOfRows); dcol++; } } @@ -610,8 +612,9 @@ static int tsdbLoadBlockDataColsImpl(SReadH *pReadh, SBlock *pBlock, SDataCols * } if (pBlockCol == NULL) { - // TODO: dataColSetNEleNull may fail - dataColSetNEleNull(pDataCol, pBlock->numOfRows, pDataCols->maxPoints); + // TODO: tdAllocMemForCol may fail + tdAllocMemForCol(pDataCol, pDataCols->maxPoints); + dataColSetNEleNull(pDataCol, pBlock->numOfRows); continue; } From 347ff765cf220270dc4c76b132169730ad00db09 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Mon, 9 Aug 2021 03:24:43 +0800 Subject: [PATCH 036/185] fix merge --- src/common/src/tdataformat.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index 0234a44758..b62c97bc2a 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -587,7 +587,10 @@ static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, i if ((key1 > key2) || (key1 == key2 && !TKEY_IS_DELETED(tkey2))) { for (int i = 0; i < src2->numOfCols; i++) { ASSERT(target->cols[i].type == src2->cols[i].type); - if (src2->cols[i].len > 0 && (forceSetNull || (!isNull(src2->cols[i].pData, src2->cols[i].type)))) { + if(!forceSetNull && (isAllRowsNull(&src2->cols[i]) || isNull(src2->cols[i].pData, src2->cols[i].type)) && key1 == key2) { + dataColAppendVal(&(target->cols[i]), 
tdGetColDataOfRow(src1->cols + i, *iter1), target->numOfRows, + target->maxPoints); + } else if (src2->cols[i].len > 0) { dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src2->cols + i, *iter2), target->numOfRows, target->maxPoints); } From 855a76813ce1f84b3a870e0b4366a031c75cefc2 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Mon, 9 Aug 2021 04:00:07 +0800 Subject: [PATCH 037/185] fix merge --- src/common/src/tdataformat.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index b62c97bc2a..3b48bf9977 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -587,12 +587,12 @@ static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, i if ((key1 > key2) || (key1 == key2 && !TKEY_IS_DELETED(tkey2))) { for (int i = 0; i < src2->numOfCols; i++) { ASSERT(target->cols[i].type == src2->cols[i].type); - if(!forceSetNull && (isAllRowsNull(&src2->cols[i]) || isNull(src2->cols[i].pData, src2->cols[i].type)) && key1 == key2) { - dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src1->cols + i, *iter1), target->numOfRows, - target->maxPoints); - } else if (src2->cols[i].len > 0) { + if (src2->cols[i].len > 0 && !isNull(src2->cols[i].pData, src2->cols[i].type)) { dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src2->cols + i, *iter2), target->numOfRows, target->maxPoints); + } else if(!forceSetNull && key1 == key2 && src1->cols[i].len > 0) { + dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src1->cols + i, *iter1), target->numOfRows, + target->maxPoints); } } target->numOfRows++; From 1d3a2d47f151b532123be6018eb8ef1cd3d2b2e0 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Mon, 9 Aug 2021 05:19:22 +0800 Subject: [PATCH 038/185] remove set null in tsdbread --- src/tsdb/src/tsdbReadImpl.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/src/tsdb/src/tsdbReadImpl.c b/src/tsdb/src/tsdbReadImpl.c index 29f1385616..237025864e 100644 --- a/src/tsdb/src/tsdbReadImpl.c +++ b/src/tsdb/src/tsdbReadImpl.c @@ -464,8 +464,9 @@ static int tsdbLoadBlockDataImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDat if (dcol != 0 && ccol >= pBlockData->numOfCols) { // Set current column as NULL and forward // TODO: tdAllocMemForCol may fail - tdAllocMemForCol(pDataCol, pDataCols->maxPoints); - dataColSetNEleNull(pDataCol, pBlock->numOfRows); + /*tdAllocMemForCol(pDataCol, pDataCols->maxPoints);*/ + /*dataColSetNEleNull(pDataCol, pBlock->numOfRows);*/ + dataColReset(pDataCol); dcol++; continue; } @@ -506,8 +507,9 @@ static int tsdbLoadBlockDataImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDat } else { // Set current column as NULL and forward // TODO: tdAllocMemForCol may fail - tdAllocMemForCol(pDataCol, pDataCols->maxPoints); - dataColSetNEleNull(pDataCol, pBlock->numOfRows); + /*tdAllocMemForCol(pDataCol, pDataCols->maxPoints);*/ + /*dataColSetNEleNull(pDataCol, pBlock->numOfRows);*/ + dataColReset(pDataCol); dcol++; } } @@ -613,8 +615,9 @@ static int tsdbLoadBlockDataColsImpl(SReadH *pReadh, SBlock *pBlock, SDataCols * if (pBlockCol == NULL) { // TODO: tdAllocMemForCol may fail - tdAllocMemForCol(pDataCol, pDataCols->maxPoints); - dataColSetNEleNull(pDataCol, pBlock->numOfRows); + /*tdAllocMemForCol(pDataCol, pDataCols->maxPoints);*/ + /*dataColSetNEleNull(pDataCol, pBlock->numOfRows);*/ + dataColReset(pDataCol); continue; } From a419f091253bc83314d1e51c4b9596629b1de6b6 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 9 Aug 2021 
10:14:31 +0800 Subject: [PATCH 039/185] change version number --- snap/snapcraft.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index aef706311d..c04fa3298b 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -1,6 +1,6 @@ name: tdengine base: core18 -version: '2.1.5.0' +version: '2.1.6.0' icon: snap/gui/t-dengine.svg summary: an open-source big data platform designed and optimized for IoT. description: | @@ -72,7 +72,7 @@ parts: - usr/bin/taosd - usr/bin/taos - usr/bin/taosdemo - - usr/lib/libtaos.so.2.1.5.0 + - usr/lib/libtaos.so.2.1.6.0 - usr/lib/libtaos.so.1 - usr/lib/libtaos.so From cc69fcc49ced0ad13e3be106af5e99b88a996e4f Mon Sep 17 00:00:00 2001 From: xywang Date: Mon, 9 Aug 2021 11:01:43 +0800 Subject: [PATCH 040/185] [TD-5784]: fixed a wrong check --- src/plugins/http/src/httpUtil.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plugins/http/src/httpUtil.c b/src/plugins/http/src/httpUtil.c index 27b95a4934..ade50bdad6 100644 --- a/src/plugins/http/src/httpUtil.c +++ b/src/plugins/http/src/httpUtil.c @@ -188,7 +188,7 @@ bool httpMallocMultiCmds(HttpContext *pContext, int32_t cmdSize, int32_t bufferS bool httpReMallocMultiCmdsSize(HttpContext *pContext, int32_t cmdSize) { HttpSqlCmds *multiCmds = pContext->multiCmds; - if (cmdSize <= 0 && cmdSize > HTTP_MAX_CMD_SIZE) { + if (cmdSize <= 0 || cmdSize > HTTP_MAX_CMD_SIZE) { httpError("context:%p, fd:%d, user:%s, mulitcmd size:%d large then %d", pContext, pContext->fd, pContext->user, cmdSize, HTTP_MAX_CMD_SIZE); return false; From 1ec2cff29a3928b72a7b64d28cbb5f179ef9ac17 Mon Sep 17 00:00:00 2001 From: xywang Date: Mon, 9 Aug 2021 11:04:16 +0800 Subject: [PATCH 041/185] [TD-5784]: fixed a wrong check --- src/plugins/http/src/httpUtil.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plugins/http/src/httpUtil.c b/src/plugins/http/src/httpUtil.c index 27b95a4934..ade50bdad6 100644 --- a/src/plugins/http/src/httpUtil.c +++ b/src/plugins/http/src/httpUtil.c @@ -188,7 +188,7 @@ bool httpMallocMultiCmds(HttpContext *pContext, int32_t cmdSize, int32_t bufferS bool httpReMallocMultiCmdsSize(HttpContext *pContext, int32_t cmdSize) { HttpSqlCmds *multiCmds = pContext->multiCmds; - if (cmdSize <= 0 && cmdSize > HTTP_MAX_CMD_SIZE) { + if (cmdSize <= 0 || cmdSize > HTTP_MAX_CMD_SIZE) { httpError("context:%p, fd:%d, user:%s, mulitcmd size:%d large then %d", pContext, pContext->fd, pContext->user, cmdSize, HTTP_MAX_CMD_SIZE); return false; From 506f1cd6eb1f7e7634b381d8b15ed78ccfcb6c03 Mon Sep 17 00:00:00 2001 From: wpan Date: Mon, 9 Aug 2021 13:52:03 +0800 Subject: [PATCH 042/185] fix bug --- src/rpc/src/rpcTcp.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/rpc/src/rpcTcp.c b/src/rpc/src/rpcTcp.c index 0449ecac8b..2549518249 100644 --- a/src/rpc/src/rpcTcp.c +++ b/src/rpc/src/rpcTcp.c @@ -397,7 +397,11 @@ void *taosOpenTcpClientConnection(void *shandle, void *thandle, uint32_t ip, uin SThreadObj *pThreadObj = pClientObj->pThreadObj[index]; SOCKET fd = taosOpenTcpClientSocket(ip, port, pThreadObj->ip); +#if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32) + if (fd == (SOCKET)-1) return NULL; +#else if (fd <= 0) return NULL; +#endif struct sockaddr_in sin; uint16_t localPort = 0; From 4521970f9932acbdb1d6606cd3aa3de055fdfff2 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Mon, 9 Aug 2021 14:20:36 +0800 Subject: [PATCH 043/185] refactor: dataColSetNEleNull should not be used by other modules and be set to static --- 
src/common/inc/tdataformat.h | 1 - src/common/src/tdataformat.c | 3 ++- src/tsdb/src/tsdbCommit.c | 1 - src/tsdb/src/tsdbReadImpl.c | 9 --------- 4 files changed, 2 insertions(+), 12 deletions(-) diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index 4e2974b090..1637c4832b 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -330,7 +330,6 @@ int tdAllocMemForCol(SDataCol *pCol, int maxPoints); void dataColInit(SDataCol *pDataCol, STColumn *pCol, int maxPoints); int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints); void dataColSetOffset(SDataCol *pCol, int nEle); -void dataColSetNEleNull(SDataCol *pCol, int nEle); bool isNEleNull(SDataCol *pCol, int nEle); diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index 3b48bf9977..c793d241f6 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -19,6 +19,7 @@ #include "wchar.h" #include "tarray.h" +static void dataColSetNEleNull(SDataCol *pCol, int nEle); static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2, int limit2, int tRows, bool forceSetNull); @@ -297,7 +298,7 @@ static FORCE_INLINE void dataColSetNullAt(SDataCol *pCol, int index) { } } -void dataColSetNEleNull(SDataCol *pCol, int nEle) { +static void dataColSetNEleNull(SDataCol *pCol, int nEle) { if (IS_VAR_DATA_TYPE(pCol->type)) { pCol->len = 0; for (int i = 0; i < nEle; i++) { diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index 3b0db8da6a..8f5f885d69 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -920,7 +920,6 @@ int tsdbWriteBlockImpl(STsdbRepo *pRepo, STable *pTable, SDFile *pDFile, SDataCo SDataCol * pDataCol = pDataCols->cols + ncol; SBlockCol *pBlockCol = pBlockData->cols + nColsNotAllNull; - // if (isNEleNull(pDataCol, rowsToWrite)) { // all data to commit are NULL, just ignore it if (isAllRowsNull(pDataCol)) { // all data to commit are NULL, just ignore it continue; } diff --git a/src/tsdb/src/tsdbReadImpl.c b/src/tsdb/src/tsdbReadImpl.c index 237025864e..74d41cce19 100644 --- a/src/tsdb/src/tsdbReadImpl.c +++ b/src/tsdb/src/tsdbReadImpl.c @@ -463,9 +463,6 @@ static int tsdbLoadBlockDataImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDat SDataCol *pDataCol = &(pDataCols->cols[dcol]); if (dcol != 0 && ccol >= pBlockData->numOfCols) { // Set current column as NULL and forward - // TODO: tdAllocMemForCol may fail - /*tdAllocMemForCol(pDataCol, pDataCols->maxPoints);*/ - /*dataColSetNEleNull(pDataCol, pBlock->numOfRows);*/ dataColReset(pDataCol); dcol++; continue; @@ -506,9 +503,6 @@ static int tsdbLoadBlockDataImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDat ccol++; } else { // Set current column as NULL and forward - // TODO: tdAllocMemForCol may fail - /*tdAllocMemForCol(pDataCol, pDataCols->maxPoints);*/ - /*dataColSetNEleNull(pDataCol, pBlock->numOfRows);*/ dataColReset(pDataCol); dcol++; } @@ -614,9 +608,6 @@ static int tsdbLoadBlockDataColsImpl(SReadH *pReadh, SBlock *pBlock, SDataCols * } if (pBlockCol == NULL) { - // TODO: tdAllocMemForCol may fail - /*tdAllocMemForCol(pDataCol, pDataCols->maxPoints);*/ - /*dataColSetNEleNull(pDataCol, pBlock->numOfRows);*/ dataColReset(pDataCol); continue; } From f8b759d8fd21f5e31046dbe3ddae434223b95150 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 9 Aug 2021 15:42:29 +0800 Subject: [PATCH 044/185] [TD-5909][ci skip] test ci skip --- Jenkinsfile | 11 +++++++++++ 1 file changed, 11 
insertions(+) diff --git a/Jenkinsfile b/Jenkinsfile index e6e8a1df32..96195a78e0 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -146,8 +146,19 @@ pipeline { script{ env.skipstage=sh(script:"cd ${WORKSPACE}.tes && git --no-pager diff --name-only FETCH_HEAD ${env.CHANGE_TARGET}|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0 ",returnStdout:true) + if(env.skipstage == 0 ) + { + println env.skipstage + currentBuild.result = 'SUCCESS' + currentBuild.getRawBuild().getExecutor().interrupt(Result.SUCCESS) + } + if (sh(script: "git log -1 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]'", returnStatus: true) == 0) { + currentBuild.result = 'SUCCESS' + currentBuild.getRawBuild().getExecutor().interrupt(Result.SUCCESS) + } } println env.skipstage + sh''' rm -rf ${WORKSPACE}.tes ''' From c3fd8deeacffcee5e282e7e0f2eb72331cd5ac4f Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 9 Aug 2021 15:51:51 +0800 Subject: [PATCH 045/185] test --- Jenkinsfile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Jenkinsfile b/Jenkinsfile index 96195a78e0..7cb901f663 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -154,6 +154,9 @@ pipeline { } if (sh(script: "git log -1 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]'", returnStatus: true) == 0) { currentBuild.result = 'SUCCESS' + sh''' + git log -1 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip] + ''' currentBuild.getRawBuild().getExecutor().interrupt(Result.SUCCESS) } } From caa560005cc07785e19a650aadc1fad8ab3869ec Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 9 Aug 2021 15:57:56 +0800 Subject: [PATCH 046/185] [TD-5909]test skip ci [ci skip] --- Jenkinsfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Jenkinsfile b/Jenkinsfile index 7cb901f663..dfa83fc592 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -157,6 +157,7 @@ pipeline { sh''' git log -1 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip] ''' + currentBuild.getRawBuild().getExecutor().interrupt(Result.SUCCESS) } } From cfcf1fea42d629fd3ba4d8922dd5a96987900b24 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 9 Aug 2021 16:09:35 +0800 Subject: [PATCH 047/185] test[ci skip] --- .drone.yml | 1 - Jenkinsfile | 4 +++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.drone.yml b/.drone.yml index b520f308ba..710baa5bf3 100644 --- a/.drone.yml +++ b/.drone.yml @@ -249,4 +249,3 @@ steps: branch: - develop - master - diff --git a/Jenkinsfile b/Jenkinsfile index dfa83fc592..40e19be486 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -142,6 +142,9 @@ pipeline { sh''' git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD + cd ${WORKSPACE}.tes + git --no-pager diff --name-only FETCH_HEAD ${env.CHANGE_TARGET}|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' + git log -1 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip] ''' script{ @@ -157,7 +160,6 @@ pipeline { sh''' git log -1 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip] ''' - currentBuild.getRawBuild().getExecutor().interrupt(Result.SUCCESS) } } From a533aafdd724e94a0d9a2e0200ac496911721add Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 9 Aug 2021 16:20:30 +0800 Subject: [PATCH 048/185] test[ci skip] --- .drone.yml | 2 +- Jenkinsfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.drone.yml b/.drone.yml index 710baa5bf3..53b4ef738a 100644 --- a/.drone.yml +++ b/.drone.yml @@ -248,4 +248,4 @@ steps: when: branch: - develop - - master + - master \ No newline at end of file diff --git a/Jenkinsfile b/Jenkinsfile index 40e19be486..713c9f644e 100644 --- 
a/Jenkinsfile +++ b/Jenkinsfile @@ -144,7 +144,7 @@ pipeline { git checkout -qf FETCH_HEAD cd ${WORKSPACE}.tes git --no-pager diff --name-only FETCH_HEAD ${env.CHANGE_TARGET}|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' - git log -1 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip] + git log -1 --pretty=%B | grep -ie \'\\[skip ci\\]\' -e \'\\[ci skip\\]\' ''' script{ From b1b7db9796995304379ce69c8fd1f5c341817a75 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 9 Aug 2021 16:50:18 +0800 Subject: [PATCH 049/185] test[ci skip] --- .drone.yml | 3 ++- Jenkinsfile | 8 +------- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/.drone.yml b/.drone.yml index 53b4ef738a..318761361e 100644 --- a/.drone.yml +++ b/.drone.yml @@ -248,4 +248,5 @@ steps: when: branch: - develop - - master \ No newline at end of file + - master + \ No newline at end of file diff --git a/Jenkinsfile b/Jenkinsfile index 713c9f644e..8102c0e7ba 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -142,9 +142,6 @@ pipeline { sh''' git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD - cd ${WORKSPACE}.tes - git --no-pager diff --name-only FETCH_HEAD ${env.CHANGE_TARGET}|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' - git log -1 --pretty=%B | grep -ie \'\\[skip ci\\]\' -e \'\\[ci skip\\]\' ''' script{ @@ -155,11 +152,8 @@ pipeline { currentBuild.result = 'SUCCESS' currentBuild.getRawBuild().getExecutor().interrupt(Result.SUCCESS) } - if (sh(script: "git log -1 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]'", returnStatus: true) == 0) { + if (sh(script: "git log -1 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]'", returnStatus: true) == 1) { currentBuild.result = 'SUCCESS' - sh''' - git log -1 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip] - ''' currentBuild.getRawBuild().getExecutor().interrupt(Result.SUCCESS) } } From 8eb2ec07de7adf1c0a91505235a0c390758a1ea4 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 9 Aug 2021 17:23:15 +0800 Subject: [PATCH 050/185] test[ci skip] --- .drone.yml | 3 +-- Jenkinsfile | 17 ++++++----------- 2 files changed, 7 insertions(+), 13 deletions(-) diff --git a/.drone.yml b/.drone.yml index 318761361e..53b4ef738a 100644 --- a/.drone.yml +++ b/.drone.yml @@ -248,5 +248,4 @@ steps: when: branch: - develop - - master - \ No newline at end of file + - master \ No newline at end of file diff --git a/Jenkinsfile b/Jenkinsfile index 8102c0e7ba..e023e690d4 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -113,6 +113,9 @@ pipeline { agent{label 'master'} when { changeRequest() + not { + changelog '.*\\[ci skip\\].*' + } } steps { script{ @@ -146,19 +149,8 @@ pipeline { script{ env.skipstage=sh(script:"cd ${WORKSPACE}.tes && git --no-pager diff --name-only FETCH_HEAD ${env.CHANGE_TARGET}|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0 ",returnStdout:true) - if(env.skipstage == 0 ) - { - println env.skipstage - currentBuild.result = 'SUCCESS' - currentBuild.getRawBuild().getExecutor().interrupt(Result.SUCCESS) - } - if (sh(script: "git log -1 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]'", returnStatus: true) == 1) { - currentBuild.result = 'SUCCESS' - currentBuild.getRawBuild().getExecutor().interrupt(Result.SUCCESS) - } } println env.skipstage - sh''' rm -rf ${WORKSPACE}.tes ''' @@ -172,6 +164,9 @@ pipeline { expression { env.skipstage != 0 } + not { + changelog '.*\\[ci skip\\].*' + } } parallel { stage('python_1_s1') { From 224986faf2b0b254f971fd1dff3b30af3aaa4649 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 9 
Aug 2021 20:23:20 +0800 Subject: [PATCH 051/185] test[ci skip] --- Jenkinsfile | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 4838850722..a7467c002b 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -6,7 +6,7 @@ node { } def skipstage=0 - +def skipbuild=0 def abortPreviousBuilds() { def currentJobName = env.JOB_NAME def currentBuildNumber = env.BUILD_NUMBER.toInteger() @@ -125,6 +125,9 @@ pipeline { agent{label 'master'} when { changeRequest() + not { + changelog '.*\\[ci skip\\].*' + } } steps { script{ @@ -158,6 +161,7 @@ pipeline { script{ env.skipstage=sh(script:"cd ${WORKSPACE}.tes && git --no-pager diff --name-only FETCH_HEAD ${env.CHANGE_TARGET}|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0 ",returnStdout:true) + } println env.skipstage sh''' @@ -173,6 +177,9 @@ pipeline { expression { env.skipstage != 0 } + not { + changelog '.*\\[ci skip\\].*' + } } parallel { stage('python_1_s1') { From f48e0bbc1ef53bbc1a09b96e8867b540120208b9 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 9 Aug 2021 20:28:24 +0800 Subject: [PATCH 052/185] test[ci skip] --- Jenkinsfile | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index a7467c002b..917a8f728c 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -125,9 +125,7 @@ pipeline { agent{label 'master'} when { changeRequest() - not { - changelog '.*\\[ci skip\\].*' - } + } steps { script{ @@ -161,7 +159,8 @@ pipeline { script{ env.skipstage=sh(script:"cd ${WORKSPACE}.tes && git --no-pager diff --name-only FETCH_HEAD ${env.CHANGE_TARGET}|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0 ",returnStdout:true) - + env.skipbuild=sh(script: "git log -1 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]'", returnStatus: true) + println env.skipbuild } println env.skipstage sh''' @@ -176,10 +175,9 @@ pipeline { changeRequest() expression { env.skipstage != 0 + env.skipbuild ==1 } - not { - changelog '.*\\[ci skip\\].*' - } + } parallel { stage('python_1_s1') { From 93da3dad3381f16a32c30f17d35e561ddba860b7 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 9 Aug 2021 20:35:01 +0800 Subject: [PATCH 053/185] test --- .drone.yml | 6 +++++- Jenkinsfile | 3 +-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/.drone.yml b/.drone.yml index b520f308ba..cbe5c03318 100644 --- a/.drone.yml +++ b/.drone.yml @@ -150,6 +150,7 @@ steps: branch: - develop - master + - 2.0 --- kind: pipeline name: build_trusty @@ -176,6 +177,7 @@ steps: branch: - develop - master + - 2.0 --- kind: pipeline name: build_xenial @@ -201,6 +203,7 @@ steps: branch: - develop - master + - 2.0 --- kind: pipeline @@ -226,6 +229,7 @@ steps: branch: - develop - master + - 2.0 --- kind: pipeline name: build_centos7 @@ -249,4 +253,4 @@ steps: branch: - develop - master - + - 2.0 \ No newline at end of file diff --git a/Jenkinsfile b/Jenkinsfile index 917a8f728c..14bc6abfbd 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -160,7 +160,7 @@ pipeline { script{ env.skipstage=sh(script:"cd ${WORKSPACE}.tes && git --no-pager diff --name-only FETCH_HEAD ${env.CHANGE_TARGET}|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0 ",returnStdout:true) env.skipbuild=sh(script: "git log -1 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]'", returnStatus: true) - println env.skipbuild + } println env.skipstage sh''' @@ -177,7 +177,6 @@ pipeline { env.skipstage != 0 env.skipbuild ==1 } - } parallel { stage('python_1_s1') { From 
dc1401e6e9c428371a8675b78f89bde52a8876fb Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 9 Aug 2021 20:46:02 +0800 Subject: [PATCH 054/185] test[ci skip] --- Jenkinsfile | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index e023e690d4..f1de92cded 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -6,7 +6,7 @@ node { } def skipstage=0 - +def skipbuild=0 def abortPreviousBuilds() { def currentJobName = env.JOB_NAME def currentBuildNumber = env.BUILD_NUMBER.toInteger() @@ -113,9 +113,6 @@ pipeline { agent{label 'master'} when { changeRequest() - not { - changelog '.*\\[ci skip\\].*' - } } steps { script{ @@ -149,6 +146,7 @@ pipeline { script{ env.skipstage=sh(script:"cd ${WORKSPACE}.tes && git --no-pager diff --name-only FETCH_HEAD ${env.CHANGE_TARGET}|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0 ",returnStdout:true) + env.skipbuild=sh(script: "git log -1 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]'", returnStatus: true) } println env.skipstage sh''' @@ -163,10 +161,8 @@ pipeline { changeRequest() expression { env.skipstage != 0 + env.skipbuild ==1 } - not { - changelog '.*\\[ci skip\\].*' - } } parallel { stage('python_1_s1') { From 2fbe5f8639838438ecab5fdfb7f98f362bd99321 Mon Sep 17 00:00:00 2001 From: Yiqing Liu Date: Tue, 10 Aug 2021 10:12:08 +0800 Subject: [PATCH 055/185] Revert "Test/test ci skip 2.0 [ci skip]" --- .drone.yml | 6 +----- Jenkinsfile | 6 +----- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/.drone.yml b/.drone.yml index cbe5c03318..b520f308ba 100644 --- a/.drone.yml +++ b/.drone.yml @@ -150,7 +150,6 @@ steps: branch: - develop - master - - 2.0 --- kind: pipeline name: build_trusty @@ -177,7 +176,6 @@ steps: branch: - develop - master - - 2.0 --- kind: pipeline name: build_xenial @@ -203,7 +201,6 @@ steps: branch: - develop - master - - 2.0 --- kind: pipeline @@ -229,7 +226,6 @@ steps: branch: - develop - master - - 2.0 --- kind: pipeline name: build_centos7 @@ -253,4 +249,4 @@ steps: branch: - develop - master - - 2.0 \ No newline at end of file + diff --git a/Jenkinsfile b/Jenkinsfile index 14bc6abfbd..4838850722 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -6,7 +6,7 @@ node { } def skipstage=0 -def skipbuild=0 + def abortPreviousBuilds() { def currentJobName = env.JOB_NAME def currentBuildNumber = env.BUILD_NUMBER.toInteger() @@ -125,7 +125,6 @@ pipeline { agent{label 'master'} when { changeRequest() - } steps { script{ @@ -159,8 +158,6 @@ pipeline { script{ env.skipstage=sh(script:"cd ${WORKSPACE}.tes && git --no-pager diff --name-only FETCH_HEAD ${env.CHANGE_TARGET}|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0 ",returnStdout:true) - env.skipbuild=sh(script: "git log -1 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]'", returnStatus: true) - } println env.skipstage sh''' @@ -175,7 +172,6 @@ pipeline { changeRequest() expression { env.skipstage != 0 - env.skipbuild ==1 } } parallel { From cbd5f344cffe36b1de8202f8360aa6468018d358 Mon Sep 17 00:00:00 2001 From: Yiqing Liu Date: Tue, 10 Aug 2021 10:15:28 +0800 Subject: [PATCH 056/185] Revert "[TD-5909]test skip ci [ci skip]" --- .drone.yml | 3 ++- Jenkinsfile | 4 +--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/.drone.yml b/.drone.yml index 53b4ef738a..b520f308ba 100644 --- a/.drone.yml +++ b/.drone.yml @@ -248,4 +248,5 @@ steps: when: branch: - develop - - master \ No newline at end of file + - master + diff --git a/Jenkinsfile b/Jenkinsfile index f1de92cded..e6e8a1df32 100644 
--- a/Jenkinsfile +++ b/Jenkinsfile @@ -6,7 +6,7 @@ node { } def skipstage=0 -def skipbuild=0 + def abortPreviousBuilds() { def currentJobName = env.JOB_NAME def currentBuildNumber = env.BUILD_NUMBER.toInteger() @@ -146,7 +146,6 @@ pipeline { script{ env.skipstage=sh(script:"cd ${WORKSPACE}.tes && git --no-pager diff --name-only FETCH_HEAD ${env.CHANGE_TARGET}|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0 ",returnStdout:true) - env.skipbuild=sh(script: "git log -1 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]'", returnStatus: true) } println env.skipstage sh''' @@ -161,7 +160,6 @@ pipeline { changeRequest() expression { env.skipstage != 0 - env.skipbuild ==1 } } parallel { From f07267ebbf73bf99ec35d06c96f41b4edb7af9e8 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Tue, 10 Aug 2021 11:24:52 +0800 Subject: [PATCH 057/185] [TD-5707]: add test case --- tests/pytest/fulltest.sh | 1 + tests/pytest/functions/function_interp.py | 46 +++++++++++++++++++++++ 2 files changed, 47 insertions(+) create mode 100644 tests/pytest/functions/function_interp.py diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index fdcd0a3c42..5df37bb182 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -338,6 +338,7 @@ python3 ./test.py -f functions/function_twa.py -r 1 python3 ./test.py -f functions/function_twa_test2.py python3 ./test.py -f functions/function_stddev_td2555.py python3 ./test.py -f functions/showOfflineThresholdIs864000.py +python3 ./test.py -f functions/function_interp.py python3 ./test.py -f insert/metadataUpdate.py python3 ./test.py -f query/last_cache.py python3 ./test.py -f query/last_row_cache.py diff --git a/tests/pytest/functions/function_interp.py b/tests/pytest/functions/function_interp.py new file mode 100644 index 0000000000..810c90279c --- /dev/null +++ b/tests/pytest/functions/function_interp.py @@ -0,0 +1,46 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.rowNum = 10 + self.ts = 1537146000000 + + def run(self): + tdSql.prepare() + tdSql.execute("create table t(ts timestamp, k int)") + tdSql.execute("insert into t values('2021-1-1 1:1:1', 12);") + + tdSql.query("select interp(*) from t where ts='2021-1-1 1:1:1'") + tdSql.checkRows(1) + tdSql.checkData(0, 1, 12) + + tdSql.error("select interp(*) from t where ts >'2021-1-1 1:1:1' and ts < now interval(1s) fill(next)") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From e554181cfb0f2641bcfcdbf4dee02ff8968099f1 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 10 Aug 2021 14:13:00 +0800 Subject: [PATCH 058/185] [TD-5921]:fix case problem in 'tbname in' syntax --- src/tsdb/src/tsdbRead.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index e1d40aa7d0..260c3d67dc 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -3468,6 +3468,7 @@ void filterPrepare(void* expr, void* param) { SArray *arr = (SArray *)(pCond->arr); for (size_t i = 0; i < taosArrayGetSize(arr); i++) { char* p = taosArrayGetP(arr, i); + strtolower(varDataVal(p), varDataVal(p)); taosHashPut(pObj, varDataVal(p),varDataLen(p), &dummy, sizeof(dummy)); } } else { From 8082fdf90604481e5ea9de159dcb65d74da22640 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 10 Aug 2021 14:27:30 +0800 Subject: [PATCH 059/185] sort colId --- src/client/src/tscSubquery.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 8c0d642ca6..b673585904 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2395,8 +2395,8 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { SColumn* x = taosArrayGetP(pNewQueryInfo->colList, index1); tscColumnCopy(x, pCol); } else { - SColumn *p = tscColumnClone(pCol); - taosArrayPush(pNewQueryInfo->colList, &p); + SSchema ss = {.type = pCol->info.type, .bytes = pCol->info.bytes, .colId = pCol->columnIndex}; + tscColumnListInsert(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid, &ss); } } } From 2d4fd68267f3e9311f23ae699b56ffbbe8ce874e Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 10 Aug 2021 14:53:43 +0800 Subject: [PATCH 060/185] add fill next process --- src/query/src/qFill.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/query/src/qFill.c b/src/query/src/qFill.c index 7dd73c9fe4..1a86bbae36 100644 --- a/src/query/src/qFill.c +++ b/src/query/src/qFill.c @@ -206,6 +206,12 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, void** data, int32_t outputR } else { assert(pFillInfo->currentKey == ts); initBeforeAfterDataBuf(pFillInfo, prev); + if (pFillInfo->type == TSDB_FILL_NEXT && (pFillInfo->index + 1) < pFillInfo->numOfRows) { + initBeforeAfterDataBuf(pFillInfo, next); + ++pFillInfo->index; + copyCurrentRowIntoBuf(pFillInfo, srcData, *next); + 
--pFillInfo->index; + } // assign rows to dst buffer for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) { @@ -227,6 +233,12 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, void** data, int32_t outputR } else if (pFillInfo->type == TSDB_FILL_LINEAR) { assignVal(output, src, pCol->col.bytes, pCol->col.type); memcpy(*prev + pCol->col.offset, src, pCol->col.bytes); + } else if (pFillInfo->type == TSDB_FILL_NEXT) { + if (*next) { + assignVal(output, *next + pCol->col.offset, pCol->col.bytes, pCol->col.type); + } else { + setNull(output, pCol->col.type, pCol->col.bytes); + } } else { assignVal(output, (char*)&pCol->fillVal.i, pCol->col.bytes, pCol->col.type); } From 5d9fe3906c01ff00396e39debfd1517b8755b2aa Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 10 Aug 2021 15:01:48 +0800 Subject: [PATCH 061/185] fix windows compile error --- src/client/src/tscSubquery.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index b673585904..2e7e02cfd5 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2395,7 +2395,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { SColumn* x = taosArrayGetP(pNewQueryInfo->colList, index1); tscColumnCopy(x, pCol); } else { - SSchema ss = {.type = pCol->info.type, .bytes = pCol->info.bytes, .colId = pCol->columnIndex}; + SSchema ss = {.type = (uint8_t)pCol->info.type, .bytes = pCol->info.bytes, .colId = (int16_t)pCol->columnIndex}; tscColumnListInsert(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid, &ss); } } From 9dab2ac93be0b8c99ac7908a33941154fe50be39 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 10 Aug 2021 17:17:55 +0800 Subject: [PATCH 062/185] add test case --- tests/script/general/parser/fill.sim | 23 ++++++++++++++++++++++- tests/script/general/parser/function.sim | 18 ++++++++++++++++++ 2 files changed, 40 insertions(+), 1 deletion(-) diff --git a/tests/script/general/parser/fill.sim b/tests/script/general/parser/fill.sim index d109dd50f7..3413a0b596 100644 --- a/tests/script/general/parser/fill.sim +++ b/tests/script/general/parser/fill.sim @@ -1050,6 +1050,27 @@ sql_error select min(c3) from m_fl_mt0 interval(10w) fill(value, 20) sql_error select max(c3) from m_fl_mt0 interval(1n) fill(prev) sql_error select min(c3) from m_fl_mt0 interval(1y) fill(value, 20) +sql create table nexttb1 (ts timestamp, f1 int); +sql insert into nexttb1 values ('2021-08-08 1:1:1', NULL); +sql insert into nexttb1 values ('2021-08-08 1:1:5', 3); + +sql select last(*) from nexttb1 where ts >= '2021-08-08 1:1:1' and ts < '2021-08-08 1:1:10' interval(1s) fill(next); +if $rows != 9 then + return -1 +endi +if $data00 != @21-08-08 01:01:01.000@ then + return -1 +endi +if $data01 != @21-08-08 01:01:01.000@ then + return -1 +endi +if $data02 != 3 then + return -1 +endi + + + + print =============== clear #sql drop database $db #sql show databases @@ -1057,4 +1078,4 @@ print =============== clear # return -1 #endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/function.sim b/tests/script/general/parser/function.sim index 5edadad3a6..0c93fe919a 100644 --- a/tests/script/general/parser/function.sim +++ b/tests/script/general/parser/function.sim @@ -1148,3 +1148,21 @@ endi sql select derivative(test_column_alias_name, 1s, 0) from (select avg(k) test_column_alias_name from t1 interval(1s)); + +sql create table smeters (ts timestamp, current float, voltage 
int); +sql insert into smeters values ('2021-08-08 10:10:10', 10, 1); +sql insert into smeters values ('2021-08-08 10:10:12', 10, 2); + +sql select stddev(voltage) from smeters where ts>='2021-08-08 10:10:10.000' and ts < '2021-08-08 10:10:20.000' and current=10 interval(1000a); +if $rows != 2 then + return -1 +endi +if $data00 != @21-08-08 10:10:10.000@ then + return -1 +endi +if $data10 != @21-08-08 10:10:12.000@ then + return -1 +endi + + + From e2cdea9e9274988f9d3242a02621024fe86b30d1 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Tue, 10 Aug 2021 18:28:04 +0800 Subject: [PATCH 063/185] test[ci skip] --- Jenkinsfile | 41 ++- tests/pytest/crash_gen/valgrind_taos.supp | 388 +++++++++++++++++++++- 2 files changed, 407 insertions(+), 22 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 14bc6abfbd..19eaec8d5f 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -5,7 +5,6 @@ node { git url: 'https://github.com/taosdata/TDengine.git' } -def skipstage=0 def skipbuild=0 def abortPreviousBuilds() { def currentJobName = env.JOB_NAME @@ -33,8 +32,7 @@ def abort_previous(){ milestone(buildNumber) } def pre_test(){ - - + sh'hostname' sh ''' sudo rmtaos || echo "taosd has not installed" ''' @@ -157,12 +155,11 @@ pipeline { git checkout -qf FETCH_HEAD ''' - script{ - env.skipstage=sh(script:"cd ${WORKSPACE}.tes && git --no-pager diff --name-only FETCH_HEAD ${env.CHANGE_TARGET}|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0 ",returnStdout:true) - env.skipbuild=sh(script: "git log -1 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]'", returnStatus: true) - + script{ + skipbuild='2' + skipbuild=sh(script: "git log -2 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]' && echo 1 || echo 2", returnStdout:true) + println skipbuild } - println env.skipstage sh''' rm -rf ${WORKSPACE}.tes ''' @@ -172,15 +169,17 @@ pipeline { stage('Parallel test stage') { //only build pr when { + allOf{ changeRequest() - expression { - env.skipstage != 0 - env.skipbuild ==1 + expression{ + return skipbuild.trim() == '2' + } + } } parallel { stage('python_1_s1') { - agent{label 'p1'} + agent any steps { pre_test() @@ -195,7 +194,7 @@ pipeline { } } stage('python_2_s5') { - agent{label 'p2'} + agent any steps { pre_test() @@ -209,7 +208,7 @@ pipeline { } } stage('python_3_s6') { - agent{label 'p3'} + agent any steps { timeout(time: 45, unit: 'MINUTES'){ pre_test() @@ -222,7 +221,7 @@ pipeline { } } stage('test_b1_s2') { - agent{label 'b1'} + agent any steps { timeout(time: 45, unit: 'MINUTES'){ pre_test() @@ -235,7 +234,7 @@ pipeline { } stage('test_crash_gen_s3') { - agent{label "b2"} + agent any steps { pre_test() @@ -274,7 +273,7 @@ pipeline { } stage('test_valgrind_s4') { - agent{label "b3"} + agent any steps { pre_test() @@ -300,7 +299,7 @@ pipeline { } } stage('test_b4_s7') { - agent{label 'b4'} + agent any steps { timeout(time: 45, unit: 'MINUTES'){ pre_test() @@ -319,7 +318,7 @@ pipeline { } } stage('test_b5_s8') { - agent{label 'b5'} + agent any steps { timeout(time: 45, unit: 'MINUTES'){ pre_test() @@ -332,7 +331,7 @@ pipeline { } } stage('test_b6_s9') { - agent{label 'b6'} + agent any steps { timeout(time: 45, unit: 'MINUTES'){ pre_test() @@ -345,7 +344,7 @@ pipeline { } } stage('test_b7_s10') { - agent{label 'b7'} + agent any steps { timeout(time: 45, unit: 'MINUTES'){ pre_test() diff --git a/tests/pytest/crash_gen/valgrind_taos.supp b/tests/pytest/crash_gen/valgrind_taos.supp index b42015a053..b9296f008e 100644 --- a/tests/pytest/crash_gen/valgrind_taos.supp +++ 
b/tests/pytest/crash_gen/valgrind_taos.supp @@ -17722,4 +17722,390 @@ fun:_PyEval_EvalCodeWithName fun:_PyFunction_Vectorcall fun:_PyEval_EvalFrameDefault -} \ No newline at end of file +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + fun:__libc_alloc_buffer_allocate + fun:alloc_buffer_allocate + fun:__resolv_conf_allocate + fun:__resolv_conf_load + fun:__resolv_conf_get_current + fun:__res_vinit + fun:maybe_init + fun:context_get + fun:__resolv_context_get + fun:gaih_inet.constprop.7 + fun:getaddrinfo + fun:taosGetFqdn + fun:taosCheckGlobalCfg + fun:taos_init_imp +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_Pack + obj:/usr/bin/python3.8 + fun:PyObject_GetItem + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:_PyObject_MakeTpCall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + fun:PyCode_NewWithPosOnlyArgs + fun:PyCode_New + obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/tslibs/parsing.cpython-38-x86_64-linux-gnu.so + obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/tslibs/parsing.cpython-38-x86_64-linux-gnu.so + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:_PyObject_MakeTpCall + fun:_PyEval_EvalFrameDefault + obj:/usr/local/lib/python3.8 + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_New + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_New + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:_PyObject_MakeTpCall + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:_PyObject_MakeTpCall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + 
fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun: malloc + obj:/usr/bin/python3.8 + fun:PyTuple_Pack + obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/interval.cpython-38-x86_64-linux-gnu.so + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun: malloc + obj:/usr/bin/python3.8 + fun:_PyObject_MakeTpCall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:_PyObject_MakeTpCall + obj:/usr/bin/python3.8 + fun:PyObject_CallFunctionObjArgs + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyObject_GetAttr + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:_PyObject_MakeTpCall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8) + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8) + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + obj:/usr/bin/python3.8) + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8) + fun:PyTuple_Pack + obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/interval.cpython-38-x86_64-linux-gnu.so + fun:PyModule_ExecDef + obj:/usr/bin/python3.8) + obj:/usr/bin/python3.8) + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_Pack + obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/tslibs/np_datetime.cpython-38-x86_64-linux-gnu.so + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_Pack + obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/tslibs/ccalendar.cpython-38-x86_64-linux-gnu.so + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:_PyObject_MakeTpCall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_Pack + 
obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/interval.cpython-38-x86_64-linux-gnu.so + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_Pack + obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/hashtable.cpython-38-x86_64-linux-gnu.so + obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/hashtable.cpython-38-x86_64-linux-gnu.so + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall +} +{ + + Memcheck:Leak + match-leak-kinds: definite + ... + obj:/usr/local/lib/python3.8/dist-packages/pandas/* + ... +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_New + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_New + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:_PyObject_MakeTpCall + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:PyObject_GetAttr + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_New + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall +} \ No newline at end of file From 10f0d6ab1cdce130a9658da065d0131cd4909ff1 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Tue, 10 Aug 2021 18:33:15 +0800 Subject: [PATCH 064/185] test --- Jenkinsfile | 20 ++++++++++---------- tests/pytest/crash_gen/valgrind_taos.supp | 2 +- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 19eaec8d5f..9fe2d07877 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -179,7 +179,7 @@ pipeline { } parallel { stage('python_1_s1') { - agent any + agent {label 'tt'} steps { pre_test() @@ -194,7 +194,7 @@ pipeline { } } stage('python_2_s5') { - agent any + agent {label 'tt'} steps { pre_test() @@ -208,7 +208,7 @@ pipeline { } } stage('python_3_s6') { - agent any + agent {label 'tt'} steps { timeout(time: 45, unit: 'MINUTES'){ pre_test() @@ -221,7 +221,7 @@ pipeline { } } stage('test_b1_s2') { - agent any + agent {label 'tt'} steps { timeout(time: 45, unit: 'MINUTES'){ pre_test() @@ -234,7 +234,7 @@ pipeline { } stage('test_crash_gen_s3') { - agent any + agent {label 'tt'} steps { pre_test() @@ -273,7 +273,7 @@ pipeline { } stage('test_valgrind_s4') { - agent any + agent {label 'tt'} steps { pre_test() @@ -299,7 +299,7 @@ pipeline { } } stage('test_b4_s7') { - agent any + agent {label 'tt'} steps { timeout(time: 45, unit: 'MINUTES'){ pre_test() @@ -318,7 +318,7 @@ pipeline { } } stage('test_b5_s8') { - agent any + agent {label 'tt'} steps { timeout(time: 45, unit: 'MINUTES'){ pre_test() @@ -331,7 +331,7 @@ pipeline { } } stage('test_b6_s9') { - agent 
any + agent {label 'tt'} steps { timeout(time: 45, unit: 'MINUTES'){ pre_test() @@ -344,7 +344,7 @@ pipeline { } } stage('test_b7_s10') { - agent any + agent {label 'tt'} steps { timeout(time: 45, unit: 'MINUTES'){ pre_test() diff --git a/tests/pytest/crash_gen/valgrind_taos.supp b/tests/pytest/crash_gen/valgrind_taos.supp index b9296f008e..6c92f33018 100644 --- a/tests/pytest/crash_gen/valgrind_taos.supp +++ b/tests/pytest/crash_gen/valgrind_taos.supp @@ -18108,4 +18108,4 @@ fun:_PyEval_EvalFrameDefault fun:_PyEval_EvalCodeWithName fun:_PyFunction_Vectorcall -} \ No newline at end of file +} \ No newline at end of file From fbf76a3705bcb6014a8f413d0a18a553aa4f5a2e Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 10 Aug 2021 18:46:55 +0800 Subject: [PATCH 065/185] [TD-5922]: add unitest cases --- tests/pytest/query/tbname.py | 3 +++ .../script/general/parser/tbnameIn_query.sim | 24 +++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/tests/pytest/query/tbname.py b/tests/pytest/query/tbname.py index 08416ba3ed..30d90b1f9d 100644 --- a/tests/pytest/query/tbname.py +++ b/tests/pytest/query/tbname.py @@ -53,6 +53,9 @@ class TDTestCase: "select * from cars where id=0 and tbname in ('carzero', 'cartwo')") tdSql.checkRows(1) + tdSql.query("select * from cars where tbname in ('carZero', 'CARONE')") + tdSql.checkRows(2) + """ tdSql.query("select * from cars where tbname like 'car%'") tdSql.checkRows(2) diff --git a/tests/script/general/parser/tbnameIn_query.sim b/tests/script/general/parser/tbnameIn_query.sim index 65bb89d549..db27886bbf 100644 --- a/tests/script/general/parser/tbnameIn_query.sim +++ b/tests/script/general/parser/tbnameIn_query.sim @@ -101,6 +101,30 @@ if $data11 != 2 then return -1 endi +## tbname in can accpet Upper case table name +sql select count(*) from $stb where tbname in ('ti_tb0', 'TI_tb1', 'TI_TB2') group by t1 order by t1 +if $rows != 3 then + return -1 +endi +if $data00 != 10 then + return -1 +endi +if $data01 != 0 then + return -1 +endi +if $data10 != 10 then + return -1 +endi +if $data11 != 1 then + return -1 +endi +if $data20 != 10 then + return -1 +endi +if $data21 != 2 then + return -1 +endi + # multiple tbname in is not allowed NOW sql_error select count(*) from $stb where tbname in ('ti_tb1', 'ti_tb300') and tbname in ('ti_tb5', 'ti_tb1000') group by t1 order by t1 asc #if $rows != 4 then From ce5b43c17c701083ab94af3045bce41d0e587f9f Mon Sep 17 00:00:00 2001 From: wpan Date: Wed, 11 Aug 2021 08:46:59 +0800 Subject: [PATCH 066/185] fix top/bottom use wrong expr issue --- src/query/inc/qUtil.h | 4 ++-- src/query/src/qExecutor.c | 10 +++++----- src/query/src/qUtil.c | 12 ++++++++++++ 3 files changed, 19 insertions(+), 7 deletions(-) diff --git a/src/query/inc/qUtil.h b/src/query/inc/qUtil.h index d2802d9fe0..ce607f0fe2 100644 --- a/src/query/inc/qUtil.h +++ b/src/query/inc/qUtil.h @@ -39,7 +39,6 @@ #define GET_QID(_r) (((SQInfo*)((_r)->qinfo))->qId) #define curTimeWindowIndex(_winres) ((_winres)->curIndex) -#define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!(sq)))? 
(_q)->pExpr1[1].base.param[0].i64:1) int32_t getOutputInterResultBufSize(SQueryAttr* pQueryAttr); @@ -60,6 +59,7 @@ SResultRowCellInfo* getResultCell(const SResultRow* pRow, int32_t index, int32_t void* destroyQueryFuncExpr(SExprInfo* pExprInfo, int32_t numOfExpr); void* freeColumnInfo(SColumnInfo* pColumnInfo, int32_t numOfCols); +int32_t getRowNumForMultioutput(SQueryAttr* pQueryAttr, bool topBottomQuery, bool stable); static FORCE_INLINE SResultRow *getResultRow(SResultRowInfo *pResultRowInfo, int32_t slot) { assert(pResultRowInfo != NULL && slot >= 0 && slot < pResultRowInfo->size); @@ -70,7 +70,7 @@ static FORCE_INLINE char* getPosInResultPage(SQueryAttr* pQueryAttr, tFilePage* int32_t offset) { assert(rowOffset >= 0 && pQueryAttr != NULL); - int32_t numOfRows = (int32_t)GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery); + int32_t numOfRows = (int32_t)getRowNumForMultioutput(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery); return ((char *)page->data) + rowOffset + offset * numOfRows; } diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 52e5dbb7f3..e31792398d 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -2706,7 +2706,7 @@ static void getIntermediateBufInfo(SQueryRuntimeEnv* pRuntimeEnv, int32_t* ps, i SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; int32_t MIN_ROWS_PER_PAGE = 4; - *rowsize = (int32_t)(pQueryAttr->resultRowSize * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery)); + *rowsize = (int32_t)(pQueryAttr->resultRowSize * getRowNumForMultioutput(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery)); int32_t overhead = sizeof(tFilePage); // one page contains at least two rows @@ -3630,7 +3630,7 @@ void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity, int32_t numOf // re-estabilish output buffer pointer. 
int32_t functionId = pBInfo->pCtx[i].functionId; if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) { - pBInfo->pCtx[i].ptsOutputBuf = pBInfo->pCtx[0].pOutput; + pBInfo->pCtx[i].ptsOutputBuf = pBInfo->pCtx[i-1].pOutput; } } } @@ -5298,7 +5298,7 @@ SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SMultiwayMergeInfo* pInfo = calloc(1, sizeof(SMultiwayMergeInfo)); pInfo->resultRowFactor = - (int32_t)(GET_ROW_PARAM_FOR_MULTIOUTPUT(pRuntimeEnv->pQueryAttr, pRuntimeEnv->pQueryAttr->topBotQuery, false)); + (int32_t)(getRowNumForMultioutput(pRuntimeEnv->pQueryAttr, pRuntimeEnv->pQueryAttr->topBotQuery, false)); pRuntimeEnv->scanFlag = MERGE_STAGE; // TODO init when creating pCtx @@ -6327,7 +6327,7 @@ SOperatorInfo* createAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOpera SAggOperatorInfo* pInfo = calloc(1, sizeof(SAggOperatorInfo)); SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; - int32_t numOfRows = (int32_t)(GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery)); + int32_t numOfRows = (int32_t)(getRowNumForMultioutput(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery)); pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, numOfRows); pInfo->binfo.pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->binfo.rowCellInfoOffset); @@ -6701,7 +6701,7 @@ SOperatorInfo* createGroupbyOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperato SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; pQueryAttr->resultRowSize = (pQueryAttr->resultRowSize * - (int32_t)(GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery))); + (int32_t)(getRowNumForMultioutput(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery))); pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity); initResultRowInfo(&pInfo->binfo.resultRowInfo, 8, TSDB_DATA_TYPE_INT); diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index a3d2e424d2..f017c4db1f 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -30,6 +30,18 @@ typedef struct SCompSupporter { int32_t order; } SCompSupporter; +int32_t getRowNumForMultioutput(SQueryAttr* pQueryAttr, bool topBottomQuery, bool stable) { + if (pQueryAttr && (!stable)) { + for (int16_t i = 0; i < pQueryAttr->numOfOutput; ++i) { + if (pQueryAttr->pExpr1[i].base.functionId == TSDB_FUNC_TOP || pQueryAttr->pExpr1[i].base.functionId == TSDB_FUNC_BOTTOM) { + return pQueryAttr->pExpr1[i].base.param[0].i64; + } + } + } + + return 1; +} + int32_t getOutputInterResultBufSize(SQueryAttr* pQueryAttr) { int32_t size = 0; From 9bafb6436eb14c6b9fbbaf4c706a42142c0be87b Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Wed, 11 Aug 2021 09:04:04 +0800 Subject: [PATCH 067/185] trigger CI --- src/client/src/tscSQLParser.c | 1 - 1 file changed, 1 deletion(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 8d89b7c3cc..afbecd3a10 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5888,7 +5888,6 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { if (IS_VAR_DATA_TYPE(pTagsSchema->type) && (pItem->pVar.nLen > pTagsSchema->bytes * TSDB_NCHAR_SIZE)) { return invalidOperationMsg(pMsg, msg14); } - pAlterSQL->tagData.data = calloc(1, pTagsSchema->bytes * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE); if (tVariantDump(&pItem->pVar, pAlterSQL->tagData.data, pTagsSchema->type, true) != 
TSDB_CODE_SUCCESS) { From 37b43a40067d512d5e76fd1fbf4561434a7c4085 Mon Sep 17 00:00:00 2001 From: wpan Date: Wed, 11 Aug 2021 09:16:13 +0800 Subject: [PATCH 068/185] fix windows compile error --- src/query/src/qUtil.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index f017c4db1f..4caf351799 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -34,7 +34,7 @@ int32_t getRowNumForMultioutput(SQueryAttr* pQueryAttr, bool topBottomQuery, boo if (pQueryAttr && (!stable)) { for (int16_t i = 0; i < pQueryAttr->numOfOutput; ++i) { if (pQueryAttr->pExpr1[i].base.functionId == TSDB_FUNC_TOP || pQueryAttr->pExpr1[i].base.functionId == TSDB_FUNC_BOTTOM) { - return pQueryAttr->pExpr1[i].base.param[0].i64; + return (int32_t)pQueryAttr->pExpr1[i].base.param[0].i64; } } } From 40d80c972aa02ec51d0f987066f0c07359e8b70b Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 11 Aug 2021 09:27:50 +0800 Subject: [PATCH 069/185] add ci time[ci skip] --- Jenkinsfile | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index e6e8a1df32..680fcce37f 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -168,7 +168,7 @@ pipeline { steps { pre_test() - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ sh ''' date cd ${WKC}/tests @@ -183,7 +183,7 @@ pipeline { steps { pre_test() - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ sh ''' date cd ${WKC}/tests @@ -195,7 +195,7 @@ pipeline { stage('python_3_s6') { agent{label 'p3'} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' date @@ -208,7 +208,7 @@ pipeline { stage('test_b1_s2') { agent{label 'b1'} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' cd ${WKC}/tests @@ -245,7 +245,7 @@ pipeline { ./handle_taosd_val_log.sh ''' } - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ sh ''' date cd ${WKC}/tests @@ -269,7 +269,7 @@ pipeline { ./handle_val_log.sh ''' } - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ sh ''' date cd ${WKC}/tests @@ -286,7 +286,7 @@ pipeline { stage('test_b4_s7') { agent{label 'b4'} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' date @@ -305,7 +305,7 @@ pipeline { stage('test_b5_s8') { agent{label 'b5'} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' date @@ -318,7 +318,7 @@ pipeline { stage('test_b6_s9') { agent{label 'b6'} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' date @@ -331,7 +331,7 @@ pipeline { stage('test_b7_s10') { agent{label 'b7'} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' date From 1df4567d7b56f93a3be549a4ec319cd7102e2b5e Mon Sep 17 00:00:00 2001 From: Zhiyu Yang <69311263+zyyang-taosdata@users.noreply.github.com> Date: Wed, 11 Aug 2021 09:28:51 +0800 Subject: [PATCH 070/185] Fix/td 5831 (#7258) * remove useless method * [TD-5831]: add reset query cache test case for restful * [TD-5831]: jni and restful must have user and password properties * add without password test case * add without password test case * change * change * change * change * change * change * change * change * try to remove execute syntax check * change * remove System.out * change * change --- 
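[Note on this patch, not part of the commit: after this change both the JNI and the RESTful driver refuse to connect when the user or password property is missing, raising error 0x2319 (user is required) or 0x231a (password is required). The sketch below is illustrative only -- class and variable names are made up, and it assumes a locally running server with the default root/taosdata credentials, exactly as the test cases in this patch do. It shows the two ways the tests supply credentials.]

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;
    import com.taosdata.jdbc.TSDBDriver;

    public class ConnectWithCredentials {
        public static void main(String[] args) throws Exception {
            // Option 1: credentials embedded in the JDBC URL (as in ResetQueryCacheTest).
            try (Connection c1 = DriverManager.getConnection(
                    "jdbc:TAOS://127.0.0.1:0/?user=root&password=taosdata")) {
                // use c1 ...
            }

            // Option 2: credentials passed via Properties (as in the other test cases).
            // Omitting either property now fails with 0x2319 / 0x231a instead of
            // silently falling through to the native layer.
            Properties props = new Properties();
            props.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
            props.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
            try (Connection c2 = DriverManager.getConnection(
                    "jdbc:TAOS-RS://127.0.0.1:6041/", props)) {
                // use c2 ...
            }
        }
    }

[Either form works for both drivers; the point is simply that the credentials can no longer be omitted.]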
src/connector/jdbc/pom.xml | 1 - .../java/com/taosdata/jdbc/TSDBDriver.java | 7 ++ .../java/com/taosdata/jdbc/TSDBError.java | 2 + .../com/taosdata/jdbc/TSDBErrorNumbers.java | 5 ++ .../com/taosdata/jdbc/TSDBJNIConnector.java | 1 - .../com/taosdata/jdbc/rs/RestfulDriver.java | 10 ++- .../taosdata/jdbc/rs/RestfulStatement.java | 59 ++++++++--------- .../jdbc/utils/SqlSyntaxValidator.java | 15 +---- .../java/com/taosdata/jdbc/SubscribeTest.java | 2 + .../jdbc/cases/AuthenticationTest.java | 44 +++++++++++++ .../taosdata/jdbc/cases/BatchInsertTest.java | 2 + .../com/taosdata/jdbc/cases/ImportTest.java | 2 + .../cases/InsertSpecialCharacterJniTest.java | 41 ++++++++++-- .../InsertSpecialCharacterRestfulTest.java | 6 +- .../taosdata/jdbc/cases/QueryDataTest.java | 2 + .../jdbc/cases/ResetQueryCacheTest.java | 66 +++++++++---------- .../com/taosdata/jdbc/cases/SelectTest.java | 2 + .../com/taosdata/jdbc/cases/StableTest.java | 2 + .../jdbc/utils/SqlSyntaxValidatorTest.java | 21 ------ 19 files changed, 176 insertions(+), 114 deletions(-) delete mode 100644 src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/SqlSyntaxValidatorTest.java diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml index 907562fe26..fbeeeb56d3 100644 --- a/src/connector/jdbc/pom.xml +++ b/src/connector/jdbc/pom.xml @@ -113,7 +113,6 @@ **/AppMemoryLeakTest.java - **/AuthenticationTest.java **/ConnectMultiTaosdByRestfulWithDifferentTokenTest.java **/DatetimeBefore1970Test.java **/FailOverTest.java diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java index f5f16758c1..521a88b128 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java @@ -14,6 +14,8 @@ *****************************************************************************/ package com.taosdata.jdbc; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; import java.sql.*; import java.util.*; import java.util.logging.Logger; @@ -127,6 +129,11 @@ public class TSDBDriver extends AbstractDriver { return null; } + if (!props.containsKey(TSDBDriver.PROPERTY_KEY_USER)) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED); + if (!props.containsKey(TSDBDriver.PROPERTY_KEY_PASSWORD)) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED); + try { TSDBJNIConnector.init((String) props.get(PROPERTY_KEY_CONFIG_DIR), (String) props.get(PROPERTY_KEY_LOCALE), (String) props.get(PROPERTY_KEY_CHARSET), (String) props.get(PROPERTY_KEY_TIME_ZONE)); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java index d626698663..977ae66515 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java @@ -33,6 +33,8 @@ public class TSDBError { TSDBErrorMap.put(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE, "numeric value out of range"); TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE, "unknown taos type in tdengine"); TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PRECISION, "unknown timestamp precision"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED, "user is required"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED, "password is required"); TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN, 
"unknown error"); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java index 3c44d69be5..2207db6f93 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java @@ -29,6 +29,9 @@ public class TSDBErrorNumbers { public static final int ERROR_UNKNOWN_TIMESTAMP_PRECISION = 0x2316; // unknown timestamp precision public static final int ERROR_RESTFul_Client_Protocol_Exception = 0x2317; public static final int ERROR_RESTFul_Client_IOException = 0x2318; + public static final int ERROR_USER_IS_REQUIRED = 0x2319; // user is required + public static final int ERROR_PASSWORD_IS_REQUIRED = 0x231a; // password is required + public static final int ERROR_UNKNOWN = 0x2350; //unknown error @@ -67,6 +70,8 @@ public class TSDBErrorNumbers { errorNumbers.add(ERROR_UNKNOWN_TAOS_TYPE); errorNumbers.add(ERROR_UNKNOWN_TIMESTAMP_PRECISION); errorNumbers.add(ERROR_RESTFul_Client_IOException); + errorNumbers.add(ERROR_USER_IS_REQUIRED); + errorNumbers.add(ERROR_PASSWORD_IS_REQUIRED); errorNumbers.add(ERROR_RESTFul_Client_Protocol_Exception); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java index 4fdbb308c5..c634fe2e95 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java @@ -36,7 +36,6 @@ public class TSDBJNIConnector { static { System.loadLibrary("taos"); - System.out.println("java.library.path:" + System.getProperty("java.library.path")); } public boolean isClosed() { diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java index 9ab67c5502..0a8809e84f 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java @@ -7,6 +7,7 @@ import com.taosdata.jdbc.utils.HttpClientPoolUtil; import java.io.UnsupportedEncodingException; import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; import java.sql.*; import java.util.Properties; import java.util.logging.Logger; @@ -40,8 +41,13 @@ public class RestfulDriver extends AbstractDriver { String loginUrl = "http://" + host + ":" + port + "/rest/login/" + props.getProperty(TSDBDriver.PROPERTY_KEY_USER) + "/" + props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD) + ""; try { - String user = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_USER), "UTF-8"); - String password = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD), "UTF-8"); + if (!props.containsKey(TSDBDriver.PROPERTY_KEY_USER)) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED); + if (!props.containsKey(TSDBDriver.PROPERTY_KEY_PASSWORD)) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED); + + String user = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_USER), StandardCharsets.UTF_8.displayName()); + String password = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD), StandardCharsets.UTF_8.displayName()); loginUrl = "http://" + props.getProperty(TSDBDriver.PROPERTY_KEY_HOST) + ":" + props.getProperty(TSDBDriver.PROPERTY_KEY_PORT) + "/rest/login/" + 
user + "/" + password + ""; } catch (UnsupportedEncodingException e) { e.printStackTrace(); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java index f8acd8f061..a88dc411f3 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java @@ -7,6 +7,7 @@ import com.taosdata.jdbc.AbstractStatement; import com.taosdata.jdbc.TSDBDriver; import com.taosdata.jdbc.TSDBError; import com.taosdata.jdbc.TSDBErrorNumbers; +import com.taosdata.jdbc.enums.TimestampFormat; import com.taosdata.jdbc.utils.HttpClientPoolUtil; import com.taosdata.jdbc.utils.SqlSyntaxValidator; @@ -45,9 +46,7 @@ public class RestfulStatement extends AbstractStatement { if (!SqlSyntaxValidator.isValidForExecuteUpdate(sql)) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_UPDATE, "not a valid sql for executeUpdate: " + sql); - final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql"; - - return executeOneUpdate(url, sql); + return executeOneUpdate(sql); } @Override @@ -62,34 +61,25 @@ public class RestfulStatement extends AbstractStatement { public boolean execute(String sql) throws SQLException { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - if (!SqlSyntaxValidator.isValidForExecute(sql)) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE, "not a valid sql for execute: " + sql); //如果执行了use操作应该将当前Statement的catalog设置为新的database boolean result = true; - String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql"; - if (conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).equals("TIMESTAMP")) { - url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt"; - } - if (conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).equals("UTC")) { - url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc"; - } if (SqlSyntaxValidator.isUseSql(sql)) { - HttpClientPoolUtil.execute(url, sql, this.conn.getToken()); + HttpClientPoolUtil.execute(getUrl(), sql, this.conn.getToken()); this.database = sql.trim().replace("use", "").trim(); this.conn.setCatalog(this.database); result = false; } else if (SqlSyntaxValidator.isDatabaseUnspecifiedQuery(sql)) { executeOneQuery(sql); } else if (SqlSyntaxValidator.isDatabaseUnspecifiedUpdate(sql)) { - executeOneUpdate(url, sql); + executeOneUpdate(sql); result = false; } else { if (SqlSyntaxValidator.isValidForExecuteQuery(sql)) { - executeQuery(sql); + executeOneQuery(sql); } else { - executeUpdate(sql); + executeOneUpdate(sql); result = false; } } @@ -97,19 +87,25 @@ public class RestfulStatement extends AbstractStatement { return result; } + private String getUrl() throws SQLException { + TimestampFormat timestampFormat = TimestampFormat.valueOf(conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).trim().toUpperCase()); + String url; + switch (timestampFormat) { + case TIMESTAMP: + url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt"; + break; + case UTC: + url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc"; + break; + default: + url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql"; + } + return url; + } + private ResultSet executeOneQuery(String sql) throws SQLException { - if (!SqlSyntaxValidator.isValidForExecuteQuery(sql)) - throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_QUERY, "not a valid sql for executeQuery: " + sql); - // row data - String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql"; - String timestampFormat = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT); - if ("TIMESTAMP".equalsIgnoreCase(timestampFormat)) - url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt"; - if ("UTC".equalsIgnoreCase(timestampFormat)) - url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc"; - - String result = HttpClientPoolUtil.execute(url, sql, this.conn.getToken()); + String result = HttpClientPoolUtil.execute(getUrl(), sql, this.conn.getToken()); JSONObject resultJson = JSON.parseObject(result); if (resultJson.getString("status").equals("error")) { throw TSDBError.createSQLException(resultJson.getInteger("code"), resultJson.getString("desc")); @@ -119,11 +115,8 @@ public class RestfulStatement extends AbstractStatement { return resultSet; } - private int executeOneUpdate(String url, String sql) throws SQLException { - if (!SqlSyntaxValidator.isValidForExecuteUpdate(sql)) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_UPDATE, "not a valid sql for executeUpdate: " + sql); - - String result = HttpClientPoolUtil.execute(url, sql, this.conn.getToken()); + private int executeOneUpdate(String sql) throws SQLException { + String result = HttpClientPoolUtil.execute(getUrl(), sql, this.conn.getToken()); JSONObject jsonObject = JSON.parseObject(result); if (jsonObject.getString("status").equals("error")) { throw TSDBError.createSQLException(jsonObject.getInteger("code"), jsonObject.getString("desc")); @@ -134,7 +127,7 @@ public class RestfulStatement extends AbstractStatement { } private int getAffectedRows(JSONObject jsonObject) throws SQLException { - // create ... SQLs should return 0 , and Restful result is this: + // create ... 
SQLs should return 0 , and Restful result like this: // {"status": "succ", "head": ["affected_rows"], "data": [[0]], "rows": 1} JSONArray head = jsonObject.getJSONArray("head"); if (head.size() != 1 || !"affected_rows".equals(head.getString(0))) diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java index 0f99ff4f66..cbd806b35a 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java @@ -16,8 +16,7 @@ package com.taosdata.jdbc.utils; public class SqlSyntaxValidator { - private static final String[] SQL = {"select", "insert", "import", "create", "use", "alter", "drop", "set", "show", "describe", "reset"}; - private static final String[] updateSQL = {"insert", "import", "create", "use", "alter", "drop", "set"}; + private static final String[] updateSQL = {"insert", "import", "create", "use", "alter", "drop", "set", "reset"}; private static final String[] querySQL = {"select", "show", "describe"}; private static final String[] databaseUnspecifiedShow = {"databases", "dnodes", "mnodes", "variables"}; @@ -38,14 +37,6 @@ public class SqlSyntaxValidator { return false; } - public static boolean isValidForExecute(String sql) { - for (String prefix : SQL) { - if (sql.trim().toLowerCase().startsWith(prefix)) - return true; - } - return false; - } - public static boolean isDatabaseUnspecifiedQuery(String sql) { for (String databaseObj : databaseUnspecifiedShow) { if (sql.trim().toLowerCase().matches("show\\s+" + databaseObj + ".*")) @@ -63,9 +54,5 @@ public class SqlSyntaxValidator { return sql.trim().toLowerCase().startsWith("use"); } - public static boolean isSelectSql(String sql) { - return sql.trim().toLowerCase().startsWith("select"); - } - } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java index 24c73fdd5c..95307071e1 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java @@ -69,6 +69,8 @@ public class SubscribeTest { @Before public void createDatabase() throws SQLException { Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata"); properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AuthenticationTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AuthenticationTest.java index 6702de9bdb..0ea46dade2 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AuthenticationTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AuthenticationTest.java @@ -1,6 +1,9 @@ package com.taosdata.jdbc.cases; +import com.taosdata.jdbc.TSDBErrorNumbers; +import org.junit.Assert; import org.junit.Before; +import org.junit.Ignore; import org.junit.Test; import java.sql.*; @@ -12,6 +15,47 @@ public class AuthenticationTest { private static final String password = "taos?data"; private Connection conn; + @Test + public void connectWithoutUserByJni() { + try { + 
DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?"); + } catch (SQLException e) { + Assert.assertEquals(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED, e.getErrorCode()); + Assert.assertEquals("ERROR (2319): user is required", e.getMessage()); + } + } + + @Test + public void connectWithoutUserByRestful() { + try { + DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?"); + } catch (SQLException e) { + Assert.assertEquals(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED, e.getErrorCode()); + Assert.assertEquals("ERROR (2319): user is required", e.getMessage()); + } + } + + @Test + public void connectWithoutPasswordByJni() { + try { + DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?user=root"); + } catch (SQLException e) { + Assert.assertEquals(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED, e.getErrorCode()); + Assert.assertEquals("ERROR (231a): password is required", e.getMessage()); + } + } + + @Test + public void connectWithoutPasswordByRestful() { + try { + DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root"); + } catch (SQLException e) { + Assert.assertEquals(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED, e.getErrorCode()); + Assert.assertEquals("ERROR (231a): password is required", e.getMessage()); + } + } + + @Ignore @Test public void test() { // change password diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchInsertTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchInsertTest.java index e175d6d114..f2b102cfe7 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchInsertTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchInsertTest.java @@ -29,6 +29,8 @@ public class BatchInsertTest { public void before() { try { Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata"); properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ImportTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ImportTest.java index bc11c7f34e..1297a6b4c4 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ImportTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ImportTest.java @@ -21,6 +21,8 @@ public class ImportTest { public static void before() { try { Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata"); properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java index 9f8243542f..ac254bebf3 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java @@ -270,6 +270,41 @@ public class InsertSpecialCharacterJniTest { } } + @Ignore + @Test + public void testSingleQuotaEscape() throws SQLException { + 
final long now = System.currentTimeMillis(); + final String sql = "insert into t? using ? tags(?) values(?, ?, ?) t? using " + tbname2 + " tags(?) values(?,?,?) "; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + // t1 + pstmt.setInt(1, 1); + pstmt.setString(2, tbname2); + pstmt.setString(3, special_character_str_5); + pstmt.setTimestamp(4, new Timestamp(now)); + pstmt.setBytes(5, special_character_str_5.getBytes()); + // t2 + pstmt.setInt(7, 2); + pstmt.setString(8, special_character_str_5); + pstmt.setTimestamp(9, new Timestamp(now)); + pstmt.setString(11, special_character_str_5); + + int ret = pstmt.executeUpdate(); + Assert.assertEquals(2, ret); + } + + String query = "select * from ?.t? where ? < ? and ts >= ? and f1 is not null"; + try (PreparedStatement pstmt = conn.prepareStatement(query)) { + pstmt.setString(1, dbName); + pstmt.setInt(2, 1); + pstmt.setString(3, "ts"); + pstmt.setTimestamp(4, new Timestamp(System.currentTimeMillis())); + pstmt.setTimestamp(5, new Timestamp(0)); + + ResultSet rs = pstmt.executeQuery(); + Assert.assertNotNull(rs); + } + } + @Test public void testCase10() throws SQLException { final long now = System.currentTimeMillis(); @@ -293,13 +328,12 @@ public class InsertSpecialCharacterJniTest { Assert.assertEquals(2, ret); } //query t1 - String query = "select * from ?.t? where ts < ? and ts >= ? and ? is not null"; + String query = "select * from ?.t? where ts < ? and ts >= ? and f1 is not null"; try (PreparedStatement pstmt = conn.prepareStatement(query)) { pstmt.setString(1, dbName); pstmt.setInt(2, 1); pstmt.setTimestamp(3, new Timestamp(System.currentTimeMillis())); pstmt.setTimestamp(4, new Timestamp(0)); - pstmt.setString(5, "f1"); ResultSet rs = pstmt.executeQuery(); rs.next(); @@ -311,12 +345,11 @@ public class InsertSpecialCharacterJniTest { Assert.assertNull(f2); } // query t2 - query = "select * from t? where ts < ? and ts >= ? and ? is not null"; + query = "select * from t? where ts < ? and ts >= ? and f2 is not null"; try (PreparedStatement pstmt = conn.prepareStatement(query)) { pstmt.setInt(1, 2); pstmt.setTimestamp(2, new Timestamp(System.currentTimeMillis())); pstmt.setTimestamp(3, new Timestamp(0)); - pstmt.setString(4, "f2"); ResultSet rs = pstmt.executeQuery(); rs.next(); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java index 2e981e7f41..eedccec6f1 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java @@ -293,13 +293,12 @@ public class InsertSpecialCharacterRestfulTest { Assert.assertEquals(2, ret); } //query t1 - String query = "select * from ?.t? where ts < ? and ts >= ? and ? is not null"; + String query = "select * from ?.t? where ts < ? and ts >= ? and f1 is not null"; try (PreparedStatement pstmt = conn.prepareStatement(query)) { pstmt.setString(1, dbName); pstmt.setInt(2, 1); pstmt.setTimestamp(3, new Timestamp(System.currentTimeMillis())); pstmt.setTimestamp(4, new Timestamp(0)); - pstmt.setString(5, "f1"); ResultSet rs = pstmt.executeQuery(); rs.next(); @@ -311,12 +310,11 @@ public class InsertSpecialCharacterRestfulTest { Assert.assertNull(f2); } // query t2 - query = "select * from t? where ts < ? and ts >= ? and ? is not null"; + query = "select * from t? where ts < ? and ts >= ? 
and f2 is not null"; try (PreparedStatement pstmt = conn.prepareStatement(query)) { pstmt.setInt(1, 2); pstmt.setTimestamp(2, new Timestamp(System.currentTimeMillis())); pstmt.setTimestamp(3, new Timestamp(0)); - pstmt.setString(4, "f2"); ResultSet rs = pstmt.executeQuery(); rs.next(); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java index 3fea221446..b4449491a9 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java @@ -22,6 +22,8 @@ public class QueryDataTest { public void createDatabase() { try { Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata"); properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ResetQueryCacheTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ResetQueryCacheTest.java index 4eebbd08e8..48753d62f0 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ResetQueryCacheTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ResetQueryCacheTest.java @@ -1,51 +1,49 @@ package com.taosdata.jdbc.cases; -import com.taosdata.jdbc.TSDBDriver; -import org.junit.After; -import org.junit.Before; import org.junit.Test; -import java.sql.*; -import java.util.Properties; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.sql.Statement; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; public class ResetQueryCacheTest { - static Connection connection; - static Statement statement; - static String host = "127.0.0.1"; + @Test + public void jni() throws SQLException { + // given + Connection connection = DriverManager.getConnection("jdbc:TAOS://127.0.0.1:0/?user=root&password=taosdata&timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8"); + Statement statement = connection.createStatement(); - @Before - public void init() { - try { - Properties properties = new Properties(); - properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); - connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties); - statement = connection.createStatement(); - } catch (SQLException e) { - return; - } + // when + boolean execute = statement.execute("reset query cache"); + + // then + assertFalse(execute); + assertEquals(0, statement.getUpdateCount()); + + statement.close(); + connection.close(); } @Test - public void testResetQueryCache() throws SQLException { - String resetSql = "reset query cache"; - statement.execute(resetSql); - } + public void restful() throws SQLException { + // given + Connection connection = DriverManager.getConnection("jdbc:TAOS-RS://127.0.0.1:6041/?user=root&password=taosdata&timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8"); + Statement statement = connection.createStatement(); - @After - public void close() { - try { - if (statement != null) - statement.close(); - if (connection != null) - 
connection.close(); - } catch (SQLException e) { - e.printStackTrace(); - } + // when + boolean execute = statement.execute("reset query cache"); + + // then + assertFalse(execute); + assertEquals(0, statement.getUpdateCount()); + + statement.close(); + connection.close(); } } \ No newline at end of file diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/SelectTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/SelectTest.java index 0022ceaf21..b51c0309be 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/SelectTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/SelectTest.java @@ -20,6 +20,8 @@ public class SelectTest { public void createDatabaseAndTable() { try { Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata"); properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/StableTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/StableTest.java index 1600fec13d..8e10743e5e 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/StableTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/StableTest.java @@ -24,6 +24,8 @@ public class StableTest { public static void createDatabase() { try { Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata"); properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/SqlSyntaxValidatorTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/SqlSyntaxValidatorTest.java deleted file mode 100644 index ccb0941da0..0000000000 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/SqlSyntaxValidatorTest.java +++ /dev/null @@ -1,21 +0,0 @@ -package com.taosdata.jdbc.utils; - -import org.junit.Assert; -import org.junit.Test; - -public class SqlSyntaxValidatorTest { - - @Test - public void isSelectSQL() { - Assert.assertTrue(SqlSyntaxValidator.isSelectSql("select * from test.weather")); - Assert.assertTrue(SqlSyntaxValidator.isSelectSql(" select * from test.weather")); - Assert.assertTrue(SqlSyntaxValidator.isSelectSql(" select * from test.weather ")); - Assert.assertFalse(SqlSyntaxValidator.isSelectSql("insert into test.weather values(now, 1.1, 2)")); - } - - @Test - public void isUseSQL() { - Assert.assertTrue(SqlSyntaxValidator.isUseSql("use database test")); - } - -} \ No newline at end of file From 6726f1960766c01953853e099cc9039e00b2a12b Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 11 Aug 2021 09:31:27 +0800 Subject: [PATCH 071/185] add some ci time[ci skip] --- Jenkinsfile | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 4838850722..f9f70a762d 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -180,7 +180,7 @@ pipeline { steps { pre_test() - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ sh ''' date cd ${WKC}/tests @@ -195,7 +195,7 @@ pipeline { 
steps { pre_test() - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ sh ''' date cd ${WKC}/tests @@ -207,7 +207,7 @@ pipeline { stage('python_3_s6') { agent{label 'p3'} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' date @@ -220,7 +220,7 @@ pipeline { stage('test_b1_s2') { agent{label 'b1'} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' cd ${WKC}/tests @@ -257,7 +257,7 @@ pipeline { ./handle_taosd_val_log.sh ''' } - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ sh ''' date cd ${WKC}/tests @@ -281,7 +281,7 @@ pipeline { ./handle_val_log.sh ''' } - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ sh ''' date cd ${WKC}/tests @@ -298,7 +298,7 @@ pipeline { stage('test_b4_s7') { agent{label 'b4'} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' date @@ -317,7 +317,7 @@ pipeline { stage('test_b5_s8') { agent{label 'b5'} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' date @@ -330,7 +330,7 @@ pipeline { stage('test_b6_s9') { agent{label 'b6'} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' date @@ -343,7 +343,7 @@ pipeline { stage('test_b7_s10') { agent{label 'b7'} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' date From e130f9f2f9bde5f2b6649ac1f2fd8c01eceb6af9 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sun, 8 Aug 2021 16:59:04 +0800 Subject: [PATCH 072/185] [TD-5875]: taosdemo show progress --- src/kit/taosdemo/taosdemo.c | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 8db149a559..883dfd95f2 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -246,7 +246,6 @@ typedef struct SArguments_S { uint32_t disorderRatio; // 0: no disorder, >0: x% int disorderRange; // ms, us or ns. 
accordig to database precision uint32_t method_of_delete; - char ** arg_list; uint64_t totalInsertRows; uint64_t totalAffectedRows; bool demo_mode; // use default column name and semi-random data @@ -638,7 +637,6 @@ SArguments g_args = { 0, // disorderRatio 1000, // disorderRange 1, // method_of_delete - NULL, // arg_list 0, // totalInsertRows; 0, // totalAffectedRows; true, // demo_mode; @@ -6401,6 +6399,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { bool flagSleep = true; uint64_t sleepTimeTotal = 0; + int percentComplete = 0; + int64_t totalRows = insertRows * pThreadInfo->ntables; + while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { if ((flagSleep) && (insert_interval)) { st = taosGetTimestampMs(); @@ -6577,6 +6578,11 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pThreadInfo->totalAffectedRows += affectedRows; + int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows; + if (currentPercent > percentComplete ) { + printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); + percentComplete = currentPercent; + } int64_t currentPrintTime = taosGetTimestampMs(); if (currentPrintTime - lastPrintTime > 30*1000) { printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n", @@ -6598,6 +6604,8 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { } } } + if (percentComplete < 100) + printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); free_of_interlace: tmfree(pThreadInfo->buffer); @@ -6635,6 +6643,9 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { pThreadInfo->samplePos = 0; + int percentComplete = 0; + int64_t totalRows = insertRows * pThreadInfo->ntables; + for (uint64_t tableSeq = pThreadInfo->start_table_from; tableSeq <= pThreadInfo->end_table_to; tableSeq ++) { @@ -6740,6 +6751,11 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { pThreadInfo->totalAffectedRows += affectedRows; + int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows; + if (currentPercent > percentComplete ) { + printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); + percentComplete = currentPercent; + } int64_t currentPrintTime = taosGetTimestampMs(); if (currentPrintTime - lastPrintTime > 30*1000) { printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n", @@ -6762,6 +6778,8 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { __func__, __LINE__, pThreadInfo->samplePos); } } // tableSeq + if (percentComplete < 100) + printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); free_of_progressive: tmfree(pThreadInfo->buffer); From 59bb4dfbceb933b3e6b26b2f3465750f72f0c1f8 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 11 Aug 2021 10:34:18 +0800 Subject: [PATCH 073/185] test --- Jenkinsfile | 20 ++++++++++---------- tests/pytest/crash_gen/valgrind_taos.supp | 2 +- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index ca12b134d7..f293d3061b 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -181,7 +181,7 @@ pipeline { } parallel { stage('python_1_s1') { - agent {label 'tt'} + agent{label " slave1 && slave11 "} steps { pre_test() @@ -196,7 +196,7 @@ pipeline { } } stage('python_2_s5') { - agent {label 'tt'} + agent{label " slave5 && slave15 "} steps { pre_test() @@ -210,7 +210,7 @@ pipeline { } } stage('python_3_s6') { - agent {label 'tt'} + agent{label " slave6 && slave16 "} steps { timeout(time: 45, unit: 'MINUTES'){ pre_test() @@ -223,7 +223,7 @@ pipeline { } } 
stage('test_b1_s2') { - agent {label 'tt'} + agent{label " slave2 && slave12 "} steps { timeout(time: 45, unit: 'MINUTES'){ pre_test() @@ -236,7 +236,7 @@ pipeline { } stage('test_crash_gen_s3') { - agent {label 'tt'} + agent{label " slave3 && slave13 "} steps { pre_test() @@ -275,7 +275,7 @@ pipeline { } stage('test_valgrind_s4') { - agent {label 'tt'} + agent{label " slave4 && slave14 "} steps { pre_test() @@ -301,7 +301,7 @@ pipeline { } } stage('test_b4_s7') { - agent {label 'tt'} + agent{label " slave7 && slave17 "} steps { timeout(time: 45, unit: 'MINUTES'){ pre_test() @@ -320,7 +320,7 @@ pipeline { } } stage('test_b5_s8') { - agent {label 'tt'} + agent{label " slave8 && slave18 "} steps { timeout(time: 45, unit: 'MINUTES'){ pre_test() @@ -333,7 +333,7 @@ pipeline { } } stage('test_b6_s9') { - agent {label 'tt'} + agent{label " slave9 && slave19 "} steps { timeout(time: 45, unit: 'MINUTES'){ pre_test() @@ -346,7 +346,7 @@ pipeline { } } stage('test_b7_s10') { - agent {label 'tt'} + agent{label " slave10 && slave20 "} steps { timeout(time: 45, unit: 'MINUTES'){ pre_test() diff --git a/tests/pytest/crash_gen/valgrind_taos.supp b/tests/pytest/crash_gen/valgrind_taos.supp index 6c92f33018..b9296f008e 100644 --- a/tests/pytest/crash_gen/valgrind_taos.supp +++ b/tests/pytest/crash_gen/valgrind_taos.supp @@ -18108,4 +18108,4 @@ fun:_PyEval_EvalFrameDefault fun:_PyEval_EvalCodeWithName fun:_PyFunction_Vectorcall -} \ No newline at end of file +} \ No newline at end of file From 75bc3745ff4f7b3aad8c8a6fbcb9b66e50755179 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 11 Aug 2021 11:16:24 +0800 Subject: [PATCH 074/185] test --- Jenkinsfile | 14 ++++++++++---- tests/pytest/crash_gen/valgrind_taos.supp | 2 +- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index f293d3061b..cef902328c 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -135,19 +135,25 @@ pipeline { rm -rf ${WORKSPACE}.tes cp -r ${WORKSPACE} ${WORKSPACE}.tes cd ${WORKSPACE}.tes - + git fetch ''' script { if (env.CHANGE_TARGET == 'master') { sh ''' + cd ${WK} git checkout master - git pull origin master ''' } - else { + else if(env.CHANGE_TARGET == '2.0'){ sh ''' + cd ${WK} + git checkout 2.0 + ''' + } + else{ + sh ''' + cd ${WK} git checkout develop - git pull origin develop ''' } } diff --git a/tests/pytest/crash_gen/valgrind_taos.supp b/tests/pytest/crash_gen/valgrind_taos.supp index b9296f008e..6c92f33018 100644 --- a/tests/pytest/crash_gen/valgrind_taos.supp +++ b/tests/pytest/crash_gen/valgrind_taos.supp @@ -18108,4 +18108,4 @@ fun:_PyEval_EvalFrameDefault fun:_PyEval_EvalCodeWithName fun:_PyFunction_Vectorcall -} \ No newline at end of file +} \ No newline at end of file From 13512a470870362c9746622aa186958428adfe2e Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 11 Aug 2021 11:20:18 +0800 Subject: [PATCH 075/185] test --- Jenkinsfile | 3 --- tests/pytest/crash_gen/valgrind_taos.supp | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index cef902328c..bbde00d8a6 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -140,19 +140,16 @@ pipeline { script { if (env.CHANGE_TARGET == 'master') { sh ''' - cd ${WK} git checkout master ''' } else if(env.CHANGE_TARGET == '2.0'){ sh ''' - cd ${WK} git checkout 2.0 ''' } else{ sh ''' - cd ${WK} git checkout develop ''' } diff --git a/tests/pytest/crash_gen/valgrind_taos.supp b/tests/pytest/crash_gen/valgrind_taos.supp index 6c92f33018..b9296f008e 100644 --- a/tests/pytest/crash_gen/valgrind_taos.supp +++ 
b/tests/pytest/crash_gen/valgrind_taos.supp @@ -18108,4 +18108,4 @@ fun:_PyEval_EvalFrameDefault fun:_PyEval_EvalCodeWithName fun:_PyFunction_Vectorcall -} \ No newline at end of file +} \ No newline at end of file From 7d3fb6c14ff90096f6c0c91c42cf2fb3042e222a Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 11 Aug 2021 11:30:08 +0800 Subject: [PATCH 076/185] add node test --- Jenkinsfile | 20 ++++++++++---------- tests/pytest/crash_gen/valgrind_taos.supp | 2 +- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index bbde00d8a6..636232e456 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -184,7 +184,7 @@ pipeline { } parallel { stage('python_1_s1') { - agent{label " slave1 && slave11 "} + agent{label " slave1 || slave11 "} steps { pre_test() @@ -199,7 +199,7 @@ pipeline { } } stage('python_2_s5') { - agent{label " slave5 && slave15 "} + agent{label " slave5 || slave15 "} steps { pre_test() @@ -213,7 +213,7 @@ pipeline { } } stage('python_3_s6') { - agent{label " slave6 && slave16 "} + agent{label " slave6 || slave16 "} steps { timeout(time: 45, unit: 'MINUTES'){ pre_test() @@ -226,7 +226,7 @@ pipeline { } } stage('test_b1_s2') { - agent{label " slave2 && slave12 "} + agent{label " slave2 || slave12 "} steps { timeout(time: 45, unit: 'MINUTES'){ pre_test() @@ -239,7 +239,7 @@ pipeline { } stage('test_crash_gen_s3') { - agent{label " slave3 && slave13 "} + agent{label " slave3 || slave13 "} steps { pre_test() @@ -278,7 +278,7 @@ pipeline { } stage('test_valgrind_s4') { - agent{label " slave4 && slave14 "} + agent{label " slave4 || slave14 "} steps { pre_test() @@ -304,7 +304,7 @@ pipeline { } } stage('test_b4_s7') { - agent{label " slave7 && slave17 "} + agent{label " slave7 || slave17 "} steps { timeout(time: 45, unit: 'MINUTES'){ pre_test() @@ -323,7 +323,7 @@ pipeline { } } stage('test_b5_s8') { - agent{label " slave8 && slave18 "} + agent{label " slave8 || slave18 "} steps { timeout(time: 45, unit: 'MINUTES'){ pre_test() @@ -336,7 +336,7 @@ pipeline { } } stage('test_b6_s9') { - agent{label " slave9 && slave19 "} + agent{label " slave9 || slave19 "} steps { timeout(time: 45, unit: 'MINUTES'){ pre_test() @@ -349,7 +349,7 @@ pipeline { } } stage('test_b7_s10') { - agent{label " slave10 && slave20 "} + agent{label " slave10 || slave20 "} steps { timeout(time: 45, unit: 'MINUTES'){ pre_test() diff --git a/tests/pytest/crash_gen/valgrind_taos.supp b/tests/pytest/crash_gen/valgrind_taos.supp index b9296f008e..6c92f33018 100644 --- a/tests/pytest/crash_gen/valgrind_taos.supp +++ b/tests/pytest/crash_gen/valgrind_taos.supp @@ -18108,4 +18108,4 @@ fun:_PyEval_EvalFrameDefault fun:_PyEval_EvalCodeWithName fun:_PyFunction_Vectorcall -} \ No newline at end of file +} \ No newline at end of file From 251008bf1bf8cd3970d9933fbcc763167c341264 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 11 Aug 2021 11:40:59 +0800 Subject: [PATCH 077/185] test[ci skip] --- Jenkinsfile | 20 ++++++++++---------- tests/pytest/crash_gen/valgrind_taos.supp | 2 +- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 636232e456..0d9c0f132e 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -188,7 +188,7 @@ pipeline { steps { pre_test() - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ sh ''' date cd ${WKC}/tests @@ -203,7 +203,7 @@ pipeline { steps { pre_test() - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ sh ''' date cd ${WKC}/tests @@ -215,7 +215,7 @@ pipeline { stage('python_3_s6') { 
agent{label " slave6 || slave16 "} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' date @@ -228,7 +228,7 @@ pipeline { stage('test_b1_s2') { agent{label " slave2 || slave12 "} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' cd ${WKC}/tests @@ -265,7 +265,7 @@ pipeline { ./handle_taosd_val_log.sh ''' } - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ sh ''' date cd ${WKC}/tests @@ -289,7 +289,7 @@ pipeline { ./handle_val_log.sh ''' } - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ sh ''' date cd ${WKC}/tests @@ -306,7 +306,7 @@ pipeline { stage('test_b4_s7') { agent{label " slave7 || slave17 "} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' date @@ -325,7 +325,7 @@ pipeline { stage('test_b5_s8') { agent{label " slave8 || slave18 "} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' date @@ -338,7 +338,7 @@ pipeline { stage('test_b6_s9') { agent{label " slave9 || slave19 "} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' date @@ -351,7 +351,7 @@ pipeline { stage('test_b7_s10') { agent{label " slave10 || slave20 "} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' date diff --git a/tests/pytest/crash_gen/valgrind_taos.supp b/tests/pytest/crash_gen/valgrind_taos.supp index 6c92f33018..b9296f008e 100644 --- a/tests/pytest/crash_gen/valgrind_taos.supp +++ b/tests/pytest/crash_gen/valgrind_taos.supp @@ -18108,4 +18108,4 @@ fun:_PyEval_EvalFrameDefault fun:_PyEval_EvalCodeWithName fun:_PyFunction_Vectorcall -} \ No newline at end of file +} \ No newline at end of file From 9146f5fcdddc4ad391359b55c6fe832366bd89de Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 11 Aug 2021 13:25:33 +0800 Subject: [PATCH 078/185] add some ci time --- Jenkinsfile | 72 +++-- tests/pytest/crash_gen/valgrind_taos.supp | 368 +++++++++++++++++++++- 2 files changed, 413 insertions(+), 27 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 680fcce37f..0d9c0f132e 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -5,7 +5,8 @@ node { git url: 'https://github.com/taosdata/TDengine.git' } -def skipstage=0 + +def skipbuild=0 def abortPreviousBuilds() { def currentJobName = env.JOB_NAME @@ -33,8 +34,7 @@ def abort_previous(){ milestone(buildNumber) } def pre_test(){ - - + sh'hostname' sh ''' sudo rmtaos || echo "taosd has not installed" ''' @@ -52,12 +52,18 @@ def pre_test(){ git checkout master ''' } - else { + else if(env.CHANGE_TARGET == '2.0'){ + sh ''' + cd ${WKC} + git checkout 2.0 + ''' + } + else{ sh ''' cd ${WKC} git checkout develop ''' - } + } } sh''' cd ${WKC} @@ -75,7 +81,13 @@ def pre_test(){ git checkout master ''' } - else { + else if(env.CHANGE_TARGET == '2.0'){ + sh ''' + cd ${WK} + git checkout 2.0 + ''' + } + else{ sh ''' cd ${WK} git checkout develop @@ -95,7 +107,7 @@ def pre_test(){ make > /dev/null make install > /dev/null cd ${WKC}/tests - pip3 install ${WKC}/src/connector/python + pip3 install ${WKC}/src/connector/python/ ''' return 1 } @@ -123,19 +135,22 @@ pipeline { rm -rf ${WORKSPACE}.tes cp -r ${WORKSPACE} ${WORKSPACE}.tes cd ${WORKSPACE}.tes - + git fetch ''' script { if (env.CHANGE_TARGET == 'master') { sh ''' git checkout master - git pull origin master ''' } - else { + else if(env.CHANGE_TARGET == '2.0'){ + sh ''' + git 
checkout 2.0 + ''' + } + else{ sh ''' git checkout develop - git pull origin develop ''' } } @@ -144,10 +159,13 @@ pipeline { git checkout -qf FETCH_HEAD ''' - script{ - env.skipstage=sh(script:"cd ${WORKSPACE}.tes && git --no-pager diff --name-only FETCH_HEAD ${env.CHANGE_TARGET}|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0 ",returnStdout:true) + + script{ + skipbuild='2' + skipbuild=sh(script: "git log -2 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]' && echo 1 || echo 2", returnStdout:true) + println skipbuild + } - println env.skipstage sh''' rm -rf ${WORKSPACE}.tes ''' @@ -157,14 +175,16 @@ pipeline { stage('Parallel test stage') { //only build pr when { + allOf{ changeRequest() - expression { - env.skipstage != 0 + expression{ + return skipbuild.trim() == '2' } + } } parallel { stage('python_1_s1') { - agent{label 'p1'} + agent{label " slave1 || slave11 "} steps { pre_test() @@ -179,7 +199,7 @@ pipeline { } } stage('python_2_s5') { - agent{label 'p2'} + agent{label " slave5 || slave15 "} steps { pre_test() @@ -193,7 +213,7 @@ pipeline { } } stage('python_3_s6') { - agent{label 'p3'} + agent{label " slave6 || slave16 "} steps { timeout(time: 55, unit: 'MINUTES'){ pre_test() @@ -206,7 +226,7 @@ pipeline { } } stage('test_b1_s2') { - agent{label 'b1'} + agent{label " slave2 || slave12 "} steps { timeout(time: 55, unit: 'MINUTES'){ pre_test() @@ -219,7 +239,7 @@ pipeline { } stage('test_crash_gen_s3') { - agent{label "b2"} + agent{label " slave3 || slave13 "} steps { pre_test() @@ -258,7 +278,7 @@ pipeline { } stage('test_valgrind_s4') { - agent{label "b3"} + agent{label " slave4 || slave14 "} steps { pre_test() @@ -284,7 +304,7 @@ pipeline { } } stage('test_b4_s7') { - agent{label 'b4'} + agent{label " slave7 || slave17 "} steps { timeout(time: 55, unit: 'MINUTES'){ pre_test() @@ -303,7 +323,7 @@ pipeline { } } stage('test_b5_s8') { - agent{label 'b5'} + agent{label " slave8 || slave18 "} steps { timeout(time: 55, unit: 'MINUTES'){ pre_test() @@ -316,7 +336,7 @@ pipeline { } } stage('test_b6_s9') { - agent{label 'b6'} + agent{label " slave9 || slave19 "} steps { timeout(time: 55, unit: 'MINUTES'){ pre_test() @@ -329,7 +349,7 @@ pipeline { } } stage('test_b7_s10') { - agent{label 'b7'} + agent{label " slave10 || slave20 "} steps { timeout(time: 55, unit: 'MINUTES'){ pre_test() diff --git a/tests/pytest/crash_gen/valgrind_taos.supp b/tests/pytest/crash_gen/valgrind_taos.supp index 376567b7e8..b9296f008e 100644 --- a/tests/pytest/crash_gen/valgrind_taos.supp +++ b/tests/pytest/crash_gen/valgrind_taos.supp @@ -17742,4 +17742,370 @@ fun:taosGetFqdn fun:taosCheckGlobalCfg fun:taos_init_imp -} \ No newline at end of file +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_Pack + obj:/usr/bin/python3.8 + fun:PyObject_GetItem + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:_PyObject_MakeTpCall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + fun:PyCode_NewWithPosOnlyArgs + fun:PyCode_New + 
obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/tslibs/parsing.cpython-38-x86_64-linux-gnu.so + obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/tslibs/parsing.cpython-38-x86_64-linux-gnu.so + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:_PyObject_MakeTpCall + fun:_PyEval_EvalFrameDefault + obj:/usr/local/lib/python3.8 + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_New + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_New + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:_PyObject_MakeTpCall + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:_PyObject_MakeTpCall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun: malloc + obj:/usr/bin/python3.8 + fun:PyTuple_Pack + obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/interval.cpython-38-x86_64-linux-gnu.so + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun: malloc + obj:/usr/bin/python3.8 + fun:_PyObject_MakeTpCall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:_PyObject_MakeTpCall + obj:/usr/bin/python3.8 + fun:PyObject_CallFunctionObjArgs + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyObject_GetAttr + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + 
obj:/usr/bin/python3.8 + fun:_PyObject_MakeTpCall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8) + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8) + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + obj:/usr/bin/python3.8) + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8) + fun:PyTuple_Pack + obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/interval.cpython-38-x86_64-linux-gnu.so + fun:PyModule_ExecDef + obj:/usr/bin/python3.8) + obj:/usr/bin/python3.8) + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_Pack + obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/tslibs/np_datetime.cpython-38-x86_64-linux-gnu.so + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_Pack + obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/tslibs/ccalendar.cpython-38-x86_64-linux-gnu.so + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:_PyObject_MakeTpCall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_Pack + obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/interval.cpython-38-x86_64-linux-gnu.so + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_Pack + obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/hashtable.cpython-38-x86_64-linux-gnu.so + obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/hashtable.cpython-38-x86_64-linux-gnu.so + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall +} +{ + + Memcheck:Leak + match-leak-kinds: definite + ... + obj:/usr/local/lib/python3.8/dist-packages/pandas/* + ... 
+} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_New + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_New + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:_PyObject_MakeTpCall + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:PyObject_GetAttr + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_New + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall +} \ No newline at end of file From 05fc8f5e741a4c8124fa623cd2b5063a33dc5f54 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 11 Aug 2021 13:34:09 +0800 Subject: [PATCH 079/185] test --- tests/pytest/crash_gen/valgrind_taos.supp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pytest/crash_gen/valgrind_taos.supp b/tests/pytest/crash_gen/valgrind_taos.supp index b9296f008e..716d18a984 100644 --- a/tests/pytest/crash_gen/valgrind_taos.supp +++ b/tests/pytest/crash_gen/valgrind_taos.supp @@ -18108,4 +18108,4 @@ fun:_PyEval_EvalFrameDefault fun:_PyEval_EvalCodeWithName fun:_PyFunction_Vectorcall -} \ No newline at end of file +} \ No newline at end of file From 8a206c73ea1ea3698d7bc43705f6e1533dcd719e Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 11 Aug 2021 13:34:18 +0800 Subject: [PATCH 080/185] test --- Jenkinsfile | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 0d9c0f132e..085cd78a98 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -4,10 +4,6 @@ properties([pipelineTriggers([githubPush()])]) node { git url: 'https://github.com/taosdata/TDengine.git' } - - -def skipbuild=0 - def abortPreviousBuilds() { def currentJobName = env.JOB_NAME def currentBuildNumber = env.BUILD_NUMBER.toInteger() @@ -114,12 +110,10 @@ def pre_test(){ pipeline { agent none - environment{ WK = '/var/lib/jenkins/workspace/TDinternal' WKC= '/var/lib/jenkins/workspace/TDinternal/community' } - stages { stage('pre_build'){ agent{label 'master'} @@ -158,20 +152,16 @@ pipeline { git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD ''' - - script{ skipbuild='2' skipbuild=sh(script: "git log -2 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]' && echo 1 || echo 2", returnStdout:true) println skipbuild - } sh''' rm -rf ${WORKSPACE}.tes ''' } } - stage('Parallel test stage') { //only build pr when { @@ -237,7 +227,6 @@ pipeline { } } } - stage('test_crash_gen_s3') { agent{label " slave3 || slave13 "} @@ -272,11 +261,9 @@ pipeline { ./test-all.sh b2fq date ''' - } - + } } } - stage('test_valgrind_s4') { agent{label " slave4 || slave14 "} From ba04baecf3737c67ce1e2ca730bbd3f53e542cfc Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 11 Aug 2021 13:38:32 +0800 Subject: [PATCH 081/185] test[ci skip] --- Jenkinsfile | 5 ++--- tests/pytest/crash_gen/valgrind_taos.supp | 2 +- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/Jenkinsfile 
b/Jenkinsfile index 085cd78a98..8fa7e78a5f 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -428,6 +428,5 @@ pipeline { from: "support@taosdata.com" ) } - } - -} + } +} \ No newline at end of file diff --git a/tests/pytest/crash_gen/valgrind_taos.supp b/tests/pytest/crash_gen/valgrind_taos.supp index 716d18a984..6c92f33018 100644 --- a/tests/pytest/crash_gen/valgrind_taos.supp +++ b/tests/pytest/crash_gen/valgrind_taos.supp @@ -18108,4 +18108,4 @@ fun:_PyEval_EvalFrameDefault fun:_PyEval_EvalCodeWithName fun:_PyFunction_Vectorcall -} \ No newline at end of file +} \ No newline at end of file From 52848c4de5ac5d6415267e8a08fb7860acb8cc43 Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Wed, 11 Aug 2021 14:55:45 +0800 Subject: [PATCH 082/185] [TD-5923]finish test--->where tbname in UPPER/lower/mIXed add query/queryTbnameUpperLower.py add util/common.py for adding common function in case of duplicate use --- tests/pytest/fulltest.sh | 1 + tests/pytest/query/queryTbnameUpperLower.py | 78 +++++++++++++++++++++ tests/pytest/util/common.py | 53 ++++++++++++++ 3 files changed, 132 insertions(+) create mode 100644 tests/pytest/query/queryTbnameUpperLower.py create mode 100644 tests/pytest/util/common.py diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index fdcd0a3c42..17c2fc6915 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -381,6 +381,7 @@ python3 test.py -f alter/alter_create_exception.py python3 ./test.py -f insert/flushwhiledrop.py python3 ./test.py -f insert/schemalessInsert.py python3 ./test.py -f alter/alterColMultiTimes.py +python3 ./test.py -f query/queryTbnameUpperLower.py #======================p4-end=============== diff --git a/tests/pytest/query/queryTbnameUpperLower.py b/tests/pytest/query/queryTbnameUpperLower.py new file mode 100644 index 0000000000..bd4e85c5ca --- /dev/null +++ b/tests/pytest/query/queryTbnameUpperLower.py @@ -0,0 +1,78 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +from util.common import tdCom + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def checkStbWhereIn(self): + ''' + where in ---> upper lower mixed + ''' + tdCom.cleanTb() + table_name = tdCom.getLongName(8, "letters_mixed") + table_name_sub = f'{table_name}_sub' + tb_name_lower = table_name_sub.lower() + tb_name_upper = table_name_sub.upper() + + ## create stb and tb + tdSql.execute(f'CREATE TABLE {table_name} (ts timestamp, id int, bi1 binary(20)) tags (si1 binary(20))') + tdSql.execute(f'create table {table_name_sub}1 using {table_name} tags ("{table_name_sub}1")') + tdSql.execute(f'create table {tb_name_lower}2 using {table_name} tags ("{tb_name_lower}2")') + tdSql.execute(f'create table {tb_name_upper}3 using {table_name} tags ("{tb_name_upper}3")') + + ## insert values + tdSql.execute(f'insert into {table_name_sub}1 values (now-1s, 1, "{table_name_sub}1")') + tdSql.execute(f'insert into {tb_name_lower}2 values (now-2s, 2, "{tb_name_lower}21")') + tdSql.execute(f'insert into {tb_name_lower}2 values (now-3s, 3, "{tb_name_lower}22")') + tdSql.execute(f'insert into {tb_name_upper}3 values (now-4s, 4, "{tb_name_upper}31")') + tdSql.execute(f'insert into {tb_name_upper}3 values (now-5s, 5, "{tb_name_upper}32")') + tdSql.execute(f'insert into {tb_name_upper}3 values (now-6s, 6, "{tb_name_upper}33")') + + ## query where tbname in single + tdSql.query(f'select * from {table_name} where tbname in ("{table_name_sub}1")') + tdSql.checkRows(1) + tdSql.query(f'select * from {table_name} where tbname in ("{table_name_sub.upper()}1")') + tdSql.checkRows(1) + tdSql.query(f'select * from {table_name} where tbname in ("{table_name_sub.lower()}1")') + tdSql.checkRows(1) + tdSql.query(f'select * from {table_name} where tbname in ("{tb_name_lower}2")') + tdSql.checkRows(2) + tdSql.query(f'select * from {table_name} where tbname in ("{tb_name_lower.upper()}2")') + tdSql.checkRows(2) + tdSql.query(f'select * from {table_name} where tbname in ("{tb_name_upper}3")') + tdSql.checkRows(3) + tdSql.query(f'select * from {table_name} where tbname in ("{tb_name_upper.lower()}3")') + tdSql.checkRows(3) + + ## query where tbname in multi + tdSql.query(f'select * from {table_name} where id=5 and tbname in ("{table_name_sub}1", "{tb_name_lower.upper()}2", "{tb_name_upper.lower()}3")') + tdSql.checkRows(1) + tdSql.query(f'select * from {table_name} where tbname in ("{table_name_sub}1", "{tb_name_lower.upper()}2", "{tb_name_upper.lower()}3")') + tdSql.checkRows(6) + + def run(self): + tdSql.prepare() + self.checkStbWhereIn() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py new file mode 100644 index 0000000000..1c7d94a8a4 --- /dev/null +++ b/tests/pytest/util/common.py @@ -0,0 +1,53 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. 
+# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import random +import string +from util.sql import tdSql + +class TDCom: + def init(self, conn, logSql): + tdSql.init(conn.cursor(), logSql) + + def cleanTb(self): + query_sql = "show stables" + res_row_list = tdSql.query(query_sql, True) + stb_list = map(lambda x: x[0], res_row_list) + for stb in stb_list: + tdSql.execute(f'drop table if exists {stb}') + + query_sql = "show tables" + res_row_list = tdSql.query(query_sql, True) + tb_list = map(lambda x: x[0], res_row_list) + for tb in tb_list: + tdSql.execute(f'drop table if exists {tb}') + + def getLongName(self, len, mode = "mixed"): + """ + generate long name + mode could be numbers/letters/letters_mixed/mixed + """ + if mode == "numbers": + chars = ''.join(random.choice(string.digits) for i in range(len)) + elif mode == "letters": + chars = ''.join(random.choice(string.ascii_letters.lower()) for i in range(len)) + elif mode == "letters_mixed": + chars = ''.join(random.choice(string.ascii_letters.upper() + string.ascii_letters.lower()) for i in range(len)) + else: + chars = ''.join(random.choice(string.ascii_letters.lower() + string.digits) for i in range(len)) + return chars + + def close(self): + self.cursor.close() + +tdCom = TDCom() \ No newline at end of file From 1ab504f90c39c3bfe1358103a15198ab7f709e3a Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 11 Aug 2021 18:25:26 +0800 Subject: [PATCH 083/185] [TD-5978]: taosdemo set only one custom field. 
(#7301) --- src/kit/taosdemo/taosdemo.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 8db149a559..2536736cb6 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -1010,6 +1010,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { exit(EXIT_FAILURE); } arguments->datatype[0] = argv[i]; + arguments->datatype[1] = NULL; } else { // more than one col int index = 0; From 2dd5f45b544c050a2d6535ecc620a4bf9e7cfba1 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Wed, 11 Aug 2021 18:43:42 +0800 Subject: [PATCH 084/185] [TD-5835]: update performance test script --- tests/pytest/tools/taosdemoPerformance.py | 62 +++++++++++++++++++---- 1 file changed, 53 insertions(+), 9 deletions(-) diff --git a/tests/pytest/tools/taosdemoPerformance.py b/tests/pytest/tools/taosdemoPerformance.py index 6b5681dfbc..c28f94b3db 100644 --- a/tests/pytest/tools/taosdemoPerformance.py +++ b/tests/pytest/tools/taosdemoPerformance.py @@ -19,11 +19,16 @@ import json import sys class taosdemoPerformace: - def __init__(self, commitID, dbName, branch, type): + def __init__(self, commitID, dbName, branch, type, numOfTables, numOfRows, numOfInt, numOfDouble, numOfBinary): self.commitID = commitID self.dbName = dbName self.branch = branch self.type = type + self.numOfTables = numOfTables + self.numOfRows = numOfRows + self.numOfInt = numOfInt + self.numOfDouble = numOfDouble + self.numOfBinary = numOfBinary self.host = "127.0.0.1" self.user = "root" self.password = "taosdata" @@ -51,13 +56,13 @@ class taosdemoPerformace: stb = { "name": "meters", "child_table_exists": "no", - "childtable_count": 10000, + "childtable_count": self.numOfTables, "childtable_prefix": "stb_", "auto_create_table": "no", "data_source": "rand", "batch_create_tbl_num": 10, "insert_mode": "taosc", - "insert_rows": 100000, + "insert_rows": self.numOfRows, "interlace_rows": 100, "max_sql_len": 1024000, "disorder_ratio": 0, @@ -68,7 +73,9 @@ class taosdemoPerformace: "sample_file": "./sample.csv", "tags_file": "", "columns": [ - {"type": "INT", "count": 4} + {"type": "INT", "count": self.numOfInt}, + {"type": "DOUBLE", "count": self.numOfDouble}, + {"type": "BINARY", "len": 128, "count": self.numOfBinary} ], "tags": [ {"type": "INT", "count": 1}, @@ -76,6 +83,7 @@ class taosdemoPerformace: ] } + stables = [] stables.append(stb) @@ -163,7 +171,7 @@ class taosdemoPerformace: cursor.execute("create database if not exists %s" % self.dbName) cursor.execute("use %s" % self.dbName) - cursor.execute("create table if not exists taosdemo_perf (ts timestamp, create_table_time float, insert_records_time float, records_per_second float, commit_id binary(50), avg_delay float, max_delay float, min_delay float, branch binary(50), type binary(20))") + cursor.execute("create table if not exists taosdemo_perf (ts timestamp, create_table_time float, insert_records_time float, records_per_second float, commit_id binary(50), avg_delay float, max_delay float, min_delay float, branch binary(50), type binary(20), numoftables int, numofrows int, numofint int, numofdouble int, numofbinary int)") print("==================== taosdemo performance ====================") print("create tables time: %f" % float(self.createTableTime)) print("insert records time: %f" % float(self.insertRecordsTime)) @@ -171,13 +179,14 @@ class taosdemoPerformace: print("avg delay: %f" % float(self.avgDelay)) print("max delay: %f" % float(self.maxDelay)) print("min delay: %f" % float(self.minDelay)) - 
cursor.execute("insert into taosdemo_perf values(now, %f, %f, %f, '%s', %f, %f, %f, '%s', '%s')" % + cursor.execute("insert into taosdemo_perf values(now, %f, %f, %f, '%s', %f, %f, %f, '%s', '%s', %d, %d, %d, %d, %d)" % (float(self.createTableTime), float(self.insertRecordsTime), float(self.recordsPerSecond), - self.commitID, float(self.avgDelay), float(self.maxDelay), float(self.minDelay), self.branch, self.type)) + self.commitID, float(self.avgDelay), float(self.maxDelay), float(self.minDelay), self.branch, + self.type, self.numOfTables, self.numOfRows, self.numOfInt, self.numOfDouble, self.numOfBinary)) cursor.close() cursor1 = self.conn.cursor() - cursor1.execute("drop database if exists %s" % self.insertDB) + # cursor1.execute("drop database if exists %s" % self.insertDB) cursor1.close() if __name__ == '__main__': @@ -209,8 +218,43 @@ if __name__ == '__main__': default='glibc', type=str, help='build type (default: glibc)') + parser.add_argument( + '-i', + '--num-of-int', + action='store', + default=4, + type=int, + help='num of int columns (default: 4)') + parser.add_argument( + '-D', + '--num-of-double', + action='store', + default=0, + type=int, + help='num of double columns (default: 4)') + parser.add_argument( + '-B', + '--num-of-binary', + action='store', + default=0, + type=int, + help='num of binary columns (default: 4)') + parser.add_argument( + '-t', + '--num-of-tables', + action='store', + default=10000, + type=int, + help='num of tables (default: 10000)') + parser.add_argument( + '-r', + '--num-of-rows', + action='store', + default=100000, + type=int, + help='num of rows (default: 100000)') args = parser.parse_args() - perftest = taosdemoPerformace(args.commit_id, args.database_name, args.git_branch, args.build_type) + perftest = taosdemoPerformace(args.commit_id, args.database_name, args.git_branch, args.build_type, args.num_of_tables, args.num_of_rows, args.num_of_int, args.num_of_double, args.num_of_binary) perftest.insertData() perftest.createTablesAndStoreData() From 24eda742e42011331d69fc0c183e4d20a7f6246a Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 11 Aug 2021 18:50:07 +0800 Subject: [PATCH 085/185] [TD-5918] add the configuration of max length wild cards --- packaging/cfg/taos.cfg | 3 +++ src/client/src/tscSQLParser.c | 16 ++++++++++------ src/common/inc/tglobal.h | 1 + src/common/src/tglobal.c | 13 +++++++++++++ src/util/inc/tcompare.h | 2 +- src/util/src/tcompare.c | 19 ++++++++++--------- .../functions/showOfflineThresholdIs864000.py | 2 +- 7 files changed, 39 insertions(+), 17 deletions(-) diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg index bbe6eae419..3ae4e9941e 100644 --- a/packaging/cfg/taos.cfg +++ b/packaging/cfg/taos.cfg @@ -144,6 +144,9 @@ keepColumnName 1 # max length of an SQL # maxSQLLength 65480 +# max length of WildCards +# maxWildCardsLength 100 + # the maximum number of records allowed for super table time sorting # maxNumOfOrderedRes 100000 diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 6df724881e..490c8be468 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -3218,7 +3218,7 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { pCmd->command = TSDB_SQL_SHOW; const char* msg1 = "invalid name"; - const char* msg2 = "pattern filter string too long"; + const char* msg2 = "wildcard string should be less than %d characters"; const char* msg3 = "database name too long"; const char* msg4 = "invalid ip address"; const char* msg5 = "database name is empty"; @@ 
-3262,8 +3262,10 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); } - if (!tscValidateTableNameLength(pCmd->payloadLen)) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + if (pPattern->n > tsMaxWildCardsLen){ + char tmp[64] = {0}; + sprintf(tmp, msg2, tsMaxWildCardsLen); + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), tmp); } } } else if (showType == TSDB_MGMT_TABLE_VNODES) { @@ -4394,15 +4396,17 @@ static int32_t validateNullExpr(tSqlExpr* pExpr, char* msgBuf) { // check for like expression static int32_t validateLikeExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t index, char* msgBuf) { - const char* msg1 = "wildcard string should be less than 20 characters"; + const char* msg1 = "wildcard string should be less than %d characters"; const char* msg2 = "illegal column name"; tSqlExpr* pLeft = pExpr->pLeft; tSqlExpr* pRight = pExpr->pRight; if (pExpr->tokenId == TK_LIKE) { - if (pRight->value.nLen > TSDB_PATTERN_STRING_MAX_LEN) { - return invalidOperationMsg(msgBuf, msg1); + if (pRight->value.nLen > tsMaxWildCardsLen) { + char tmp[64] = {0}; + sprintf(tmp, msg1, tsMaxWildCardsLen); + return invalidOperationMsg(msgBuf, tmp); } SSchema* pSchema = tscGetTableSchema(pTableMeta); diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index 7290db6ec9..25d1c90ec5 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -70,6 +70,7 @@ extern int8_t tsKeepOriginalColumnName; // client extern int32_t tsMaxSQLStringLen; +extern int32_t tsMaxWildCardsLen; extern int8_t tsTscEnableRecordSql; extern int32_t tsMaxNumOfOrderedResults; extern int32_t tsMinSlidingTime; diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index a58303e9fc..f9135605bb 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -25,6 +25,7 @@ #include "tutil.h" #include "tlocale.h" #include "ttimezone.h" +#include "tcompare.h" // cluster char tsFirst[TSDB_EP_LEN] = {0}; @@ -75,6 +76,7 @@ int32_t tsCompressMsgSize = -1; // client int32_t tsMaxSQLStringLen = TSDB_MAX_ALLOWED_SQL_LEN; +int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_MAX_LEN; int8_t tsTscEnableRecordSql = 0; // the maximum number of results for projection query on super table that are returned from @@ -984,6 +986,16 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_BYTE; taosInitConfigOption(cfg); + cfg.option = "maxWildCardsLength"; + cfg.ptr = &tsMaxWildCardsLen; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW; + cfg.minValue = 0; + cfg.maxValue = TSDB_MAX_FIELD_LEN; + cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_BYTE; + taosInitConfigOption(cfg); + cfg.option = "maxNumOfOrderedRes"; cfg.ptr = &tsMaxNumOfOrderedResults; cfg.valType = TAOS_CFG_VTYPE_INT32; @@ -1531,6 +1543,7 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + assert(tsGlobalConfigNum <= TSDB_CFG_MAX_NUM); #ifdef TD_TSZ // lossy compress cfg.option = "lossyColumns"; diff --git a/src/util/inc/tcompare.h b/src/util/inc/tcompare.h index 612ce7ede0..fe46f00086 100644 --- a/src/util/inc/tcompare.h +++ b/src/util/inc/tcompare.h @@ -25,7 +25,7 @@ extern "C" { #define TSDB_PATTERN_MATCH 0 #define TSDB_PATTERN_NOMATCH 1 #define TSDB_PATTERN_NOWILDCARDMATCH 2 -#define TSDB_PATTERN_STRING_MAX_LEN 20 +#define TSDB_PATTERN_STRING_MAX_LEN 100 #define FLT_COMPAR_TOL_FACTOR 4 #define 
FLT_EQUAL(_x, _y) (fabs((_x) - (_y)) <= (FLT_COMPAR_TOL_FACTOR * FLT_EPSILON)) diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index e953f4c464..ebcee2edff 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -264,18 +264,19 @@ int WCSPatternMatch(const wchar_t *patterStr, const wchar_t *str, size_t size, c static int32_t compareStrPatternComp(const void* pLeft, const void* pRight) { SPatternCompareInfo pInfo = {'%', '_'}; - - char pattern[128] = {0}; + + assert(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN); + char *pattern = calloc(varDataLen(pRight) + 1, sizeof(char)); memcpy(pattern, varDataVal(pRight), varDataLen(pRight)); - assert(varDataLen(pRight) < 128); size_t sz = varDataLen(pLeft); - char *buf = malloc(sz + 1); - memcpy(buf, varDataVal(pLeft), sz); + char *buf = malloc(sz + 1); + memcpy(buf, varDataVal(pLeft), sz); buf[sz] = 0; int32_t ret = patternMatch(pattern, buf, sz, &pInfo); free(buf); + free(pattern); return (ret == TSDB_PATTERN_MATCH) ? 0 : 1; } @@ -297,13 +298,13 @@ static int32_t compareFindItemInSet(const void *pLeft, const void* pRight) { static int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) { SPatternCompareInfo pInfo = {'%', '_'}; - wchar_t pattern[128] = {0}; - assert(TSDB_PATTERN_STRING_MAX_LEN < 128); + assert(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN * TSDB_NCHAR_SIZE); + wchar_t *pattern = calloc(varDataLen(pRight) + 1, sizeof(wchar_t)); memcpy(pattern, varDataVal(pRight), varDataLen(pRight)); - assert(varDataLen(pRight) < 128); - + int32_t ret = WCSPatternMatch(pattern, varDataVal(pLeft), varDataLen(pLeft)/TSDB_NCHAR_SIZE, &pInfo); + free(pattern); return (ret == TSDB_PATTERN_MATCH) ? 0 : 1; } diff --git a/tests/pytest/functions/showOfflineThresholdIs864000.py b/tests/pytest/functions/showOfflineThresholdIs864000.py index a7a1c2bf3f..57d0b1921b 100644 --- a/tests/pytest/functions/showOfflineThresholdIs864000.py +++ b/tests/pytest/functions/showOfflineThresholdIs864000.py @@ -25,7 +25,7 @@ class TDTestCase: def run(self): tdSql.query("show variables") - tdSql.checkData(53, 1, 864000) + tdSql.checkData(54, 1, 864000) def stop(self): tdSql.close() From 70f76974067260a1e6a95760bd121829ad807269 Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Wed, 11 Aug 2021 19:17:08 +0800 Subject: [PATCH 086/185] [TD-5616]like wildcard max_length test--merge to master add getVariable() to util/sql.py add 4 testcases to query/queryWildcardLength.py --- tests/pytest/fulltest.sh | 1 + tests/pytest/query/queryWildcardLength.py | 207 ++++++++++++++++++++++ tests/pytest/util/sql.py | 16 ++ 3 files changed, 224 insertions(+) create mode 100644 tests/pytest/query/queryWildcardLength.py diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 5df37bb182..8b015727ea 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -382,6 +382,7 @@ python3 test.py -f alter/alter_create_exception.py python3 ./test.py -f insert/flushwhiledrop.py python3 ./test.py -f insert/schemalessInsert.py python3 ./test.py -f alter/alterColMultiTimes.py +python3 ./test.py -f query/queryWildcardLength.py #======================p4-end=============== diff --git a/tests/pytest/query/queryWildcardLength.py b/tests/pytest/query/queryWildcardLength.py new file mode 100644 index 0000000000..1fc46fe7d6 --- /dev/null +++ b/tests/pytest/query/queryWildcardLength.py @@ -0,0 +1,207 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. 
+# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +from copy import deepcopy +import string +import random +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def cleanTb(self): + query_sql = "show stables" + res_row_list = tdSql.query(query_sql, True) + stb_list = map(lambda x: x[0], res_row_list) + for stb in stb_list: + tdSql.execute(f'drop table if exists {stb}') + + query_sql = "show tables" + res_row_list = tdSql.query(query_sql, True) + tb_list = map(lambda x: x[0], res_row_list) + for tb in tb_list: + tdSql.execute(f'drop table if exists {tb}') + + def getLongWildcardStr(self, len=None): + """ + generate long wildcard str + """ + maxWildCardsLength = int(tdSql.getVariable('maxWildCardsLength')[0]) + if len: + chars = ''.join(random.choice(string.ascii_letters.lower()) for i in range(len)) + else: + chars = ''.join(random.choice(string.ascii_letters.lower()) for i in range(maxWildCardsLength+1)) + return chars + + def genTableName(self): + ''' + generate table name + hp_name--->'%str' + lp_name--->'str%' + ul_name--->'st_r' + ''' + table_name = self.getLongWildcardStr() + table_name_list = list(table_name) + table_name_list.pop(-1) + + if len(table_name_list) > 1: + lp_name = deepcopy(table_name_list) + lp_name[-1] = '%' + lp_name = ''.join(lp_name) + + ul_name = list(lp_name) + ul_name[int(len(ul_name)/2)] = '_' + ul_name = ''.join(ul_name) + + table_name_list = list(table_name) + hp_name = deepcopy(table_name_list) + hp_name.pop(1) + hp_name[0] = '%' + hp_name = ''.join(hp_name) + else: + hp_name = '%' + lp_name = '%' + ul_name = '_' + return table_name, hp_name, lp_name, ul_name + + def checkRegularTableWildcardLength(self): + ''' + check regular table wildcard length with % and _ + ''' + self.cleanTb() + table_name, hp_name, lp_name, ul_name = self.genTableName() + tdSql.execute(f"CREATE TABLE {table_name} (ts timestamp, a1 int)") + sql_list = [f'show tables like "{hp_name}"', f'show tables like "{lp_name}"', f'show tables like "{ul_name}"'] + for sql in sql_list: + tdSql.query(sql) + if len(table_name) >= 1: + tdSql.checkRows(1) + else: + tdSql.error(sql) + + exceed_sql_list = [f'show tables like "%{hp_name}"', f'show tables like "{lp_name}%"', f'show tables like "{ul_name}%"'] + for sql in exceed_sql_list: + tdSql.error(sql) + + def checkSuperTableWildcardLength(self): + ''' + check super table wildcard length with % and _ + ''' + self.cleanTb() + table_name, hp_name, lp_name, ul_name = self.genTableName() + tdSql.execute(f"CREATE TABLE {table_name} (ts timestamp, c1 int) tags (t1 int)") + sql_list = [f'show stables like "{hp_name}"', f'show stables like "{lp_name}"', f'show stables like "{ul_name}"'] + for sql in sql_list: + tdSql.query(sql) + if len(table_name) >= 1: + tdSql.checkRows(1) + else: + tdSql.error(sql) + + exceed_sql_list = [f'show stables like "%{hp_name}"', f'show stables like "{lp_name}%"', f'show stables like "{ul_name}%"'] + for sql in exceed_sql_list: + tdSql.error(sql) + + def checkRegularWildcardSelectLength(self): + ''' + check regular table wildcard select length with % 
and _ + ''' + self.cleanTb() + table_name, hp_name, lp_name, ul_name = self.genTableName() + tdSql.execute(f"CREATE TABLE {table_name} (ts timestamp, bi1 binary(200), nc1 nchar(200))") + tdSql.execute(f'insert into {table_name} values (now, "{table_name}", "{table_name}")') + sql_list = [f'select * from {table_name} where bi1 like "{hp_name}"', + f'select * from {table_name} where bi1 like "{lp_name}"', + f'select * from {table_name} where bi1 like "{ul_name}"', + f'select * from {table_name} where nc1 like "{hp_name}"', + f'select * from {table_name} where nc1 like "{lp_name}"', + f'select * from {table_name} where nc1 like "{ul_name}"'] + for sql in sql_list: + tdSql.query(sql) + if len(table_name) >= 1: + tdSql.checkRows(1) + else: + tdSql.error(sql) + + exceed_sql_list = [f'select * from {table_name} where bi1 like "%{hp_name}"', + f'select * from {table_name} where bi1 like "{lp_name}%"', + f'select * from {table_name} where bi1 like "{ul_name}%"', + f'select * from {table_name} where nc1 like "%{hp_name}"', + f'select * from {table_name} where nc1 like "{lp_name}%"', + f'select * from {table_name} where nc1 like "{ul_name}%"'] + for sql in exceed_sql_list: + tdSql.error(sql) + + def checkStbWildcardSelectLength(self): + ''' + check stb wildcard select length with % and _ + ''' + self.cleanTb() + table_name, hp_name, lp_name, ul_name = self.genTableName() + + tdSql.execute(f'CREATE TABLE {table_name} (ts timestamp, bi1 binary(200), nc1 nchar(200)) tags (si1 binary(200), sc1 nchar(200))') + tdSql.execute(f'create table {table_name}_sub1 using {table_name} tags ("{table_name}", "{table_name}")') + tdSql.execute(f'insert into {table_name}_sub1 values (now, "{table_name}", "{table_name}");') + + sql_list = [f'select * from {table_name} where bi1 like "{hp_name}"', + f'select * from {table_name} where bi1 like "{lp_name}"', + f'select * from {table_name} where bi1 like "{ul_name}"', + f'select * from {table_name} where nc1 like "{hp_name}"', + f'select * from {table_name} where nc1 like "{lp_name}"', + f'select * from {table_name} where nc1 like "{ul_name}"', + f'select * from {table_name} where si1 like "{hp_name}"', + f'select * from {table_name} where si1 like "{lp_name}"', + f'select * from {table_name} where si1 like "{ul_name}"', + f'select * from {table_name} where sc1 like "{hp_name}"', + f'select * from {table_name} where sc1 like "{lp_name}"', + f'select * from {table_name} where sc1 like "{ul_name}"'] + + for sql in sql_list: + tdSql.query(sql) + if len(table_name) >= 1: + tdSql.checkRows(1) + else: + tdSql.error(sql) + exceed_sql_list = [f'select * from {table_name} where bi1 like "%{hp_name}"', + f'select * from {table_name} where bi1 like "{lp_name}%"', + f'select * from {table_name} where bi1 like "{ul_name}%"', + f'select * from {table_name} where nc1 like "%{hp_name}"', + f'select * from {table_name} where nc1 like "{lp_name}%"', + f'select * from {table_name} where nc1 like "{ul_name}%"', + f'select * from {table_name} where si1 like "%{hp_name}"', + f'select * from {table_name} where si1 like "{lp_name}%"', + f'select * from {table_name} where si1 like "{ul_name}%"', + f'select * from {table_name} where sc1 like "%{hp_name}"', + f'select * from {table_name} where sc1 like "{lp_name}%"', + f'select * from {table_name} where sc1 like "{ul_name}%"'] + for sql in exceed_sql_list: + tdSql.error(sql) + + def run(self): + tdSql.prepare() + self.checkRegularTableWildcardLength() + self.checkSuperTableWildcardLength() + self.checkRegularWildcardSelectLength() + 
self.checkStbWildcardSelectLength() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) + diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index b42af27d06..dfe1e4a582 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -81,6 +81,22 @@ class TDSql: return self.queryResult return self.queryRows + def getVariable(self, search_attr): + ''' + get variable of search_attr access "show variables" + ''' + try: + sql = 'show variables' + param_list = self.query(sql, row_tag=True) + for param in param_list: + if param[0] == search_attr: + return param[1], param_list + except Exception as e: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.notice("%s(%d) failed: sql:%s, %s" % args) + raise Exception(repr(e)) + def getColNameList(self, sql, col_tag=None): self.sql = sql try: From ad01fcb88497bcbb42549d3471e8640bf871ef88 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Wed, 11 Aug 2021 08:54:43 +0800 Subject: [PATCH 087/185] [TD-5930]:_block_dist initialize client merge buffer with max steps --- src/query/src/qAggMain.c | 11 +++++++---- src/query/src/qExecutor.c | 3 +++ 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 9f081cfb2f..6eadedcaf3 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -4062,12 +4062,15 @@ static void mergeTableBlockDist(SResultRowCellInfo* pResInfo, const STableBlockD pDist->maxRows = pSrc->maxRows; pDist->minRows = pSrc->minRows; - int32_t numSteps = tsMaxRowsInFileBlock/TSDB_BLOCK_DIST_STEP_ROWS; - pDist->dataBlockInfos = taosArrayInit(numSteps, sizeof(SFileBlockInfo)); - taosArraySetSize(pDist->dataBlockInfos, numSteps); + int32_t maxSteps = TSDB_MAX_MAX_ROW_FBLOCK/TSDB_BLOCK_DIST_STEP_ROWS; + if (TSDB_MAX_MAX_ROW_FBLOCK % TSDB_BLOCK_DIST_STEP_ROWS != 0) { + ++maxSteps; + } + pDist->dataBlockInfos = taosArrayInit(maxSteps, sizeof(SFileBlockInfo)); + taosArraySetSize(pDist->dataBlockInfos, maxSteps); } - size_t steps = taosArrayGetSize(pDist->dataBlockInfos); + size_t steps = taosArrayGetSize(pSrc->dataBlockInfos); for (int32_t i = 0; i < steps; ++i) { int32_t srcNumBlocks = ((SFileBlockInfo*)taosArrayGet(pSrc->dataBlockInfos, i))->numBlocksOfStep; SFileBlockInfo* blockInfo = (SFileBlockInfo*)taosArrayGet(pDist->dataBlockInfos, i); diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 52e5dbb7f3..7b6dd86f22 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -5024,6 +5024,9 @@ static SSDataBlock* doBlockInfoScan(void* param, bool* newgroup) { tableBlockDist.numOfTables = (int32_t)pOperator->pRuntimeEnv->tableqinfoGroupInfo.numOfTables; int32_t numRowSteps = tsMaxRowsInFileBlock / TSDB_BLOCK_DIST_STEP_ROWS; + if (tsMaxRowsInFileBlock % TSDB_BLOCK_DIST_STEP_ROWS != 0) { + ++numRowSteps; + } tableBlockDist.dataBlockInfos = taosArrayInit(numRowSteps, sizeof(SFileBlockInfo)); taosArraySetSize(tableBlockDist.dataBlockInfos, numRowSteps); tableBlockDist.maxRows = INT_MIN; From 6cc49faf82fa5d1d546128756561ab3f9e377a22 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Tue, 10 Aug 2021 14:37:19 +0800 Subject: [PATCH 088/185] [TD-5931]:invalidate time range when no tables in table groups of tsdb query --- src/tsdb/src/tsdbRead.c | 12 ++++++++++++ tests/script/general/parser/interp.sim | 13 ++++++++++++- 2 files changed, 24 
insertions(+), 1 deletion(-) diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index e1d40aa7d0..9cc9b7224c 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -691,6 +691,18 @@ static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGr TsdbQueryHandleT tsdbQueryRowsInExternalWindow(STsdbRepo *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList, uint64_t qId, SMemRef* pRef) { STableGroupInfo* pNew = trimTableGroup(&pCond->twindow, groupList); + if (pNew->numOfTables == 0) { + tsdbDebug("update query time range to invalidate time window"); + + assert(taosArrayGetSize(pNew->pGroupList) == 0); + bool asc = ASCENDING_TRAVERSE(pCond->order); + if (asc) { + pCond->twindow.ekey = pCond->twindow.skey - 1; + } else { + pCond->twindow.skey = pCond->twindow.ekey - 1; + } + } + STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, pNew, qId, pRef); pQueryHandle->loadExternalRow = true; pQueryHandle->currentLoadExternalRows = true; diff --git a/tests/script/general/parser/interp.sim b/tests/script/general/parser/interp.sim index 55c5701985..7626b15647 100644 --- a/tests/script/general/parser/interp.sim +++ b/tests/script/general/parser/interp.sim @@ -68,4 +68,15 @@ print ================== server restart completed run general/parser/interp_test.sim -#system sh/exec.sh -n dnode1 -s stop -x SIGINT +print ================= TD-5931 +sql create stable st5931(ts timestamp, f int) tags(t int) +sql create table ct5931 using st5931 tags(1) +sql create table nt5931(ts timestamp, f int) +sql select interp(*) from nt5931 where ts=now +sql select interp(*) from st5931 where ts=now +sql select interp(*) from ct5931 where ts=now +if $rows != 0 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT From 5cb50f7922bfe7c6c38fa30e68e837763040060c Mon Sep 17 00:00:00 2001 From: tickduan <417921451@qq.com> Date: Thu, 12 Aug 2021 10:25:06 +0800 Subject: [PATCH 089/185] fixed huffman tree memory leak --- deps/TSZ | 2 +- src/kit/taospack/taospack.c | 52 ++++++++++++++++++++++++++++++++++--- 2 files changed, 50 insertions(+), 4 deletions(-) diff --git a/deps/TSZ b/deps/TSZ index 0ca5b15a8e..ceda5bf9fc 160000 --- a/deps/TSZ +++ b/deps/TSZ @@ -1 +1 @@ -Subproject commit 0ca5b15a8eac40327dd737be52c926fa5675712c +Subproject commit ceda5bf9fcd7836509ac97dcc0056b3f1dd48cc5 diff --git a/src/kit/taospack/taospack.c b/src/kit/taospack/taospack.c index 33d779dfcf..ad188c3010 100644 --- a/src/kit/taospack/taospack.c +++ b/src/kit/taospack/taospack.c @@ -18,6 +18,7 @@ #include #include + #if defined(WINDOWS) int main(int argc, char *argv[]) { printf("welcome to use taospack tools v1.3 for windows.\n"); @@ -601,7 +602,6 @@ void test_threadsafe_double(int thread_count){ } - void unitTestFloat() { float ft1 [] = {1.11, 2.22, 3.333}; @@ -662,7 +662,50 @@ void unitTestFloat() { free(ft2); free(buff); free(output); - +} + +void leakFloat() { + + int cnt = sizeof(g_ft1)/sizeof(float); + float* floats = g_ft1; + int algorithm = 2; + + // compress + const char* input = (const char*)floats; + int input_len = cnt * sizeof(float); + int output_len = input_len + 1024; + char* output = (char*) malloc(output_len); + char* buff = (char*) malloc(input_len); + int buff_len = input_len; + + int ret_len = 0; + ret_len = tsCompressFloatLossy(input, input_len, cnt, output, output_len, algorithm, buff, buff_len); + + if(ret_len == 0) { + printf(" compress float error.\n"); + free(buff); + free(output); + return ; + } + + float* ft2 = (float*)malloc(input_len); 
+ ret_len = tsDecompressFloatLossy(output, ret_len, cnt, (char*)ft2, input_len, algorithm, buff, buff_len); + if(ret_len == 0) { + printf(" decompress float error.\n"); + } + + free(ft2); + free(buff); + free(output); +} + + +void leakTest(){ + for(int i=0; i< 90000000000000; i++){ + if(i%10000==0) + printf(" ---------- %d ---------------- \n", i); + leakFloat(); + } } #define DB_CNT 500 @@ -689,7 +732,7 @@ extern char Compressor []; // ----------------- main ---------------------- // int main(int argc, char *argv[]) { - printf("welcome to use taospack tools v1.3\n"); + printf("welcome to use taospack tools v1.5\n"); //printf(" sizeof(int)=%d\n", (int)sizeof(int)); //printf(" sizeof(long)=%d\n", (int)sizeof(long)); @@ -753,6 +796,9 @@ int main(int argc, char *argv[]) { if(strcmp(argv[1], "-mem") == 0) { memTest(); } + else if(strcmp(argv[1], "-leak") == 0) { + leakTest(); + } } else{ unitTestFloat(); From 7573622bd2e2fb6a507b64d3d1926338caad0fd6 Mon Sep 17 00:00:00 2001 From: xywang Date: Thu, 12 Aug 2021 11:38:08 +0800 Subject: [PATCH 090/185] [TD-6012]: fixed no sql recordings via http while httpEnableRecordSql was on --- src/dnode/src/dnodeMain.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index eac04fe7bb..5291fb73a0 100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -166,7 +166,6 @@ int32_t dnodeInitSystem() { taosInitGlobalCfg(); taosReadGlobalLogCfg(); taosSetCoreDump(); - taosInitNotes(); dnodeInitTmr(); if (dnodeCreateDir(tsLogDir) < 0) { @@ -188,6 +187,8 @@ int32_t dnodeInitSystem() { dInfo("start to initialize TDengine"); + taosInitNotes(); + if (dnodeInitComponents() != 0) { return -1; } From 5b9e1cff99ee931bfc65d61c6325f15d004d2004 Mon Sep 17 00:00:00 2001 From: xywang Date: Thu, 12 Aug 2021 11:44:14 +0800 Subject: [PATCH 091/185] [TD-6012]: fixed no sql recordings via http while httpEnableRecordSql was on --- src/dnode/src/dnodeMain.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index 22ce6c995a..ab2fcbea6a 100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -129,7 +129,6 @@ int32_t dnodeInitSystem() { taosInitGlobalCfg(); taosReadGlobalLogCfg(); taosSetCoreDump(); - taosInitNotes(); dnodeInitTmr(); if (dnodeCreateDir(tsLogDir) < 0) { @@ -151,6 +150,8 @@ int32_t dnodeInitSystem() { dInfo("start to initialize TDengine"); + taosInitNotes(); + if (dnodeInitComponents() != 0) { return -1; } From 735e6c1ea1720e0cd0f8156be797bfd48e050775 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Thu, 12 Aug 2021 11:53:53 +0800 Subject: [PATCH 092/185] fix: fix odr violation --- src/common/src/tglobal.c | 3 +++ src/tsdb/inc/tsdbFS.h | 5 ++++- src/tsdb/src/tsdbFS.c | 3 +-- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index a58303e9fc..b9a4f4ffea 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -26,6 +26,9 @@ #include "tlocale.h" #include "ttimezone.h" +// TSDB +bool tsdbForceKeepFile = false; + // cluster char tsFirst[TSDB_EP_LEN] = {0}; char tsSecond[TSDB_EP_LEN] = {0}; diff --git a/src/tsdb/inc/tsdbFS.h b/src/tsdb/inc/tsdbFS.h index d63aeb14ac..55db8d56ff 100644 --- a/src/tsdb/inc/tsdbFS.h +++ b/src/tsdb/inc/tsdbFS.h @@ -18,6 +18,9 @@ #define TSDB_FS_VERSION 0 +// ================== TSDB global config +extern bool tsdbForceKeepFile; + // ================== CURRENT file header info typedef struct { uint32_t 
version; // Current file system version (relating to code) @@ -109,4 +112,4 @@ static FORCE_INLINE int tsdbUnLockFS(STsdbFS* pFs) { return 0; } -#endif /* _TD_TSDB_FS_H_ */ \ No newline at end of file +#endif /* _TD_TSDB_FS_H_ */ diff --git a/src/tsdb/src/tsdbFS.c b/src/tsdb/src/tsdbFS.c index 68450301d8..70a5262138 100644 --- a/src/tsdb/src/tsdbFS.c +++ b/src/tsdb/src/tsdbFS.c @@ -38,7 +38,6 @@ static int tsdbProcessExpiredFS(STsdbRepo *pRepo); static int tsdbCreateMeta(STsdbRepo *pRepo); // For backward compatibility -bool tsdbForceKeepFile = false; // ================== CURRENT file header info static int tsdbEncodeFSHeader(void **buf, SFSHeader *pHeader) { int tlen = 0; @@ -1354,4 +1353,4 @@ static void tsdbScanAndTryFixDFilesHeader(STsdbRepo *pRepo, int32_t *nExpired) { tsdbCloseDFileSet(&fset); } -} \ No newline at end of file +} From 7a0d2c10c530aa215169e9f737026739840a2b3a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 12 Aug 2021 14:05:38 +0800 Subject: [PATCH 093/185] [td-225]merge master. --- src/connector/go | 2 +- src/connector/grafanaplugin | 2 +- src/connector/hivemq-tdengine-extension | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/connector/go b/src/connector/go index b8f76da4a7..050667e5b4 160000 --- a/src/connector/go +++ b/src/connector/go @@ -1 +1 @@ -Subproject commit b8f76da4a708d158ec3cc4b844571dc4414e36b4 +Subproject commit 050667e5b4d0eafa5387e4283e713559b421203f diff --git a/src/connector/grafanaplugin b/src/connector/grafanaplugin index a44ec1ca49..32e2c97a4c 160000 --- a/src/connector/grafanaplugin +++ b/src/connector/grafanaplugin @@ -1 +1 @@ -Subproject commit a44ec1ca493ad01b2bf825b6418f69e11f548206 +Subproject commit 32e2c97a4cf7bedaa99f5d6dd8cb036e7f4470df diff --git a/src/connector/hivemq-tdengine-extension b/src/connector/hivemq-tdengine-extension index ce52010141..b62a26ecc1 160000 --- a/src/connector/hivemq-tdengine-extension +++ b/src/connector/hivemq-tdengine-extension @@ -1 +1 @@ -Subproject commit ce5201014136503d34fecbd56494b67b4961056c +Subproject commit b62a26ecc164a310104df57691691b237e091c89 From 8ecda5ba5404a21b755c0d6ecada820cbbe290ac Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Tue, 10 Aug 2021 14:37:19 +0800 Subject: [PATCH 094/185] [TD-5931]:invalidate time range when no tables in table groups of tsdb query --- src/tsdb/src/tsdbRead.c | 12 ++++++++++++ tests/script/general/parser/interp.sim | 17 ++++++++++++++++- 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index e1d40aa7d0..9cc9b7224c 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -691,6 +691,18 @@ static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGr TsdbQueryHandleT tsdbQueryRowsInExternalWindow(STsdbRepo *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList, uint64_t qId, SMemRef* pRef) { STableGroupInfo* pNew = trimTableGroup(&pCond->twindow, groupList); + if (pNew->numOfTables == 0) { + tsdbDebug("update query time range to invalidate time window"); + + assert(taosArrayGetSize(pNew->pGroupList) == 0); + bool asc = ASCENDING_TRAVERSE(pCond->order); + if (asc) { + pCond->twindow.ekey = pCond->twindow.skey - 1; + } else { + pCond->twindow.skey = pCond->twindow.ekey - 1; + } + } + STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, pNew, qId, pRef); pQueryHandle->loadExternalRow = true; pQueryHandle->currentLoadExternalRows = true; diff --git a/tests/script/general/parser/interp.sim 
b/tests/script/general/parser/interp.sim index 55c5701985..4654913d1e 100644 --- a/tests/script/general/parser/interp.sim +++ b/tests/script/general/parser/interp.sim @@ -68,4 +68,19 @@ print ================== server restart completed run general/parser/interp_test.sim -#system sh/exec.sh -n dnode1 -s stop -x SIGINT +print ================= TD-5931 +sql create stable st5931(ts timestamp, f int) tags(t int) +sql create table ct5931 using st5931 tags(1) +sql create table nt5931(ts timestamp, f int) + +sql select interp(*) from nt5931 where ts=now + +sql select interp(*) from st5931 where ts=now + +sql select interp(*) from ct5931 where ts=now + +if $rows != 0 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT From b874090eac8637cbfd8c03e4fbc15d590f830d2d Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 12 Aug 2021 14:22:49 +0800 Subject: [PATCH 095/185] [td-255] cherry pick order support. --- src/client/src/tscSQLParser.c | 85 ++++++++++++++++++++--------------- src/client/src/tscServer.c | 6 +-- src/query/inc/qExecutor.h | 2 +- src/query/src/qPlan.c | 15 ++++--- 4 files changed, 62 insertions(+), 46 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 6df724881e..46292ceb91 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5482,14 +5482,19 @@ static void setDefaultOrderInfo(SQueryInfo* pQueryInfo) { pQueryInfo->order.order = TSDB_ORDER_ASC; if (isTopBottomQuery(pQueryInfo)) { pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; - } else { // in case of select tbname from super_table, the defualt order column can not be the primary ts column - pQueryInfo->order.orderColId = INT32_MIN; + } else { // in case of select tbname from super_table, the default order column can not be the primary ts column + pQueryInfo->order.orderColId = INT32_MIN; // todo define a macro } /* for super table query, set default ascending order for group output */ if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { pQueryInfo->groupbyExpr.orderType = TSDB_ORDER_ASC; } + + if (pQueryInfo->distinct) { + pQueryInfo->order.order = TSDB_ORDER_ASC; + pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; + } } int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, SSchema* pSchema) { @@ -5501,17 +5506,12 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq setDefaultOrderInfo(pQueryInfo); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - - if (pQueryInfo->distinct == true) { - pQueryInfo->order.order = TSDB_ORDER_ASC; - pQueryInfo->order.orderColId = 0; - return TSDB_CODE_SUCCESS; - } - if (pSqlNode->pSortOrder == NULL) { + if (pQueryInfo->distinct || pSqlNode->pSortOrder == NULL) { return TSDB_CODE_SUCCESS; } - SArray* pSortorder = pSqlNode->pSortOrder; + char* pMsgBuf = tscGetErrorMsgPayload(pCmd); + SArray* pSortOrder = pSqlNode->pSortOrder; /* * for table query, there is only one or none order option is allowed, which is the @@ -5519,19 +5519,19 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq * * for super table query, the order option must be less than 3. 
*/ - size_t size = taosArrayGetSize(pSortorder); - if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) { + size_t size = taosArrayGetSize(pSortOrder); + if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_TMP_TABLE(pTableMetaInfo)) { if (size > 1) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg0); + return invalidOperationMsg(pMsgBuf, msg0); } } else { if (size > 2) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); + return invalidOperationMsg(pMsgBuf, msg3); } } // handle the first part of order by - tVariant* pVar = taosArrayGet(pSortorder, 0); + tVariant* pVar = taosArrayGet(pSortOrder, 0); // e.g., order by 1 asc, return directly with out further check. if (pVar->nType >= TSDB_DATA_TYPE_TINYINT && pVar->nType <= TSDB_DATA_TYPE_BIGINT) { @@ -5543,7 +5543,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { // super table query if (getColumnIndexByName(&columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); + return invalidOperationMsg(pMsgBuf, msg1); } bool orderByTags = false; @@ -5555,7 +5555,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq // it is a tag column if (pQueryInfo->groupbyExpr.columnInfo == NULL) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + return invalidOperationMsg(pMsgBuf, msg2); } SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0); if (relTagIndex == pColIndex->colIndex) { @@ -5576,13 +5576,14 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq orderByGroupbyCol = true; } } + if (!(orderByTags || orderByTS || orderByGroupbyCol) && !isTopBottomQuery(pQueryInfo)) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); + return invalidOperationMsg(pMsgBuf, msg3); } else { // order by top/bottom result value column is not supported in case of interval query. 
assert(!(orderByTags && orderByTS && orderByGroupbyCol)); } - size_t s = taosArrayGetSize(pSortorder); + size_t s = taosArrayGetSize(pSortOrder); if (s == 1) { if (orderByTags) { pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); @@ -5601,7 +5602,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq pExpr = tscExprGet(pQueryInfo, 1); if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + return invalidOperationMsg(pMsgBuf, msg2); } tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); @@ -5619,9 +5620,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq addPrimaryTsColIntoResult(pQueryInfo, pCmd); } } - } - - if (s == 2) { + } else { tVariantListItem *pItem = taosArrayGet(pSqlNode->pSortOrder, 0); if (orderByTags) { pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); @@ -5638,22 +5637,23 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq tVariant* pVar2 = &pItem->pVar; SStrToken cname = {pVar2->nLen, pVar2->nType, pVar2->pz}; if (getColumnIndexByName(&cname, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); + return invalidOperationMsg(pMsgBuf, msg1); } if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + return invalidOperationMsg(pMsgBuf, msg2); } else { - tVariantListItem* p1 = taosArrayGet(pSortorder, 1); + tVariantListItem* p1 = taosArrayGet(pSortOrder, 1); pQueryInfo->order.order = p1->sortOrder; pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; } } - } else { // meter query - if (getColumnIndexByName(&columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); + } else if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo)) { // check order by clause for normal table & temp table + if (getColumnIndexByName(&columnName, pQueryInfo, &index, pMsgBuf) != TSDB_CODE_SUCCESS) { + return invalidOperationMsg(pMsgBuf, msg1); } + if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX && !isTopBottomQuery(pQueryInfo)) { bool validOrder = false; SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo; @@ -5661,13 +5661,14 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq SColIndex* pColIndex = taosArrayGet(columnInfo, 0); validOrder = (pColIndex->colIndex == index.columnIndex); } + if (!validOrder) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + return invalidOperationMsg(pMsgBuf, msg2); } + tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); pQueryInfo->groupbyExpr.orderIndex = pSchema[index.columnIndex].colId; pQueryInfo->groupbyExpr.orderType = p1->sortOrder; - } if (isTopBottomQuery(pQueryInfo)) { @@ -5683,13 +5684,14 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq pExpr = tscExprGet(pQueryInfo, 1); if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + return invalidOperationMsg(pMsgBuf, msg2); } + validOrder = true; } if (!validOrder) { - 
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + return invalidOperationMsg(pMsgBuf, msg2); } tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0); @@ -5699,6 +5701,18 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq return TSDB_CODE_SUCCESS; } + tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0); + pQueryInfo->order.order = pItem->sortOrder; + pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; + } else { + // handle the temp table order by clause. You can order by any single column in case of the temp table, created by + // inner subquery. + assert(UTIL_TABLE_IS_TMP_TABLE(pTableMetaInfo) && taosArrayGetSize(pSqlNode->pSortOrder) == 1); + + if (getColumnIndexByName(&columnName, pQueryInfo, &index, pMsgBuf) != TSDB_CODE_SUCCESS) { + return invalidOperationMsg(pMsgBuf, msg1); + } + tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0); pQueryInfo->order.order = pItem->sortOrder; pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; @@ -8458,8 +8472,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf const char* msg8 = "condition missing for join query"; const char* msg9 = "not support 3 level select"; - int32_t code = TSDB_CODE_SUCCESS; - + int32_t code = TSDB_CODE_SUCCESS; SSqlCmd* pCmd = &pSql->cmd; STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); @@ -8754,8 +8767,6 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf pQueryInfo->simpleAgg = isSimpleAggregateRv(pQueryInfo); pQueryInfo->onlyTagQuery = onlyTagPrjFunction(pQueryInfo); pQueryInfo->groupbyColumn = tscGroupbyColumn(pQueryInfo); - //pQueryInfo->globalMerge = tscIsTwoStageSTableQuery(pQueryInfo, 0); - pQueryInfo->arithmeticOnAgg = tsIsArithmeticQueryOnAggResult(pQueryInfo); pQueryInfo->orderProjectQuery = tscOrderedProjectionQueryOnSTable(pQueryInfo, 0); diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 3e8dfac1da..e809fe3137 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -880,16 +880,16 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { } SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd); + STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); + STableMeta * pTableMeta = pTableMetaInfo->pTableMeta; SQueryAttr query = {{0}}; tscCreateQueryFromQueryInfo(pQueryInfo, &query, pSql); + query.vgId = pTableMeta->vgId; SArray* tableScanOperator = createTableScanPlan(&query); SArray* queryOperator = createExecOperatorPlan(&query); - STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - STableMeta * pTableMeta = pTableMetaInfo->pTableMeta; - SQueryTableMsg *pQueryMsg = (SQueryTableMsg *)pCmd->payload; tstrncpy(pQueryMsg->version, version, tListLen(pQueryMsg->version)); diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 5fe68e456f..29cfa5edf1 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -417,7 +417,6 @@ typedef struct STableScanInfo { int32_t *rowCellInfoOffset; SExprInfo *pExpr; SSDataBlock block; - bool loadExternalRows; // load external rows (prev & next rows) int32_t numOfOutput; int64_t elapsedTime; @@ -570,6 +569,7 @@ SOperatorInfo* createFilterOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperator int32_t numOfOutput, SColumnInfo* pCols, int32_t numOfFilter); SOperatorInfo* createJoinOperatorInfo(SOperatorInfo** pUpstream, int32_t numOfUpstream, SSchema* pSchema, int32_t numOfOutput); +SOperatorInfo 
*createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); SSDataBlock* doGlobalAggregate(void* param, bool* newgroup); SSDataBlock* doMultiwayMergeSort(void* param, bool* newgroup); diff --git a/src/query/src/qPlan.c b/src/query/src/qPlan.c index b8a5ee7699..f72f70c911 100644 --- a/src/query/src/qPlan.c +++ b/src/query/src/qPlan.c @@ -557,10 +557,9 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) { int32_t op = 0; if (onlyQueryTags(pQueryAttr)) { // do nothing for tags query - if (onlyQueryTags(pQueryAttr)) { - op = OP_TagScan; - taosArrayPush(plan, &op); - } + op = OP_TagScan; + taosArrayPush(plan, &op); + if (pQueryAttr->distinct) { op = OP_Distinct; taosArrayPush(plan, &op); @@ -651,8 +650,14 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) { taosArrayPush(plan, &op); } } + + // outer query order by support + int32_t orderColId = pQueryAttr->order.orderColId; + if (pQueryAttr->vgId == 0 && orderColId != PRIMARYKEY_TIMESTAMP_COL_INDEX && orderColId != INT32_MIN) { + op = OP_Order; + taosArrayPush(plan, &op); + } } - if (pQueryAttr->limit.limit > 0 || pQueryAttr->limit.offset > 0) { op = OP_Limit; From 6fafa5b1bef6eee56417995f85c2ce7a1d3ab17a Mon Sep 17 00:00:00 2001 From: zhaoyanggh Date: Thu, 12 Aug 2021 14:59:57 +0800 Subject: [PATCH 096/185] [TD-6006] query where by column with no quotes --- tests/pytest/query/queryError.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/pytest/query/queryError.py b/tests/pytest/query/queryError.py index ac78c0518f..e5c468600b 100644 --- a/tests/pytest/query/queryError.py +++ b/tests/pytest/query/queryError.py @@ -65,6 +65,10 @@ class TDTestCase: # TD-2208 tdSql.error("select diff(tagtype),top(tagtype,1) from dev_001") + # TD-6006 + tdSql.error("select * from dev_001 where 'name' is not null") + tdSql.error("select * from dev_001 where \"name\" = 'first'") + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) From f415a994fe7d6d1c0cc94ae3441b8a6709de888c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 12 Aug 2021 15:18:25 +0800 Subject: [PATCH 097/185] [td-255] --- src/client/src/tscUtil.c | 1 - src/query/inc/qExecutor.h | 8 ++ src/query/inc/qExtbuffer.h | 2 + src/query/src/qExtbuffer.c | 54 +++++++ src/query/src/qPercentile.c | 2 +- src/util/inc/tcompare.h | 2 +- src/util/src/tcompare.c | 244 +++++++++++++++++++++----------- src/util/src/tskiplist.c | 2 +- src/util/tests/skiplistTest.cpp | 2 +- 9 files changed, 232 insertions(+), 85 deletions(-) diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 3c35795b0d..b393459c52 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1206,7 +1206,6 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue createInputDataFilterInfo(px, numOfCol1, &numOfFilterCols, &pFilterInfo); SOperatorInfo* pSourceOperator = createDummyInputOperator(pSqlObjList[0], pSchema, numOfCol1, pFilterInfo, numOfFilterCols); - pOutput->precision = pSqlObjList[0]->res.precision; SSchema* schema = NULL; diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 29cfa5edf1..6dca502838 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -333,6 +333,7 @@ enum OPERATOR_TYPE_E { OP_StateWindow = 22, OP_AllTimeWindow = 23, OP_AllMultiTableTimeInterval = 24, + OP_Order = 25, }; typedef struct SOperatorInfo { @@ -540,6 +541,13 @@ typedef struct SMultiwayMergeInfo { SArray *udfInfo; } SMultiwayMergeInfo; +// todo support 
the disk-based sort +typedef struct SOrderOperatorInfo { + int32_t colIndex; + int32_t order; + SSDataBlock *pDataBlock; +} SOrderOperatorInfo; + void appendUpstream(SOperatorInfo* p, SOperatorInfo* pUpstream); SOperatorInfo* createDataBlocksOptScanInfo(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv, int32_t repeatTime, int32_t reverseTime); diff --git a/src/query/inc/qExtbuffer.h b/src/query/inc/qExtbuffer.h index cf0e8ce31a..c06fae298c 100644 --- a/src/query/inc/qExtbuffer.h +++ b/src/query/inc/qExtbuffer.h @@ -220,6 +220,8 @@ tOrderDescriptor *tOrderDesCreate(const int32_t *orderColIdx, int32_t numOfOrder void tOrderDescDestroy(tOrderDescriptor *pDesc); +void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn); + void tColModelAppend(SColumnModel *dstModel, tFilePage *dstPage, void *srcData, int32_t srcStartRows, int32_t numOfRowsToWrite, int32_t srcCapacity); diff --git a/src/query/src/qExtbuffer.c b/src/query/src/qExtbuffer.c index cc47cc824b..91004fe707 100644 --- a/src/query/src/qExtbuffer.c +++ b/src/query/src/qExtbuffer.c @@ -1102,3 +1102,57 @@ void tOrderDescDestroy(tOrderDescriptor *pDesc) { destroyColumnModel(pDesc->pColumnModel); tfree(pDesc); } + +void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn) { + assert(numOfRows > 0 && numOfCols > 0 && index >= 0 && index < numOfCols); + + int32_t bytes = pSchema[index].bytes; + int32_t size = bytes + sizeof(int32_t); + + char* buf = calloc(1, size * numOfRows); + + for(int32_t i = 0; i < numOfRows; ++i) { + char* dest = buf + size * i; + memcpy(dest, pCols[index] + bytes * i, bytes); + *(int32_t*)(dest+bytes) = i; + } + + qsort(buf, numOfRows, size, compareFn); + + int32_t prevLength = 0; + char* p = NULL; + + for(int32_t i = 0; i < numOfCols; ++i) { + int32_t bytes1 = pSchema[i].bytes; + + if (i == index) { + for(int32_t j = 0; j < numOfRows; ++j){ + char* src = buf + (j * size); + char* dest = pCols[i] + (j * bytes1); + memcpy(dest, src, bytes1); + } + } else { + // make sure memory buffer is enough + if (prevLength < bytes1) { + char *tmp = realloc(p, bytes1 * numOfRows); + assert(tmp); + + p = tmp; + prevLength = bytes1; + } + + memcpy(p, pCols[i], bytes1 * numOfRows); + + for(int32_t j = 0; j < numOfRows; ++j){ + char* dest = pCols[i] + bytes1 * j; + + int32_t newPos = *(int32_t*)(buf + (j * size) + bytes); + char* src = p + (newPos * bytes1); + memcpy(dest, src, bytes1); + } + } + } + + tfree(buf); + tfree(p); +} \ No newline at end of file diff --git a/src/query/src/qPercentile.c b/src/query/src/qPercentile.c index e3326cc26b..e9022db503 100644 --- a/src/query/src/qPercentile.c +++ b/src/query/src/qPercentile.c @@ -237,7 +237,7 @@ tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval, } pBucket->elemPerPage = (pBucket->bufPageSize - sizeof(tFilePage))/pBucket->bytes; - pBucket->comparFn = getKeyComparFunc(pBucket->type); + pBucket->comparFn = getKeyComparFunc(pBucket->type, TSDB_ORDER_ASC); pBucket->hashFunc = getHashFunc(pBucket->type); if (pBucket->hashFunc == NULL) { diff --git a/src/util/inc/tcompare.h b/src/util/inc/tcompare.h index 612ce7ede0..4861779acd 100644 --- a/src/util/inc/tcompare.h +++ b/src/util/inc/tcompare.h @@ -47,7 +47,7 @@ int WCSPatternMatch(const wchar_t *pattern, const wchar_t *str, size_t size, con int32_t doCompare(const char* a, const char* b, int32_t type, size_t size); -__compar_fn_t getKeyComparFunc(int32_t keyType); 
+__compar_fn_t getKeyComparFunc(int32_t keyType, int32_t order); __compar_fn_t getComparFunc(int32_t type, int32_t optr); diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index e953f4c464..6e135878c1 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -16,28 +16,22 @@ #include "os.h" #include "ttype.h" #include "tcompare.h" -#include "tarray.h" #include "hash.h" -int32_t compareInt32Val(const void *pLeft, const void *pRight) { - int32_t left = GET_INT32_VAL(pLeft), right = GET_INT32_VAL(pRight); - if (left > right) return 1; - if (left < right) return -1; - return 0; +int32_t setCompareBytes1(const void *pLeft, const void *pRight) { + return NULL != taosHashGet((SHashObj *)pRight, pLeft, 1) ? 1 : 0; } -int32_t compareInt64Val(const void *pLeft, const void *pRight) { - int64_t left = GET_INT64_VAL(pLeft), right = GET_INT64_VAL(pRight); - if (left > right) return 1; - if (left < right) return -1; - return 0; +int32_t setCompareBytes2(const void *pLeft, const void *pRight) { + return NULL != taosHashGet((SHashObj *)pRight, pLeft, 2) ? 1 : 0; } -int32_t compareInt16Val(const void *pLeft, const void *pRight) { - int16_t left = GET_INT16_VAL(pLeft), right = GET_INT16_VAL(pRight); - if (left > right) return 1; - if (left < right) return -1; - return 0; +int32_t setCompareBytes4(const void *pLeft, const void *pRight) { + return NULL != taosHashGet((SHashObj *)pRight, pLeft, 4) ? 1 : 0; +} + +int32_t setCompareBytes8(const void *pLeft, const void *pRight) { + return NULL != taosHashGet((SHashObj *)pRight, pLeft, 8) ? 1 : 0; } int32_t compareInt8Val(const void *pLeft, const void *pRight) { @@ -47,27 +41,76 @@ int32_t compareInt8Val(const void *pLeft, const void *pRight) { return 0; } -int32_t compareUint32Val(const void *pLeft, const void *pRight) { - int32_t left = GET_UINT32_VAL(pLeft), right = GET_UINT32_VAL(pRight); +int32_t compareInt8ValDesc(const void *pLeft, const void *pRight) { + return compareInt8Val(pRight, pLeft); +} + +int32_t compareInt16Val(const void *pLeft, const void *pRight) { + int16_t left = GET_INT16_VAL(pLeft), right = GET_INT16_VAL(pRight); if (left > right) return 1; if (left < right) return -1; return 0; } +int32_t compareInt16ValDesc(const void* pLeft, const void* pRight) { + return compareInt16Val(pRight, pLeft); +} + +int32_t compareInt32Val(const void *pLeft, const void *pRight) { + int32_t left = GET_INT32_VAL(pLeft), right = GET_INT32_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt32ValDesc(const void* pLeft, const void* pRight) { + return compareInt32Val(pRight, pLeft); +} + +int32_t compareInt64Val(const void *pLeft, const void *pRight) { + int64_t left = GET_INT64_VAL(pLeft), right = GET_INT64_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt64ValDesc(const void* pLeft, const void* pRight) { + return compareInt64Val(pRight, pLeft); +} + +int32_t compareUint32Val(const void *pLeft, const void *pRight) { + uint32_t left = GET_UINT32_VAL(pLeft), right = GET_UINT32_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint32ValDesc(const void* pLeft, const void* pRight) { + return compareUint32Val(pRight, pLeft); +} + int32_t compareUint64Val(const void *pLeft, const void *pRight) { - int64_t left = GET_UINT64_VAL(pLeft), right = GET_UINT64_VAL(pRight); + uint64_t left = GET_UINT64_VAL(pLeft), right = GET_UINT64_VAL(pRight); if (left > right) return 1; if (left < 
right) return -1; return 0; } +int32_t compareUint64ValDesc(const void* pLeft, const void* pRight) { + return compareUint64Val(pRight, pLeft); +} + int32_t compareUint16Val(const void *pLeft, const void *pRight) { - int16_t left = GET_UINT16_VAL(pLeft), right = GET_UINT16_VAL(pRight); + uint16_t left = GET_UINT16_VAL(pLeft), right = GET_UINT16_VAL(pRight); if (left > right) return 1; if (left < right) return -1; return 0; } +int32_t compareUint16ValDesc(const void* pLeft, const void* pRight) { + return compareUint16Val(pRight, pLeft); +} + int32_t compareUint8Val(const void* pLeft, const void* pRight) { uint8_t left = GET_UINT8_VAL(pLeft), right = GET_UINT8_VAL(pRight); if (left > right) return 1; @@ -75,6 +118,10 @@ int32_t compareUint8Val(const void* pLeft, const void* pRight) { return 0; } +int32_t compareUint8ValDesc(const void* pLeft, const void* pRight) { + return compareUint8Val(pRight, pLeft); +} + int32_t compareFloatVal(const void *pLeft, const void *pRight) { float p1 = GET_FLOAT_VAL(pLeft); float p2 = GET_FLOAT_VAL(pRight); @@ -92,8 +139,12 @@ int32_t compareFloatVal(const void *pLeft, const void *pRight) { } if (FLT_EQUAL(p1, p2)) { return 0; - } - return FLT_GREATER(p1, p2) ? 1: -1; + } + return FLT_GREATER(p1, p2) ? 1: -1; +} + +int32_t compareFloatValDesc(const void* pLeft, const void* pRight) { + return compareFloatVal(pRight, pLeft); } int32_t compareDoubleVal(const void *pLeft, const void *pRight) { @@ -113,14 +164,18 @@ int32_t compareDoubleVal(const void *pLeft, const void *pRight) { } if (FLT_EQUAL(p1, p2)) { return 0; - } - return FLT_GREATER(p1, p2) ? 1: -1; + } + return FLT_GREATER(p1, p2) ? 1: -1; +} + +int32_t compareDoubleValDesc(const void* pLeft, const void* pRight) { + return compareDoubleVal(pRight, pLeft); } int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight) { int32_t len1 = varDataLen(pLeft); int32_t len2 = varDataLen(pRight); - + if (len1 != len2) { return len1 > len2? 1:-1; } else { @@ -133,10 +188,14 @@ int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight) { } } +int32_t compareLenPrefixedStrDesc(const void* pLeft, const void* pRight) { + return compareLenPrefixedStr(pRight, pLeft); +} + int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) { int32_t len1 = varDataLen(pLeft); int32_t len2 = varDataLen(pRight); - + if (len1 != len2) { return len1 > len2? 
1:-1; } else { @@ -149,6 +208,10 @@ int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) { } } +int32_t compareLenPrefixedWStrDesc(const void* pLeft, const void* pRight) { + return compareLenPrefixedWStr(pRight, pLeft); +} + /* * Compare two strings * TSDB_MATCH: Match @@ -161,33 +224,33 @@ int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) { */ int patternMatch(const char *patterStr, const char *str, size_t size, const SPatternCompareInfo *pInfo) { char c, c1; - + int32_t i = 0; int32_t j = 0; - + while ((c = patterStr[i++]) != 0) { if (c == pInfo->matchAll) { /* Match "*" */ - + while ((c = patterStr[i++]) == pInfo->matchAll || c == pInfo->matchOne) { if (c == pInfo->matchOne && (j > size || str[j++] == 0)) { // empty string, return not match return TSDB_PATTERN_NOWILDCARDMATCH; } } - + if (c == 0) { return TSDB_PATTERN_MATCH; /* "*" at the end of the pattern matches */ } - + char next[3] = {toupper(c), tolower(c), 0}; while (1) { size_t n = strcspn(str, next); str += n; - + if (str[0] == 0 || (n >= size)) { break; } - + int32_t ret = patternMatch(&patterStr[i], ++str, size - n - 1, pInfo); if (ret != TSDB_PATTERN_NOMATCH) { return ret; @@ -195,18 +258,18 @@ int patternMatch(const char *patterStr, const char *str, size_t size, const SPat } return TSDB_PATTERN_NOWILDCARDMATCH; } - + c1 = str[j++]; - + if (j <= size) { if (c == c1 || tolower(c) == tolower(c1) || (c == pInfo->matchOne && c1 != 0)) { continue; } } - + return TSDB_PATTERN_NOMATCH; } - + return (str[j] == 0 || j >= size) ? TSDB_PATTERN_MATCH : TSDB_PATTERN_NOMATCH; } @@ -214,13 +277,13 @@ int WCSPatternMatch(const wchar_t *patterStr, const wchar_t *str, size_t size, c wchar_t c, c1; wchar_t matchOne = L'_'; // "_" wchar_t matchAll = L'%'; // "%" - + int32_t i = 0; int32_t j = 0; - + while ((c = patterStr[i++]) != 0) { if (c == matchAll) { /* Match "%" */ - + while ((c = patterStr[i++]) == matchAll || c == matchOne) { if (c == matchOne && (j > size || str[j++] == 0)) { return TSDB_PATTERN_NOWILDCARDMATCH; @@ -229,40 +292,40 @@ int WCSPatternMatch(const wchar_t *patterStr, const wchar_t *str, size_t size, c if (c == 0) { return TSDB_PATTERN_MATCH; } - + wchar_t accept[3] = {towupper(c), towlower(c), 0}; while (1) { size_t n = wcscspn(str, accept); - + str += n; if (str[0] == 0 || (n >= size)) { break; } - + int32_t ret = WCSPatternMatch(&patterStr[i], ++str, size - n - 1, pInfo); if (ret != TSDB_PATTERN_NOMATCH) { return ret; } } - + return TSDB_PATTERN_NOWILDCARDMATCH; } - + c1 = str[j++]; - + if (j <= size) { if (c == c1 || towlower(c) == towlower(c1) || (c == matchOne && c1 != 0)) { continue; } } - + return TSDB_PATTERN_NOMATCH; } return (str[j] == 0 || j >= size) ? 
TSDB_PATTERN_MATCH : TSDB_PATTERN_NOMATCH; } -static int32_t compareStrPatternComp(const void* pLeft, const void* pRight) { +int32_t compareStrPatternComp(const void* pLeft, const void* pRight) { SPatternCompareInfo pInfo = {'%', '_'}; char pattern[128] = {0}; @@ -270,8 +333,8 @@ static int32_t compareStrPatternComp(const void* pLeft, const void* pRight) { assert(varDataLen(pRight) < 128); size_t sz = varDataLen(pLeft); - char *buf = malloc(sz + 1); - memcpy(buf, varDataVal(pLeft), sz); + char *buf = malloc(sz + 1); + memcpy(buf, varDataVal(pLeft), sz); buf[sz] = 0; int32_t ret = patternMatch(pattern, buf, sz, &pInfo); @@ -282,19 +345,15 @@ static int32_t compareStrPatternComp(const void* pLeft, const void* pRight) { int32_t taosArrayCompareString(const void* a, const void* b) { const char* x = *(const char**)a; const char* y = *(const char**)b; - + return compareLenPrefixedStr(x, y); } -//static int32_t compareFindStrInArray(const void* pLeft, const void* pRight) { -// const SArray* arr = (const SArray*) pRight; -// return taosArraySearchString(arr, pLeft, taosArrayCompareString, TD_EQ) == NULL ? 0 : 1; -//} -static int32_t compareFindItemInSet(const void *pLeft, const void* pRight) { - return NULL != taosHashGet((SHashObj *)pRight, varDataVal(pLeft), varDataLen(pLeft)) ? 1 : 0; +int32_t compareFindItemInSet(const void *pLeft, const void* pRight) { + return NULL != taosHashGet((SHashObj *)pRight, varDataVal(pLeft), varDataLen(pLeft)) ? 1 : 0; } -static int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) { +int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) { SPatternCompareInfo pInfo = {'%', '_'}; wchar_t pattern[128] = {0}; @@ -302,14 +361,37 @@ static int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) { memcpy(pattern, varDataVal(pRight), varDataLen(pRight)); assert(varDataLen(pRight) < 128); - + int32_t ret = WCSPatternMatch(pattern, varDataVal(pLeft), varDataLen(pLeft)/TSDB_NCHAR_SIZE, &pInfo); return (ret == TSDB_PATTERN_MATCH) ? 
0 : 1; } __compar_fn_t getComparFunc(int32_t type, int32_t optr) { __compar_fn_t comparFn = NULL; - + + if (optr == TSDB_RELATION_IN && (type != TSDB_DATA_TYPE_BINARY && type != TSDB_DATA_TYPE_NCHAR)) { + switch (type) { + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_UTINYINT: + return setCompareBytes1; + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_USMALLINT: + return setCompareBytes2; + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_UINT: + case TSDB_DATA_TYPE_FLOAT: + return setCompareBytes4; + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_UBIGINT: + case TSDB_DATA_TYPE_DOUBLE: + case TSDB_DATA_TYPE_TIMESTAMP: + return setCompareBytes8; + default: + assert(0); + } + } + switch (type) { case TSDB_DATA_TYPE_BOOL: case TSDB_DATA_TYPE_TINYINT: comparFn = compareInt8Val; break; @@ -327,13 +409,15 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) { } else { /* normal relational comparFn */ comparFn = compareLenPrefixedStr; } - + break; } - + case TSDB_DATA_TYPE_NCHAR: { if (optr == TSDB_RELATION_LIKE) { comparFn = compareWStrPatternComp; + } else if (optr == TSDB_RELATION_IN) { + comparFn = compareFindItemInSet; } else { comparFn = compareLenPrefixedWStr; } @@ -349,57 +433,57 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) { comparFn = compareInt32Val; break; } - + return comparFn; } -__compar_fn_t getKeyComparFunc(int32_t keyType) { +__compar_fn_t getKeyComparFunc(int32_t keyType, int32_t order) { __compar_fn_t comparFn = NULL; - + switch (keyType) { case TSDB_DATA_TYPE_TINYINT: case TSDB_DATA_TYPE_BOOL: - comparFn = compareInt8Val; + comparFn = (order == TSDB_ORDER_ASC)? compareInt8Val:compareInt8ValDesc; break; case TSDB_DATA_TYPE_SMALLINT: - comparFn = compareInt16Val; + comparFn = (order == TSDB_ORDER_ASC)? compareInt16Val:compareInt16ValDesc; break; case TSDB_DATA_TYPE_INT: - comparFn = compareInt32Val; + comparFn = (order == TSDB_ORDER_ASC)? compareInt32Val:compareInt32ValDesc; break; case TSDB_DATA_TYPE_BIGINT: case TSDB_DATA_TYPE_TIMESTAMP: - comparFn = compareInt64Val; + comparFn = (order == TSDB_ORDER_ASC)? compareInt64Val:compareInt64ValDesc; break; case TSDB_DATA_TYPE_FLOAT: - comparFn = compareFloatVal; + comparFn = (order == TSDB_ORDER_ASC)? compareFloatVal:compareFloatValDesc; break; case TSDB_DATA_TYPE_DOUBLE: - comparFn = compareDoubleVal; + comparFn = (order == TSDB_ORDER_ASC)? compareDoubleVal:compareDoubleValDesc; break; case TSDB_DATA_TYPE_UTINYINT: - comparFn = compareUint8Val; + comparFn = (order == TSDB_ORDER_ASC)? compareUint8Val:compareUint8ValDesc; break; case TSDB_DATA_TYPE_USMALLINT: - comparFn = compareUint16Val; + comparFn = (order == TSDB_ORDER_ASC)? compareUint16Val:compareUint16ValDesc; break; case TSDB_DATA_TYPE_UINT: - comparFn = compareUint32Val; + comparFn = (order == TSDB_ORDER_ASC)? compareUint32Val:compareUint32ValDesc; break; case TSDB_DATA_TYPE_UBIGINT: - comparFn = compareUint64Val; + comparFn = (order == TSDB_ORDER_ASC)? compareUint64Val:compareUint64ValDesc; break; case TSDB_DATA_TYPE_BINARY: - comparFn = compareLenPrefixedStr; + comparFn = (order == TSDB_ORDER_ASC)? compareLenPrefixedStr:compareLenPrefixedStrDesc; break; case TSDB_DATA_TYPE_NCHAR: - comparFn = compareLenPrefixedWStr; + comparFn = (order == TSDB_ORDER_ASC)? compareLenPrefixedWStr:compareLenPrefixedWStrDesc; break; default: - comparFn = compareInt32Val; + comparFn = (order == TSDB_ORDER_ASC)? 
compareInt32Val:compareInt32ValDesc; break; } - + return comparFn; } @@ -433,7 +517,7 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) { default: { // todo refactor tstr* t1 = (tstr*) f1; tstr* t2 = (tstr*) f2; - + if (t1->len != t2->len) { return t1->len > t2->len? 1:-1; } else { diff --git a/src/util/src/tskiplist.c b/src/util/src/tskiplist.c index b464519ba6..98fd9c094c 100644 --- a/src/util/src/tskiplist.c +++ b/src/util/src/tskiplist.c @@ -54,7 +54,7 @@ SSkipList *tSkipListCreate(uint8_t maxLevel, uint8_t keyType, uint16_t keyLen, _ pSkipList->keyFn = fn; pSkipList->seed = rand(); if (comparFn == NULL) { - pSkipList->comparFn = getKeyComparFunc(keyType); + pSkipList->comparFn = getKeyComparFunc(keyType, TSDB_ORDER_ASC); } else { pSkipList->comparFn = comparFn; } diff --git a/src/util/tests/skiplistTest.cpp b/src/util/tests/skiplistTest.cpp index dfbe0f6716..df4c5af5e2 100644 --- a/src/util/tests/skiplistTest.cpp +++ b/src/util/tests/skiplistTest.cpp @@ -70,7 +70,7 @@ void doubleSkipListTest() { } void randKeyTest() { - SSkipList* pSkipList = tSkipListCreate(10, TSDB_DATA_TYPE_INT, sizeof(int32_t), getKeyComparFunc(TSDB_DATA_TYPE_INT), + SSkipList* pSkipList = tSkipListCreate(10, TSDB_DATA_TYPE_INT, sizeof(int32_t), getKeyComparFunc(TSDB_DATA_TYPE_INT, TSDB_ORDER_ASC), false, getkey); int32_t size = 200000; From 799c642cecdcc1a660f5c6c82384806a5c51f45c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 12 Aug 2021 13:50:29 +0800 Subject: [PATCH 098/185] [td-5881]: Sort the result according to any single column in the outer query result is allowed. --- src/client/inc/tscUtil.h | 11 +-- src/query/src/qExecutor.c | 158 ++++++++++++++++++++++++++++++++------ 2 files changed, 140 insertions(+), 29 deletions(-) diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index b3674a7bf5..0b2d4fd115 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -29,15 +29,16 @@ extern "C" { #include "tsched.h" #include "tsclient.h" -#define UTIL_TABLE_IS_SUPER_TABLE(metaInfo) \ +#define UTIL_TABLE_IS_SUPER_TABLE(metaInfo) \ (((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_SUPER_TABLE)) + #define UTIL_TABLE_IS_CHILD_TABLE(metaInfo) \ (((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_CHILD_TABLE)) - -#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo)\ - (!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo))) -#define UTIL_TABLE_IS_TMP_TABLE(metaInfo) \ +#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo) \ + (!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo) || UTIL_TABLE_IS_TMP_TABLE(metaInfo))) + +#define UTIL_TABLE_IS_TMP_TABLE(metaInfo) \ (((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_TEMP_TABLE)) #pragma pack(push,1) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index e31792398d..c124bd20fc 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -224,6 +224,7 @@ static void destroySFillOperatorInfo(void* param, int32_t numOfOutput); static void destroyGroupbyOperatorInfo(void* param, int32_t numOfOutput); static void destroyProjectOperatorInfo(void* param, int32_t numOfOutput); static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput); +static void destroyOrderOperatorInfo(void* param, int32_t numOfOutput); static void destroySWindowOperatorInfo(void* param, int32_t numOfOutput); static void destroyStateWindowOperatorInfo(void* param, int32_t numOfOutput); 
static void destroyAggOperatorInfo(void* param, int32_t numOfOutput); @@ -1622,12 +1623,12 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe } startPos = pSDataBlock->info.rows - 1; - + // window start(end) key interpolation doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep); doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &win, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); } - + break; } setResultRowInterpo(pResult, RESULT_ROW_END_INTERP); @@ -2213,6 +2214,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf } break; } + case OP_StateWindow: { pRuntimeEnv->proot = createStatewindowOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); int32_t opType = pRuntimeEnv->proot->upstream[0]->operatorType; @@ -2229,24 +2231,20 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf case OP_Filter: { // todo refactor int32_t numOfFilterCols = 0; -// if (pQueryAttr->numOfFilterCols > 0) { -// pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, -// pQueryAttr->numOfOutput, pQueryAttr->tableCols, pQueryAttr->numOfFilterCols); -// } else { - if (pQueryAttr->stableQuery) { - SColumnInfo* pColInfo = - extractColumnFilterInfo(pQueryAttr->pExpr3, pQueryAttr->numOfExpr3, &numOfFilterCols); - pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr3, - pQueryAttr->numOfExpr3, pColInfo, numOfFilterCols); - freeColumnInfo(pColInfo, pQueryAttr->numOfExpr3); - } else { - SColumnInfo* pColInfo = - extractColumnFilterInfo(pQueryAttr->pExpr1, pQueryAttr->numOfOutput, &numOfFilterCols); - pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, - pQueryAttr->numOfOutput, pColInfo, numOfFilterCols); - freeColumnInfo(pColInfo, pQueryAttr->numOfOutput); - } -// } + if (pQueryAttr->stableQuery) { + SColumnInfo* pColInfo = + extractColumnFilterInfo(pQueryAttr->pExpr3, pQueryAttr->numOfExpr3, &numOfFilterCols); + pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr3, + pQueryAttr->numOfExpr3, pColInfo, numOfFilterCols); + freeColumnInfo(pColInfo, pQueryAttr->numOfExpr3); + } else { + SColumnInfo* pColInfo = + extractColumnFilterInfo(pQueryAttr->pExpr1, pQueryAttr->numOfOutput, &numOfFilterCols); + pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, + pQueryAttr->numOfOutput, pColInfo, numOfFilterCols); + freeColumnInfo(pColInfo, pQueryAttr->numOfOutput); + } + break; } @@ -2258,11 +2256,12 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf case OP_MultiwayMergeSort: { bool groupMix = true; - if(pQueryAttr->slimit.offset != 0 || pQueryAttr->slimit.limit != -1) { + if (pQueryAttr->slimit.offset != 0 || pQueryAttr->slimit.limit != -1) { groupMix = false; } + pRuntimeEnv->proot = createMultiwaySortOperatorInfo(pRuntimeEnv, pQueryAttr->pExpr1, pQueryAttr->numOfOutput, - 4096, merger, groupMix); // TODO hack it + 4096, merger, groupMix); // TODO hack it break; } @@ -2283,6 +2282,11 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf break; } + case OP_Order: { + pRuntimeEnv->proot = createOrderOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); + break; + } + default: { assert(0); } @@ -3092,7 +3096,7 @@ 
int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa // check if this data block is required to load if ((*status) != BLK_DATA_ALL_NEEDED) { bool needFilter = true; - + // the pCtx[i] result is belonged to previous time window since the outputBuf has not been set yet, // the filter result may be incorrect. So in case of interval query, we need to set the correct time output buffer if (QUERY_IS_INTERVAL_QUERY(pQueryAttr)) { @@ -5404,6 +5408,108 @@ SOperatorInfo *createMultiwaySortOperatorInfo(SQueryRuntimeEnv *pRuntimeEnv, SEx return pOperator; } +static int32_t doMergeSDatablock(SSDataBlock* pDest, SSDataBlock* pSrc) { + assert(pSrc != NULL && pDest != NULL && pDest->info.numOfCols == pSrc->info.numOfCols); + + int32_t numOfCols = pSrc->info.numOfCols; + for(int32_t i = 0; i < numOfCols; ++i) { + SColumnInfoData* pCol2 = taosArrayGet(pDest->pDataBlock, i); + SColumnInfoData* pCol1 = taosArrayGet(pSrc->pDataBlock, i); + + int32_t newSize = (pDest->info.rows + pSrc->info.rows) * pCol2->info.bytes; + char* tmp = realloc(pCol2->pData, newSize); + if (tmp != NULL) { + pCol2->pData = tmp; + int32_t offset = pCol2->info.bytes * pDest->info.rows; + memcpy(pCol2->pData + offset, pCol1->pData, pSrc->info.rows * pCol2->info.bytes); + } else { + return TSDB_CODE_VND_OUT_OF_MEMORY; + } + } + + pDest->info.rows += pSrc->info.rows; + return TSDB_CODE_SUCCESS; +} + +static SSDataBlock* doSort(void* param, bool* newgroup) { + SOperatorInfo* pOperator = (SOperatorInfo*) param; + if (pOperator->status == OP_EXEC_DONE) { + return NULL; + } + + SOrderOperatorInfo* pInfo = pOperator->info; + + SSDataBlock* pBlock = NULL; + while(1) { + publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC); + pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup); + publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC); + + // start to flush data into disk and try do multiway merge sort + if (pBlock == NULL) { + setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED); + pOperator->status = OP_EXEC_DONE; + break; + } + + int32_t code = doMergeSDatablock(pInfo->pDataBlock, pBlock); + if (code != TSDB_CODE_SUCCESS) { + // todo handle error + } + } + + int32_t numOfCols = pInfo->pDataBlock->info.numOfCols; + void** pCols = calloc(numOfCols, POINTER_BYTES); + SSchema* pSchema = calloc(numOfCols, sizeof(SSchema)); + + for(int32_t i = 0; i < numOfCols; ++i) { + SColumnInfoData* p1 = taosArrayGet(pInfo->pDataBlock->pDataBlock, i); + pCols[i] = p1->pData; + pSchema[i].colId = p1->info.colId; + pSchema[i].bytes = p1->info.bytes; + pSchema[i].type = (uint8_t) p1->info.type; + } + + __compar_fn_t comp = getKeyComparFunc(pSchema[pInfo->colIndex].type, pInfo->order); + taoscQSort(pCols, pSchema, numOfCols, pInfo->pDataBlock->info.rows, pInfo->colIndex, comp); + + tfree(pCols); + tfree(pSchema); + return (pInfo->pDataBlock->info.rows > 0)? 
pInfo->pDataBlock:NULL; +} + +SOperatorInfo *createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { + SOrderOperatorInfo* pInfo = calloc(1, sizeof(SOrderOperatorInfo)); + + { + SSDataBlock* pDataBlock = calloc(1, sizeof(SSDataBlock)); + pDataBlock->pDataBlock = taosArrayInit(numOfOutput, sizeof(SColumnInfoData)); + for(int32_t i = 0; i < numOfOutput; ++i) { + SColumnInfoData col = {{0}}; + col.info.bytes = pExpr->base.resBytes; + col.info.colId = pExpr->base.resColId; + col.info.type = pExpr->base.resType; + taosArrayPush(pDataBlock->pDataBlock, &col); + } + + pDataBlock->info.numOfCols = numOfOutput; + pInfo->pDataBlock = pDataBlock; + } + + SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + pOperator->name = "InMemoryOrder"; + pOperator->operatorType = OP_Order; + pOperator->blockingOptr = true; + pOperator->status = OP_IN_EXECUTING; + pOperator->info = pInfo; + pOperator->exec = doSort; + pOperator->cleanup = destroyOrderOperatorInfo; + pOperator->pRuntimeEnv = pRuntimeEnv; + + appendUpstream(pOperator, upstream); + return pOperator; +} + static int32_t getTableScanOrder(STableScanInfo* pTableScanInfo) { return pTableScanInfo->order; } @@ -6404,6 +6510,11 @@ static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) { pInfo->pRes = destroyOutputBuf(pInfo->pRes); } +static void destroyOrderOperatorInfo(void* param, int32_t numOfOutput) { + SOrderOperatorInfo* pInfo = (SOrderOperatorInfo*) param; + pInfo->pDataBlock = destroyOutputBuf(pInfo->pDataBlock); +} + static void destroyConditionOperatorInfo(void* param, int32_t numOfOutput) { SFilterOperatorInfo* pInfo = (SFilterOperatorInfo*) param; doDestroyFilterInfo(pInfo->pFilterInfo, pInfo->numOfFilterCols); @@ -6752,7 +6863,6 @@ SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorIn pOperator->numOfOutput = numOfOutput; pOperator->info = pInfo; pOperator->pRuntimeEnv = pRuntimeEnv; - pOperator->exec = doFill; pOperator->cleanup = destroySFillOperatorInfo; From c0e81449bfc86d24b2d77cef25c5a7a417c7924e Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 12 Aug 2021 15:36:29 +0800 Subject: [PATCH 099/185] [TD-6013]: taosdemo buffer overflow. (#7317) --- src/kit/taosdemo/taosdemo.c | 49 +++++++++++++++++++++++-------------- 1 file changed, 31 insertions(+), 18 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 27b26e7364..bd0feaeb92 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -5101,21 +5101,27 @@ static int64_t generateStbRowData( int64_t dataLen = 0; char *pstr = recBuf; int64_t maxLen = MAX_DATA_SIZE; + int tmpLen; dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "(%" PRId64 ",", timestamp); for (int i = 0; i < stbInfo->columnCount; i++) { if ((0 == strncasecmp(stbInfo->columns[i].dataType, - "BINARY", strlen("BINARY"))) + "BINARY", 6)) || (0 == strncasecmp(stbInfo->columns[i].dataType, - "NCHAR", strlen("NCHAR")))) { + "NCHAR", 5))) { if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) { errorPrint( "binary or nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); return -1; } + if ((stbInfo->columns[i].dataLen + 1) > + /* need count 3 extra chars \', \', and , */ + (remainderBufLen - dataLen - 3)) { + return 0; + } char* buf = (char*)calloc(stbInfo->columns[i].dataLen+1, 1); if (NULL == buf) { errorPrint( "calloc failed! 
size:%d\n", stbInfo->columns[i].dataLen); @@ -5129,19 +5135,20 @@ static int64_t generateStbRowData( char *tmp; if (0 == strncasecmp(stbInfo->columns[i].dataType, - "INT", strlen("INT"))) { + "INT", 3)) { if ((g_args.demo_mode) && (i == 1)) { tmp = demo_voltage_int_str(); } else { tmp = rand_int_str(); } - tstrncpy(pstr + dataLen, tmp, INT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, INT_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "BIGINT", strlen("BIGINT"))) { + "BIGINT", 6)) { tmp = rand_bigint_str(); tstrncpy(pstr + dataLen, tmp, BIGINT_BUFF_LEN); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "FLOAT", strlen("FLOAT"))) { + "FLOAT", 5)) { if (g_args.demo_mode) { if (i == 0) { tmp = demo_current_float_str(); @@ -5151,27 +5158,33 @@ static int64_t generateStbRowData( } else { tmp = rand_float_str(); } - tstrncpy(pstr + dataLen, tmp, FLOAT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, FLOAT_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "DOUBLE", strlen("DOUBLE"))) { + "DOUBLE", 6)) { tmp = rand_double_str(); - tstrncpy(pstr + dataLen, tmp, DOUBLE_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, DOUBLE_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "SMALLINT", strlen("SMALLINT"))) { + "SMALLINT", 8)) { tmp = rand_smallint_str(); - tstrncpy(pstr + dataLen, tmp, SMALLINT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, SMALLINT_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "TINYINT", strlen("TINYINT"))) { + "TINYINT", 7)) { tmp = rand_tinyint_str(); - tstrncpy(pstr + dataLen, tmp, TINYINT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, TINYINT_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "BOOL", strlen("BOOL"))) { + "BOOL", 4)) { tmp = rand_bool_str(); - tstrncpy(pstr + dataLen, tmp, BOOL_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, BOOL_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "TIMESTAMP", strlen("TIMESTAMP"))) { + "TIMESTAMP", 9)) { tmp = rand_int_str(); - tstrncpy(pstr + dataLen, tmp, INT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, INT_BUFF_LEN)); } else { errorPrint( "Not support data type: %s\n", stbInfo->columns[i].dataType); return -1; @@ -5182,7 +5195,7 @@ static int64_t generateStbRowData( dataLen += 1; } - if (dataLen > (remainderBufLen - (DOUBLE_BUFF_LEN + 1))) + if (dataLen > (remainderBufLen - (128))) return 0; } From bf3477e7c3d16eda61785080124498fe47297f24 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 12 Aug 2021 16:03:49 +0800 Subject: [PATCH 100/185] [td-5881]: Sort the result according to any single column in the outer query result is allowed. 
--- src/query/inc/qExecutor.h | 2 +- src/query/src/qExecutor.c | 15 ++++++++++----- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 6dca502838..d30971ab47 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -577,7 +577,7 @@ SOperatorInfo* createFilterOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperator int32_t numOfOutput, SColumnInfo* pCols, int32_t numOfFilter); SOperatorInfo* createJoinOperatorInfo(SOperatorInfo** pUpstream, int32_t numOfUpstream, SSchema* pSchema, int32_t numOfOutput); -SOperatorInfo *createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); +SOperatorInfo* createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, SOrderVal* pOrderVal); SSDataBlock* doGlobalAggregate(void* param, bool* newgroup); SSDataBlock* doMultiwayMergeSort(void* param, bool* newgroup); diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index c124bd20fc..55a762d809 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -2283,7 +2283,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf } case OP_Order: { - pRuntimeEnv->proot = createOrderOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); + pRuntimeEnv->proot = createOrderOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput, &pQueryAttr->order); break; } @@ -5428,6 +5428,7 @@ static int32_t doMergeSDatablock(SSDataBlock* pDest, SSDataBlock* pSrc) { } pDest->info.rows += pSrc->info.rows; + return TSDB_CODE_SUCCESS; } @@ -5478,7 +5479,7 @@ static SSDataBlock* doSort(void* param, bool* newgroup) { return (pInfo->pDataBlock->info.rows > 0)? 
pInfo->pDataBlock:NULL; } -SOperatorInfo *createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { +SOperatorInfo *createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, SOrderVal* pOrderVal) { SOrderOperatorInfo* pInfo = calloc(1, sizeof(SOrderOperatorInfo)); { @@ -5486,10 +5487,14 @@ SOperatorInfo *createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI pDataBlock->pDataBlock = taosArrayInit(numOfOutput, sizeof(SColumnInfoData)); for(int32_t i = 0; i < numOfOutput; ++i) { SColumnInfoData col = {{0}}; - col.info.bytes = pExpr->base.resBytes; - col.info.colId = pExpr->base.resColId; - col.info.type = pExpr->base.resType; + col.info.colId = pExpr[i].base.colInfo.colId; + col.info.bytes = pExpr[i].base.colBytes; + col.info.type = pExpr[i].base.colType; taosArrayPush(pDataBlock->pDataBlock, &col); + + if (col.info.colId == pOrderVal->orderColId) { + pInfo->colIndex = i; + } } pDataBlock->info.numOfCols = numOfOutput; From 977b2202eaf775b0d33d95b0ea9f33887b14028e Mon Sep 17 00:00:00 2001 From: cpwu Date: Thu, 12 Aug 2021 16:21:14 +0800 Subject: [PATCH 101/185] [TD-5935] add case for TD-5935 --- tests/pytest/functions/queryTestCases.py | 394 ++++++++++++++++++++++- 1 file changed, 392 insertions(+), 2 deletions(-) diff --git a/tests/pytest/functions/queryTestCases.py b/tests/pytest/functions/queryTestCases.py index b7480fdbd5..ae2bbc4a81 100644 --- a/tests/pytest/functions/queryTestCases.py +++ b/tests/pytest/functions/queryTestCases.py @@ -13,6 +13,8 @@ import sys import subprocess +import random +import math from util.log import * from util.cases import * @@ -106,6 +108,9 @@ class TDTestCase: tdSql.execute("drop database if exists db1") tdSql.execute("create database if not exists db keep 3650") tdSql.execute("create database if not exists db1 keep 3650") + tdSql.execute("create database if not exists new keep 3650") + tdSql.execute("create database if not exists private keep 3650") + tdSql.execute("create database if not exists db2 keep 3650") tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)") tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t1 int)") @@ -122,6 +127,14 @@ class TDTestCase: # p1 不进入指定数据库 tdSql.query("show create database db") tdSql.checkRows(1) + tdSql.query("show create database db1") + tdSql.checkRows(1) + tdSql.query("show create database db2") + tdSql.checkRows(1) + tdSql.query("show create database new") + tdSql.checkRows(1) + tdSql.query("show create database private") + tdSql.checkRows(1) tdSql.error("show create database ") tdSql.error("show create databases db ") tdSql.error("show create database db.stb1") @@ -340,17 +353,394 @@ class TDTestCase: pass + def td4889(self): + tdLog.printNoPrefix("==========TD-4889==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)") + + for i in range(1000): + tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})") + for j in range(100): + tdSql.execute(f"insert into db.t1{i} values (now-100d, {i+j})") + + tdSql.query("show vgroups") + index = tdSql.getData(0,0) + tdSql.checkData(0, 6, 0) + tdSql.execute(f"compact vnodes in({index})") + for i in range(3): + tdSql.query("show vgroups") + if tdSql.getData(0, 6) == 1: + tdLog.printNoPrefix("show vgroups row:0 col:6 data:1 == expect:1") + 
break + if i == 3: + tdLog.exit("compacting not occured") + time.sleep(0.5) + + pass + + def td5168insert(self): + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 float, c2 float, c3 double, c4 double) tags(t1 int)") + tdSql.execute("create table db.t1 using db.stb1 tags(1)") + + for i in range(5): + c1 = 1001.11 + i*0.1 + c2 = 1001.11 + i*0.1 + 1*0.01 + c3 = 1001.11 + i*0.1 + 2*0.01 + c4 = 1001.11 + i*0.1 + 3*0.01 + tdSql.execute(f"insert into db.t1 values ('2021-07-01 08:00:0{i}.000', {c1}, {c2}, {c3}, {c4})") + + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:00.000', 1001.11, 1001.12, 1001.13, 1001.14)") + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:01.000', 1001.21, 1001.22, 1001.23, 1001.24)") + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:02.000', 1001.31, 1001.32, 1001.33, 1001.34)") + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:03.000', 1001.41, 1001.42, 1001.43, 1001.44)") + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:04.000', 1001.51, 1001.52, 1001.53, 1001.54)") + + # for i in range(1000000): + for i in range(1000000): + random1 = random.uniform(1000,1001) + random2 = random.uniform(1000,1001) + random3 = random.uniform(1000,1001) + random4 = random.uniform(1000,1001) + tdSql.execute(f"insert into db.t1 values (now+{i}a, {random1}, {random2},{random3}, {random4})") + + pass + + def td5168(self): + tdLog.printNoPrefix("==========TD-5168==========") + # 插入小范围内的随机数 + tdLog.printNoPrefix("=====step0: 默认情况下插入数据========") + self.td5168insert() + + # 获取五个时间点的数据作为基准数值,未压缩情况下精准匹配 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4) + for j in range(4): + locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1) + print(f"f{j}{i}:", locals()["f" + str(j) + str(i)]) + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # tdSql.query("select * from db.t1 limit 100,1") + # f10, f11, f12, f13 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + # + # tdSql.query("select * from db.t1 limit 1000,1") + # f20, f21, f22, f23 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + # + # tdSql.query("select * from db.t1 limit 10000,1") + # f30, f31, f32, f33 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + # + # tdSql.query("select * from db.t1 limit 100000,1") + # f40, f41, f42, f43 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + # + # tdSql.query("select * from db.t1 limit 1000000,1") + # f50, f51, f52, f53 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + + # 关闭服务并获取未开启压缩情况下的数据容量 + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + + cfgdir = self.getCfgDir() + cfgfile = self.getCfgFile() + + lossy_cfg_cmd=f"grep lossyColumns {cfgfile}|awk '{{print $2}}'" + data_size_cmd = f"du -s {cfgdir}/../data/vnode/ | awk '{{print $1}}'" + dsize_init = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"close the lossyColumns,data size is: {dsize_init};the lossyColumns line is: {lossy_args}") + + 
################################################### + float_lossy = "float" + double_lossy = "double" + float_double_lossy = "float|double" + no_loosy = "" + + double_precision_cmd = f"sed -i '$a dPrecision 0.000001' {cfgfile}" + _ = subprocess.check_output(double_precision_cmd, shell=True).decode("utf-8") + + lossy_float_cmd = f"sed -i '$a lossyColumns {float_lossy}' {cfgfile} " + lossy_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {double_lossy}' {cfgfile} " + lossy_float_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {float_double_lossy}' {cfgfile} " + lossy_no_cmd = f"sed -i '$a lossyColumns {no_loosy}' {cfgfile} " + + ################################################### + + # 开启有损压缩,参数float,并启动服务插入数据 + tdLog.printNoPrefix("=====step1: lossyColumns设置为float========") + lossy_float = subprocess.check_output(lossy_float_cmd, shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4) + for j in range(4): + # locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1) + # print(f"f{j}{i}:", locals()["f" + str(j) + str(i)]) + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为float情况下的数据容量 + tdDnodes.stop(index) + dsize_float = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float};the lossyColumns line is: {lossy_args}") + + # 修改有损压缩,参数double,并启动服务 + tdLog.printNoPrefix("=====step2: lossyColumns设置为double========") + lossy_double = subprocess.check_output(lossy_double_cmd, shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + for j in range(4): + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为double情况下的数据容量 + tdDnodes.stop(index) + dsize_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_double};the lossyColumns line is: {lossy_args}") + + # 修改有损压缩,参数 float&&double ,并启动服务 + tdLog.printNoPrefix("=====step3: lossyColumns设置为 float&&double ========") + lossy_float_double = subprocess.check_output(lossy_float_double_cmd, shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + for j in range(4): + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为 float&&double 情况下的数据容量 + tdDnodes.stop(index) + dsize_float_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float_double};the lossyColumns line is: {lossy_args}") + + if not ((dsize_float_double < dsize_init) and (dsize_double < dsize_init) and (dsize_float < dsize_init)) : + tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}") + tdLog.printNoPrefix(f"When 
+            tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}")
+            tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
+            tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
+            tdLog.exit("lossy compression did not take effect")
+        else:
+            tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}")
+            tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}")
+            tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
+            tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
+            tdLog.printNoPrefix("lossy compression took effect")
+
+        pass
+
+    def td5433(self):
+        tdLog.printNoPrefix("==========TD-5433==========")
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650")
+
+        tdSql.execute("use db")
+        tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t0 tinyint, t1 int)")
+        tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t0 binary(16), t1 binary(16))")
+        numtab=2000000
+        for i in range(numtab):
+            sql = f"create table db.t{i} using db.stb1 tags({i%128}, {100+i})"
+            tdSql.execute(sql)
+            tdSql.execute(f"insert into db.t{i} values (now-10d, {i})")
+            tdSql.execute(f"insert into db.t{i} values (now-9d, {i*2})")
+            tdSql.execute(f"insert into db.t{i} values (now-8d, {i*3})")
+
+        tdSql.execute("create table db.t01 using db.stb2 tags('1', '100')")
+        tdSql.execute("create table db.t02 using db.stb2 tags('2', '200')")
+        tdSql.execute("create table db.t03 using db.stb2 tags('3', '300')")
+        tdSql.execute("create table db.t04 using db.stb2 tags('4', '400')")
+        tdSql.execute("create table db.t05 using db.stb2 tags('5', '500')")
+
+        tdSql.query("select distinct t1 from stb1 where t1 != '150'")
+        tdSql.checkRows(numtab-1)
+        tdSql.query("select distinct t1 from stb1 where t1 != 150")
+        tdSql.checkRows(numtab-1)
+        tdSql.query("select distinct t1 from stb1 where t1 = 150")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t1 from stb1 where t1 = '150'")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t1 from stb1")
+        tdSql.checkRows(numtab)
+
+        tdSql.query("select distinct t0 from stb1 where t0 != '2'")
+        tdSql.checkRows(127)
+        tdSql.query("select distinct t0 from stb1 where t0 != 2")
+        tdSql.checkRows(127)
+        tdSql.query("select distinct t0 from stb1 where t0 = 2")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t0 from stb1 where t0 = '2'")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t0 from stb1")
+        tdSql.checkRows(128)
+
+        tdSql.query("select distinct t1 from stb2 where t1 != '200'")
+        tdSql.checkRows(4)
+        tdSql.query("select distinct t1 from stb2 where t1 != 200")
+        tdSql.checkRows(4)
+        tdSql.query("select distinct t1 from stb2 where t1 = 200")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t1 from stb2 where t1 = '200'")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t1 from stb2")
+        tdSql.checkRows(5)
+
+        tdSql.query("select distinct t0 from stb2 where t0 != '2'")
+        tdSql.checkRows(4)
+        tdSql.query("select distinct t0 from stb2 where t0 != 2")
+        tdSql.checkRows(4)
+        tdSql.query("select distinct t0 from stb2 where t0 = 2")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t0 from stb2 where t0 = '2'")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t0 from stb2")
+        tdSql.checkRows(5)
+
+        pass
+
+    def td5798(self):
+        tdLog.printNoPrefix("==========TD-5798==========")
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists 
db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 int) tags(t0 tinyint, t1 int, t2 int)") + tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 int) tags(t2 binary(16), t3 binary(16), t4 int)") + for i in range(100): + sql = f"create table db.t{i} using db.stb1 tags({i%7}, {(i-1)%7}, {i%2})" + tdSql.execute(sql) + tdSql.execute(f"insert into db.t{i} values (now-10d, {i}, {i%3})") + tdSql.execute(f"insert into db.t{i} values (now-9d, {i*2}, {(i-1)%3})") + tdSql.execute(f"insert into db.t{i} values (now-8d, {i*3}, {(i-2)%3})") + + tdSql.execute(f"create table db.t0{i} using db.stb2 tags('{i}', '{100-i}', {i%3})") + tdSql.execute(f"insert into db.t0{i} values (now-10d, {i}, {(i+1)%3})") + tdSql.execute(f"insert into db.t0{i} values (now-9d, {i*2}, {(i+2)%3})") + tdSql.execute(f"insert into db.t0{i} values (now-8d, {i*3}, {(i)%3})") + + tdSql.query("select distinct t1 from stb1") + tdSql.checkRows(7) + tdSql.query("select distinct t0, t1 from stb1") + tdSql.checkRows(7) + tdSql.query("select distinct t1, t0 from stb1") + tdSql.checkRows(7) + tdSql.query("select distinct t1, t2 from stb1") + tdSql.checkRows(14) + tdSql.query("select distinct t0, t1, t2 from stb1") + tdSql.checkRows(14) + tdSql.query("select distinct t0 t1, t1 t2 from stb1") + tdSql.checkRows(7) + tdSql.query("select distinct t0, t0, t0 from stb1") + tdSql.checkRows(7) + tdSql.query("select distinct t0, t1 from t1") + tdSql.checkRows(1) + + + ########## should be error ######### + tdSql.error("select distinct from stb1") + tdSql.error("select distinct t2 from ") + tdSql.error("distinct t2 from stb1") + tdSql.error("select distinct stb1") + tdSql.error("select distinct t0, t1, t2, t3 from stb1") + tdSql.error("select distinct stb1.t0, stb1.t1, stb2.t2, stb2.t3 from stb1") + + tdSql.error("select dist t0 from stb1") + tdSql.error("select distinct stb2.t2, stb2.t3 from stb1") + tdSql.error("select distinct stb2.t2 t1, stb2.t3 t2 from stb1") + + tdSql.error("select distinct t0, t1 from t1 where t0 < 7") + + ########## add where condition ########## + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3") + tdSql.checkRows(3) + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2") + tdSql.checkRows(2) + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2 offset 2") + tdSql.checkRows(1) + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 slimit 2") + tdSql.checkRows(3) + tdSql.error("select distinct t0, t1 from stb1 where c1 > 2") + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 and t1 < 5") + tdSql.checkRows(1) + tdSql.error("select distinct stb1.t0, stb1.t1 from stb1, stb2 where stb1.t2=stb2.t4") + tdSql.error("select distinct t0, t1 from stb1 where stb2.t4 > 2") + # tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2") + tdSql.error("select distinct t0 from stb1 where t0 in (select t0 from stb1 where t0 > 2)") + tdSql.query("select distinct t0, t1 from stb1 where t0 in (1,2,3,4,5)") + tdSql.checkRows(5) + tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2) ") + tdSql.checkRows(4) + tdSql.query("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.checkRows(1) + tdSql.error("select distinct t1, t0 
from (select t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.query("select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.checkRows(1) + # tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3") + tdSql.query("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3") + + + pass + + def td5935(self): + tdLog.printNoPrefix("==========TD-5798==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float) tags(t1 int, t2 int)") + nowtime=int(round((time.time()*1000))) + for i in range(100): + sql = f"create table db.t{i} using db.stb1 tags({i % 7}, {i % 2})" + tdSql.execute(sql) + for j in range(1000): + tdSql.execute(f"insert into db.t{i} values ({nowtime-j*10}, {1000-j}, {round(random.random()*j,3)})") + tdSql.execute(f"insert into db.t{i} (ts) values ({nowtime-10000}) ") + + fillsql=f"select last(*) from t0 where ts>={nowtime-10000} and ts<{nowtime} interval(10a) fill(next) limit 10" + tdSql.query(fillsql) + fillResult=False + if (tdSql.getData(0,2) != 0) and (tdSql.getData(0, 2) is not None): + fillResult=True + if fillResult: + tdLog.success(f"sql is :{fillsql}, fill(next) is correct") + else: + tdLog.exit("fill(next) is wrong") + + + def run(self): # master branch # self.td3690() # self.td4082() # self.td4288() - self.td4724() + # self.td4724() + # self.td5798() + self.td5935() # develop branch # self.td4097() - + # self.td4889() + # self.td5168() + # self.td5433() def stop(self): tdSql.close() From 4943f4ebc47261f4a2f53812fcd01fdd152903d1 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 12 Aug 2021 16:21:48 +0800 Subject: [PATCH 102/185] [td-5881]fix bug in TD-5881 --- src/query/src/qExecutor.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 55a762d809..7088d58830 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -5498,6 +5498,7 @@ SOperatorInfo *createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI } pDataBlock->info.numOfCols = numOfOutput; + pInfo->order = pOrderVal->order; pInfo->pDataBlock = pDataBlock; } From 932d065da5de3df129a02813852493dc56d50ac8 Mon Sep 17 00:00:00 2001 From: Linhe Huo Date: Thu, 12 Aug 2021 16:45:41 +0800 Subject: [PATCH 103/185] [TD-6032]: fix nanosecond 999999999 error for nodejs [ci skip] (#7329) --- src/connector/nodejs/nodetaos/taosobjects.js | 3 ++- src/connector/nodejs/package.json | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/connector/nodejs/nodetaos/taosobjects.js b/src/connector/nodejs/nodetaos/taosobjects.js index 0fc8dc8ef1..3bc0fe0aca 100644 --- a/src/connector/nodejs/nodetaos/taosobjects.js +++ b/src/connector/nodejs/nodetaos/taosobjects.js @@ -47,7 +47,8 @@ class TaosTimestamp extends Date { super(Math.floor(date / 1000)); this.precisionExtras = date % 1000; } else if (precision === 2) { - super(parseInt(date / 1000000)); + // use BigInt to fix: 1623254400999999999 / 1000000 = 1623254401000 which not expected + super(parseInt(BigInt(date) / 1000000n)); // use BigInt to fix: 1625801548423914405 % 1000000 = 914496 which not expected (914405) this.precisionExtras = parseInt(BigInt(date) % 1000000n); } else { diff --git a/src/connector/nodejs/package.json b/src/connector/nodejs/package.json index db37318a16..6a2c66100b 100644 --- 
a/src/connector/nodejs/package.json +++ b/src/connector/nodejs/package.json @@ -1,6 +1,6 @@ { "name": "td2.0-connector", - "version": "2.0.9", + "version": "2.0.10", "description": "A Node.js connector for TDengine.", "main": "tdengine.js", "directories": { From ea16e9c73c1ed0e8038bbc6042e46e0d760602b7 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Thu, 12 Aug 2021 17:11:29 +0800 Subject: [PATCH 104/185] [TD-5835]: add test case for daily performance test --- tests/perftest-scripts/perftest-query.sh | 7 +++++++ tests/pytest/tools/taosdemoPerformance.py | 3 +-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/tests/perftest-scripts/perftest-query.sh b/tests/perftest-scripts/perftest-query.sh index 5b2c860122..d4853c0825 100755 --- a/tests/perftest-scripts/perftest-query.sh +++ b/tests/perftest-scripts/perftest-query.sh @@ -101,7 +101,14 @@ function runQueryPerfTest { python3 insert/insertFromCSVPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT + echo "=========== taosdemo performance: 4 int columns, 10000 tables, 100000 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT + + echo "=========== taosdemo performance: 400 int columns, 400 double columns, 200 binary(128) columns, 10000 tables, 1000 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT + python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 400 -D 400 -B 200 -t 10000 -r 1000 | tee -a $PERFORMANCE_TEST_REPORT + + echo "=========== taosdemo performance: 1900 int columns, 1900 double columns, 200 binary(128) columns, 10000 tables, 1000 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT + python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 1900 -D 1900 -B 200 -t 10000 -r 1000 | tee -a $PERFORMANCE_TEST_REPORT } diff --git a/tests/pytest/tools/taosdemoPerformance.py b/tests/pytest/tools/taosdemoPerformance.py index c28f94b3db..4a5abd49d8 100644 --- a/tests/pytest/tools/taosdemoPerformance.py +++ b/tests/pytest/tools/taosdemoPerformance.py @@ -63,7 +63,7 @@ class taosdemoPerformace: "batch_create_tbl_num": 10, "insert_mode": "taosc", "insert_rows": self.numOfRows, - "interlace_rows": 100, + "interlace_rows": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000, @@ -172,7 +172,6 @@ class taosdemoPerformace: cursor.execute("create database if not exists %s" % self.dbName) cursor.execute("use %s" % self.dbName) cursor.execute("create table if not exists taosdemo_perf (ts timestamp, create_table_time float, insert_records_time float, records_per_second float, commit_id binary(50), avg_delay float, max_delay float, min_delay float, branch binary(50), type binary(20), numoftables int, numofrows int, numofint int, numofdouble int, numofbinary int)") - print("==================== taosdemo performance ====================") print("create tables time: %f" % float(self.createTableTime)) print("insert records time: %f" % float(self.insertRecordsTime)) print("records per second: %f" % float(self.recordsPerSecond)) From 7e9be7c5635d7c8d728b8796f609bdfab4b7309a Mon Sep 17 00:00:00 2001 From: tickduan <417921451@qq.com> Date: Thu, 12 Aug 2021 17:33:45 +0800 Subject: [PATCH 105/185] add version to trigger pr --- src/kit/taospack/taospack.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/kit/taospack/taospack.c b/src/kit/taospack/taospack.c index ad188c3010..aa0bf4d485 
100644 --- a/src/kit/taospack/taospack.c +++ b/src/kit/taospack/taospack.c @@ -732,7 +732,7 @@ extern char Compressor []; // ----------------- main ---------------------- // int main(int argc, char *argv[]) { - printf("welcome to use taospack tools v1.5\n"); + printf("welcome to use taospack tools v1.6\n"); //printf(" sizeof(int)=%d\n", (int)sizeof(int)); //printf(" sizeof(long)=%d\n", (int)sizeof(long)); From 1b7861f8fd30b3171e9ce1781aeab82e2eaabee5 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 12 Aug 2021 18:06:51 +0800 Subject: [PATCH 106/185] [TD-6013]: taosdemo buffer overflow. (#7319) --- src/kit/taosdemo/taosdemo.c | 49 +++++++++++++++++++++++-------------- 1 file changed, 31 insertions(+), 18 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index c895545b81..c255ea6841 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -5101,21 +5101,27 @@ static int64_t generateStbRowData( int64_t dataLen = 0; char *pstr = recBuf; int64_t maxLen = MAX_DATA_SIZE; + int tmpLen; dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "(%" PRId64 ",", timestamp); for (int i = 0; i < stbInfo->columnCount; i++) { if ((0 == strncasecmp(stbInfo->columns[i].dataType, - "BINARY", strlen("BINARY"))) + "BINARY", 6)) || (0 == strncasecmp(stbInfo->columns[i].dataType, - "NCHAR", strlen("NCHAR")))) { + "NCHAR", 5))) { if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) { errorPrint( "binary or nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); return -1; } + if ((stbInfo->columns[i].dataLen + 1) > + /* need count 3 extra chars \', \', and , */ + (remainderBufLen - dataLen - 3)) { + return 0; + } char* buf = (char*)calloc(stbInfo->columns[i].dataLen+1, 1); if (NULL == buf) { errorPrint( "calloc failed! 
size:%d\n", stbInfo->columns[i].dataLen); @@ -5129,19 +5135,20 @@ static int64_t generateStbRowData( char *tmp; if (0 == strncasecmp(stbInfo->columns[i].dataType, - "INT", strlen("INT"))) { + "INT", 3)) { if ((g_args.demo_mode) && (i == 1)) { tmp = demo_voltage_int_str(); } else { tmp = rand_int_str(); } - tstrncpy(pstr + dataLen, tmp, INT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, INT_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "BIGINT", strlen("BIGINT"))) { + "BIGINT", 6)) { tmp = rand_bigint_str(); tstrncpy(pstr + dataLen, tmp, BIGINT_BUFF_LEN); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "FLOAT", strlen("FLOAT"))) { + "FLOAT", 5)) { if (g_args.demo_mode) { if (i == 0) { tmp = demo_current_float_str(); @@ -5151,27 +5158,33 @@ static int64_t generateStbRowData( } else { tmp = rand_float_str(); } - tstrncpy(pstr + dataLen, tmp, FLOAT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, FLOAT_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "DOUBLE", strlen("DOUBLE"))) { + "DOUBLE", 6)) { tmp = rand_double_str(); - tstrncpy(pstr + dataLen, tmp, DOUBLE_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, DOUBLE_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "SMALLINT", strlen("SMALLINT"))) { + "SMALLINT", 8)) { tmp = rand_smallint_str(); - tstrncpy(pstr + dataLen, tmp, SMALLINT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, SMALLINT_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "TINYINT", strlen("TINYINT"))) { + "TINYINT", 7)) { tmp = rand_tinyint_str(); - tstrncpy(pstr + dataLen, tmp, TINYINT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, TINYINT_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "BOOL", strlen("BOOL"))) { + "BOOL", 4)) { tmp = rand_bool_str(); - tstrncpy(pstr + dataLen, tmp, BOOL_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, BOOL_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "TIMESTAMP", strlen("TIMESTAMP"))) { + "TIMESTAMP", 9)) { tmp = rand_int_str(); - tstrncpy(pstr + dataLen, tmp, INT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, INT_BUFF_LEN)); } else { errorPrint( "Not support data type: %s\n", stbInfo->columns[i].dataType); return -1; @@ -5182,7 +5195,7 @@ static int64_t generateStbRowData( dataLen += 1; } - if (dataLen > (remainderBufLen - (DOUBLE_BUFF_LEN + 1))) + if (dataLen > (remainderBufLen - (128))) return 0; } From b61eaa10402dfb6a966ac63647914f04ec5ee7ec Mon Sep 17 00:00:00 2001 From: wpan Date: Fri, 13 Aug 2021 09:34:39 +0800 Subject: [PATCH 107/185] fix stddev query condition column issue and add test case --- src/client/src/tscSubquery.c | 4 ++++ tests/script/general/parser/function.sim | 22 ++++++++++++++++++---- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 2e7e02cfd5..293fe18c77 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2397,6 +2397,10 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { } else { SSchema ss = {.type = (uint8_t)pCol->info.type, .bytes = pCol->info.bytes, .colId = (int16_t)pCol->columnIndex}; tscColumnListInsert(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid, &ss); + int32_t ti = 
tscColumnExists(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid); + assert(ti >= 0); + SColumn* x = taosArrayGetP(pNewQueryInfo->colList, ti); + tscColumnCopy(x, pCol); } } } diff --git a/tests/script/general/parser/function.sim b/tests/script/general/parser/function.sim index 0c93fe919a..e9e907f97a 100644 --- a/tests/script/general/parser/function.sim +++ b/tests/script/general/parser/function.sim @@ -1149,9 +1149,11 @@ endi sql select derivative(test_column_alias_name, 1s, 0) from (select avg(k) test_column_alias_name from t1 interval(1s)); -sql create table smeters (ts timestamp, current float, voltage int); -sql insert into smeters values ('2021-08-08 10:10:10', 10, 1); -sql insert into smeters values ('2021-08-08 10:10:12', 10, 2); +sql create table smeters (ts timestamp, current float, voltage int) tags (t1 int); +sql create table smeter1 using smeters tags (1); +sql insert into smeter1 values ('2021-08-08 10:10:10', 10, 2); +sql insert into smeter1 values ('2021-08-08 10:10:12', 10, 2); +sql insert into smeter1 values ('2021-08-08 10:10:14', 20, 1); sql select stddev(voltage) from smeters where ts>='2021-08-08 10:10:10.000' and ts < '2021-08-08 10:10:20.000' and current=10 interval(1000a); if $rows != 2 then @@ -1160,9 +1162,21 @@ endi if $data00 != @21-08-08 10:10:10.000@ then return -1 endi +if $data01 != 0.000000000 then + return -1 +endi if $data10 != @21-08-08 10:10:12.000@ then return -1 endi +if $data11 != 0.000000000 then + return -1 +endi - +sql select stddev(voltage) from smeters where ts>='2021-08-08 10:10:10.000' and ts < '2021-08-08 10:10:20.000' and current=10; +if $rows != 1 then + return -1 +endi +if $data00 != 0.000000000 then + return -1 +endi From 356018a99057a3deedfd3d4b366ce1f4e0a61e87 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 13 Aug 2021 09:38:01 +0800 Subject: [PATCH 108/185] Hotfix/sangshuduo/td 5844 cmdline parameters align for master (#7337) * [TD-5844]: make cmd line parameter similar. * fix test case align with taosdemo change. * fix windows stack overflow issue. * fix mac compile error. * fix taosdemo cmdline parameter in tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py --- src/kit/taosdemo/taosdemo.c | 125 ++++++++++++------ .../taosdemoTestSupportNanoInsert.py | 73 +++++----- .../taosdemoTestSupportNanoInsert.py | 2 +- tests/pytest/tools/taosdemoTestInterlace.py | 2 +- 4 files changed, 126 insertions(+), 76 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index bd0feaeb92..aad2fe95fa 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -77,7 +77,7 @@ extern char configDir[]; #define COL_BUFFER_LEN ((TSDB_COL_NAME_LEN + 15) * TSDB_MAX_COLUMNS) #define MAX_USERNAME_SIZE 64 -#define MAX_PASSWORD_SIZE 64 +#define MAX_PASSWORD_SIZE 16 #define MAX_HOSTNAME_SIZE 253 // https://man7.org/linux/man-pages/man7/hostname.7.html #define MAX_TB_NAME_SIZE 64 #define MAX_DATA_SIZE (16*TSDB_MAX_COLUMNS)+20 // max record len: 16*MAX_COLUMNS, timestamp string and ,('') need extra space @@ -216,7 +216,7 @@ typedef struct SArguments_S { uint16_t port; uint16_t iface; char * user; - char * password; + char password[MAX_PASSWORD_SIZE]; char * database; int replica; char * tb_prefix; @@ -709,24 +709,24 @@ static void printHelp() { printf("%s%s%s%s\n", indent, "-u", indent, "The TDengine user name to use when connecting to the server. 
Default is 'root'."); #ifdef _TD_POWER_ - printf("%s%s%s%s\n", indent, "-P", indent, + printf("%s%s%s%s\n", indent, "-p", indent, "The password to use when connecting to the server. Default is 'powerdb'."); printf("%s%s%s%s\n", indent, "-c", indent, "Configuration directory. Default is '/etc/power/'."); #elif (_TD_TQ_ == true) - printf("%s%s%s%s\n", indent, "-P", indent, + printf("%s%s%s%s\n", indent, "-p", indent, "The password to use when connecting to the server. Default is 'tqueue'."); printf("%s%s%s%s\n", indent, "-c", indent, "Configuration directory. Default is '/etc/tq/'."); #else - printf("%s%s%s%s\n", indent, "-P", indent, + printf("%s%s%s%s\n", indent, "-p", indent, "The password to use when connecting to the server. Default is 'taosdata'."); printf("%s%s%s%s\n", indent, "-c", indent, "Configuration directory. Default is '/etc/taos/'."); #endif printf("%s%s%s%s\n", indent, "-h", indent, "The host to connect to TDengine. Default is localhost."); - printf("%s%s%s%s\n", indent, "-p", indent, + printf("%s%s%s%s\n", indent, "-P", indent, "The TCP/IP port number to use for the connection. Default is 0."); printf("%s%s%s%s\n", indent, "-I", indent, #if STMT_IFACE_ENABLED == 1 @@ -826,11 +826,11 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { exit(EXIT_FAILURE); } arguments->host = argv[++i]; - } else if (strcmp(argv[i], "-p") == 0) { + } else if (strcmp(argv[i], "-P") == 0) { if ((argc == i+1) || (!isStringNumber(argv[i+1]))) { printHelp(); - errorPrint("%s", "\n\t-p need a number following!\n"); + errorPrint("%s", "\n\t-P need a number following!\n"); exit(EXIT_FAILURE); } arguments->port = atoi(argv[++i]); @@ -860,13 +860,13 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { exit(EXIT_FAILURE); } arguments->user = argv[++i]; - } else if (strcmp(argv[i], "-P") == 0) { - if (argc == i+1) { - printHelp(); - errorPrint("%s", "\n\t-P need a valid string following!\n"); - exit(EXIT_FAILURE); + } else if (strncmp(argv[i], "-p", 2) == 0) { + if (strlen(argv[i]) == 2) { + printf("Enter password:"); + scanf("%s", arguments->password); + } else { + tstrncpy(arguments->password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); } - arguments->password = argv[++i]; } else if (strcmp(argv[i], "-o") == 0) { if (argc == i+1) { printHelp(); @@ -1065,7 +1065,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { arguments->debug_print = true; } else if (strcmp(argv[i], "-gg") == 0) { arguments->verbose_print = true; - } else if (strcmp(argv[i], "-pp") == 0) { + } else if (strcmp(argv[i], "-PP") == 0) { arguments->performance_print = true; } else if (strcmp(argv[i], "-O") == 0) { if ((argc == i+1) || @@ -2319,15 +2319,15 @@ static void printfDbInfoForQueryToFile( } static void printfQuerySystemInfo(TAOS * taos) { - char filename[BUFFER_SIZE+1] = {0}; - char buffer[BUFFER_SIZE+1] = {0}; + char filename[MAX_FILE_NAME_LEN] = {0}; + char buffer[1024] = {0}; TAOS_RES* res; time_t t; struct tm* lt; time(&t); lt = localtime(&t); - snprintf(filename, BUFFER_SIZE, "querySystemInfo-%d-%d-%d %d:%d:%d", + snprintf(filename, MAX_FILE_NAME_LEN, "querySystemInfo-%d-%d-%d %d:%d:%d", lt->tm_year+1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, lt->tm_min, lt->tm_sec); @@ -2359,12 +2359,12 @@ static void printfQuerySystemInfo(TAOS * taos) { printfDbInfoForQueryToFile(filename, dbInfos[i], i); // show db.vgroups - snprintf(buffer, BUFFER_SIZE, "show %s.vgroups;", dbInfos[i]->name); + snprintf(buffer, 1024, "show %s.vgroups;", dbInfos[i]->name); res = 
taos_query(taos, buffer); xDumpResultToFile(filename, res); // show db.stables - snprintf(buffer, BUFFER_SIZE, "show %s.stables;", dbInfos[i]->name); + snprintf(buffer, 1024, "show %s.stables;", dbInfos[i]->name); res = taos_query(taos, buffer); xDumpResultToFile(filename, res); free(dbInfos[i]); @@ -2713,7 +2713,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, char* dbName, char* sTblName, char** childTblNameOfSuperTbl, int64_t* childTblCountOfSuperTbl, int64_t limit, uint64_t offset) { - char command[BUFFER_SIZE] = "\0"; + char command[1024] = "\0"; char limitBuf[100] = "\0"; TAOS_RES * res; @@ -2727,7 +2727,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, } //get all child table name use cmd: select tbname from superTblName; - snprintf(command, BUFFER_SIZE, "select tbname from %s.%s %s", + snprintf(command, 1024, "select tbname from %s.%s %s", dbName, sTblName, limitBuf); res = taos_query(taos, command); @@ -2805,13 +2805,13 @@ static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName, static int getSuperTableFromServer(TAOS * taos, char* dbName, SSuperTable* superTbls) { - char command[BUFFER_SIZE] = "\0"; + char command[1024] = "\0"; TAOS_RES * res; TAOS_ROW row = NULL; int count = 0; //get schema use cmd: describe superTblName; - snprintf(command, BUFFER_SIZE, "describe %s.%s", dbName, superTbls->sTblName); + snprintf(command, 1024, "describe %s.%s", dbName, superTbls->sTblName); res = taos_query(taos, command); int32_t code = taos_errno(res); if (code != 0) { @@ -2891,7 +2891,8 @@ static int createSuperTable( TAOS * taos, char* dbName, SSuperTable* superTbl) { - char command[BUFFER_SIZE] = "\0"; + char *command = calloc(1, BUFFER_SIZE); + assert(command); char cols[COL_BUFFER_LEN] = "\0"; int colIndex; @@ -2902,6 +2903,7 @@ static int createSuperTable( if (superTbl->columnCount == 0) { errorPrint("%s() LN%d, super table column count is %d\n", __func__, __LINE__, superTbl->columnCount); + free(command); return -1; } @@ -2964,6 +2966,7 @@ static int createSuperTable( taos_close(taos); errorPrint("%s() LN%d, config error data type : %s\n", __func__, __LINE__, dataType); + free(command); exit(-1); } } @@ -2976,6 +2979,7 @@ static int createSuperTable( errorPrint("%s() LN%d, Failed when calloc, size:%d", __func__, __LINE__, len+1); taos_close(taos); + free(command); exit(-1); } @@ -2986,6 +2990,7 @@ static int createSuperTable( if (superTbl->tagCount == 0) { errorPrint("%s() LN%d, super table tag count is %d\n", __func__, __LINE__, superTbl->tagCount); + free(command); return -1; } @@ -3051,6 +3056,7 @@ static int createSuperTable( taos_close(taos); errorPrint("%s() LN%d, config error tag type : %s\n", __func__, __LINE__, dataType); + free(command); exit(-1); } } @@ -3066,13 +3072,16 @@ static int createSuperTable( if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { errorPrint( "create supertable %s failed!\n\n", superTbl->sTblName); + free(command); return -1; } + debugPrint("create supertable %s success!\n\n", superTbl->sTblName); + free(command); return 0; } -static int createDatabasesAndStables() { +int createDatabasesAndStables(char *command) { TAOS * taos = NULL; int ret = 0; taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, NULL, g_Dbs.port); @@ -3080,8 +3089,7 @@ static int createDatabasesAndStables() { errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); return -1; } - char command[BUFFER_SIZE] = "\0"; - + for (int i = 0; i < g_Dbs.dbCount; i++) { if (g_Dbs.db[i].drop) { 
sprintf(command, "drop database if exists %s;", g_Dbs.db[i].dbName); @@ -7145,7 +7153,8 @@ static void startMultiThreadInsertData(int threads, char* db_name, exit(-1); } - char buffer[BUFFER_SIZE]; + char *buffer = calloc(1, BUFFER_SIZE); + assert(buffer); char *pstr = buffer; if ((superTblInfo) @@ -7174,8 +7183,11 @@ static void startMultiThreadInsertData(int threads, char* db_name, ret, taos_stmt_errstr(pThreadInfo->stmt)); free(pids); free(infos); + free(buffer); exit(-1); } + + free(buffer); } #endif } else { @@ -7310,12 +7322,15 @@ static void *readTable(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; TAOS *taos = pThreadInfo->taos; setThreadName("readTable"); - char command[BUFFER_SIZE] = "\0"; + char *command = calloc(1, BUFFER_SIZE); + assert(command); + uint64_t sTime = pThreadInfo->start_time; char *tb_prefix = pThreadInfo->tb_prefix; FILE *fp = fopen(pThreadInfo->filePath, "a"); if (NULL == fp) { errorPrint( "fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno)); + free(command); return NULL; } @@ -7354,6 +7369,7 @@ static void *readTable(void *sarg) { taos_free_result(pSql); taos_close(taos); fclose(fp); + free(command); return NULL; } @@ -7374,6 +7390,7 @@ static void *readTable(void *sarg) { } fprintf(fp, "\n"); fclose(fp); + free(command); #endif return NULL; } @@ -7383,10 +7400,13 @@ static void *readMetric(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; TAOS *taos = pThreadInfo->taos; setThreadName("readMetric"); - char command[BUFFER_SIZE] = "\0"; + char *command = calloc(1, BUFFER_SIZE); + assert(command); + FILE *fp = fopen(pThreadInfo->filePath, "a"); if (NULL == fp) { printf("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno)); + free(command); return NULL; } @@ -7431,6 +7451,7 @@ static void *readMetric(void *sarg) { taos_free_result(pSql); taos_close(taos); fclose(fp); + free(command); return NULL; } int count = 0; @@ -7448,6 +7469,7 @@ static void *readMetric(void *sarg) { fprintf(fp, "\n"); } fclose(fp); + free(command); #endif return NULL; } @@ -7484,11 +7506,16 @@ static int insertTestProcess() { init_rand_data(); // create database and super tables - if(createDatabasesAndStables() != 0) { + char *cmdBuffer = calloc(1, BUFFER_SIZE); + assert(cmdBuffer); + + if(createDatabasesAndStables(cmdBuffer) != 0) { if (g_fpOfInsertResult) fclose(g_fpOfInsertResult); + free(cmdBuffer); return -1; } + free(cmdBuffer); // pretreatement if (prepareSampleData() != 0) { @@ -7657,7 +7684,9 @@ static void replaceChildTblName(char* inSql, char* outSql, int tblIndex) { } static void *superTableQuery(void *sarg) { - char sqlstr[BUFFER_SIZE]; + char *sqlstr = calloc(1, BUFFER_SIZE); + assert(sqlstr); + threadInfo *pThreadInfo = (threadInfo *)sarg; setThreadName("superTableQuery"); @@ -7672,6 +7701,7 @@ static void *superTableQuery(void *sarg) { if (taos == NULL) { errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", pThreadInfo->threadID, taos_errstr(NULL)); + free(sqlstr); return NULL; } else { pThreadInfo->taos = taos; @@ -7696,7 +7726,7 @@ static void *superTableQuery(void *sarg) { st = taosGetTimestampMs(); for (int i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) { for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) { - memset(sqlstr,0,sizeof(sqlstr)); + memset(sqlstr, 0, BUFFER_SIZE); replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, i); if (g_queryInfo.superQueryInfo.result[j][0] != '\0') { sprintf(pThreadInfo->filePath, "%s-%d", @@ -7727,6 +7757,7 @@ static void 
*superTableQuery(void *sarg) { (double)(et - st)/1000.0); } + free(sqlstr); return NULL; } @@ -7960,7 +7991,9 @@ static TAOS_SUB* subscribeImpl( static void *superSubscribe(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; - char subSqlstr[BUFFER_SIZE]; + char *subSqlStr = calloc(1, BUFFER_SIZE); + assert(subSqlStr); + TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0}; uint64_t tsubSeq; @@ -7969,6 +8002,7 @@ static void *superSubscribe(void *sarg) { if (pThreadInfo->ntables > MAX_QUERY_SQL_COUNT) { errorPrint("The table number(%"PRId64") of the thread is more than max query sql count: %d\n", pThreadInfo->ntables, MAX_QUERY_SQL_COUNT); + free(subSqlStr); exit(-1); } @@ -7981,6 +8015,7 @@ static void *superSubscribe(void *sarg) { if (pThreadInfo->taos == NULL) { errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", pThreadInfo->threadID, taos_errstr(NULL)); + free(subSqlStr); return NULL; } } @@ -7991,6 +8026,7 @@ static void *superSubscribe(void *sarg) { taos_close(pThreadInfo->taos); errorPrint( "use database %s failed!\n\n", g_queryInfo.dbName); + free(subSqlStr); return NULL; } @@ -8005,25 +8041,26 @@ static void *superSubscribe(void *sarg) { pThreadInfo->end_table_to, i); sprintf(topic, "taosdemo-subscribe-%"PRIu64"-%"PRIu64"", i, pThreadInfo->querySeq); - memset(subSqlstr, 0, sizeof(subSqlstr)); + memset(subSqlStr, 0, BUFFER_SIZE); replaceChildTblName( g_queryInfo.superQueryInfo.sql[pThreadInfo->querySeq], - subSqlstr, i); + subSqlStr, i); if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) { sprintf(pThreadInfo->filePath, "%s-%d", g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq], pThreadInfo->threadID); } - verbosePrint("%s() LN%d, [%d] subSqlstr: %s\n", - __func__, __LINE__, pThreadInfo->threadID, subSqlstr); + verbosePrint("%s() LN%d, [%d] subSqlStr: %s\n", + __func__, __LINE__, pThreadInfo->threadID, subSqlStr); tsub[tsubSeq] = subscribeImpl( STABLE_CLASS, - pThreadInfo, subSqlstr, topic, + pThreadInfo, subSqlStr, topic, g_queryInfo.superQueryInfo.subscribeRestart, g_queryInfo.superQueryInfo.subscribeInterval); if (NULL == tsub[tsubSeq]) { taos_close(pThreadInfo->taos); + free(subSqlStr); return NULL; } } @@ -8080,12 +8117,13 @@ static void *superSubscribe(void *sarg) { consumed[tsubSeq]= 0; tsub[tsubSeq] = subscribeImpl( STABLE_CLASS, - pThreadInfo, subSqlstr, topic, + pThreadInfo, subSqlStr, topic, g_queryInfo.superQueryInfo.subscribeRestart, g_queryInfo.superQueryInfo.subscribeInterval ); if (NULL == tsub[tsubSeq]) { taos_close(pThreadInfo->taos); + free(subSqlStr); return NULL; } } @@ -8105,6 +8143,7 @@ static void *superSubscribe(void *sarg) { } taos_close(pThreadInfo->taos); + free(subSqlStr); return NULL; } @@ -8407,9 +8446,7 @@ static void setParaFromArg() { tstrncpy(g_Dbs.user, g_args.user, MAX_USERNAME_SIZE); } - if (g_args.password) { - tstrncpy(g_Dbs.password, g_args.password, MAX_PASSWORD_SIZE); - } + tstrncpy(g_Dbs.password, g_args.password, MAX_PASSWORD_SIZE); if (g_args.port) { g_Dbs.port = g_args.port; diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py index 8fcb263125..f069bb8f70 100644 --- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py @@ -36,7 +36,7 @@ class TDTestCase: if ("taosd" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in 
rootRealPath): - buildPath = root[:len(root)-len("/build/bin")] + buildPath = root[:len(root) - len("/build/bin")] break return buildPath @@ -46,14 +46,15 @@ class TDTestCase: tdLog.exit("taosd not found!") else: tdLog.info("taosd found in %s" % buildPath) - binPath = buildPath+ "/build/bin/" + binPath = buildPath + "/build/bin/" - # insert: create one or mutiple tables per sql and insert multiple rows per sql # insert data from a special timestamp # check stable stb0 - os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json -y " % binPath) + os.system( + "%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json -y " % + binPath) tdSql.execute("use nsdb") tdSql.query("show stables") tdSql.checkData(0, 4, 100) @@ -64,9 +65,9 @@ class TDTestCase: tdSql.query("select count(*) from stb0") tdSql.checkData(0, 0, 10000) tdSql.query("describe stb0") - tdSql.checkDataType(9, 1,"TIMESTAMP") + tdSql.checkDataType(9, 1, "TIMESTAMP") tdSql.query("select last(ts) from stb0") - tdSql.checkData(0, 0,"2021-07-01 00:00:00.990000000") + tdSql.checkData(0, 0, "2021-07-01 00:00:00.990000000") # check stable stb1 which is insert with disord @@ -78,16 +79,18 @@ class TDTestCase: tdSql.checkData(0, 0, 10000) # check c8 is an nano timestamp tdSql.query("describe stb1") - tdSql.checkDataType(9, 1,"TIMESTAMP") + tdSql.checkDataType(9, 1, "TIMESTAMP") # check insert timestamp_step is nano_second tdSql.query("select last(ts) from stb1") - tdSql.checkData(0, 0,"2021-07-01 00:00:00.990000000") - + tdSql.checkData(0, 0, "2021-07-01 00:00:00.990000000") + # insert data from now time # check stable stb0 - os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json -y " % binPath) - + os.system( + "%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json -y " % + binPath) + tdSql.execute("use nsdb2") tdSql.query("show stables") tdSql.checkData(0, 4, 100) @@ -99,11 +102,14 @@ class TDTestCase: tdSql.checkData(0, 0, 10000) # check c8 is an nano timestamp tdSql.query("describe stb0") - tdSql.checkDataType(9,1,"TIMESTAMP") + tdSql.checkDataType(9, 1, "TIMESTAMP") - # insert by csv files and timetamp is long int , strings in ts and cols - - os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json -y " % binPath) + # insert by csv files and timetamp is long int , strings in ts and + # cols + + os.system( + "%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json -y " % + binPath) tdSql.execute("use nsdbcsv") tdSql.query("show stables") tdSql.checkData(0, 4, 100) @@ -111,29 +117,37 @@ class TDTestCase: tdSql.checkData(0, 0, 10000) tdSql.query("describe stb0") tdSql.checkDataType(3, 1, "TIMESTAMP") - tdSql.query("select count(*) from stb0 where ts > \"2021-07-01 00:00:00.490000000\"") + tdSql.query( + "select count(*) from stb0 where ts > \"2021-07-01 00:00:00.490000000\"") tdSql.checkData(0, 0, 5000) tdSql.query("select count(*) from stb0 where ts < 1626918583000000000") tdSql.checkData(0, 0, 10000) - - os.system("rm -rf ./insert_res.txt") - os.system("rm -rf tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNano*.py.sql") - # taosdemo test insert with command and parameter , detals show taosdemo --help - os.system("%staosdemo -u root -P taosdata -p 6030 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) + os.system("rm -rf ./insert_res.txt") + os.system( + "rm -rf tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNano*.py.sql") + + # taosdemo 
test insert with command and parameter , detals show + # taosdemo --help + os.system( + "%staosdemo -u root -ptaosdata -P 6030 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % + binPath) tdSql.query("select count(*) from test.meters") tdSql.checkData(0, 0, 600) # check taosdemo -s - sqls_ls = ['drop database if exists nsdbsql;','create database nsdbsql precision "ns" keep 3600 days 6 update 1;', - 'use nsdbsql;','CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int);', - 'CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);', - 'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);', - 'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 85, 32, 0.76);'] + sqls_ls = [ + 'drop database if exists nsdbsql;', + 'create database nsdbsql precision "ns" keep 3600 days 6 update 1;', + 'use nsdbsql;', + 'CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int);', + 'CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);', + 'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);', + 'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 85, 32, 0.76);'] - with open("./taosdemoTestNanoCreateDB.sql",mode ="a" ) as sql_files: + with open("./taosdemoTestNanoCreateDB.sql", mode="a") as sql_files: for sql in sqls_ls: - sql_files.write(sql+"\n") + sql_files.write(sql + "\n") sql_files.close() sleep(10) @@ -141,11 +155,10 @@ class TDTestCase: os.system("%staosdemo -s taosdemoTestNanoCreateDB.sql -y " % binPath) tdSql.query("select count(*) from nsdbsql.meters") tdSql.checkData(0, 0, 2) - + os.system("rm -rf ./res.txt") os.system("rm -rf ./*.py.sql") os.system("rm -rf ./taosdemoTestNanoCreateDB.sql") - def stop(self): tdSql.close() diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py index 266a8fa712..c3fdff00ec 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py @@ -120,7 +120,7 @@ class TDTestCase: os.system("rm -rf tools/taosdemoAllTest/taosdemoTestSupportNano*.py.sql") # taosdemo test insert with command and parameter , detals show taosdemo --help - os.system("%staosdemo -u root -P taosdata -p 6030 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) + os.system("%staosdemo -u root -ptaosdata -P 6030 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) tdSql.query("select count(*) from test.meters") tdSql.checkData(0, 0, 600) # check taosdemo -s diff --git a/tests/pytest/tools/taosdemoTestInterlace.py b/tests/pytest/tools/taosdemoTestInterlace.py index 4c551f327a..30c04729b7 100644 --- a/tests/pytest/tools/taosdemoTestInterlace.py +++ b/tests/pytest/tools/taosdemoTestInterlace.py @@ -49,7 +49,7 @@ class TDTestCase: else: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath + "/build/bin/" - taosdemoCmd = "%staosdemo -f tools/insert-interlace.json -pp 2>&1 | grep sleep | wc -l" % binPath + taosdemoCmd = "%staosdemo -f tools/insert-interlace.json -PP 2>&1 | grep sleep | wc -l" % binPath sleepTimes = subprocess.check_output( taosdemoCmd, shell=True).decode("utf-8") print("sleep times: %d" % int(sleepTimes)) From 0309fd44126e3ff6ce44cd516f6b87163a5b46ef Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Aug 2021 10:17:47 +0800 Subject: 
[PATCH 109/185] [td-255] fix compiler error on windows. --- src/query/src/qExtbuffer.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/query/src/qExtbuffer.c b/src/query/src/qExtbuffer.c index 91004fe707..5152d17264 100644 --- a/src/query/src/qExtbuffer.c +++ b/src/query/src/qExtbuffer.c @@ -1113,7 +1113,7 @@ void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOf for(int32_t i = 0; i < numOfRows; ++i) { char* dest = buf + size * i; - memcpy(dest, pCols[index] + bytes * i, bytes); + memcpy(dest, ((char*) pCols[index]) + bytes * i, bytes); *(int32_t*)(dest+bytes) = i; } @@ -1128,7 +1128,7 @@ void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOf if (i == index) { for(int32_t j = 0; j < numOfRows; ++j){ char* src = buf + (j * size); - char* dest = pCols[i] + (j * bytes1); + char* dest = ((char*)pCols[i]) + (j * bytes1); memcpy(dest, src, bytes1); } } else { @@ -1144,7 +1144,7 @@ void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOf memcpy(p, pCols[i], bytes1 * numOfRows); for(int32_t j = 0; j < numOfRows; ++j){ - char* dest = pCols[i] + bytes1 * j; + char* dest = ((char*)pCols[i]) + bytes1 * j; int32_t newPos = *(int32_t*)(buf + (j * size) + bytes); char* src = p + (newPos * bytes1); From 7fc6dc17fec04d4fce2babe002ce865bee0aca89 Mon Sep 17 00:00:00 2001 From: cpwu Date: Fri, 13 Aug 2021 10:27:27 +0800 Subject: [PATCH 110/185] [TD-5933] add case for TD-5933 --- tests/pytest/functions/queryTestCases.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/pytest/functions/queryTestCases.py b/tests/pytest/functions/queryTestCases.py index ae2bbc4a81..8523c6310e 100644 --- a/tests/pytest/functions/queryTestCases.py +++ b/tests/pytest/functions/queryTestCases.py @@ -714,6 +714,12 @@ class TDTestCase: tdSql.execute(f"insert into db.t{i} values ({nowtime-j*10}, {1000-j}, {round(random.random()*j,3)})") tdSql.execute(f"insert into db.t{i} (ts) values ({nowtime-10000}) ") + ########### TD-5933 verify the bug of "function stddev with interval return 0 rows" is fixed ########## + stddevAndIntervalSql=f"select last(*) from t0 where ts>={nowtime-10000} interval(10a) fill(next) limit 10" + tdSql.query(stddevAndIntervalSql) + tdSql.checkRows(10) + + ########## TD-5978 verify the bug of "when start row is null, result by fill(next) is 0 " is fixed ########## fillsql=f"select last(*) from t0 where ts>={nowtime-10000} and ts<{nowtime} interval(10a) fill(next) limit 10" tdSql.query(fillsql) fillResult=False @@ -724,7 +730,7 @@ class TDTestCase: else: tdLog.exit("fill(next) is wrong") - + pass def run(self): From ecea1ce747a00ef859e3ec3241eb803358dac1a8 Mon Sep 17 00:00:00 2001 From: tickduan <417921451@qq.com> Date: Fri, 13 Aug 2021 11:17:55 +0800 Subject: [PATCH 111/185] fixed realloc can cause memory leak --- deps/MsvcLibX/src/iconv.c | 6 ++++-- deps/MsvcLibX/src/main.c | 10 +++++++--- deps/MsvcLibX/src/realpath.c | 16 ++++++++++++---- src/balance/src/bnScore.c | 3 ++- src/client/src/tscPrepare.c | 5 +++-- src/client/src/tscSql.c | 4 +++- src/client/src/tscUtil.c | 12 +++++++++--- src/common/inc/tdataformat.h | 10 ++++++---- src/common/src/tdataformat.c | 5 +++-- src/kit/shell/src/shellCheck.c | 5 +++-- src/kit/taospack/taospack.c | 5 ++++- src/mnode/src/mnodeTable.c | 5 +++-- src/os/src/detail/osMemory.c | 5 +++-- src/os/src/windows/wGetline.c | 6 ++++-- src/query/src/qTsbuf.c | 7 +++++-- 15 files changed, 71 insertions(+), 33 deletions(-) diff --git a/deps/MsvcLibX/src/iconv.c 
b/deps/MsvcLibX/src/iconv.c index 40b6e6462d..1ec0dc7354 100644 --- a/deps/MsvcLibX/src/iconv.c +++ b/deps/MsvcLibX/src/iconv.c @@ -98,6 +98,7 @@ int ConvertString(char *buf, size_t nBytes, UINT cpFrom, UINT cpTo, LPCSTR lpDef char *DupAndConvert(const char *string, UINT cpFrom, UINT cpTo, LPCSTR lpDefaultChar) { int nBytes; char *pBuf; + char *pBuf1; nBytes = 4 * ((int)lstrlen(string) + 1); /* Worst case for the size needed */ pBuf = (char *)malloc(nBytes); if (!pBuf) { @@ -110,8 +111,9 @@ char *DupAndConvert(const char *string, UINT cpFrom, UINT cpTo, LPCSTR lpDefault free(pBuf); return NULL; } - pBuf = realloc(pBuf, nBytes+1); - return pBuf; + pBuf1 = realloc(pBuf, nBytes+1); + if(pBuf1 == NULL && pBuf != NULL) free(pBuf); + return pBuf1; } int CountCharacters(const char *string, UINT cp) { diff --git a/deps/MsvcLibX/src/main.c b/deps/MsvcLibX/src/main.c index f366b081ad..9b9b4a46af 100644 --- a/deps/MsvcLibX/src/main.c +++ b/deps/MsvcLibX/src/main.c @@ -68,6 +68,7 @@ int BreakArgLine(LPSTR pszCmdLine, char ***pppszArg) { int iString = FALSE; /* TRUE = string mode; FALSE = non-string mode */ int nBackslash = 0; char **ppszArg; + char **ppszArg1; int iArg = FALSE; /* TRUE = inside an argument; FALSE = between arguments */ ppszArg = (char **)malloc((argc+1)*sizeof(char *)); @@ -89,7 +90,10 @@ int BreakArgLine(LPSTR pszCmdLine, char ***pppszArg) { if ((!iArg) && (c != ' ') && (c != '\t')) { /* Beginning of a new argument */ iArg = TRUE; ppszArg[argc++] = pszCopy+j; - ppszArg = (char **)realloc(ppszArg, (argc+1)*sizeof(char *)); + ppszArg1 = (char **)realloc(ppszArg, (argc+1)*sizeof(char *)); + if(ppszArg1 == NULL && ppszArg != NULL) + free(ppszArg); + ppszArg = ppszArg1; if (!ppszArg) return -1; pszCopy[j] = c0 = '\0'; } @@ -212,14 +216,14 @@ int _initU(void) { fprintf(stderr, "Warning: Can't convert the argument line to UTF-8\n"); _acmdln[0] = '\0'; } - realloc(_acmdln, n+1); /* Resize the memory block to fit the UTF-8 line */ + //realloc(_acmdln, n+1); /* Resize the memory block to fit the UTF-8 line */ /* Should not fail since we make it smaller */ /* Record the console code page, to allow converting the output accordingly */ codePage = GetConsoleOutputCP(); return 0; -} +}ß #endif /* defined(_WIN32) */ diff --git a/deps/MsvcLibX/src/realpath.c b/deps/MsvcLibX/src/realpath.c index 5fbcf773a2..ec1b606bf6 100644 --- a/deps/MsvcLibX/src/realpath.c +++ b/deps/MsvcLibX/src/realpath.c @@ -196,6 +196,7 @@ not_compact_enough: /* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */ char *realpath(const char *path, char *outbuf) { char *pOutbuf = outbuf; + char *pOutbuf1; int iErr; const char *pc; @@ -242,8 +243,11 @@ realpath_failed: return NULL; } - if (!outbuf) pOutbuf = realloc(pOutbuf, strlen(pOutbuf) + 1); - return pOutbuf; + if (!outbuf) { + pOutbuf1 = realloc(pOutbuf, strlen(pOutbuf) + 1); + if(pOutbuf1 == NULL && pOutbuf) free(pOutbuf); + } + return pOutbuf1; } #endif @@ -517,6 +521,7 @@ int ResolveLinksA(const char *path, char *buf, size_t bufsize) { /* Normally defined in stdlib.h. 
Output buf must contain PATH_MAX bytes */ char *realpathU(const char *path, char *outbuf) { char *pOutbuf = outbuf; + char *pOutbuf1; char *pPath1 = NULL; char *pPath2 = NULL; int iErr; @@ -590,10 +595,13 @@ realpathU_failed: } DEBUG_LEAVE(("return 0x%p; // \"%s\"\n", pOutbuf, pOutbuf)); - if (!outbuf) pOutbuf = realloc(pOutbuf, strlen(pOutbuf) + 1); + if (!outbuf) { + pOutbuf1 = realloc(pOutbuf, strlen(pOutbuf) + 1); + if(pOutbuf1 == NULL && pOutbuf) free(pOutbuf); + } free(pPath1); free(pPath2); - return pOutbuf; + return pOutbuf1; } #endif /* defined(_WIN32) */ diff --git a/src/balance/src/bnScore.c b/src/balance/src/bnScore.c index 7d94df1c23..a2b9ba8e09 100644 --- a/src/balance/src/bnScore.c +++ b/src/balance/src/bnScore.c @@ -117,7 +117,8 @@ void bnCleanupDnodes() { static void bnCheckDnodesSize(int32_t dnodesNum) { if (tsBnDnodes.maxSize <= dnodesNum) { tsBnDnodes.maxSize = dnodesNum * 2; - tsBnDnodes.list = realloc(tsBnDnodes.list, tsBnDnodes.maxSize * sizeof(SDnodeObj *)); + SDnodeObj** list1 = realloc(tsBnDnodes.list, tsBnDnodes.maxSize * sizeof(SDnodeObj *)); + if(list1) tsBnDnodes.list = list1; } } diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 40664241c1..06a9505086 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -1527,8 +1527,9 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) { pCmd->insertParam.insertType = TSDB_QUERY_TYPE_STMT_INSERT; pCmd->insertParam.objectId = pSql->self; - pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1); - + char* sqlstr = realloc(pSql->sqlstr, sqlLen + 1); + if(sqlstr == NULL && pSql->sqlstr) free(pSql->sqlstr); + pSql->sqlstr = sqlstr; if (pSql->sqlstr == NULL) { tscError("%p failed to malloc sql string buffer", pSql); STMT_RET(TSDB_CODE_TSC_OUT_OF_MEMORY); diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 6f3d5c3a63..026c65a595 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -887,7 +887,9 @@ int taos_validate_sql(TAOS *taos, const char *sql) { return TSDB_CODE_TSC_EXCEED_SQL_LIMIT; } - pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1); + char* sqlstr = realloc(pSql->sqlstr, sqlLen + 1); + if(sqlstr == NULL && pSql->sqlstr) free(pSql->sqlstr); + pSql->sqlstr = sqlstr; if (pSql->sqlstr == NULL) { tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self); tfree(pSql); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 3c35795b0d..ca7a5cbf77 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -625,8 +625,10 @@ static void setResRawPtrImpl(SSqlRes* pRes, SInternalField* pInfo, int32_t i, bo } else if (convertNchar && pInfo->field.type == TSDB_DATA_TYPE_NCHAR) { // convert unicode to native code in a temporary buffer extra one byte for terminated symbol - pRes->buffer[i] = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows); - + char* buffer = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows); + if(buffer == NULL) + return ; + pRes->buffer[i] = buffer; // string terminated char for binary data memset(pRes->buffer[i], 0, pInfo->field.bytes * pRes->numOfRows); @@ -4364,6 +4366,7 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name, STableMeta* p = NULL; size_t sz = 0; STableMeta* pChild = *ppChild; + STableMeta* pChild1; taosHashGetCloneExt(tscTableMetaMap, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, (void **)&p, &sz); @@ -4374,7 +4377,10 @@ int32_t 
tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name, int32_t totalBytes = (p->tableInfo.numOfColumns + p->tableInfo.numOfTags) * sizeof(SSchema); int32_t tableMetaSize = sizeof(STableMeta) + totalBytes; if (*tableMetaCapacity < tableMetaSize) { - pChild = realloc(pChild, tableMetaSize); + pChild1 = realloc(pChild, tableMetaSize); + if(pChild1 == NULL) + return -1; + pChild = pChild1; *tableMetaCapacity = (size_t)tableMetaSize; } diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index 46259c8488..a01c377539 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -547,8 +547,9 @@ SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder); static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, int8_t type, void *value) { if (pBuilder->nCols >= pBuilder->tCols) { pBuilder->tCols *= 2; - pBuilder->pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols); - if (pBuilder->pColIdx == NULL) return -1; + SColIdx* pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols); + if (pColIdx == NULL) return -1; + pBuilder->pColIdx = pColIdx; } pBuilder->pColIdx[pBuilder->nCols].colId = colId; @@ -561,8 +562,9 @@ static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, while (tlen > pBuilder->alloc - pBuilder->size) { pBuilder->alloc *= 2; } - pBuilder->buf = realloc(pBuilder->buf, pBuilder->alloc); - if (pBuilder->buf == NULL) return -1; + void* buf = realloc(pBuilder->buf, pBuilder->alloc); + if (buf == NULL) return -1; + pBuilder->buf = buf; } memcpy(POINTER_SHIFT(pBuilder->buf, pBuilder->size), value, tlen); diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index a3a6c0fed4..181afa225d 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -138,8 +138,9 @@ int tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int16_t colId, int1 if (pBuilder->nCols >= pBuilder->tCols) { pBuilder->tCols *= 2; - pBuilder->columns = (STColumn *)realloc(pBuilder->columns, sizeof(STColumn) * pBuilder->tCols); - if (pBuilder->columns == NULL) return -1; + STColumn* columns = (STColumn *)realloc(pBuilder->columns, sizeof(STColumn) * pBuilder->tCols); + if (columns == NULL) return -1; + pBuilder->columns = columns; } STColumn *pCol = &(pBuilder->columns[pBuilder->nCols]); diff --git a/src/kit/shell/src/shellCheck.c b/src/kit/shell/src/shellCheck.c index d78f1a6b99..7fc8b1409a 100644 --- a/src/kit/shell/src/shellCheck.c +++ b/src/kit/shell/src/shellCheck.c @@ -72,12 +72,13 @@ static int32_t shellShowTables(TAOS *con, char *db) { int32_t tbIndex = tbNum++; if (tbMallocNum < tbNum) { tbMallocNum = (tbMallocNum * 2 + 1); - tbNames = realloc(tbNames, tbMallocNum * sizeof(char *)); - if (tbNames == NULL) { + char** tbNames1 = realloc(tbNames, tbMallocNum * sizeof(char *)); + if (tbNames1 == NULL) { fprintf(stdout, "failed to malloc tablenames, num:%d\n", tbMallocNum); code = TSDB_CODE_TSC_OUT_OF_MEMORY; break; } + tbNames = tbNames1; } tbNames[tbIndex] = malloc(TSDB_TABLE_NAME_LEN); diff --git a/src/kit/taospack/taospack.c b/src/kit/taospack/taospack.c index 33d779dfcf..1b22c16ee0 100644 --- a/src/kit/taospack/taospack.c +++ b/src/kit/taospack/taospack.c @@ -148,7 +148,10 @@ float* read_float(const char* inFile, int* pcount){ //printf(" buff=%s float=%.50f \n ", buf, floats[fi]); if ( ++fi == malloc_cnt ) { malloc_cnt += 100000; - floats = realloc(floats, malloc_cnt*sizeof(float)); + float* floats1 = 
realloc(floats, malloc_cnt*sizeof(float)); + if(floats1 == NULL) + break; + floats = floats1; } memset(buf, 0, sizeof(buf)); } diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 0bc114ffdf..a4ecf5d6f3 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -2921,10 +2921,11 @@ static SMultiTableMeta* ensureMsgBufferSpace(SMultiTableMeta *pMultiMeta, SArray (*totalMallocLen) *= 2; } - pMultiMeta = realloc(pMultiMeta, *totalMallocLen); - if (pMultiMeta == NULL) { + SMultiTableMeta* pMultiMeta1 = realloc(pMultiMeta, *totalMallocLen); + if (pMultiMeta1 == NULL) { return NULL; } + pMultiMeta = pMultiMeta1; } return pMultiMeta; diff --git a/src/os/src/detail/osMemory.c b/src/os/src/detail/osMemory.c index d8194feab4..22954f1523 100644 --- a/src/os/src/detail/osMemory.c +++ b/src/os/src/detail/osMemory.c @@ -504,8 +504,9 @@ void * taosTRealloc(void *ptr, size_t size) { void * tptr = (void *)((char *)ptr - sizeof(size_t)); size_t tsize = size + sizeof(size_t); - tptr = realloc(tptr, tsize); - if (tptr == NULL) return NULL; + void* tptr1 = realloc(tptr, tsize); + if (tptr1 == NULL) return NULL; + tptr = tptr1; *(size_t *)tptr = size; diff --git a/src/os/src/windows/wGetline.c b/src/os/src/windows/wGetline.c index 553aecaf0a..aa45854884 100644 --- a/src/os/src/windows/wGetline.c +++ b/src/os/src/windows/wGetline.c @@ -81,11 +81,13 @@ int32_t getstr(char **lineptr, size_t *n, FILE *stream, char terminator, int32_t *n += MIN_CHUNK; nchars_avail = (int32_t)(*n + *lineptr - read_pos); - *lineptr = realloc(*lineptr, *n); - if (!*lineptr) { + char* lineptr1 = realloc(*lineptr, *n); + if (!lineptr1) { errno = ENOMEM; return -1; } + *lineptr = lineptr1; + read_pos = *n - nchars_avail + *lineptr; assert((*lineptr + *n) == (read_pos + nchars_avail)); } diff --git a/src/query/src/qTsbuf.c b/src/query/src/qTsbuf.c index 825b7960de..4cf05dd2c7 100644 --- a/src/query/src/qTsbuf.c +++ b/src/query/src/qTsbuf.c @@ -223,8 +223,11 @@ static STSGroupBlockInfoEx* addOneGroupInfo(STSBuf* pTSBuf, int32_t id) { static void shrinkBuffer(STSList* ptsData) { // shrink tmp buffer size if it consumes too many memory compared to the pre-defined size if (ptsData->allocSize >= ptsData->threshold * 2) { - ptsData->rawBuf = realloc(ptsData->rawBuf, MEM_BUF_SIZE); - ptsData->allocSize = MEM_BUF_SIZE; + char* rawBuf = realloc(ptsData->rawBuf, MEM_BUF_SIZE); + if(rawBuf) { + ptsData->rawBuf = rawBuf; + ptsData->allocSize = MEM_BUF_SIZE; + } } } From 92652f449186e766e3306dbeba28d0e7d6d52501 Mon Sep 17 00:00:00 2001 From: tickduan <417921451@qq.com> Date: Fri, 13 Aug 2021 11:47:39 +0800 Subject: [PATCH 112/185] bnScore.c modify ok --- src/balance/src/bnScore.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/balance/src/bnScore.c b/src/balance/src/bnScore.c index a2b9ba8e09..04a14357c9 100644 --- a/src/balance/src/bnScore.c +++ b/src/balance/src/bnScore.c @@ -116,9 +116,17 @@ void bnCleanupDnodes() { static void bnCheckDnodesSize(int32_t dnodesNum) { if (tsBnDnodes.maxSize <= dnodesNum) { - tsBnDnodes.maxSize = dnodesNum * 2; - SDnodeObj** list1 = realloc(tsBnDnodes.list, tsBnDnodes.maxSize * sizeof(SDnodeObj *)); - if(list1) tsBnDnodes.list = list1; + int32_t maxSize = dnodesNum * 2; + SDnodeObj** list1 = NULL; + int32_t retry = 0; + + while(list1 == NULL && retry++ < 3) { + list1 = realloc(tsBnDnodes.list, maxSize * sizeof(SDnodeObj *)); + } + if(list1) { + tsBnDnodes.list = list1; + tsBnDnodes.maxSize = maxSize; + } } } From 
490702c941b0ebb29bd6471dce58d587eea37e23 Mon Sep 17 00:00:00 2001 From: tickduan <417921451@qq.com> Date: Fri, 13 Aug 2021 13:26:32 +0800 Subject: [PATCH 113/185] msvclibX main.c error append a charater --- deps/MsvcLibX/src/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/MsvcLibX/src/main.c b/deps/MsvcLibX/src/main.c index 9b9b4a46af..85f4c83f24 100644 --- a/deps/MsvcLibX/src/main.c +++ b/deps/MsvcLibX/src/main.c @@ -223,7 +223,7 @@ int _initU(void) { codePage = GetConsoleOutputCP(); return 0; -}ß +} #endif /* defined(_WIN32) */ From 16c429760665d8486e9a3089617bb9b8de28fd5d Mon Sep 17 00:00:00 2001 From: wu champion Date: Fri, 13 Aug 2021 14:47:54 +0800 Subject: [PATCH 114/185] Update queryTestCases.py --- tests/pytest/functions/queryTestCases.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pytest/functions/queryTestCases.py b/tests/pytest/functions/queryTestCases.py index 8523c6310e..8cd3ef6b3a 100644 --- a/tests/pytest/functions/queryTestCases.py +++ b/tests/pytest/functions/queryTestCases.py @@ -715,7 +715,7 @@ class TDTestCase: tdSql.execute(f"insert into db.t{i} (ts) values ({nowtime-10000}) ") ########### TD-5933 verify the bug of "function stddev with interval return 0 rows" is fixed ########## - stddevAndIntervalSql=f"select last(*) from t0 where ts>={nowtime-10000} interval(10a) fill(next) limit 10" + stddevAndIntervalSql=f"select last(*) from t0 where ts>={nowtime-10000} interval(10a) limit 10" tdSql.query(stddevAndIntervalSql) tdSql.checkRows(10) From 9867ea1c156ce8a46a1dadf3fcc4ec029a29cf11 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Fri, 13 Aug 2021 14:55:56 +0800 Subject: [PATCH 115/185] [ci skip]: reduce data insertion time --- tests/perftest-scripts/perftest-query.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/perftest-scripts/perftest-query.sh b/tests/perftest-scripts/perftest-query.sh index d4853c0825..68b64fd4e0 100755 --- a/tests/perftest-scripts/perftest-query.sh +++ b/tests/perftest-scripts/perftest-query.sh @@ -105,10 +105,10 @@ function runQueryPerfTest { python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT echo "=========== taosdemo performance: 400 int columns, 400 double columns, 200 binary(128) columns, 10000 tables, 1000 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT - python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 400 -D 400 -B 200 -t 10000 -r 1000 | tee -a $PERFORMANCE_TEST_REPORT + python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 400 -D 400 -B 200 -t 10000 -r 100 | tee -a $PERFORMANCE_TEST_REPORT echo "=========== taosdemo performance: 1900 int columns, 1900 double columns, 200 binary(128) columns, 10000 tables, 1000 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT - python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 1900 -D 1900 -B 200 -t 10000 -r 1000 | tee -a $PERFORMANCE_TEST_REPORT + python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 1900 -D 1900 -B 200 -t 10000 -r 100 | tee -a $PERFORMANCE_TEST_REPORT } From 3948b97e85e19288859c857a3a8bbaee542ac4ad Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Fri, 13 Aug 2021 15:14:17 +0800 Subject: [PATCH 116/185] [TD-6044]: WAL compatibility since v2.1.5.0 --- src/inc/taoserror.h | 1 + src/inc/twal.h | 2 +- src/util/src/terror.c | 1 + src/wal/src/walWrite.c | 97 +++++++++++++++++++++++++++++++++++++++--- 4 files changed, 94 
insertions(+), 7 deletions(-) diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index 2214078f55..d7e1592911 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -306,6 +306,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_WAL_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x1000) //"Unexpected generic error in wal") #define TSDB_CODE_WAL_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x1001) //"WAL file is corrupted") #define TSDB_CODE_WAL_SIZE_LIMIT TAOS_DEF_ERROR_CODE(0, 0x1002) //"WAL size exceeds limit") +#define TSDB_CODE_WAL_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x1003) //"WAL out of memory") // http #define TSDB_CODE_HTTP_SERVER_OFFLINE TAOS_DEF_ERROR_CODE(0, 0x1100) //"http server is not onlin") diff --git a/src/inc/twal.h b/src/inc/twal.h index bce398d6f9..868a1fbd78 100644 --- a/src/inc/twal.h +++ b/src/inc/twal.h @@ -32,7 +32,7 @@ typedef enum { typedef struct { int8_t msgType; - int8_t sver; + int8_t sver; // sver 2 for WAL SDataRow/SMemRow compatibility int8_t reserved[2]; int32_t len; uint64_t version; diff --git a/src/util/src/terror.c b/src/util/src/terror.c index 42fc76e6c9..8d2ef29c8c 100644 --- a/src/util/src/terror.c +++ b/src/util/src/terror.c @@ -314,6 +314,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_SYN_INVALID_MSGTYPE, "Invalid msg type") TAOS_DEFINE_ERROR(TSDB_CODE_WAL_APP_ERROR, "Unexpected generic error in wal") TAOS_DEFINE_ERROR(TSDB_CODE_WAL_FILE_CORRUPTED, "WAL file is corrupted") TAOS_DEFINE_ERROR(TSDB_CODE_WAL_SIZE_LIMIT, "WAL size exceeds limit") +TAOS_DEFINE_ERROR(TSDB_CODE_WAL_OUT_OF_MEMORY, "WAL out of memory") // http TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_SERVER_OFFLINE, "http server is not onlin") diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c index 4774998799..c1b3c1ac3f 100644 --- a/src/wal/src/walWrite.c +++ b/src/wal/src/walWrite.c @@ -17,6 +17,7 @@ #define TAOS_RANDOM_FILE_FAIL_TEST #include "os.h" #include "taoserror.h" +#include "taosmsg.h" #include "tchecksum.h" #include "tfile.h" #include "twal.h" @@ -114,7 +115,7 @@ void walRemoveAllOldFiles(void *handle) { #if defined(WAL_CHECKSUM_WHOLE) static void walUpdateChecksum(SWalHead *pHead) { - pHead->sver = 1; + pHead->sver = 2; pHead->cksum = 0; pHead->cksum = taosCalcChecksum(0, (uint8_t *)pHead, sizeof(*pHead) + pHead->len); } @@ -122,7 +123,7 @@ static void walUpdateChecksum(SWalHead *pHead) { static int walValidateChecksum(SWalHead *pHead) { if (pHead->sver == 0) { // for compatible with wal before sver 1 return taosCheckChecksumWhole((uint8_t *)pHead, sizeof(*pHead)); - } else if (pHead->sver == 1) { + } else if (pHead->sver == 2 || pHead->sver == 1) { uint32_t cksum = pHead->cksum; pHead->cksum = 0; return taosCheckChecksum((uint8_t *)pHead, sizeof(*pHead) + pHead->len, cksum); @@ -281,7 +282,7 @@ static int32_t walSkipCorruptedRecord(SWal *pWal, SWalHead *pHead, int64_t tfd, return TSDB_CODE_SUCCESS; } - if (pHead->sver == 1) { + if (pHead->sver == 2 || pHead->sver == 1) { if (tfRead(tfd, pHead->cont, pHead->len) < pHead->len) { wError("vgId:%d, read to end of corrupted wal file, offset:%" PRId64, pWal->vgId, pos); return TSDB_CODE_WAL_FILE_CORRUPTED; @@ -306,7 +307,85 @@ static int32_t walSkipCorruptedRecord(SWal *pWal, SWalHead *pHead, int64_t tfd, return TSDB_CODE_WAL_FILE_CORRUPTED; } +// Add SMemRowType ahead of SDataRow +static void expandSubmitBlk(SSubmitBlk *pDest, SSubmitBlk *pSrc, int32_t *lenExpand) { + memcpy(pDest, pSrc, sizeof(SSubmitBlk)); + int nRows = htons(pSrc->numOfRows); + if (nRows <= 0) { + return; + } + char *pDestData = pDest->data; + char *pSrcData = pSrc->data; + for (int i 
= 0; i < nRows; ++i) { + memRowSetType(pDestData, SMEM_ROW_DATA); + memcpy(memRowDataBody(pDestData), pSrcData, dataRowLen(pSrcData)); + pDestData = POINTER_SHIFT(pDestData, memRowTLen(pDestData)); + pSrcData = POINTER_SHIFT(pSrcData, dataRowLen(pSrcData)); + ++(*lenExpand); + } + int32_t dataLen = htonl(pDest->dataLen); + pDest->dataLen = htonl(dataLen + nRows * sizeof(uint8_t)); +} +static bool walIsSDataRow(void *pBlkData, int nRows, int32_t dataLen) { + int32_t len = 0; + for (int i = 0; i < nRows; ++i) { + len += dataRowLen(pBlkData); + if (len > dataLen) { + return false; + } + pBlkData = POINTER_SHIFT(pBlkData, dataRowLen(pBlkData)); + } + if (len != dataLen) { + return false; + } + return true; +} +// for WAL SMemRow/SDataRow compatibility +static int walSMemRowCheck(SWalHead *pHead) { + if ((pHead->sver < 2) && (pHead->msgType == TSDB_MSG_TYPE_SUBMIT)) { + SSubmitMsg *pMsg = (SSubmitMsg *)pHead->cont; + int32_t numOfBlocks = htonl(pMsg->numOfBlocks); + if (numOfBlocks <= 0) { + return 0; + } + + int32_t nTotalRows = 0; + SSubmitBlk *pBlk = (SSubmitBlk *)pMsg->blocks; + for (int32_t i = 0; i < numOfBlocks; ++i) { + int32_t dataLen = htonl(pBlk->dataLen); + int32_t nRows = htons(pBlk->numOfRows); + nTotalRows += nRows; + if (!walIsSDataRow(pBlk->data, nRows, dataLen)) { + return 0; + } + pBlk = (SSubmitBlk *)POINTER_SHIFT(pBlk, sizeof(SSubmitBlk) + dataLen); + } + + SWalHead *pWalHead = (SWalHead *)calloc(sizeof(SWalHead) + pHead->len + nTotalRows * sizeof(uint8_t), 1); + if (pWalHead == NULL) { + return TSDB_CODE_WAL_OUT_OF_MEMORY; + } + // len should be updated + memcpy(pWalHead, pHead, sizeof(SWalHead) + sizeof(SSubmitMsg)); + + SSubmitMsg *pDestMsg = (SSubmitMsg *)pWalHead->cont; + SSubmitBlk *pDestBlks = (SSubmitBlk *)pDestMsg->blocks; + SSubmitBlk *pSrcBlks = (SSubmitBlk *)pMsg->blocks; + int32_t lenExpand = 0; + for (int32_t i = 0; i < numOfBlocks; ++i) { + expandSubmitBlk(pDestBlks, pSrcBlks, &lenExpand); + } + if (lenExpand > 0) { + pDestMsg->length = htonl(htonl(pDestMsg->length) + lenExpand); + pWalHead->len = pWalHead->len + lenExpand; + } + + memcpy(pHead, pWalHead, sizeof(SWalHead) + pWalHead->len); + tfree(pWalHead); + } + return 0; +} static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, char *name, int64_t fileId) { int32_t size = WAL_MAX_SIZE; @@ -346,7 +425,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch } #if defined(WAL_CHECKSUM_WHOLE) - if ((pHead->sver == 0 && !walValidateChecksum(pHead)) || pHead->sver < 0 || pHead->sver > 1) { + if ((pHead->sver == 0 && !walValidateChecksum(pHead)) || pHead->sver < 0 || pHead->sver > 2) { wError("vgId:%d, file:%s, wal head cksum is messed up, hver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, name, pHead->version, pHead->len, offset); code = walSkipCorruptedRecord(pWal, pHead, tfd, &offset); @@ -379,7 +458,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch continue; } - if (pHead->sver == 1 && !walValidateChecksum(pHead)) { + if ((pHead->sver == 2 || pHead->sver == 1) && !walValidateChecksum(pHead)) { wError("vgId:%d, file:%s, wal whole cksum is messed up, hver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, name, pHead->version, pHead->len, offset); code = walSkipCorruptedRecord(pWal, pHead, tfd, &offset); @@ -431,7 +510,13 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch pWal->version = pHead->version; - //wInfo("writeFp: %ld", offset); + // wInfo("writeFp: %ld", offset); + + if (0 != 
walSMemRowCheck(pHead)) { + wError("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" PRIu64 " wver:%" PRIu64 " len:%d offset:%" PRId64, + pWal->vgId, fileId, pHead->version, pWal->version, pHead->len, offset); + } + (*writeFp)(pVnode, pHead, TAOS_QTYPE_WAL, NULL); } From 56d0d69a487bd91bfe5ce293bcbc8187e2aba048 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Fri, 13 Aug 2021 17:05:30 +0800 Subject: [PATCH 117/185] [TD-5538]: add --force-keep-file option --- src/common/src/tglobal.c | 3 +++ src/dnode/src/dnodeSystem.c | 2 ++ src/tsdb/inc/tsdbFS.h | 3 +++ src/tsdb/src/tsdbFS.c | 38 +++++++++++++++++++++++++++++++++++++ src/util/inc/tconfig.h | 1 + 5 files changed, 47 insertions(+) diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index ec7249cef5..198d57b6b9 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -26,6 +26,9 @@ #include "tlocale.h" #include "ttimezone.h" +// TSDB +bool tsdbForceKeepFile = false; + // cluster char tsFirst[TSDB_EP_LEN] = {0}; char tsSecond[TSDB_EP_LEN] = {0}; diff --git a/src/dnode/src/dnodeSystem.c b/src/dnode/src/dnodeSystem.c index e49b3eba99..c3e4dae206 100644 --- a/src/dnode/src/dnodeSystem.c +++ b/src/dnode/src/dnodeSystem.c @@ -42,6 +42,8 @@ int32_t main(int32_t argc, char *argv[]) { } } else if (strcmp(argv[i], "-C") == 0) { dump_config = 1; + } else if (strcmp(argv[i], "--force-keep-file") == 0) { + tsdbForceKeepFile = true; } else if (strcmp(argv[i], "-V") == 0) { #ifdef _ACCT char *versionStr = "enterprise"; diff --git a/src/tsdb/inc/tsdbFS.h b/src/tsdb/inc/tsdbFS.h index d63aeb14ac..367d905522 100644 --- a/src/tsdb/inc/tsdbFS.h +++ b/src/tsdb/inc/tsdbFS.h @@ -18,6 +18,9 @@ #define TSDB_FS_VERSION 0 +// ================== TSDB global config +extern bool tsdbForceKeepFile; + // ================== CURRENT file header info typedef struct { uint32_t version; // Current file system version (relating to code) diff --git a/src/tsdb/src/tsdbFS.c b/src/tsdb/src/tsdbFS.c index c9f087a5cf..37f9f0af98 100644 --- a/src/tsdb/src/tsdbFS.c +++ b/src/tsdb/src/tsdbFS.c @@ -982,6 +982,26 @@ static int tsdbRestoreMeta(STsdbRepo *pRepo) { return -1; } + if (tsdbForceKeepFile) { + struct stat tfstat; + + // Get real file size + if (fstat(pfs->cstatus->pmf->fd, &tfstat) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + tsdbCloseMFile(pfs->cstatus->pmf); + tfsClosedir(tdir); + regfree(®ex); + return -1; + } + + if (pfs->cstatus->pmf->info.size != tfstat.st_size) { + int64_t tfsize = pfs->cstatus->pmf->info.size; + pfs->cstatus->pmf->info.size = tfstat.st_size; + tsdbInfo("vgId:%d file %s header size is changed from %" PRId64 " to %" PRId64, REPO_ID(pRepo), + TSDB_FILE_FULL_NAME(pfs->cstatus->pmf), tfsize, pfs->cstatus->pmf->info.size); + } + } + tsdbCloseMFile(pfs->cstatus->pmf); } } else if (code == REG_NOMATCH) { @@ -1141,6 +1161,24 @@ static int tsdbRestoreDFileSet(STsdbRepo *pRepo) { return -1; } + if (tsdbForceKeepFile) { + struct stat tfstat; + + // Get real file size + if (fstat(pDFile->fd, &tfstat) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + taosArrayDestroy(fArray); + return -1; + } + + if (pDFile->info.size != tfstat.st_size) { + int64_t tfsize = pDFile->info.size; + pDFile->info.size = tfstat.st_size; + tsdbInfo("vgId:%d file %s header size is changed from %" PRId64 " to %" PRId64, REPO_ID(pRepo), + TSDB_FILE_FULL_NAME(pDFile), tfsize, pDFile->info.size); + } + } + tsdbCloseDFile(pDFile); index++; } diff --git a/src/util/inc/tconfig.h b/src/util/inc/tconfig.h index fdb2595fd8..d6e0f4ca46 100644 --- a/src/util/inc/tconfig.h +++ 
b/src/util/inc/tconfig.h @@ -77,6 +77,7 @@ typedef struct { extern SGlobalCfg tsGlobalConfig[]; extern int32_t tsGlobalConfigNum; extern char * tsCfgStatusStr[]; +extern bool tsdbForceKeepFile; void taosReadGlobalLogCfg(); bool taosReadGlobalCfg(); From 124be91c397bd2889961b4e8dd7d4475e7c9effd Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Fri, 13 Aug 2021 17:19:19 +0800 Subject: [PATCH 118/185] speed up drone in master --- .drone.yml | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/.drone.yml b/.drone.yml index b520f308ba..085a07acf9 100644 --- a/.drone.yml +++ b/.drone.yml @@ -15,7 +15,7 @@ steps: - mkdir debug - cd debug - cmake .. - - make + - make -j4 trigger: event: - pull_request @@ -23,6 +23,7 @@ steps: branch: - develop - master + - 2.0 --- kind: pipeline name: test_arm64_bionic @@ -39,7 +40,7 @@ steps: - mkdir debug - cd debug - cmake .. -DCPUTYPE=aarch64 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -66,7 +67,7 @@ steps: - mkdir debug - cd debug - cmake .. -DCPUTYPE=aarch64 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -91,7 +92,7 @@ steps: - mkdir debug - cd debug - cmake .. -DCPUTYPE=aarch64 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -116,7 +117,7 @@ steps: - mkdir debug - cd debug - cmake .. -DCPUTYPE=aarch64 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -142,7 +143,7 @@ steps: - mkdir debug - cd debug - cmake .. -DCPUTYPE=aarch32 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -150,6 +151,7 @@ steps: branch: - develop - master + - 2.0 --- kind: pipeline name: build_trusty @@ -168,7 +170,7 @@ steps: - mkdir debug - cd debug - cmake .. - - make + - make -j4 trigger: event: - pull_request @@ -176,6 +178,7 @@ steps: branch: - develop - master + - 2.0 --- kind: pipeline name: build_xenial @@ -193,7 +196,7 @@ steps: - mkdir debug - cd debug - cmake .. - - make + - make -j4 trigger: event: - pull_request @@ -201,7 +204,7 @@ steps: branch: - develop - master - + - 2.0 --- kind: pipeline name: build_bionic @@ -218,7 +221,7 @@ steps: - mkdir debug - cd debug - cmake .. - - make + - make -j4 trigger: event: - pull_request @@ -226,6 +229,7 @@ steps: branch: - develop - master + - 2.0 --- kind: pipeline name: build_centos7 @@ -241,7 +245,7 @@ steps: - mkdir debug - cd debug - cmake .. - - make + - make -j4 trigger: event: - pull_request @@ -249,4 +253,4 @@ steps: branch: - develop - master - + - 2.0 \ No newline at end of file From 7d0e9c515e1902df66fb4a97f8234a100fae7883 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Fri, 13 Aug 2021 17:21:36 +0800 Subject: [PATCH 119/185] speed up drone in 2.0 --- .drone.yml | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/.drone.yml b/.drone.yml index b520f308ba..085a07acf9 100644 --- a/.drone.yml +++ b/.drone.yml @@ -15,7 +15,7 @@ steps: - mkdir debug - cd debug - cmake .. - - make + - make -j4 trigger: event: - pull_request @@ -23,6 +23,7 @@ steps: branch: - develop - master + - 2.0 --- kind: pipeline name: test_arm64_bionic @@ -39,7 +40,7 @@ steps: - mkdir debug - cd debug - cmake .. -DCPUTYPE=aarch64 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -66,7 +67,7 @@ steps: - mkdir debug - cd debug - cmake .. -DCPUTYPE=aarch64 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -91,7 +92,7 @@ steps: - mkdir debug - cd debug - cmake .. 
-DCPUTYPE=aarch64 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -116,7 +117,7 @@ steps: - mkdir debug - cd debug - cmake .. -DCPUTYPE=aarch64 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -142,7 +143,7 @@ steps: - mkdir debug - cd debug - cmake .. -DCPUTYPE=aarch32 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -150,6 +151,7 @@ steps: branch: - develop - master + - 2.0 --- kind: pipeline name: build_trusty @@ -168,7 +170,7 @@ steps: - mkdir debug - cd debug - cmake .. - - make + - make -j4 trigger: event: - pull_request @@ -176,6 +178,7 @@ steps: branch: - develop - master + - 2.0 --- kind: pipeline name: build_xenial @@ -193,7 +196,7 @@ steps: - mkdir debug - cd debug - cmake .. - - make + - make -j4 trigger: event: - pull_request @@ -201,7 +204,7 @@ steps: branch: - develop - master - + - 2.0 --- kind: pipeline name: build_bionic @@ -218,7 +221,7 @@ steps: - mkdir debug - cd debug - cmake .. - - make + - make -j4 trigger: event: - pull_request @@ -226,6 +229,7 @@ steps: branch: - develop - master + - 2.0 --- kind: pipeline name: build_centos7 @@ -241,7 +245,7 @@ steps: - mkdir debug - cd debug - cmake .. - - make + - make -j4 trigger: event: - pull_request @@ -249,4 +253,4 @@ steps: branch: - develop - master - + - 2.0 \ No newline at end of file From d8622364aafb84b2030e37f94b6266909ff195fa Mon Sep 17 00:00:00 2001 From: wpan Date: Fri, 13 Aug 2021 17:22:19 +0800 Subject: [PATCH 120/185] fix interp stable query issue --- src/client/src/tscSubquery.c | 1 - src/query/src/qAggMain.c | 18 +- src/query/src/qExecutor.c | 10 +- src/query/src/qPlan.c | 2 +- tests/script/general/parser/interp_test.sim | 246 ++++++++++++++++++++ 5 files changed, 263 insertions(+), 14 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 2e7e02cfd5..1a138f2b48 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2812,7 +2812,6 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p pParentSql->self, pState->numOfSub, pState->numOfRetrievedRows); SQueryInfo *pPQueryInfo = tscGetQueryInfo(&pParentSql->cmd); - tscClearInterpInfo(pPQueryInfo); code = tscCreateGlobalMerger(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, pPQueryInfo, &pParentSql->res.pMerger, pParentSql->self); pParentSql->res.code = code; diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 6eadedcaf3..14d3f4e417 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -3670,6 +3670,8 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) { return; } + bool ascQuery = (pCtx->order == TSDB_ORDER_ASC); + if (pCtx->inputType == TSDB_DATA_TYPE_TIMESTAMP) { *(TSKEY *)pCtx->pOutput = pCtx->startTs; } else if (type == TSDB_FILL_NULL) { @@ -3677,7 +3679,7 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) { } else if (type == TSDB_FILL_SET_VALUE) { tVariantDump(&pCtx->param[1], pCtx->pOutput, pCtx->inputType, true); } else { - if (pCtx->start.key != INT64_MIN && pCtx->start.key < pCtx->startTs && pCtx->end.key > pCtx->startTs) { + if (pCtx->start.key != INT64_MIN && ((ascQuery && pCtx->start.key <= pCtx->startTs && pCtx->end.key >= pCtx->startTs) || ((!ascQuery) && pCtx->start.key >= pCtx->startTs && pCtx->end.key <= pCtx->startTs))) { if (type == TSDB_FILL_PREV) { if (IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL) { SET_TYPED_DATA(pCtx->pOutput, pCtx->inputType, pCtx->start.val); @@ -3716,13 +3718,14 @@ static void 
interp_function_impl(SQLFunctionCtx *pCtx) { TSKEY skey = GET_TS_DATA(pCtx, 0); if (type == TSDB_FILL_PREV) { - if (skey > pCtx->startTs) { + if ((ascQuery && skey > pCtx->startTs) || ((!ascQuery) && skey < pCtx->startTs)) { return; } if (pCtx->size > 1) { TSKEY ekey = GET_TS_DATA(pCtx, 1); - if (ekey > skey && ekey <= pCtx->startTs) { + if ((ascQuery && ekey > skey && ekey <= pCtx->startTs) || + ((!ascQuery) && ekey < skey && ekey >= pCtx->startTs)){ skey = ekey; } } @@ -3731,10 +3734,10 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) { TSKEY ekey = skey; char* val = NULL; - if (ekey < pCtx->startTs) { + if ((ascQuery && ekey < pCtx->startTs) || ((!ascQuery) && ekey > pCtx->startTs)) { if (pCtx->size > 1) { ekey = GET_TS_DATA(pCtx, 1); - if (ekey < pCtx->startTs) { + if ((ascQuery && ekey < pCtx->startTs) || ((!ascQuery) && ekey > pCtx->startTs)) { return; } @@ -3755,12 +3758,11 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) { TSKEY ekey = GET_TS_DATA(pCtx, 1); // no data generated yet - if (!(skey < pCtx->startTs && ekey > pCtx->startTs)) { + if ((ascQuery && !(skey <= pCtx->startTs && ekey >= pCtx->startTs)) + || ((!ascQuery) && !(skey >= pCtx->startTs && ekey <= pCtx->startTs))) { return; } - assert(pCtx->start.key == INT64_MIN && skey < pCtx->startTs && ekey > pCtx->startTs); - char *start = GET_INPUT_DATA(pCtx, 0); char *end = GET_INPUT_DATA(pCtx, 1); diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index b1fb2add1b..bb16b74d3d 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1595,6 +1595,7 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe SResultRow* pResult = NULL; int32_t forwardStep = 0; int32_t ret = 0; + STimeWindow preWin = win; while (1) { // null data, failed to allocate more memory buffer @@ -1609,12 +1610,13 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe // window start(end) key interpolation doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep); - doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &win, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); - + doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? &win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); + preWin = win; + int32_t prevEndPos = (forwardStep - 1) * step + startPos; startPos = getNextQualifiedWindow(pQueryAttr, &win, &pSDataBlock->info, tsCols, binarySearchForKey, prevEndPos); if (startPos < 0) { - if (win.skey <= pQueryAttr->window.ekey) { + if ((ascQuery && win.skey <= pQueryAttr->window.ekey) || ((!ascQuery) && win.ekey >= pQueryAttr->window.ekey)) { int32_t code = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &win, masterScan, &pResult, tableGroupId, pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset); if (code != TSDB_CODE_SUCCESS || pResult == NULL) { @@ -1625,7 +1627,7 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe // window start(end) key interpolation doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep); - doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &win, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); + doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? 
&win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); } break; diff --git a/src/query/src/qPlan.c b/src/query/src/qPlan.c index b8a5ee7699..ad286f7afa 100644 --- a/src/query/src/qPlan.c +++ b/src/query/src/qPlan.c @@ -693,7 +693,7 @@ SArray* createGlobalMergePlan(SQueryAttr* pQueryAttr) { } // fill operator - if (pQueryAttr->fillType != TSDB_FILL_NONE && (!pQueryAttr->pointInterpQuery)) { + if (pQueryAttr->fillType != TSDB_FILL_NONE && pQueryAttr->interval.interval > 0) { op = OP_Fill; taosArrayPush(plan, &op); } diff --git a/tests/script/general/parser/interp_test.sim b/tests/script/general/parser/interp_test.sim index 845afb0173..5a2021dcfc 100644 --- a/tests/script/general/parser/interp_test.sim +++ b/tests/script/general/parser/interp_test.sim @@ -930,8 +930,254 @@ if $data44 != @18-11-25 19:06:00.000@ then endi +sql select interp(c1) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:42:00.000' interval(1m) fill(linear); +if $rows != 8 then + return -1 +endi +if $data00 != @18-09-17 20:35:00.000@ then + return -1 +endi +if $data01 != NULL then + return -1 +endi +if $data10 != @18-09-17 20:36:00.000@ then + return -1 +endi +if $data11 != NULL then + return -1 +endi +if $data20 != @18-09-17 20:37:00.000@ then + return -1 +endi +if $data21 != NULL then + return -1 +endi +if $data30 != @18-09-17 20:38:00.000@ then + return -1 +endi +if $data31 != NULL then + return -1 +endi +if $data40 != @18-09-17 20:39:00.000@ then + return -1 +endi +if $data41 != NULL then + return -1 +endi +if $data50 != @18-09-17 20:40:00.000@ then + return -1 +endi +if $data51 != 0 then + return -1 +endi +if $data60 != @18-09-17 20:41:00.000@ then + return -1 +endi +if $data61 != NULL then + return -1 +endi +if $data70 != @18-09-17 20:42:00.000@ then + return -1 +endi +if $data71 != NULL then + return -1 +endi +sql select interp(c1) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:42:00.000' interval(1m) fill(linear) order by ts desc; +if $rows != 8 then + return -1 +endi +if $data00 != @18-09-17 20:42:00.000@ then + return -1 +endi +if $data01 != NULL then + return -1 +endi +if $data10 != @18-09-17 20:41:00.000@ then + return -1 +endi +if $data11 != NULL then + return -1 +endi +if $data20 != @18-09-17 20:40:00.000@ then + return -1 +endi +if $data21 != 0 then + return -1 +endi +if $data30 != @18-09-17 20:39:00.000@ then + return -1 +endi +if $data31 != NULL then + return -1 +endi +if $data40 != @18-09-17 20:38:00.000@ then + return -1 +endi +if $data41 != NULL then + return -1 +endi +if $data50 != @18-09-17 20:37:00.000@ then + return -1 +endi +if $data51 != NULL then + return -1 +endi +if $data60 != @18-09-17 20:36:00.000@ then + return -1 +endi +if $data61 != NULL then + return -1 +endi +if $data70 != @18-09-17 20:35:00.000@ then + return -1 +endi +if $data71 != NULL then + return -1 +endi + +sql select interp(c3) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:50:00.000' interval(2m) fill(linear) order by ts; +if $rows != 9 then + return -1 +endi +if $data00 != @18-09-17 20:34:00.000@ then + return -1 +endi +if $data01 != NULL then + return -1 +endi +if $data10 != @18-09-17 20:36:00.000@ then + return -1 +endi +if $data11 != NULL then + return -1 +endi +if $data20 != @18-09-17 20:38:00.000@ then + return -1 +endi +if $data21 != NULL then + return -1 +endi +if $data30 != @18-09-17 20:40:00.000@ then + return -1 +endi +if $data31 != 0.00000 then + return -1 +endi +if $data40 != @18-09-17 
20:42:00.000@ then + return -1 +endi +if $data41 != 0.20000 then + return -1 +endi +if $data50 != @18-09-17 20:44:00.000@ then + return -1 +endi +if $data51 != 0.40000 then + return -1 +endi +if $data60 != @18-09-17 20:46:00.000@ then + return -1 +endi +if $data61 != 0.60000 then + return -1 +endi +if $data70 != @18-09-17 20:48:00.000@ then + return -1 +endi +if $data71 != 0.80000 then + return -1 +endi +if $data80 != @18-09-17 20:50:00.000@ then + return -1 +endi +if $data81 != 1.00000 then + return -1 +endi + + +sql select interp(c3) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:50:00.000' interval(3m) fill(linear) order by ts; +if $rows != 6 then + return -1 +endi +if $data00 != @18-09-17 20:33:00.000@ then + return -1 +endi +if $data01 != NULL then + return -1 +endi +if $data10 != @18-09-17 20:36:00.000@ then + return -1 +endi +if $data11 != NULL then + return -1 +endi +if $data20 != @18-09-17 20:39:00.000@ then + return -1 +endi +if $data21 != NULL then + return -1 +endi +if $data30 != @18-09-17 20:42:00.000@ then + return -1 +endi +if $data31 != 0.20000 then + return -1 +endi +if $data40 != @18-09-17 20:45:00.000@ then + return -1 +endi +if $data41 != 0.50000 then + return -1 +endi +if $data50 != @18-09-17 20:48:00.000@ then + return -1 +endi +if $data51 != 0.80000 then + return -1 +endi + +sql select interp(c3) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:50:00.000' interval(3m) fill(linear) order by ts desc; +if $rows != 6 then + return -1 +endi +if $data00 != @18-09-17 20:48:00.000@ then + return -1 +endi +if $data01 != 0.80000 then + return -1 +endi +if $data10 != @18-09-17 20:45:00.000@ then + return -1 +endi +if $data11 != 0.50000 then + return -1 +endi +if $data20 != @18-09-17 20:42:00.000@ then + return -1 +endi +if $data21 != 0.20000 then + return -1 +endi +if $data30 != @18-09-17 20:39:00.000@ then + return -1 +endi +if $data31 != NULL then + return -1 +endi +if $data40 != @18-09-17 20:36:00.000@ then + return -1 +endi +if $data41 != NULL then + return -1 +endi +if $data50 != @18-09-17 20:33:00.000@ then + return -1 +endi +if $data51 != NULL then + return -1 +endi From eebc5d40000f793a6cf85778425b5d0f763c17c1 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Fri, 13 Aug 2021 18:04:43 +0800 Subject: [PATCH 121/185] bug fix --- src/wal/src/walWrite.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c index c1b3c1ac3f..3b1eb55a72 100644 --- a/src/wal/src/walWrite.c +++ b/src/wal/src/walWrite.c @@ -375,15 +375,18 @@ static int walSMemRowCheck(SWalHead *pHead) { int32_t lenExpand = 0; for (int32_t i = 0; i < numOfBlocks; ++i) { expandSubmitBlk(pDestBlks, pSrcBlks, &lenExpand); + pDestBlks = POINTER_SHIFT(pDestBlks, htonl(pDestBlks->dataLen) + sizeof(SSubmitBlk)); + pSrcBlks = POINTER_SHIFT(pSrcBlks, htonl(pSrcBlks->dataLen) + sizeof(SSubmitBlk)); } if (lenExpand > 0) { - pDestMsg->length = htonl(htonl(pDestMsg->length) + lenExpand); + pDestMsg->header.contLen = htonl(pDestMsg->length) + lenExpand; + pDestMsg->length = htonl(pDestMsg->header.contLen); pWalHead->len = pWalHead->len + lenExpand; } memcpy(pHead, pWalHead, sizeof(SWalHead) + pWalHead->len); tfree(pWalHead); - } + } return 0; } @@ -511,12 +514,10 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch pWal->version = pHead->version; // wInfo("writeFp: %ld", offset); - if (0 != walSMemRowCheck(pHead)) { wError("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" 
PRIu64 " wver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, fileId, pHead->version, pWal->version, pHead->len, offset); } - (*writeFp)(pVnode, pHead, TAOS_QTYPE_WAL, NULL); } From c06123053faac12b3d8a5452d177e6b601c38668 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Fri, 13 Aug 2021 18:17:29 +0800 Subject: [PATCH 122/185] code optimization --- src/wal/src/walWrite.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c index 3b1eb55a72..8d311d5e3e 100644 --- a/src/wal/src/walWrite.c +++ b/src/wal/src/walWrite.c @@ -366,7 +366,7 @@ static int walSMemRowCheck(SWalHead *pHead) { if (pWalHead == NULL) { return TSDB_CODE_WAL_OUT_OF_MEMORY; } - // len should be updated + memcpy(pWalHead, pHead, sizeof(SWalHead) + sizeof(SSubmitMsg)); SSubmitMsg *pDestMsg = (SSubmitMsg *)pWalHead->cont; @@ -386,7 +386,7 @@ static int walSMemRowCheck(SWalHead *pHead) { memcpy(pHead, pWalHead, sizeof(SWalHead) + pWalHead->len); tfree(pWalHead); - } + } return 0; } From 9c73bb0dc5c84d022f9526acd6efcc62a6a1da9d Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Fri, 13 Aug 2021 18:43:25 +0800 Subject: [PATCH 123/185] code optimization --- src/inc/taoserror.h | 1 - src/util/src/terror.c | 1 - src/wal/src/walWrite.c | 9 +++++---- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index d7e1592911..2214078f55 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -306,7 +306,6 @@ int32_t* taosGetErrno(); #define TSDB_CODE_WAL_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x1000) //"Unexpected generic error in wal") #define TSDB_CODE_WAL_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x1001) //"WAL file is corrupted") #define TSDB_CODE_WAL_SIZE_LIMIT TAOS_DEF_ERROR_CODE(0, 0x1002) //"WAL size exceeds limit") -#define TSDB_CODE_WAL_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x1003) //"WAL out of memory") // http #define TSDB_CODE_HTTP_SERVER_OFFLINE TAOS_DEF_ERROR_CODE(0, 0x1100) //"http server is not onlin") diff --git a/src/util/src/terror.c b/src/util/src/terror.c index 8d2ef29c8c..42fc76e6c9 100644 --- a/src/util/src/terror.c +++ b/src/util/src/terror.c @@ -314,7 +314,6 @@ TAOS_DEFINE_ERROR(TSDB_CODE_SYN_INVALID_MSGTYPE, "Invalid msg type") TAOS_DEFINE_ERROR(TSDB_CODE_WAL_APP_ERROR, "Unexpected generic error in wal") TAOS_DEFINE_ERROR(TSDB_CODE_WAL_FILE_CORRUPTED, "WAL file is corrupted") TAOS_DEFINE_ERROR(TSDB_CODE_WAL_SIZE_LIMIT, "WAL size exceeds limit") -TAOS_DEFINE_ERROR(TSDB_CODE_WAL_OUT_OF_MEMORY, "WAL out of memory") // http TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_SERVER_OFFLINE, "http server is not onlin") diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c index 8d311d5e3e..2dfdb84818 100644 --- a/src/wal/src/walWrite.c +++ b/src/wal/src/walWrite.c @@ -123,7 +123,7 @@ static void walUpdateChecksum(SWalHead *pHead) { static int walValidateChecksum(SWalHead *pHead) { if (pHead->sver == 0) { // for compatible with wal before sver 1 return taosCheckChecksumWhole((uint8_t *)pHead, sizeof(*pHead)); - } else if (pHead->sver == 2 || pHead->sver == 1) { + } else if (pHead->sver >= 1) { uint32_t cksum = pHead->cksum; pHead->cksum = 0; return taosCheckChecksum((uint8_t *)pHead, sizeof(*pHead) + pHead->len, cksum); @@ -282,7 +282,7 @@ static int32_t walSkipCorruptedRecord(SWal *pWal, SWalHead *pHead, int64_t tfd, return TSDB_CODE_SUCCESS; } - if (pHead->sver == 2 || pHead->sver == 1) { + if (pHead->sver >= 1) { if (tfRead(tfd, pHead->cont, pHead->len) < pHead->len) { wError("vgId:%d, read to end of corrupted wal file, offset:%" 
PRId64, pWal->vgId, pos); return TSDB_CODE_WAL_FILE_CORRUPTED; @@ -364,7 +364,7 @@ static int walSMemRowCheck(SWalHead *pHead) { SWalHead *pWalHead = (SWalHead *)calloc(sizeof(SWalHead) + pHead->len + nTotalRows * sizeof(uint8_t), 1); if (pWalHead == NULL) { - return TSDB_CODE_WAL_OUT_OF_MEMORY; + return -1; } memcpy(pWalHead, pHead, sizeof(SWalHead) + sizeof(SSubmitMsg)); @@ -461,7 +461,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch continue; } - if ((pHead->sver == 2 || pHead->sver == 1) && !walValidateChecksum(pHead)) { + if ((pHead->sver >= 1) && !walValidateChecksum(pHead)) { wError("vgId:%d, file:%s, wal whole cksum is messed up, hver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, name, pHead->version, pHead->len, offset); code = walSkipCorruptedRecord(pWal, pHead, tfd, &offset); @@ -517,6 +517,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch if (0 != walSMemRowCheck(pHead)) { wError("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" PRIu64 " wver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, fileId, pHead->version, pWal->version, pHead->len, offset); + return TAOS_SYSTEM_ERROR(errno); } (*writeFp)(pVnode, pHead, TAOS_QTYPE_WAL, NULL); } From 57d5f22a13c5996a8489260c1f5b7329f391b698 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 13 Aug 2021 19:17:29 +0800 Subject: [PATCH 124/185] Hotfix/sangshuduo/td 5872 taosdemo stmt improve for master (#7338) * cherry pick from develop branch. * cherry pick 548131751fad2b0e11e15b75af6c254164f28eea * [TD-5872]: taosdemo stmt csv perf improve. * rand func back to early impl. * fix windows/mac compile error. * cherry pick from develop branch. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 1412 +++++++++++++++++++++++------------ 1 file changed, 932 insertions(+), 480 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index aad2fe95fa..3b91de32b0 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -295,6 +295,9 @@ typedef struct SSuperTable_S { uint64_t lenOfTagOfOneRow; char* sampleDataBuf; +#if STMT_IFACE_ENABLED == 1 + char* sampleBindArray; +#endif //int sampleRowCount; //int sampleUsePos; @@ -441,6 +444,7 @@ typedef struct SQueryMetaInfo_S { typedef struct SThreadInfo_S { TAOS * taos; TAOS_STMT *stmt; + int64_t *bind_ts; int threadID; char db_name[TSDB_DB_NAME_LEN]; uint32_t time_precision; @@ -454,7 +458,7 @@ typedef struct SThreadInfo_S { int64_t start_time; char* cols; bool use_metric; - SSuperTable* superTblInfo; + SSuperTable* stbInfo; char *buffer; // sql cmd buffer // for async insert @@ -674,7 +678,7 @@ static FILE * g_fpOfInsertResult = NULL; /////////////////////////////////////////////////// -static void ERROR_EXIT(const char *msg) { perror(msg); exit(-1); } +static void ERROR_EXIT(const char *msg) { errorPrint("%s", msg); exit(-1); } #ifndef TAOSDEMO_COMMIT_SHA1 #define TAOSDEMO_COMMIT_SHA1 "unknown" @@ -1136,8 +1140,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } if (0 == columnCount) { - perror("data type error!"); - exit(-1); + ERROR_EXIT("data type error!"); } g_args.num_of_CPR = columnCount; @@ -2425,14 +2428,14 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port #endif debugPrint("%s() LN%d, sockfd=%d\n", __func__, __LINE__, sockfd); free(request_buf); - ERROR_EXIT("ERROR opening socket"); + ERROR_EXIT("opening socket"); } int retConn = connect(sockfd, (struct sockaddr *)pServAddr, sizeof(struct sockaddr)); 
debugPrint("%s() LN%d connect() return %d\n", __func__, __LINE__, retConn); if (retConn < 0) { free(request_buf); - ERROR_EXIT("ERROR connecting"); + ERROR_EXIT("connecting"); } memset(base64_buf, 0, INPUT_BUF_LEN); @@ -2465,7 +2468,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port auth, strlen(sqlstr), sqlstr); if (r >= req_buf_len) { free(request_buf); - ERROR_EXIT("ERROR too long request"); + ERROR_EXIT("too long request"); } verbosePrint("%s() LN%d: Request:\n%s\n", __func__, __LINE__, request_buf); @@ -2478,7 +2481,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port bytes = write(sockfd, request_buf + sent, req_str_len - sent); #endif if (bytes < 0) - ERROR_EXIT("ERROR writing message to socket"); + ERROR_EXIT("writing message to socket"); if (bytes == 0) break; sent+=bytes; @@ -2495,7 +2498,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port #endif if (bytes < 0) { free(request_buf); - ERROR_EXIT("ERROR reading response from socket"); + ERROR_EXIT("reading response from socket"); } if (bytes == 0) break; @@ -2504,7 +2507,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port if (received == resp_len) { free(request_buf); - ERROR_EXIT("ERROR storing complete response from socket"); + ERROR_EXIT("storing complete response from socket"); } response_buf[RESP_BUF_LEN - 1] = '\0'; @@ -2667,8 +2670,8 @@ static int calcRowLen(SSuperTable* superTbls) { } else if (strcasecmp(dataType, "TIMESTAMP") == 0) { lenOfOneRow += TIMESTAMP_BUFF_LEN; } else { - printf("get error data type : %s\n", dataType); - exit(-1); + errorPrint("get error data type : %s\n", dataType); + exit(EXIT_FAILURE); } } @@ -2698,8 +2701,8 @@ static int calcRowLen(SSuperTable* superTbls) { } else if (strcasecmp(dataType, "DOUBLE") == 0) { lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + DOUBLE_BUFF_LEN; } else { - printf("get error tag type : %s\n", dataType); - exit(-1); + errorPrint("get error tag type : %s\n", dataType); + exit(EXIT_FAILURE); } } @@ -2737,7 +2740,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, taos_close(taos); errorPrint("%s() LN%d, failed to run command %s\n", __func__, __LINE__, command); - exit(-1); + exit(EXIT_FAILURE); } int64_t childTblCount = (limit < 0)?10000:limit; @@ -2748,7 +2751,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, taos_free_result(res); taos_close(taos); errorPrint("%s() LN%d, failed to allocate memory!\n", __func__, __LINE__); - exit(-1); + exit(EXIT_FAILURE); } } @@ -2759,7 +2762,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, if (0 == strlen((char *)row[0])) { errorPrint("%s() LN%d, No.%"PRId64" table return empty name\n", __func__, __LINE__, count); - exit(-1); + exit(EXIT_FAILURE); } tstrncpy(pTblName, (char *)row[0], len[0]+1); @@ -2775,12 +2778,12 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, (size_t)((childTblCount-count)*TSDB_TABLE_NAME_LEN)); } else { // exit, if allocate more memory failed - errorPrint("%s() LN%d, realloc fail for save child table name of %s.%s\n", - __func__, __LINE__, dbName, sTblName); tmfree(childTblName); taos_free_result(res); taos_close(taos); - exit(-1); + errorPrint("%s() LN%d, realloc fail for save child table name of %s.%s\n", + __func__, __LINE__, dbName, sTblName); + exit(EXIT_FAILURE); } } pTblName = childTblName + count * TSDB_TABLE_NAME_LEN; @@ -2964,10 +2967,10 @@ static int createSuperTable( lenOfOneRow 
+= TIMESTAMP_BUFF_LEN; } else { taos_close(taos); + free(command); errorPrint("%s() LN%d, config error data type : %s\n", __func__, __LINE__, dataType); - free(command); - exit(-1); + exit(EXIT_FAILURE); } } @@ -2976,11 +2979,11 @@ static int createSuperTable( // save for creating child table superTbl->colsOfCreateChildTable = (char*)calloc(len+20, 1); if (NULL == superTbl->colsOfCreateChildTable) { - errorPrint("%s() LN%d, Failed when calloc, size:%d", - __func__, __LINE__, len+1); taos_close(taos); free(command); - exit(-1); + errorPrint("%s() LN%d, Failed when calloc, size:%d", + __func__, __LINE__, len+1); + exit(EXIT_FAILURE); } snprintf(superTbl->colsOfCreateChildTable, len+20, "(ts timestamp%s)", cols); @@ -3054,10 +3057,10 @@ static int createSuperTable( lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + DOUBLE_BUFF_LEN; } else { taos_close(taos); + free(command); errorPrint("%s() LN%d, config error tag type : %s\n", __func__, __LINE__, dataType); - free(command); - exit(-1); + exit(EXIT_FAILURE); } } @@ -3089,7 +3092,7 @@ int createDatabasesAndStables(char *command) { errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); return -1; } - + for (int i = 0; i < g_Dbs.dbCount; i++) { if (g_Dbs.db[i].drop) { sprintf(command, "drop database if exists %s;", g_Dbs.db[i].dbName); @@ -3100,35 +3103,43 @@ int createDatabasesAndStables(char *command) { int dataLen = 0; dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, "create database if not exists %s", g_Dbs.db[i].dbName); + BUFFER_SIZE - dataLen, "create database if not exists %s", + g_Dbs.db[i].dbName); if (g_Dbs.db[i].dbCfg.blocks > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " blocks %d", g_Dbs.db[i].dbCfg.blocks); + BUFFER_SIZE - dataLen, " blocks %d", + g_Dbs.db[i].dbCfg.blocks); } if (g_Dbs.db[i].dbCfg.cache > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " cache %d", g_Dbs.db[i].dbCfg.cache); + BUFFER_SIZE - dataLen, " cache %d", + g_Dbs.db[i].dbCfg.cache); } if (g_Dbs.db[i].dbCfg.days > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " days %d", g_Dbs.db[i].dbCfg.days); + BUFFER_SIZE - dataLen, " days %d", + g_Dbs.db[i].dbCfg.days); } if (g_Dbs.db[i].dbCfg.keep > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " keep %d", g_Dbs.db[i].dbCfg.keep); + BUFFER_SIZE - dataLen, " keep %d", + g_Dbs.db[i].dbCfg.keep); } if (g_Dbs.db[i].dbCfg.quorum > 1) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " quorum %d", g_Dbs.db[i].dbCfg.quorum); + BUFFER_SIZE - dataLen, " quorum %d", + g_Dbs.db[i].dbCfg.quorum); } if (g_Dbs.db[i].dbCfg.replica > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " replica %d", g_Dbs.db[i].dbCfg.replica); + BUFFER_SIZE - dataLen, " replica %d", + g_Dbs.db[i].dbCfg.replica); } if (g_Dbs.db[i].dbCfg.update > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " update %d", g_Dbs.db[i].dbCfg.update); + BUFFER_SIZE - dataLen, " update %d", + g_Dbs.db[i].dbCfg.update); } //if (g_Dbs.db[i].dbCfg.maxtablesPerVnode > 0) { // dataLen += snprintf(command + dataLen, @@ -3136,42 +3147,48 @@ int createDatabasesAndStables(char *command) { //} if (g_Dbs.db[i].dbCfg.minRows > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " minrows %d", g_Dbs.db[i].dbCfg.minRows); + BUFFER_SIZE - dataLen, " minrows %d", + g_Dbs.db[i].dbCfg.minRows); } if (g_Dbs.db[i].dbCfg.maxRows > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - 
dataLen, " maxrows %d", g_Dbs.db[i].dbCfg.maxRows); + BUFFER_SIZE - dataLen, " maxrows %d", + g_Dbs.db[i].dbCfg.maxRows); } if (g_Dbs.db[i].dbCfg.comp > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " comp %d", g_Dbs.db[i].dbCfg.comp); + BUFFER_SIZE - dataLen, " comp %d", + g_Dbs.db[i].dbCfg.comp); } if (g_Dbs.db[i].dbCfg.walLevel > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " wal %d", g_Dbs.db[i].dbCfg.walLevel); + BUFFER_SIZE - dataLen, " wal %d", + g_Dbs.db[i].dbCfg.walLevel); } if (g_Dbs.db[i].dbCfg.cacheLast > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " cachelast %d", g_Dbs.db[i].dbCfg.cacheLast); + BUFFER_SIZE - dataLen, " cachelast %d", + g_Dbs.db[i].dbCfg.cacheLast); } if (g_Dbs.db[i].dbCfg.fsync > 0) { dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, " fsync %d", g_Dbs.db[i].dbCfg.fsync); } - if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", strlen("ms"))) + if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) #if NANO_SECOND_ENABLED == 1 || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, - "ns", strlen("ns"))) + "ns", 2)) #endif || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, - "us", strlen("us")))) { + "us", 2))) { dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, " precision \'%s\';", g_Dbs.db[i].dbCfg.precision); } if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { taos_close(taos); - errorPrint( "\ncreate database %s failed!\n\n", g_Dbs.db[i].dbName); + errorPrint( "\ncreate database %s failed!\n\n", + g_Dbs.db[i].dbName); return -1; } printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName); @@ -3218,7 +3235,7 @@ int createDatabasesAndStables(char *command) { static void* createTable(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + SSuperTable* stbInfo = pThreadInfo->stbInfo; setThreadName("createTable"); @@ -3229,7 +3246,7 @@ static void* createTable(void *sarg) pThreadInfo->buffer = calloc(buff_len, 1); if (pThreadInfo->buffer == NULL) { errorPrint("%s() LN%d, Memory allocated failed!\n", __func__, __LINE__); - exit(-1); + exit(EXIT_FAILURE); } int len = 0; @@ -3248,11 +3265,11 @@ static void* createTable(void *sarg) g_args.tb_prefix, i, pThreadInfo->cols); } else { - if (superTblInfo == NULL) { + if (stbInfo == NULL) { + free(pThreadInfo->buffer); errorPrint("%s() LN%d, use metric, but super table info is NULL\n", __func__, __LINE__); - free(pThreadInfo->buffer); - exit(-1); + exit(EXIT_FAILURE); } else { if (0 == len) { batchNum = 0; @@ -3260,29 +3277,35 @@ static void* createTable(void *sarg) len += snprintf(pThreadInfo->buffer + len, buff_len - len, "create table "); } + char* tagsValBuf = NULL; - if (0 == superTblInfo->tagSource) { - tagsValBuf = generateTagValuesForStb(superTblInfo, i); + if (0 == stbInfo->tagSource) { + tagsValBuf = generateTagValuesForStb(stbInfo, i); } else { + if (0 == stbInfo->tagSampleCount) { + free(pThreadInfo->buffer); + ERROR_EXIT("use sample file for tag, but has no content!\n"); + } tagsValBuf = getTagValueFromTagSample( - superTblInfo, - i % superTblInfo->tagSampleCount); + stbInfo, + i % stbInfo->tagSampleCount); } + if (NULL == tagsValBuf) { free(pThreadInfo->buffer); - return NULL; + ERROR_EXIT("use metric, but tag buffer is NULL\n"); } len += snprintf(pThreadInfo->buffer + len, buff_len - len, "if not exists %s.%s%"PRIu64" using %s.%s tags %s ", - pThreadInfo->db_name, superTblInfo->childTblPrefix, + pThreadInfo->db_name, 
stbInfo->childTblPrefix, i, pThreadInfo->db_name, - superTblInfo->sTblName, tagsValBuf); + stbInfo->sTblName, tagsValBuf); free(tagsValBuf); batchNum++; - if ((batchNum < superTblInfo->batchCreateTableNum) + if ((batchNum < stbInfo->batchCreateTableNum) && ((buff_len - len) - >= (superTblInfo->lenOfTagOfOneRow + 256))) { + >= (stbInfo->lenOfTagOfOneRow + 256))) { continue; } } @@ -3317,14 +3340,13 @@ static void* createTable(void *sarg) static int startMultiThreadCreateChildTable( char* cols, int threads, uint64_t tableFrom, int64_t ntables, - char* db_name, SSuperTable* superTblInfo) { + char* db_name, SSuperTable* stbInfo) { pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); if ((NULL == pids) || (NULL == infos)) { - printf("malloc failed\n"); - exit(-1); + ERROR_EXIT("createChildTable malloc failed\n"); } if (threads < 1) { @@ -3344,7 +3366,7 @@ static int startMultiThreadCreateChildTable( threadInfo *pThreadInfo = infos + i; pThreadInfo->threadID = i; tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN); - pThreadInfo->superTblInfo = superTblInfo; + pThreadInfo->stbInfo = stbInfo; verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name); pThreadInfo->taos = taos_connect( g_Dbs.host, @@ -3451,26 +3473,26 @@ static void createChildTables() { /* Read 10000 lines at most. If more than 10000 lines, continue to read after using */ -static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) { +static int readTagFromCsvFileToMem(SSuperTable * stbInfo) { size_t n = 0; ssize_t readLen = 0; char * line = NULL; - FILE *fp = fopen(superTblInfo->tagsFile, "r"); + FILE *fp = fopen(stbInfo->tagsFile, "r"); if (fp == NULL) { printf("Failed to open tags file: %s, reason:%s\n", - superTblInfo->tagsFile, strerror(errno)); + stbInfo->tagsFile, strerror(errno)); return -1; } - if (superTblInfo->tagDataBuf) { - free(superTblInfo->tagDataBuf); - superTblInfo->tagDataBuf = NULL; + if (stbInfo->tagDataBuf) { + free(stbInfo->tagDataBuf); + stbInfo->tagDataBuf = NULL; } int tagCount = 10000; int count = 0; - char* tagDataBuf = calloc(1, superTblInfo->lenOfTagOfOneRow * tagCount); + char* tagDataBuf = calloc(1, stbInfo->lenOfTagOfOneRow * tagCount); if (tagDataBuf == NULL) { printf("Failed to calloc, reason:%s\n", strerror(errno)); fclose(fp); @@ -3486,20 +3508,20 @@ static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) { continue; } - memcpy(tagDataBuf + count * superTblInfo->lenOfTagOfOneRow, line, readLen); + memcpy(tagDataBuf + count * stbInfo->lenOfTagOfOneRow, line, readLen); count++; if (count >= tagCount - 1) { char *tmp = realloc(tagDataBuf, - (size_t)tagCount*1.5*superTblInfo->lenOfTagOfOneRow); + (size_t)tagCount*1.5*stbInfo->lenOfTagOfOneRow); if (tmp != NULL) { tagDataBuf = tmp; tagCount = (int)(tagCount*1.5); - memset(tagDataBuf + count*superTblInfo->lenOfTagOfOneRow, - 0, (size_t)((tagCount-count)*superTblInfo->lenOfTagOfOneRow)); + memset(tagDataBuf + count*stbInfo->lenOfTagOfOneRow, + 0, (size_t)((tagCount-count)*stbInfo->lenOfTagOfOneRow)); } else { // exit, if allocate more memory failed - printf("realloc fail for save tag val from %s\n", superTblInfo->tagsFile); + printf("realloc fail for save tag val from %s\n", stbInfo->tagsFile); tmfree(tagDataBuf); free(line); fclose(fp); @@ -3508,8 +3530,8 @@ static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) { } } - superTblInfo->tagDataBuf = tagDataBuf; - superTblInfo->tagSampleCount = count; + stbInfo->tagDataBuf = tagDataBuf; + 
stbInfo->tagSampleCount = count; free(line); fclose(fp); @@ -3520,28 +3542,28 @@ static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) { Read 10000 lines at most. If more than 10000 lines, continue to read after using */ static int readSampleFromCsvFileToMem( - SSuperTable* superTblInfo) { + SSuperTable* stbInfo) { size_t n = 0; ssize_t readLen = 0; char * line = NULL; int getRows = 0; - FILE* fp = fopen(superTblInfo->sampleFile, "r"); + FILE* fp = fopen(stbInfo->sampleFile, "r"); if (fp == NULL) { errorPrint( "Failed to open sample file: %s, reason:%s\n", - superTblInfo->sampleFile, strerror(errno)); + stbInfo->sampleFile, strerror(errno)); return -1; } - assert(superTblInfo->sampleDataBuf); - memset(superTblInfo->sampleDataBuf, 0, - MAX_SAMPLES_ONCE_FROM_FILE * superTblInfo->lenOfOneRow); + assert(stbInfo->sampleDataBuf); + memset(stbInfo->sampleDataBuf, 0, + MAX_SAMPLES_ONCE_FROM_FILE * stbInfo->lenOfOneRow); while(1) { readLen = tgetline(&line, &n, fp); if (-1 == readLen) { if(0 != fseek(fp, 0, SEEK_SET)) { errorPrint( "Failed to fseek file: %s, reason:%s\n", - superTblInfo->sampleFile, strerror(errno)); + stbInfo->sampleFile, strerror(errno)); fclose(fp); return -1; } @@ -3556,13 +3578,13 @@ static int readSampleFromCsvFileToMem( continue; } - if (readLen > superTblInfo->lenOfOneRow) { + if (readLen > stbInfo->lenOfOneRow) { printf("sample row len[%d] overflow define schema len[%"PRIu64"], so discard this row\n", - (int32_t)readLen, superTblInfo->lenOfOneRow); + (int32_t)readLen, stbInfo->lenOfOneRow); continue; } - memcpy(superTblInfo->sampleDataBuf + getRows * superTblInfo->lenOfOneRow, + memcpy(stbInfo->sampleDataBuf + getRows * stbInfo->lenOfOneRow, line, readLen); getRows++; @@ -5047,6 +5069,23 @@ static void postFreeResource() { free(g_Dbs.db[i].superTbls[j].sampleDataBuf); g_Dbs.db[i].superTbls[j].sampleDataBuf = NULL; } +#if STMT_IFACE_ENABLED == 1 + if (g_Dbs.db[i].superTbls[j].sampleBindArray) { + for (int k = 0; k < MAX_SAMPLES_ONCE_FROM_FILE; k++) { + uintptr_t *tmp = (uintptr_t *)(*(uintptr_t *)( + g_Dbs.db[i].superTbls[j].sampleBindArray + + sizeof(uintptr_t *) * k)); + for (int c = 1; c < g_Dbs.db[i].superTbls[j].columnCount + 1; c++) { + TAOS_BIND *bind = (TAOS_BIND *)((char *)tmp + (sizeof(TAOS_BIND) * c)); + if (bind) + tmfree(bind->buffer); + } + tmfree((char *)tmp); + } + } + tmfree((char *)g_Dbs.db[i].superTbls[j].sampleBindArray); +#endif + if (0 != g_Dbs.db[i].superTbls[j].tagDataBuf) { free(g_Dbs.db[i].superTbls[j].tagDataBuf); g_Dbs.db[i].superTbls[j].tagDataBuf = NULL; @@ -5067,21 +5106,14 @@ static void postFreeResource() { tmfree(g_randfloat_buff); tmfree(g_rand_current_buff); tmfree(g_rand_phase_buff); - tmfree(g_randdouble_buff); + } static int getRowDataFromSample( char* dataBuf, int64_t maxLen, int64_t timestamp, - SSuperTable* superTblInfo, int64_t* sampleUsePos) + SSuperTable* stbInfo, int64_t* sampleUsePos) { if ((*sampleUsePos) == MAX_SAMPLES_ONCE_FROM_FILE) { - /* int ret = readSampleFromCsvFileToMem(superTblInfo); - if (0 != ret) { - tmfree(superTblInfo->sampleDataBuf); - superTblInfo->sampleDataBuf = NULL; - return -1; - } - */ *sampleUsePos = 0; } @@ -5091,8 +5123,8 @@ static int getRowDataFromSample( "(%" PRId64 ", ", timestamp); dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%s", - superTblInfo->sampleDataBuf - + superTblInfo->lenOfOneRow * (*sampleUsePos)); + stbInfo->sampleDataBuf + + stbInfo->lenOfOneRow * (*sampleUsePos)); dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")"); (*sampleUsePos)++; @@ -5248,7 
+5280,7 @@ static int64_t generateData(char *recBuf, char **data_type, if (s == NULL) { errorPrint("%s() LN%d, memory allocation %d bytes failed\n", __func__, __LINE__, lenOfBinary + 1); - exit(-1); + exit(EXIT_FAILURE); } rand_string(s, lenOfBinary); pstr += sprintf(pstr, ",\"%s\"", s); @@ -5258,7 +5290,7 @@ static int64_t generateData(char *recBuf, char **data_type, if (s == NULL) { errorPrint("%s() LN%d, memory allocation %d bytes failed\n", __func__, __LINE__, lenOfBinary + 1); - exit(-1); + exit(EXIT_FAILURE); } rand_string(s, lenOfBinary); pstr += sprintf(pstr, ",\"%s\"", s); @@ -5266,8 +5298,7 @@ static int64_t generateData(char *recBuf, char **data_type, } if (strlen(recBuf) > MAX_DATA_SIZE) { - perror("column length too long, abort"); - exit(-1); + ERROR_EXIT("column length too long, abort"); } } @@ -5278,27 +5309,27 @@ static int64_t generateData(char *recBuf, char **data_type, return (int32_t)strlen(recBuf); } -static int prepareSampleDataForSTable(SSuperTable *superTblInfo) { +static int prepareSampleDataForSTable(SSuperTable *stbInfo) { char* sampleDataBuf = NULL; sampleDataBuf = calloc( - superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1); + stbInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1); if (sampleDataBuf == NULL) { errorPrint("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n", __func__, __LINE__, - superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, + stbInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, strerror(errno)); return -1; } - superTblInfo->sampleDataBuf = sampleDataBuf; - int ret = readSampleFromCsvFileToMem(superTblInfo); + stbInfo->sampleDataBuf = sampleDataBuf; + int ret = readSampleFromCsvFileToMem(stbInfo); if (0 != ret) { errorPrint("%s() LN%d, read sample from csv file failed.\n", __func__, __LINE__); tmfree(sampleDataBuf); - superTblInfo->sampleDataBuf = NULL; + stbInfo->sampleDataBuf = NULL; return -1; } @@ -5308,14 +5339,14 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) { static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) { int32_t affectedRows; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + SSuperTable* stbInfo = pThreadInfo->stbInfo; verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->buffer); uint16_t iface; - if (superTblInfo) - iface = superTblInfo->iface; + if (stbInfo) + iface = stbInfo->iface; else { if (g_args.iface == INTERFACE_BUT) iface = TAOSC_IFACE; @@ -5355,7 +5386,7 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) __func__, __LINE__, taos_stmt_errstr(pThreadInfo->stmt)); fprintf(stderr, "\n\033[31m === Please reduce batch number if WAL size exceeds limit. 
===\033[0m\n\n"); - exit(-1); + exit(EXIT_FAILURE); } affectedRows = k; break; @@ -5363,7 +5394,7 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) default: errorPrint("%s() LN%d: unknown insert mode: %d\n", - __func__, __LINE__, superTblInfo->iface); + __func__, __LINE__, stbInfo->iface); affectedRows = 0; } @@ -5373,24 +5404,24 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) static void getTableName(char *pTblName, threadInfo* pThreadInfo, uint64_t tableSeq) { - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - if (superTblInfo) { - if (AUTO_CREATE_SUBTBL != superTblInfo->autoCreateTable) { - if (superTblInfo->childTblLimit > 0) { + SSuperTable* stbInfo = pThreadInfo->stbInfo; + if (stbInfo) { + if (AUTO_CREATE_SUBTBL != stbInfo->autoCreateTable) { + if (stbInfo->childTblLimit > 0) { snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s", - superTblInfo->childTblName + - (tableSeq - superTblInfo->childTblOffset) * TSDB_TABLE_NAME_LEN); + stbInfo->childTblName + + (tableSeq - stbInfo->childTblOffset) * TSDB_TABLE_NAME_LEN); } else { verbosePrint("[%d] %s() LN%d: from=%"PRIu64" count=%"PRId64" seq=%"PRIu64"\n", pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->start_table_from, pThreadInfo->ntables, tableSeq); snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s", - superTblInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN); + stbInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN); } } else { snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%"PRIu64"", - superTblInfo->childTblPrefix, tableSeq); + stbInfo->childTblPrefix, tableSeq); } } else { snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%"PRIu64"", @@ -5471,7 +5502,7 @@ static int64_t getTSRandTail(int64_t timeStampStep, int32_t seq, } static int32_t generateStbDataTail( - SSuperTable* superTblInfo, + SSuperTable* stbInfo, uint32_t batch, char* buffer, int64_t remainderBufLen, int64_t insertRows, uint64_t recordFrom, int64_t startTime, @@ -5481,7 +5512,7 @@ static int32_t generateStbDataTail( char *pstr = buffer; bool tsRand; - if (0 == strncasecmp(superTblInfo->dataSource, "rand", strlen("rand"))) { + if (0 == strncasecmp(stbInfo->dataSource, "rand", strlen("rand"))) { tsRand = true; } else { tsRand = false; @@ -5496,26 +5527,26 @@ static int32_t generateStbDataTail( int64_t lenOfRow = 0; if (tsRand) { - if (superTblInfo->disorderRatio > 0) { - lenOfRow = generateStbRowData(superTblInfo, data, + if (stbInfo->disorderRatio > 0) { + lenOfRow = generateStbRowData(stbInfo, data, remainderBufLen, startTime + getTSRandTail( - superTblInfo->timeStampStep, k, - superTblInfo->disorderRatio, - superTblInfo->disorderRange) + stbInfo->timeStampStep, k, + stbInfo->disorderRatio, + stbInfo->disorderRange) ); } else { - lenOfRow = generateStbRowData(superTblInfo, data, + lenOfRow = generateStbRowData(stbInfo, data, remainderBufLen, - startTime + superTblInfo->timeStampStep * k + startTime + stbInfo->timeStampStep * k ); } } else { lenOfRow = getRowDataFromSample( data, (remainderBufLen < MAX_DATA_SIZE)?remainderBufLen:MAX_DATA_SIZE, - startTime + superTblInfo->timeStampStep * k, - superTblInfo, + startTime + stbInfo->timeStampStep * k, + stbInfo, pSamplePos); } @@ -5571,7 +5602,7 @@ static int generateSQLHeadWithoutStb(char *tableName, } static int generateStbSQLHead( - SSuperTable* superTblInfo, + SSuperTable* stbInfo, char *tableName, int64_t tableSeq, char *dbName, char *buffer, int remainderBufLen) @@ -5580,14 +5611,14 @@ static int generateStbSQLHead( char headBuf[HEAD_BUFF_LEN]; - if (AUTO_CREATE_SUBTBL == 
superTblInfo->autoCreateTable) { + if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) { char* tagsValBuf = NULL; - if (0 == superTblInfo->tagSource) { - tagsValBuf = generateTagValuesForStb(superTblInfo, tableSeq); + if (0 == stbInfo->tagSource) { + tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq); } else { tagsValBuf = getTagValueFromTagSample( - superTblInfo, - tableSeq % superTblInfo->tagSampleCount); + stbInfo, + tableSeq % stbInfo->tagSampleCount); } if (NULL == tagsValBuf) { errorPrint("%s() LN%d, tag buf failed to allocate memory\n", @@ -5602,10 +5633,10 @@ static int generateStbSQLHead( dbName, tableName, dbName, - superTblInfo->sTblName, + stbInfo->sTblName, tagsValBuf); tmfree(tagsValBuf); - } else if (TBL_ALREADY_EXISTS == superTblInfo->childTblExists) { + } else if (TBL_ALREADY_EXISTS == stbInfo->childTblExists) { len = snprintf( headBuf, HEAD_BUFF_LEN, @@ -5630,12 +5661,12 @@ static int generateStbSQLHead( } static int32_t generateStbInterlaceData( - SSuperTable *superTblInfo, + threadInfo *pThreadInfo, char *tableName, uint32_t batchPerTbl, uint64_t i, uint32_t batchPerTblTimes, uint64_t tableSeq, - threadInfo *pThreadInfo, char *buffer, + char *buffer, int64_t insertRows, int64_t startTime, uint64_t *pRemainderBufLen) @@ -5643,8 +5674,9 @@ static int32_t generateStbInterlaceData( assert(buffer); char *pstr = buffer; + SSuperTable *stbInfo = pThreadInfo->stbInfo; int headLen = generateStbSQLHead( - superTblInfo, + stbInfo, tableName, tableSeq, pThreadInfo->db_name, pstr, *pRemainderBufLen); @@ -5664,12 +5696,12 @@ static int32_t generateStbInterlaceData( pThreadInfo->threadID, __func__, __LINE__, i, batchPerTblTimes, batchPerTbl); - if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) { + if (0 == strncasecmp(stbInfo->startTimestamp, "now", 3)) { startTime = taosGetTimestamp(pThreadInfo->time_precision); } int32_t k = generateStbDataTail( - superTblInfo, + stbInfo, batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0, startTime, &(pThreadInfo->samplePos), &dataLen); @@ -5732,8 +5764,206 @@ static int64_t generateInterlaceDataWithoutStb( } #if STMT_IFACE_ENABLED == 1 -static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, - char *dataType, int32_t dataLen, char **ptr, char *value) +static int32_t prepareStmtBindArrayByType( + TAOS_BIND *bind, + char *dataType, int32_t dataLen, + int32_t timePrec, + char *value) +{ + if (0 == strncasecmp(dataType, + "BINARY", strlen("BINARY"))) { + if (dataLen > TSDB_MAX_BINARY_LEN) { + errorPrint( "binary length overflow, max size:%u\n", + (uint32_t)TSDB_MAX_BINARY_LEN); + return -1; + } + char *bind_binary; + + bind->buffer_type = TSDB_DATA_TYPE_BINARY; + if (value) { + bind_binary = calloc(1, strlen(value) + 1); + strncpy(bind_binary, value, strlen(value)); + bind->buffer_length = strlen(bind_binary); + } else { + bind_binary = calloc(1, dataLen + 1); + rand_string(bind_binary, dataLen); + bind->buffer_length = dataLen; + } + + bind->length = &bind->buffer_length; + bind->buffer = bind_binary; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "NCHAR", strlen("NCHAR"))) { + if (dataLen > TSDB_MAX_BINARY_LEN) { + errorPrint( "nchar length overflow, max size:%u\n", + (uint32_t)TSDB_MAX_BINARY_LEN); + return -1; + } + char *bind_nchar; + + bind->buffer_type = TSDB_DATA_TYPE_NCHAR; + if (value) { + bind_nchar = calloc(1, strlen(value) + 1); + strncpy(bind_nchar, value, strlen(value)); + } else { + bind_nchar = calloc(1, dataLen + 1); + rand_string(bind_nchar, dataLen); + } + + bind->buffer_length = 
strlen(bind_nchar); + bind->buffer = bind_nchar; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "INT", strlen("INT"))) { + int32_t *bind_int = malloc(sizeof(int32_t)); + + if (value) { + *bind_int = atoi(value); + } else { + *bind_int = rand_int(); + } + bind->buffer_type = TSDB_DATA_TYPE_INT; + bind->buffer_length = sizeof(int32_t); + bind->buffer = bind_int; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "BIGINT", strlen("BIGINT"))) { + int64_t *bind_bigint = malloc(sizeof(int64_t)); + + if (value) { + *bind_bigint = atoll(value); + } else { + *bind_bigint = rand_bigint(); + } + bind->buffer_type = TSDB_DATA_TYPE_BIGINT; + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_bigint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "FLOAT", strlen("FLOAT"))) { + float *bind_float = malloc(sizeof(float)); + + if (value) { + *bind_float = (float)atof(value); + } else { + *bind_float = rand_float(); + } + bind->buffer_type = TSDB_DATA_TYPE_FLOAT; + bind->buffer_length = sizeof(float); + bind->buffer = bind_float; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "DOUBLE", strlen("DOUBLE"))) { + double *bind_double = malloc(sizeof(double)); + + if (value) { + *bind_double = atof(value); + } else { + *bind_double = rand_double(); + } + bind->buffer_type = TSDB_DATA_TYPE_DOUBLE; + bind->buffer_length = sizeof(double); + bind->buffer = bind_double; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "SMALLINT", strlen("SMALLINT"))) { + int16_t *bind_smallint = malloc(sizeof(int16_t)); + + if (value) { + *bind_smallint = (int16_t)atoi(value); + } else { + *bind_smallint = rand_smallint(); + } + bind->buffer_type = TSDB_DATA_TYPE_SMALLINT; + bind->buffer_length = sizeof(int16_t); + bind->buffer = bind_smallint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "TINYINT", strlen("TINYINT"))) { + int8_t *bind_tinyint = malloc(sizeof(int8_t)); + + if (value) { + *bind_tinyint = (int8_t)atoi(value); + } else { + *bind_tinyint = rand_tinyint(); + } + bind->buffer_type = TSDB_DATA_TYPE_TINYINT; + bind->buffer_length = sizeof(int8_t); + bind->buffer = bind_tinyint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "BOOL", strlen("BOOL"))) { + int8_t *bind_bool = malloc(sizeof(int8_t)); + + if (value) { + if (strncasecmp(value, "true", 4)) { + *bind_bool = true; + } else { + *bind_bool = false; + } + } else { + *bind_bool = rand_bool(); + } + bind->buffer_type = TSDB_DATA_TYPE_BOOL; + bind->buffer_length = sizeof(int8_t); + bind->buffer = bind_bool; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + } else if (0 == strncasecmp(dataType, + "TIMESTAMP", strlen("TIMESTAMP"))) { + int64_t *bind_ts2 = malloc(sizeof(int64_t)); + + if (value) { + if (strchr(value, ':') && strchr(value, '-')) { + int i = 0; + while(value[i] != '\0') { + if (value[i] == '\"' || value[i] == '\'') { + value[i] = ' '; + } + i++; + } + int64_t tmpEpoch; + if (TSDB_CODE_SUCCESS != taosParseTime( + value, &tmpEpoch, strlen(value), + timePrec, 0)) { + errorPrint("Input %s, time format error!\n", value); + return -1; + } + *bind_ts2 = tmpEpoch; + } else { + *bind_ts2 = atoll(value); + } + } else { + *bind_ts2 = rand_bigint(); + } + 
bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_ts2; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else { + errorPrint( "No support data type: %s\n", dataType); + return -1; + } + + return 0; +} + +static int32_t prepareStmtBindArrayByTypeForRand( + TAOS_BIND *bind, + char *dataType, int32_t dataLen, + int32_t timePrec, + char **ptr, + char *value) { if (0 == strncasecmp(dataType, "BINARY", strlen("BINARY"))) { @@ -5814,7 +6044,7 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, *ptr += bind->buffer_length; } else if (0 == strncasecmp(dataType, "FLOAT", strlen("FLOAT"))) { - float *bind_float = (float *) *ptr; + float *bind_float = (float *)*ptr; if (value) { *bind_float = (float)atof(value); @@ -5830,7 +6060,7 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, *ptr += bind->buffer_length; } else if (0 == strncasecmp(dataType, "DOUBLE", strlen("DOUBLE"))) { - double *bind_double = (double *)*ptr; + double *bind_double = (double *)*ptr; if (value) { *bind_double = atof(value); @@ -5862,7 +6092,7 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, *ptr += bind->buffer_length; } else if (0 == strncasecmp(dataType, "TINYINT", strlen("TINYINT"))) { - int8_t *bind_tinyint = (int8_t *)*ptr; + int8_t *bind_tinyint = (int8_t *)*ptr; if (value) { *bind_tinyint = (int8_t)atoi(value); @@ -5874,12 +6104,21 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, bind->buffer = bind_tinyint; bind->length = &bind->buffer_length; bind->is_null = NULL; + *ptr += bind->buffer_length; } else if (0 == strncasecmp(dataType, "BOOL", strlen("BOOL"))) { - int8_t *bind_bool = (int8_t *)*ptr; + int8_t *bind_bool = (int8_t *)*ptr; - *bind_bool = rand_bool(); + if (value) { + if (strncasecmp(value, "true", 4)) { + *bind_bool = true; + } else { + *bind_bool = false; + } + } else { + *bind_bool = rand_bool(); + } bind->buffer_type = TSDB_DATA_TYPE_BOOL; bind->buffer_length = sizeof(int8_t); bind->buffer = bind_bool; @@ -5889,10 +6128,28 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, *ptr += bind->buffer_length; } else if (0 == strncasecmp(dataType, "TIMESTAMP", strlen("TIMESTAMP"))) { - int64_t *bind_ts2 = (int64_t *) *ptr; + int64_t *bind_ts2 = (int64_t *)*ptr; if (value) { - *bind_ts2 = atoll(value); + if (strchr(value, ':') && strchr(value, '-')) { + int i = 0; + while(value[i] != '\0') { + if (value[i] == '\"' || value[i] == '\'') { + value[i] = ' '; + } + i++; + } + int64_t tmpEpoch; + if (TSDB_CODE_SUCCESS != taosParseTime( + value, &tmpEpoch, strlen(value), + timePrec, 0)) { + errorPrint("Input %s, time format error!\n", value); + return -1; + } + *bind_ts2 = tmpEpoch; + } else { + *bind_ts2 = atoll(value); + } } else { *bind_ts2 = rand_bigint(); } @@ -5912,13 +6169,14 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, } static int32_t prepareStmtWithoutStb( - TAOS_STMT *stmt, + threadInfo *pThreadInfo, char *tableName, uint32_t batch, int64_t insertRows, int64_t recordFrom, int64_t startTime) { + TAOS_STMT *stmt = pThreadInfo->stmt; int ret = taos_stmt_set_tbname(stmt, tableName); if (ret != 0) { errorPrint("failed to execute taos_stmt_set_tbname(%s). return 0x%x. 
reason: %s\n", @@ -5938,15 +6196,11 @@ static int32_t prepareStmtWithoutStb( int32_t k = 0; for (k = 0; k < batch;) { /* columnCount + 1 (ts) */ - char data[MAX_DATA_SIZE]; - memset(data, 0, MAX_DATA_SIZE); - char *ptr = data; TAOS_BIND *bind = (TAOS_BIND *)(bindArray + 0); - int64_t *bind_ts; + int64_t *bind_ts = pThreadInfo->bind_ts; - bind_ts = (int64_t *)ptr; bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; if (g_args.disorderRatio) { @@ -5962,8 +6216,6 @@ static int32_t prepareStmtWithoutStb( bind->length = &bind->buffer_length; bind->is_null = NULL; - ptr += bind->buffer_length; - for (int i = 0; i < g_args.num_of_CPR; i ++) { bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * (i + 1))); @@ -5971,7 +6223,8 @@ static int32_t prepareStmtWithoutStb( bind, data_type[i], g_args.len_of_binary, - &ptr, NULL)) { + pThreadInfo->time_precision, + NULL)) { return -1; } } @@ -5998,10 +6251,42 @@ static int32_t prepareStmtWithoutStb( return k; } -static int32_t prepareStbStmtBind( - char *bindArray, SSuperTable *stbInfo, bool sourceRand, +static int32_t prepareStbStmtBindTag( + char *bindArray, SSuperTable *stbInfo, + char *tagsVal, + int32_t timePrec) +{ + char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.len_of_binary); + if (bindBuffer == NULL) { + errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n", + __func__, __LINE__, g_args.len_of_binary); + return -1; + } + + TAOS_BIND *tag; + + for (int t = 0; t < stbInfo->tagCount; t ++) { + tag = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * t)); + if ( -1 == prepareStmtBindArrayByType( + tag, + stbInfo->tags[t].dataType, + stbInfo->tags[t].dataLen, + timePrec, + NULL)) { + free(bindBuffer); + return -1; + } + } + + free(bindBuffer); + return 0; +} + +static int32_t prepareStbStmtBindRand( + int64_t *ts, + char *bindArray, SSuperTable *stbInfo, int64_t startTime, int32_t recSeq, - bool isColumn) + int32_t timePrec) { char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.len_of_binary); if (bindBuffer == NULL) { @@ -6016,121 +6301,92 @@ static int32_t prepareStbStmtBind( TAOS_BIND *bind; - if (isColumn) { - int cursor = 0; + for (int i = 0; i < stbInfo->columnCount + 1; i ++) { + bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * i)); - for (int i = 0; i < stbInfo->columnCount + 1; i ++) { - bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * i)); + if (i == 0) { + int64_t *bind_ts = ts; - if (i == 0) { - int64_t *bind_ts; - - bind_ts = (int64_t *)ptr; - bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; - if (stbInfo->disorderRatio) { - *bind_ts = startTime + getTSRandTail( - stbInfo->timeStampStep, recSeq, - stbInfo->disorderRatio, - stbInfo->disorderRange); - } else { - *bind_ts = startTime + stbInfo->timeStampStep * recSeq; - } - bind->buffer_length = sizeof(int64_t); - bind->buffer = bind_ts; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - - ptr += bind->buffer_length; + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + if (stbInfo->disorderRatio) { + *bind_ts = startTime + getTSRandTail( + stbInfo->timeStampStep, recSeq, + stbInfo->disorderRatio, + stbInfo->disorderRange); } else { - - if (sourceRand) { - if ( -1 == prepareStmtBindArrayByType( - bind, - stbInfo->columns[i-1].dataType, - stbInfo->columns[i-1].dataLen, - &ptr, - NULL)) { - free(bindBuffer); - return -1; - } - } else { - char *restStr = stbInfo->sampleDataBuf + cursor; - int lengthOfRest = strlen(restStr); - - int index = 0; - for (index = 0; index < lengthOfRest; index ++) { - if (restStr[index] == ',') { - break; - 
} - } - - memset(bindBuffer, 0, g_args.len_of_binary); - strncpy(bindBuffer, restStr, index); - cursor += index + 1; // skip ',' too - - if ( -1 == prepareStmtBindArrayByType( - bind, - stbInfo->columns[i-1].dataType, - stbInfo->columns[i-1].dataLen, - &ptr, - bindBuffer)) { - free(bindBuffer); - return -1; - } - } + *bind_ts = startTime + stbInfo->timeStampStep * recSeq; } - } - } else { - TAOS_BIND *tag; + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_ts; + bind->length = &bind->buffer_length; + bind->is_null = NULL; - for (int t = 0; t < stbInfo->tagCount; t ++) { - tag = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * t)); - if ( -1 == prepareStmtBindArrayByType( - tag, - stbInfo->tags[t].dataType, - stbInfo->tags[t].dataLen, - &ptr, - NULL)) { - free(bindBuffer); - return -1; - } + ptr += bind->buffer_length; + } else if ( -1 == prepareStmtBindArrayByTypeForRand( + bind, + stbInfo->columns[i-1].dataType, + stbInfo->columns[i-1].dataLen, + timePrec, + &ptr, + NULL)) { + tmfree(bindBuffer); + return -1; } - } - free(bindBuffer); + tmfree(bindBuffer); return 0; } -static int32_t prepareStbStmt( - SSuperTable *stbInfo, - TAOS_STMT *stmt, +static int32_t prepareStbStmtBindWithSample( + int64_t *ts, + char *bindArray, SSuperTable *stbInfo, + int64_t startTime, int32_t recSeq, + int32_t timePrec, + int64_t samplePos) +{ + TAOS_BIND *bind; + + bind = (TAOS_BIND *)bindArray; + + int64_t *bind_ts = ts; + + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + if (stbInfo->disorderRatio) { + *bind_ts = startTime + getTSRandTail( + stbInfo->timeStampStep, recSeq, + stbInfo->disorderRatio, + stbInfo->disorderRange); + } else { + *bind_ts = startTime + stbInfo->timeStampStep * recSeq; + } + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_ts; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + return 0; +} + +static int32_t prepareStbStmtRand( + threadInfo *pThreadInfo, char *tableName, int64_t tableSeq, uint32_t batch, uint64_t insertRows, uint64_t recordFrom, - int64_t startTime, - int64_t *pSamplePos) + int64_t startTime) { int ret; - - bool sourceRand; - if (0 == strncasecmp(stbInfo->dataSource, "rand", strlen("rand"))) { - sourceRand = true; - } else { - sourceRand = false; // from sample data file - } + SSuperTable *stbInfo = pThreadInfo->stbInfo; + TAOS_STMT *stmt = pThreadInfo->stmt; if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) { char* tagsValBuf = NULL; - bool tagRand; if (0 == stbInfo->tagSource) { - tagRand = true; tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq); } else { - tagRand = false; tagsValBuf = getTagValueFromTagSample( stbInfo, tableSeq % stbInfo->tagSampleCount); @@ -6150,8 +6406,9 @@ static int32_t prepareStbStmt( return -1; } - if (-1 == prepareStbStmtBind( - tagsArray, stbInfo, tagRand, -1, -1, false /* is tag */)) { + if (-1 == prepareStbStmtBindTag( + tagsArray, stbInfo, tagsValBuf, pThreadInfo->time_precision + /* is tag */)) { tmfree(tagsValBuf); tmfree(tagsArray); return -1; @@ -6186,8 +6443,12 @@ static int32_t prepareStbStmt( uint32_t k; for (k = 0; k < batch;) { /* columnCount + 1 (ts) */ - if (-1 == prepareStbStmtBind(bindArray, stbInfo, sourceRand, - startTime, k, true /* is column */)) { + if (-1 == prepareStbStmtBindRand( + pThreadInfo->bind_ts, + bindArray, stbInfo, + startTime, k, + pThreadInfo->time_precision + /* is column */)) { free(bindArray); return -1; } @@ -6210,10 +6471,6 @@ static int32_t prepareStbStmt( k++; recordFrom ++; - if (!sourceRand) { - (*pSamplePos) ++; - } - if (recordFrom >= 
insertRows) { break; } @@ -6223,9 +6480,8 @@ static int32_t prepareStbStmt( return k; } -static int32_t prepareStbStmtInterlace( - SSuperTable *stbInfo, - TAOS_STMT *stmt, +static int32_t prepareStbStmtWithSample( + threadInfo *pThreadInfo, char *tableName, int64_t tableSeq, uint32_t batch, @@ -6234,41 +6490,109 @@ static int32_t prepareStbStmtInterlace( int64_t startTime, int64_t *pSamplePos) { - return prepareStbStmt( - stbInfo, - stmt, - tableName, - tableSeq, - batch, - insertRows, 0, startTime, - pSamplePos); -} + int ret; + SSuperTable *stbInfo = pThreadInfo->stbInfo; + TAOS_STMT *stmt = pThreadInfo->stmt; -static int32_t prepareStbStmtProgressive( - SSuperTable *stbInfo, - TAOS_STMT *stmt, - char *tableName, - int64_t tableSeq, - uint32_t batch, - uint64_t insertRows, - uint64_t recordFrom, - int64_t startTime, - int64_t *pSamplePos) -{ - return prepareStbStmt( - stbInfo, - stmt, - tableName, - tableSeq, - g_args.num_of_RPR, - insertRows, recordFrom, startTime, - pSamplePos); -} + if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) { + char* tagsValBuf = NULL; + if (0 == stbInfo->tagSource) { + tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq); + } else { + tagsValBuf = getTagValueFromTagSample( + stbInfo, + tableSeq % stbInfo->tagSampleCount); + } + + if (NULL == tagsValBuf) { + errorPrint("%s() LN%d, tag buf failed to allocate memory\n", + __func__, __LINE__); + return -1; + } + + char *tagsArray = calloc(1, sizeof(TAOS_BIND) * stbInfo->tagCount); + if (NULL == tagsArray) { + tmfree(tagsValBuf); + errorPrint("%s() LN%d, tag buf failed to allocate memory\n", + __func__, __LINE__); + return -1; + } + + if (-1 == prepareStbStmtBindTag( + tagsArray, stbInfo, tagsValBuf, pThreadInfo->time_precision + /* is tag */)) { + tmfree(tagsValBuf); + tmfree(tagsArray); + return -1; + } + + ret = taos_stmt_set_tbname_tags(stmt, tableName, (TAOS_BIND *)tagsArray); + + tmfree(tagsValBuf); + tmfree(tagsArray); + + if (0 != ret) { + errorPrint("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + } else { + ret = taos_stmt_set_tbname(stmt, tableName); + if (0 != ret) { + errorPrint("%s() LN%d, stmt_set_tbname() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + } + + uint32_t k; + for (k = 0; k < batch;) { + char *bindArray = (char *)(*((uintptr_t *) + (stbInfo->sampleBindArray + (sizeof(char *)) * (*pSamplePos)))); + /* columnCount + 1 (ts) */ + if (-1 == prepareStbStmtBindWithSample( + pThreadInfo->bind_ts, + bindArray, stbInfo, + startTime, k, + pThreadInfo->time_precision, + *pSamplePos + /* is column */)) { + return -1; + } + ret = taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray); + if (0 != ret) { + errorPrint("%s() LN%d, stmt_bind_param() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + // if msg > 3MB, break + ret = taos_stmt_add_batch(stmt); + if (0 != ret) { + errorPrint("%s() LN%d, stmt_add_batch() failed! 
reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + + k++; + recordFrom ++; + + (*pSamplePos) ++; + if ((*pSamplePos) == MAX_SAMPLES_ONCE_FROM_FILE) { + *pSamplePos = 0; + } + + if (recordFrom >= insertRows) { + break; + } + } + + return k; +} #endif static int32_t generateStbProgressiveData( - SSuperTable *superTblInfo, + SSuperTable *stbInfo, char *tableName, int64_t tableSeq, char *dbName, char *buffer, @@ -6282,7 +6606,7 @@ static int32_t generateStbProgressiveData( memset(pstr, 0, *pRemainderBufLen); int64_t headLen = generateStbSQLHead( - superTblInfo, + stbInfo, tableName, tableSeq, dbName, buffer, *pRemainderBufLen); @@ -6294,7 +6618,7 @@ static int32_t generateStbProgressiveData( int64_t dataLen; - return generateStbDataTail(superTblInfo, + return generateStbDataTail(stbInfo, g_args.num_of_RPR, pstr, *pRemainderBufLen, insertRows, recordFrom, startTime, @@ -6354,26 +6678,34 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { int64_t nTimeStampStep; uint64_t insert_interval; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + bool sourceRand; - if (superTblInfo) { - insertRows = superTblInfo->insertRows; + SSuperTable* stbInfo = pThreadInfo->stbInfo; - if ((superTblInfo->interlaceRows == 0) + if (stbInfo) { + insertRows = stbInfo->insertRows; + + if ((stbInfo->interlaceRows == 0) && (g_args.interlace_rows > 0)) { interlaceRows = g_args.interlace_rows; } else { - interlaceRows = superTblInfo->interlaceRows; + interlaceRows = stbInfo->interlaceRows; + } + maxSqlLen = stbInfo->maxSqlLen; + nTimeStampStep = stbInfo->timeStampStep; + insert_interval = stbInfo->insertInterval; + if (0 == strncasecmp(stbInfo->dataSource, "rand", 4)) { + sourceRand = true; + } else { + sourceRand = false; // from sample data file } - maxSqlLen = superTblInfo->maxSqlLen; - nTimeStampStep = superTblInfo->timeStampStep; - insert_interval = superTblInfo->insertInterval; } else { insertRows = g_args.num_of_DPT; interlaceRows = g_args.interlace_rows; maxSqlLen = g_args.max_sql_len; nTimeStampStep = g_args.timestamp_step; insert_interval = g_args.insert_interval; + sourceRand = true; } debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n", @@ -6456,28 +6788,38 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { uint64_t oldRemainderLen = remainderBufLen; int32_t generated; - if (superTblInfo) { - if (superTblInfo->iface == STMT_IFACE) { + if (stbInfo) { + if (stbInfo->iface == STMT_IFACE) { #if STMT_IFACE_ENABLED == 1 - generated = prepareStbStmtInterlace( - superTblInfo, - pThreadInfo->stmt, - tableName, - tableSeq, - batchPerTbl, - insertRows, i, - startTime, - &(pThreadInfo->samplePos)); + if (sourceRand) { + generated = prepareStbStmtRand( + pThreadInfo, + tableName, + tableSeq, + batchPerTbl, + insertRows, 0, + startTime + ); + } else { + generated = prepareStbStmtWithSample( + pThreadInfo, + tableName, + tableSeq, + batchPerTbl, + insertRows, 0, + startTime, + &(pThreadInfo->samplePos)); + } #else generated = -1; #endif } else { generated = generateStbInterlaceData( - superTblInfo, + pThreadInfo, tableName, batchPerTbl, i, batchPerTblTimes, tableSeq, - pThreadInfo, pstr, + pstr, insertRows, startTime, &remainderBufLen); @@ -6490,7 +6832,8 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { tableName, batchPerTbl, startTime); #if STMT_IFACE_ENABLED == 1 generated = prepareStmtWithoutStb( - pThreadInfo->stmt, tableName, + pThreadInfo, + tableName, batchPerTbl, insertRows, i, startTime); @@ -6639,12 
+6982,12 @@ free_of_interlace: static void* syncWriteProgressive(threadInfo *pThreadInfo) { debugPrint("%s() LN%d: ### progressive write\n", __func__, __LINE__); - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - uint64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; + SSuperTable* stbInfo = pThreadInfo->stbInfo; + uint64_t maxSqlLen = stbInfo?stbInfo->maxSqlLen:g_args.max_sql_len; int64_t timeStampStep = - superTblInfo?superTblInfo->timeStampStep:g_args.timestamp_step; + stbInfo?stbInfo->timeStampStep:g_args.timestamp_step; int64_t insertRows = - (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT; + (stbInfo)?stbInfo->insertRows:g_args.num_of_DPT; verbosePrint("%s() LN%d insertRows=%"PRId64"\n", __func__, __LINE__, insertRows); @@ -6663,6 +7006,17 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { pThreadInfo->totalInsertRows = 0; pThreadInfo->totalAffectedRows = 0; + bool sourceRand; + if (stbInfo) { + if (0 == strncasecmp(stbInfo->dataSource, "rand", 4)) { + sourceRand = true; + } else { + sourceRand = false; // from sample data file + } + } else { + sourceRand = true; + } + pThreadInfo->samplePos = 0; int percentComplete = 0; @@ -6696,24 +7050,35 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { remainderBufLen -= len; int32_t generated; - if (superTblInfo) { - if (superTblInfo->iface == STMT_IFACE) { + if (stbInfo) { + if (stbInfo->iface == STMT_IFACE) { #if STMT_IFACE_ENABLED == 1 - generated = prepareStbStmtProgressive( - superTblInfo, - pThreadInfo->stmt, - tableName, - tableSeq, - g_args.num_of_RPR, - insertRows, i, start_time, - &(pThreadInfo->samplePos)); + if (sourceRand) { + generated = prepareStbStmtRand( + pThreadInfo, + tableName, + tableSeq, + g_args.num_of_RPR, + insertRows, + i, start_time + ); + } else { + generated = prepareStbStmtWithSample( + pThreadInfo, + tableName, + tableSeq, + g_args.num_of_RPR, + insertRows, i, start_time, + &(pThreadInfo->samplePos)); + } #else generated = -1; #endif } else { generated = generateStbProgressiveData( - superTblInfo, - tableName, tableSeq, pThreadInfo->db_name, pstr, + stbInfo, + tableName, tableSeq, + pThreadInfo->db_name, pstr, insertRows, i, start_time, &(pThreadInfo->samplePos), &remainderBufLen); @@ -6722,7 +7087,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { if (g_args.iface == STMT_IFACE) { #if STMT_IFACE_ENABLED == 1 generated = prepareStmtWithoutStb( - pThreadInfo->stmt, + pThreadInfo, tableName, g_args.num_of_RPR, insertRows, i, @@ -6792,9 +7157,9 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { } // num_of_DPT if ((g_args.verbose_print) && - (tableSeq == pThreadInfo->ntables - 1) && (superTblInfo) + (tableSeq == pThreadInfo->ntables - 1) && (stbInfo) && (0 == strncasecmp( - superTblInfo->dataSource, + stbInfo->dataSource, "sample", strlen("sample")))) { verbosePrint("%s() LN%d samplePos=%"PRId64"\n", __func__, __LINE__, pThreadInfo->samplePos); @@ -6812,18 +7177,18 @@ free_of_progressive: static void* syncWrite(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + SSuperTable* stbInfo = pThreadInfo->stbInfo; setThreadName("syncWrite"); uint32_t interlaceRows; - if (superTblInfo) { - if ((superTblInfo->interlaceRows == 0) + if (stbInfo) { + if ((stbInfo->interlaceRows == 0) && (g_args.interlace_rows > 0)) { interlaceRows = g_args.interlace_rows; } else { - interlaceRows = superTblInfo->interlaceRows; + interlaceRows = stbInfo->interlaceRows; } } else { 
interlaceRows = g_args.interlace_rows; @@ -6840,10 +7205,10 @@ static void* syncWrite(void *sarg) { static void callBack(void *param, TAOS_RES *res, int code) { threadInfo* pThreadInfo = (threadInfo*)param; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + SSuperTable* stbInfo = pThreadInfo->stbInfo; int insert_interval = - superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; + stbInfo?stbInfo->insertInterval:g_args.insert_interval; if (insert_interval) { pThreadInfo->et = taosGetTimestampMs(); if ((pThreadInfo->et - pThreadInfo->st) < insert_interval) { @@ -6851,13 +7216,13 @@ static void callBack(void *param, TAOS_RES *res, int code) { } } - char *buffer = calloc(1, pThreadInfo->superTblInfo->maxSqlLen); + char *buffer = calloc(1, pThreadInfo->stbInfo->maxSqlLen); char data[MAX_DATA_SIZE]; char *pstr = buffer; pstr += sprintf(pstr, "insert into %s.%s%"PRId64" values", pThreadInfo->db_name, pThreadInfo->tb_prefix, pThreadInfo->start_table_from); - // if (pThreadInfo->counter >= pThreadInfo->superTblInfo->insertRows) { + // if (pThreadInfo->counter >= pThreadInfo->stbInfo->insertRows) { if (pThreadInfo->counter >= g_args.num_of_RPR) { pThreadInfo->start_table_from++; pThreadInfo->counter = 0; @@ -6871,15 +7236,15 @@ static void callBack(void *param, TAOS_RES *res, int code) { for (int i = 0; i < g_args.num_of_RPR; i++) { int rand_num = taosRandom() % 100; - if (0 != pThreadInfo->superTblInfo->disorderRatio - && rand_num < pThreadInfo->superTblInfo->disorderRatio) { + if (0 != pThreadInfo->stbInfo->disorderRatio + && rand_num < pThreadInfo->stbInfo->disorderRatio) { int64_t d = pThreadInfo->lastTs - - (taosRandom() % pThreadInfo->superTblInfo->disorderRange + 1); - generateStbRowData(pThreadInfo->superTblInfo, data, + - (taosRandom() % pThreadInfo->stbInfo->disorderRange + 1); + generateStbRowData(pThreadInfo->stbInfo, data, MAX_DATA_SIZE, d); } else { - generateStbRowData(pThreadInfo->superTblInfo, + generateStbRowData(pThreadInfo->stbInfo, data, MAX_DATA_SIZE, pThreadInfo->lastTs += 1000); @@ -6887,7 +7252,7 @@ static void callBack(void *param, TAOS_RES *res, int code) { pstr += sprintf(pstr, "%s", data); pThreadInfo->counter++; - if (pThreadInfo->counter >= pThreadInfo->superTblInfo->insertRows) { + if (pThreadInfo->counter >= pThreadInfo->stbInfo->insertRows) { break; } } @@ -6903,7 +7268,7 @@ static void callBack(void *param, TAOS_RES *res, int code) { static void *asyncWrite(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + SSuperTable* stbInfo = pThreadInfo->stbInfo; setThreadName("asyncWrite"); @@ -6912,7 +7277,7 @@ static void *asyncWrite(void *sarg) { pThreadInfo->lastTs = pThreadInfo->start_time; int insert_interval = - superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; + stbInfo?stbInfo->insertInterval:g_args.insert_interval; if (insert_interval) { pThreadInfo->st = taosGetTimestampMs(); } @@ -6949,8 +7314,81 @@ static int convertHostToServAddr(char *host, uint16_t port, struct sockaddr_in * return 0; } +#if STMT_IFACE_ENABLED == 1 +static int parseSampleFileToStmt(SSuperTable *stbInfo, uint32_t timePrec) +{ + stbInfo->sampleBindArray = calloc(1, sizeof(char *) * MAX_SAMPLES_ONCE_FROM_FILE); + if (stbInfo->sampleBindArray == NULL) { + errorPrint("%s() LN%d, Failed to allocate %"PRIu64" bind array buffer\n", + __func__, __LINE__, (uint64_t)sizeof(char *) * MAX_SAMPLES_ONCE_FROM_FILE); + return -1; + } + + + for (int i=0; i < MAX_SAMPLES_ONCE_FROM_FILE; i++) { + char 
*bindArray = calloc(1, sizeof(TAOS_BIND) * (stbInfo->columnCount + 1)); + if (bindArray == NULL) { + errorPrint("%s() LN%d, Failed to allocate %d bind params\n", + __func__, __LINE__, (stbInfo->columnCount + 1)); + return -1; + } + + + TAOS_BIND *bind; + int cursor = 0; + + for (int c = 0; c < stbInfo->columnCount + 1; c++) { + bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * c)); + + if (c == 0) { + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + bind->buffer_length = sizeof(int64_t); + bind->buffer = NULL; //bind_ts; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else { + char *restStr = stbInfo->sampleDataBuf + + stbInfo->lenOfOneRow * i + cursor; + int lengthOfRest = strlen(restStr); + + int index = 0; + for (index = 0; index < lengthOfRest; index ++) { + if (restStr[index] == ',') { + break; + } + } + + char *bindBuffer = calloc(1, index + 1); + if (bindBuffer == NULL) { + errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n", + __func__, __LINE__, DOUBLE_BUFF_LEN); + return -1; + } + + strncpy(bindBuffer, restStr, index); + cursor += index + 1; // skip ',' too + + if (-1 == prepareStmtBindArrayByType( + bind, + stbInfo->columns[c-1].dataType, + stbInfo->columns[c-1].dataLen, + timePrec, + bindBuffer)) { + free(bindBuffer); + return -1; + } + free(bindBuffer); + } + } + *((uintptr_t *)(stbInfo->sampleBindArray + (sizeof(char *)) * i)) = (uintptr_t)bindArray; + } + + return 0; +} +#endif + static void startMultiThreadInsertData(int threads, char* db_name, - char* precision, SSuperTable* superTblInfo) { + char* precision, SSuperTable* stbInfo) { int32_t timePrec = TSDB_TIME_PRECISION_MILLI; if (0 != precision[0]) { @@ -6964,19 +7402,19 @@ static void startMultiThreadInsertData(int threads, char* db_name, #endif } else { errorPrint("Not support precision: %s\n", precision); - exit(-1); + exit(EXIT_FAILURE); } } int64_t start_time; - if (superTblInfo) { - if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) { + if (stbInfo) { + if (0 == strncasecmp(stbInfo->startTimestamp, "now", 3)) { start_time = taosGetTimestamp(timePrec); } else { if (TSDB_CODE_SUCCESS != taosParseTime( - superTblInfo->startTimestamp, + stbInfo->startTimestamp, &start_time, - strlen(superTblInfo->startTimestamp), + strlen(stbInfo->startTimestamp), timePrec, 0)) { ERROR_EXIT("failed to parse time!\n"); } @@ -6990,12 +7428,12 @@ static void startMultiThreadInsertData(int threads, char* db_name, int64_t start = taosGetTimestampMs(); // read sample data from file first - if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource, + if ((stbInfo) && (0 == strncasecmp(stbInfo->dataSource, "sample", strlen("sample")))) { - if (0 != prepareSampleDataForSTable(superTblInfo)) { + if (0 != prepareSampleDataForSTable(stbInfo)) { errorPrint("%s() LN%d, prepare sample data for stable failed!\n", __func__, __LINE__); - exit(-1); + exit(EXIT_FAILURE); } } @@ -7005,68 +7443,68 @@ static void startMultiThreadInsertData(int threads, char* db_name, if (NULL == taos0) { errorPrint("%s() LN%d, connect to server fail , reason: %s\n", __func__, __LINE__, taos_errstr(NULL)); - exit(-1); + exit(EXIT_FAILURE); } int64_t ntables = 0; uint64_t tableFrom; - if (superTblInfo) { + if (stbInfo) { int64_t limit; uint64_t offset; if ((NULL != g_args.sqlFile) - && (superTblInfo->childTblExists == TBL_NO_EXISTS) - && ((superTblInfo->childTblOffset != 0) - || (superTblInfo->childTblLimit >= 0))) { + && (stbInfo->childTblExists == TBL_NO_EXISTS) + && ((stbInfo->childTblOffset != 0) + || 
(stbInfo->childTblLimit >= 0))) { printf("WARNING: offset and limit will not be used since the child tables not exists!\n"); } - if (superTblInfo->childTblExists == TBL_ALREADY_EXISTS) { - if ((superTblInfo->childTblLimit < 0) - || ((superTblInfo->childTblOffset - + superTblInfo->childTblLimit) - > (superTblInfo->childTblCount))) { - superTblInfo->childTblLimit = - superTblInfo->childTblCount - superTblInfo->childTblOffset; + if (stbInfo->childTblExists == TBL_ALREADY_EXISTS) { + if ((stbInfo->childTblLimit < 0) + || ((stbInfo->childTblOffset + + stbInfo->childTblLimit) + > (stbInfo->childTblCount))) { + stbInfo->childTblLimit = + stbInfo->childTblCount - stbInfo->childTblOffset; } - offset = superTblInfo->childTblOffset; - limit = superTblInfo->childTblLimit; + offset = stbInfo->childTblOffset; + limit = stbInfo->childTblLimit; } else { - limit = superTblInfo->childTblCount; + limit = stbInfo->childTblCount; offset = 0; } ntables = limit; tableFrom = offset; - if ((superTblInfo->childTblExists != TBL_NO_EXISTS) - && ((superTblInfo->childTblOffset + superTblInfo->childTblLimit ) - > superTblInfo->childTblCount)) { + if ((stbInfo->childTblExists != TBL_NO_EXISTS) + && ((stbInfo->childTblOffset + stbInfo->childTblLimit) + > stbInfo->childTblCount)) { printf("WARNING: specified offset + limit > child table count!\n"); prompt(); } - if ((superTblInfo->childTblExists != TBL_NO_EXISTS) - && (0 == superTblInfo->childTblLimit)) { + if ((stbInfo->childTblExists != TBL_NO_EXISTS) + && (0 == stbInfo->childTblLimit)) { printf("WARNING: specified limit = 0, which cannot find table name to insert or query! \n"); prompt(); } - superTblInfo->childTblName = (char*)calloc(1, + stbInfo->childTblName = (char*)calloc(1, limit * TSDB_TABLE_NAME_LEN); - if (superTblInfo->childTblName == NULL) { - errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__); + if (stbInfo->childTblName == NULL) { taos_close(taos0); - exit(-1); + errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__); + exit(EXIT_FAILURE); } int64_t childTblCount; getChildNameOfSuperTableWithLimitAndOffset( taos0, - db_name, superTblInfo->sTblName, - &superTblInfo->childTblName, &childTblCount, + db_name, stbInfo->sTblName, + &stbInfo->childTblName, &childTblCount, limit, offset); } else { @@ -7087,11 +7525,11 @@ static void startMultiThreadInsertData(int threads, char* db_name, b = ntables % threads; } - if ((superTblInfo) - && (superTblInfo->iface == REST_IFACE)) { + if ((stbInfo) + && (stbInfo->iface == REST_IFACE)) { if (convertHostToServAddr( g_Dbs.host, g_Dbs.port, &(g_Dbs.serv_addr)) != 0) { - exit(-1); + ERROR_EXIT("convert host to server address"); } } @@ -7104,98 +7542,110 @@ static void startMultiThreadInsertData(int threads, char* db_name, memset(pids, 0, threads * sizeof(pthread_t)); memset(infos, 0, threads * sizeof(threadInfo)); +#if STMT_IFACE_ENABLED == 1 + char *stmtBuffer = calloc(1, BUFFER_SIZE); + assert(stmtBuffer); + if ((g_args.iface == STMT_IFACE) + || ((stbInfo) + && (stbInfo->iface == STMT_IFACE))) { + char *pstr = stmtBuffer; + + if ((stbInfo) + && (AUTO_CREATE_SUBTBL + == stbInfo->autoCreateTable)) { + pstr += sprintf(pstr, "INSERT INTO ? USING %s TAGS(?", + stbInfo->sTblName); + for (int tag = 0; tag < (stbInfo->tagCount - 1); + tag ++ ) { + pstr += sprintf(pstr, ",?"); + } + pstr += sprintf(pstr, ") VALUES(?"); + } else { + pstr += sprintf(pstr, "INSERT INTO ? 
VALUES(?"); + } + + int columnCount; + if (stbInfo) { + columnCount = stbInfo->columnCount; + } else { + columnCount = g_args.num_of_CPR; + } + + for (int col = 0; col < columnCount; col ++) { + pstr += sprintf(pstr, ",?"); + } + pstr += sprintf(pstr, ")"); + + debugPrint("%s() LN%d, stmtBuffer: %s", __func__, __LINE__, stmtBuffer); + + if ((stbInfo) && (0 == strncasecmp(stbInfo->dataSource, + "sample", strlen("sample")))) { + parseSampleFileToStmt(stbInfo, timePrec); + } + } +#endif + for (int i = 0; i < threads; i++) { threadInfo *pThreadInfo = infos + i; pThreadInfo->threadID = i; tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN); pThreadInfo->time_precision = timePrec; - pThreadInfo->superTblInfo = superTblInfo; + pThreadInfo->stbInfo = stbInfo; pThreadInfo->start_time = start_time; pThreadInfo->minDelay = UINT64_MAX; - if ((NULL == superTblInfo) || - (superTblInfo->iface != REST_IFACE)) { + if ((NULL == stbInfo) || + (stbInfo->iface != REST_IFACE)) { //t_info->taos = taos; pThreadInfo->taos = taos_connect( g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port); if (NULL == pThreadInfo->taos) { + free(infos); errorPrint( "%s() LN%d, connect to server fail from insert sub thread, reason: %s\n", __func__, __LINE__, taos_errstr(NULL)); - free(infos); - exit(-1); + exit(EXIT_FAILURE); } #if STMT_IFACE_ENABLED == 1 if ((g_args.iface == STMT_IFACE) - || ((superTblInfo) - && (superTblInfo->iface == STMT_IFACE))) { + || ((stbInfo) + && (stbInfo->iface == STMT_IFACE))) { - int columnCount; - if (superTblInfo) { - columnCount = superTblInfo->columnCount; - } else { - columnCount = g_args.num_of_CPR; - } pThreadInfo->stmt = taos_stmt_init(pThreadInfo->taos); if (NULL == pThreadInfo->stmt) { + free(pids); + free(infos); errorPrint( "%s() LN%d, failed init stmt, reason: %s\n", __func__, __LINE__, taos_errstr(NULL)); + exit(EXIT_FAILURE); + } + + int ret = taos_stmt_prepare(pThreadInfo->stmt, stmtBuffer, 0); + if (ret != 0){ free(pids); free(infos); - exit(-1); - } - - char *buffer = calloc(1, BUFFER_SIZE); - assert(buffer); - char *pstr = buffer; - - if ((superTblInfo) - && (AUTO_CREATE_SUBTBL - == superTblInfo->autoCreateTable)) { - pstr += sprintf(pstr, "INSERT INTO ? USING %s TAGS(?", - superTblInfo->sTblName); - for (int tag = 0; tag < (superTblInfo->tagCount - 1); - tag ++ ) { - pstr += sprintf(pstr, ",?"); - } - pstr += sprintf(pstr, ") VALUES(?"); - } else { - pstr += sprintf(pstr, "INSERT INTO ? VALUES(?"); - } - - for (int col = 0; col < columnCount; col ++) { - pstr += sprintf(pstr, ",?"); - } - pstr += sprintf(pstr, ")"); - - debugPrint("%s() LN%d, buffer: %s", __func__, __LINE__, buffer); - int ret = taos_stmt_prepare(pThreadInfo->stmt, buffer, 0); - if (ret != 0){ + free(stmtBuffer); errorPrint("failed to execute taos_stmt_prepare. return 0x%x. 
reason: %s\n", ret, taos_stmt_errstr(pThreadInfo->stmt)); - free(pids); - free(infos); - free(buffer); - exit(-1); + exit(EXIT_FAILURE); } - - free(buffer); + pThreadInfo->bind_ts = malloc(sizeof(int64_t)); } #endif } else { pThreadInfo->taos = NULL; } - /* if ((NULL == superTblInfo) - || (0 == superTblInfo->multiThreadWriteOneTbl)) { + /* if ((NULL == stbInfo) + || (0 == stbInfo->multiThreadWriteOneTbl)) { */ pThreadInfo->start_table_from = tableFrom; pThreadInfo->ntables = iend_table_to + 1; /* } else { pThreadInfo->start_table_from = 0; - pThreadInfo->ntables = superTblInfo->childTblCount; + pThreadInfo->ntables = stbInfo->childTblCount; pThreadInfo->start_time = pThreadInfo->start_time + rand_int() % 10000 - rand_tinyint(); } */ @@ -7215,6 +7665,10 @@ static void startMultiThreadInsertData(int threads, char* db_name, } } +#if STMT_IFACE_ENABLED == 1 + free(stmtBuffer); +#endif + for (int i = 0; i < threads; i++) { pthread_join(pids[i], NULL); } @@ -7228,11 +7682,10 @@ static void startMultiThreadInsertData(int threads, char* db_name, for (int i = 0; i < threads; i++) { threadInfo *pThreadInfo = infos + i; - tsem_destroy(&(pThreadInfo->lock_sem)); - #if STMT_IFACE_ENABLED == 1 if (pThreadInfo->stmt) { taos_stmt_close(pThreadInfo->stmt); + tmfree((char *)pThreadInfo->bind_ts); } #endif tsem_destroy(&(pThreadInfo->lock_sem)); @@ -7242,9 +7695,9 @@ static void startMultiThreadInsertData(int threads, char* db_name, __func__, __LINE__, pThreadInfo->threadID, pThreadInfo->totalInsertRows, pThreadInfo->totalAffectedRows); - if (superTblInfo) { - superTblInfo->totalAffectedRows += pThreadInfo->totalAffectedRows; - superTblInfo->totalInsertRows += pThreadInfo->totalInsertRows; + if (stbInfo) { + stbInfo->totalAffectedRows += pThreadInfo->totalAffectedRows; + stbInfo->totalInsertRows += pThreadInfo->totalInsertRows; } else { g_args.totalAffectedRows += pThreadInfo->totalAffectedRows; g_args.totalInsertRows += pThreadInfo->totalInsertRows; @@ -7265,22 +7718,22 @@ static void startMultiThreadInsertData(int threads, char* db_name, double tInMs = t/1000.0; - if (superTblInfo) { + if (stbInfo) { fprintf(stderr, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n", - tInMs, superTblInfo->totalInsertRows, - superTblInfo->totalAffectedRows, - threads, db_name, superTblInfo->sTblName, + tInMs, stbInfo->totalInsertRows, + stbInfo->totalAffectedRows, + threads, db_name, stbInfo->sTblName, (tInMs)? - (double)(superTblInfo->totalInsertRows/tInMs):FLT_MAX); + (double)(stbInfo->totalInsertRows/tInMs):FLT_MAX); if (g_fpOfInsertResult) { fprintf(g_fpOfInsertResult, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n", - tInMs, superTblInfo->totalInsertRows, - superTblInfo->totalAffectedRows, - threads, db_name, superTblInfo->sTblName, + tInMs, stbInfo->totalInsertRows, + stbInfo->totalAffectedRows, + threads, db_name, stbInfo->sTblName, (tInMs)? 
- (double)(superTblInfo->totalInsertRows/tInMs):FLT_MAX); + (double)(stbInfo->totalInsertRows/tInMs):FLT_MAX); } } else { fprintf(stderr, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n", @@ -7335,8 +7788,8 @@ static void *readTable(void *sarg) { } int64_t num_of_DPT; - /* if (pThreadInfo->superTblInfo) { - num_of_DPT = pThreadInfo->superTblInfo->insertRows; // nrecords_per_table; + /* if (pThreadInfo->stbInfo) { + num_of_DPT = pThreadInfo->stbInfo->insertRows; // nrecords_per_table; } else { */ num_of_DPT = g_args.num_of_DPT; @@ -7410,7 +7863,7 @@ static void *readMetric(void *sarg) { return NULL; } - int64_t num_of_DPT = pThreadInfo->superTblInfo->insertRows; + int64_t num_of_DPT = pThreadInfo->stbInfo->insertRows; int64_t num_of_tables = pThreadInfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1; int64_t totalData = num_of_DPT * num_of_tables; bool do_aggreFunc = g_Dbs.do_aggreFunc; @@ -7549,14 +8002,14 @@ static int insertTestProcess() { if (g_Dbs.db[i].superTblCount > 0) { for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { - SSuperTable* superTblInfo = &g_Dbs.db[i].superTbls[j]; + SSuperTable* stbInfo = &g_Dbs.db[i].superTbls[j]; - if (superTblInfo && (superTblInfo->insertRows > 0)) { + if (stbInfo && (stbInfo->insertRows > 0)) { startMultiThreadInsertData( g_Dbs.threadCount, g_Dbs.db[i].dbName, g_Dbs.db[i].dbCfg.precision, - superTblInfo); + stbInfo); } } } @@ -7776,7 +8229,7 @@ static int queryTestProcess() { if (taos == NULL) { errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); - exit(-1); + exit(EXIT_FAILURE); } if (0 != g_queryInfo.superQueryInfo.sqlCount) { @@ -7796,7 +8249,7 @@ static int queryTestProcess() { if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) { if (convertHostToServAddr( g_queryInfo.host, g_queryInfo.port, &g_queryInfo.serv_addr) != 0) - exit(-1); + ERROR_EXIT("convert host to server address"); } pthread_t *pids = NULL; @@ -8000,10 +8453,10 @@ static void *superSubscribe(void *sarg) { setThreadName("superSub"); if (pThreadInfo->ntables > MAX_QUERY_SQL_COUNT) { + free(subSqlStr); errorPrint("The table number(%"PRId64") of the thread is more than max query sql count: %d\n", pThreadInfo->ntables, MAX_QUERY_SQL_COUNT); - free(subSqlStr); - exit(-1); + exit(EXIT_FAILURE); } if (pThreadInfo->taos == NULL) { @@ -8269,7 +8722,7 @@ static int subscribeTestProcess() { if (taos == NULL) { errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); - exit(-1); + exit(EXIT_FAILURE); } if (0 != g_queryInfo.superQueryInfo.sqlCount) { @@ -8298,7 +8751,7 @@ static int subscribeTestProcess() { errorPrint("%s() LN%d, sepcified query sqlCount %d.\n", __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount); - exit(-1); + exit(EXIT_FAILURE); } pids = calloc( @@ -8313,7 +8766,7 @@ static int subscribeTestProcess() { sizeof(threadInfo)); if ((NULL == pids) || (NULL == infos)) { errorPrint("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__); - exit(-1); + exit(EXIT_FAILURE); } for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { @@ -8350,7 +8803,7 @@ static int subscribeTestProcess() { errorPrint("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__); // taos_close(taos); - exit(-1); + exit(EXIT_FAILURE); } int64_t ntables = g_queryInfo.superQueryInfo.childTblCount; @@ -8553,8 +9006,7 @@ static int regexMatch(const char *s, const char *reg, int cflags) { /* Compile regular 
expression */ if (regcomp(®ex, reg, cflags) != 0) { - printf("Fail to compile regex\n"); - exit(-1); + ERROR_EXIT("Fail to compile regex\n"); } /* Execute regular expression */ @@ -8567,9 +9019,9 @@ static int regexMatch(const char *s, const char *reg, int cflags) { return 0; } else { regerror(reti, ®ex, msgbuf, sizeof(msgbuf)); - printf("Regex match failed: %s\n", msgbuf); regfree(®ex); - exit(-1); + printf("Regex match failed: %s\n", msgbuf); + exit(EXIT_FAILURE); } return 0; @@ -8671,7 +9123,7 @@ static void queryResult() { if (g_args.use_metric) { pThreadInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount; pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1; - pThreadInfo->superTblInfo = &g_Dbs.db[0].superTbls[0]; + pThreadInfo->stbInfo = &g_Dbs.db[0].superTbls[0]; tstrncpy(pThreadInfo->tb_prefix, g_Dbs.db[0].superTbls[0].childTblPrefix, TBNAME_PREFIX_LEN); } else { @@ -8687,10 +9139,10 @@ static void queryResult() { g_Dbs.db[0].dbName, g_Dbs.port); if (pThreadInfo->taos == NULL) { + free(pThreadInfo); errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); - free(pThreadInfo); - exit(-1); + exit(EXIT_FAILURE); } tstrncpy(pThreadInfo->filePath, g_Dbs.resultFile, MAX_FILE_NAME_LEN); From d5f38ac9c92f452386771fa7bfefcf29b2c94520 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Sat, 14 Aug 2021 08:57:03 +0800 Subject: [PATCH 125/185] do further check for blk with len 0 and SKVRow --- src/wal/src/walWrite.c | 37 ++++++++++++++++++++++++++++++++----- 1 file changed, 32 insertions(+), 5 deletions(-) diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c index 2dfdb84818..7523369dc2 100644 --- a/src/wal/src/walWrite.c +++ b/src/wal/src/walWrite.c @@ -309,36 +309,63 @@ static int32_t walSkipCorruptedRecord(SWal *pWal, SWalHead *pHead, int64_t tfd, } // Add SMemRowType ahead of SDataRow static void expandSubmitBlk(SSubmitBlk *pDest, SSubmitBlk *pSrc, int32_t *lenExpand) { + // copy the header firstly memcpy(pDest, pSrc, sizeof(SSubmitBlk)); - int nRows = htons(pSrc->numOfRows); - if (nRows <= 0) { + + int32_t nRows = htons(pDest->numOfRows); + int32_t dataLen = htonl(pDest->dataLen); + + if ((nRows <= 0) || (dataLen <= 0)) { return; } + char *pDestData = pDest->data; char *pSrcData = pSrc->data; - for (int i = 0; i < nRows; ++i) { + for (int32_t i = 0; i < nRows; ++i) { memRowSetType(pDestData, SMEM_ROW_DATA); memcpy(memRowDataBody(pDestData), pSrcData, dataRowLen(pSrcData)); pDestData = POINTER_SHIFT(pDestData, memRowTLen(pDestData)); pSrcData = POINTER_SHIFT(pSrcData, dataRowLen(pSrcData)); ++(*lenExpand); } - int32_t dataLen = htonl(pDest->dataLen); pDest->dataLen = htonl(dataLen + nRows * sizeof(uint8_t)); } +// Check SDataRow by comparing the SDataRow len and SSubmitBlk dataLen static bool walIsSDataRow(void *pBlkData, int nRows, int32_t dataLen) { - int32_t len = 0; + if ((nRows <= 0) || (dataLen <= 0)) { + return true; + } + int32_t len = 0, kvLen = 0; for (int i = 0; i < nRows; ++i) { len += dataRowLen(pBlkData); if (len > dataLen) { return false; } + + /** + * For SDataRow between version [2.1.5.0 and 2.1.6.X], it would never conflict. 
+ * For SKVRow between version [2.1.5.0 and 2.1.6.X], it may conflict in below scenario + * - with 1st type byte 0x01 and sversion 0x0101(257), thus do further check + */ + if (dataRowLen(pBlkData) == 257) { + SMemRow memRow = pBlkData; + SKVRow kvRow = memRowKvBody(memRow); + int nCols = kvRowNCols(kvRow); + uint16_t calcTsOffset = (uint16_t)(TD_MEM_ROW_KV_HEAD_SIZE + sizeof(SColIdx) * nCols); + uint16_t realTsOffset = (kvRowColIdx(kvRow))->offset; + if (calcTsOffset == realTsOffset) { + kvLen += memRowKvTLen(memRow); + } + } pBlkData = POINTER_SHIFT(pBlkData, dataRowLen(pBlkData)); } if (len != dataLen) { return false; } + if (kvLen == dataLen) { + return false; + } return true; } // for WAL SMemRow/SDataRow compatibility From 4227a8c19ca430e2fdfe840177d3c331b091bb4e Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Sat, 14 Aug 2021 09:41:39 +0800 Subject: [PATCH 126/185] update header --- src/wal/src/walWrite.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c index 7523369dc2..9590aba224 100644 --- a/src/wal/src/walWrite.c +++ b/src/wal/src/walWrite.c @@ -352,7 +352,7 @@ static bool walIsSDataRow(void *pBlkData, int nRows, int32_t dataLen) { SMemRow memRow = pBlkData; SKVRow kvRow = memRowKvBody(memRow); int nCols = kvRowNCols(kvRow); - uint16_t calcTsOffset = (uint16_t)(TD_MEM_ROW_KV_HEAD_SIZE + sizeof(SColIdx) * nCols); + uint16_t calcTsOffset = (uint16_t)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nCols); uint16_t realTsOffset = (kvRowColIdx(kvRow))->offset; if (calcTsOffset == realTsOffset) { kvLen += memRowKvTLen(memRow); From 7ed460bd035031a5ebc84b236d5b974d705ff385 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Sat, 14 Aug 2021 11:09:14 +0800 Subject: [PATCH 127/185] code optimization --- src/wal/src/walWrite.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c index 9590aba224..e991bf02aa 100644 --- a/src/wal/src/walWrite.c +++ b/src/wal/src/walWrite.c @@ -388,7 +388,7 @@ static int walSMemRowCheck(SWalHead *pHead) { } pBlk = (SSubmitBlk *)POINTER_SHIFT(pBlk, sizeof(SSubmitBlk) + dataLen); } - + ASSERT(nTotalRows >= 0); SWalHead *pWalHead = (SWalHead *)calloc(sizeof(SWalHead) + pHead->len + nTotalRows * sizeof(uint8_t), 1); if (pWalHead == NULL) { return -1; @@ -544,6 +544,8 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch if (0 != walSMemRowCheck(pHead)) { wError("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" PRIu64 " wver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, fileId, pHead->version, pWal->version, pHead->len, offset); + tfClose(tfd); + tfree(buffer); return TAOS_SYSTEM_ERROR(errno); } (*writeFp)(pVnode, pHead, TAOS_QTYPE_WAL, NULL); From 9976d7a0931a43e3ab7f2785acae24c09f6013a1 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 14 Aug 2021 11:10:36 +0800 Subject: [PATCH 128/185] Hotfix/sangshuduo/td 5702 taosdemo remove memop for master (#7359) * [TD-5702]: taosdemo remove memory operation. * [TD-5702]: taosdemo remove memory operation. * add remainderBufLen to check row data generation. * row data generation with remainder buffer length checking. 
* git checkout --patch hotfix/sangshuduo/TD-5702-taosdemo-remove-memop taosdemo.c Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 38 +++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 3b91de32b0..f3270a22f2 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -1340,8 +1340,8 @@ static char *rand_bool_str(){ static int32_t rand_bool(){ static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return g_randint[cursor] % 2; + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return g_randint[cursor % MAX_PREPARED_RAND] % 2; } static char *rand_tinyint_str() @@ -1356,8 +1356,8 @@ static int32_t rand_tinyint() { static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return g_randint[cursor] % 128; + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return g_randint[cursor % MAX_PREPARED_RAND] % 128; } static char *rand_smallint_str() @@ -1372,8 +1372,8 @@ static int32_t rand_smallint() { static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return g_randint[cursor] % 32767; + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return g_randint[cursor % MAX_PREPARED_RAND] % 32767; } static char *rand_int_str() @@ -1388,8 +1388,8 @@ static int32_t rand_int() { static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return g_randint[cursor]; + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return g_randint[cursor % MAX_PREPARED_RAND]; } static char *rand_bigint_str() @@ -1404,8 +1404,8 @@ static int64_t rand_bigint() { static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return g_randbigint[cursor]; + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return g_randbigint[cursor % MAX_PREPARED_RAND]; } static char *rand_float_str() @@ -1421,8 +1421,8 @@ static float rand_float() { static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return g_randfloat[cursor]; + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return g_randfloat[cursor % MAX_PREPARED_RAND]; } static char *demo_current_float_str() @@ -1437,8 +1437,9 @@ static float UNUSED_FUNC demo_current_float() { static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return (float)(9.8 + 0.04 * (g_randint[cursor] % 10) + g_randfloat[cursor]/1000000000); + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return (float)(9.8 + 0.04 * (g_randint[cursor % MAX_PREPARED_RAND] % 10) + + g_randfloat[cursor % MAX_PREPARED_RAND]/1000000000); } static char *demo_voltage_int_str() @@ -1453,8 +1454,8 @@ static int32_t UNUSED_FUNC demo_voltage_int() { static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return 215 + g_randint[cursor] % 10; + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return 215 + g_randint[cursor % MAX_PREPARED_RAND] % 10; } static char *demo_phase_float_str() { @@ -1467,8 +1468,9 @@ static char *demo_phase_float_str() { static float UNUSED_FUNC demo_phase_float(){ static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return (float)((115 + g_randint[cursor] % 10 + g_randfloat[cursor]/1000000000)/360); + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return (float)((115 + g_randint[cursor % MAX_PREPARED_RAND] % 10 + + g_randfloat[cursor % MAX_PREPARED_RAND]/1000000000)/360); } #if 0 From 9161244c22aa9584e3690c7158ac1ccce54e0d64 Mon Sep 17 00:00:00 2001 From: Linhe Huo Date: Sat, 14 Aug 2021 12:04:12 +0800 Subject: [PATCH 129/185] 
[TD-6078]: fix binary/nchar null error in node.js [ci skip] (#7366) --- src/connector/nodejs/nodetaos/cinterface.js | 26 ++++++++++++++-- src/connector/nodejs/test/testnchar.js | 33 +++++++++++++++++++++ 2 files changed, 57 insertions(+), 2 deletions(-) create mode 100644 src/connector/nodejs/test/testnchar.js diff --git a/src/connector/nodejs/nodetaos/cinterface.js b/src/connector/nodejs/nodetaos/cinterface.js index 03d27e5593..5ba2739c35 100644 --- a/src/connector/nodejs/nodetaos/cinterface.js +++ b/src/connector/nodejs/nodetaos/cinterface.js @@ -109,6 +109,24 @@ function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) return res; } +function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + + let currOffset = 0; + while (currOffset < data.length) { + let len = data.readIntLE(currOffset, 2); + let dataEntry = data.slice(currOffset + 2, currOffset + len + 2); //one entry in a row under a column; + if (dataEntry[0] == 255) { + res.push(null) + } else { + res.push(dataEntry.toString("utf-8")); + } + currOffset += nbytes; + } + return res; +} + function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); let res = []; @@ -117,7 +135,11 @@ function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) while (currOffset < data.length) { let len = data.readIntLE(currOffset, 2); let dataEntry = data.slice(currOffset + 2, currOffset + len + 2); //one entry in a row under a column; - res.push(dataEntry.toString("utf-8")); + if (dataEntry[0] == 255 && dataEntry[1] == 255) { + res.push(null) + } else { + res.push(dataEntry.toString("utf-8")); + } currOffset += nbytes; } return res; @@ -132,7 +154,7 @@ let convertFunctions = { [FieldTypes.C_BIGINT]: convertBigint, [FieldTypes.C_FLOAT]: convertFloat, [FieldTypes.C_DOUBLE]: convertDouble, - [FieldTypes.C_BINARY]: convertNchar, + [FieldTypes.C_BINARY]: convertBinary, [FieldTypes.C_TIMESTAMP]: convertTimestamp, [FieldTypes.C_NCHAR]: convertNchar } diff --git a/src/connector/nodejs/test/testnchar.js b/src/connector/nodejs/test/testnchar.js new file mode 100644 index 0000000000..68fad89c22 --- /dev/null +++ b/src/connector/nodejs/test/testnchar.js @@ -0,0 +1,33 @@ +const taos = require('../tdengine'); +var conn = taos.connect({ host: "localhost" }); +var c1 = conn.cursor(); + + +function checkData(data, row, col, expect) { + let checkdata = data[row][col]; + if (checkdata == expect) { + // console.log('check pass') + } + else { + console.log('check failed, expect ' + expect + ', but is ' + checkdata) + } +} + +c1.execute('drop database if exists testnodejsnchar') +c1.execute('create database testnodejsnchar') +c1.execute('use testnodejsnchar'); +c1.execute('create table tb (ts timestamp, value float, text binary(200))') +c1.execute("insert into tb values('2021-06-10 00:00:00', 24.7, '中文10000000000000000000000');") - +c1.execute('insert into tb values(1623254400150, 24.7, NULL);') +c1.execute('import into tb values(1623254400300, 24.7, "中文3中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000");') +sql = 'select * from tb;' + +console.log('*******************************************') + +c1.execute(sql); +data = c1.fetchall(); +console.log(data) +//check data about insert data +checkData(data, 0, 2, '中文10000000000000000000000') +checkData(data, 1, 2, null) +checkData(data, 
2, 2, '中文3中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000') \ No newline at end of file From f1c6cc11e64ce62ab17ca4a875dc218103e46ee6 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 14 Aug 2021 13:45:45 +0800 Subject: [PATCH 130/185] [td-255] Fix error found by regression test, and rename a macro. --- src/common/src/tglobal.c | 2 +- src/util/inc/tcompare.h | 8 ++++---- src/util/src/tcompare.c | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index b4ccdf6e3b..d880a73e84 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -79,7 +79,7 @@ int32_t tsCompressMsgSize = -1; // client int32_t tsMaxSQLStringLen = TSDB_MAX_ALLOWED_SQL_LEN; -int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_MAX_LEN; +int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_DEFAULT_LEN; int8_t tsTscEnableRecordSql = 0; // the maximum number of results for projection query on super table that are returned from diff --git a/src/util/inc/tcompare.h b/src/util/inc/tcompare.h index 624d92e36a..84346cc79c 100644 --- a/src/util/inc/tcompare.h +++ b/src/util/inc/tcompare.h @@ -22,10 +22,10 @@ extern "C" { #include "os.h" -#define TSDB_PATTERN_MATCH 0 -#define TSDB_PATTERN_NOMATCH 1 -#define TSDB_PATTERN_NOWILDCARDMATCH 2 -#define TSDB_PATTERN_STRING_MAX_LEN 100 +#define TSDB_PATTERN_MATCH 0 +#define TSDB_PATTERN_NOMATCH 1 +#define TSDB_PATTERN_NOWILDCARDMATCH 2 +#define TSDB_PATTERN_STRING_DEFAULT_LEN 100 #define FLT_COMPAR_TOL_FACTOR 4 #define FLT_EQUAL(_x, _y) (fabs((_x) - (_y)) <= (FLT_COMPAR_TOL_FACTOR * FLT_EPSILON)) diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index 5461b4b689..309aa55925 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -358,13 +358,13 @@ int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) { SPatternCompareInfo pInfo = {'%', '_'}; assert(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN * TSDB_NCHAR_SIZE); - wchar_t *pattern = calloc(varDataLen(pRight) + 1, sizeof(wchar_t)); + wchar_t *pattern = calloc(varDataLen(pRight) + 1, sizeof(wchar_t)); memcpy(pattern, varDataVal(pRight), varDataLen(pRight)); - assert(varDataLen(pRight) < 128); int32_t ret = WCSPatternMatch(pattern, varDataVal(pLeft), varDataLen(pLeft)/TSDB_NCHAR_SIZE, &pInfo); free(pattern); + return (ret == TSDB_PATTERN_MATCH) ? 
0 : 1; } From 8b2b278fb02d844c830509c56126bd85f4227e13 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sun, 15 Aug 2021 05:49:16 +0000 Subject: [PATCH 131/185] [TD-6087] merge distinct to master --- src/client/src/tscSQLParser.c | 54 ++++++++---- src/client/src/tscServer.c | 2 +- src/client/src/tscUtil.c | 4 +- src/common/inc/tglobal.h | 1 + src/common/src/tglobal.c | 13 +++ src/query/inc/qExecutor.h | 10 ++- src/query/src/qExecutor.c | 152 +++++++++++++++++++++------------- 7 files changed, 157 insertions(+), 79 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index fab7e8fa3b..f25ea70f32 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -1940,20 +1940,6 @@ static void addPrimaryTsColIntoResult(SQueryInfo* pQueryInfo, SSqlCmd* pCmd) { pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY; } -bool isValidDistinctSql(SQueryInfo* pQueryInfo) { - if (pQueryInfo == NULL) { - return false; - } - if ((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_QUERY) != TSDB_QUERY_TYPE_STABLE_QUERY - && (pQueryInfo->type & TSDB_QUERY_TYPE_TABLE_QUERY) != TSDB_QUERY_TYPE_TABLE_QUERY) { - return false; - } - if (tscNumOfExprs(pQueryInfo) == 1){ - return true; - } - return false; -} - static bool hasNoneUserDefineExpr(SQueryInfo* pQueryInfo) { size_t numOfExprs = taosArrayGetSize(pQueryInfo->exprList); for (int32_t i = 0; i < numOfExprs; ++i) { @@ -2043,9 +2029,11 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS const char* msg1 = "too many items in selection clause"; const char* msg2 = "functions or others can not be mixed up"; const char* msg3 = "not support query expression"; - const char* msg4 = "only support distinct one column or tag"; + const char* msg4 = "not support distinct mixed with proj/agg func"; const char* msg5 = "invalid function name"; - const char* msg6 = "_block_dist not support subquery, only support stable/table"; + const char* msg6 = "not support distinct mixed with join"; + const char* msg7 = "not support distinct mixed with groupby"; + const char* msg8 = "not support distinct in nest query"; // too many result columns not support order by in query if (taosArrayGetSize(pSelNodeList) > TSDB_MAX_COLUMNS) { @@ -2056,18 +2044,25 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES); } + bool hasDistinct = false; + bool hasAgg = false; size_t numOfExpr = taosArrayGetSize(pSelNodeList); + int32_t distIdx = -1; for (int32_t i = 0; i < numOfExpr; ++i) { int32_t outputIndex = (int32_t)tscNumOfExprs(pQueryInfo); tSqlExprItem* pItem = taosArrayGet(pSelNodeList, i); if (hasDistinct == false) { hasDistinct = (pItem->distinct == true); + distIdx = hasDistinct ? 
i : -1; } int32_t type = pItem->pNode->type; if (type == SQL_NODE_SQLFUNCTION) { + hasAgg = true; + if (hasDistinct) break; + pItem->pNode->functionId = isValidFunction(pItem->pNode->Expr.operand.z, pItem->pNode->Expr.operand.n); if (pItem->pNode->functionId == TSDB_FUNC_BLKINFO && taosArrayGetSize(pQueryInfo->pUpstream) > 0) { @@ -2108,10 +2103,22 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS } } + //TODO(dengyihao), refactor as function + //handle distinct func mixed with other func if (hasDistinct == true) { - if (!isValidDistinctSql(pQueryInfo) ) { + if (distIdx != 0 || hasAgg) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4); + } + if (joinQuery) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); } + if (pQueryInfo->groupbyExpr.numOfGroupCols != 0) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7); + } + if (pQueryInfo->pDownstream != NULL) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8); + } + pQueryInfo->distinct = true; } @@ -5512,6 +5519,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq const char* msg1 = "invalid column name"; const char* msg2 = "order by primary timestamp, first tag or groupby column in groupby clause allowed"; const char* msg3 = "invalid column in order by clause, only primary timestamp or first tag in groupby clause allowed"; + const char* msg4 = "orderby column must projected in subquery"; setDefaultOrderInfo(pQueryInfo); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); @@ -5627,6 +5635,17 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq // orderby ts query on super table if (tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { + bool found = false; + for (int32_t i = 0; i < tscNumOfExprs(pQueryInfo); ++i) { + SExprInfo* pExpr = tscExprGet(pQueryInfo, i); + if (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + found = true; + break; + } + } + if (!found && pQueryInfo->pDownstream) { + return invalidOperationMsg(pMsgBuf, msg4); + } addPrimaryTsColIntoResult(pQueryInfo, pCmd); } } @@ -8418,6 +8437,7 @@ static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pS pSub->pUdfInfo = pUdfInfo; pSub->udfCopy = true; + pSub->pDownstream = pQueryInfo; int32_t code = validateSqlNode(pSql, p, pSub); if (code != TSDB_CODE_SUCCESS) { return code; diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index e809fe3137..c0723e210a 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -409,7 +409,7 @@ static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) { if ((TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY | TSDB_QUERY_TYPE_SUBQUERY | TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) && !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_PROJECTION_QUERY)) || - (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_NEST_SUBQUERY))) { + (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_NEST_SUBQUERY)) || (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_STABLE_SUBQUERY) && pQueryInfo->distinct)) { // do nothing in case of super table subquery } else { pSql->retry += 1; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index f54827911a..f0a46d3e48 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3533,8 +3533,10 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t 
pNewQueryInfo->numOfTables = 0; pNewQueryInfo->pTableMetaInfo = NULL; pNewQueryInfo->bufLen = pQueryInfo->bufLen; - pNewQueryInfo->buf = malloc(pQueryInfo->bufLen); + + + pNewQueryInfo->distinct = pQueryInfo->distinct; if (pNewQueryInfo->buf == NULL) { terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; goto _error; diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index 25d1c90ec5..ed35168457 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -59,6 +59,7 @@ extern char tsLocale[]; extern char tsCharset[]; // default encode string extern int8_t tsEnableCoreFile; extern int32_t tsCompressMsgSize; +extern int32_t tsMaxNumOfDistinctResults; extern char tsTempDir[]; //query buffer management diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index d880a73e84..e4ff353787 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -89,6 +89,9 @@ int32_t tsMaxNumOfOrderedResults = 100000; // 10 ms for sliding time, the value will changed in case of time precision changed int32_t tsMinSlidingTime = 10; +// the maxinum number of distict query result +int32_t tsMaxNumOfDistinctResults = 1000 * 10000; + // 1 us for interval time range, changed accordingly int32_t tsMinIntervalTime = 1; @@ -546,6 +549,16 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + cfg.option = "maxNumOfDistinctRes"; + cfg.ptr = &tsMaxNumOfDistinctResults; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT; + cfg.minValue = 10*10000; + cfg.maxValue = 10000*10000; + cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + cfg.option = "numOfMnodes"; cfg.ptr = &tsNumOfMnodes; cfg.valType = TAOS_CFG_VTYPE_INT32; diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index d30971ab47..e8b7855769 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -510,13 +510,21 @@ typedef struct SStateWindowOperatorInfo { bool reptScan; } SStateWindowOperatorInfo ; +typedef struct SDistinctDataInfo { + int32_t index; + int32_t type; + int32_t bytes; +} SDistinctDataInfo; + typedef struct SDistinctOperatorInfo { SHashObj *pSet; SSDataBlock *pRes; bool recordNullVal; //has already record the null value, no need to try again int64_t threshold; int64_t outputCapacity; - int32_t colIndex; + int32_t totalBytes; + char* buf; + SArray* pDistinctDataInfo; } SDistinctOperatorInfo; struct SGlobalMerger; diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index bae62f1c82..cf77820b1f 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -44,6 +44,10 @@ #define SDATA_BLOCK_INITIALIZER (SDataBlockInfo) {{0}, 0} +#define MULTI_KEY_DELIM "-" + +#define HASH_CAPACITY_LIMIT 10000000 + #define TIME_WINDOW_COPY(_dst, _src) do {\ (_dst).skey = (_src).skey;\ (_dst).ekey = (_src).ekey;\ @@ -3581,6 +3585,7 @@ void setDefaultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *pInfo, i SResultRowInfo* pResultRowInfo = &pInfo->resultRowInfo; int64_t tid = 0; + pRuntimeEnv->keyBuf = realloc(pRuntimeEnv->keyBuf, sizeof(tid) + sizeof(int64_t) + POINTER_BYTES); SResultRow* pRow = doSetResultOutBufByKey(pRuntimeEnv, pResultRowInfo, tid, (char *)&tid, sizeof(tid), true, uid); for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) { @@ -6534,6 +6539,8 @@ static void destroyConditionOperatorInfo(void* param, int32_t numOfOutput) { static void destroyDistinctOperatorInfo(void* param, int32_t 
numOfOutput) { SDistinctOperatorInfo* pInfo = (SDistinctOperatorInfo*) param; taosHashCleanup(pInfo->pSet); + tfree(pInfo->buf); + taosArrayDestroy(pInfo->pDistinctDataInfo); pInfo->pRes = destroyOutputBuf(pInfo->pRes); } @@ -7075,6 +7082,52 @@ SOperatorInfo* createTagScanOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInf return pOperator; } +static bool initMultiDistinctInfo(SDistinctOperatorInfo *pInfo, SOperatorInfo* pOperator, SSDataBlock *pBlock) { + if (taosArrayGetSize(pInfo->pDistinctDataInfo) == pOperator->numOfOutput) { + // distinct info already inited + return true; + } + for (int i = 0; i < pOperator->numOfOutput; i++) { + pInfo->totalBytes += pOperator->pExpr[i].base.colBytes; + } + for (int i = 0; i < pOperator->numOfOutput; i++) { + int numOfBlock = (int)taosArrayGetSize(pBlock->pDataBlock); + assert(i < numOfBlock); + for (int j = 0; j < numOfBlock; j++) { + SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, j); + if (pColDataInfo->info.colId == pOperator->pExpr[i].base.resColId) { + SDistinctDataInfo item = {.index = j, .type = pColDataInfo->info.type, .bytes = pColDataInfo->info.bytes}; + taosArrayInsert(pInfo->pDistinctDataInfo, i, &item); + } + } + } + pInfo->totalBytes += (int32_t)strlen(MULTI_KEY_DELIM) * (pOperator->numOfOutput); + pInfo->buf = calloc(1, pInfo->totalBytes); + return taosArrayGetSize(pInfo->pDistinctDataInfo) == pOperator->numOfOutput ? true : false; +} +static void buildMultiDistinctKey(SDistinctOperatorInfo *pInfo, SSDataBlock *pBlock, int32_t rowId) { + char *p = pInfo->buf; + memset(p, 0, pInfo->totalBytes); + + for (int i = 0; i < taosArrayGetSize(pInfo->pDistinctDataInfo); i++) { + SDistinctDataInfo* pDistDataInfo = (SDistinctDataInfo *)taosArrayGet(pInfo->pDistinctDataInfo, i); + SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, pDistDataInfo->index); + char *val = ((char *)pColDataInfo->pData) + pColDataInfo->info.bytes * rowId; + if (isNull(val, pDistDataInfo->type)) { + p += pDistDataInfo->bytes; + continue; + } + if (IS_VAR_DATA_TYPE(pDistDataInfo->type)) { + memcpy(p, varDataVal(val), varDataLen(val)); + p += varDataLen(val); + } else { + memcpy(p, val, pDistDataInfo->bytes); + p += pDistDataInfo->bytes; + } + memcpy(p, MULTI_KEY_DELIM, strlen(MULTI_KEY_DELIM)); + p += strlen(MULTI_KEY_DELIM); + } +} static SSDataBlock* hashDistinct(void* param, bool* newgroup) { SOperatorInfo* pOperator = (SOperatorInfo*) param; @@ -7082,11 +7135,9 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) { return NULL; } - SDistinctOperatorInfo* pInfo = pOperator->info; SSDataBlock* pRes = pInfo->pRes; - pRes->info.rows = 0; SSDataBlock* pBlock = NULL; while(1) { @@ -7099,77 +7150,60 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) { pOperator->status = OP_EXEC_DONE; break; } - if (pInfo->colIndex == -1) { - for (int i = 0; i < taosArrayGetSize(pBlock->pDataBlock); i++) { - SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, i); - if (pColDataInfo->info.colId == pOperator->pExpr[0].base.resColId) { - pInfo->colIndex = i; - break; - } - } - } - if (pInfo->colIndex == -1) { + if (!initMultiDistinctInfo(pInfo, pOperator, pBlock)) { setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED); pOperator->status = OP_EXEC_DONE; - return NULL; - } - SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pInfo->colIndex); - - int16_t bytes = pColInfoData->info.bytes; - int16_t type = pColInfoData->info.type; - - // ensure the output buffer size - SColumnInfoData* pResultColInfoData = 
taosArrayGet(pRes->pDataBlock, 0); - if (pRes->info.rows + pBlock->info.rows > pInfo->outputCapacity) { - int32_t newSize = pRes->info.rows + pBlock->info.rows; - char* tmp = realloc(pResultColInfoData->pData, newSize * bytes); - if (tmp == NULL) { - return NULL; - } else { - pResultColInfoData->pData = tmp; + break; + } + + // ensure result output buf + if (pRes->info.rows + pBlock->info.rows > pInfo->outputCapacity) { + int32_t newSize = pRes->info.rows + pBlock->info.rows; + for (int i = 0; i < taosArrayGetSize(pRes->pDataBlock); i++) { + SColumnInfoData* pResultColInfoData = taosArrayGet(pRes->pDataBlock, i); + SDistinctDataInfo* pDistDataInfo = taosArrayGet(pInfo->pDistinctDataInfo, i); + char* tmp = realloc(pResultColInfoData->pData, newSize * pDistDataInfo->bytes); + if (tmp == NULL) { + return NULL; + } else { + pResultColInfoData->pData = tmp; + } + } pInfo->outputCapacity = newSize; - } - } - - for(int32_t i = 0; i < pBlock->info.rows; ++i) { - char* val = ((char*)pColInfoData->pData) + bytes * i; - if (isNull(val, type)) { - continue; - } - char* p = val; - size_t keyLen = 0; - if (IS_VAR_DATA_TYPE(pOperator->pExpr->base.colType)) { - tstr* var = (tstr*)(val); - p = var->data; - keyLen = varDataLen(var); - } else { - keyLen = bytes; - } - - int dummy; - void* res = taosHashGet(pInfo->pSet, p, keyLen); - if (res == NULL) { - taosHashPut(pInfo->pSet, p, keyLen, &dummy, sizeof(dummy)); - char* start = pResultColInfoData->pData + bytes * pInfo->pRes->info.rows; - memcpy(start, val, bytes); + } + for (int32_t i = 0; i < pBlock->info.rows; i++) { + buildMultiDistinctKey(pInfo, pBlock, i); + if (taosHashGet(pInfo->pSet, pInfo->buf, pInfo->totalBytes) == NULL) { + int32_t dummy; + taosHashPut(pInfo->pSet, pInfo->buf, pInfo->totalBytes, &dummy, sizeof(dummy)); + for (int j = 0; j < taosArrayGetSize(pRes->pDataBlock); j++) { + SDistinctDataInfo* pDistDataInfo = taosArrayGet(pInfo->pDistinctDataInfo, j); // distinct meta info + SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pDistDataInfo->index); //src + SColumnInfoData* pResultColInfoData = taosArrayGet(pRes->pDataBlock, j); // dist + char* val = ((char*)pColInfoData->pData) + pDistDataInfo->bytes * i; + char *start = pResultColInfoData->pData + pDistDataInfo->bytes * pInfo->pRes->info.rows; + memcpy(start, val, pDistDataInfo->bytes); + } pRes->info.rows += 1; - } - } + } + } if (pRes->info.rows >= pInfo->threshold) { break; } } - return (pInfo->pRes->info.rows > 0)? 
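/* The loop above reduces multi-column DISTINCT to a single composite key:
 * each bound column value is appended to a buffer with a delimiter, and the
 * row is emitted only if that key is absent from the hash set.  A minimal
 * self-contained sketch of the same idea follows; the string columns, the
 * linear "seen" store standing in for the hash set, and all names here are
 * simplifications for illustration, not the real operator code. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define DELIM     "-"
#define MAX_KEYS  64
#define KEY_BYTES 64

static char seen[MAX_KEYS][KEY_BYTES];
static int  nSeen = 0;

/* Return true the first time a key is observed, false on duplicates. */
static bool markSeen(const char *key) {
  for (int i = 0; i < nSeen; ++i) {
    if (strcmp(seen[i], key) == 0) return false;
  }
  if (nSeen < MAX_KEYS) {
    strncpy(seen[nSeen++], key, KEY_BYTES - 1);
  }
  return true;
}

int main(void) {
  /* Two "columns" over four rows; rows 0 and 1 are duplicates. */
  const char *colA[] = {"beijing", "beijing", "shanghai", "beijing"};
  const char *colB[] = {"d1",      "d1",      "d1",       "d2"};

  for (int r = 0; r < 4; ++r) {
    char key[KEY_BYTES];
    /* Composite key: value, delimiter, value, delimiter. */
    snprintf(key, sizeof(key), "%s" DELIM "%s" DELIM, colA[r], colB[r]);
    if (markSeen(key)) {
      printf("distinct row: %s %s\n", colA[r], colB[r]);
    }
  }
  return 0;
}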
pInfo->pRes:NULL; } SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { SDistinctOperatorInfo* pInfo = calloc(1, sizeof(SDistinctOperatorInfo)); - pInfo->colIndex = -1; - pInfo->threshold = 10000000; // distinct result threshold - pInfo->outputCapacity = 4096; - pInfo->pSet = taosHashInit(64, taosGetDefaultHashFunction(pExpr->base.colType), false, HASH_NO_LOCK); + + pInfo->totalBytes = 0; + pInfo->buf = NULL; + pInfo->threshold = tsMaxNumOfDistinctResults; // distinct result threshold + pInfo->outputCapacity = 4096; + pInfo->pDistinctDataInfo = taosArrayInit(numOfOutput, sizeof(SDistinctDataInfo)); + pInfo->pSet = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); pInfo->pRes = createOutputBuf(pExpr, numOfOutput, (int32_t) pInfo->outputCapacity); SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); From 3aa24b6b235c75627b7bf95cb0f4bfd6827bd6aa Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sun, 15 Aug 2021 13:52:07 +0800 Subject: [PATCH 132/185] Hotfix/sangshuduo/td 3197 taosdemo coverity scan for master (#7375) * [TD-3197]: taosdemo and taosdump coverity scan issues. * exit if read sample file failed. * fix converity scan issue. * fix coverity scan issue. * fix coverity scan memory leak. * fix resource leak reported by coverity scan. * fix taosdemo coverity scan issue. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index f3270a22f2..18f5877e09 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -5820,6 +5820,7 @@ static int32_t prepareStmtBindArrayByType( } else if (0 == strncasecmp(dataType, "INT", strlen("INT"))) { int32_t *bind_int = malloc(sizeof(int32_t)); + assert(bind_int); if (value) { *bind_int = atoi(value); @@ -5834,6 +5835,7 @@ static int32_t prepareStmtBindArrayByType( } else if (0 == strncasecmp(dataType, "BIGINT", strlen("BIGINT"))) { int64_t *bind_bigint = malloc(sizeof(int64_t)); + assert(bind_bigint); if (value) { *bind_bigint = atoll(value); @@ -5848,6 +5850,7 @@ static int32_t prepareStmtBindArrayByType( } else if (0 == strncasecmp(dataType, "FLOAT", strlen("FLOAT"))) { float *bind_float = malloc(sizeof(float)); + assert(bind_float); if (value) { *bind_float = (float)atof(value); @@ -5862,6 +5865,7 @@ static int32_t prepareStmtBindArrayByType( } else if (0 == strncasecmp(dataType, "DOUBLE", strlen("DOUBLE"))) { double *bind_double = malloc(sizeof(double)); + assert(bind_double); if (value) { *bind_double = atof(value); @@ -5876,6 +5880,7 @@ static int32_t prepareStmtBindArrayByType( } else if (0 == strncasecmp(dataType, "SMALLINT", strlen("SMALLINT"))) { int16_t *bind_smallint = malloc(sizeof(int16_t)); + assert(bind_smallint); if (value) { *bind_smallint = (int16_t)atoi(value); @@ -5890,6 +5895,7 @@ static int32_t prepareStmtBindArrayByType( } else if (0 == strncasecmp(dataType, "TINYINT", strlen("TINYINT"))) { int8_t *bind_tinyint = malloc(sizeof(int8_t)); + assert(bind_tinyint); if (value) { *bind_tinyint = (int8_t)atoi(value); @@ -5904,6 +5910,7 @@ static int32_t prepareStmtBindArrayByType( } else if (0 == strncasecmp(dataType, "BOOL", strlen("BOOL"))) { int8_t *bind_bool = malloc(sizeof(int8_t)); + assert(bind_bool); if (value) { if (strncasecmp(value, "true", 4)) { @@ -5923,6 +5930,7 @@ static int32_t prepareStmtBindArrayByType( } else if (0 == 
strncasecmp(dataType, "TIMESTAMP", strlen("TIMESTAMP"))) { int64_t *bind_ts2 = malloc(sizeof(int64_t)); + assert(bind_ts2); if (value) { if (strchr(value, ':') && strchr(value, '-')) { @@ -5937,6 +5945,7 @@ static int32_t prepareStmtBindArrayByType( if (TSDB_CODE_SUCCESS != taosParseTime( value, &tmpEpoch, strlen(value), timePrec, 0)) { + free(bind_ts2); errorPrint("Input %s, time format error!\n", value); return -1; } @@ -6261,7 +6270,7 @@ static int32_t prepareStbStmtBindTag( char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.len_of_binary); if (bindBuffer == NULL) { errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n", - __func__, __LINE__, g_args.len_of_binary); + __func__, __LINE__, DOUBLE_BUFF_LEN); return -1; } @@ -6293,7 +6302,7 @@ static int32_t prepareStbStmtBindRand( char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.len_of_binary); if (bindBuffer == NULL) { errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n", - __func__, __LINE__, g_args.len_of_binary); + __func__, __LINE__, DOUBLE_BUFF_LEN); return -1; } From 6a18e3d3d512ed6eea96f04b717abfb359139167 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sun, 15 Aug 2021 15:45:30 +0000 Subject: [PATCH 133/185] [TD-6097] Crash occurs when the select function in the subquery is used with group by --- src/client/src/tscSQLParser.c | 9 +++------ src/client/src/tscUtil.c | 1 - 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index c7db7b5d4b..4e5245742c 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -2643,7 +2643,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MICRO); } else if (info.precision == TSDB_TIME_PRECISION_MICRO) { tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MILLI); - } + } if (tickPerSec <= 0 || tickPerSec < TSDB_TICK_PER_SECOND(info.precision)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg10); @@ -2677,8 +2677,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col assert(ids.num == 1); tscColumnListInsert(pQueryInfo->colList, ids.ids[0].columnIndex, pExpr->base.uid, pSchema); } - tscInsertPrimaryTsSourceColumn(pQueryInfo, pExpr->base.uid); + return TSDB_CODE_SUCCESS; } @@ -3060,7 +3060,6 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col tscColumnListInsert(pQueryInfo->colList, index.columnIndex, uid, &s); } } - tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMetaInfo->pTableMeta->id.uid); return TSDB_CODE_SUCCESS; } @@ -8418,7 +8417,7 @@ static STableMeta* extractTempTableMetaFromSubquery(SQueryInfo* pUpstream) { n += 1; } - + info->numOfColumns = n; return meta; } @@ -8447,8 +8446,6 @@ static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pS return code; } - pSub->pDownstream = pQueryInfo; - // create dummy table meta info STableMetaInfo* pTableMetaInfo1 = calloc(1, sizeof(STableMetaInfo)); if (pTableMetaInfo1 == NULL) { diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index f0a46d3e48..c2cf24520d 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1503,7 +1503,6 @@ void tscFreeSqlObj(SSqlObj* pSql) { tscFreeSqlResult(pSql); tscResetSqlCmd(pCmd, false); - memset(pCmd->payload, 0, (size_t)pCmd->allocSize); tfree(pCmd->payload); pCmd->allocSize = 0; From d8897baa9cb3b9e8024889df16fdcbe6de697970 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Mon, 16 
Aug 2021 08:51:02 +0800 Subject: [PATCH 134/185] [TD-5998]:_block_dist() only support tables, not subqueries --- src/client/src/tscSQLParser.c | 6 ++++++ tests/script/general/compute/block_dist.sim | 6 +++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 6df724881e..7b936487f0 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -2045,6 +2045,7 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS const char* msg3 = "not support query expression"; const char* msg4 = "only support distinct one column or tag"; const char* msg5 = "invalid function name"; + const char* msg6 = "_block_dist not support subquery, only support stable/table"; // too many result columns not support order by in query if (taosArrayGetSize(pSelNodeList) > TSDB_MAX_COLUMNS) { @@ -2068,6 +2069,11 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS int32_t type = pItem->pNode->type; if (type == SQL_NODE_SQLFUNCTION) { pItem->pNode->functionId = isValidFunction(pItem->pNode->Expr.operand.z, pItem->pNode->Expr.operand.n); + + if (pItem->pNode->functionId == TSDB_FUNC_BLKINFO && taosArrayGetSize(pQueryInfo->pUpstream) > 0) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); + } + SUdfInfo* pUdfInfo = NULL; if (pItem->pNode->functionId < 0) { pUdfInfo = isValidUdf(pQueryInfo->pUdfInfo, pItem->pNode->Expr.operand.z, pItem->pNode->Expr.operand.n); diff --git a/tests/script/general/compute/block_dist.sim b/tests/script/general/compute/block_dist.sim index 51cf903654..5343c1db28 100644 --- a/tests/script/general/compute/block_dist.sim +++ b/tests/script/general/compute/block_dist.sim @@ -84,6 +84,10 @@ if $rows != 1 then return -1 endi +print ============== TD-5998 +sql_error select _block_dist() from (select * from $nt) +sql_error select _block_dist() from (select * from $mt) + print =============== clear sql drop database $db sql show databases @@ -91,4 +95,4 @@ if $rows != 0 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT From 43bd6c65871c498842b940386facd421b2335aac Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Mon, 16 Aug 2021 09:37:13 +0800 Subject: [PATCH 135/185] [ci skip] update daily performance script --- tests/pytest/tools/taosdemoPerformance.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/pytest/tools/taosdemoPerformance.py b/tests/pytest/tools/taosdemoPerformance.py index 4a5abd49d8..1d28a2708f 100644 --- a/tests/pytest/tools/taosdemoPerformance.py +++ b/tests/pytest/tools/taosdemoPerformance.py @@ -145,26 +145,26 @@ class taosdemoPerformace: binPath = buildPath + "/build/bin/" os.system( - "%staosdemo -f %s > taosdemoperf.txt 2>&1" % + "%staosdemo -f %s > /dev/null 2>&1" % (binPath, self.generateJson())) self.createTableTime = self.getCMDOutput( - "grep 'Spent' taosdemoperf.txt | awk 'NR==1{print $2}'") + "grep 'Spent' insert_res.txt | awk 'NR==1{print $2}'") self.insertRecordsTime = self.getCMDOutput( - "grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $2}'") + "grep 'Spent' insert_res.txt | awk 'NR==2{print $2}'") self.recordsPerSecond = self.getCMDOutput( - "grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $16}'") + "grep 'Spent' insert_res.txt | awk 'NR==2{print $16}'") self.commitID = self.getCMDOutput("git rev-parse --short HEAD") delay = self.getCMDOutput( - "grep 'delay' 
taosdemoperf.txt | awk '{print $4}'") + "grep 'delay' insert_res.txt | awk '{print $4}'") self.avgDelay = delay[:-4] delay = self.getCMDOutput( - "grep 'delay' taosdemoperf.txt | awk '{print $6}'") + "grep 'delay' insert_res.txt | awk '{print $6}'") self.maxDelay = delay[:-4] delay = self.getCMDOutput( - "grep 'delay' taosdemoperf.txt | awk '{print $8}'") + "grep 'delay' insert_res.txt | awk '{print $8}'") self.minDelay = delay[:-3] - os.system("[ -f taosdemoperf.txt ] && rm taosdemoperf.txt") + os.system("[ -f insert_res.txt ] && rm insert_res.txt") def createTablesAndStoreData(self): cursor = self.conn2.cursor() @@ -185,7 +185,7 @@ class taosdemoPerformace: cursor.close() cursor1 = self.conn.cursor() - # cursor1.execute("drop database if exists %s" % self.insertDB) + cursor1.execute("drop database if exists %s" % self.insertDB) cursor1.close() if __name__ == '__main__': From 2db8feb7448e299e14a25f5247cf7f4942b1f4b9 Mon Sep 17 00:00:00 2001 From: wpan Date: Mon, 16 Aug 2021 13:23:54 +0800 Subject: [PATCH 136/185] fix binary interp function --- src/query/src/qAggMain.c | 34 ++++++++++++++++++++++++++++++---- src/query/src/qExecutor.c | 10 ++++++++++ 2 files changed, 40 insertions(+), 4 deletions(-) diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 14d3f4e417..1587746587 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -3790,11 +3790,37 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) { static void interp_function(SQLFunctionCtx *pCtx) { // at this point, the value is existed, return directly if (pCtx->size > 0) { - // impose the timestamp check - TSKEY key = GET_TS_DATA(pCtx, 0); + bool ascQuery = (pCtx->order == TSDB_ORDER_ASC); + TSKEY key; + char *pData; + int32_t typedData = 0; + + if (ascQuery) { + key = GET_TS_DATA(pCtx, 0); + pData = GET_INPUT_DATA(pCtx, 0); + } else { + key = pCtx->start.key; + if (key == INT64_MIN) { + key = GET_TS_DATA(pCtx, 0); + pData = GET_INPUT_DATA(pCtx, 0); + } else { + if (!(IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL)) { + pData = pCtx->start.ptr; + } else { + typedData = 1; + pData = (char *)&pCtx->start.val; + } + } + } + + //if (key == pCtx->startTs && (ascQuery || !(IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL))) { if (key == pCtx->startTs) { - char *pData = GET_INPUT_DATA(pCtx, 0); - assignVal(pCtx->pOutput, pData, pCtx->inputBytes, pCtx->inputType); + if (typedData) { + SET_TYPED_DATA(pCtx->pOutput, pCtx->inputType, *(double *)pData); + } else { + assignVal(pCtx->pOutput, pData, pCtx->inputBytes, pCtx->inputType); + } + SET_VAL(pCtx, 1, 1); } else { interp_function_impl(pCtx); diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index bae62f1c82..a6a85b370e 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1327,6 +1327,16 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, pCtx[k].end.key = curTs; pCtx[k].end.val = v2; + + if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) { + if (prevRowIndex == -1) { + pCtx[k].start.ptr = (char *)pRuntimeEnv->prevRow[index]; + } else { + pCtx[k].start.ptr = (char *)pColInfo->pData + prevRowIndex * pColInfo->info.bytes; + } + + pCtx[k].end.ptr = (char *)pColInfo->pData + curRowIndex * pColInfo->info.bytes; + } } } else if (functionId == TSDB_FUNC_TWA) { SPoint point1 = (SPoint){.key = prevTs, .val = &v1}; From 7e56ba09e9624a2530efd93ee59efc43c9fd4047 Mon Sep 17 00:00:00 2001 From: 
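The window-boundary handling above ultimately rests on plain two-point linear interpolation between the previous and the current row. A minimal standalone sketch of that formula, with hypothetical names and plain doubles in place of the typed SPoint/SQLFunctionCtx machinery:

#include <stdio.h>

/* Interpolate a value at time ts between the samples (prevTs, prevVal) and
 * (curTs, curVal); assumes prevTs < curTs. */
static double interpAt(long long prevTs, double prevVal,
                       long long curTs,  double curVal,
                       long long ts) {
  double ratio = (double)(ts - prevTs) / (double)(curTs - prevTs);
  return prevVal + (curVal - prevVal) * ratio;
}

int main(void) {
  /* Samples at t=1000 (10.0) and t=2000 (30.0); the value at t=1250 is 15.0. */
  printf("%.2f\n", interpAt(1000, 10.0, 2000, 30.0, 1250));
  return 0;
}

For binary/nchar columns no such midpoint exists, which is why the hunks above carry the raw start/end pointers for those types instead of interpolated numeric values.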
liuyq-617 Date: Mon, 16 Aug 2021 13:26:59 +0800 Subject: [PATCH 137/185] obtain coredump execfn in script[ci skip] --- tests/test-all.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test-all.sh b/tests/test-all.sh index 6e7963e787..8dd1ade9be 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -24,9 +24,9 @@ function stopTaosd { function dohavecore(){ corefile=`find $corepath -mmin 1` - core_file=`echo $corefile|cut -d " " -f2` - proc=`echo $corefile|cut -d "_" -f3` if [ -n "$corefile" ];then + core_file=`echo $corefile|cut -d " " -f2` + proc=`file $core_file|awk -F "execfn:" '/execfn:/{print $2}'|tr -d \' |awk '{print $1}'|tr -d \,` echo 'taosd or taos has generated core' rm case.log if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]] && [[ $1 == 1 ]]; then @@ -46,7 +46,7 @@ function dohavecore(){ fi fi if [[ $1 == 1 ]];then - echo '\n'|gdb /usr/local/taos/bin/$proc $core_file -ex "bt 10" -ex quit + echo '\n'|gdb $proc $core_file -ex "bt 10" -ex quit exit 8 fi fi From 7759df3b0b5389857ce5461d4d804c2e7f70c6fc Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Mon, 16 Aug 2021 15:29:38 +0800 Subject: [PATCH 138/185] [TD-6054]: Filtered by tag with nchar value not as expected --- src/util/src/tcompare.c | 22 ++++++++- tests/pytest/query/filterWithinMultiNchar.py | 51 ++++++++++++++++++++ 2 files changed, 71 insertions(+), 2 deletions(-) create mode 100644 tests/pytest/query/filterWithinMultiNchar.py diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index 309aa55925..921da82d44 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -199,7 +199,16 @@ int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) { if (len1 != len2) { return len1 > len2? 1:-1; } else { - int32_t ret = wcsncmp(varDataVal(pLeft), varDataVal(pRight), len1/TSDB_NCHAR_SIZE); + char *pLeftTerm = (char *)tcalloc(len1 + 1, sizeof(char)); + char *pRightTerm = (char *)tcalloc(len1 + 1, sizeof(char)); + memcpy(pLeftTerm, varDataVal(pLeft), len1); + memcpy(pRightTerm, varDataVal(pRight), len2); + + int32_t ret = wcsncmp((wchar_t*) pLeftTerm, (wchar_t*) pRightTerm, len1/TSDB_NCHAR_SIZE); + + tfree(pLeftTerm); + tfree(pRightTerm); + if (ret == 0) { return 0; } else { @@ -510,7 +519,16 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) { return t1->len > t2->len? 1:-1; } - int32_t ret = wcsncmp((wchar_t*) t1->data, (wchar_t*) t2->data, t2->len/TSDB_NCHAR_SIZE); + char *t1_term = (char *)tcalloc(t1->len + 1, sizeof(char)); + char *t2_term = (char *)tcalloc(t2->len + 1, sizeof(char)); + memcpy(t1_term, t1->data, t1->len); + memcpy(t2_term, t2->data, t2->len); + + int32_t ret = wcsncmp((wchar_t*) t1_term, (wchar_t*) t2_term, t2->len/TSDB_NCHAR_SIZE); + + tfree(t1_term); + tfree(t2_term); + if (ret == 0) { return ret; } diff --git a/tests/pytest/query/filterWithinMultiNchar.py b/tests/pytest/query/filterWithinMultiNchar.py new file mode 100644 index 0000000000..15fcf4e24b --- /dev/null +++ b/tests/pytest/query/filterWithinMultiNchar.py @@ -0,0 +1,51 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + print("==============step1") + tdSql.execute( + "create stable t6 (ts timestamp,val int,flow nchar(36)) tags(dev nchar(36),dev1 nchar(36),dev2 nchar(36))") + tdSql.execute("insert into t6004 using t6 (dev,dev1,dev2) tags ('b50c79bc-b102-48e6-bda1-4212263e46d0','b50c79bc-b102-48e6-bda1-4212263e46d0', 'b50c79bc-b102-48e6-bda1-4212263e46d0') values(now,1,'b50c79bc-b102-48e6-bda1-4212263e46d0')") + + + print("==============step2") + tdSql.query("select * from t6 where dev='b50c79bc-b102-48e6-bda1-4212263e46d0'") + tdSql.checkRows(1) + + tdSql.query("select * from t6 where dev1='b50c79bc-b102-48e6-bda1-4212263e46d0'") + tdSql.checkRows(1) + + tdSql.query("select * from t6 where dev2='b50c79bc-b102-48e6-bda1-4212263e46d0'") + tdSql.checkRows(1) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From 28a50c47b96dcd3dcc684099050553349b443791 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Mon, 16 Aug 2021 15:29:38 +0800 Subject: [PATCH 139/185] [TD-6054]: Filtered by tag with nchar value not as expected --- src/util/src/tcompare.c | 22 ++++++++- tests/pytest/query/filterWithinMultiNchar.py | 51 ++++++++++++++++++++ 2 files changed, 71 insertions(+), 2 deletions(-) create mode 100644 tests/pytest/query/filterWithinMultiNchar.py diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index 354e7899c2..3619dad83b 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -129,7 +129,16 @@ int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) { if (len1 != len2) { return len1 > len2? 1:-1; } else { - int32_t ret = wcsncmp(varDataVal(pLeft), varDataVal(pRight), len1/TSDB_NCHAR_SIZE); + char *pLeftTerm = (char *)tcalloc(len1 + 1, sizeof(char)); + char *pRightTerm = (char *)tcalloc(len1 + 1, sizeof(char)); + memcpy(pLeftTerm, varDataVal(pLeft), len1); + memcpy(pRightTerm, varDataVal(pRight), len2); + + int32_t ret = wcsncmp((wchar_t*) pLeftTerm, (wchar_t*) pRightTerm, len1/TSDB_NCHAR_SIZE); + + tfree(pLeftTerm); + tfree(pRightTerm); + if (ret == 0) { return 0; } else { @@ -410,7 +419,16 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) { return t1->len > t2->len? 
1:-1; } - int32_t ret = wcsncmp((wchar_t*) t1->data, (wchar_t*) t2->data, t2->len/TSDB_NCHAR_SIZE); + char *t1_term = (char *)tcalloc(t1->len + 1, sizeof(char)); + char *t2_term = (char *)tcalloc(t2->len + 1, sizeof(char)); + memcpy(t1_term, t1->data, t1->len); + memcpy(t2_term, t2->data, t2->len); + + int32_t ret = wcsncmp((wchar_t*) t1_term, (wchar_t*) t2_term, t2->len/TSDB_NCHAR_SIZE); + + tfree(t1_term); + tfree(t2_term); + if (ret == 0) { return ret; } diff --git a/tests/pytest/query/filterWithinMultiNchar.py b/tests/pytest/query/filterWithinMultiNchar.py new file mode 100644 index 0000000000..15fcf4e24b --- /dev/null +++ b/tests/pytest/query/filterWithinMultiNchar.py @@ -0,0 +1,51 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + print("==============step1") + tdSql.execute( + "create stable t6 (ts timestamp,val int,flow nchar(36)) tags(dev nchar(36),dev1 nchar(36),dev2 nchar(36))") + tdSql.execute("insert into t6004 using t6 (dev,dev1,dev2) tags ('b50c79bc-b102-48e6-bda1-4212263e46d0','b50c79bc-b102-48e6-bda1-4212263e46d0', 'b50c79bc-b102-48e6-bda1-4212263e46d0') values(now,1,'b50c79bc-b102-48e6-bda1-4212263e46d0')") + + + print("==============step2") + tdSql.query("select * from t6 where dev='b50c79bc-b102-48e6-bda1-4212263e46d0'") + tdSql.checkRows(1) + + tdSql.query("select * from t6 where dev1='b50c79bc-b102-48e6-bda1-4212263e46d0'") + tdSql.checkRows(1) + + tdSql.query("select * from t6 where dev2='b50c79bc-b102-48e6-bda1-4212263e46d0'") + tdSql.checkRows(1) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From 010d416f4a7ad5be907fa8672653f6a53414d99f Mon Sep 17 00:00:00 2001 From: wpan Date: Mon, 16 Aug 2021 16:07:43 +0800 Subject: [PATCH 140/185] fix crash issue --- src/client/src/tscUtil.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index f54827911a..a193921af2 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3831,6 +3831,9 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) { pNew->sqlstr = strdup(pSql->sqlstr); pNew->fp = tscSubqueryCompleteCallback; pNew->maxRetry = pSql->maxRetry; + + pNew->cmd.resColumnId = TSDB_RES_COL_ID; + tsem_init(&pNew->rspSem, 0, 0); SRetrieveSupport* ps = calloc(1, sizeof(SRetrieveSupport)); // todo use object id From d7eb5a22fec0e02884a8d1b93cb2f68a9dc7fa18 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 16 Aug 2021 16:42:27 +0800 Subject: [PATCH 141/185] [TD-6035]add connector test in CI --- Jenkinsfile | 21 ++++++++++++++++++++- tests/gotest/case001/case001.sh | 5 +++-- tests/test-all.sh | 8 ++++---- 3 files changed, 27 insertions(+), 7 deletions(-) diff --git a/Jenkinsfile 
b/Jenkinsfile index 0eb9a1aa95..882d224407 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -160,7 +160,6 @@ pipeline { skipbuild='2' skipbuild=sh(script: "git log -2 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]' && echo 1 || echo 2", returnStdout:true) println skipbuild - } sh''' rm -rf ${WORKSPACE}.tes @@ -225,6 +224,26 @@ pipeline { steps { timeout(time: 55, unit: 'MINUTES'){ pre_test() + sh ''' + rm -rf /var/lib/taos/* + rm -rf /var/log/taos/* + nohup taosd >/dev/null & + sleep 10 + ''' + sh ''' + cd ${WKC}/tests/examples/nodejs + npm install td2.0-connector > /dev/null 2>&1 + node nodejsChecker.js host=localhost + ''' + sh ''' + cd ${WKC}/tests/examples/C#/taosdemo + mcs -out:taosdemo *.cs > /dev/null 2>&1 + echo '' |./taosdemo + ''' + sh ''' + cd ${WKC}/tests/gotest + bash batchtest.sh + ''' sh ''' cd ${WKC}/tests ./test-all.sh b1fq diff --git a/tests/gotest/case001/case001.sh b/tests/gotest/case001/case001.sh index 831e9f83ac..94e5bb44e0 100644 --- a/tests/gotest/case001/case001.sh +++ b/tests/gotest/case001/case001.sh @@ -15,7 +15,8 @@ script_dir="$(dirname $(readlink -f $0))" ###### step 3: start build cd $script_dir rm -f go.* -go mod init demotest -go build +go mod init demotest > /dev/null 2>&1 +go mod tidy > /dev/null 2>&1 +go build > /dev/null 2>&1 sleep 1s ./demotest -h $1 -p $2 diff --git a/tests/test-all.sh b/tests/test-all.sh index 6e7963e787..c18951b8ac 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -12,7 +12,7 @@ IN_TDINTERNAL="community" function stopTaosd { echo "Stop taosd" - sudo systemctl stop taosd + sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail' PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` while [ -n "$PID" ] do @@ -24,9 +24,9 @@ function stopTaosd { function dohavecore(){ corefile=`find $corepath -mmin 1` - core_file=`echo $corefile|cut -d " " -f2` - proc=`echo $corefile|cut -d "_" -f3` if [ -n "$corefile" ];then + core_file=`echo $corefile|cut -d " " -f2` + proc=`file $core_file|awk -F "execfn:" '/execfn:/{print $2}'|tr -d \' |awk '{print $1}'|tr -d \,` echo 'taosd or taos has generated core' rm case.log if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]] && [[ $1 == 1 ]]; then @@ -46,7 +46,7 @@ function dohavecore(){ fi fi if [[ $1 == 1 ]];then - echo '\n'|gdb /usr/local/taos/bin/$proc $core_file -ex "bt 10" -ex quit + echo '\n'|gdb $proc $core_file -ex "bt 10" -ex quit exit 8 fi fi From 01f42064f26d4c4a4c2d470d1b43f1bbf893f5f8 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 16 Aug 2021 16:58:43 +0800 Subject: [PATCH 142/185] make ci happy From 3f62a65b67c7723f6aee41eeb00917a342970670 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 16 Aug 2021 17:06:05 +0800 Subject: [PATCH 143/185] test --- tests/test-all.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test-all.sh b/tests/test-all.sh index c18951b8ac..2578030165 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -12,7 +12,7 @@ IN_TDINTERNAL="community" function stopTaosd { echo "Stop taosd" - sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail' + sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail ' PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` while [ -n "$PID" ] do From a6011d55a20fe9542480ccb66635a01d4d8f214c Mon Sep 17 00:00:00 2001 From: tomchon Date: Mon, 16 Aug 2021 18:03:17 +0800 Subject: [PATCH 144/185] change version number --- cmake/version.inc | 2 +- snap/snapcraft.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmake/version.inc 
b/cmake/version.inc index d34080aa43..84ef060196 100755 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -4,7 +4,7 @@ PROJECT(TDengine) IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "2.0.20.12") + SET(TD_VER_NUMBER "2.0.20.13") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index 7538765d2c..b08ae53ecd 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -1,6 +1,6 @@ name: tdengine base: core18 -version: '2.0.20.12' +version: '2.0.20.13' icon: snap/gui/t-dengine.svg summary: an open-source big data platform designed and optimized for IoT. description: | @@ -72,7 +72,7 @@ parts: - usr/bin/taosd - usr/bin/taos - usr/bin/taosdemo - - usr/lib/libtaos.so.2.0.20.12 + - usr/lib/libtaos.so.2.0.20.13 - usr/lib/libtaos.so.1 - usr/lib/libtaos.so From 296376df1fb92f5bbcd137d759c00689d56215e4 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Mon, 16 Aug 2021 18:36:09 +0800 Subject: [PATCH 145/185] [TD-6086]:num of tags taken from output cols instead of groupby expr --- src/client/src/tscLocalMerge.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index 97d52cc684..5fe020bb33 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -396,7 +396,16 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde if (pQueryInfo->fillType != TSDB_FILL_NONE) { SFillColInfo* pFillCol = createFillColInfo(pQueryInfo); - pReducer->pFillInfo = taosCreateFillInfo(pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols, + // support sql like: select selective_function, tag1... where ... group by tag3... fill(not fill none) + // the group by expr columns and select tags are different + int32_t numOfCols = tscNumOfFields(pQueryInfo); + int32_t numOfTags = 0; + for (int32_t i = 0; i < numOfCols; ++i) { + if (TSDB_COL_IS_TAG(pFillCol[i].flag)) { + numOfTags++; + } + } + pReducer->pFillInfo = taosCreateFillInfo(pQueryInfo->order.order, revisedSTime, numOfTags, 4096, (int32_t)pQueryInfo->fieldsInfo.numOfOutput, pQueryInfo->interval.sliding, pQueryInfo->interval.slidingUnit, tinfo.precision, pQueryInfo->fillType, pFillCol, pSql); } From abff364c1e204c9040447f53179023f3033e3178 Mon Sep 17 00:00:00 2001 From: tomchon Date: Mon, 16 Aug 2021 19:57:56 +0800 Subject: [PATCH 146/185] change version number --- cmake/version.inc | 2 +- snap/snapcraft.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmake/version.inc b/cmake/version.inc index ffceecf492..12dc2b1b16 100755 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -4,7 +4,7 @@ PROJECT(TDengine) IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "2.1.6.0") + SET(TD_VER_NUMBER "2.1.7.0") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index c04fa3298b..260af6f21b 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -1,6 +1,6 @@ name: tdengine base: core18 -version: '2.1.6.0' +version: '2.1.7.0' icon: snap/gui/t-dengine.svg summary: an open-source big data platform designed and optimized for IoT. 
description: | @@ -72,7 +72,7 @@ parts: - usr/bin/taosd - usr/bin/taos - usr/bin/taosdemo - - usr/lib/libtaos.so.2.1.6.0 + - usr/lib/libtaos.so.2.1.7.0 - usr/lib/libtaos.so.1 - usr/lib/libtaos.so From 2bd11ebfd66bf9525124b4c316674b365b2a1c11 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Tue, 17 Aug 2021 09:12:16 +0800 Subject: [PATCH 147/185] [TD-6086]:add test case --- tests/script/general/parser/function.sim | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/script/general/parser/function.sim b/tests/script/general/parser/function.sim index 65058333fb..c00f6fc898 100644 --- a/tests/script/general/parser/function.sim +++ b/tests/script/general/parser/function.sim @@ -310,6 +310,12 @@ if $rows != 6 then return -1 endi +print =============================> TD-6086 +sql create stable td6086st(ts timestamp, d double) tags(t nchar(50)); +sql create table td6086ct1 using td6086st tags("ct1"); +sql create table td6086ct2 using td6086st tags("ct2"); +sql SELECT LAST(d),t FROM td6086st WHERE tbname in ('td6086ct1', 'td6086ct2') and ts>="2019-07-30 00:00:00" and ts<="2021-08-31 00:00:00" interval(1800s) fill(prev) GROUP BY tbname; + print ==================> td-2624 sql create table tm2(ts timestamp, k int, b binary(12)); sql insert into tm2 values('2011-01-02 18:42:45.326', -1,'abc'); From 497159f3aa53b605585378fc4d5804a573cf1a4f Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 09:40:56 +0800 Subject: [PATCH 148/185] fix subquery reparse sql issue --- src/client/src/tscSubquery.c | 13 +++++++++---- src/client/src/tscUtil.c | 18 ++++++++++++------ 2 files changed, 21 insertions(+), 10 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 553d2c0804..04b10caa4e 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2735,18 +2735,23 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, tstrerror(code), pParentSql->retry); - code = tsParseSql(pParentSql, true); + SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + + code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; } if (code != TSDB_CODE_SUCCESS) { - pParentSql->res.code = code; - tscAsyncResultOnError(pParentSql); + userSql->res.code = code; + tscAsyncResultOnError(userSql); return; } - executeQuery(pParentSql, pQueryInfo); + doCleanupSubqueries(userSql, userSql->subState.numOfSub); + + pQueryInfo = tscGetQueryInfo(&userSql->cmd); + executeQuery(userSql, pQueryInfo); } else { (*pParentSql->fp)(pParentSql->param, pParentSql, pParentSql->res.code); } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 8c2165cadf..ae78c7a81e 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3771,19 +3771,24 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, tstrerror(code), pParentSql->retry); - code = tsParseSql(pParentSql, true); + SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + + code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; } if (code != TSDB_CODE_SUCCESS) { - pParentSql->res.code = code; - tscAsyncResultOnError(pParentSql); + userSql->res.code = code; + tscAsyncResultOnError(userSql); return; } - SQueryInfo *pQueryInfo = tscGetQueryInfo(pParentCmd); 
- executeQuery(pParentSql, pQueryInfo); + SQueryInfo *pQueryInfo = tscGetQueryInfo(&userSql->cmd); + + doCleanupSubqueries(userSql, userSql->subState.numOfSub); + + executeQuery(userSql, pQueryInfo); return; } @@ -3805,8 +3810,9 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) { } if (taosArrayGetSize(pQueryInfo->pUpstream) > 0) { // nest query. do execute it firstly + assert(pSql->subState.numOfSub == 0); pSql->subState.numOfSub = (int32_t) taosArrayGetSize(pQueryInfo->pUpstream); - + assert(pSql->pSubs == NULL); pSql->pSubs = calloc(pSql->subState.numOfSub, POINTER_BYTES); pSql->subState.states = calloc(pSql->subState.numOfSub, sizeof(int8_t)); code = pthread_mutex_init(&pSql->subState.mutex, NULL); From 762284c409276108e781c3599b3f55fa973eb00b Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 09:52:39 +0800 Subject: [PATCH 149/185] fix bug --- src/client/inc/tscSubquery.h | 3 +++ src/client/src/tscSubquery.c | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/client/inc/tscSubquery.h b/src/client/inc/tscSubquery.h index f0349c2b3d..f21ce67e9e 100644 --- a/src/client/inc/tscSubquery.h +++ b/src/client/inc/tscSubquery.h @@ -50,6 +50,9 @@ void tscUnlockByThread(int64_t *lockedBy); int tsInsertInitialCheck(SSqlObj *pSql); +void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs); + + #ifdef __cplusplus } #endif diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 04b10caa4e..52bf5d9bfe 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2034,7 +2034,7 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) { tscAsyncResultOnError(pSql); } -static void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) { +void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) { assert(numOfSubs <= pSql->subState.numOfSub && numOfSubs >= 0); for(int32_t i = 0; i < numOfSubs; ++i) { From 0a969d042c7dfaed35f130d2a77168b0a4870238 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 10:06:01 +0800 Subject: [PATCH 150/185] fix bug --- src/client/src/tscSubquery.c | 1 + src/client/src/tscUtil.c | 1 + 2 files changed, 2 insertions(+) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 52bf5d9bfe..32955bef3c 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2749,6 +2749,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO } doCleanupSubqueries(userSql, userSql->subState.numOfSub); + userSql->subState.numOfSub = 0; pQueryInfo = tscGetQueryInfo(&userSql->cmd); executeQuery(userSql, pQueryInfo); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index ae78c7a81e..098204605a 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -4934,3 +4934,4 @@ void tscRemoveTableMetaBuf(STableMetaInfo* pTableMetaInfo, uint64_t id) { taosHashRemove(tscTableMetaMap, fname, len); tscDebug("0x%"PRIx64" remove table meta %s, numOfRemain:%d", id, fname, (int32_t) taosHashGetSize(tscTableMetaMap)); } + \ No newline at end of file From af641899eb93157f586ab5e3bb47c602d8027822 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 10:07:24 +0800 Subject: [PATCH 151/185] fix bug --- src/client/src/tscUtil.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 098204605a..75473c7baf 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3787,6 +3787,7 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { 
SQueryInfo *pQueryInfo = tscGetQueryInfo(&userSql->cmd); doCleanupSubqueries(userSql, userSql->subState.numOfSub); + userSql->subState.numOfSub = 0; executeQuery(userSql, pQueryInfo); return; @@ -4934,4 +4935,3 @@ void tscRemoveTableMetaBuf(STableMetaInfo* pTableMetaInfo, uint64_t id) { taosHashRemove(tscTableMetaMap, fname, len); tscDebug("0x%"PRIx64" remove table meta %s, numOfRemain:%d", id, fname, (int32_t) taosHashGetSize(tscTableMetaMap)); } - \ No newline at end of file From 44be3ee34df9b406f00cf1da458ce22649d14928 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 10:11:11 +0800 Subject: [PATCH 152/185] fix bug --- src/client/src/tscSubquery.c | 6 +++--- src/client/src/tscUtil.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 32955bef3c..07ea4bf77f 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2737,6 +2737,9 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + doCleanupSubqueries(userSql, userSql->subState.numOfSub); + userSql->subState.numOfSub = 0; + code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; @@ -2748,9 +2751,6 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO return; } - doCleanupSubqueries(userSql, userSql->subState.numOfSub); - userSql->subState.numOfSub = 0; - pQueryInfo = tscGetQueryInfo(&userSql->cmd); executeQuery(userSql, pQueryInfo); } else { diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 75473c7baf..9623b247a5 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3773,6 +3773,9 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + doCleanupSubqueries(userSql, userSql->subState.numOfSub); + userSql->subState.numOfSub = 0; + code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; @@ -3786,9 +3789,6 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { SQueryInfo *pQueryInfo = tscGetQueryInfo(&userSql->cmd); - doCleanupSubqueries(userSql, userSql->subState.numOfSub); - userSql->subState.numOfSub = 0; - executeQuery(userSql, pQueryInfo); return; } From 6adcba9dd78f3d25cbde2f9a435d62e0d8600d07 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 10:23:56 +0800 Subject: [PATCH 153/185] fix bug --- src/client/src/tscSubquery.c | 10 +++++----- src/client/src/tscUtil.c | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 07ea4bf77f..f709be3767 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2729,17 +2729,17 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO pParentCmd->pTableMetaMap = tscCleanupTableMetaMap(pParentCmd->pTableMetaMap); pParentCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + + doCleanupSubqueries(userSql, userSql->subState.numOfSub); + userSql->subState.numOfSub = 0; + pParentSql->res.code = TSDB_CODE_SUCCESS; pParentSql->retry++; tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, tstrerror(code), 
pParentSql->retry); - SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; - - doCleanupSubqueries(userSql, userSql->subState.numOfSub); - userSql->subState.numOfSub = 0; - code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 9623b247a5..3583c8140c 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3765,16 +3765,16 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { pParentCmd->pTableMetaMap = tscCleanupTableMetaMap(pParentCmd->pTableMetaMap); pParentCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + + doCleanupSubqueries(userSql, userSql->subState.numOfSub); + userSql->subState.numOfSub = 0; + pParentSql->res.code = TSDB_CODE_SUCCESS; pParentSql->retry++; tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, tstrerror(code), pParentSql->retry); - - SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; - - doCleanupSubqueries(userSql, userSql->subState.numOfSub); - userSql->subState.numOfSub = 0; code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { From fa6f3c2fdb470452830d35c5008d87f27aa46d60 Mon Sep 17 00:00:00 2001 From: tomchon Date: Tue, 17 Aug 2021 11:13:03 +0800 Subject: [PATCH 154/185] change version number --- cmake/version.inc | 2 +- snap/snapcraft.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmake/version.inc b/cmake/version.inc index 12dc2b1b16..148c33106a 100755 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -4,7 +4,7 @@ PROJECT(TDengine) IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "2.1.7.0") + SET(TD_VER_NUMBER "2.1.7.1") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index 260af6f21b..859d40cf69 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -1,6 +1,6 @@ name: tdengine base: core18 -version: '2.1.7.0' +version: '2.1.7.1' icon: snap/gui/t-dengine.svg summary: an open-source big data platform designed and optimized for IoT. 
description: | @@ -72,7 +72,7 @@ parts: - usr/bin/taosd - usr/bin/taos - usr/bin/taosdemo - - usr/lib/libtaos.so.2.1.7.0 + - usr/lib/libtaos.so.2.1.7.1 - usr/lib/libtaos.so.1 - usr/lib/libtaos.so From 6dac76a70d5de3c54832389a5ed57206f73cc9de Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 16 Aug 2021 16:53:42 +0800 Subject: [PATCH 155/185] stop taosd using kill -9 --- tests/pytest/util/dnodes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index ae4ba97eb3..698c3caa55 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -556,7 +556,7 @@ class TDDnodes: psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}'" processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") while(processID): - killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + killCmd = "kill -9 %s > /dev/null 2>&1" % processID os.system(killCmd) time.sleep(1) processID = subprocess.check_output( From 9f0bbd0cb3a3148144a831306a2025b1a0e90b53 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 16 Aug 2021 18:03:24 +0800 Subject: [PATCH 156/185] test --- tests/pytest/util/dnodes.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 698c3caa55..0f4919ba96 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -436,7 +436,7 @@ class TDDnodes: psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}'" processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") while(processID): - killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + killCmd = "kill -9 %s > /dev/null 2>&1" % processID os.system(killCmd) time.sleep(1) processID = subprocess.check_output( @@ -445,7 +445,7 @@ class TDDnodes: psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") while(processID): - killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + killCmd = "kill -9 %s > /dev/null 2>&1" % processID os.system(killCmd) time.sleep(1) processID = subprocess.check_output( From 81cf139d0eef263e3a312ec7d0079ccade23f2b1 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Tue, 17 Aug 2021 11:32:03 +0800 Subject: [PATCH 157/185] [TD-6101]reduce python case execution time --- tests/pytest/fulltest.sh | 3 ++- tests/pytest/test.py | 3 +-- tests/script/jenkins/basic.txt | 14 ++++++++------ tests/test-all.sh | 3 +++ 4 files changed, 14 insertions(+), 9 deletions(-) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 600582098e..e444a0a318 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -152,6 +152,7 @@ python3 ./test.py -f import_merge/importCSV.py python3 ./test.py -f import_merge/import_update_0.py python3 ./test.py -f import_merge/import_update_1.py python3 ./test.py -f import_merge/import_update_2.py +python3 ./test.py -f update/merge_commit_data.py #======================p1-end=============== #======================p2-start=============== # tools @@ -180,7 +181,7 @@ python3 ./test.py -f update/allow_update-0.py python3 ./test.py -f update/append_commit_data.py python3 ./test.py -f update/append_commit_last-0.py python3 ./test.py -f update/append_commit_last.py -python3 ./test.py -f update/merge_commit_data.py + python3 ./test.py -f update/merge_commit_data2.py python3 ./test.py -f update/merge_commit_data2_update0.py diff --git a/tests/pytest/test.py 
b/tests/pytest/test.py index 65abd3ef93..97dca6be18 100644 --- a/tests/pytest/test.py +++ b/tests/pytest/test.py @@ -87,7 +87,7 @@ if __name__ == "__main__": else: toBeKilled = "valgrind.bin" - killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -HUP > /dev/null 2>&1" % toBeKilled + killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" % toBeKilled psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled processID = subprocess.check_output(psCmd, shell=True) @@ -110,7 +110,6 @@ if __name__ == "__main__": time.sleep(2) tdLog.info('stop All dnodes') - sys.exit(0) tdDnodes.init(deployPath) tdDnodes.setTestCluster(testCluster) diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 6ad6a74eed..b4aad278d8 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -90,6 +90,14 @@ cd ../../../debug; make ./test.sh -f general/parser/function.sim ./test.sh -f unique/cluster/vgroup100.sim +./test.sh -f unique/http/admin.sim +./test.sh -f unique/http/opentsdb.sim + +./test.sh -f unique/import/replica2.sim +./test.sh -f unique/import/replica3.sim + +./test.sh -f general/alter/cached_schema_after_alter.sim + #======================b1-end=============== #======================b2-start=============== @@ -198,13 +206,7 @@ cd ../../../debug; make #======================b3-end=============== #======================b4-start=============== -./test.sh -f unique/http/admin.sim -./test.sh -f unique/http/opentsdb.sim -./test.sh -f unique/import/replica2.sim -./test.sh -f unique/import/replica3.sim - -./test.sh -f general/alter/cached_schema_after_alter.sim ./test.sh -f general/alter/count.sim ./test.sh -f general/alter/dnode.sim ./test.sh -f general/alter/import.sim diff --git a/tests/test-all.sh b/tests/test-all.sh index 2578030165..b54857cf88 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -179,6 +179,9 @@ function runPyCaseOneByOnefq() { start_time=`date +%s` date +%F\ %T | tee -a pytest-out.log echo -n $case + if [[ $1 =~ full ]] ; then + line=$line" -s" + fi $line > case.log 2>&1 && \ echo -e "${GREEN} success${NC}" | tee -a pytest-out.log || \ echo -e "${RED} failed${NC}" | tee -a pytest-out.log From b65bc4dc18ac8e346c61948a6a5d2d62600f36bd Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 11:35:55 +0800 Subject: [PATCH 158/185] fix bug --- src/client/src/tscSubquery.c | 3 +-- src/client/src/tscUtil.c | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index f709be3767..dfdbe9fd0b 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2731,8 +2731,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; - doCleanupSubqueries(userSql, userSql->subState.numOfSub); - userSql->subState.numOfSub = 0; + tscFreeSubobj(userSql); pParentSql->res.code = TSDB_CODE_SUCCESS; pParentSql->retry++; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 3583c8140c..bc55357ddf 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3767,8 +3767,7 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; - doCleanupSubqueries(userSql, userSql->subState.numOfSub); - userSql->subState.numOfSub = 0; + tscFreeSubobj(userSql); pParentSql->res.code = 
TSDB_CODE_SUCCESS; pParentSql->retry++; @@ -3815,6 +3814,7 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) { pSql->subState.numOfSub = (int32_t) taosArrayGetSize(pQueryInfo->pUpstream); assert(pSql->pSubs == NULL); pSql->pSubs = calloc(pSql->subState.numOfSub, POINTER_BYTES); + assert(pSql->subState.states == NULL); pSql->subState.states = calloc(pSql->subState.numOfSub, sizeof(int8_t)); code = pthread_mutex_init(&pSql->subState.mutex, NULL); From 53aa829a8629c0098261d2c54ffdec1ad3867af5 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 11:40:14 +0800 Subject: [PATCH 159/185] fix bug --- src/client/src/tscSubquery.c | 3 ++- src/client/src/tscUtil.c | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index dfdbe9fd0b..4576af6e81 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2731,7 +2731,8 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; - tscFreeSubobj(userSql); + tscFreeSubobj(userSql); + tfree(pSql->pSubs); pParentSql->res.code = TSDB_CODE_SUCCESS; pParentSql->retry++; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index bc55357ddf..cb797b5ceb 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3767,7 +3767,8 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; - tscFreeSubobj(userSql); + tscFreeSubobj(userSql); + tfree(pSql->pSubs); pParentSql->res.code = TSDB_CODE_SUCCESS; pParentSql->retry++; From 83ec49107996a241ff3bd234e0a303bbb603eac3 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 11:48:54 +0800 Subject: [PATCH 160/185] fix bug --- src/client/src/tscSubquery.c | 2 +- src/client/src/tscUtil.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 4576af6e81..a27a8a41a1 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2732,7 +2732,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; tscFreeSubobj(userSql); - tfree(pSql->pSubs); + tfree(userSql->pSubs); pParentSql->res.code = TSDB_CODE_SUCCESS; pParentSql->retry++; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index cb797b5ceb..ba533bb03b 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3768,7 +3768,7 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; tscFreeSubobj(userSql); - tfree(pSql->pSubs); + tfree(userSql->pSubs); pParentSql->res.code = TSDB_CODE_SUCCESS; pParentSql->retry++; From f85b81e0560b4ba9e4c82b2c8ff30be3a89ee7d3 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 12:47:48 +0800 Subject: [PATCH 161/185] fix bug --- src/client/src/tscSubquery.c | 1 + src/client/src/tscUtil.c | 3 +++ 2 files changed, 4 insertions(+) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index a27a8a41a1..b4fb832b14 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2740,6 +2740,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", 
pParentSql->self, tstrerror(code), pParentSql->retry); + tscResetSqlCmd(&userSql->cmd, false); code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index ba533bb03b..d5323625da 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3775,6 +3775,9 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, tstrerror(code), pParentSql->retry); + + + tscResetSqlCmd(&userSql->cmd, false); code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { From 176af30aecb94b3698e65211ae542fbab304d619 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 12:59:30 +0800 Subject: [PATCH 162/185] fix mem leak --- src/client/inc/tscSubquery.h | 2 ++ src/client/src/tscSubquery.c | 15 +++++++++++++++ src/client/src/tscUtil.c | 2 ++ 3 files changed, 19 insertions(+) diff --git a/src/client/inc/tscSubquery.h b/src/client/inc/tscSubquery.h index f21ce67e9e..9e16f3f900 100644 --- a/src/client/inc/tscSubquery.h +++ b/src/client/inc/tscSubquery.h @@ -52,6 +52,8 @@ int tsInsertInitialCheck(SSqlObj *pSql); void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs); +void tscFreeRetrieveSupporters(SSqlObj *pSql); + #ifdef __cplusplus } diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index b4fb832b14..9761c5b4ca 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2050,6 +2050,19 @@ void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) { } } +void tscFreeRetrieveSupporters(SSqlObj *pSql) { + for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) { + SSqlObj* pSub = pSql->pSubs[i]; + assert(pSub != NULL); + + SRetrieveSupport* pSupport = pSub->param; + + tfree(pSupport->localBuffer); + tfree(pSub->param); + } +} + + void tscLockByThread(int64_t *lockedBy) { int64_t tid = taosGetSelfPthreadId(); int i = 0; @@ -2731,6 +2744,8 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + tscFreeRetrieveSupporters(pParentSql); + tscFreeSubobj(userSql); tfree(userSql->pSubs); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index d5323625da..67b1a1c199 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3767,6 +3767,8 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + tscFreeRetrieveSupporters(pParentSql); + tscFreeSubobj(userSql); tfree(userSql->pSubs); From 3885f10f8e2c6a1a62610be172dd32dfa2f5f7e1 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Mon, 16 Aug 2021 09:41:50 +0800 Subject: [PATCH 163/185] []:run all ci tests first --- src/client/inc/tscUtil.h | 1 + src/client/src/tscSQLParser.c | 12 ++++++++---- src/client/src/tscUtil.c | 21 +++++++++++++++++++++ 3 files changed, 30 insertions(+), 4 deletions(-) diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index 0b2d4fd115..e11748efbe 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -143,6 +143,7 @@ bool tscIsSessionWindowQuery(SQueryInfo* pQueryInfo); bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo); bool tsIsArithmeticQueryOnAggResult(SQueryInfo* pQueryInfo); bool tscGroupbyColumn(SQueryInfo* pQueryInfo); +int32_t tscGetTopBotQueryExprIndex(SQueryInfo* pQueryInfo); bool 
tscIsTopBotQuery(SQueryInfo* pQueryInfo); bool hasTagValOutput(SQueryInfo* pQueryInfo); bool timeWindowInterpoRequired(SQueryInfo *pQueryInfo); diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 4e5245742c..968d62d939 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5613,11 +5613,13 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq pQueryInfo->groupbyExpr.orderType = p1->sortOrder; pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; } else if (isTopBottomQuery(pQueryInfo)) { + int32_t topBotIndex = tscGetTopBotQueryExprIndex(pQueryInfo); + assert(topBotIndex >= 1); /* order of top/bottom query in interval is not valid */ - SExprInfo* pExpr = tscExprGet(pQueryInfo, 0); + SExprInfo* pExpr = tscExprGet(pQueryInfo, topBotIndex-1); assert(pExpr->base.functionId == TSDB_FUNC_TS); - pExpr = tscExprGet(pQueryInfo, 1); + pExpr = tscExprGet(pQueryInfo, topBotIndex); if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { return invalidOperationMsg(pMsgBuf, msg2); } @@ -5706,11 +5708,13 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq SColIndex* pColIndex = taosArrayGet(columnInfo, 0); validOrder = (pColIndex->colIndex == index.columnIndex); } else { + int32_t topBotIndex = tscGetTopBotQueryExprIndex(pQueryInfo); + assert(topBotIndex >= 1); /* order of top/bottom query in interval is not valid */ - SExprInfo* pExpr = tscExprGet(pQueryInfo, 0); + SExprInfo* pExpr = tscExprGet(pQueryInfo, topBotIndex-1); assert(pExpr->base.functionId == TSDB_FUNC_TS); - pExpr = tscExprGet(pQueryInfo, 1); + pExpr = tscExprGet(pQueryInfo, topBotIndex); if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { return invalidOperationMsg(pMsgBuf, msg2); } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 612d6a5642..6e0bf6f7d2 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -369,6 +369,27 @@ bool tscGroupbyColumn(SQueryInfo* pQueryInfo) { return false; } +int32_t tscGetTopBotQueryExprIndex(SQueryInfo* pQueryInfo) { + size_t numOfExprs = tscNumOfExprs(pQueryInfo); + + for (int32_t i = 0; i < numOfExprs; ++i) { + SExprInfo* pExpr = tscExprGet(pQueryInfo, i); + if (pExpr == NULL) { + continue; + } + + if (pExpr->base.functionId == TSDB_FUNC_TS) { + continue; + } + + if (pExpr->base.functionId == TSDB_FUNC_TOP || pExpr->base.functionId == TSDB_FUNC_BOTTOM) { + return i; + } + } + + return -1; +} + bool tscIsTopBotQuery(SQueryInfo* pQueryInfo) { size_t numOfExprs = tscNumOfExprs(pQueryInfo); From e9ffa8b13a24f6136e7f1b4c104da471ab675db4 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Tue, 17 Aug 2021 11:48:42 +0800 Subject: [PATCH 164/185] [TD-6017]:add test case that top/bottom expr is not first expression --- tests/script/general/parser/limit.sim | 5 +++++ tests/script/general/parser/limit_tb.sim | 4 ++++ 2 files changed, 9 insertions(+) diff --git a/tests/script/general/parser/limit.sim b/tests/script/general/parser/limit.sim index 23b85095c5..3af2cb3018 100644 --- a/tests/script/general/parser/limit.sim +++ b/tests/script/general/parser/limit.sim @@ -75,4 +75,9 @@ sleep 100 run general/parser/limit_tb.sim run general/parser/limit_stb.sim +print ========> TD-6017 +sql use $db +sql select * from (select ts, top(c1, 5) from $tb where ts >= $ts0 order by ts desc limit 3 offset 1) +sql select * from (select ts, top(c1, 5) from 
$stb where ts >= $ts0 order by ts desc limit 3 offset 1) + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/parser/limit_tb.sim b/tests/script/general/parser/limit_tb.sim index 0c987d88c9..4a93797d40 100644 --- a/tests/script/general/parser/limit_tb.sim +++ b/tests/script/general/parser/limit_tb.sim @@ -355,6 +355,10 @@ sql select top(c1, 1) from $tb where ts >= $ts0 and ts <= $tsu limit 5 offset 1 if $rows != 0 then return -1 endi + +print ========> TD-6017 +sql select * from (select ts, top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1) + sql select top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1 print select top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1 print $data00 $data01 From b9d5df6761cfedcf8a6dec148f5470840cb98c2d Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 13:54:15 +0800 Subject: [PATCH 165/185] fix crash issue --- src/client/src/tscSubquery.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 9761c5b4ca..5464d4168a 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2054,11 +2054,8 @@ void tscFreeRetrieveSupporters(SSqlObj *pSql) { for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) { SSqlObj* pSub = pSql->pSubs[i]; assert(pSub != NULL); - - SRetrieveSupport* pSupport = pSub->param; - - tfree(pSupport->localBuffer); - tfree(pSub->param); + + tscFreeRetrieveSup(pSub); } } From e43b3c330ad47f447295e6ce789b3c84c3a4d214 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 13:56:11 +0800 Subject: [PATCH 166/185] fix issue --- src/client/src/tscSubquery.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 5464d4168a..04267421cf 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2050,16 +2050,6 @@ void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) { } } -void tscFreeRetrieveSupporters(SSqlObj *pSql) { - for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) { - SSqlObj* pSub = pSql->pSubs[i]; - assert(pSub != NULL); - - tscFreeRetrieveSup(pSub); - } -} - - void tscLockByThread(int64_t *lockedBy) { int64_t tid = taosGetSelfPthreadId(); int i = 0; @@ -2586,6 +2576,18 @@ static void tscFreeRetrieveSup(SSqlObj *pSql) { tfree(trsupport); } +void tscFreeRetrieveSupporters(SSqlObj *pSql) { + for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) { + SSqlObj* pSub = pSql->pSubs[i]; + assert(pSub != NULL); + + tscFreeRetrieveSup(pSub); + } +} + + + + static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfRows); static void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numOfRows); From 829055102fa12bce12cb33eb8ad2d97676fc9449 Mon Sep 17 00:00:00 2001 From: cpwu Date: Tue, 17 Aug 2021 13:57:47 +0800 Subject: [PATCH 167/185] [TD-5798] add case for td5798 --- tests/pytest/functions/queryTestCases.py | 214 +++++++++++++++++++---- 1 file changed, 183 insertions(+), 31 deletions(-) diff --git a/tests/pytest/functions/queryTestCases.py b/tests/pytest/functions/queryTestCases.py index 8cd3ef6b3a..f320db43af 100644 --- a/tests/pytest/functions/queryTestCases.py +++ b/tests/pytest/functions/queryTestCases.py @@ -58,7 +58,7 @@ class TDTestCase: def td3690(self): tdLog.printNoPrefix("==========TD-3690==========") 
tdSql.query("show variables") - tdSql.checkData(51, 1, 864000) + tdSql.checkData(53, 1, 864000) def td4082(self): tdLog.printNoPrefix("==========TD-4082==========") @@ -268,7 +268,7 @@ class TDTestCase: tdSql.execute("drop database if exists db") tdSql.execute("create database if not exists db") tdSql.query("show variables") - tdSql.checkData(36, 1, 3650) + tdSql.checkData(38, 1, 3650) tdSql.query("show databases") tdSql.checkData(0,7,"3650,3650,3650") @@ -296,7 +296,7 @@ class TDTestCase: tdSql.query("show databases") tdSql.checkData(0, 7, "3650,3650,3650") tdSql.query("show variables") - tdSql.checkData(36, 1, 3650) + tdSql.checkData(38, 1, 3650) tdSql.execute("alter database db1 keep 365") tdSql.execute("drop database if exists db1") @@ -613,45 +613,189 @@ class TDTestCase: pass def td5798(self): - tdLog.printNoPrefix("==========TD-5798==========") + tdLog.printNoPrefix("==========TD-5798 + TD-5810==========") tdSql.execute("drop database if exists db") tdSql.execute("create database if not exists db keep 3650") tdSql.execute("use db") tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 int) tags(t0 tinyint, t1 int, t2 int)") - tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 int) tags(t2 binary(16), t3 binary(16), t4 int)") - for i in range(100): - sql = f"create table db.t{i} using db.stb1 tags({i%7}, {(i-1)%7}, {i%2})" + tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 binary(16)) tags(t2 binary(16), t3 binary(16), t4 int)") + maxRemainderNum=7 + tbnum=101 + for i in range(tbnum-1): + sql = f"create table db.t{i} using db.stb1 tags({i%maxRemainderNum}, {(i-1)%maxRemainderNum}, {i%2})" tdSql.execute(sql) tdSql.execute(f"insert into db.t{i} values (now-10d, {i}, {i%3})") - tdSql.execute(f"insert into db.t{i} values (now-9d, {i*2}, {(i-1)%3})") - tdSql.execute(f"insert into db.t{i} values (now-8d, {i*3}, {(i-2)%3})") + tdSql.execute(f"insert into db.t{i} values (now-9d, {i}, {(i-1)%3})") + tdSql.execute(f"insert into db.t{i} values (now-8d, {i}, {(i-2)%3})") + tdSql.execute(f"insert into db.t{i} (ts )values (now-7d)") - tdSql.execute(f"create table db.t0{i} using db.stb2 tags('{i}', '{100-i}', {i%3})") - tdSql.execute(f"insert into db.t0{i} values (now-10d, {i}, {(i+1)%3})") - tdSql.execute(f"insert into db.t0{i} values (now-9d, {i*2}, {(i+2)%3})") - tdSql.execute(f"insert into db.t0{i} values (now-8d, {i*3}, {(i)%3})") + tdSql.execute(f"create table db.t0{i} using db.stb2 tags('{i%maxRemainderNum}', '{(i-1)%maxRemainderNum}', {i%3})") + tdSql.execute(f"insert into db.t0{i} values (now-10d, {i}, '{(i+1)%3}')") + tdSql.execute(f"insert into db.t0{i} values (now-9d, {i}, '{(i+2)%3}')") + tdSql.execute(f"insert into db.t0{i} values (now-8d, {i}, '{(i)%3}')") + tdSql.execute(f"insert into db.t0{i} (ts )values (now-7d)") + tdSql.execute("create table db.t100num using db.stb1 tags(null, null, null)") + tdSql.execute("create table db.t0100num using db.stb2 tags(null, null, null)") + tdSql.execute(f"insert into db.t100num values (now-10d, {tbnum-1}, 1)") + tdSql.execute(f"insert into db.t100num values (now-9d, {tbnum-1}, 0)") + tdSql.execute(f"insert into db.t100num values (now-8d, {tbnum-1}, 2)") + tdSql.execute(f"insert into db.t100num (ts )values (now-7d)") + tdSql.execute(f"insert into db.t0100num values (now-10d, {tbnum-1}, 1)") + tdSql.execute(f"insert into db.t0100num values (now-9d, {tbnum-1}, 0)") + tdSql.execute(f"insert into db.t0100num values (now-8d, {tbnum-1}, 2)") + tdSql.execute(f"insert into db.t0100num (ts )values (now-7d)") + #========== 
TD-5810 suport distinct multi-data-coloumn ========== + tdSql.query(f"select distinct c1 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum) + tdSql.query(f"select distinct c2 from stb1") + tdSql.checkRows(4) + tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum*3) + tdSql.query(f"select distinct c1,c1 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum) + tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3 offset {tbnum*3-2}") + tdSql.checkRows(2) + + tdSql.query(f"select distinct c1 from t1 where c1 <{tbnum}") + tdSql.checkRows(1) + tdSql.query(f"select distinct c2 from t1") + tdSql.checkRows(4) + tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum}") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c1 from t1 ") + tdSql.checkRows(2) + tdSql.query(f"select distinct c1,c1 from t1 where c1 <{tbnum}") + tdSql.checkRows(1) + tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3 offset 2") + tdSql.checkRows(1) + + tdSql.query(f"select distinct c3 from stb2 where c2 <{tbnum} ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c3, c2 from stb2 where c2 <{tbnum} limit 2") + tdSql.checkRows(2) + + tdSql.error("select distinct c5 from stb1") + tdSql.error("select distinct c5 from t1") + tdSql.error("select distinct c1 from db.*") + tdSql.error("select c2, distinct c1 from stb1") + tdSql.error("select c2, distinct c1 from t1") + tdSql.error("select distinct c2 from ") + tdSql.error("distinct c2 from stb1") + tdSql.error("distinct c2 from t1") + tdSql.error("select distinct c1, c2, c3 from stb1") + tdSql.error("select distinct c1, c2, c3 from t1") + tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from stb1") + tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from t1") + tdSql.error("select distinct t1.c1, t1.c2, t2.c1, t2.c2 from t1") + tdSql.query(f"select distinct c1 c2, c2 c3 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum*3) + tdSql.query(f"select distinct c1 c2, c2 c3 from t1 where c1 <{tbnum}") + tdSql.checkRows(3) + tdSql.query("select distinct c1, c2 from stb1 order by ts") + tdSql.checkRows(tbnum*3+1) + tdSql.query("select distinct c1, c2 from t1 order by ts") + tdSql.checkRows(4) + tdSql.error("select distinct c1, ts from stb1 group by c2") + tdSql.error("select distinct c1, ts from t1 group by c2") + tdSql.error("select distinct c1, max(c2) from stb1 ") + tdSql.error("select distinct c1, max(c2) from t1 ") + tdSql.error("select max(c2), distinct c1 from stb1 ") + tdSql.error("select max(c2), distinct c1 from t1 ") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 group by t0") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 group by t0") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) ") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) ") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from t1 where ts > now-10d and ts < now interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 slimit 1") + 
tdSql.error("select distinct c1, c2 from t1 where c1 > 3 slimit 1") + tdSql.query(f"select distinct c1, c2 from stb1 where c1 between {tbnum-2} and {tbnum} ") + tdSql.checkRows(6) + tdSql.query("select distinct c1, c2 from stb1 where c1 in (1,2,3,4,5)") + tdSql.checkRows(15) + tdSql.query("select distinct c1, c2 from stb1 where c1 in (100,1000,10000)") + tdSql.checkRows(3) + + tdSql.query(f"select distinct c1,c2 from (select * from stb1 where c1 > {tbnum-2}) ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from (select * from t1 where c1 < {tbnum}) ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from (select * from stb1 where t2 !=0 and t2 != 1) ") + tdSql.checkRows(4) + tdSql.error("select distinct c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.error("select c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.query("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 ) where c1 < 4") + tdSql.checkRows(3) + tdSql.error("select distinct c1, c2 from (select c1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 order by ts)") + # tdSql.error("select distinct c1, c2 from (select c2, c1 from t1 where c1 > 2 order by ts)") + tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 group by c1)") + # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 group by c1)") + # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 group by c1)") + tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 )") + tdSql.checkRows(1) + tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 )") + tdSql.checkRows(1) + tdSql.error("select distinct stb1.c1, stb1.c2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4") + tdSql.error("select distinct t1.c1, t1.c2 from t1 , t2 where t1.ts=t2.ts ") + + # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from stb1 group by ts)") + # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from t1 group by ts)") + + + + #========== TD-5798 suport distinct multi-tags-coloumn ========== tdSql.query("select distinct t1 from stb1") - tdSql.checkRows(7) + tdSql.checkRows(maxRemainderNum+1) tdSql.query("select distinct t0, t1 from stb1") - tdSql.checkRows(7) + tdSql.checkRows(maxRemainderNum+1) tdSql.query("select distinct t1, t0 from stb1") - tdSql.checkRows(7) + tdSql.checkRows(maxRemainderNum+1) tdSql.query("select distinct t1, t2 from stb1") - tdSql.checkRows(14) + tdSql.checkRows(maxRemainderNum*2+1) tdSql.query("select distinct t0, t1, t2 from stb1") - tdSql.checkRows(14) + tdSql.checkRows(maxRemainderNum*2+1) tdSql.query("select distinct t0 t1, t1 t2 from stb1") - tdSql.checkRows(7) + tdSql.checkRows(maxRemainderNum+1) tdSql.query("select distinct t0, t0, t0 from stb1") - tdSql.checkRows(7) + tdSql.checkRows(maxRemainderNum+1) tdSql.query("select distinct t0, t1 from t1") tdSql.checkRows(1) + tdSql.query("select distinct t0, t1 from t100num") + tdSql.checkRows(1) + + tdSql.query("select distinct t3 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t2, t3 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t3, t2 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t4, t2 from stb2") + tdSql.checkRows(maxRemainderNum*3+1) + 
tdSql.query("select distinct t2, t3, t4 from stb2") + tdSql.checkRows(maxRemainderNum*3+1) + tdSql.query("select distinct t2 t1, t3 t2 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t3, t3, t3 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t2, t3 from t01") + tdSql.checkRows(1) + tdSql.query("select distinct t3, t4 from t0100num") + tdSql.checkRows(1) ########## should be error ######### tdSql.error("select distinct from stb1") + tdSql.error("select distinct t3 from stb1") + tdSql.error("select distinct t1 from db.*") tdSql.error("select distinct t2 from ") tdSql.error("distinct t2 from stb1") tdSql.error("select distinct stb1") @@ -678,29 +822,37 @@ class TDTestCase: tdSql.checkRows(1) tdSql.error("select distinct stb1.t0, stb1.t1 from stb1, stb2 where stb1.t2=stb2.t4") tdSql.error("select distinct t0, t1 from stb1 where stb2.t4 > 2") - # tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2") + tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 group by t0") + tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) ") + tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct t0, t1 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)") + + tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2") + tdSql.error("select distinct t0, max(c1) from stb1 where t0 > 2") tdSql.error("select distinct t0 from stb1 where t0 in (select t0 from stb1 where t0 > 2)") tdSql.query("select distinct t0, t1 from stb1 where t0 in (1,2,3,4,5)") tdSql.checkRows(5) tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2) ") tdSql.checkRows(4) - tdSql.query("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ") - tdSql.checkRows(1) - tdSql.query("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3") - tdSql.checkRows(1) + tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3") tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2 ) where t1 < 3") tdSql.checkRows(1) tdSql.error("select distinct t1, t0 from (select t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1 group by t1)") + tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1)") tdSql.query("select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 ) where t1 < 3") tdSql.checkRows(1) - # tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3") - tdSql.query("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3") - + tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3") + tdSql.error("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 group by ts) where t1 < 3") + tdSql.error("select distinct stb1.t1, stb1.t2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4") + tdSql.error("select distinct t1.t1, t1.t2 from t1 , t2 where t1.ts=t2.ts ") pass def td5935(self): - tdLog.printNoPrefix("==========TD-5798==========") + tdLog.printNoPrefix("==========TD-5935==========") 
tdSql.execute("drop database if exists db") tdSql.execute("create database if not exists db keep 3650") @@ -739,8 +891,8 @@ class TDTestCase: # self.td4082() # self.td4288() # self.td4724() - # self.td5798() - self.td5935() + self.td5798() + # self.td5935() # develop branch # self.td4097() From 2fde105908837521e34cc03b048d1276466df1b6 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 14:04:27 +0800 Subject: [PATCH 168/185] fix mem leak --- src/client/src/tscSubquery.c | 2 +- src/client/src/tscUtil.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 04267421cf..9ee735de45 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2743,7 +2743,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; - tscFreeRetrieveSupporters(pParentSql); + tscFreeRetrieveSup(pParentSql); tscFreeSubobj(userSql); tfree(userSql->pSubs); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 67b1a1c199..ad04550e77 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3767,7 +3767,7 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; - tscFreeRetrieveSupporters(pParentSql); + tscFreeRetrieveSup(pParentSql); tscFreeSubobj(userSql); tfree(userSql->pSubs); From a299c05c96c0e8cef4e150626dc364143ca03f7e Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 14:07:04 +0800 Subject: [PATCH 169/185] fix bug --- src/client/inc/tscSubquery.h | 3 +++ src/client/src/tscSubquery.c | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/client/inc/tscSubquery.h b/src/client/inc/tscSubquery.h index 9e16f3f900..99215551c3 100644 --- a/src/client/inc/tscSubquery.h +++ b/src/client/inc/tscSubquery.h @@ -54,6 +54,9 @@ void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs); void tscFreeRetrieveSupporters(SSqlObj *pSql); +void tscFreeRetrieveSup(SSqlObj *pSql); + + #ifdef __cplusplus } diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 9ee735de45..56cee02f4d 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2562,7 +2562,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { return TSDB_CODE_SUCCESS; } -static void tscFreeRetrieveSup(SSqlObj *pSql) { +void tscFreeRetrieveSup(SSqlObj *pSql) { SRetrieveSupport *trsupport = pSql->param; void* p = atomic_val_compare_exchange_ptr(&pSql->param, trsupport, 0); From 010db435a49f6936b5768ae12af765b0f61ebd05 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 14:50:19 +0800 Subject: [PATCH 170/185] fix crash issue --- src/query/src/qExecutor.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 844b8ec683..f59270d40a 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1604,6 +1604,7 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe int32_t startPos = ascQuery? 
0 : (pSDataBlock->info.rows - 1); TSKEY ts = getStartTsKey(pQueryAttr, &pSDataBlock->info.window, tsCols, pSDataBlock->info.rows); + int32_t startInterp = (pResultRowInfo->curPos == -1); STimeWindow win = getCurrentActiveTimeWindow(pResultRowInfo, ts, pQueryAttr); bool masterScan = IS_MASTER_SCAN(pRuntimeEnv); @@ -1625,7 +1626,10 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe // window start(end) key interpolation doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep); - doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? &win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); + if (!startInterp) { + doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? &win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); + } + startInterp = 0; preWin = win; int32_t prevEndPos = (forwardStep - 1) * step + startPos; From ecb66a1598a10c6ca11afaf73df0730009fd9adc Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 15:18:37 +0800 Subject: [PATCH 171/185] fix bug --- src/client/src/tscSQLParser.c | 4 ++++ src/query/src/qExecutor.c | 6 +----- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 4e5245742c..9bda1458f4 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5382,6 +5382,7 @@ int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNo const char* msg3 = "top/bottom not support fill"; const char* msg4 = "illegal value or data overflow"; const char* msg5 = "fill only available for interval query"; + const char* msg6 = "not supported function now"; if ((!isTimeWindowQuery(pQueryInfo)) && (!tscIsPointInterpQuery(pQueryInfo))) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5); @@ -5420,6 +5421,9 @@ int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNo } } else if (strncasecmp(pItem->pVar.pz, "prev", 4) == 0 && pItem->pVar.nLen == 4) { pQueryInfo->fillType = TSDB_FILL_PREV; + if (tscIsPointInterpQuery(pQueryInfo) && pQueryInfo->order.order == TSDB_ORDER_DESC) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); + } } else if (strncasecmp(pItem->pVar.pz, "next", 4) == 0 && pItem->pVar.nLen == 4) { pQueryInfo->fillType = TSDB_FILL_NEXT; } else if (strncasecmp(pItem->pVar.pz, "linear", 6) == 0 && pItem->pVar.nLen == 6) { diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index f59270d40a..844b8ec683 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1604,7 +1604,6 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe int32_t startPos = ascQuery? 0 : (pSDataBlock->info.rows - 1); TSKEY ts = getStartTsKey(pQueryAttr, &pSDataBlock->info.window, tsCols, pSDataBlock->info.rows); - int32_t startInterp = (pResultRowInfo->curPos == -1); STimeWindow win = getCurrentActiveTimeWindow(pResultRowInfo, ts, pQueryAttr); bool masterScan = IS_MASTER_SCAN(pRuntimeEnv); @@ -1626,10 +1625,7 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe // window start(end) key interpolation doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep); - if (!startInterp) { - doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? 
&win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); - } - startInterp = 0; + doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? &win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); preWin = win; int32_t prevEndPos = (forwardStep - 1) * step + startPos; From 89028123997938fa5ccf07280207feacea58f894 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 16:04:48 +0800 Subject: [PATCH 172/185] fix bug --- src/client/src/tscSubquery.c | 1 + src/client/src/tscUtil.c | 1 + 2 files changed, 2 insertions(+) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 56cee02f4d..c5afbe6dba 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2744,6 +2744,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; tscFreeRetrieveSup(pParentSql); + tscFreeRetrieveSup(userSql); tscFreeSubobj(userSql); tfree(userSql->pSubs); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index ad04550e77..a79f50b63d 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3768,6 +3768,7 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; tscFreeRetrieveSup(pParentSql); + tscFreeRetrieveSup(userSql); tscFreeSubobj(userSql); tfree(userSql->pSubs); From bd0b2e4b484da282f7b81e580c94ce327b3a5aa7 Mon Sep 17 00:00:00 2001 From: zhaoyanggh Date: Tue, 17 Aug 2021 16:17:39 +0800 Subject: [PATCH 173/185] [TD-6070] add test case --- tests/pytest/functions/function_interp.py | 70 ++++++++++++++++++++--- 1 file changed, 61 insertions(+), 9 deletions(-) diff --git a/tests/pytest/functions/function_interp.py b/tests/pytest/functions/function_interp.py index 810c90279c..41215f15eb 100644 --- a/tests/pytest/functions/function_interp.py +++ b/tests/pytest/functions/function_interp.py @@ -26,18 +26,70 @@ class TDTestCase: self.rowNum = 10 self.ts = 1537146000000 - + def run(self): tdSql.prepare() - tdSql.execute("create table t(ts timestamp, k int)") - tdSql.execute("insert into t values('2021-1-1 1:1:1', 12);") - - tdSql.query("select interp(*) from t where ts='2021-1-1 1:1:1'") - tdSql.checkRows(1) - tdSql.checkData(0, 1, 12) + tdSql.execute("create table ap1 (ts timestamp, pav float)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.119', 2.90799)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.317', 3.07399)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.517', 0.58117)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.717', 0.16150)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.918', 1.47885)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:56.569', 1.76472)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.381', 2.13722)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.574', 4.10256)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.776', 3.55345)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.976', 1.46624)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.187', 0.17943)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.372', 2.04101)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.573', 3.20924)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.768', 1.71807)") + 
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.964', 4.60900)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.155', 4.33907)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.359', 0.76940)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.553', 0.06458)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.742', 4.59857)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.938', 1.55081)") + + tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (PREV)") + tdSql.checkRows(0) + tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (NEXT)") + tdSql.checkRows(0) + tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (LINEAR)") + tdSql.checkRows(0) + tdSql.query("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' INTERVAL(1000a) FILL (LINEAR)") + tdSql.checkRows(6) + tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' INTERVAL(1000a) FILL (NEXT)") + tdSql.checkRows(6) + tdSql.checkData(0,1,2.90799) + tdSql.query("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts <= '2021-07-25 02:20:00' INTERVAL(1000a) FILL (PREV)") + tdSql.checkRows(7) + tdSql.checkData(1,1,1.47885) + tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts <= '2021-07-25 02:20:00' INTERVAL(1000a) FILL (LINEAR)") + tdSql.checkRows(7) + + # check desc order + tdSql.error("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (PREV) order by ts desc") + tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (NEXT) order by ts desc") + tdSql.checkRows(0) + tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (LINEAR) order by ts desc") + tdSql.checkRows(0) + tdSql.query("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' INTERVAL(1000a) FILL (LINEAR) order by ts desc") + tdSql.checkRows(6) + tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' INTERVAL(1000a) FILL (NEXT) order by ts desc") + tdSql.checkRows(6) + tdSql.checkData(0,1,4.60900) + tdSql.error("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts <= '2021-07-25 02:20:00' INTERVAL(1000a) FILL (PREV) order by ts desc") + tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts <= '2021-07-25 02:20:00' INTERVAL(1000a) FILL (LINEAR) order by ts desc") + tdSql.checkRows(7) + + # check exception + tdSql.error("select interp(*) from ap1") + tdSql.error("select interp(*) from ap1 FILL(NEXT)") + tdSql.error("select interp(*) from ap1 ts >= '2021-07-25 02:19:54' FILL(NEXT)") + tdSql.error("select interp(*) from ap1 ts <= '2021-07-25 02:19:54' FILL(NEXT)") + tdSql.error("select interp(*) from ap1 where ts >'2021-07-25 02:19:59.938' and ts < now interval(1s) fill(next)") - tdSql.error("select interp(*) from t where ts >'2021-1-1 1:1:1' and ts < now interval(1s) fill(next)") - def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) From 55c8c15742aa29e09494bbfbf19b9ee4e52010e0 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 16:32:38 +0800 Subject: [PATCH 174/185] fix msg null issue --- src/client/src/tscSQLParser.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 968d62d939..e580232f01 
100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -4524,7 +4524,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql */ tSqlExprDestroy(*pExpr); } else { - ret = setExprToCond(&pCondExpr->pTimewindow, *pExpr, msg3, parentOptr, pQueryInfo->msg); + ret = setExprToCond(&pCondExpr->pTimewindow, *pExpr, msg3, parentOptr, pCmd->payload); } *pExpr = NULL; // remove this expression @@ -4562,7 +4562,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql } pQueryInfo->type |= TSDB_QUERY_TYPE_JOIN_QUERY; - ret = setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pQueryInfo->msg); + ret = setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pCmd->payload); *pExpr = NULL; } else { // do nothing @@ -4580,7 +4580,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5); } - ret = setExprToCond(&pCondExpr->pColumnCond, *pExpr, NULL, parentOptr, pQueryInfo->msg); + ret = setExprToCond(&pCondExpr->pColumnCond, *pExpr, NULL, parentOptr, pCmd->payload); *pExpr = NULL; // remove it from expr tree } From f98b41bb4f07fe8ad2f66e02091dde9cdb73b6e3 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 17 Aug 2021 17:04:10 +0800 Subject: [PATCH 175/185] [TD-6165]: use memcpy to replace wcsncmp for nchar type comparision --- src/util/src/tcompare.c | 23 ++--------------------- 1 file changed, 2 insertions(+), 21 deletions(-) diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index 921da82d44..1b980a4a1d 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -199,16 +199,7 @@ int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) { if (len1 != len2) { return len1 > len2? 1:-1; } else { - char *pLeftTerm = (char *)tcalloc(len1 + 1, sizeof(char)); - char *pRightTerm = (char *)tcalloc(len1 + 1, sizeof(char)); - memcpy(pLeftTerm, varDataVal(pLeft), len1); - memcpy(pRightTerm, varDataVal(pRight), len2); - - int32_t ret = wcsncmp((wchar_t*) pLeftTerm, (wchar_t*) pRightTerm, len1/TSDB_NCHAR_SIZE); - - tfree(pLeftTerm); - tfree(pRightTerm); - + int32_t ret = memcmp((wchar_t*) pLeft, (wchar_t*) pRight, len1); if (ret == 0) { return 0; } else { @@ -518,17 +509,7 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) { if (t1->len != t2->len) { return t1->len > t2->len? 1:-1; } - - char *t1_term = (char *)tcalloc(t1->len + 1, sizeof(char)); - char *t2_term = (char *)tcalloc(t2->len + 1, sizeof(char)); - memcpy(t1_term, t1->data, t1->len); - memcpy(t2_term, t2->data, t2->len); - - int32_t ret = wcsncmp((wchar_t*) t1_term, (wchar_t*) t2_term, t2->len/TSDB_NCHAR_SIZE); - - tfree(t1_term); - tfree(t2_term); - + int32_t ret = memcmp((wchar_t*) t1, (wchar_t*) t2, t2->len); if (ret == 0) { return ret; } From 7e7b67146a4b91b33f3590efe39d20dc4c21801b Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 17 Aug 2021 17:04:10 +0800 Subject: [PATCH 176/185] [TD-6165]: use memcpy to replace wcsncmp for nchar type comparision --- src/util/src/tcompare.c | 23 ++--------------------- 1 file changed, 2 insertions(+), 21 deletions(-) diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index 3619dad83b..f8deb65d48 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -129,16 +129,7 @@ int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) { if (len1 != len2) { return len1 > len2? 
1:-1; } else { - char *pLeftTerm = (char *)tcalloc(len1 + 1, sizeof(char)); - char *pRightTerm = (char *)tcalloc(len1 + 1, sizeof(char)); - memcpy(pLeftTerm, varDataVal(pLeft), len1); - memcpy(pRightTerm, varDataVal(pRight), len2); - - int32_t ret = wcsncmp((wchar_t*) pLeftTerm, (wchar_t*) pRightTerm, len1/TSDB_NCHAR_SIZE); - - tfree(pLeftTerm); - tfree(pRightTerm); - + int32_t ret = memcmp((wchar_t*) pLeft, (wchar_t*) pRight, len1); if (ret == 0) { return 0; } else { @@ -418,17 +409,7 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) { if (t1->len != t2->len) { return t1->len > t2->len? 1:-1; } - - char *t1_term = (char *)tcalloc(t1->len + 1, sizeof(char)); - char *t2_term = (char *)tcalloc(t2->len + 1, sizeof(char)); - memcpy(t1_term, t1->data, t1->len); - memcpy(t2_term, t2->data, t2->len); - - int32_t ret = wcsncmp((wchar_t*) t1_term, (wchar_t*) t2_term, t2->len/TSDB_NCHAR_SIZE); - - tfree(t1_term); - tfree(t2_term); - + int32_t ret = memcmp((wchar_t*) t1, (wchar_t*) t2, t2->len); if (ret == 0) { return ret; } From 2bed48b40274b9b69d494664a1f37ba3017575f9 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 17 Aug 2021 18:38:33 +0800 Subject: [PATCH 177/185] [TD-6166]: pointer not initialized led taos.exe exit on windows. (#7425) --- deps/MsvcLibX/src/realpath.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/MsvcLibX/src/realpath.c b/deps/MsvcLibX/src/realpath.c index ec1b606bf6..62e701bf3a 100644 --- a/deps/MsvcLibX/src/realpath.c +++ b/deps/MsvcLibX/src/realpath.c @@ -521,7 +521,7 @@ int ResolveLinksA(const char *path, char *buf, size_t bufsize) { /* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */ char *realpathU(const char *path, char *outbuf) { char *pOutbuf = outbuf; - char *pOutbuf1; + char *pOutbuf1 = NULL; char *pPath1 = NULL; char *pPath2 = NULL; int iErr; From 0c7cf3a918f01c51b8d8992fb64def668db0b338 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 17 Aug 2021 18:39:02 +0800 Subject: [PATCH 178/185] [TD-6166]: pointer not initialized led taos.exe exit on windows. (#7424) --- deps/MsvcLibX/src/realpath.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/MsvcLibX/src/realpath.c b/deps/MsvcLibX/src/realpath.c index ec1b606bf6..62e701bf3a 100644 --- a/deps/MsvcLibX/src/realpath.c +++ b/deps/MsvcLibX/src/realpath.c @@ -521,7 +521,7 @@ int ResolveLinksA(const char *path, char *buf, size_t bufsize) { /* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */ char *realpathU(const char *path, char *outbuf) { char *pOutbuf = outbuf; - char *pOutbuf1; + char *pOutbuf1 = NULL; char *pPath1 = NULL; char *pPath2 = NULL; int iErr; From 2d62d2fe2e7e092e8c7bddf3510b304e6275358b Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 17 Aug 2021 19:22:48 +0800 Subject: [PATCH 179/185] Hotfix/sangshuduo/td 6166 pointer not init for 2171 (#7428) * [TD-6166]: pointer not initialized led taos.exe exit on windows. * another line. --- deps/MsvcLibX/src/realpath.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/MsvcLibX/src/realpath.c b/deps/MsvcLibX/src/realpath.c index 62e701bf3a..e2ba755f2d 100644 --- a/deps/MsvcLibX/src/realpath.c +++ b/deps/MsvcLibX/src/realpath.c @@ -196,7 +196,7 @@ not_compact_enough: /* Normally defined in stdlib.h. 
Output buf must contain PATH_MAX bytes */ char *realpath(const char *path, char *outbuf) { char *pOutbuf = outbuf; - char *pOutbuf1; + char *pOutbuf1 = NULL; int iErr; const char *pc; From 5e15ac45fc1c9d341d3fcc18ed6c72b663872055 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 17 Aug 2021 19:24:37 +0800 Subject: [PATCH 180/185] Hotfix/sangshuduo/td 6166 pointer not init (#7427) * [TD-6166]: pointer not initialized led taos.exe exit on windows. * another line. --- deps/MsvcLibX/src/realpath.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/MsvcLibX/src/realpath.c b/deps/MsvcLibX/src/realpath.c index 62e701bf3a..e2ba755f2d 100644 --- a/deps/MsvcLibX/src/realpath.c +++ b/deps/MsvcLibX/src/realpath.c @@ -196,7 +196,7 @@ not_compact_enough: /* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */ char *realpath(const char *path, char *outbuf) { char *pOutbuf = outbuf; - char *pOutbuf1; + char *pOutbuf1 = NULL; int iErr; const char *pc; From 19feac31c96a61210e592589b4f376d3ea4df710 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 18 Aug 2021 01:13:22 +0000 Subject: [PATCH 181/185] [TD-6198] fix bigint sum error --- src/common/src/ttypes.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/common/src/ttypes.c b/src/common/src/ttypes.c index eeffe49adc..ee940531e6 100644 --- a/src/common/src/ttypes.c +++ b/src/common/src/ttypes.c @@ -38,11 +38,7 @@ const int32_t TYPE_BYTES[15] = { #define DO_STATICS(__sum, __min, __max, __minIndex, __maxIndex, _list, _index) \ do { \ - if (_list[(_index)] >= (INT64_MAX - (__sum))) { \ - __sum = INT64_MAX; \ - } else { \ - (__sum) += (_list)[(_index)]; \ - } \ + (__sum) += (_list)[(_index)]; \ if ((__min) > (_list)[(_index)]) { \ (__min) = (_list)[(_index)]; \ (__minIndex) = (_index); \ From 35b82a644f559a240f78cde78af37bd778dd4626 Mon Sep 17 00:00:00 2001 From: wpan Date: Wed, 18 Aug 2021 09:40:18 +0800 Subject: [PATCH 182/185] fix nested query bug --- src/client/inc/tscSubquery.h | 2 -- src/client/src/tscSubquery.c | 42 ++++++++---------------------------- src/client/src/tscUtil.c | 23 +++++++++----------- 3 files changed, 19 insertions(+), 48 deletions(-) diff --git a/src/client/inc/tscSubquery.h b/src/client/inc/tscSubquery.h index 99215551c3..a012ca5a7f 100644 --- a/src/client/inc/tscSubquery.h +++ b/src/client/inc/tscSubquery.h @@ -52,8 +52,6 @@ int tsInsertInitialCheck(SSqlObj *pSql); void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs); -void tscFreeRetrieveSupporters(SSqlObj *pSql); - void tscFreeRetrieveSup(SSqlObj *pSql); diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index c5afbe6dba..d16d744393 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2040,11 +2040,8 @@ void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) { for(int32_t i = 0; i < numOfSubs; ++i) { SSqlObj* pSub = pSql->pSubs[i]; assert(pSub != NULL); - - SRetrieveSupport* pSupport = pSub->param; - - tfree(pSupport->localBuffer); - tfree(pSupport); + + tscFreeRetrieveSup(pSub); taos_free_result(pSub); } @@ -2576,18 +2573,6 @@ void tscFreeRetrieveSup(SSqlObj *pSql) { tfree(trsupport); } -void tscFreeRetrieveSupporters(SSqlObj *pSql) { - for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) { - SSqlObj* pSub = pSql->pSubs[i]; - assert(pSub != NULL); - - tscFreeRetrieveSup(pSub); - } -} - - - - static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfRows); static void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, 
int numOfRows); @@ -2732,30 +2717,21 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO if (!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)) { int32_t code = pParentSql->res.code; - if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && pParentSql->retry < pParentSql->maxRetry) { - // remove the cached tableMeta and vgroup id list, and then parse the sql again - SSqlCmd* pParentCmd = &pParentSql->cmd; - STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pParentCmd, 0); - tscRemoveTableMetaBuf(pTableMetaInfo, pParentSql->self); - - pParentCmd->pTableMetaMap = tscCleanupTableMetaMap(pParentCmd->pTableMetaMap); - pParentCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); - - SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && userSql->retry < userSql->maxRetry) { tscFreeRetrieveSup(pParentSql); - tscFreeRetrieveSup(userSql); tscFreeSubobj(userSql); tfree(userSql->pSubs); - pParentSql->res.code = TSDB_CODE_SUCCESS; - pParentSql->retry++; + userSql->res.code = TSDB_CODE_SUCCESS; + userSql->retry++; - tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, - tstrerror(code), pParentSql->retry); + tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", userSql->self, + tstrerror(code), userSql->retry); - tscResetSqlCmd(&userSql->cmd, false); + tscResetSqlCmd(&userSql->cmd, true); code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index a79f50b63d..06b61d443d 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3758,29 +3758,26 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { // todo refactor tscDebug("0x%"PRIx64" all subquery response received, retry", pParentSql->self); - SSqlCmd* pParentCmd = &pParentSql->cmd; - STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pParentCmd, 0); - tscRemoveTableMetaBuf(pTableMetaInfo, pParentSql->self); - - pParentCmd->pTableMetaMap = tscCleanupTableMetaMap(pParentCmd->pTableMetaMap); - pParentCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); - SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; tscFreeRetrieveSup(pParentSql); - tscFreeRetrieveSup(userSql); + + if (code && !((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && userSql->retry < userSql->maxRetry)) { + tscAsyncResultOnError(pParentSql); + return; + } tscFreeSubobj(userSql); tfree(userSql->pSubs); - pParentSql->res.code = TSDB_CODE_SUCCESS; - pParentSql->retry++; + userSql->res.code = TSDB_CODE_SUCCESS; + userSql->retry++; - tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, - tstrerror(code), pParentSql->retry); + tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", userSql->self, + tstrerror(code), userSql->retry); - tscResetSqlCmd(&userSql->cmd, false); + tscResetSqlCmd(&userSql->cmd, true); code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { From ffcc93016a6a46fb21001e213d5ac26e10172afc Mon Sep 17 00:00:00 2001 
From: Shuduo Sang Date: Wed, 18 Aug 2021 16:12:36 +0800 Subject: [PATCH 183/185] Hotfix/sangshuduo/td 5136 taosdemo rework for master (#7433) * cherry pick from develop branch. * [TD-5136]: taosdemo simulate real senario. * update test case according to taosdemo change * adjust range of semi-random data. * make demo mode use different tag name and value. * change malloc to calloc for pid allocation. * fix typo. * fix binary length default value and group id logic. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 18f5877e09..016e27dc13 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -626,7 +626,7 @@ SArguments g_args = { "INT", // datatype "FLOAT", // datatype. DEFAULT_DATATYPE_NUM is 3 }, - 16, // len_of_binary + 64, // len_of_binary 4, // num_of_CPR 10, // num_of_connections/thread 0, // insert_interval @@ -2598,7 +2598,7 @@ static char* generateTagValuesForStb(SSuperTable* stbInfo, int64_t tableSeq) { if ((g_args.demo_mode) && (i == 0)) { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%"PRId64",", tableSeq % 10); + "%"PRId64",", (tableSeq % 10) + 1); } else { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, From 16fe1e27bf5f5e62ab98527d36819e3818bcd102 Mon Sep 17 00:00:00 2001 From: wpan Date: Wed, 18 Aug 2021 16:45:59 +0800 Subject: [PATCH 184/185] fix nested query bug --- src/client/src/tscSubquery.c | 13 +++++++++++-- src/client/src/tscUtil.c | 33 ++++++++++++++------------------- 2 files changed, 25 insertions(+), 21 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index d16d744393..816347cecd 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2717,10 +2717,19 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO if (!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)) { int32_t code = pParentSql->res.code; - SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + SSqlObj *userSql = NULL; + if (pParentSql->param) { + userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + } + + if (userSql == NULL) { + userSql = pParentSql; + } if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && userSql->retry < userSql->maxRetry) { - tscFreeRetrieveSup(pParentSql); + if (userSql != pParentSql) { + tscFreeRetrieveSup(pParentSql); + } tscFreeSubobj(userSql); tfree(userSql->pSubs); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 06b61d443d..2ee6a85431 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3747,8 +3747,7 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { int32_t index = ps->subqueryIndex; bool ret = subAndCheckDone(pSql, pParentSql, index); - tfree(ps); - pSql->param = NULL; + tscFreeRetrieveSup(pSql); if (!ret) { tscDebug("0x%"PRIx64" sub:0x%"PRIx64" orderOfSub:%d completed, not all subquery finished", pParentSql->self, pSql->self, index); @@ -3758,41 +3757,37 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { // todo refactor tscDebug("0x%"PRIx64" all subquery response received, retry", pParentSql->self); - SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; - - tscFreeRetrieveSup(pParentSql); - - if (code && !((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == 
TSDB_CODE_VND_INVALID_VGROUP_ID) && userSql->retry < userSql->maxRetry)) { + if (code && !((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && pParentSql->retry < pParentSql->maxRetry)) { tscAsyncResultOnError(pParentSql); return; } - tscFreeSubobj(userSql); - tfree(userSql->pSubs); + tscFreeSubobj(pParentSql); + tfree(pParentSql->pSubs); - userSql->res.code = TSDB_CODE_SUCCESS; - userSql->retry++; + pParentSql->res.code = TSDB_CODE_SUCCESS; + pParentSql->retry++; - tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", userSql->self, - tstrerror(code), userSql->retry); + tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, + tstrerror(code), pParentSql->retry); - tscResetSqlCmd(&userSql->cmd, true); + tscResetSqlCmd(&pParentSql->cmd, true); - code = tsParseSql(userSql, true); + code = tsParseSql(pParentSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; } if (code != TSDB_CODE_SUCCESS) { - userSql->res.code = code; - tscAsyncResultOnError(userSql); + pParentSql->res.code = code; + tscAsyncResultOnError(pParentSql); return; } - SQueryInfo *pQueryInfo = tscGetQueryInfo(&userSql->cmd); + SQueryInfo *pQueryInfo = tscGetQueryInfo(&pParentSql->cmd); - executeQuery(userSql, pQueryInfo); + executeQuery(pParentSql, pQueryInfo); return; } From ffe99d41d9eede67d473380f84834ab68e829b6a Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 19 Aug 2021 10:02:12 +0800 Subject: [PATCH 185/185] Hotfix/sangshuduo/td 5844 cmdline parameters align for master (#7444) * [TD-5844]: make cmd line parameter similar. * fix test case align with taosdemo change. * fix windows stack overflow issue. * fix mac compile error. * fix taosdemo cmdline parameter in tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py * make taos.exe use mysql style password input. * make taos shell and taosdump use mysql style password input. * determine scanf return value. * cherry pick from develop/feature branch. --- src/kit/shell/src/shellDarwin.c | 23 ++++++++++++++--- src/kit/shell/src/shellEngine.c | 42 ++++++++++++++++++-------------- src/kit/shell/src/shellLinux.c | 35 +++++++++++++++++++++++--- src/kit/shell/src/shellMain.c | 2 ++ src/kit/shell/src/shellWindows.c | 30 +++++++++++++++++------ src/kit/taosdemo/taosdemo.c | 8 ++++-- src/kit/taosdump/taosdump.c | 31 +++++++++++++++++++---- src/os/inc/osSystem.h | 2 ++ src/os/src/darwin/darwinSystem.c | 26 ++++++++++++++++++++ src/os/src/linux/osSystem.c | 24 ++++++++++++++++++ src/os/src/windows/wSystem.c | 14 +++++++++++ 11 files changed, 196 insertions(+), 41 deletions(-) diff --git a/src/kit/shell/src/shellDarwin.c b/src/kit/shell/src/shellDarwin.c index 4dcd8b3d50..5ca4537aeb 100644 --- a/src/kit/shell/src/shellDarwin.c +++ b/src/kit/shell/src/shellDarwin.c @@ -64,6 +64,10 @@ void printHelp() { exit(EXIT_SUCCESS); } +char DARWINCLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n" + "Copyright (c) 2020 by TAOS Data, Inc. 
All rights reserved.\n\n"; +char g_password[MAX_PASSWORD_SIZE]; + void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { wordexp_t full_path; for (int i = 1; i < argc; i++) { @@ -77,10 +81,21 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { } } // for password - else if (strcmp(argv[i], "-p") == 0) { - arguments->is_use_passwd = true; + else if (strncmp(argv[i], "-p", 2) == 0) { + strcpy(tsOsName, "Darwin"); + printf(DARWINCLIENT_VERSION, tsOsName, taos_get_client_info()); + if (strlen(argv[i]) == 2) { + printf("Enter password: "); + if (scanf("%s", g_password) > 1) { + fprintf(stderr, "password read error\n"); + } + getchar(); + } else { + tstrncpy(g_password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); + } + arguments->password = g_password; } - // for management port + // for management port else if (strcmp(argv[i], "-P") == 0) { if (i < argc - 1) { arguments->port = atoi(argv[++i]); @@ -98,7 +113,7 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { exit(EXIT_FAILURE); } } else if (strcmp(argv[i], "-c") == 0) { - if (i < argc - 1) { + if (i < argc - 1) { if (strlen(argv[++i]) >= TSDB_FILENAME_LEN) { fprintf(stderr, "config file path: %s overflow max len %d\n", argv[i], TSDB_FILENAME_LEN - 1); exit(EXIT_FAILURE); diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index 58f4b7ff02..51a25d59c4 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -65,7 +65,15 @@ extern TAOS *taos_connect_auth(const char *ip, const char *user, const char *aut */ TAOS *shellInit(SShellArguments *_args) { printf("\n"); - printf(CLIENT_VERSION, tsOsName, taos_get_client_info()); + if (!_args->is_use_passwd) { +#ifdef TD_WINDOWS + strcpy(tsOsName, "Windows"); +#elif defined(TD_DARWIN) + strcpy(tsOsName, "Darwin"); +#endif + printf(CLIENT_VERSION, tsOsName, taos_get_client_info()); + } + fflush(stdout); // set options before initializing @@ -73,9 +81,7 @@ TAOS *shellInit(SShellArguments *_args) { taos_options(TSDB_OPTION_TIMEZONE, _args->timezone); } - if (_args->is_use_passwd) { - if (_args->password == NULL) _args->password = getpass("Enter password: "); - } else { + if (!_args->is_use_passwd) { _args->password = TSDB_DEFAULT_PASS; } @@ -170,7 +176,7 @@ static int32_t shellRunSingleCommand(TAOS *con, char *command) { system("clear"); return 0; } - + if (regex_match(command, "^[\t ]*set[ \t]+max_binary_display_width[ \t]+(default|[1-9][0-9]*)[ \t;]*$", REG_EXTENDED | REG_ICASE)) { strtok(command, " \t"); strtok(NULL, " \t"); @@ -182,7 +188,7 @@ static int32_t shellRunSingleCommand(TAOS *con, char *command) { } return 0; } - + if (regex_match(command, "^[ \t]*source[\t ]+[^ ]+[ \t;]*$", REG_EXTENDED | REG_ICASE)) { /* If source file. 
*/ char *c_ptr = strtok(command, " ;"); @@ -247,7 +253,7 @@ int32_t shellRunCommand(TAOS* con, char* command) { esc = false; continue; } - + if (c == '\\') { esc = true; continue; @@ -336,8 +342,8 @@ void shellRunCommandOnServer(TAOS *con, char command[]) { } if (!tscIsUpdateQuery(pSql)) { // select and show kinds of commands - int error_no = 0; - + int error_no = 0; + int numOfRows = shellDumpResult(pSql, fname, &error_no, printMode); if (numOfRows < 0) { atomic_store_64(&result, 0); @@ -530,7 +536,7 @@ static int dumpResultToFile(const char* fname, TAOS_RES* tres) { fprintf(fp, "%s", fields[col].name); } fputc('\n', fp); - + int numOfRows = 0; do { int32_t* length = taos_fetch_lengths(tres); @@ -716,7 +722,7 @@ static int verticalPrintResult(TAOS_RES* tres) { int numOfRows = 0; int showMore = 1; - do { + do { if (numOfRows < resShowMaxNum) { printf("*************************** %d.row ***************************\n", numOfRows + 1); @@ -851,7 +857,7 @@ static int horizontalPrintResult(TAOS_RES* tres) { int numOfRows = 0; int showMore = 1; - + do { int32_t* length = taos_fetch_lengths(tres); if (numOfRows < resShowMaxNum) { @@ -867,7 +873,7 @@ static int horizontalPrintResult(TAOS_RES* tres) { printf("[You can add limit statement to show more or redirect results to specific file to get all.]\n"); showMore = 0; } - + numOfRows++; row = taos_fetch_row(tres); } while(row != NULL); @@ -909,7 +915,7 @@ void read_history() { if (errno != ENOENT) { fprintf(stderr, "Failed to open file %s, reason:%s\n", f_history, strerror(errno)); } -#endif +#endif return; } @@ -934,9 +940,9 @@ void write_history() { FILE *f = fopen(f_history, "w"); if (f == NULL) { -#ifndef WINDOWS +#ifndef WINDOWS fprintf(stderr, "Failed to open file %s for write, reason:%s\n", f_history, strerror(errno)); -#endif +#endif return; } @@ -982,13 +988,13 @@ void source_file(TAOS *con, char *fptr) { /* if (access(fname, F_OK) != 0) { fprintf(stderr, "ERROR: file %s is not exist\n", fptr); - + wordfree(&full_path); free(cmd); return; } */ - + FILE *f = fopen(fname, "r"); if (f == NULL) { fprintf(stderr, "ERROR: failed to open file %s\n", fname); diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c index dc74f6fcaa..d051d3535e 100644 --- a/src/kit/shell/src/shellLinux.c +++ b/src/kit/shell/src/shellLinux.c @@ -34,7 +34,7 @@ static char doc[] = ""; static char args_doc[] = ""; static struct argp_option options[] = { {"host", 'h', "HOST", 0, "TDengine server FQDN to connect. The default host is localhost."}, - {"password", 'p', "PASSWORD", OPTION_ARG_OPTIONAL, "The password to use when connecting to the server."}, + {"password", 'p', 0, 0, "The password to use when connecting to the server."}, {"port", 'P', "PORT", 0, "The TCP/IP port number to use for the connection."}, {"user", 'u', "USER", 0, "The user name to use when connecting to the server."}, {"auth", 'A', "Auth", 0, "The auth string to use when connecting to the server."}, @@ -63,8 +63,6 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { arguments->host = arg; break; case 'p': - arguments->is_use_passwd = true; - if (arg) arguments->password = arg; break; case 'P': if (arg) { @@ -160,12 +158,41 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { /* Our argp parser. */ static struct argp argp = {options, parse_opt, args_doc, doc}; +char LINUXCLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n" + "Copyright (c) 2020 by TAOS Data, Inc. 
All rights reserved.\n\n"; +char g_password[MAX_PASSWORD_SIZE]; + +static void parse_password( + int argc, char *argv[], SShellArguments *arguments) { + for (int i = 1; i < argc; i++) { + if (strncmp(argv[i], "-p", 2) == 0) { + strcpy(tsOsName, "Linux"); + printf(LINUXCLIENT_VERSION, tsOsName, taos_get_client_info()); + if (strlen(argv[i]) == 2) { + printf("Enter password: "); + if (scanf("%20s", g_password) > 1) { + fprintf(stderr, "password reading error\n"); + } + getchar(); + } else { + tstrncpy(g_password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); + } + arguments->password = g_password; + arguments->is_use_passwd = true; + } + } +} + void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { static char verType[32] = {0}; sprintf(verType, "version: %s\n", version); argp_program_version = verType; - + + if (argc > 1) { + parse_password(argc, argv, arguments); + } + argp_parse(&argp, argc, argv, 0, 0, arguments); if (arguments->abort) { #ifndef _ALPINE diff --git a/src/kit/shell/src/shellMain.c b/src/kit/shell/src/shellMain.c index 0c70386061..4e00b0d8ff 100644 --- a/src/kit/shell/src/shellMain.c +++ b/src/kit/shell/src/shellMain.c @@ -71,7 +71,9 @@ int checkVersion() { // Global configurations SShellArguments args = { .host = NULL, +#ifndef TD_WINDOWS .password = NULL, +#endif .user = NULL, .database = NULL, .timezone = NULL, diff --git a/src/kit/shell/src/shellWindows.c b/src/kit/shell/src/shellWindows.c index 87d11a3516..bf9afe4b80 100644 --- a/src/kit/shell/src/shellWindows.c +++ b/src/kit/shell/src/shellWindows.c @@ -19,6 +19,9 @@ extern char configDir[]; +char WINCLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n" + "Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.\n\n"; + void printVersion() { printf("version: %s\n", version); } @@ -61,6 +64,8 @@ void printHelp() { exit(EXIT_SUCCESS); } +char g_password[MAX_PASSWORD_SIZE]; + void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { for (int i = 1; i < argc; i++) { // for host @@ -73,11 +78,20 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { } } // for password - else if (strcmp(argv[i], "-p") == 0) { - arguments->is_use_passwd = true; - if (i < argc - 1 && argv[i + 1][0] != '-') { - arguments->password = argv[++i]; - } + else if (strncmp(argv[i], "-p", 2) == 0) { + arguments->is_use_passwd = true; + strcpy(tsOsName, "Windows"); + printf(WINCLIENT_VERSION, tsOsName, taos_get_client_info()); + if (strlen(argv[i]) == 2) { + printf("Enter password: "); + if (scanf("%s", g_password) > 1) { + fprintf(stderr, "password read error!\n"); + } + getchar(); + } else { + tstrncpy(g_password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); + } + arguments->password = g_password; } // for management port else if (strcmp(argv[i], "-P") == 0) { @@ -104,7 +118,7 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { exit(EXIT_FAILURE); } } else if (strcmp(argv[i], "-c") == 0) { - if (i < argc - 1) { + if (i < argc - 1) { char *tmp = argv[++i]; if (strlen(tmp) >= TSDB_FILENAME_LEN) { fprintf(stderr, "config file path: %s overflow max len %d\n", tmp, TSDB_FILENAME_LEN - 1); @@ -265,7 +279,7 @@ void *shellLoopQuery(void *arg) { if (command == NULL) return NULL; int32_t err = 0; - + do { memset(command, 0, MAX_COMMAND_SIZE); shellPrintPrompt(); @@ -274,7 +288,7 @@ void *shellLoopQuery(void *arg) { err = shellReadCommand(con, command); if (err) { break; - } + } } while (shellRunCommand(con, command) == 0); return NULL; diff 
--git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 016e27dc13..9a7d3cd25c 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -866,8 +866,12 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { arguments->user = argv[++i]; } else if (strncmp(argv[i], "-p", 2) == 0) { if (strlen(argv[i]) == 2) { - printf("Enter password:"); - scanf("%s", arguments->password); + printf("Enter password: "); + taosSetConsoleEcho(false); + if (scanf("%s", arguments->password) > 1) { + fprintf(stderr, "password read error!\n"); + } + taosSetConsoleEcho(true); } else { tstrncpy(arguments->password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); } diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index bea6e65106..c54b8da1b7 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -206,9 +206,9 @@ static struct argp_option options[] = { {"host", 'h', "HOST", 0, "Server host dumping data from. Default is localhost.", 0}, {"user", 'u', "USER", 0, "User name used to connect to server. Default is root.", 0}, #ifdef _TD_POWER_ - {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is powerdb.", 0}, + {"password", 'p', 0, 0, "User password to connect to server. Default is powerdb.", 0}, #else - {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is taosdata.", 0}, + {"password", 'p', 0, 0, "User password to connect to server. Default is taosdata.", 0}, #endif {"port", 'P', "PORT", 0, "Port to connect", 0}, {"cversion", 'v', "CVERION", 0, "client version", 0}, @@ -248,12 +248,14 @@ static struct argp_option options[] = { {0} }; +#define MAX_PASSWORD_SIZE 20 + /* Used by main to communicate with parse_opt. */ typedef struct arguments { // connection option char *host; char *user; - char *password; + char password[MAX_PASSWORD_SIZE]; uint16_t port; char cversion[12]; uint16_t mysqlFlag; @@ -376,7 +378,6 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { g_args.user = arg; break; case 'p': - g_args.password = arg; break; case 'P': g_args.port = atoi(arg); @@ -554,6 +555,25 @@ static void parse_precision_first( } } +static void parse_password( + int argc, char *argv[], SArguments *arguments) { + for (int i = 1; i < argc; i++) { + if (strncmp(argv[i], "-p", 2) == 0) { + if (strlen(argv[i]) == 2) { + printf("Enter password: "); + taosSetConsoleEcho(false); + if(scanf("%20s", arguments->password) > 1) { + errorPrint("%s() LN%d, password read error!\n", __func__, __LINE__); + } + taosSetConsoleEcho(true); + } else { + tstrncpy(arguments->password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); + } + argv[i] = ""; + } + } +} + static void parse_timestamp( int argc, char *argv[], SArguments *arguments) { for (int i = 1; i < argc; i++) { @@ -616,9 +636,10 @@ int main(int argc, char *argv[]) { int ret = 0; /* Parse our arguments; every option seen by parse_opt will be reflected in arguments. 
*/ - if (argc > 2) { + if (argc > 1) { parse_precision_first(argc, argv, &g_args); parse_timestamp(argc, argv, &g_args); + parse_password(argc, argv, &g_args); } argp_parse(&argp, argc, argv, 0, 0, &g_args); diff --git a/src/os/inc/osSystem.h b/src/os/inc/osSystem.h index e7a3ec13ae..4b79250740 100644 --- a/src/os/inc/osSystem.h +++ b/src/os/inc/osSystem.h @@ -24,6 +24,8 @@ void* taosLoadDll(const char *filename); void* taosLoadSym(void* handle, char* name); void taosCloseDll(void *handle); +int taosSetConsoleEcho(bool on); + #ifdef __cplusplus } #endif diff --git a/src/os/src/darwin/darwinSystem.c b/src/os/src/darwin/darwinSystem.c index 17cafdd664..6f296c9fef 100644 --- a/src/os/src/darwin/darwinSystem.c +++ b/src/os/src/darwin/darwinSystem.c @@ -29,4 +29,30 @@ void* taosLoadSym(void* handle, char* name) { void taosCloseDll(void *handle) { } +int taosSetConsoleEcho(bool on) +{ +#if 0 +#define ECHOFLAGS (ECHO | ECHOE | ECHOK | ECHONL) + int err; + struct termios term; + + if (tcgetattr(STDIN_FILENO, &term) == -1) { + perror("Cannot get the attribution of the terminal"); + return -1; + } + + if (on) + term.c_lflag|=ECHOFLAGS; + else + term.c_lflag &=~ECHOFLAGS; + + err = tcsetattr(STDIN_FILENO,TCSAFLUSH,&term); + if (err == -1 && err == EINTR) { + perror("Cannot set the attribution of the terminal"); + return -1; + } + +#endif + return 0; +} diff --git a/src/os/src/linux/osSystem.c b/src/os/src/linux/osSystem.c index 052b7a22a8..0cdb20dbdb 100644 --- a/src/os/src/linux/osSystem.c +++ b/src/os/src/linux/osSystem.c @@ -51,4 +51,28 @@ void taosCloseDll(void *handle) { } } +int taosSetConsoleEcho(bool on) +{ +#define ECHOFLAGS (ECHO | ECHOE | ECHOK | ECHONL) + int err; + struct termios term; + + if (tcgetattr(STDIN_FILENO, &term) == -1) { + perror("Cannot get the attribution of the terminal"); + return -1; + } + + if (on) + term.c_lflag|=ECHOFLAGS; + else + term.c_lflag &=~ECHOFLAGS; + + err = tcsetattr(STDIN_FILENO,TCSAFLUSH,&term); + if (err == -1 && err == EINTR) { + perror("Cannot set the attribution of the terminal"); + return -1; + } + + return 0; +} diff --git a/src/os/src/windows/wSystem.c b/src/os/src/windows/wSystem.c index 17cafdd664..564005f79b 100644 --- a/src/os/src/windows/wSystem.c +++ b/src/os/src/windows/wSystem.c @@ -30,3 +30,17 @@ void taosCloseDll(void *handle) { } +int taosSetConsoleEcho(bool on) +{ + HANDLE hStdin = GetStdHandle(STD_INPUT_HANDLE); + DWORD mode = 0; + GetConsoleMode(hStdin, &mode ); + if (on) { + mode |= ENABLE_ECHO_INPUT; + } else { + mode &= ~ENABLE_ECHO_INPUT; + } + SetConsoleMode(hStdin, mode); + + return 0; +}
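Note on the two tcompare.c patches above ([TD-6165]): both drop the temporary NUL-terminated copies plus wcsncmp in favour of a single memcmp over the stored bytes, which works because the code only reaches that branch once the two operands are known to have the same byte length. A minimal standalone sketch of the idea follows; it is illustrative only, not TDengine source, and the VarData layout and ncharEqual name are assumptions made for the example.

    /* Equality check for two length-prefixed wide-character values.
     * Assumes the payload length is stored in a 16-bit prefix, as the
     * length-prefixed NCHAR values handled in the patches are. */
    #include <stdint.h>
    #include <string.h>

    typedef struct {
      uint16_t len;      /* payload length in bytes */
      char     data[];   /* wide-character payload */
    } VarData;

    static int ncharEqual(const VarData *left, const VarData *right) {
      if (left->len != right->len) {
        return 0;                /* different lengths, cannot be equal */
      }
      /* Same length: a byte-wise compare of the payload decides equality
       * without allocating NUL-terminated copies for wcsncmp. */
      return memcmp(left->data, right->data, left->len) == 0;
    }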
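The final patch wires a taosSetConsoleEcho() helper into the shell, taosdemo and taosdump so the password prompt no longer echoes keystrokes, toggling echo off before the read and back on afterwards. A minimal standalone sketch of that POSIX termios pattern follows; it is illustrative only, and setConsoleEcho and readPasswordNoEcho are example names rather than the project's API.

    #include <stdio.h>
    #include <string.h>
    #include <termios.h>
    #include <unistd.h>

    #define MAX_PASSWORD_SIZE 20

    /* Same idea as the taosSetConsoleEcho() added for Linux in the patch:
     * clear or restore the terminal echo flags on stdin. */
    static int setConsoleEcho(int on) {
      struct termios term;
      if (tcgetattr(STDIN_FILENO, &term) == -1) {
        return -1;
      }
      if (on) {
        term.c_lflag |= (ECHO | ECHOE | ECHOK | ECHONL);
      } else {
        term.c_lflag &= ~(ECHO | ECHOE | ECHOK | ECHONL);
      }
      return tcsetattr(STDIN_FILENO, TCSAFLUSH, &term);
    }

    /* Prompt for a password with echo disabled, then restore echo. */
    static void readPasswordNoEcho(char *buf, size_t size) {
      printf("Enter password: ");
      fflush(stdout);
      setConsoleEcho(0);                     /* hide keystrokes */
      if (fgets(buf, (int)size, stdin) != NULL) {
        buf[strcspn(buf, "\n")] = '\0';      /* strip trailing newline */
      }
      setConsoleEcho(1);                     /* restore echo */
      printf("\n");
    }

    int main(void) {
      char password[MAX_PASSWORD_SIZE + 1] = {0};
      readPasswordNoEcho(password, sizeof(password));
      printf("read %zu characters\n", strlen(password));
      return 0;
    }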