diff --git a/documentation20/cn/02.getting-started/docs.md b/documentation20/cn/02.getting-started/docs.md
index fa36481646..c67b9e61b9 100644
--- a/documentation20/cn/02.getting-started/docs.md
+++ b/documentation20/cn/02.getting-started/docs.md
@@ -126,7 +126,7 @@ taos> source ;
 $ taosdemo
 ```
 
-该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "t0" 到 "t9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupdId,groupdId 被设置为 1 到 10, location 被设置为 "beijing" 或者 "shanghai"。
+该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupdId,groupdId 被设置为 1 到 10, location 被设置为 "beijing" 或者 "shanghai"。
 
 执行这条命令大概需要几分钟,最后共插入 1 亿条记录。
 
@@ -156,10 +156,10 @@ taos> select count(*) from test.meters where location="beijing";
 taos> select avg(current), max(voltage), min(phase) from test.meters where groupdId=10;
 ```
 
-- 对表 t10 按 10s 进行平均值、最大值和最小值聚合统计:
+- 对表 d10 按 10s 进行平均值、最大值和最小值聚合统计:
 
 ```mysql
-taos> select avg(current), max(voltage), min(phase) from test.t10 interval(10s);
+taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
 ```
 
 **Note:** taosdemo 命令本身带有很多选项,配置表的数目、记录条数等等,请执行 `taosdemo --help` 详细列出。您可以设置不同参数进行体验。
diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh
index 9044f23672..aa09013e53 100755
--- a/packaging/tools/install_client.sh
+++ b/packaging/tools/install_client.sh
@@ -182,7 +182,13 @@ function install_jemalloc() {
       ${csudo} /usr/bin/install -c -d /usr/local/share/man/man3
       ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
     fi
-    ${csudo} ldconfig
+
+    if [ -d /etc/ld.so.conf.d ]; then
+      ${csudo} echo "/usr/local/lib" > /etc/ld.so.conf.d/jemalloc.conf
+      ${csudo} ldconfig
+    else
+      echo "/etc/ld.so.conf.d not found!"
+    fi
   fi
 }
diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh
index 2c36e85970..a7dc931882 100755
--- a/packaging/tools/makeclient.sh
+++ b/packaging/tools/makeclient.sh
@@ -41,10 +41,10 @@ fi
 if [ "$osType" != "Darwin" ]; then
   if [ "$pagMode" == "lite" ]; then
-    #strip ${build_dir}/bin/taosd
+    #strip ${build_dir}/bin/taosd
     strip ${build_dir}/bin/taos
     bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh"
-  else
+  else
     bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo \
       ${script_dir}/remove_client.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh ${script_dir}/taosd-dump-cfg.gdb"
   fi
@@ -139,7 +139,7 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
   cp -r ${examples_dir}/C# ${install_dir}/examples
 fi
 # Copy driver
-mkdir -p ${install_dir}/driver
+mkdir -p ${install_dir}/driver
 cp ${lib_files} ${install_dir}/driver
 
 # Copy connector
@@ -168,7 +168,7 @@ fi
 # exit 1
 
-cd ${release_dir}
+cd ${release_dir}
 
 # install_dir has been distinguishes cluster from edege, so comments this code
 pkg_name=${install_dir}-${osType}-${cpuType}
@@ -195,6 +195,15 @@ if [ "$pagMode" == "lite" ]; then
   pkg_name=${pkg_name}-Lite
 fi
 
+if [ "$verType" == "beta" ]; then
+  pkg_name=${pkg_name}-${verType}
+elif [ "$verType" == "stable" ]; then
+  pkg_name=${pkg_name}
+else
+  echo "unknow verType, nor stable or beta"
+  exit 1
+fi
+
 if [ "$osType" != "Darwin" ]; then
   tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
 else
diff --git a/packaging/tools/makeclient_power.sh b/packaging/tools/makeclient_power.sh
index 6d10245b4f..89591cac23 100755
--- a/packaging/tools/makeclient_power.sh
+++ b/packaging/tools/makeclient_power.sh
@@ -41,10 +41,10 @@ fi
 if [ "$osType" != "Darwin" ]; then
 #  if [ "$pagMode" == "lite" ]; then
-#    strip ${build_dir}/bin/powerd
+#    strip ${build_dir}/bin/powerd
 #    strip ${build_dir}/bin/power
 #    bin_files="${build_dir}/bin/power ${script_dir}/remove_client_power.sh"
-#  else
+#  else
 #    bin_files="${build_dir}/bin/power ${build_dir}/bin/powerdemo ${script_dir}/remove_client_power.sh ${script_dir}/set_core.sh"
 #  fi
   lib_files="${build_dir}/lib/libtaos.so.${version}"
@@ -67,6 +67,39 @@ mkdir -p ${install_dir}
 mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
 mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg
 
+if [ -f ${build_dir}/bin/jemalloc-config ]; then
+  mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
+  cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
+  if [ -f ${build_dir}/bin/jemalloc.sh ]; then
+    cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
+  fi
+  if [ -f ${build_dir}/bin/jeprof ]; then
+    cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
+  fi
+  if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
+    cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
+  fi
+  if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
+    cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
+    ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
+  fi
+  if [ -f ${build_dir}/lib/libjemalloc.a ]; then
+    cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
+  fi
+  if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
+    cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
+  fi
+  if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
+    cp ${build_dir}/lib/pkgconfig/jemalloc.pc
${install_dir}/jemalloc/lib/pkgconfig + fi + if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then + cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc + fi + if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then + cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3 + fi +fi + sed -i '/dataDir/ {s/taos/power/g}' ${install_dir}/cfg/taos.cfg sed -i '/logDir/ {s/taos/power/g}' ${install_dir}/cfg/taos.cfg sed -i "s/TDengine/PowerDB/g" ${install_dir}/cfg/taos.cfg @@ -77,11 +110,11 @@ if [ "$osType" != "Darwin" ]; then strip ${build_dir}/bin/taos cp ${build_dir}/bin/taos ${install_dir}/bin/power cp ${script_dir}/remove_power.sh ${install_dir}/bin - else + else cp ${build_dir}/bin/taos ${install_dir}/bin/power cp ${script_dir}/remove_power.sh ${install_dir}/bin - cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo - cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump + cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo + cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump cp ${script_dir}/set_core.sh ${install_dir}/bin cp ${script_dir}/get_client.sh ${install_dir}/bin cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin @@ -158,15 +191,15 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then cp -r ${examples_dir}/JDBC ${install_dir}/examples cp -r ${examples_dir}/matlab ${install_dir}/examples sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/matlab/TDengineDemo.m - cp -r ${examples_dir}/python ${install_dir}/examples + cp -r ${examples_dir}/python ${install_dir}/examples sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/python/read_example.py cp -r ${examples_dir}/R ${install_dir}/examples sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/R/command.txt - cp -r ${examples_dir}/go ${install_dir}/examples + cp -r ${examples_dir}/go ${install_dir}/examples sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/go/taosdemo.go fi # Copy driver -mkdir -p ${install_dir}/driver +mkdir -p ${install_dir}/driver cp ${lib_files} ${install_dir}/driver # Copy connector @@ -188,11 +221,11 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then echo "WARNING: go connector not found, please check if want to use it!" 
fi cp -r ${connector_dir}/python ${install_dir}/connector - + sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/cinterface.py - + sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/subscription.py - + sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/connection.py fi # Copy release note @@ -200,7 +233,7 @@ fi # exit 1 -cd ${release_dir} +cd ${release_dir} if [ "$verMode" == "cluster" ]; then pkg_name=${install_dir}-${osType}-${cpuType} @@ -217,8 +250,8 @@ fi if [ "$verType" == "beta" ]; then pkg_name=${pkg_name}-${verType} -elif [ "$verType" == "stable" ]; then - pkg_name=${pkg_name} +elif [ "$verType" == "stable" ]; then + pkg_name=${pkg_name} else echo "unknow verType, nor stable or beta" exit 1 diff --git a/src/balance/src/bnThread.c b/src/balance/src/bnThread.c index 44cb24effa..c5dca2da85 100644 --- a/src/balance/src/bnThread.c +++ b/src/balance/src/bnThread.c @@ -23,6 +23,8 @@ static SBnThread tsBnThread; static void *bnThreadFunc(void *arg) { + setThreadName("bnThreadd"); + while (1) { pthread_mutex_lock(&tsBnThread.mutex); if (tsBnThread.stop) { diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index 54ac89b3f3..264070d70f 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -94,11 +94,23 @@ typedef struct SVgroupTableInfo { SArray *itemList; // SArray } SVgroupTableInfo; +typedef struct SBlockKeyTuple { + TSKEY skey; + void* payloadAddr; +} SBlockKeyTuple; + +typedef struct SBlockKeyInfo { + int32_t maxBytesAlloc; + SBlockKeyTuple* pKeyTuple; +} SBlockKeyInfo; + int32_t converToStr(char *str, int type, void *buf, int32_t bufSize, int32_t *len); int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, SName* name, STableMeta* pTableMeta, STableDataBlocks** dataBlocks); void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta); -void tscSortRemoveDataBlockDupRows(STableDataBlocks* dataBuf); +void tscSortRemoveDataBlockDupRowsRaw(STableDataBlocks* dataBuf); +int tscSortRemoveDataBlockDupRows(STableDataBlocks* dataBuf, SBlockKeyInfo* pBlkKeyInfo); +int32_t tsSetBlockInfo(SSubmitBlk *pBlocks, const STableMeta *pTableMeta, int32_t numOfRows); void tscDestroyBoundColumnInfo(SParsedDataColInfo* pColInfo); void doRetrieveSubqueryData(SSchedMsg *pMsg); diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 9318b7a7a6..5b5224e92e 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -88,13 +88,43 @@ typedef struct SBoundColumn { int32_t offset; // all column offset value } SBoundColumn; +typedef struct { + uint16_t schemaColIdx; + uint16_t boundIdx; + uint16_t finalIdx; +} SBoundIdxInfo; + +typedef enum _COL_ORDER_STATUS { + ORDER_STATUS_UNKNOWN = 0, + ORDER_STATUS_ORDERED = 1, + ORDER_STATUS_DISORDERED = 2, +} EOrderStatus; + typedef struct SParsedDataColInfo { - int16_t numOfCols; - int16_t numOfBound; - int32_t *boundedColumns; - SBoundColumn *cols; + int16_t numOfCols; + int16_t numOfBound; + int32_t * boundedColumns; // bounded column idx according to schema + SBoundColumn * cols; + SBoundIdxInfo *colIdxInfo; + int8_t orderStatus; // bounded columns: } SParsedDataColInfo; +#define IS_DATA_COL_ORDERED(s) ((s) == (int8_t)ORDER_STATUS_ORDERED) + +typedef struct { + SSchema * pSchema; + int16_t sversion; + int32_t flen; + uint16_t nCols; + void * buf; + void * pDataBlock; + SSubmitBlk *pSubmitBlk; +} SMemRowBuilder; + +typedef struct { + TDRowLenT allNullLen; +} SMemRowHelper; + 
typedef struct STableDataBlocks { SName tableName; int8_t tsSource; // where does the UNIX timestamp come from, server or client @@ -108,13 +138,15 @@ typedef struct STableDataBlocks { uint32_t size; STableMeta *pTableMeta; // the tableMeta of current table, the table meta will be used during submit, keep a ref to avoid to be removed from cache char *pData; - - SParsedDataColInfo boundColumnInfo; + bool cloned; + + SParsedDataColInfo boundColumnInfo; // for parameter ('?') binding - uint32_t numOfAllocedParams; - uint32_t numOfParams; - SParamInfo *params; + uint32_t numOfAllocedParams; + uint32_t numOfParams; + SParamInfo * params; + SMemRowHelper rowHelper; } STableDataBlocks; typedef struct { @@ -128,6 +160,7 @@ typedef struct SInsertStatementParam { SHashObj *pTableBlockHashList; // data block for each table SArray *pDataBlocks; // SArray. Merged submit block for each vgroup int8_t schemaAttached; // denote if submit block is built with table schema or not + uint8_t payloadType; // EPayloadType. 0: K-V payload for non-prepare insert, 1: rawPayload for prepare insert STagData tagData; // NOTE: pTagData->data is used as a variant length array int32_t batchSize; // for parameter ('?') binding and batch processing @@ -139,6 +172,14 @@ typedef struct SInsertStatementParam { char *sql; // current sql statement position } SInsertStatementParam; +typedef enum { + PAYLOAD_TYPE_KV = 0, + PAYLOAD_TYPE_RAW = 1, +} EPayloadType; + +#define IS_RAW_PAYLOAD(t) \ + (((int)(t)) == PAYLOAD_TYPE_RAW) // 0: K-V payload for non-prepare insert, 1: rawPayload for prepare insert + // TODO extract sql parser supporter typedef struct { int command; @@ -243,6 +284,7 @@ typedef struct SSqlObj { void * pStream; void * pSubscription; char * sqlstr; + void * pBuf; // table meta buffer char parseRetry; char retry; char maxRetry; @@ -382,10 +424,15 @@ extern int tscRefId; extern int tscNumOfObj; // number of existed sqlObj in current process. 
extern int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo); - + void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArray* tables); int16_t getNewResColId(SSqlCmd* pCmd); +int32_t schemaIdxCompar(const void *lhs, const void *rhs); +int32_t boundIdxCompar(const void *lhs, const void *rhs); +int initSMemRowHelper(SMemRowHelper *pHelper, SSchema *pSSchema, uint16_t nCols, uint16_t allNullColsLen); +int32_t getExtendedRowSize(STableComInfo *tinfo); + #ifdef __cplusplus } #endif diff --git a/src/client/src/taos.def b/src/client/src/taos.def index 43cd819061..7d3b8e80c2 100644 --- a/src/client/src/taos.def +++ b/src/client/src/taos.def @@ -7,11 +7,16 @@ taos_connect_auth taos_close taos_stmt_init taos_stmt_prepare +taos_stmt_set_tbname_tags +taos_stmt_set_tbname +taos_stmt_is_insert +taos_stmt_num_params taos_stmt_bind_param taos_stmt_add_batch taos_stmt_execute taos_stmt_use_result taos_stmt_close +taos_stmt_errstr taos_query taos_fetch_row taos_result_precision @@ -37,6 +42,4 @@ taos_consume taos_unsubscribe taos_open_stream taos_close_stream -taos_fetch_block taos_load_table_info - diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 26d9cf0e49..d26cbebc34 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -38,10 +38,33 @@ enum { TSDB_USE_CLI_TS = 1, }; +static uint8_t TRUE_VALUE = (uint8_t)TSDB_TRUE; +static uint8_t FALSE_VALUE = (uint8_t)TSDB_FALSE; + static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t *numOfRows); static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDataColInfo *pColInfo, SSchema *pSchema, char *str, char **end); +int32_t getExtendedRowSize(STableComInfo *tinfo) { + return tinfo->rowSize + PAYLOAD_HEADER_LEN + PAYLOAD_COL_HEAD_LEN * tinfo->numOfColumns; +} +int initSMemRowHelper(SMemRowHelper *pHelper, SSchema *pSSchema, uint16_t nCols, uint16_t allNullColsLen) { + pHelper->allNullLen = allNullColsLen; // TODO: get allNullColsLen when creating or altering table meta + if (pHelper->allNullLen == 0) { + for (uint16_t i = 0; i < nCols; ++i) { + uint8_t type = pSSchema[i].type; + int32_t typeLen = TYPE_BYTES[type]; + pHelper->allNullLen += typeLen; + if (TSDB_DATA_TYPE_BINARY == type) { + pHelper->allNullLen += (VARSTR_HEADER_SIZE + CHAR_BYTES); + } else if (TSDB_DATA_TYPE_NCHAR == type) { + int len = VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE; + pHelper->allNullLen += len; + } + } + } + return 0; +} static int32_t tscToDouble(SStrToken *pToken, double *value, char **endPtr) { errno = 0; *value = strtold(pToken->z, endPtr); @@ -378,6 +401,342 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha return TSDB_CODE_SUCCESS; } +static FORCE_INLINE TDRowLenT tsSetPayloadColValue(char *payloadStart, char *payload, int16_t columnId, + uint8_t columnType, const void *value, uint16_t valueLen, TDRowTLenT tOffset) { + payloadColSetId(payload, columnId); + payloadColSetType(payload, columnType); + memcpy(POINTER_SHIFT(payloadStart,tOffset), value, valueLen); + return valueLen; +} + +static int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pToken, char *payloadStart, char *primaryKeyStart, + char *payload, char *msg, char **str, bool primaryKey, int16_t timePrec, + TDRowTLenT tOffset, TDRowLenT *sizeAppend, TDRowLenT *dataRowColDeltaLen, + TDRowLenT *kvRowColLen) { + int64_t iv; + int32_t ret; + char * endptr = NULL; + + if (IS_NUMERIC_TYPE(pSchema->type) && pToken->n == 0) { + return 
tscInvalidOperationMsg(msg, "invalid numeric data", pToken->z); + } + + switch (pSchema->type) { + case TSDB_DATA_TYPE_BOOL: { // bool + if (isNullStr(pToken)) { + *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, + getNullValue(TSDB_DATA_TYPE_BOOL), TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset); + } else { + if ((pToken->type == TK_BOOL || pToken->type == TK_STRING) && (pToken->n != 0)) { + if (strncmp(pToken->z, "true", pToken->n) == 0) { + *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &TRUE_VALUE, + TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset); + *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BOOL]); + } else if (strncmp(pToken->z, "false", pToken->n) == 0) { + *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &FALSE_VALUE, + TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset); + *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BOOL]); + } else { + return tscSQLSyntaxErrMsg(msg, "invalid bool data", pToken->z); + } + } else if (pToken->type == TK_INTEGER) { + iv = strtoll(pToken->z, NULL, 10); + *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, + ((iv == 0) ? &FALSE_VALUE : &TRUE_VALUE), TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset); + *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BOOL]); + } else if (pToken->type == TK_FLOAT) { + double dv = strtod(pToken->z, NULL); + *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, + ((dv == 0) ? &FALSE_VALUE : &TRUE_VALUE), TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset); + *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BOOL]); + } else { + return tscInvalidOperationMsg(msg, "invalid bool data", pToken->z); + } + } + break; + } + + case TSDB_DATA_TYPE_TINYINT: + if (isNullStr(pToken)) { + *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, + getNullValue(TSDB_DATA_TYPE_TINYINT), TYPE_BYTES[TSDB_DATA_TYPE_TINYINT], tOffset); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid tinyint data", pToken->z); + } else if (!IS_VALID_TINYINT(iv)) { + return tscInvalidOperationMsg(msg, "data overflow", pToken->z); + } + + uint8_t tmpVal = (uint8_t)iv; + *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, + TYPE_BYTES[TSDB_DATA_TYPE_TINYINT], tOffset); + *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_TINYINT]); + } + + break; + + case TSDB_DATA_TYPE_UTINYINT: + if (isNullStr(pToken)) { + *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, + getNullValue(TSDB_DATA_TYPE_UTINYINT), TYPE_BYTES[TSDB_DATA_TYPE_UTINYINT], tOffset); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid unsigned tinyint data", pToken->z); + } else if (!IS_VALID_UTINYINT(iv)) { + return tscInvalidOperationMsg(msg, "unsigned tinyint data overflow", pToken->z); + } + + uint8_t tmpVal = (uint8_t)iv; + *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, + TYPE_BYTES[TSDB_DATA_TYPE_UTINYINT], tOffset); + *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_UTINYINT]); + } + + break; + + case 
TSDB_DATA_TYPE_SMALLINT: + if (isNullStr(pToken)) { + *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, + getNullValue(TSDB_DATA_TYPE_SMALLINT), TYPE_BYTES[TSDB_DATA_TYPE_SMALLINT], tOffset); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid smallint data", pToken->z); + } else if (!IS_VALID_SMALLINT(iv)) { + return tscInvalidOperationMsg(msg, "smallint data overflow", pToken->z); + } + + int16_t tmpVal = (int16_t)iv; + *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, + TYPE_BYTES[TSDB_DATA_TYPE_SMALLINT], tOffset); + *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_SMALLINT]); + } + + break; + + case TSDB_DATA_TYPE_USMALLINT: + if (isNullStr(pToken)) { + *sizeAppend = + tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, + getNullValue(TSDB_DATA_TYPE_USMALLINT), TYPE_BYTES[TSDB_DATA_TYPE_USMALLINT], tOffset); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid unsigned smallint data", pToken->z); + } else if (!IS_VALID_USMALLINT(iv)) { + return tscInvalidOperationMsg(msg, "unsigned smallint data overflow", pToken->z); + } + + uint16_t tmpVal = (uint16_t)iv; + *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, + TYPE_BYTES[TSDB_DATA_TYPE_USMALLINT], tOffset); + *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_USMALLINT]); + } + + break; + + case TSDB_DATA_TYPE_INT: + if (isNullStr(pToken)) { + *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, + getNullValue(TSDB_DATA_TYPE_INT), TYPE_BYTES[TSDB_DATA_TYPE_INT], tOffset); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid int data", pToken->z); + } else if (!IS_VALID_INT(iv)) { + return tscInvalidOperationMsg(msg, "int data overflow", pToken->z); + } + + int32_t tmpVal = (int32_t)iv; + *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, + TYPE_BYTES[TSDB_DATA_TYPE_INT], tOffset); + *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_INT]); + } + + break; + + case TSDB_DATA_TYPE_UINT: + if (isNullStr(pToken)) { + *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, + getNullValue(TSDB_DATA_TYPE_UINT), TYPE_BYTES[TSDB_DATA_TYPE_UINT], tOffset); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid unsigned int data", pToken->z); + } else if (!IS_VALID_UINT(iv)) { + return tscInvalidOperationMsg(msg, "unsigned int data overflow", pToken->z); + } + + uint32_t tmpVal = (uint32_t)iv; + *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, + TYPE_BYTES[TSDB_DATA_TYPE_UINT], tOffset); + *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_UINT]); + } + + break; + + case TSDB_DATA_TYPE_BIGINT: + if (isNullStr(pToken)) { + *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, + getNullValue(TSDB_DATA_TYPE_BIGINT), TYPE_BYTES[TSDB_DATA_TYPE_BIGINT], tOffset); + } else { + ret = 
tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid bigint data", pToken->z); + } else if (!IS_VALID_BIGINT(iv)) { + return tscInvalidOperationMsg(msg, "bigint data overflow", pToken->z); + } + + *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &iv, + TYPE_BYTES[TSDB_DATA_TYPE_BIGINT], tOffset); + *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BIGINT]); + } + break; + + case TSDB_DATA_TYPE_UBIGINT: + if (isNullStr(pToken)) { + *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, + getNullValue(TSDB_DATA_TYPE_UBIGINT), TYPE_BYTES[TSDB_DATA_TYPE_UBIGINT], tOffset); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid unsigned bigint data", pToken->z); + } else if (!IS_VALID_UBIGINT((uint64_t)iv)) { + return tscInvalidOperationMsg(msg, "unsigned bigint data overflow", pToken->z); + } + + uint64_t tmpVal = (uint64_t)iv; + *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, + TYPE_BYTES[TSDB_DATA_TYPE_UBIGINT], tOffset); + *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_UBIGINT]); + } + break; + + case TSDB_DATA_TYPE_FLOAT: + if (isNullStr(pToken)) { + *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, + getNullValue(TSDB_DATA_TYPE_FLOAT), TYPE_BYTES[TSDB_DATA_TYPE_FLOAT], tOffset); + } else { + double dv; + if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { + return tscInvalidOperationMsg(msg, "illegal float data", pToken->z); + } + + if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || dv > FLT_MAX || dv < -FLT_MAX || isinf(dv) || + isnan(dv)) { + return tscInvalidOperationMsg(msg, "illegal float data", pToken->z); + } + + float tmpVal = (float)dv; + *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, + TYPE_BYTES[TSDB_DATA_TYPE_FLOAT], tOffset); + *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_FLOAT]); + } + break; + + case TSDB_DATA_TYPE_DOUBLE: + if (isNullStr(pToken)) { + *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, + getNullValue(TSDB_DATA_TYPE_DOUBLE), TYPE_BYTES[TSDB_DATA_TYPE_DOUBLE], tOffset); + } else { + double dv; + if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { + return tscInvalidOperationMsg(msg, "illegal double data", pToken->z); + } + + if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || isinf(dv) || isnan(dv)) { + return tscInvalidOperationMsg(msg, "illegal double data", pToken->z); + } + + *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &dv, + TYPE_BYTES[TSDB_DATA_TYPE_DOUBLE], tOffset); + *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_DOUBLE]); + } + break; + + case TSDB_DATA_TYPE_BINARY: + // binary data cannot be null-terminated char string, otherwise the last char of the string is lost + if (pToken->type == TK_NULL) { + payloadColSetId(payload, pSchema->colId); + payloadColSetType(payload, pSchema->type); + memcpy(POINTER_SHIFT(payloadStart, tOffset), getNullValue(TSDB_DATA_TYPE_BINARY), VARSTR_HEADER_SIZE + CHAR_BYTES); + *sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + CHAR_BYTES); + } else { // too long values will return invalid sql, not be truncated 
automatically + if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) { // todo refactor + return tscInvalidOperationMsg(msg, "string data overflow", pToken->z); + } + // STR_WITH_SIZE_TO_VARSTR(payload, pToken->z, pToken->n); + + payloadColSetId(payload, pSchema->colId); + payloadColSetType(payload, pSchema->type); + varDataSetLen(POINTER_SHIFT(payloadStart,tOffset), pToken->n); + memcpy(varDataVal(POINTER_SHIFT(payloadStart,tOffset)), pToken->z, pToken->n); + *sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + pToken->n); + *dataRowColDeltaLen += (TDRowLenT)(pToken->n - CHAR_BYTES); + *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + VARSTR_HEADER_SIZE + pToken->n); + } + + break; + + case TSDB_DATA_TYPE_NCHAR: + if (pToken->type == TK_NULL) { + payloadColSetId(payload, pSchema->colId); + payloadColSetType(payload, pSchema->type); + memcpy(POINTER_SHIFT(payloadStart,tOffset), getNullValue(TSDB_DATA_TYPE_NCHAR), VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE); + *sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE); + } else { + // if the converted output len is over than pColumnModel->bytes, return error: 'Argument list too long' + int32_t output = 0; + payloadColSetId(payload, pSchema->colId); + payloadColSetType(payload, pSchema->type); + if (!taosMbsToUcs4(pToken->z, pToken->n, varDataVal(POINTER_SHIFT(payloadStart,tOffset)), + pSchema->bytes - VARSTR_HEADER_SIZE, &output)) { + char buf[512] = {0}; + snprintf(buf, tListLen(buf), "%s", strerror(errno)); + return tscInvalidOperationMsg(msg, buf, pToken->z); + } + + varDataSetLen(POINTER_SHIFT(payloadStart,tOffset), output); + + *sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + output); + *dataRowColDeltaLen += (TDRowLenT)(output - sizeof(uint32_t)); + *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + VARSTR_HEADER_SIZE + output); + } + break; + + case TSDB_DATA_TYPE_TIMESTAMP: { + if (pToken->type == TK_NULL) { + if (primaryKey) { + // When building SKVRow primaryKey, we should not skip even with NULL value. + int64_t tmpVal = 0; + *sizeAppend = tsSetPayloadColValue(payloadStart, primaryKeyStart, pSchema->colId, pSchema->type, &tmpVal, + TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], tOffset); + *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP]); + } else { + *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, + getNullValue(TSDB_DATA_TYPE_TIMESTAMP), + TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], tOffset); + } + } else { + int64_t tmpVal; + if (tsParseTime(pToken, &tmpVal, str, msg, timePrec) != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid timestamp", pToken->z); + } + + *sizeAppend = tsSetPayloadColValue(payloadStart, primaryKey ? primaryKeyStart : payload, pSchema->colId, + pSchema->type, &tmpVal, TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], tOffset); + *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP]); + } + + break; + } + } + + return TSDB_CODE_SUCCESS; +} + /* * The server time/client time should not be mixed up in one sql string * Do not employ sort operation is not involved if server time is used. 
@@ -414,24 +773,38 @@ int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start) { return TSDB_CODE_SUCCESS; } -int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, int32_t *len, - char *tmpTokenBuf, SInsertStatementParam* pInsertParam) { - int32_t index = 0; - SStrToken sToken = {0}; - char *payload = pDataBlocks->pData + pDataBlocks->size; +int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, int32_t *len, char *tmpTokenBuf, + SInsertStatementParam *pInsertParam) { + int32_t index = 0; + SStrToken sToken = {0}; + + SMemRowHelper *pHelper = &pDataBlocks->rowHelper; + char * payload = pDataBlocks->pData + pDataBlocks->size; SParsedDataColInfo *spd = &pDataBlocks->boundColumnInfo; - SSchema *schema = tscGetTableSchema(pDataBlocks->pTableMeta); + SSchema * schema = tscGetTableSchema(pDataBlocks->pTableMeta); + + TDRowTLenT dataRowLen = pHelper->allNullLen; + TDRowTLenT kvRowLen = TD_MEM_ROW_KV_VER_SIZE; + TDRowTLenT payloadValOffset = 0; + TDRowLenT colValOffset = 0; + ASSERT(dataRowLen > 0); + + payloadSetNCols(payload, spd->numOfBound); + payloadValOffset = payloadValuesOffset(payload); // rely on payloadNCols + // payloadSetTLen(payload, payloadValOffset); + + char *kvPrimaryKeyStart = payload + PAYLOAD_HEADER_LEN; // primaryKey in 1st column tuple + char *kvStart = kvPrimaryKeyStart + PAYLOAD_COL_HEAD_LEN; // the column tuple behind the primaryKey // 1. set the parsed value from sql string - int32_t rowSize = 0; for (int i = 0; i < spd->numOfBound; ++i) { // the start position in data block buffer of current value in sql int32_t colIndex = spd->boundedColumns[i]; - char *start = payload + spd->cols[colIndex].offset; - SSchema *pSchema = &schema[colIndex]; - rowSize += pSchema->bytes; + char *start = payload + spd->cols[colIndex].offset; + + SSchema *pSchema = &schema[colIndex]; // get colId here index = 0; sToken = tStrGetToken(*str, &index, true); @@ -453,7 +826,8 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i int16_t type = sToken.type; if ((type != TK_NOW && type != TK_INTEGER && type != TK_STRING && type != TK_FLOAT && type != TK_BOOL && - type != TK_NULL && type != TK_HEX && type != TK_OCT && type != TK_BIN) || (sToken.n == 0) || (type == TK_RP)) { + type != TK_NULL && type != TK_HEX && type != TK_OCT && type != TK_BIN) || + (sToken.n == 0) || (type == TK_RP)) { return tscSQLSyntaxErrMsg(pInsertParam->msg, "invalid data or symbol", sToken.z); } @@ -467,10 +841,10 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i if (sToken.n >= TSDB_MAX_BYTES_PER_ROW) { return tscSQLSyntaxErrMsg(pInsertParam->msg, "too long string", sToken.z); } - + for (uint32_t k = 1; k < sToken.n - 1; ++k) { if (sToken.z[k] == '\\' || (sToken.z[k] == delim && sToken.z[k + 1] == delim)) { - tmpTokenBuf[j] = sToken.z[k + 1]; + tmpTokenBuf[j] = sToken.z[k + 1]; cnt++; j++; @@ -487,42 +861,54 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i sToken.n -= 2 + cnt; } - bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX); - int32_t ret = tsParseOneColumn(pSchema, &sToken, start, pInsertParam->msg, str, isPrimaryKey, timePrec); + bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX); + TDRowLenT dataRowDeltaColLen = 0; // When combine the data as SDataRow, the delta len between all NULL columns. 
+ TDRowLenT kvRowColLen = 0; + TDRowLenT colValAppended = 0; + + if (!IS_DATA_COL_ORDERED(spd->orderStatus)) { + ASSERT(spd->colIdxInfo != NULL); + if(!isPrimaryKey) { + kvStart = POINTER_SHIFT(kvPrimaryKeyStart, spd->colIdxInfo[i].finalIdx * PAYLOAD_COL_HEAD_LEN); + } else { + ASSERT(spd->colIdxInfo[i].finalIdx == 0); + } + } + // the primary key locates in 1st column + int32_t ret = tsParseOneColumnKV(pSchema, &sToken, payload, kvPrimaryKeyStart, kvStart, pInsertParam->msg, str, + isPrimaryKey, timePrec, payloadValOffset + colValOffset, &colValAppended, + &dataRowDeltaColLen, &kvRowColLen); if (ret != TSDB_CODE_SUCCESS) { return ret; } - if (isPrimaryKey && tsCheckTimestamp(pDataBlocks, start) != TSDB_CODE_SUCCESS) { - tscInvalidOperationMsg(pInsertParam->msg, "client time/server time can not be mixed up", sToken.z); - return TSDB_CODE_TSC_INVALID_TIME_STAMP; - } - } - - // 2. set the null value for the columns that do not assign values - if (spd->numOfBound < spd->numOfCols) { - char *ptr = payload; - - for (int32_t i = 0; i < spd->numOfCols; ++i) { - if (!spd->cols[i].hasVal) { // current column do not have any value to insert, set it to null - if (schema[i].type == TSDB_DATA_TYPE_BINARY) { - varDataSetLen(ptr, sizeof(int8_t)); - *(uint8_t*) varDataVal(ptr) = TSDB_DATA_BINARY_NULL; - } else if (schema[i].type == TSDB_DATA_TYPE_NCHAR) { - varDataSetLen(ptr, sizeof(int32_t)); - *(uint32_t*) varDataVal(ptr) = TSDB_DATA_NCHAR_NULL; - } else { - setNull(ptr, schema[i].type, schema[i].bytes); - } + if (isPrimaryKey) { + if (tsCheckTimestamp(pDataBlocks, payloadValues(payload)) != TSDB_CODE_SUCCESS) { + tscInvalidOperationMsg(pInsertParam->msg, "client time/server time can not be mixed up", sToken.z); + return TSDB_CODE_TSC_INVALID_TIME_STAMP; + } + payloadColSetOffset(kvPrimaryKeyStart, colValOffset); + } else { + payloadColSetOffset(kvStart, colValOffset); + if (IS_DATA_COL_ORDERED(spd->orderStatus)) { + kvStart += PAYLOAD_COL_HEAD_LEN; // move to next column } - - ptr += schema[i].bytes; } - - rowSize = (int32_t)(ptr - payload); + + colValOffset += colValAppended; + kvRowLen += kvRowColLen; + dataRowLen += dataRowDeltaColLen; } - *len = rowSize; + if (kvRowLen < dataRowLen) { + payloadSetType(payload, SMEM_ROW_KV); + } else { + payloadSetType(payload, SMEM_ROW_DATA); + } + + *len = (int32_t)(payloadValOffset + colValOffset); + payloadSetTLen(payload, *len); + return TSDB_CODE_SUCCESS; } @@ -536,6 +922,27 @@ static int32_t rowDataCompar(const void *lhs, const void *rhs) { return left > right ? 1 : -1; } } +int32_t schemaIdxCompar(const void *lhs, const void *rhs) { + uint16_t left = *(uint16_t *)lhs; + uint16_t right = *(uint16_t *)rhs; + + if (left == right) { + return 0; + } else { + return left > right ? 1 : -1; + } +} + +int32_t boundIdxCompar(const void *lhs, const void *rhs) { + uint16_t left = *(uint16_t *)POINTER_SHIFT(lhs, sizeof(uint16_t)); + uint16_t right = *(uint16_t *)POINTER_SHIFT(rhs, sizeof(uint16_t)); + + if (left == right) { + return 0; + } else { + return left > right ? 
1 : -1; + } +} int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SInsertStatementParam *pInsertParam, int32_t* numOfRows, char *tmpTokenBuf) { @@ -551,21 +958,26 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn int32_t precision = tinfo.precision; + int32_t extendedRowSize = getExtendedRowSize(&tinfo); + + initSMemRowHelper(&pDataBlock->rowHelper, tscGetTableSchema(pDataBlock->pTableMeta), + tscGetNumOfColumns(pDataBlock->pTableMeta), 0); + while (1) { index = 0; sToken = tStrGetToken(*str, &index, false); if (sToken.n == 0 || sToken.type != TK_LP) break; *str += index; - if ((*numOfRows) >= maxRows || pDataBlock->size + tinfo.rowSize >= pDataBlock->nAllocSize) { + if ((*numOfRows) >= maxRows || pDataBlock->size + extendedRowSize >= pDataBlock->nAllocSize) { int32_t tSize; - code = tscAllocateMemIfNeed(pDataBlock, tinfo.rowSize, &tSize); + code = tscAllocateMemIfNeed(pDataBlock, extendedRowSize, &tSize); if (code != TSDB_CODE_SUCCESS) { //TODO pass the correct error code to client strcpy(pInsertParam->msg, "client out of memory"); return TSDB_CODE_TSC_OUT_OF_MEMORY; } - ASSERT(tSize > maxRows); + ASSERT(tSize >= maxRows); maxRows = tSize; } @@ -601,9 +1013,10 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32_t numOfCols) { pColInfo->numOfCols = numOfCols; pColInfo->numOfBound = numOfCols; - + pColInfo->orderStatus = ORDER_STATUS_ORDERED; pColInfo->boundedColumns = calloc(pColInfo->numOfCols, sizeof(int32_t)); pColInfo->cols = calloc(pColInfo->numOfCols, sizeof(SBoundColumn)); + pColInfo->colIdxInfo = NULL; for (int32_t i = 0; i < pColInfo->numOfCols; ++i) { if (i > 0) { @@ -643,7 +1056,7 @@ int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int3 return TSDB_CODE_SUCCESS; } -static int32_t tsSetBlockInfo(SSubmitBlk *pBlocks, const STableMeta *pTableMeta, int32_t numOfRows) { +int32_t FORCE_INLINE tsSetBlockInfo(SSubmitBlk *pBlocks, const STableMeta *pTableMeta, int32_t numOfRows) { pBlocks->tid = pTableMeta->id.tid; pBlocks->uid = pTableMeta->id.uid; pBlocks->sversion = pTableMeta->sversion; @@ -657,7 +1070,7 @@ static int32_t tsSetBlockInfo(SSubmitBlk *pBlocks, const STableMeta *pTableMeta, } // data block is disordered, sort it in ascending order -void tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf) { +void tscSortRemoveDataBlockDupRowsRaw(STableDataBlocks *dataBuf) { SSubmitBlk *pBlocks = (SSubmitBlk *)dataBuf->pData; // size is less than the total size, since duplicated rows may be removed yet. @@ -701,11 +1114,89 @@ void tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf) { dataBuf->prevTS = INT64_MIN; } +// data block is disordered, sort it in ascending order +int tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf, SBlockKeyInfo *pBlkKeyInfo) { + SSubmitBlk *pBlocks = (SSubmitBlk *)dataBuf->pData; + int16_t nRows = pBlocks->numOfRows; + + // size is less than the total size, since duplicated rows may be removed yet. 
+ + // if use server time, this block must be ordered + if (dataBuf->tsSource == TSDB_USE_SERVER_TS) { + assert(dataBuf->ordered); + } + // allocate memory + size_t nAlloc = nRows * sizeof(SBlockKeyTuple); + if (pBlkKeyInfo->pKeyTuple == NULL || pBlkKeyInfo->maxBytesAlloc < nAlloc) { + size_t nRealAlloc = nAlloc + 10 * sizeof(SBlockKeyTuple); + char * tmp = trealloc(pBlkKeyInfo->pKeyTuple, nRealAlloc); + if (tmp == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + pBlkKeyInfo->pKeyTuple = (SBlockKeyTuple *)tmp; + pBlkKeyInfo->maxBytesAlloc = (int32_t)nRealAlloc; + } + memset(pBlkKeyInfo->pKeyTuple, 0, nAlloc); + + SBlockKeyTuple *pBlkKeyTuple = pBlkKeyInfo->pKeyTuple; + char * pBlockData = pBlocks->data; + TDRowTLenT totolPayloadTLen = 0; + TDRowTLenT payloadTLen = 0; + int n = 0; + while (n < nRows) { + pBlkKeyTuple->skey = payloadTSKey(pBlockData); + pBlkKeyTuple->payloadAddr = pBlockData; + payloadTLen = payloadTLen(pBlockData); +#if 0 + ASSERT(payloadNCols(pBlockData) <= 4096); + ASSERT(payloadTLen(pBlockData) < 65536); +#endif + totolPayloadTLen += payloadTLen; + // next loop + pBlockData += payloadTLen; + ++pBlkKeyTuple; + ++n; + } + + if (!dataBuf->ordered) { + pBlkKeyTuple = pBlkKeyInfo->pKeyTuple; + qsort(pBlkKeyTuple, nRows, sizeof(SBlockKeyTuple), rowDataCompar); + + pBlkKeyTuple = pBlkKeyInfo->pKeyTuple; + int32_t i = 0; + int32_t j = 1; + while (j < nRows) { + TSKEY ti = (pBlkKeyTuple + i)->skey; + TSKEY tj = (pBlkKeyTuple + j)->skey; + + if (ti == tj) { + totolPayloadTLen -= payloadTLen(pBlkKeyTuple + j); + ++j; + continue; + } + + int32_t nextPos = (++i); + if (nextPos != j) { + memmove(pBlkKeyTuple + nextPos, pBlkKeyTuple + j, sizeof(SBlockKeyTuple)); + } + ++j; + } + + dataBuf->ordered = true; + pBlocks->numOfRows = i + 1; + } + + dataBuf->size = sizeof(SSubmitBlk) + totolPayloadTLen; + dataBuf->prevTS = INT64_MIN; + + return 0; +} + static int32_t doParseInsertStatement(SInsertStatementParam *pInsertParam, char **str, STableDataBlocks* dataBuf, int32_t *totalNum) { STableComInfo tinfo = tscGetTableInfo(dataBuf->pTableMeta); int32_t maxNumOfRows; - int32_t code = tscAllocateMemIfNeed(dataBuf, tinfo.rowSize, &maxNumOfRows); + int32_t code = tscAllocateMemIfNeed(dataBuf, getExtendedRowSize(&tinfo), &maxNumOfRows); if (TSDB_CODE_SUCCESS != code) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -1038,10 +1529,11 @@ static int32_t validateDataSource(SInsertStatementParam *pInsertParam, int32_t t static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDataColInfo* pColInfo, SSchema* pSchema, char* str, char **end) { - pColInfo->numOfBound = 0; + int32_t nCols = pColInfo->numOfCols; - memset(pColInfo->boundedColumns, 0, sizeof(int32_t) * pColInfo->numOfCols); - for(int32_t i = 0; i < pColInfo->numOfCols; ++i) { + pColInfo->numOfBound = 0; + memset(pColInfo->boundedColumns, 0, sizeof(int32_t) * nCols); + for (int32_t i = 0; i < nCols; ++i) { pColInfo->cols[i].hasVal = false; } @@ -1056,6 +1548,8 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat goto _clean; } + bool isOrdered = true; + int32_t lastColIdx = -1; // last column found while (1) { index = 0; sToken = tStrGetToken(str, &index, false); @@ -1076,7 +1570,8 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat bool findColumnIndex = false; // todo speedup by using hash list - for (int32_t t = 0; t < pColInfo->numOfCols; ++t) { + int32_t nScanned = 0, t = lastColIdx + 1; + while (t < nCols) { if (strncmp(sToken.z, pSchema[t].name, sToken.n) == 0 
&& strlen(pSchema[t].name) == sToken.n) { if (pColInfo->cols[t].hasVal == true) { code = tscInvalidOperationMsg(pInsertParam->msg, "duplicated column name", sToken.z); @@ -1085,10 +1580,39 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat pColInfo->cols[t].hasVal = true; pColInfo->boundedColumns[pColInfo->numOfBound] = t; - pColInfo->numOfBound += 1; + ++pColInfo->numOfBound; findColumnIndex = true; + if (isOrdered && (lastColIdx > t)) { + isOrdered = false; + } + lastColIdx = t; break; } + ++t; + ++nScanned; + } + if (!findColumnIndex) { + t = 0; + int32_t nRemain = nCols - nScanned; + while (t < nRemain) { + if (strncmp(sToken.z, pSchema[t].name, sToken.n) == 0 && strlen(pSchema[t].name) == sToken.n) { + if (pColInfo->cols[t].hasVal == true) { + code = tscInvalidOperationMsg(pInsertParam->msg, "duplicated column name", sToken.z); + goto _clean; + } + + pColInfo->cols[t].hasVal = true; + pColInfo->boundedColumns[pColInfo->numOfBound] = t; + ++pColInfo->numOfBound; + findColumnIndex = true; + if (isOrdered && (lastColIdx > t)) { + isOrdered = false; + } + lastColIdx = t; + break; + } + ++t; + } } if (!findColumnIndex) { @@ -1097,10 +1621,32 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat } } - memset(&pColInfo->boundedColumns[pColInfo->numOfBound], 0 , sizeof(int32_t) * (pColInfo->numOfCols - pColInfo->numOfBound)); + pColInfo->orderStatus = isOrdered ? ORDER_STATUS_ORDERED : ORDER_STATUS_DISORDERED; + + if (!isOrdered) { + pColInfo->colIdxInfo = tcalloc(pColInfo->numOfBound, sizeof(SBoundIdxInfo)); + if (pColInfo->colIdxInfo == NULL) { + code = TSDB_CODE_TSC_OUT_OF_MEMORY; + goto _clean; + } + SBoundIdxInfo *pColIdx = pColInfo->colIdxInfo; + for (uint16_t i = 0; i < pColInfo->numOfBound; ++i) { + pColIdx[i].schemaColIdx = (uint16_t)pColInfo->boundedColumns[i]; + pColIdx[i].boundIdx = i; + } + qsort(pColIdx, pColInfo->numOfBound, sizeof(SBoundIdxInfo), schemaIdxCompar); + for (uint16_t i = 0; i < pColInfo->numOfBound; ++i) { + pColIdx[i].finalIdx = i; + } + qsort(pColIdx, pColInfo->numOfBound, sizeof(SBoundIdxInfo), boundIdxCompar); + } + + memset(&pColInfo->boundedColumns[pColInfo->numOfBound], 0, + sizeof(int32_t) * (pColInfo->numOfCols - pColInfo->numOfBound)); + return TSDB_CODE_SUCCESS; - _clean: +_clean: pInsertParam->sql = NULL; return code; } @@ -1494,21 +2040,24 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow } STableDataBlocks *pTableDataBlock = NULL; - int32_t ret = - tscGetDataBlockFromList(pInsertParam->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk), - tinfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pTableDataBlock, NULL); + int32_t ret = tscGetDataBlockFromList(pInsertParam->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, + sizeof(SSubmitBlk), tinfo.rowSize, &pTableMetaInfo->name, pTableMeta, + &pTableDataBlock, NULL); if (ret != TSDB_CODE_SUCCESS) { pParentSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY; goto _error; } - tscAllocateMemIfNeed(pTableDataBlock, tinfo.rowSize, &maxRows); + tscAllocateMemIfNeed(pTableDataBlock, getExtendedRowSize(&tinfo), &maxRows); tokenBuf = calloc(1, TSDB_MAX_BYTES_PER_ROW); if (tokenBuf == NULL) { code = TSDB_CODE_TSC_OUT_OF_MEMORY; goto _error; } + initSMemRowHelper(&pTableDataBlock->rowHelper, tscGetTableSchema(pTableDataBlock->pTableMeta), + tscGetNumOfColumns(pTableDataBlock->pTableMeta), 0); + while ((readLen = tgetline(&line, &n, fp)) != -1) { if (('\r' == line[readLen - 1]) || ('\n' == 
line[readLen - 1])) { line[--readLen] = 0; diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 3a462c96d6..01c90680fa 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -47,6 +47,7 @@ typedef struct SNormalStmt { typedef struct SMultiTbStmt { bool nameSet; bool tagSet; + bool subSet; uint64_t currentUid; char *sqlstr; uint32_t tbNum; @@ -54,6 +55,7 @@ typedef struct SMultiTbStmt { SStrToken stbname; SStrToken values; SArray *tags; + STableDataBlocks *lastBlock; SHashObj *pTableHash; SHashObj *pTableBlockHashList; // data block for each table } SMultiTbStmt; @@ -347,11 +349,11 @@ int32_t fillTablesColumnsNull(SSqlObj* pSql) { //////////////////////////////////////////////////////////////////////////////// // functions for insertion statement preparation -static int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param, TAOS_BIND* bind, int32_t colNum) { - if (bind->is_null != NULL && *(bind->is_null)) { - setNull(data + param->offset, param->type, param->bytes); - return TSDB_CODE_SUCCESS; - } +static FORCE_INLINE int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param, TAOS_BIND* bind, int32_t colNum) { + if (bind->is_null != NULL && *(bind->is_null)) { + setNull(data + param->offset, param->type, param->bytes); + return TSDB_CODE_SUCCESS; + } #if 0 if (0) { @@ -746,25 +748,25 @@ static int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param, case TSDB_DATA_TYPE_BOOL: case TSDB_DATA_TYPE_TINYINT: case TSDB_DATA_TYPE_UTINYINT: - size = 1; + *(uint8_t *)(data + param->offset) = *(uint8_t *)bind->buffer; break; case TSDB_DATA_TYPE_SMALLINT: case TSDB_DATA_TYPE_USMALLINT: - size = 2; + *(uint16_t *)(data + param->offset) = *(uint16_t *)bind->buffer; break; case TSDB_DATA_TYPE_INT: case TSDB_DATA_TYPE_UINT: case TSDB_DATA_TYPE_FLOAT: - size = 4; + *(uint32_t *)(data + param->offset) = *(uint32_t *)bind->buffer; break; case TSDB_DATA_TYPE_BIGINT: case TSDB_DATA_TYPE_UBIGINT: case TSDB_DATA_TYPE_DOUBLE: case TSDB_DATA_TYPE_TIMESTAMP: - size = 8; + *(uint64_t *)(data + param->offset) = *(uint64_t *)bind->buffer; break; case TSDB_DATA_TYPE_BINARY: @@ -790,7 +792,6 @@ static int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param, return TSDB_CODE_TSC_INVALID_VALUE; } - memcpy(data + param->offset, bind->buffer, size); if (param->offset == 0) { if (tsCheckTimestamp(pBlock, data + param->offset) != TSDB_CODE_SUCCESS) { tscError("invalid timestamp"); @@ -801,6 +802,58 @@ static int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param, return TSDB_CODE_SUCCESS; } +static int32_t insertStmtGenLastBlock(STableDataBlocks** lastBlock, STableDataBlocks* pBlock) { + *lastBlock = (STableDataBlocks*)malloc(sizeof(STableDataBlocks)); + memcpy(*lastBlock, pBlock, sizeof(STableDataBlocks)); + (*lastBlock)->cloned = true; + + (*lastBlock)->pData = NULL; + (*lastBlock)->ordered = true; + (*lastBlock)->prevTS = INT64_MIN; + (*lastBlock)->size = sizeof(SSubmitBlk); + (*lastBlock)->tsSource = -1; + + return TSDB_CODE_SUCCESS; +} + + +static int32_t insertStmtGenBlock(STscStmt* pStmt, STableDataBlocks** pBlock, STableMeta* pTableMeta, SName* name) { + int32_t code = 0; + + if (pStmt->mtb.lastBlock == NULL) { + tscError("no previous data block"); + return TSDB_CODE_TSC_APP_ERROR; + } + + int32_t msize = tscGetTableMetaSize(pTableMeta); + int32_t tsize = sizeof(STableDataBlocks) + msize; + + void *t = malloc(tsize); + *pBlock = t; + + memcpy(*pBlock, pStmt->mtb.lastBlock, 
sizeof(STableDataBlocks)); + + t = (char *)t + sizeof(STableDataBlocks); + (*pBlock)->pTableMeta = t; + memcpy((*pBlock)->pTableMeta, pTableMeta, msize); + + (*pBlock)->pData = malloc((*pBlock)->nAllocSize); + + (*pBlock)->vgId = (*pBlock)->pTableMeta->vgId; + + tNameAssign(&(*pBlock)->tableName, name); + + SSubmitBlk* blk = (SSubmitBlk*)(*pBlock)->pData; + memset(blk, 0, sizeof(*blk)); + + code = tsSetBlockInfo(blk, pTableMeta, 0); + if (code != TSDB_CODE_SUCCESS) { + STMT_RET(code); + } + + return TSDB_CODE_SUCCESS; +} + static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MULTI_BIND* bind, int32_t rowNum) { if (bind->buffer_type != param->type || !isValidDataType(param->type)) { @@ -1172,7 +1225,7 @@ static void insertBatchClean(STscStmt* pStmt) { static int insertBatchStmtExecute(STscStmt* pStmt) { int32_t code = 0; - + if(pStmt->mtb.nameSet == false) { tscError("0x%"PRIx64" no table name set", pStmt->pSql->self); return invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "no table name set"); @@ -1227,11 +1280,11 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) { pStmt->mtb.tbname = sToken; pStmt->mtb.nameSet = false; if (pStmt->mtb.pTableHash == NULL) { - pStmt->mtb.pTableHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false); + pStmt->mtb.pTableHash = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false); } if (pStmt->mtb.pTableBlockHashList == NULL) { - pStmt->mtb.pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); + pStmt->mtb.pTableBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); } pStmt->mtb.tagSet = true; @@ -1522,6 +1575,7 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) { int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags) { STscStmt* pStmt = (STscStmt*)stmt; + int32_t code = 0; if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) { STMT_RET(TSDB_CODE_TSC_DISCONNECTED); @@ -1559,6 +1613,9 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags SSubmitBlk* pBlk = (SSubmitBlk*) (*t1)->pData; pCmd->batchSize = pBlk->numOfRows; + if (pBlk->numOfRows == 0) { + (*t1)->prevTS = INT64_MIN; + } taosHashPut(pCmd->insertParam.pTableBlockHashList, (void *)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid), (void*)t1, POINTER_BYTES); @@ -1566,6 +1623,51 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags STMT_RET(TSDB_CODE_SUCCESS); } + if (pStmt->mtb.subSet && taosHashGetSize(pStmt->mtb.pTableHash) > 0) { + STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0); + STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; + char sTableName[TSDB_TABLE_FNAME_LEN]; + strncpy(sTableName, pTableMeta->sTableName, sizeof(sTableName)); + + SStrToken tname = {0}; + tname.type = TK_STRING; + tname.z = (char *)name; + tname.n = (uint32_t)strlen(name); + SName fullname = {0}; + tscSetTableFullName(&fullname, &tname, pSql); + + memcpy(&pTableMetaInfo->name, &fullname, sizeof(fullname)); + + code = tscGetTableMeta(pSql, pTableMetaInfo); + if (code != TSDB_CODE_SUCCESS) { + STMT_RET(code); + } + + pTableMeta = pTableMetaInfo->pTableMeta; + + if (strcmp(sTableName, pTableMeta->sTableName)) { + tscError("0x%"PRIx64" only tables belongs to one stable is allowed", pSql->self); + STMT_RET(TSDB_CODE_TSC_APP_ERROR); + } + + STableDataBlocks* pBlock = NULL; + + 
insertStmtGenBlock(pStmt, &pBlock, pTableMeta, &pTableMetaInfo->name); + + pCmd->batchSize = 0; + + pStmt->mtb.currentUid = pTableMeta->id.uid; + pStmt->mtb.tbNum++; + + taosHashPut(pCmd->insertParam.pTableBlockHashList, (void *)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid), (void*)&pBlock, POINTER_BYTES); + taosHashPut(pStmt->mtb.pTableBlockHashList, (void *)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid), (void*)&pBlock, POINTER_BYTES); + taosHashPut(pStmt->mtb.pTableHash, name, strlen(name), (char*) &pTableMeta->id.uid, sizeof(pTableMeta->id.uid)); + + tscDebug("0x%"PRIx64" table:%s is prepared, uid:%" PRIx64, pSql->self, name, pStmt->mtb.currentUid); + + STMT_RET(TSDB_CODE_SUCCESS); + } + if (pStmt->mtb.tagSet) { pStmt->mtb.tbname = tscReplaceStrToken(&pSql->sqlstr, &pStmt->mtb.tbname, name); } else { @@ -1594,7 +1696,7 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags pCmd->insertParam.pTableBlockHashList = hashList; } - int32_t code = tsParseSql(pStmt->pSql, true); + code = tsParseSql(pStmt->pSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { // wait for the callback function to post the semaphore tsem_wait(&pStmt->pSql->rspSem); @@ -1622,6 +1724,10 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags taosHashPut(pStmt->mtb.pTableBlockHashList, (void *)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid), (void*)&pBlock, POINTER_BYTES); taosHashPut(pStmt->mtb.pTableHash, name, strlen(name), (char*) &pTableMeta->id.uid, sizeof(pTableMeta->id.uid)); + if (pStmt->mtb.lastBlock == NULL) { + insertStmtGenLastBlock(&pStmt->mtb.lastBlock, pBlock); + } + tscDebug("0x%"PRIx64" table:%s is prepared, uid:%" PRIx64, pSql->self, name, pStmt->mtb.currentUid); } @@ -1629,7 +1735,17 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags } +int taos_stmt_set_sub_tbname(TAOS_STMT* stmt, const char* name) { + STscStmt* pStmt = (STscStmt*)stmt; + pStmt->mtb.subSet = true; + return taos_stmt_set_tbname_tags(stmt, name, NULL); +} + + + int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name) { + STscStmt* pStmt = (STscStmt*)stmt; + pStmt->mtb.subSet = false; return taos_stmt_set_tbname_tags(stmt, name, NULL); } @@ -1653,6 +1769,7 @@ int taos_stmt_close(TAOS_STMT* stmt) { if (pStmt->pSql && pStmt->pSql->res.code != 0) { rmMeta = true; } + tscDestroyDataBlock(pStmt->mtb.lastBlock, rmMeta); pStmt->mtb.pTableBlockHashList = tscDestroyBlockHashTable(pStmt->mtb.pTableBlockHashList, rmMeta); taosHashCleanup(pStmt->pSql->cmd.insertParam.pTableBlockHashList); pStmt->pSql->cmd.insertParam.pTableBlockHashList = NULL; @@ -1687,6 +1804,8 @@ int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* bind) { pStmt->last = STMT_BIND; + tscDebug("tableId:%" PRIu64 ", try to bind one row", pStmt->mtb.currentUid); + STMT_RET(insertStmtBindParam(pStmt, bind)); } else { STMT_RET(normalStmtBindParam(pStmt, bind)); @@ -1806,6 +1925,7 @@ int taos_stmt_execute(TAOS_STMT* stmt) { pStmt->last = STMT_EXECUTE; + pStmt->pSql->cmd.insertParam.payloadType = PAYLOAD_TYPE_RAW; if (pStmt->multiTbInsert) { ret = insertBatchStmtExecute(pStmt); } else { diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 77c37239da..adea1cc453 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -1396,12 +1396,16 @@ static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd) { const char* msg4 = "invalid data type"; const char* msg5 = "invalid binary/nchar column length"; const 
char* msg6 = "invalid column name"; + const char* msg7 = "too many columns"; // number of fields no less than 2 size_t numOfCols = taosArrayGetSize(pFieldList); - if (numOfCols <= 1 || numOfCols > TSDB_MAX_COLUMNS) { + if (numOfCols <= 1 ) { invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg); return false; + } else if (numOfCols > TSDB_MAX_COLUMNS) { + invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7); + return false; } // first column must be timestamp @@ -1536,13 +1540,20 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) { const char* msg4 = "invalid tag name"; const char* msg5 = "invalid binary/nchar tag length"; const char* msg6 = "invalid data type in tags"; + const char* msg7 = "too many columns"; STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0); STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; int32_t numOfTags = tscGetNumOfTags(pTableMeta); int32_t numOfCols = tscGetNumOfColumns(pTableMeta); - + + // no more max columns + if (numOfTags + numOfCols >= TSDB_MAX_COLUMNS) { + invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7); + return false; + } + // no more than 6 tags if (numOfTags == TSDB_MAX_TAGS) { char msg[128] = {0}; @@ -5999,6 +6010,16 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { return invalidOperationMsg(pMsg, msg22); } + SSchema* pSchema = (SSchema*) pTableMetaInfo->pTableMeta->schema; + int16_t numOfColumns = pTableMetaInfo->pTableMeta->tableInfo.numOfColumns; + int16_t i; + uint32_t nLen = 0; + for (i = 0; i < numOfColumns; ++i) { + nLen += pSchema[i].colId != columnIndex.columnIndex ? pSchema[i].bytes : pItem->bytes; + } + if (nLen >= TSDB_MAX_BYTES_PER_ROW) { + return invalidOperationMsg(pMsg, msg24); + } TAOS_FIELD f = tscCreateField(pColSchema->type, name.z, pItem->bytes); tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); }else if (pAlterSQL->type == TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN) { @@ -6040,6 +6061,17 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { return invalidOperationMsg(pMsg, msg22); } + SSchema* pSchema = (SSchema*) pTableMetaInfo->pTableMeta->schema; + int16_t numOfColumns = pTableMetaInfo->pTableMeta->tableInfo.numOfColumns; + int16_t i; + uint32_t nLen = 0; + for (i = 0; i < numOfColumns; ++i) { + nLen += pSchema[i].colId != columnIndex.columnIndex ? pSchema[i].bytes : pItem->bytes; + } + if (nLen >= TSDB_MAX_BYTES_PER_ROW) { + return invalidOperationMsg(pMsg, msg24); + } + TAOS_FIELD f = tscCreateField(pColSchema->type, name.z, pItem->bytes); tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); } @@ -8005,6 +8037,28 @@ static void freeElem(void* p) { tfree(*(char**)p); } +int32_t tnameComparFn(const void* p1, const void* p2) { + SName* pn1 = (SName*)p1; + SName* pn2 = (SName*)p2; + + int32_t ret = strncmp(pn1->acctId, pn2->acctId, tListLen(pn1->acctId)); + if (ret != 0) { + return ret > 0? 1:-1; + } else { + ret = strncmp(pn1->dbname, pn2->dbname, tListLen(pn1->dbname)); + if (ret != 0) { + return ret > 0? 1:-1; + } else { + ret = strncmp(pn1->tname, pn2->tname, tListLen(pn1->tname)); + if (ret != 0) { + return ret > 0? 
1:-1; + } else { + return 0; + } + } + } +} + int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) { SSqlCmd* pCmd = &pSql->cmd; @@ -8048,13 +8102,20 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) { uint32_t maxSize = tscGetTableMetaMaxSize(); char name[TSDB_TABLE_FNAME_LEN] = {0}; - char buf[80 * 1024] = {0}; - assert(maxSize < 80 * 1024); + assert(maxSize < 80 * TSDB_MAX_COLUMNS); + if (!pSql->pBuf) { + if (NULL == (pSql->pBuf = tcalloc(1, 80 * TSDB_MAX_COLUMNS))) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + } pTableMeta = calloc(1, maxSize); plist = taosArrayInit(4, POINTER_BYTES); pVgroupList = taosArrayInit(4, POINTER_BYTES); + taosArraySort(tableNameList, tnameComparFn); + taosArrayRemoveDuplicate(tableNameList, tnameComparFn, NULL); + size_t numOfTables = taosArrayGetSize(tableNameList); for (int32_t i = 0; i < numOfTables; ++i) { SName* pname = taosArrayGet(tableNameList, i); @@ -8066,7 +8127,7 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) { if (pTableMeta->id.uid > 0) { if (pTableMeta->tableType == TSDB_CHILD_TABLE) { - code = tscCreateTableMetaFromSTableMeta(pTableMeta, name, buf); + code = tscCreateTableMetaFromSTableMeta(pTableMeta, name, pSql->pBuf); // create the child table meta from super table failed, try load it from mnode if (code != TSDB_CODE_SUCCESS) { @@ -8075,8 +8136,7 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) { continue; } } else if (pTableMeta->tableType == TSDB_SUPER_TABLE) { - // the vgroup list of a super table is not kept in local buffer, so here need retrieve it - // from the mnode each time + // the vgroup list of super table is not kept in local buffer, so here need retrieve it from the mnode each time char* t = strdup(name); taosArrayPush(pVgroupList, &t); } @@ -8103,6 +8163,7 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) { if (pInfo->funcs) { funcSize = taosArrayGetSize(pInfo->funcs); } + if (funcSize > 0) { for (size_t i = 0; i < funcSize; ++i) { SStrToken* t = taosArrayGet(pInfo->funcs, i); @@ -8262,7 +8323,9 @@ static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pS // union all is not support currently SSqlNode* p = taosArrayGetP(subInfo->pSubquery, 0); - + if (taosArrayGetSize(subInfo->pSubquery) >= 2) { + return invalidOperationMsg(msgBuf, "not support union in subquery"); + } SQueryInfo* pSub = calloc(1, sizeof(SQueryInfo)); tscInitQueryInfo(pSub); diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 74c3747576..e975dd7b06 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -2804,22 +2804,24 @@ int32_t tscGetTableMetaImpl(SSqlObj* pSql, STableMetaInfo *pTableMetaInfo, bool taosHashGetClone(tscTableMetaInfo, name, len, NULL, pTableMetaInfo->pTableMeta, -1); // TODO resize the tableMeta - char buf[80*1024] = {0}; - assert(size < 80*1024); + assert(size < 80 * TSDB_MAX_COLUMNS); + if (!pSql->pBuf) { + if (NULL == (pSql->pBuf = tcalloc(1, 80 * TSDB_MAX_COLUMNS))) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + } STableMeta* pMeta = pTableMetaInfo->pTableMeta; if (pMeta->id.uid > 0) { // in case of child table, here only get the if (pMeta->tableType == TSDB_CHILD_TABLE) { - int32_t code = tscCreateTableMetaFromSTableMeta(pTableMetaInfo->pTableMeta, name, buf); + int32_t code = tscCreateTableMetaFromSTableMeta(pTableMetaInfo->pTableMeta, name, pSql->pBuf); if (code != TSDB_CODE_SUCCESS) { return getTableMetaFromMnode(pSql, pTableMetaInfo, autocreate); } } - return TSDB_CODE_SUCCESS; } - 
return getTableMetaFromMnode(pSql, pTableMetaInfo, autocreate); } @@ -2934,7 +2936,6 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, SQueryInfo* pQueryInfo) { if (allVgroupInfoRetrieved(pQueryInfo)) { return TSDB_CODE_SUCCESS; } - SSqlObj *pNew = calloc(1, sizeof(SSqlObj)); pNew->pTscObj = pSql->pTscObj; pNew->signature = pNew; diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 906b2eb037..b72bd78b1b 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -712,6 +712,15 @@ static void updateQueryTimeRange(SQueryInfo* pQueryInfo, STimeWindow* win) { pQueryInfo->window = *win; } +int32_t tagValCompar(const void* p1, const void* p2) { + const STidTags* t1 = (const STidTags*) varDataVal(p1); + const STidTags* t2 = (const STidTags*) varDataVal(p2); + + __compar_fn_t func = getComparFunc(t1->padding, 0); + + return func(t1->tag, t2->tag); +} + int32_t tidTagsCompar(const void* p1, const void* p2) { const STidTags* t1 = (const STidTags*) (p1); const STidTags* t2 = (const STidTags*) (p2); @@ -720,28 +729,7 @@ int32_t tidTagsCompar(const void* p1, const void* p2) { return (t1->vgId > t2->vgId) ? 1 : -1; } - tstr* tag1 = (tstr*) t1->tag; - tstr* tag2 = (tstr*) t2->tag; - - if (tag1->len != tag2->len) { - return (tag1->len > tag2->len)? 1: -1; - } - - return strncmp(tag1->data, tag2->data, tag1->len); -} - -int32_t tagValCompar(const void* p1, const void* p2) { - const STidTags* t1 = (const STidTags*) varDataVal(p1); - const STidTags* t2 = (const STidTags*) varDataVal(p2); - - tstr* tag1 = (tstr*) t1->tag; - tstr* tag2 = (tstr*) t2->tag; - - if (tag1->len != tag2->len) { - return (tag1->len > tag2->len)? 1: -1; - } - - return memcmp(tag1->data, tag2->data, tag1->len); + return tagValCompar(p1, p2); } void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArray* tables) { @@ -871,6 +859,12 @@ static bool checkForDuplicateTagVal(SSchema* pColSchema, SJoinSupporter* p1, SSq return true; } +static void setTidTagType(SJoinSupporter* p, uint8_t type) { + for (int32_t i = 0; i < p->num; ++i) { + STidTags * tag = (STidTags*) varDataVal(p->pIdTagList + i * p->tagSize); + tag->padding = type; + } +} static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pParentSql, SArray* resList) { int16_t joinNum = pParentSql->subState.numOfSub; @@ -890,6 +884,8 @@ static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pPar for (int32_t i = 0; i < joinNum; i++) { SJoinSupporter* p = pParentSql->pSubs[i]->param; + setTidTagType(p, pColSchema->type); + ctxlist[i].p = p; ctxlist[i].res = taosArrayInit(p->num, size); @@ -2440,9 +2436,9 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { tOrderDescriptor *pDesc = NULL; pRes->qId = 0x1; // hack the qhandle check - - const uint32_t nBufferSize = (1u << 16u); // 64KB - + + const uint32_t nBufferSize = (1u << 18u); // 256KB + SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); SSubqueryState *pState = &pSql->subState; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 6ae0249329..5809218e6a 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1315,8 +1315,7 @@ void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeMeta) { return; } - SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd); - + SQueryInfo* pQueryInfo = pCmd->pQueryInfo; while(pQueryInfo != NULL) { SQueryInfo* p = pQueryInfo->sibling; @@ -1488,6 +1487,7 @@ void tscFreeSqlObj(SSqlObj* pSql) { pSql->signature = NULL; 
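In the tscSubquery.c hunk above, tidTagsCompar no longer compares join tag values with a byte-wise memcmp on the tstr payload; setTidTagType first stamps the tag column's type into the padding field of each STidTags entry, and tagValCompar then obtains a type-aware comparison function through getComparFunc. A minimal standalone sketch of that "comparator dispatched on a stored type tag" idea is shown below; tagged_val_t and compare_tagged are hypothetical names, not the real STidTags/getComparFunc.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

typedef enum { TAG_INT64, TAG_DOUBLE, TAG_STR } tag_type_t;

/* Stands in for STidTags: the tag value plus the type stamped in by setTidTagType. */
typedef struct {
  tag_type_t type;
  union {
    int64_t i;
    double  d;
    char    s[16];
  } v;
} tagged_val_t;

/* Dispatch on the stored type instead of comparing raw bytes. */
static int compare_tagged(const void *p1, const void *p2) {
  const tagged_val_t *a = p1;
  const tagged_val_t *b = p2;                 /* both sides share one tag type, as in a join column */
  switch (a->type) {
    case TAG_INT64:  return (a->v.i < b->v.i) ? -1 : (a->v.i > b->v.i);
    case TAG_DOUBLE: return (a->v.d < b->v.d) ? -1 : (a->v.d > b->v.d);
    default:         return strcmp(a->v.s, b->v.s);
  }
}

int main(void) {
  tagged_val_t vals[3] = {
    { TAG_INT64, { .i = 30 } },
    { TAG_INT64, { .i = 10 } },
    { TAG_INT64, { .i = 20 } },
  };
  /* The same comparator serves both sorting and duplicate detection of tag tuples. */
  qsort(vals, 3, sizeof(vals[0]), compare_tagged);
  for (int i = 0; i < 3; ++i) printf("%lld\n", (long long)vals[i].v.i);
  return 0;
}
```

A byte-wise memcmp does not order numeric tag values correctly on little-endian machines, which is presumably why the tuple-intersection code now goes through a comparison function selected by the tag's declared type.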
pSql->fp = NULL; tfree(pSql->sqlstr); + tfree(pSql->pBuf); tfree(pSql->pSubs); pSql->subState.numOfSub = 0; @@ -1499,7 +1499,7 @@ void tscFreeSqlObj(SSqlObj* pSql) { memset(pCmd->payload, 0, (size_t)pCmd->allocSize); tfree(pCmd->payload); pCmd->allocSize = 0; - + tsem_destroy(&pSql->rspSem); memset(pSql, 0, sizeof(*pSql)); free(pSql); @@ -1508,6 +1508,7 @@ void tscFreeSqlObj(SSqlObj* pSql) { void tscDestroyBoundColumnInfo(SParsedDataColInfo* pColInfo) { tfree(pColInfo->boundedColumns); tfree(pColInfo->cols); + tfree(pColInfo->colIdxInfo); } void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta) { @@ -1516,12 +1517,6 @@ void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta) { } tfree(pDataBlock->pData); - tfree(pDataBlock->params); - - // free the refcount for metermeta - if (pDataBlock->pTableMeta != NULL) { - tfree(pDataBlock->pTableMeta); - } if (removeMeta) { char name[TSDB_TABLE_FNAME_LEN] = {0}; @@ -1530,7 +1525,17 @@ void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta) { taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); } - tscDestroyBoundColumnInfo(&pDataBlock->boundColumnInfo); + if (!pDataBlock->cloned) { + tfree(pDataBlock->params); + + // free the refcount for metermeta + if (pDataBlock->pTableMeta != NULL) { + tfree(pDataBlock->pTableMeta); + } + + tscDestroyBoundColumnInfo(&pDataBlock->boundColumnInfo); + } + tfree(pDataBlock); } @@ -1596,7 +1601,7 @@ void freeUdfInfo(SUdfInfo* pUdfInfo) { taosCloseDll(pUdfInfo->handle); } - +// todo refactor void* tscDestroyUdfArrayList(SArray* pUdfList) { if (pUdfList == NULL) { return NULL; @@ -1709,12 +1714,14 @@ int32_t tscCreateDataBlock(size_t defaultSize, int32_t rowSize, int32_t startOff dataBuf->nAllocSize = dataBuf->headerSize * 2; } - dataBuf->pData = calloc(1, dataBuf->nAllocSize); + //dataBuf->pData = calloc(1, dataBuf->nAllocSize); + dataBuf->pData = malloc(dataBuf->nAllocSize); if (dataBuf->pData == NULL) { tscError("failed to allocated memory, reason:%s", strerror(errno)); tfree(dataBuf); return TSDB_CODE_TSC_OUT_OF_MEMORY; } + memset(dataBuf->pData, 0, sizeof(SSubmitBlk)); //Here we keep the tableMeta to avoid it to be remove by other threads. dataBuf->pTableMeta = tscTableMetaDup(pTableMeta); @@ -1762,11 +1769,108 @@ int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, i return TSDB_CODE_SUCCESS; } -static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, bool includeSchema) { +static SMemRow tdGenMemRowFromBuilder(SMemRowBuilder* pBuilder) { + SSchema* pSchema = pBuilder->pSchema; + char* p = (char*)pBuilder->buf; + int toffset = 0; + uint16_t nCols = pBuilder->nCols; + + uint8_t memRowType = payloadType(p); + uint16_t nColsBound = payloadNCols(p); + if (pBuilder->nCols <= 0 || nColsBound <= 0) { + return NULL; + } + char* pVals = POINTER_SHIFT(p, payloadValuesOffset(p)); + SMemRow* memRow = (SMemRow)pBuilder->pDataBlock; + memRowSetType(memRow, memRowType); + + // ----------------- Raw payload structure for row: + /* |<------------ Head ------------->|<----------- body of column data tuple ------------------->| + * | |<----------------- flen ------------->|<--- value part --->| + * |SMemRowType| dataTLen | nCols | colId | colType | offset | ... | value |...|...|... | + * +-----------+----------+----------+--------------------------------------|--------------------| + * | uint8_t | uint32_t | uint16_t | int16_t | uint8_t | uint16_t | ... |.......|...|...|... 
| + * +-----------+----------+----------+--------------------------------------+--------------------| + * 1. offset in column data tuple starts from the value part in case of uint16_t overflow. + * 2. dataTLen: total length including the header and body. + */ + + if (memRowType == SMEM_ROW_DATA) { + SDataRow trow = (SDataRow)memRowDataBody(memRow); + dataRowSetLen(trow, (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + pBuilder->flen)); + dataRowSetVersion(trow, pBuilder->sversion); + + p = (char*)payloadBody(pBuilder->buf); + uint16_t i = 0, j = 0; + while (j < nCols) { + if (i >= nColsBound) { + break; + } + int16_t colId = payloadColId(p); + if (colId == pSchema[j].colId) { + // ASSERT(payloadColType(p) == pSchema[j].type); + tdAppendColVal(trow, POINTER_SHIFT(pVals, payloadColOffset(p)), pSchema[j].type, toffset); + toffset += TYPE_BYTES[pSchema[j].type]; + p = payloadNextCol(p); + ++i; + ++j; + } else if (colId < pSchema[j].colId) { + p = payloadNextCol(p); + ++i; + } else { + tdAppendColVal(trow, getNullValue(pSchema[j].type), pSchema[j].type, toffset); + toffset += TYPE_BYTES[pSchema[j].type]; + ++j; + } + } + + while (j < nCols) { + tdAppendColVal(trow, getNullValue(pSchema[j].type), pSchema[j].type, toffset); + toffset += TYPE_BYTES[pSchema[j].type]; + ++j; + } + + #if 0 // no need anymore + while (i < nColsBound) { + p = payloadNextCol(p); + ++i; + } + #endif + + } else if (memRowType == SMEM_ROW_KV) { + SKVRow kvRow = (SKVRow)memRowKvBody(memRow); + kvRowSetLen(kvRow, (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nColsBound)); + kvRowSetNCols(kvRow, nColsBound); + memRowSetKvVersion(memRow, pBuilder->sversion); + + p = (char*)payloadBody(pBuilder->buf); + int i = 0; + while (i < nColsBound) { + int16_t colId = payloadColId(p); + uint8_t colType = payloadColType(p); + tdAppendKvColVal(kvRow, POINTER_SHIFT(pVals,payloadColOffset(p)), colId, colType, toffset); + toffset += sizeof(SColIdx); + p = payloadNextCol(p); + ++i; + } + + } else { + ASSERT(0); + } + int32_t rowTLen = memRowTLen(memRow); + pBuilder->pDataBlock = (char*)pBuilder->pDataBlock + rowTLen; // next row + pBuilder->pSubmitBlk->dataLen += rowTLen; + + return memRow; +} + +// Erase the empty space reserved for binary data +static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SInsertStatementParam* insertParam, + SBlockKeyTuple* blkKeyTuple) { // TODO: optimize this function, handle the case while binary is not presented - STableMeta* pTableMeta = pTableDataBlock->pTableMeta; - STableComInfo tinfo = tscGetTableInfo(pTableMeta); - SSchema* pSchema = tscGetTableSchema(pTableMeta); + STableMeta* pTableMeta = pTableDataBlock->pTableMeta; + STableComInfo tinfo = tscGetTableInfo(pTableMeta); + SSchema* pSchema = tscGetTableSchema(pTableMeta); SSubmitBlk* pBlock = pDataBlock; memcpy(pDataBlock, pTableDataBlock->pData, sizeof(SSubmitBlk)); @@ -1775,7 +1879,7 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, bo int32_t flen = 0; // original total length of row // schema needs to be included into the submit data block - if (includeSchema) { + if (insertParam->schemaAttached) { int32_t numOfCols = tscGetNumOfColumns(pTableDataBlock->pTableMeta); for(int32_t j = 0; j < numOfCols; ++j) { STColumn* pCol = (STColumn*) pDataBlock; @@ -1801,21 +1905,39 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, bo char* p = pTableDataBlock->pData + sizeof(SSubmitBlk); pBlock->dataLen = 0; int32_t numOfRows = htons(pBlock->numOfRows); - - for (int32_t i = 0; i < 
numOfRows; ++i) { - SDataRow trow = (SDataRow) pDataBlock; - dataRowSetLen(trow, (uint16_t)(TD_DATA_ROW_HEAD_SIZE + flen)); - dataRowSetVersion(trow, pTableMeta->sversion); - int toffset = 0; - for (int32_t j = 0; j < tinfo.numOfColumns; j++) { - tdAppendColVal(trow, p, pSchema[j].type, pSchema[j].bytes, toffset); - toffset += TYPE_BYTES[pSchema[j].type]; - p += pSchema[j].bytes; + if (IS_RAW_PAYLOAD(insertParam->payloadType)) { + for (int32_t i = 0; i < numOfRows; ++i) { + SMemRow memRow = (SMemRow)pDataBlock; + memRowSetType(memRow, SMEM_ROW_DATA); + SDataRow trow = memRowDataBody(memRow); + dataRowSetLen(trow, (uint16_t)(TD_DATA_ROW_HEAD_SIZE + flen)); + dataRowSetVersion(trow, pTableMeta->sversion); + + int toffset = 0; + for (int32_t j = 0; j < tinfo.numOfColumns; j++) { + tdAppendColVal(trow, p, pSchema[j].type, toffset); + toffset += TYPE_BYTES[pSchema[j].type]; + p += pSchema[j].bytes; + } + + pDataBlock = (char*)pDataBlock + memRowTLen(memRow); + pBlock->dataLen += memRowTLen(memRow); } + } else { + SMemRowBuilder rowBuilder; + rowBuilder.pSchema = pSchema; + rowBuilder.sversion = pTableMeta->sversion; + rowBuilder.flen = flen; + rowBuilder.nCols = tinfo.numOfColumns; + rowBuilder.pDataBlock = pDataBlock; + rowBuilder.pSubmitBlk = pBlock; + rowBuilder.buf = p; - pDataBlock = (char*)pDataBlock + dataRowLen(trow); - pBlock->dataLen += dataRowLen(trow); + for (int32_t i = 0; i < numOfRows; ++i) { + rowBuilder.buf = (blkKeyTuple + i)->payloadAddr; + tdGenMemRowFromBuilder(&rowBuilder); + } } int32_t len = pBlock->dataLen + pBlock->schemaLen; @@ -1826,7 +1948,7 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, bo } static int32_t getRowExpandSize(STableMeta* pTableMeta) { - int32_t result = TD_DATA_ROW_HEAD_SIZE; + int32_t result = TD_MEM_ROW_DATA_HEAD_SIZE; int32_t columns = tscGetNumOfColumns(pTableMeta); SSchema* pSchema = tscGetTableSchema(pTableMeta); for(int32_t i = 0; i < columns; i++) { @@ -1840,16 +1962,14 @@ static int32_t getRowExpandSize(STableMeta* pTableMeta) { static void extractTableNameList(SInsertStatementParam *pInsertParam, bool freeBlockMap) { pInsertParam->numOfTables = (int32_t) taosHashGetSize(pInsertParam->pTableBlockHashList); if (pInsertParam->pTableNameList == NULL) { - pInsertParam->pTableNameList = calloc(pInsertParam->numOfTables, POINTER_BYTES); - } else { - memset(pInsertParam->pTableNameList, 0, pInsertParam->numOfTables * POINTER_BYTES); + pInsertParam->pTableNameList = malloc(pInsertParam->numOfTables * POINTER_BYTES); } STableDataBlocks **p1 = taosHashIterate(pInsertParam->pTableBlockHashList, NULL); int32_t i = 0; while(p1) { STableDataBlocks* pBlocks = *p1; - tfree(pInsertParam->pTableNameList[i]); + //tfree(pInsertParam->pTableNameList[i]); pInsertParam->pTableNameList[i++] = tNameDup(&pBlocks->tableName); p1 = taosHashIterate(pInsertParam->pTableBlockHashList, p1); @@ -1862,13 +1982,17 @@ static void extractTableNameList(SInsertStatementParam *pInsertParam, bool freeB int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBlockMap) { const int INSERT_HEAD_SIZE = sizeof(SMsgDesc) + sizeof(SSubmitMsg); - - void* pVnodeDataBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); - SArray* pVnodeDataBlockList = taosArrayInit(8, POINTER_BYTES); + int code = 0; + bool isRawPayload = IS_RAW_PAYLOAD(pInsertParam->payloadType); + void* pVnodeDataBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); + SArray* 
pVnodeDataBlockList = taosArrayInit(8, POINTER_BYTES); STableDataBlocks** p = taosHashIterate(pInsertParam->pTableBlockHashList, NULL); STableDataBlocks* pOneTableBlock = *p; + + SBlockKeyInfo blkKeyInfo = {0}; // share by pOneTableBlock + while(pOneTableBlock) { SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData; if (pBlocks->numOfRows > 0) { @@ -1882,37 +2006,54 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl tscError("0x%"PRIx64" failed to prepare the data block buffer for merging table data, code:%d", pInsertParam->objectId, ret); taosHashCleanup(pVnodeDataBlockHashList); tscDestroyBlockArrayList(pVnodeDataBlockList); + tfree(blkKeyInfo.pKeyTuple); return ret; } int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta); if (dataBuf->nAllocSize < destSize) { - while (dataBuf->nAllocSize < destSize) { - dataBuf->nAllocSize = (uint32_t)(dataBuf->nAllocSize * 1.5); - } + dataBuf->nAllocSize = (uint32_t)(destSize * 1.5); char* tmp = realloc(dataBuf->pData, dataBuf->nAllocSize); if (tmp != NULL) { dataBuf->pData = tmp; - memset(dataBuf->pData + dataBuf->size, 0, dataBuf->nAllocSize - dataBuf->size); + //memset(dataBuf->pData + dataBuf->size, 0, dataBuf->nAllocSize - dataBuf->size); } else { // failed to allocate memory, free already allocated memory and return error code tscError("0x%"PRIx64" failed to allocate memory for merging submit block, size:%d", pInsertParam->objectId, dataBuf->nAllocSize); taosHashCleanup(pVnodeDataBlockHashList); tscDestroyBlockArrayList(pVnodeDataBlockList); tfree(dataBuf->pData); + tfree(blkKeyInfo.pKeyTuple); return TSDB_CODE_TSC_OUT_OF_MEMORY; } } - tscSortRemoveDataBlockDupRows(pOneTableBlock); - char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize*(pBlocks->numOfRows-1); + if (isRawPayload) { + tscSortRemoveDataBlockDupRowsRaw(pOneTableBlock); + char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize * (pBlocks->numOfRows - 1); - tscDebug("0x%"PRIx64" name:%s, tid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pInsertParam->objectId, tNameGetTableName(&pOneTableBlock->tableName), - pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey)); + tscDebug("0x%" PRIx64 " name:%s, tid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, + pInsertParam->objectId, tNameGetTableName(&pOneTableBlock->tableName), pBlocks->tid, + pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey)); + } else { + if ((code = tscSortRemoveDataBlockDupRows(pOneTableBlock, &blkKeyInfo)) != 0) { + taosHashCleanup(pVnodeDataBlockHashList); + tscDestroyBlockArrayList(pVnodeDataBlockList); + tfree(dataBuf->pData); + tfree(blkKeyInfo.pKeyTuple); + return code; + } + ASSERT(blkKeyInfo.pKeyTuple != NULL && pBlocks->numOfRows > 0); + SBlockKeyTuple* pLastKeyTuple = blkKeyInfo.pKeyTuple + pBlocks->numOfRows - 1; + tscDebug("0x%" PRIx64 " name:%s, tid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, + pInsertParam->objectId, tNameGetTableName(&pOneTableBlock->tableName), pBlocks->tid, + pBlocks->numOfRows, pBlocks->sversion, blkKeyInfo.pKeyTuple->skey, pLastKeyTuple->skey); + } + int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta); pBlocks->tid = htonl(pBlocks->tid); @@ -1922,7 +2063,7 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, 
bool freeBl pBlocks->schemaLen = 0; // erase the empty space reserved for binary data - int32_t finalLen = trimDataBlock(dataBuf->pData + dataBuf->size, pOneTableBlock, pInsertParam->schemaAttached); + int32_t finalLen = trimDataBlock(dataBuf->pData + dataBuf->size, pOneTableBlock, pInsertParam, blkKeyInfo.pKeyTuple); assert(finalLen <= len); dataBuf->size += (finalLen + sizeof(SSubmitBlk)); @@ -1950,6 +2091,7 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl // free the table data blocks; pInsertParam->pDataBlocks = pVnodeDataBlockList; taosHashCleanup(pVnodeDataBlockHashList); + tfree(blkKeyInfo.pKeyTuple); return TSDB_CODE_SUCCESS; } @@ -2986,6 +3128,7 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo) { int32_t tscAddQueryInfo(SSqlCmd* pCmd) { assert(pCmd != NULL); SQueryInfo* pQueryInfo = calloc(1, sizeof(SQueryInfo)); + if (pQueryInfo == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -3778,7 +3921,7 @@ bool tscIsUpdateQuery(SSqlObj* pSql) { } SSqlCmd* pCmd = &pSql->cmd; - return ((pCmd->command >= TSDB_SQL_INSERT && pCmd->command <= TSDB_SQL_DROP_DNODE) || TSDB_SQL_USE_DB == pCmd->command); + return ((pCmd->command >= TSDB_SQL_INSERT && pCmd->command <= TSDB_SQL_DROP_DNODE) || TSDB_SQL_RESET_CACHE == pCmd->command || TSDB_SQL_USE_DB == pCmd->command); } char* tscGetSqlStr(SSqlObj* pSql) { @@ -3968,17 +4111,21 @@ void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp) { //backup the total number of result first int64_t num = pRes->numOfTotal + pRes->numOfClauseTotal; - // DON't free final since it may be recoreded and used later in APP TAOS_FIELD* finalBk = pRes->final; pRes->final = NULL; tscFreeSqlResult(pSql); + pRes->final = finalBk; - pRes->numOfTotal = num; - + + for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) { + taos_free_result(pSql->pSubs[i]); + } + tfree(pSql->pSubs); pSql->subState.numOfSub = 0; + pSql->fp = fp; tscDebug("0x%"PRIx64" try data in the next subclause", pSql->self); @@ -4239,7 +4386,7 @@ STableMeta* tscTableMetaDup(STableMeta* pTableMeta) { assert(pTableMeta != NULL); size_t size = tscGetTableMetaSize(pTableMeta); - STableMeta* p = calloc(1, size); + STableMeta* p = malloc(size); memcpy(p, pTableMeta, size); return p; } @@ -4618,6 +4765,20 @@ int32_t nameComparFn(const void* n1, const void* n2) { } } +static void freeContent(void* p) { + char* ptr = *(char**)p; + tfree(ptr); +} + +static int32_t contCompare(const void* p1, const void* p2) { + int32_t ret = strcmp(p1, p2); + if (ret == 0) { + return 0; + } else { + return ret > 0 ? 
1:-1; + } +} + int tscTransferTableNameList(SSqlObj *pSql, const char *pNameList, int32_t length, SArray* pNameArray) { SSqlCmd *pCmd = &pSql->cmd; @@ -4665,32 +4826,7 @@ int tscTransferTableNameList(SSqlObj *pSql, const char *pNameList, int32_t lengt } taosArraySort(pNameArray, nameComparFn); - - int32_t pos = 0; - for(int32_t i = 1; i < len; ++i) { - char** p1 = taosArrayGet(pNameArray, pos); - char** p2 = taosArrayGet(pNameArray, i); - - if (strcmp(*p1, *p2) == 0) { - // do nothing - } else { - if (pos + 1 != i) { - char* p = taosArrayGetP(pNameArray, pos + 1); - tfree(p); - taosArraySet(pNameArray, pos + 1, p2); - pos += 1; - } else { - pos += 1; - } - } - } - - for(int32_t i = pos + 1; i < pNameArray->size; ++i) { - char* p = taosArrayGetP(pNameArray, i); - tfree(p); - } - - pNameArray->size = pos + 1; + taosArrayRemoveDuplicate(pNameArray, contCompare, freeContent); return TSDB_CODE_SUCCESS; } diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index 4f04a82f0a..829e771c70 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -53,10 +53,10 @@ extern "C" { // ----------------- TSDB COLUMN DEFINITION typedef struct { - int8_t type; // Column type - int16_t colId; // column ID - int16_t bytes; // column bytes - int16_t offset; // point offset in SDataRow after the header part + int8_t type; // Column type + int16_t colId; // column ID + uint16_t bytes; // column bytes + uint16_t offset; // point offset in SDataRow after the header part. } STColumn; #define colType(col) ((col)->type) @@ -167,10 +167,11 @@ static FORCE_INLINE int tkeyComparFn(const void *tkey1, const void *tkey2) { return 0; } } + // ----------------- Data row structure /* A data row, the format is like below: - * |<--------------------+--------------------------- len ---------------------------------->| + * |<------------------------------------------------ len ---------------------------------->| * |<-- Head -->|<--------- flen -------------->| | * +---------------------+---------------------------------+---------------------------------+ * | uint16_t | int16_t | | | @@ -184,8 +185,8 @@ typedef void *SDataRow; #define TD_DATA_ROW_HEAD_SIZE (sizeof(uint16_t) + sizeof(int16_t)) -#define dataRowLen(r) (*(uint16_t *)(r)) -#define dataRowVersion(r) *(int16_t *)POINTER_SHIFT(r, sizeof(int16_t)) +#define dataRowLen(r) (*(TDRowLenT *)(r)) // 0~65535 +#define dataRowVersion(r) (*(int16_t *)POINTER_SHIFT(r, sizeof(int16_t))) #define dataRowTuple(r) POINTER_SHIFT(r, TD_DATA_ROW_HEAD_SIZE) #define dataRowTKey(r) (*(TKEY *)(dataRowTuple(r))) #define dataRowKey(r) tdGetKey(dataRowTKey(r)) @@ -201,20 +202,19 @@ void tdInitDataRow(SDataRow row, STSchema *pSchema); SDataRow tdDataRowDup(SDataRow row); // offset here not include dataRow header length -static FORCE_INLINE int tdAppendColVal(SDataRow row, void *value, int8_t type, int32_t bytes, int32_t offset) { +static FORCE_INLINE int tdAppendColVal(SDataRow row, const void *value, int8_t type, int32_t offset) { ASSERT(value != NULL); int32_t toffset = offset + TD_DATA_ROW_HEAD_SIZE; - char * ptr = (char *)POINTER_SHIFT(row, dataRowLen(row)); if (IS_VAR_DATA_TYPE(type)) { *(VarDataOffsetT *)POINTER_SHIFT(row, toffset) = dataRowLen(row); - memcpy(ptr, value, varDataTLen(value)); + memcpy(POINTER_SHIFT(row, dataRowLen(row)), value, varDataTLen(value)); dataRowLen(row) += varDataTLen(value); } else { if (offset == 0) { ASSERT(type == TSDB_DATA_TYPE_TIMESTAMP); TKEY tvalue = tdGetTKEY(*(TSKEY *)value); - memcpy(POINTER_SHIFT(row, toffset), 
(void *)(&tvalue), TYPE_BYTES[type]); + memcpy(POINTER_SHIFT(row, toffset), (const void *)(&tvalue), TYPE_BYTES[type]); } else { memcpy(POINTER_SHIFT(row, toffset), value, TYPE_BYTES[type]); } @@ -245,17 +245,21 @@ typedef struct SDataCol { TSKEY ts; // only used in last NULL column } SDataCol; +#define isAllRowsNull(pCol) ((pCol)->len == 0) static FORCE_INLINE void dataColReset(SDataCol *pDataCol) { pDataCol->len = 0; } void dataColInit(SDataCol *pDataCol, STColumn *pCol, void **pBuf, int maxPoints); -void dataColAppendVal(SDataCol *pCol, void *value, int numOfRows, int maxPoints); +void dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints); void dataColSetOffset(SDataCol *pCol, int nEle); bool isNEleNull(SDataCol *pCol, int nEle); void dataColSetNEleNull(SDataCol *pCol, int nEle, int maxPoints); // Get the data pointer from a column-wised data -static FORCE_INLINE void *tdGetColDataOfRow(SDataCol *pCol, int row) { +static FORCE_INLINE const void *tdGetColDataOfRow(SDataCol *pCol, int row) { + if (isAllRowsNull(pCol)) { + return getNullValue(pCol->type); + } if (IS_VAR_DATA_TYPE(pCol->type)) { return POINTER_SHIFT(pCol->pData, pCol->dataOff[row]); } else { @@ -287,7 +291,7 @@ typedef struct { } SDataCols; #define keyCol(pCols) (&((pCols)->cols[0])) // Key column -#define dataColsTKeyAt(pCols, idx) ((TKEY *)(keyCol(pCols)->pData))[(idx)] +#define dataColsTKeyAt(pCols, idx) ((TKEY *)(keyCol(pCols)->pData))[(idx)] // the idx row of column-wised data #define dataColsKeyAt(pCols, idx) tdGetKey(dataColsTKeyAt(pCols, idx)) static FORCE_INLINE TKEY dataColsTKeyFirst(SDataCols *pCols) { if (pCols->numOfRows) { @@ -331,13 +335,13 @@ void tdResetDataCols(SDataCols *pCols); int tdInitDataCols(SDataCols *pCols, STSchema *pSchema); SDataCols *tdDupDataCols(SDataCols *pCols, bool keepData); SDataCols *tdFreeDataCols(SDataCols *pCols); -void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols); int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset); // ----------------- K-V data row structure -/* +/* |<-------------------------------------- len -------------------------------------------->| + * |<----- header ----->|<--------------------------- body -------------------------------->| * +----------+----------+---------------------------------+---------------------------------+ - * | int16_t | int16_t | | | + * | uint16_t | int16_t | | | * +----------+----------+---------------------------------+---------------------------------+ * | len | ncols | cols index | data part | * +----------+----------+---------------------------------+---------------------------------+ @@ -345,14 +349,14 @@ int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge typedef void *SKVRow; typedef struct { - int16_t colId; - int16_t offset; + int16_t colId; + uint16_t offset; } SColIdx; -#define TD_KV_ROW_HEAD_SIZE (2 * sizeof(int16_t)) +#define TD_KV_ROW_HEAD_SIZE (sizeof(uint16_t) + sizeof(int16_t)) -#define kvRowLen(r) (*(int16_t *)(r)) -#define kvRowNCols(r) (*(int16_t *)POINTER_SHIFT(r, sizeof(int16_t))) +#define kvRowLen(r) (*(TDRowLenT *)(r)) +#define kvRowNCols(r) (*(int16_t *)POINTER_SHIFT(r, sizeof(uint16_t))) #define kvRowSetLen(r, len) kvRowLen(r) = (len) #define kvRowSetNCols(r, n) kvRowNCols(r) = (n) #define kvRowColIdx(r) (SColIdx *)POINTER_SHIFT(r, TD_KV_ROW_HEAD_SIZE) @@ -362,6 +366,9 @@ typedef struct { #define kvRowColIdxAt(r, i) (kvRowColIdx(r) + (i)) #define kvRowFree(r) tfree(r) #define kvRowEnd(r) 
POINTER_SHIFT(r, kvRowLen(r)) +#define kvRowTKey(r) (*(TKEY *)(kvRowValues(r))) +#define kvRowKey(r) tdGetKey(kvRowTKey(r)) +#define kvRowDeleted(r) TKEY_IS_DELETED(kvRowTKey(r)) SKVRow tdKVRowDup(SKVRow row); int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value); @@ -385,13 +392,44 @@ static FORCE_INLINE void *tdGetKVRowValOfCol(SKVRow row, int16_t colId) { return kvRowColVal(row, (SColIdx *)ret); } +static FORCE_INLINE void *tdGetKVRowIdxOfCol(SKVRow row, int16_t colId) { + return taosbsearch(&colId, kvRowColIdx(row), kvRowNCols(row), sizeof(SColIdx), comparTagId, TD_EQ); +} + +// offset here not include kvRow header length +static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t colId, int8_t type, int32_t offset) { + ASSERT(value != NULL); + int32_t toffset = offset + TD_KV_ROW_HEAD_SIZE; + SColIdx *pColIdx = (SColIdx *)POINTER_SHIFT(row, toffset); + char * ptr = (char *)POINTER_SHIFT(row, kvRowLen(row)); + + pColIdx->colId = colId; + pColIdx->offset = kvRowLen(row); // offset of pColIdx including the TD_KV_ROW_HEAD_SIZE + + if (IS_VAR_DATA_TYPE(type)) { + memcpy(ptr, value, varDataTLen(value)); + kvRowLen(row) += varDataTLen(value); + } else { + if (offset == 0) { + ASSERT(type == TSDB_DATA_TYPE_TIMESTAMP); + TKEY tvalue = tdGetTKEY(*(TSKEY *)value); + memcpy(ptr, (void *)(&tvalue), TYPE_BYTES[type]); + } else { + memcpy(ptr, value, TYPE_BYTES[type]); + } + kvRowLen(row) += TYPE_BYTES[type]; + } + + return 0; +} + // ----------------- K-V data row builder typedef struct { int16_t tCols; int16_t nCols; SColIdx *pColIdx; - int16_t alloc; - int16_t size; + uint16_t alloc; + uint16_t size; void * buf; } SKVRowBuilder; @@ -427,8 +465,146 @@ static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, return 0; } +// ----------------- SMemRow appended with sequential data row structure +/* + * |---------|------------------------------------------------- len ---------------------------------->| + * |<-------- Head ------>|<--------- flen -------------->| | + * |---------+---------------------+---------------------------------+---------------------------------+ + * | uint8_t | uint16_t | int16_t | | | + * |---------+----------+----------+---------------------------------+---------------------------------+ + * | flag | len | sversion | First part | Second part | + * +---------+----------+----------+---------------------------------+---------------------------------+ + * + * NOTE: timestamp in this row structure is TKEY instead of TSKEY + */ + +// ----------------- SMemRow appended with extended K-V data row structure +/* |--------------------|------------------------------------------------ len ---------------------------------->| + * |<------------- Head ------------>|<--------- flen -------------->| | + * |--------------------+----------+--------------------------------------------+---------------------------------+ + * | uint8_t | int16_t | uint16_t | int16_t | | | + * |---------+----------+----------+----------+---------------------------------+---------------------------------+ + * | flag | sversion | len | ncols | cols index | data part | + * |---------+----------+----------+----------+---------------------------------+---------------------------------+ + */ + +typedef void *SMemRow; + +#define TD_MEM_ROW_TYPE_SIZE sizeof(uint8_t) +#define TD_MEM_ROW_KV_VER_SIZE sizeof(int16_t) +#define TD_MEM_ROW_KV_TYPE_VER_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_MEM_ROW_KV_VER_SIZE) +#define TD_MEM_ROW_DATA_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + 
TD_DATA_ROW_HEAD_SIZE) +// #define TD_MEM_ROW_KV_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_MEM_ROW_KV_VER_SIZE + TD_KV_ROW_HEAD_SIZE) + +#define SMEM_ROW_DATA 0U // SDataRow +#define SMEM_ROW_KV 1U // SKVRow + +#define memRowType(r) (*(uint8_t *)(r)) +#define isDataRow(r) (SMEM_ROW_DATA == memRowType(r)) +#define isKvRow(r) (SMEM_ROW_KV == memRowType(r)) + +#define memRowDataBody(r) POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE) // section after flag +#define memRowKvBody(r) \ + POINTER_SHIFT(r, TD_MEM_ROW_KV_TYPE_VER_SIZE) // section after flag + sversion as to reuse SKVRow + +#define memRowDataLen(r) (*(TDRowLenT *)memRowDataBody(r)) // 0~65535 +#define memRowKvLen(r) (*(TDRowLenT *)memRowKvBody(r)) // 0~65535 + +#define memRowDataTLen(r) \ + ((TDRowTLenT)(memRowDataLen(r) + TD_MEM_ROW_TYPE_SIZE)) // using uint32_t/int32_t to store the TLen + +#define memRowKvTLen(r) ((TDRowTLenT)(memRowKvLen(r) + TD_MEM_ROW_KV_TYPE_VER_SIZE)) + +#define memRowLen(r) (isDataRow(r) ? memRowDataLen(r) : memRowKvLen(r)) +#define memRowTLen(r) (isDataRow(r) ? memRowDataTLen(r) : memRowKvTLen(r)) // using uint32_t/int32_t to store the TLen + +#define memRowDataVersion(r) dataRowVersion(memRowDataBody(r)) +#define memRowKvVersion(r) (*(int16_t *)POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE)) +#define memRowVersion(r) (isDataRow(r) ? memRowDataVersion(r) : memRowKvVersion(r)) // schema version +#define memRowSetKvVersion(r, v) (memRowKvVersion(r) = (v)) +#define memRowTuple(r) (isDataRow(r) ? dataRowTuple(memRowDataBody(r)) : kvRowValues(memRowKvBody(r))) + +#define memRowTKey(r) (isDataRow(r) ? dataRowTKey(memRowDataBody(r)) : kvRowTKey(memRowKvBody(r))) +#define memRowKey(r) (isDataRow(r) ? dataRowKey(memRowDataBody(r)) : kvRowKey(memRowKvBody(r))) +#define memRowSetTKey(r, k) \ + do { \ + if (isDataRow(r)) { \ + dataRowTKey(memRowDataBody(r)) = (k); \ + } else { \ + kvRowTKey(memRowKvBody(r)) = (k); \ + } \ + } while (0) + +#define memRowSetType(r, t) (memRowType(r) = (t)) +#define memRowSetLen(r, l) (isDataRow(r) ? memRowDataLen(r) = (l) : memRowKvLen(r) = (l)) +#define memRowSetVersion(r, v) (isDataRow(r) ? dataRowSetVersion(memRowDataBody(r), v) : memRowKvSetVersion(r, v)) +#define memRowCpy(dst, r) memcpy((dst), (r), memRowTLen(r)) +#define memRowMaxBytesFromSchema(s) (schemaTLen(s) + TD_MEM_ROW_DATA_HEAD_SIZE) +#define memRowDeleted(r) TKEY_IS_DELETED(memRowTKey(r)) + +SMemRow tdMemRowDup(SMemRow row); +void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols); +// NOTE: offset here including the header size +static FORCE_INLINE void *tdGetKvRowDataOfCol(void *row, int32_t offset) { return POINTER_SHIFT(row, offset); } +// NOTE: offset here including the header size +static FORCE_INLINE void *tdGetMemRowDataOfCol(void *row, int8_t type, int32_t offset) { + if (isDataRow(row)) { + return tdGetRowDataOfCol(row, type, offset); + } else if (isKvRow(row)) { + return tdGetKvRowDataOfCol(row, offset); + } else { + ASSERT(0); + } + return NULL; +} + +// ----------------- Raw payload structure for row: +/* |<------------ Head ------------->|<----------- body of column data tuple ------------------->| + * | |<----------------- flen ------------->|<--- value part --->| + * |SMemRowType| dataTLen | nCols | colId | colType | offset | ... | value |...|...|... | + * +-----------+----------+----------+--------------------------------------|--------------------| + * | uint8_t | uint32_t | uint16_t | int16_t | uint8_t | uint16_t | ... |.......|...|...|... 
| + * +-----------+----------+----------+--------------------------------------+--------------------| + * 1. offset in column data tuple starts from the value part in case of uint16_t overflow. + * 2. dataTLen: total length including the header and body. + */ + +#define PAYLOAD_NCOLS_LEN sizeof(uint16_t) +#define PAYLOAD_NCOLS_OFFSET (sizeof(uint8_t) + sizeof(TDRowTLenT)) +#define PAYLOAD_HEADER_LEN (PAYLOAD_NCOLS_OFFSET + PAYLOAD_NCOLS_LEN) +#define PAYLOAD_ID_LEN sizeof(int16_t) +#define PAYLOAD_ID_TYPE_LEN (sizeof(int16_t) + sizeof(uint8_t)) +#define PAYLOAD_COL_HEAD_LEN (PAYLOAD_ID_TYPE_LEN + sizeof(uint16_t)) +#define PAYLOAD_PRIMARY_COL_LEN (PAYLOAD_ID_TYPE_LEN + sizeof(TSKEY)) + +#define payloadBody(r) POINTER_SHIFT(r, PAYLOAD_HEADER_LEN) +#define payloadType(r) (*(uint8_t *)(r)) +#define payloadSetType(r, t) (payloadType(r) = (t)) +#define payloadTLen(r) (*(TDRowTLenT *)POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE)) // including total header +#define payloadSetTLen(r, l) (payloadTLen(r) = (l)) +#define payloadNCols(r) (*(TDRowLenT *)POINTER_SHIFT(r, PAYLOAD_NCOLS_OFFSET)) +#define payloadSetNCols(r, n) (payloadNCols(r) = (n)) +#define payloadValuesOffset(r) \ + (PAYLOAD_HEADER_LEN + payloadNCols(r) * PAYLOAD_COL_HEAD_LEN) // avoid using the macro in loop +#define payloadValues(r) POINTER_SHIFT(r, payloadValuesOffset(r)) // avoid using the macro in loop +#define payloadColId(c) (*(int16_t *)(c)) +#define payloadColType(c) (*(uint8_t *)POINTER_SHIFT(c, PAYLOAD_ID_LEN)) +#define payloadColOffset(c) (*(uint16_t *)POINTER_SHIFT(c, PAYLOAD_ID_TYPE_LEN)) +#define payloadColValue(c) POINTER_SHIFT(c, payloadColOffset(c)) + +#define payloadColSetId(c, i) (payloadColId(c) = (i)) +#define payloadColSetType(c, t) (payloadColType(c) = (t)) +#define payloadColSetOffset(c, o) (payloadColOffset(c) = (o)) + +#define payloadTSKey(r) (*(TSKEY *)POINTER_SHIFT(r, payloadValuesOffset(r))) +#define payloadTKey(r) (*(TKEY *)POINTER_SHIFT(r, payloadValuesOffset(r))) +#define payloadKey(r) tdGetKey(payloadTKey(r)) + + +static FORCE_INLINE char *payloadNextCol(char *pCol) { return (char *)POINTER_SHIFT(pCol, PAYLOAD_COL_HEAD_LEN); } + #ifdef __cplusplus } #endif -#endif // _TD_DATA_FORMAT_H_ +#endif // _TD_DATA_FORMAT_H_ \ No newline at end of file diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index 94c429cfc0..9f7432c90d 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -198,6 +198,14 @@ SDataRow tdDataRowDup(SDataRow row) { return trow; } +SMemRow tdMemRowDup(SMemRow row) { + SMemRow trow = malloc(memRowTLen(row)); + if (trow == NULL) return NULL; + + memRowCpy(trow, row); + return trow; +} + void dataColInit(SDataCol *pDataCol, STColumn *pCol, void **pBuf, int maxPoints) { pDataCol->type = colType(pCol); pDataCol->colId = colColId(pCol); @@ -217,11 +225,22 @@ void dataColInit(SDataCol *pDataCol, STColumn *pCol, void **pBuf, int maxPoints) *pBuf = POINTER_SHIFT(*pBuf, pDataCol->spaceSize); } } - // value from timestamp should be TKEY here instead of TSKEY -void dataColAppendVal(SDataCol *pCol, void *value, int numOfRows, int maxPoints) { +void dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints) { ASSERT(pCol != NULL && value != NULL); + if (isAllRowsNull(pCol)) { + if (isNull(value, pCol->type)) { + // all null value yet, just return + return; + } + + if (numOfRows > 0) { + // Find the first not null value, fill all previouse values as NULL + dataColSetNEleNull(pCol, numOfRows, maxPoints); + } + } + if 
(IS_VAR_DATA_TYPE(pCol->type)) { // set offset pCol->dataOff[numOfRows] = pCol->len; @@ -243,7 +262,7 @@ bool isNEleNull(SDataCol *pCol, int nEle) { return true; } -void dataColSetNullAt(SDataCol *pCol, int index) { +FORCE_INLINE void dataColSetNullAt(SDataCol *pCol, int index) { if (IS_VAR_DATA_TYPE(pCol->type)) { pCol->dataOff[index] = pCol->len; char *ptr = POINTER_SHIFT(pCol->pData, pCol->len); @@ -399,8 +418,7 @@ void tdResetDataCols(SDataCols *pCols) { } } } - -void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols) { +static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols) { ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < dataRowKey(row)); int rcol = 0; @@ -419,7 +437,8 @@ void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols) while (dcol < pCols->numOfCols) { SDataCol *pDataCol = &(pCols->cols[dcol]); if (rcol >= schemaNCols(pSchema)) { - dataColSetNullAt(pDataCol, pCols->numOfRows); + // dataColSetNullAt(pDataCol, pCols->numOfRows); + dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints); dcol++; continue; } @@ -433,7 +452,8 @@ void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols) } else if (pRowCol->colId < pDataCol->colId) { rcol++; } else { - dataColSetNullAt(pDataCol, pCols->numOfRows); + // dataColSetNullAt(pDataCol, pCols->numOfRows); + dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints); dcol++; } } @@ -441,6 +461,62 @@ void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols) pCols->numOfRows++; } +static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCols) { + ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < kvRowKey(row)); + + int rcol = 0; + int dcol = 0; + + if (kvRowDeleted(row)) { + for (; dcol < pCols->numOfCols; dcol++) { + SDataCol *pDataCol = &(pCols->cols[dcol]); + if (dcol == 0) { + dataColAppendVal(pDataCol, kvRowValues(row), pCols->numOfRows, pCols->maxPoints); + } else { + dataColSetNullAt(pDataCol, pCols->numOfRows); + } + } + } else { + int nRowCols = kvRowNCols(row); + + while (dcol < pCols->numOfCols) { + SDataCol *pDataCol = &(pCols->cols[dcol]); + if (rcol >= nRowCols || rcol >= schemaNCols(pSchema)) { + // dataColSetNullAt(pDataCol, pCols->numOfRows); + dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints); + ++dcol; + continue; + } + + SColIdx *colIdx = kvRowColIdxAt(row, rcol); + + if (colIdx->colId == pDataCol->colId) { + void *value = tdGetKvRowDataOfCol(row, colIdx->offset); + dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints); + ++dcol; + ++rcol; + } else if (colIdx->colId < pDataCol->colId) { + ++rcol; + } else { + // dataColSetNullAt(pDataCol, pCols->numOfRows); + dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints); + ++dcol; + } + } + } + pCols->numOfRows++; +} + +void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols) { + if (isDataRow(row)) { + tdAppendDataRowToDataCol(memRowDataBody(row), pSchema, pCols); + } else if (isKvRow(row)) { + tdAppendKvRowToDataCol(memRowKvBody(row), pSchema, pCols); + } else { + ASSERT(0); + } +} + int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset) { ASSERT(rowsToMerge > 0 && rowsToMerge <= source->numOfRows); ASSERT(target->numOfCols == source->numOfCols); @@ -563,7 +639,7 @@ int 
tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value) { nrow = malloc(kvRowLen(row) + sizeof(SColIdx) + diff); if (nrow == NULL) return -1; - kvRowSetLen(nrow, kvRowLen(row) + (int16_t)sizeof(SColIdx) + diff); + kvRowSetLen(nrow, kvRowLen(row) + (uint16_t)sizeof(SColIdx) + diff); kvRowSetNCols(nrow, kvRowNCols(row) + 1); if (ptr == NULL) { @@ -605,8 +681,8 @@ int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value) { if (varDataTLen(value) == varDataTLen(pOldVal)) { // just update the column value in place memcpy(pOldVal, value, varDataTLen(value)); } else { // need to reallocate the memory - int16_t diff = varDataTLen(value) - varDataTLen(pOldVal); - int16_t nlen = kvRowLen(row) + diff; + uint16_t diff = varDataTLen(value) - varDataTLen(pOldVal); + uint16_t nlen = kvRowLen(row) + diff; ASSERT(nlen > 0); nrow = malloc(nlen); if (nrow == NULL) return -1; @@ -708,4 +784,4 @@ SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder) { memcpy(kvRowValues(row), pBuilder->buf, pBuilder->size); return row; -} +} \ No newline at end of file diff --git a/src/common/src/tname.c b/src/common/src/tname.c index 72e2d42ff9..26502c5d9c 100644 --- a/src/common/src/tname.c +++ b/src/common/src/tname.c @@ -306,7 +306,7 @@ bool tIsValidName(const SName* name) { SName* tNameDup(const SName* name) { assert(name != NULL); - SName* p = calloc(1, sizeof(SName)); + SName* p = malloc(sizeof(SName)); memcpy(p, name, sizeof(SName)); return p; } diff --git a/src/common/src/ttypes.c b/src/common/src/ttypes.c index 34dda32401..6021a8f579 100644 --- a/src/common/src/ttypes.c +++ b/src/common/src/ttypes.c @@ -492,30 +492,32 @@ void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems) { } } -static uint8_t nullBool = TSDB_DATA_BOOL_NULL; -static uint8_t nullTinyInt = TSDB_DATA_TINYINT_NULL; -static uint16_t nullSmallInt = TSDB_DATA_SMALLINT_NULL; -static uint32_t nullInt = TSDB_DATA_INT_NULL; -static uint64_t nullBigInt = TSDB_DATA_BIGINT_NULL; -static uint32_t nullFloat = TSDB_DATA_FLOAT_NULL; -static uint64_t nullDouble = TSDB_DATA_DOUBLE_NULL; -static uint8_t nullTinyIntu = TSDB_DATA_UTINYINT_NULL; -static uint16_t nullSmallIntu = TSDB_DATA_USMALLINT_NULL; -static uint32_t nullIntu = TSDB_DATA_UINT_NULL; -static uint64_t nullBigIntu = TSDB_DATA_UBIGINT_NULL; +static uint8_t nullBool = TSDB_DATA_BOOL_NULL; +static uint8_t nullTinyInt = TSDB_DATA_TINYINT_NULL; +static uint16_t nullSmallInt = TSDB_DATA_SMALLINT_NULL; +static uint32_t nullInt = TSDB_DATA_INT_NULL; +static uint64_t nullBigInt = TSDB_DATA_BIGINT_NULL; +static uint32_t nullFloat = TSDB_DATA_FLOAT_NULL; +static uint64_t nullDouble = TSDB_DATA_DOUBLE_NULL; +static uint8_t nullTinyIntu = TSDB_DATA_UTINYINT_NULL; +static uint16_t nullSmallIntu = TSDB_DATA_USMALLINT_NULL; +static uint32_t nullIntu = TSDB_DATA_UINT_NULL; +static uint64_t nullBigIntu = TSDB_DATA_UBIGINT_NULL; +static SBinaryNullT nullBinary = {1, TSDB_DATA_BINARY_NULL}; +static SNCharNullT nullNchar = {4, TSDB_DATA_NCHAR_NULL}; -static union { - tstr str; - char pad[sizeof(tstr) + 4]; -} nullBinary = {.str = {.len = 1}}, nullNchar = {.str = {.len = 4}}; +// static union { +// tstr str; +// char pad[sizeof(tstr) + 4]; +// } nullBinary = {.str = {.len = 1}}, nullNchar = {.str = {.len = 4}}; -static void *nullValues[] = { +static const void *nullValues[] = { &nullBool, &nullTinyInt, &nullSmallInt, &nullInt, &nullBigInt, &nullFloat, &nullDouble, &nullBinary, &nullBigInt, &nullNchar, &nullTinyIntu, &nullSmallIntu, &nullIntu, &nullBigIntu, }; -void 
*getNullValue(int32_t type) { +const void *getNullValue(int32_t type) { assert(type >= TSDB_DATA_TYPE_BOOL && type <= TSDB_DATA_TYPE_UBIGINT); return nullValues[type - 1]; } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ResetQueryCacheTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ResetQueryCacheTest.java new file mode 100644 index 0000000000..4eebbd08e8 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ResetQueryCacheTest.java @@ -0,0 +1,51 @@ +package com.taosdata.jdbc.cases; + +import com.taosdata.jdbc.TSDBDriver; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.sql.*; +import java.util.Properties; + +import static org.junit.Assert.assertEquals; + +public class ResetQueryCacheTest { + + static Connection connection; + static Statement statement; + static String host = "127.0.0.1"; + + @Before + public void init() { + try { + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties); + statement = connection.createStatement(); + } catch (SQLException e) { + return; + } + } + + @Test + public void testResetQueryCache() throws SQLException { + String resetSql = "reset query cache"; + statement.execute(resetSql); + } + + @After + public void close() { + try { + if (statement != null) + statement.close(); + if (connection != null) + connection.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + +} \ No newline at end of file diff --git a/src/cq/src/cqMain.c b/src/cq/src/cqMain.c index f539e77253..aac5a1c665 100644 --- a/src/cq/src/cqMain.c +++ b/src/cq/src/cqMain.c @@ -476,21 +476,23 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) { cDebug("vgId:%d, id:%d CQ:%s stream result is ready", pContext->vgId, pObj->tid, pObj->sqlStr); - int32_t size = sizeof(SWalHead) + sizeof(SSubmitMsg) + sizeof(SSubmitBlk) + TD_DATA_ROW_HEAD_SIZE + pObj->rowSize; + int32_t size = sizeof(SWalHead) + sizeof(SSubmitMsg) + sizeof(SSubmitBlk) + TD_MEM_ROW_DATA_HEAD_SIZE + pObj->rowSize; char *buffer = calloc(size, 1); SWalHead *pHead = (SWalHead *)buffer; SSubmitMsg *pMsg = (SSubmitMsg *) (buffer + sizeof(SWalHead)); SSubmitBlk *pBlk = (SSubmitBlk *) (buffer + sizeof(SWalHead) + sizeof(SSubmitMsg)); - SDataRow trow = (SDataRow)pBlk->data; - tdInitDataRow(trow, pSchema); + SMemRow trow = (SMemRow)pBlk->data; + SDataRow dataRow = (SDataRow)memRowDataBody(trow); + memRowSetType(trow, SMEM_ROW_DATA); + tdInitDataRow(dataRow, pSchema); for (int32_t i = 0; i < pSchema->numOfCols; i++) { STColumn *c = pSchema->columns + i; - void* val = row[i]; + void *val = row[i]; if (val == NULL) { - val = getNullValue(c->type); + val = (void *)getNullValue(c->type); } else if (c->type == TSDB_DATA_TYPE_BINARY) { val = ((char*)val) - sizeof(VarDataLenT); } else if (c->type == TSDB_DATA_TYPE_NCHAR) { @@ -500,9 +502,9 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) { memcpy((char *)val + sizeof(VarDataLenT), buf, len); varDataLen(val) = len; } - tdAppendColVal(trow, val, c->type, c->bytes, c->offset); + tdAppendColVal(dataRow, val, c->type, c->offset); } - pBlk->dataLen = htonl(dataRowLen(trow)); + pBlk->dataLen = htonl(memRowDataTLen(trow)); pBlk->schemaLen = 0; pBlk->uid = 
htobe64(pObj->uid); @@ -511,7 +513,7 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) { pBlk->sversion = htonl(pSchema->version); pBlk->padding = 0; - pHead->len = sizeof(SSubmitMsg) + sizeof(SSubmitBlk) + dataRowLen(trow); + pHead->len = sizeof(SSubmitMsg) + sizeof(SSubmitBlk) + memRowDataTLen(trow); pMsg->header.vgId = htonl(pContext->vgId); pMsg->header.contLen = htonl(pHead->len); diff --git a/src/dnode/src/dnodeMPeer.c b/src/dnode/src/dnodeMPeer.c index e4942c49aa..8aa28d1618 100644 --- a/src/dnode/src/dnodeMPeer.c +++ b/src/dnode/src/dnodeMPeer.c @@ -150,6 +150,8 @@ static void *dnodeProcessMPeerQueue(void *param) { SMnodeMsg *pPeerMsg; int32_t type; void * unUsed; + + setThreadName("dnodeMPeerQ"); while (1) { if (taosReadQitemFromQset(tsMPeerQset, &type, (void **)&pPeerMsg, &unUsed) == 0) { diff --git a/src/dnode/src/dnodeMRead.c b/src/dnode/src/dnodeMRead.c index 90332e6783..184a6b743a 100644 --- a/src/dnode/src/dnodeMRead.c +++ b/src/dnode/src/dnodeMRead.c @@ -155,6 +155,8 @@ static void *dnodeProcessMReadQueue(void *param) { int32_t type; void * unUsed; + setThreadName("dnodeMReadQ"); + while (1) { if (taosReadQitemFromQset(tsMReadQset, &type, (void **)&pRead, &unUsed) == 0) { dDebug("qset:%p, mnode read got no message from qset, exiting", tsMReadQset); diff --git a/src/dnode/src/dnodeMWrite.c b/src/dnode/src/dnodeMWrite.c index a409d537fa..904ddc21d0 100644 --- a/src/dnode/src/dnodeMWrite.c +++ b/src/dnode/src/dnodeMWrite.c @@ -168,7 +168,9 @@ static void *dnodeProcessMWriteQueue(void *param) { SMnodeMsg *pWrite; int32_t type; void * unUsed; - + + setThreadName("dnodeMWriteQ"); + while (1) { if (taosReadQitemFromQset(tsMWriteQset, &type, (void **)&pWrite, &unUsed) == 0) { dDebug("qset:%p, mnode write got no message from qset, exiting", tsMWriteQset); diff --git a/src/dnode/src/dnodeTelemetry.c b/src/dnode/src/dnodeTelemetry.c index 4caece1661..59b66879d4 100644 --- a/src/dnode/src/dnodeTelemetry.c +++ b/src/dnode/src/dnodeTelemetry.c @@ -245,6 +245,8 @@ static void* telemetryThread(void* param) { clock_gettime(CLOCK_REALTIME, &end); end.tv_sec += 300; // wait 5 minutes before send first report + setThreadName("telemetryThrd"); + while (!tsExit) { int r = 0; struct timespec ts = end; diff --git a/src/dnode/src/dnodeVMgmt.c b/src/dnode/src/dnodeVMgmt.c index daf62aac94..c1bfb1460b 100644 --- a/src/dnode/src/dnodeVMgmt.c +++ b/src/dnode/src/dnodeVMgmt.c @@ -103,6 +103,8 @@ static void *dnodeProcessMgmtQueue(void *wparam) { int32_t qtype; void * handle; + setThreadName("dnodeMgmtQ"); + while (1) { if (taosReadQitemFromQset(pPool->qset, &qtype, (void **)&pMgmt, &handle) == 0) { dDebug("qdnode mgmt got no message from qset:%p, , exit", pPool->qset); diff --git a/src/dnode/src/dnodeVRead.c b/src/dnode/src/dnodeVRead.c index 41016d7b99..e8003a8fe7 100644 --- a/src/dnode/src/dnodeVRead.c +++ b/src/dnode/src/dnodeVRead.c @@ -118,6 +118,11 @@ static void *dnodeProcessReadQueue(void *wparam) { SVReadMsg * pRead; int32_t qtype; void * pVnode; + char name[16]; + + memset(name, 0, 16); + snprintf(name, 16, "%s-dnReadQ", pPool->name); + setThreadName(name); while (1) { if (taosReadQitemFromQset(pPool->qset, &qtype, (void **)&pRead, &pVnode) == 0) { diff --git a/src/dnode/src/dnodeVWrite.c b/src/dnode/src/dnodeVWrite.c index ff2d12f001..ed2a6e2109 100644 --- a/src/dnode/src/dnodeVWrite.c +++ b/src/dnode/src/dnodeVWrite.c @@ -191,6 +191,8 @@ static void *dnodeProcessVWriteQueue(void *wparam) { taosBlockSIGPIPE(); dDebug("dnode vwrite worker:%d is running", 
pWorker->workerId); + setThreadName("dnodeWriteQ"); + while (1) { numOfMsgs = taosReadAllQitemsFromQset(pWorker->qset, pWorker->qall, &pVnode); if (numOfMsgs == 0) { diff --git a/src/dnode/src/dnodeVnodes.c b/src/dnode/src/dnodeVnodes.c index f01a510370..8ea8e280de 100644 --- a/src/dnode/src/dnodeVnodes.c +++ b/src/dnode/src/dnodeVnodes.c @@ -91,6 +91,8 @@ static void *dnodeOpenVnode(void *param) { dDebug("thread:%d, start to open %d vnodes", pThread->threadIndex, pThread->vnodeNum); + setThreadName("dnodeOpenVnode"); + for (int32_t v = 0; v < pThread->vnodeNum; ++v) { int32_t vgId = pThread->vnodeList[v]; snprintf(stepDesc, TSDB_STEP_DESC_LEN, "vgId:%d, start to restore, %d of %d have been opened", vgId, tsOpenVnodes, tsTotalVnodes); diff --git a/src/inc/taos.h b/src/inc/taos.h index a62f387924..ba53c1ca8f 100644 --- a/src/inc/taos.h +++ b/src/inc/taos.h @@ -72,6 +72,7 @@ DLL_EXPORT int taos_init(); DLL_EXPORT void taos_cleanup(void); DLL_EXPORT int taos_options(TSDB_OPTION option, const void *arg, ...); DLL_EXPORT TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port); +DLL_EXPORT TAOS *taos_connect_auth(const char *ip, const char *user, const char *auth, const char *db, uint16_t port); DLL_EXPORT void taos_close(TAOS *taos); const char *taos_data_type(int type); @@ -110,21 +111,23 @@ typedef struct TAOS_MULTI_BIND { } TAOS_MULTI_BIND; -TAOS_STMT *taos_stmt_init(TAOS *taos); -int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length); -int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags); -int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name); -int taos_stmt_is_insert(TAOS_STMT *stmt, int *insert); -int taos_stmt_num_params(TAOS_STMT *stmt, int *nums); + +DLL_EXPORT TAOS_STMT *taos_stmt_init(TAOS *taos); +DLL_EXPORT int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length); +DLL_EXPORT int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags); +DLL_EXPORT int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name); +DLL_EXPORT int taos_stmt_set_sub_tbname(TAOS_STMT* stmt, const char* name); +DLL_EXPORT int taos_stmt_is_insert(TAOS_STMT *stmt, int *insert); +DLL_EXPORT int taos_stmt_num_params(TAOS_STMT *stmt, int *nums); int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes); -int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind); +DLL_EXPORT int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind); int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind); int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int colIdx); -int taos_stmt_add_batch(TAOS_STMT *stmt); -int taos_stmt_execute(TAOS_STMT *stmt); -TAOS_RES * taos_stmt_use_result(TAOS_STMT *stmt); -int taos_stmt_close(TAOS_STMT *stmt); -char * taos_stmt_errstr(TAOS_STMT *stmt); +DLL_EXPORT int taos_stmt_add_batch(TAOS_STMT *stmt); +DLL_EXPORT int taos_stmt_execute(TAOS_STMT *stmt); +DLL_EXPORT TAOS_RES * taos_stmt_use_result(TAOS_STMT *stmt); +DLL_EXPORT int taos_stmt_close(TAOS_STMT *stmt); +DLL_EXPORT char * taos_stmt_errstr(TAOS_STMT *stmt); DLL_EXPORT TAOS_RES *taos_query(TAOS *taos, const char *sql); DLL_EXPORT TAOS_ROW taos_fetch_row(TAOS_RES *res); @@ -139,10 +142,10 @@ DLL_EXPORT int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int n DLL_EXPORT void taos_stop_query(TAOS_RES *res); DLL_EXPORT bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col); -int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows); 
-int taos_validate_sql(TAOS *taos, const char *sql); +DLL_EXPORT int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows); +DLL_EXPORT int taos_validate_sql(TAOS *taos, const char *sql); -int* taos_fetch_lengths(TAOS_RES *res); +DLL_EXPORT int* taos_fetch_lengths(TAOS_RES *res); // TAOS_RES *taos_list_tables(TAOS *mysql, const char *wild); // TAOS_RES *taos_list_dbs(TAOS *mysql, const char *wild); diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index 56118b0a76..ca8ad3cc09 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -180,7 +180,7 @@ do { \ // this is the length of its string representation, including the terminator zero #define TSDB_ACCT_ID_LEN 11 -#define TSDB_MAX_COLUMNS 1024 +#define TSDB_MAX_COLUMNS 4096 #define TSDB_MIN_COLUMNS 2 //PRIMARY COLUMN(timestamp) + other columns #define TSDB_NODE_NAME_LEN 64 @@ -199,7 +199,13 @@ do { \ #define TSDB_APPNAME_LEN TSDB_UNI_LEN -#define TSDB_MAX_BYTES_PER_ROW 16384 + /** + * In some scenarios uint16_t (0~65535) is used to store the row len. + * - Firstly, we use 65531(65535 - 4), as the SDataRow/SKVRow contains a 4-byte header. + * - Secondly, if all cols are VarDataT type except primary key, we need 4 bytes per column to store the offset, thus + * the final value is 65531-(4096-1)*4 = 49151. + */ +#define TSDB_MAX_BYTES_PER_ROW 49151 #define TSDB_MAX_TAGS_LEN 16384 #define TSDB_MAX_TAGS 128 #define TSDB_MAX_TAG_CONDITIONS 1024 @@ -327,8 +333,9 @@ do { \ #define TSDB_MAX_JOIN_TABLE_NUM 10 #define TSDB_MAX_UNION_CLAUSE 5 -#define TSDB_MAX_BINARY_LEN (TSDB_MAX_BYTES_PER_ROW-TSDB_KEYSIZE) -#define TSDB_MAX_NCHAR_LEN (TSDB_MAX_BYTES_PER_ROW-TSDB_KEYSIZE) +#define TSDB_MAX_FIELD_LEN 16384 +#define TSDB_MAX_BINARY_LEN (TSDB_MAX_FIELD_LEN-TSDB_KEYSIZE) // keep 16384 +#define TSDB_MAX_NCHAR_LEN (TSDB_MAX_FIELD_LEN-TSDB_KEYSIZE) // keep 16384 #define PRIMARYKEY_TIMESTAMP_COL_INDEX 0 #define TSDB_MAX_RPC_THREADS 5 diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index fbb29014e0..eefa0ed92a 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -174,6 +174,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_MND_FIELD_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x036C) //"Field does not exist") #define TSDB_CODE_MND_INVALID_STABLE_NAME TAOS_DEF_ERROR_CODE(0, 0x036D) //"Super table does not exist") #define TSDB_CODE_MND_INVALID_CREATE_TABLE_MSG TAOS_DEF_ERROR_CODE(0, 0x036E) //"Invalid create table message") +#define TSDB_CODE_MND_EXCEED_MAX_ROW_BYTES TAOS_DEF_ERROR_CODE(0, 0x036F) //"Exceed max row bytes") #define TSDB_CODE_MND_INVALID_FUNC_NAME TAOS_DEF_ERROR_CODE(0, 0x0370) //"Invalid func name") #define TSDB_CODE_MND_INVALID_FUNC_LEN TAOS_DEF_ERROR_CODE(0, 0x0371) //"Invalid func length") diff --git a/src/inc/ttype.h b/src/inc/ttype.h index 8eaa52f90a..6e436bd23d 100644 --- a/src/inc/ttype.h +++ b/src/inc/ttype.h @@ -10,14 +10,28 @@ extern "C" { #include "taosdef.h" // ----------------- For variable data types such as TSDB_DATA_TYPE_BINARY and TSDB_DATA_TYPE_NCHAR -typedef int32_t VarDataOffsetT; -typedef int16_t VarDataLenT; +typedef int32_t VarDataOffsetT; +typedef int16_t VarDataLenT; // maxVarDataLen: 32767 +typedef uint16_t TDRowLenT; // not including overhead: 0 ~ 65535 +typedef uint32_t TDRowTLenT; // total length, including overhead typedef struct tstr { VarDataLenT len; char data[]; } tstr; +#pragma pack(push, 1) +typedef struct { + VarDataLenT len; + uint8_t data; +} SBinaryNullT; + +typedef struct { + VarDataLenT len; + uint32_t data; +} SNCharNullT; +#pragma pack(pop) + #define VARSTR_HEADER_SIZE sizeof(VarDataLenT) #define varDataLen(v)
((VarDataLenT *)(v))[0] @@ -180,7 +194,7 @@ bool isValidDataType(int32_t type); void setVardataNull(char* val, int32_t type); void setNull(char *val, int32_t type, int32_t bytes); void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems); -void *getNullValue(int32_t type); +const void *getNullValue(int32_t type); void assignVal(char *val, const char *src, int32_t len, int32_t type); void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf); diff --git a/src/kit/shell/src/shellCheck.c b/src/kit/shell/src/shellCheck.c index 4ff5dc36fc..d78f1a6b99 100644 --- a/src/kit/shell/src/shellCheck.c +++ b/src/kit/shell/src/shellCheck.c @@ -104,6 +104,8 @@ static void shellFreeTbnames() { static void *shellCheckThreadFp(void *arg) { ShellThreadObj *pThread = (ShellThreadObj *)arg; + setThreadName("shellCheckThrd"); + int32_t interval = tbNum / pThread->totalThreads + 1; int32_t start = pThread->threadIndex * interval; int32_t end = (pThread->threadIndex + 1) * interval; diff --git a/src/kit/shell/src/shellDarwin.c b/src/kit/shell/src/shellDarwin.c index 31ad7046e9..86c0fea573 100644 --- a/src/kit/shell/src/shellDarwin.c +++ b/src/kit/shell/src/shellDarwin.c @@ -336,6 +336,8 @@ void *shellLoopQuery(void *arg) { TAOS *con = (TAOS *)arg; + setThreadName("shellLoopQuery"); + pthread_cleanup_push(cleanup_handler, NULL); char *command = malloc(MAX_COMMAND_SIZE); diff --git a/src/kit/shell/src/shellImport.c b/src/kit/shell/src/shellImport.c index 5de50a3aaf..222d69e854 100644 --- a/src/kit/shell/src/shellImport.c +++ b/src/kit/shell/src/shellImport.c @@ -223,6 +223,8 @@ static void shellSourceFile(TAOS *con, char *fptr) { void* shellImportThreadFp(void *arg) { ShellThreadObj *pThread = (ShellThreadObj*)arg; + setThreadName("shellImportThrd"); + for (int f = 0; f < shellSQLFileNum; ++f) { if (f % pThread->totalThreads == pThread->threadIndex) { char *SQLFileName = shellSQLFiles[f]; diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c index 4eead252fd..2a32a8d82e 100644 --- a/src/kit/shell/src/shellLinux.c +++ b/src/kit/shell/src/shellLinux.c @@ -336,6 +336,8 @@ void *shellLoopQuery(void *arg) { TAOS *con = (TAOS *)arg; + setThreadName("shellLoopQuery"); + pthread_cleanup_push(cleanup_handler, NULL); char *command = malloc(MAX_COMMAND_SIZE); diff --git a/src/kit/shell/src/shellMain.c b/src/kit/shell/src/shellMain.c index 4c7e550760..0c70386061 100644 --- a/src/kit/shell/src/shellMain.c +++ b/src/kit/shell/src/shellMain.c @@ -26,6 +26,8 @@ void shellQueryInterruptHandler(int32_t signum, void *sigInfo, void *context) { } void *cancelHandler(void *arg) { + setThreadName("cancelHandler"); + while(1) { if (tsem_wait(&cancelSem) != 0) { taosMsleep(10); diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index fc5bac0a99..d3ee573150 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -71,16 +71,15 @@ extern char configDir[]; #define HEAD_BUFF_LEN TSDB_MAX_COLUMNS*24 // 16*MAX_COLUMNS + (192+32)*2 + insert into .. 
-#define MAX_SQL_SIZE 65536 -#define BUFFER_SIZE (65536*2) -#define COND_BUF_LEN (BUFFER_SIZE - 30) +#define BUFFER_SIZE TSDB_MAX_ALLOWED_SQL_LEN +#define COND_BUF_LEN (BUFFER_SIZE - 30) +#define COL_BUFFER_LEN ((TSDB_COL_NAME_LEN + 15) * TSDB_MAX_COLUMNS) #define MAX_USERNAME_SIZE 64 #define MAX_PASSWORD_SIZE 64 #define MAX_HOSTNAME_SIZE 64 #define MAX_TB_NAME_SIZE 64 #define MAX_DATA_SIZE (16*TSDB_MAX_COLUMNS)+20 // max record len: 16*MAX_COLUMNS, timestamp string and ,('') need extra space #define OPT_ABORT 1 /* –abort */ -#define STRING_LEN 60000 #define MAX_PREPARED_RAND 1000000 #define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255. @@ -91,7 +90,7 @@ extern char configDir[]; #define MAX_SUPER_TABLE_COUNT 200 #define MAX_QUERY_SQL_COUNT 100 -#define MAX_QUERY_SQL_LENGTH 1024 +#define MAX_QUERY_SQL_LENGTH BUFFER_SIZE #define MAX_DATABASE_COUNT 256 #define INPUT_BUF_LEN 256 @@ -1141,7 +1140,6 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { if (arguments->disorderRatio) { printf("# Data order: %d\n", arguments->disorderRatio); printf("# Data out of order rate: %d\n", arguments->disorderRange); - } printf("# Delete method: %d\n", arguments->method_of_delete); printf("# Answer yes when prompt: %d\n", arguments->answer_yes); @@ -2196,7 +2194,6 @@ static void printfQuerySystemInfo(TAOS * taos) { snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.stables;", dbInfos[i]->name); res = taos_query(taos, buffer); xDumpResultToFile(filename, res); - free(dbInfos[i]); } @@ -2370,7 +2367,25 @@ static char* getTagValueFromTagSample(SSuperTable* stbInfo, int tagUsePos) { return dataBuf; } -static char* generateTagVaulesForStb(SSuperTable* stbInfo, int32_t tableSeq) { +static char *generateBinaryNCharTagValues(int64_t tableSeq, uint32_t len) +{ + char* buf = (char*)calloc(len, 1); + if (NULL == buf) { + printf("calloc failed! size:%d\n", len); + return NULL; + } + + if (tableSeq % 2) { + tstrncpy(buf, "beijing", len); + } else { + tstrncpy(buf, "shanghai", len); + } + //rand_string(buf, stbInfo->tags[i].dataLen); + + return buf; +} + +static char* generateTagValuesForStb(SSuperTable* stbInfo, int64_t tableSeq) { char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1); if (NULL == dataBuf) { printf("calloc failed! size:%d\n", TSDB_MAX_SQL_LEN+1); @@ -2391,70 +2406,62 @@ static char* generateTagVaulesForStb(SSuperTable* stbInfo, int32_t tableSeq) { return NULL; } - int tagBufLen = stbInfo->tags[i].dataLen + 1; - char* buf = (char*)calloc(tagBufLen, 1); + int32_t tagBufLen = stbInfo->tags[i].dataLen + 1; + char *buf = generateBinaryNCharTagValues(tableSeq, tagBufLen); if (NULL == buf) { - printf("calloc failed! 
size:%d\n", stbInfo->tags[i].dataLen); tmfree(dataBuf); return NULL; } - - if (tableSeq % 2) { - tstrncpy(buf, "beijing", tagBufLen); - } else { - tstrncpy(buf, "shanghai", tagBufLen); - } - //rand_string(buf, stbInfo->tags[i].dataLen); dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "\'%s\', ", buf); + "\'%s\',", buf); tmfree(buf); } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "int", strlen("int"))) { if ((g_args.demo_mode) && (i == 0)) { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%d, ", tableSeq % 10); + "%"PRId64",", tableSeq % 10); } else { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%d, ", tableSeq); + "%"PRId64",", tableSeq); } } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "bigint", strlen("bigint"))) { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%"PRId64", ", rand_bigint()); + "%"PRId64",", rand_bigint()); } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "float", strlen("float"))) { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%f, ", rand_float()); + "%f,", rand_float()); } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "double", strlen("double"))) { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%f, ", rand_double()); + "%f,", rand_double()); } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "smallint", strlen("smallint"))) { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%d, ", rand_smallint()); + "%d,", rand_smallint()); } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "tinyint", strlen("tinyint"))) { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%d, ", rand_tinyint()); + "%d,", rand_tinyint()); } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "bool", strlen("bool"))) { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%d, ", rand_bool()); + "%d,", rand_bool()); } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "timestamp", strlen("timestamp"))) { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%"PRId64", ", rand_bigint()); + "%"PRId64",", rand_bigint()); } else { - printf("No support data type: %s\n", stbInfo->tags[i].dataType); + errorPrint("No support data type: %s\n", stbInfo->tags[i].dataType); tmfree(dataBuf); return NULL; } } - dataLen -= 2; + dataLen -= 1; dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, ")"); return dataBuf; } @@ -2709,7 +2716,7 @@ static int createSuperTable( char command[BUFFER_SIZE] = "\0"; - char cols[STRING_LEN] = "\0"; + char cols[COL_BUFFER_LEN] = "\0"; int colIndex; int len = 0; @@ -2725,55 +2732,55 @@ static int createSuperTable( char* dataType = superTbl->columns[colIndex].dataType; if (strcasecmp(dataType, "BINARY") == 0) { - len += snprintf(cols + len, STRING_LEN - len, - ", col%d %s(%d)", colIndex, "BINARY", + len += snprintf(cols + len, COL_BUFFER_LEN - len, + ",C%d %s(%d)", colIndex, "BINARY", superTbl->columns[colIndex].dataLen); lenOfOneRow += superTbl->columns[colIndex].dataLen + 3; } else if (strcasecmp(dataType, "NCHAR") == 0) { - len += snprintf(cols + len, STRING_LEN - len, - ", col%d %s(%d)", colIndex, "NCHAR", + len += snprintf(cols + len, COL_BUFFER_LEN - len, + ",C%d %s(%d)", colIndex, "NCHAR", superTbl->columns[colIndex].dataLen); lenOfOneRow += superTbl->columns[colIndex].dataLen + 3; } else if (strcasecmp(dataType, "INT") == 0) { if ((g_args.demo_mode) && (colIndex == 1)) { - len += snprintf(cols + len, STRING_LEN - len, + 
len += snprintf(cols + len, COL_BUFFER_LEN - len, ", VOLTAGE INT"); } else { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "INT"); + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "INT"); } lenOfOneRow += 11; } else if (strcasecmp(dataType, "BIGINT") == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "BIGINT"); lenOfOneRow += 21; } else if (strcasecmp(dataType, "SMALLINT") == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "SMALLINT"); lenOfOneRow += 6; } else if (strcasecmp(dataType, "TINYINT") == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "TINYINT"); + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "TINYINT"); lenOfOneRow += 4; } else if (strcasecmp(dataType, "BOOL") == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "BOOL"); + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "BOOL"); lenOfOneRow += 6; } else if (strcasecmp(dataType, "FLOAT") == 0) { if (g_args.demo_mode) { if (colIndex == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", CURRENT FLOAT"); + len += snprintf(cols + len, COL_BUFFER_LEN - len, ", CURRENT FLOAT"); } else if (colIndex == 2) { - len += snprintf(cols + len, STRING_LEN - len, ", PHASE FLOAT"); + len += snprintf(cols + len, COL_BUFFER_LEN - len, ", PHASE FLOAT"); } } else { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "FLOAT"); + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "FLOAT"); } lenOfOneRow += 22; } else if (strcasecmp(dataType, "DOUBLE") == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "DOUBLE"); lenOfOneRow += 42; } else if (strcasecmp(dataType, "TIMESTAMP") == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "TIMESTAMP"); lenOfOneRow += 21; } else { @@ -2805,60 +2812,63 @@ static int createSuperTable( return -1; } - char tags[STRING_LEN] = "\0"; + char tags[TSDB_MAX_TAGS_LEN] = "\0"; int tagIndex; len = 0; int lenOfTagOfOneRow = 0; - len += snprintf(tags + len, STRING_LEN - len, "("); + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "("); for (tagIndex = 0; tagIndex < superTbl->tagCount; tagIndex++) { char* dataType = superTbl->tags[tagIndex].dataType; if (strcasecmp(dataType, "BINARY") == 0) { if ((g_args.demo_mode) && (tagIndex == 1)) { - len += snprintf(tags + len, STRING_LEN - len, - "loction BINARY(%d), ", + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "location BINARY(%d),", superTbl->tags[tagIndex].dataLen); } else { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", - tagIndex, "BINARY", superTbl->tags[tagIndex].dataLen); + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s(%d),", tagIndex, "BINARY", + superTbl->tags[tagIndex].dataLen); } lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3; } else if (strcasecmp(dataType, "NCHAR") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex, + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s(%d),", tagIndex, "NCHAR", superTbl->tags[tagIndex].dataLen); lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3; } else if (strcasecmp(dataType, 
"INT") == 0) { if ((g_args.demo_mode) && (tagIndex == 0)) { - len += snprintf(tags + len, STRING_LEN - len, "groupId INT, "); + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "groupId INT, "); } else { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "INT"); + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s,", tagIndex, "INT"); } lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 11; } else if (strcasecmp(dataType, "BIGINT") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "BIGINT"); + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s,", tagIndex, "BIGINT"); lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 21; } else if (strcasecmp(dataType, "SMALLINT") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "SMALLINT"); + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s,", tagIndex, "SMALLINT"); lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 6; } else if (strcasecmp(dataType, "TINYINT") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "TINYINT"); + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s,", tagIndex, "TINYINT"); lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 4; } else if (strcasecmp(dataType, "BOOL") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "BOOL"); + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s,", tagIndex, "BOOL"); lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 6; } else if (strcasecmp(dataType, "FLOAT") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "FLOAT"); + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s,", tagIndex, "FLOAT"); lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 22; } else if (strcasecmp(dataType, "DOUBLE") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "DOUBLE"); + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s,", tagIndex, "DOUBLE"); lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 42; } else { taos_close(taos); @@ -2868,8 +2878,8 @@ static int createSuperTable( } } - len -= 2; - len += snprintf(tags + len, STRING_LEN - len, ")"); + len -= 1; + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, ")"); superTbl->lenOfTagOfOneRow = lenOfTagOfOneRow; @@ -3022,175 +3032,176 @@ static int createDatabasesAndStables() { static void* createTable(void *sarg) { - threadInfo *pThreadInfo = (threadInfo *)sarg; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + threadInfo *pThreadInfo = (threadInfo *)sarg; + SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - uint64_t lastPrintTime = taosGetTimestampMs(); + setThreadName("createTable"); - int buff_len; - buff_len = BUFFER_SIZE / 8; + uint64_t lastPrintTime = taosGetTimestampMs(); - pThreadInfo->buffer = calloc(buff_len, 1); - if (pThreadInfo->buffer == NULL) { - errorPrint("%s() LN%d, Memory allocated failed!\n", __func__, __LINE__); - exit(-1); - } + int buff_len = BUFFER_SIZE; - int len = 0; - int batchNum = 0; - - verbosePrint("%s() LN%d: Creating table from %"PRIu64" to %"PRIu64"\n", - __func__, __LINE__, - pThreadInfo->start_table_from, pThreadInfo->end_table_to); - - for (uint64_t i = pThreadInfo->start_table_from; - i <= pThreadInfo->end_table_to; i++) { - if (0 == g_Dbs.use_metric) { - snprintf(pThreadInfo->buffer, buff_len, - "create table if not exists %s.%s%"PRIu64" %s;", - pThreadInfo->db_name, - g_args.tb_prefix, i, - 
pThreadInfo->cols); - } else { - if (superTblInfo == NULL) { - errorPrint("%s() LN%d, use metric, but super table info is NULL\n", - __func__, __LINE__); - free(pThreadInfo->buffer); + pThreadInfo->buffer = calloc(buff_len, 1); + if (pThreadInfo->buffer == NULL) { + errorPrint("%s() LN%d, Memory allocated failed!\n", __func__, __LINE__); exit(-1); - } else { - if (0 == len) { - batchNum = 0; - memset(pThreadInfo->buffer, 0, buff_len); - len += snprintf(pThreadInfo->buffer + len, - buff_len - len, "create table "); - } - char* tagsValBuf = NULL; - if (0 == superTblInfo->tagSource) { - tagsValBuf = generateTagVaulesForStb(superTblInfo, i); + } + + int len = 0; + int batchNum = 0; + + verbosePrint("%s() LN%d: Creating table from %"PRIu64" to %"PRIu64"\n", + __func__, __LINE__, + pThreadInfo->start_table_from, pThreadInfo->end_table_to); + + for (uint64_t i = pThreadInfo->start_table_from; + i <= pThreadInfo->end_table_to; i++) { + if (0 == g_Dbs.use_metric) { + snprintf(pThreadInfo->buffer, buff_len, + "create table if not exists %s.%s%"PRIu64" %s;", + pThreadInfo->db_name, + g_args.tb_prefix, i, + pThreadInfo->cols); } else { - tagsValBuf = getTagValueFromTagSample( - superTblInfo, - i % superTblInfo->tagSampleCount); + if (superTblInfo == NULL) { + errorPrint("%s() LN%d, use metric, but super table info is NULL\n", + __func__, __LINE__); + free(pThreadInfo->buffer); + exit(-1); + } else { + if (0 == len) { + batchNum = 0; + memset(pThreadInfo->buffer, 0, buff_len); + len += snprintf(pThreadInfo->buffer + len, + buff_len - len, "create table "); + } + char* tagsValBuf = NULL; + if (0 == superTblInfo->tagSource) { + tagsValBuf = generateTagValuesForStb(superTblInfo, i); + } else { + tagsValBuf = getTagValueFromTagSample( + superTblInfo, + i % superTblInfo->tagSampleCount); + } + if (NULL == tagsValBuf) { + free(pThreadInfo->buffer); + return NULL; + } + len += snprintf(pThreadInfo->buffer + len, + buff_len - len, + "if not exists %s.%s%"PRIu64" using %s.%s tags %s ", + pThreadInfo->db_name, superTblInfo->childTblPrefix, + i, pThreadInfo->db_name, + superTblInfo->sTblName, tagsValBuf); + free(tagsValBuf); + batchNum++; + if ((batchNum < superTblInfo->batchCreateTableNum) + && ((buff_len - len) + >= (superTblInfo->lenOfTagOfOneRow + 256))) { + continue; + } + } } - if (NULL == tagsValBuf) { - free(pThreadInfo->buffer); - return NULL; + + len = 0; + if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, + NO_INSERT_TYPE, false)){ + errorPrint( "queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer); + free(pThreadInfo->buffer); + return NULL; } - len += snprintf(pThreadInfo->buffer + len, - buff_len - len, - "if not exists %s.%s%"PRIu64" using %s.%s tags %s ", - pThreadInfo->db_name, superTblInfo->childTblPrefix, - i, pThreadInfo->db_name, - superTblInfo->sTblName, tagsValBuf); - free(tagsValBuf); - batchNum++; - if ((batchNum < superTblInfo->batchCreateTableNum) - && ((buff_len - len) - >= (superTblInfo->lenOfTagOfOneRow + 256))) { - continue; + + uint64_t currentPrintTime = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] already create %"PRIu64" - %"PRIu64" tables\n", + pThreadInfo->threadID, pThreadInfo->start_table_from, i); + lastPrintTime = currentPrintTime; } - } } - len = 0; - if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, - NO_INSERT_TYPE, false)){ - errorPrint( "queryDbExec() failed. 
buffer:\n%s\n", pThreadInfo->buffer); - free(pThreadInfo->buffer); - return NULL; + if (0 != len) { + if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, + NO_INSERT_TYPE, false)) { + errorPrint( "queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer); + } } - uint64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30*1000) { - printf("thread[%d] already create %"PRIu64" - %"PRIu64" tables\n", - pThreadInfo->threadID, pThreadInfo->start_table_from, i); - lastPrintTime = currentPrintTime; - } - } - - if (0 != len) { - if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, - NO_INSERT_TYPE, false)) { - errorPrint( "queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer); - } - } - - free(pThreadInfo->buffer); - return NULL; + free(pThreadInfo->buffer); + return NULL; } static int startMultiThreadCreateChildTable( char* cols, int threads, uint64_t tableFrom, int64_t ntables, char* db_name, SSuperTable* superTblInfo) { - pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); - threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); + pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); + threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); - if ((NULL == pids) || (NULL == infos)) { - printf("malloc failed\n"); - exit(-1); - } - - if (threads < 1) { - threads = 1; - } - - int64_t a = ntables / threads; - if (a < 1) { - threads = ntables; - a = 1; - } - - int64_t b = 0; - b = ntables % threads; - - for (int64_t i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - pThreadInfo->threadID = i; - tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN); - pThreadInfo->superTblInfo = superTblInfo; - verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name); - pThreadInfo->taos = taos_connect( - g_Dbs.host, - g_Dbs.user, - g_Dbs.password, - db_name, - g_Dbs.port); - if (pThreadInfo->taos == NULL) { - errorPrint( "%s() LN%d, Failed to connect to TDengine, reason:%s\n", - __func__, __LINE__, taos_errstr(NULL)); - free(pids); - free(infos); - return -1; + if ((NULL == pids) || (NULL == infos)) { + printf("malloc failed\n"); + exit(-1); } - pThreadInfo->start_table_from = tableFrom; - pThreadInfo->ntables = iend_table_to = i < b ? 
tableFrom + a : tableFrom + a - 1; - tableFrom = pThreadInfo->end_table_to + 1; - pThreadInfo->use_metric = true; - pThreadInfo->cols = cols; - pThreadInfo->minDelay = UINT64_MAX; - pthread_create(pids + i, NULL, createTable, pThreadInfo); - } + if (threads < 1) { + threads = 1; + } - for (int i = 0; i < threads; i++) { - pthread_join(pids[i], NULL); - } + int64_t a = ntables / threads; + if (a < 1) { + threads = ntables; + a = 1; + } - for (int i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - taos_close(pThreadInfo->taos); - } + int64_t b = 0; + b = ntables % threads; - free(pids); - free(infos); + for (int64_t i = 0; i < threads; i++) { + threadInfo *pThreadInfo = infos + i; + pThreadInfo->threadID = i; + tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN); + pThreadInfo->superTblInfo = superTblInfo; + verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name); + pThreadInfo->taos = taos_connect( + g_Dbs.host, + g_Dbs.user, + g_Dbs.password, + db_name, + g_Dbs.port); + if (pThreadInfo->taos == NULL) { + errorPrint( "%s() LN%d, Failed to connect to TDengine, reason:%s\n", + __func__, __LINE__, taos_errstr(NULL)); + free(pids); + free(infos); + return -1; + } - return 0; + pThreadInfo->start_table_from = tableFrom; + pThreadInfo->ntables = iend_table_to = i < b ? tableFrom + a : tableFrom + a - 1; + tableFrom = pThreadInfo->end_table_to + 1; + pThreadInfo->use_metric = true; + pThreadInfo->cols = cols; + pThreadInfo->minDelay = UINT64_MAX; + pthread_create(pids + i, NULL, createTable, pThreadInfo); + } + + for (int i = 0; i < threads; i++) { + pthread_join(pids[i], NULL); + } + + for (int i = 0; i < threads; i++) { + threadInfo *pThreadInfo = infos + i; + taos_close(pThreadInfo->taos); + } + + free(pids); + free(infos); + + return 0; } static void createChildTables() { - char tblColsBuf[MAX_SQL_SIZE]; + char tblColsBuf[TSDB_MAX_BYTES_PER_ROW]; int len; for (int i = 0; i < g_Dbs.dbCount; i++) { @@ -3222,21 +3233,21 @@ static void createChildTables() { } } else { // normal table - len = snprintf(tblColsBuf, MAX_SQL_SIZE, "(TS TIMESTAMP"); + len = snprintf(tblColsBuf, TSDB_MAX_BYTES_PER_ROW, "(TS TIMESTAMP"); for (int j = 0; j < g_args.num_of_CPR; j++) { if ((strncasecmp(g_args.datatype[j], "BINARY", strlen("BINARY")) == 0) || (strncasecmp(g_args.datatype[j], "NCHAR", strlen("NCHAR")) == 0)) { - snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, - ", COL%d %s(%d)", j, g_args.datatype[j], g_args.len_of_binary); + snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len, + ",C%d %s(%d)", j, g_args.datatype[j], g_args.len_of_binary); } else { - snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, - ", COL%d %s", j, g_args.datatype[j]); + snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len, + ",C%d %s", j, g_args.datatype[j]); } len = strlen(tblColsBuf); } - snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ")"); + snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len, ")"); verbosePrint("%s() LN%d: dbName: %s num of tb: %"PRId64" schema: %s\n", __func__, __LINE__, @@ -4275,577 +4286,577 @@ PARSE_OVER: } static bool getMetaFromQueryJsonFile(cJSON* root) { - bool ret = false; + bool ret = false; - cJSON* cfgdir = cJSON_GetObjectItem(root, "cfgdir"); - if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) { - tstrncpy(g_queryInfo.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN); - } + cJSON* cfgdir = cJSON_GetObjectItem(root, "cfgdir"); + if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) { + tstrncpy(g_queryInfo.cfgDir, 
cfgdir->valuestring, MAX_FILE_NAME_LEN); + } - cJSON* host = cJSON_GetObjectItem(root, "host"); - if (host && host->type == cJSON_String && host->valuestring != NULL) { - tstrncpy(g_queryInfo.host, host->valuestring, MAX_HOSTNAME_SIZE); - } else if (!host) { - tstrncpy(g_queryInfo.host, "127.0.0.1", MAX_HOSTNAME_SIZE); - } else { - printf("ERROR: failed to read json, host not found\n"); - goto PARSE_OVER; - } - - cJSON* port = cJSON_GetObjectItem(root, "port"); - if (port && port->type == cJSON_Number) { - g_queryInfo.port = port->valueint; - } else if (!port) { - g_queryInfo.port = 6030; - } - - cJSON* user = cJSON_GetObjectItem(root, "user"); - if (user && user->type == cJSON_String && user->valuestring != NULL) { - tstrncpy(g_queryInfo.user, user->valuestring, MAX_USERNAME_SIZE); - } else if (!user) { - tstrncpy(g_queryInfo.user, "root", MAX_USERNAME_SIZE); ; - } - - cJSON* password = cJSON_GetObjectItem(root, "password"); - if (password && password->type == cJSON_String && password->valuestring != NULL) { - tstrncpy(g_queryInfo.password, password->valuestring, MAX_PASSWORD_SIZE); - } else if (!password) { - tstrncpy(g_queryInfo.password, "taosdata", MAX_PASSWORD_SIZE);; - } - - cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no, - if (answerPrompt && answerPrompt->type == cJSON_String - && answerPrompt->valuestring != NULL) { - if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) { - g_args.answer_yes = false; - } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) { - g_args.answer_yes = true; + cJSON* host = cJSON_GetObjectItem(root, "host"); + if (host && host->type == cJSON_String && host->valuestring != NULL) { + tstrncpy(g_queryInfo.host, host->valuestring, MAX_HOSTNAME_SIZE); + } else if (!host) { + tstrncpy(g_queryInfo.host, "127.0.0.1", MAX_HOSTNAME_SIZE); } else { - g_args.answer_yes = false; - } - } else if (!answerPrompt) { - g_args.answer_yes = false; - } else { - printf("ERROR: failed to read json, confirm_parameter_prompt not found\n"); - goto PARSE_OVER; - } - - cJSON* gQueryTimes = cJSON_GetObjectItem(root, "query_times"); - if (gQueryTimes && gQueryTimes->type == cJSON_Number) { - if (gQueryTimes->valueint <= 0) { - errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } - g_args.query_times = gQueryTimes->valueint; - } else if (!gQueryTimes) { - g_args.query_times = 1; - } else { - errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } - - cJSON* dbs = cJSON_GetObjectItem(root, "databases"); - if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) { - tstrncpy(g_queryInfo.dbName, dbs->valuestring, TSDB_DB_NAME_LEN); - } else if (!dbs) { - printf("ERROR: failed to read json, databases not found\n"); - goto PARSE_OVER; - } - - cJSON* queryMode = cJSON_GetObjectItem(root, "query_mode"); - if (queryMode && queryMode->type == cJSON_String && queryMode->valuestring != NULL) { - tstrncpy(g_queryInfo.queryMode, queryMode->valuestring, MAX_TB_NAME_SIZE); - } else if (!queryMode) { - tstrncpy(g_queryInfo.queryMode, "taosc", MAX_TB_NAME_SIZE); - } else { - printf("ERROR: failed to read json, query_mode not found\n"); - goto PARSE_OVER; - } - - // specified_table_query - cJSON *specifiedQuery = cJSON_GetObjectItem(root, "specified_table_query"); - if (!specifiedQuery) { - g_queryInfo.specifiedQueryInfo.concurrent = 1; - g_queryInfo.specifiedQueryInfo.sqlCount = 0; - } else if 
(specifiedQuery->type != cJSON_Object) { - printf("ERROR: failed to read json, super_table_query not found\n"); - goto PARSE_OVER; - } else { - cJSON* queryInterval = cJSON_GetObjectItem(specifiedQuery, "query_interval"); - if (queryInterval && queryInterval->type == cJSON_Number) { - g_queryInfo.specifiedQueryInfo.queryInterval = queryInterval->valueint; - } else if (!queryInterval) { - g_queryInfo.specifiedQueryInfo.queryInterval = 0; - } - - cJSON* specifiedQueryTimes = cJSON_GetObjectItem(specifiedQuery, - "query_times"); - if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) { - if (specifiedQueryTimes->valueint <= 0) { - errorPrint( - "%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n", - __func__, __LINE__, specifiedQueryTimes->valueint); + printf("ERROR: failed to read json, host not found\n"); goto PARSE_OVER; - - } - g_queryInfo.specifiedQueryInfo.queryTimes = specifiedQueryTimes->valueint; - } else if (!specifiedQueryTimes) { - g_queryInfo.specifiedQueryInfo.queryTimes = g_args.query_times; - } else { - errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; } - cJSON* concurrent = cJSON_GetObjectItem(specifiedQuery, "concurrent"); - if (concurrent && concurrent->type == cJSON_Number) { - if (concurrent->valueint <= 0) { - errorPrint( - "%s() LN%d, query sqlCount %d or concurrent %d is not correct.\n", - __func__, __LINE__, - g_queryInfo.specifiedQueryInfo.sqlCount, - g_queryInfo.specifiedQueryInfo.concurrent); - goto PARSE_OVER; - } - g_queryInfo.specifiedQueryInfo.concurrent = concurrent->valueint; - } else if (!concurrent) { - g_queryInfo.specifiedQueryInfo.concurrent = 1; + cJSON* port = cJSON_GetObjectItem(root, "port"); + if (port && port->type == cJSON_Number) { + g_queryInfo.port = port->valueint; + } else if (!port) { + g_queryInfo.port = 6030; } - cJSON* specifiedAsyncMode = cJSON_GetObjectItem(specifiedQuery, "mode"); - if (specifiedAsyncMode && specifiedAsyncMode->type == cJSON_String - && specifiedAsyncMode->valuestring != NULL) { - if (0 == strcmp("sync", specifiedAsyncMode->valuestring)) { - g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE; - } else if (0 == strcmp("async", specifiedAsyncMode->valuestring)) { - g_queryInfo.specifiedQueryInfo.asyncMode = ASYNC_MODE; - } else { - errorPrint("%s() LN%d, failed to read json, async mode input error\n", - __func__, __LINE__); - goto PARSE_OVER; - } - } else { - g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE; + cJSON* user = cJSON_GetObjectItem(root, "user"); + if (user && user->type == cJSON_String && user->valuestring != NULL) { + tstrncpy(g_queryInfo.user, user->valuestring, MAX_USERNAME_SIZE); + } else if (!user) { + tstrncpy(g_queryInfo.user, "root", MAX_USERNAME_SIZE); ; } - cJSON* interval = cJSON_GetObjectItem(specifiedQuery, "interval"); - if (interval && interval->type == cJSON_Number) { - g_queryInfo.specifiedQueryInfo.subscribeInterval = interval->valueint; - } else if (!interval) { - //printf("failed to read json, subscribe interval no found\n"); - //goto PARSE_OVER; - g_queryInfo.specifiedQueryInfo.subscribeInterval = 10000; + cJSON* password = cJSON_GetObjectItem(root, "password"); + if (password && password->type == cJSON_String && password->valuestring != NULL) { + tstrncpy(g_queryInfo.password, password->valuestring, MAX_PASSWORD_SIZE); + } else if (!password) { + tstrncpy(g_queryInfo.password, "taosdata", MAX_PASSWORD_SIZE);; } - cJSON* restart = cJSON_GetObjectItem(specifiedQuery, 
"restart"); - if (restart && restart->type == cJSON_String && restart->valuestring != NULL) { - if (0 == strcmp("yes", restart->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeRestart = true; - } else if (0 == strcmp("no", restart->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeRestart = false; - } else { - printf("ERROR: failed to read json, subscribe restart error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.specifiedQueryInfo.subscribeRestart = true; - } - - cJSON* keepProgress = cJSON_GetObjectItem(specifiedQuery, "keepProgress"); - if (keepProgress - && keepProgress->type == cJSON_String - && keepProgress->valuestring != NULL) { - if (0 == strcmp("yes", keepProgress->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 1; - } else if (0 == strcmp("no", keepProgress->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 0; - } else { - printf("ERROR: failed to read json, subscribe keepProgress error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 0; - } - - // sqls - cJSON* specifiedSqls = cJSON_GetObjectItem(specifiedQuery, "sqls"); - if (!specifiedSqls) { - g_queryInfo.specifiedQueryInfo.sqlCount = 0; - } else if (specifiedSqls->type != cJSON_Array) { - errorPrint("%s() LN%d, failed to read json, super sqls not found\n", - __func__, __LINE__); - goto PARSE_OVER; - } else { - int superSqlSize = cJSON_GetArraySize(specifiedSqls); - if (superSqlSize * g_queryInfo.specifiedQueryInfo.concurrent - > MAX_QUERY_SQL_COUNT) { - errorPrint("%s() LN%d, failed to read json, query sql(%d) * concurrent(%d) overflow, max is %d\n", - __func__, __LINE__, - superSqlSize, - g_queryInfo.specifiedQueryInfo.concurrent, - MAX_QUERY_SQL_COUNT); - goto PARSE_OVER; - } - - g_queryInfo.specifiedQueryInfo.sqlCount = superSqlSize; - for (int j = 0; j < superSqlSize; ++j) { - cJSON* sql = cJSON_GetArrayItem(specifiedSqls, j); - if (sql == NULL) continue; - - cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); - if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) { - printf("ERROR: failed to read json, sql not found\n"); - goto PARSE_OVER; - } - tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j], - sqlStr->valuestring, MAX_QUERY_SQL_LENGTH); - - // default value is -1, which mean infinite loop - g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1; - cJSON* endAfterConsume = - cJSON_GetObjectItem(specifiedQuery, "endAfterConsume"); - if (endAfterConsume - && endAfterConsume->type == cJSON_Number) { - g_queryInfo.specifiedQueryInfo.endAfterConsume[j] - = endAfterConsume->valueint; - } - if (g_queryInfo.specifiedQueryInfo.endAfterConsume[j] < -1) - g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1; - - g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = -1; - cJSON* resubAfterConsume = - cJSON_GetObjectItem(specifiedQuery, "resubAfterConsume"); - if ((resubAfterConsume) - && (resubAfterConsume->type == cJSON_Number) - && (resubAfterConsume->valueint >= 0)) { - g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] - = resubAfterConsume->valueint; - } - - if (g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] < -1) - g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = -1; - - cJSON *result = cJSON_GetObjectItem(sql, "result"); - if ((NULL != result) && (result->type == cJSON_String) - && (result->valuestring != NULL)) { - tstrncpy(g_queryInfo.specifiedQueryInfo.result[j], - result->valuestring, MAX_FILE_NAME_LEN); - } else if (NULL == result) { - 
memset(g_queryInfo.specifiedQueryInfo.result[j], - 0, MAX_FILE_NAME_LEN); + cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no, + if (answerPrompt && answerPrompt->type == cJSON_String + && answerPrompt->valuestring != NULL) { + if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) { + g_args.answer_yes = false; + } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) { + g_args.answer_yes = true; } else { - printf("ERROR: failed to read json, super query result file not found\n"); - goto PARSE_OVER; + g_args.answer_yes = false; } - } - } - } - - // super_table_query - cJSON *superQuery = cJSON_GetObjectItem(root, "super_table_query"); - if (!superQuery) { - g_queryInfo.superQueryInfo.threadCnt = 1; - g_queryInfo.superQueryInfo.sqlCount = 0; - } else if (superQuery->type != cJSON_Object) { - printf("ERROR: failed to read json, sub_table_query not found\n"); - ret = true; - goto PARSE_OVER; - } else { - cJSON* subrate = cJSON_GetObjectItem(superQuery, "query_interval"); - if (subrate && subrate->type == cJSON_Number) { - g_queryInfo.superQueryInfo.queryInterval = subrate->valueint; - } else if (!subrate) { - g_queryInfo.superQueryInfo.queryInterval = 0; - } - - cJSON* superQueryTimes = cJSON_GetObjectItem(superQuery, "query_times"); - if (superQueryTimes && superQueryTimes->type == cJSON_Number) { - if (superQueryTimes->valueint <= 0) { - errorPrint("%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n", - __func__, __LINE__, superQueryTimes->valueint); - goto PARSE_OVER; - } - g_queryInfo.superQueryInfo.queryTimes = superQueryTimes->valueint; - } else if (!superQueryTimes) { - g_queryInfo.superQueryInfo.queryTimes = g_args.query_times; + } else if (!answerPrompt) { + g_args.answer_yes = false; } else { - errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } - - cJSON* threads = cJSON_GetObjectItem(superQuery, "threads"); - if (threads && threads->type == cJSON_Number) { - if (threads->valueint <= 0) { - errorPrint("%s() LN%d, failed to read json, threads input mistake\n", - __func__, __LINE__); + printf("ERROR: failed to read json, confirm_parameter_prompt not found\n"); goto PARSE_OVER; - - } - g_queryInfo.superQueryInfo.threadCnt = threads->valueint; - } else if (!threads) { - g_queryInfo.superQueryInfo.threadCnt = 1; } - //cJSON* subTblCnt = cJSON_GetObjectItem(superQuery, "childtable_count"); - //if (subTblCnt && subTblCnt->type == cJSON_Number) { - // g_queryInfo.superQueryInfo.childTblCount = subTblCnt->valueint; - //} else if (!subTblCnt) { - // g_queryInfo.superQueryInfo.childTblCount = 0; - //} - - cJSON* stblname = cJSON_GetObjectItem(superQuery, "stblname"); - if (stblname && stblname->type == cJSON_String - && stblname->valuestring != NULL) { - tstrncpy(g_queryInfo.superQueryInfo.sTblName, stblname->valuestring, - TSDB_TABLE_NAME_LEN); + cJSON* gQueryTimes = cJSON_GetObjectItem(root, "query_times"); + if (gQueryTimes && gQueryTimes->type == cJSON_Number) { + if (gQueryTimes->valueint <= 0) { + errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + } + g_args.query_times = gQueryTimes->valueint; + } else if (!gQueryTimes) { + g_args.query_times = 1; } else { - errorPrint("%s() LN%d, failed to read json, super table name input error\n", - __func__, __LINE__); - goto PARSE_OVER; + errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", + __func__, 
__LINE__); + goto PARSE_OVER; } - cJSON* superAsyncMode = cJSON_GetObjectItem(superQuery, "mode"); - if (superAsyncMode && superAsyncMode->type == cJSON_String - && superAsyncMode->valuestring != NULL) { - if (0 == strcmp("sync", superAsyncMode->valuestring)) { - g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE; - } else if (0 == strcmp("async", superAsyncMode->valuestring)) { - g_queryInfo.superQueryInfo.asyncMode = ASYNC_MODE; - } else { - errorPrint("%s() LN%d, failed to read json, async mode input error\n", - __func__, __LINE__); + cJSON* dbs = cJSON_GetObjectItem(root, "databases"); + if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) { + tstrncpy(g_queryInfo.dbName, dbs->valuestring, TSDB_DB_NAME_LEN); + } else if (!dbs) { + printf("ERROR: failed to read json, databases not found\n"); goto PARSE_OVER; - } + } + + cJSON* queryMode = cJSON_GetObjectItem(root, "query_mode"); + if (queryMode && queryMode->type == cJSON_String && queryMode->valuestring != NULL) { + tstrncpy(g_queryInfo.queryMode, queryMode->valuestring, MAX_TB_NAME_SIZE); + } else if (!queryMode) { + tstrncpy(g_queryInfo.queryMode, "taosc", MAX_TB_NAME_SIZE); } else { - g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE; + printf("ERROR: failed to read json, query_mode not found\n"); + goto PARSE_OVER; } - cJSON* superInterval = cJSON_GetObjectItem(superQuery, "interval"); - if (superInterval && superInterval->type == cJSON_Number) { - if (superInterval->valueint < 0) { - errorPrint("%s() LN%d, failed to read json, interval input mistake\n", - __func__, __LINE__); + // specified_table_query + cJSON *specifiedQuery = cJSON_GetObjectItem(root, "specified_table_query"); + if (!specifiedQuery) { + g_queryInfo.specifiedQueryInfo.concurrent = 1; + g_queryInfo.specifiedQueryInfo.sqlCount = 0; + } else if (specifiedQuery->type != cJSON_Object) { + printf("ERROR: failed to read json, super_table_query not found\n"); goto PARSE_OVER; - } - g_queryInfo.superQueryInfo.subscribeInterval = superInterval->valueint; - } else if (!superInterval) { - //printf("failed to read json, subscribe interval no found\n"); - //goto PARSE_OVER; - g_queryInfo.superQueryInfo.subscribeInterval = 10000; - } - - cJSON* subrestart = cJSON_GetObjectItem(superQuery, "restart"); - if (subrestart && subrestart->type == cJSON_String - && subrestart->valuestring != NULL) { - if (0 == strcmp("yes", subrestart->valuestring)) { - g_queryInfo.superQueryInfo.subscribeRestart = true; - } else if (0 == strcmp("no", subrestart->valuestring)) { - g_queryInfo.superQueryInfo.subscribeRestart = false; - } else { - printf("ERROR: failed to read json, subscribe restart error\n"); - goto PARSE_OVER; - } } else { - g_queryInfo.superQueryInfo.subscribeRestart = true; + cJSON* queryInterval = cJSON_GetObjectItem(specifiedQuery, "query_interval"); + if (queryInterval && queryInterval->type == cJSON_Number) { + g_queryInfo.specifiedQueryInfo.queryInterval = queryInterval->valueint; + } else if (!queryInterval) { + g_queryInfo.specifiedQueryInfo.queryInterval = 0; + } + + cJSON* specifiedQueryTimes = cJSON_GetObjectItem(specifiedQuery, + "query_times"); + if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) { + if (specifiedQueryTimes->valueint <= 0) { + errorPrint( + "%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n", + __func__, __LINE__, specifiedQueryTimes->valueint); + goto PARSE_OVER; + + } + g_queryInfo.specifiedQueryInfo.queryTimes = specifiedQueryTimes->valueint; + } else if (!specifiedQueryTimes) { + 
g_queryInfo.specifiedQueryInfo.queryTimes = g_args.query_times; + } else { + errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + } + + cJSON* concurrent = cJSON_GetObjectItem(specifiedQuery, "concurrent"); + if (concurrent && concurrent->type == cJSON_Number) { + if (concurrent->valueint <= 0) { + errorPrint( + "%s() LN%d, query sqlCount %d or concurrent %d is not correct.\n", + __func__, __LINE__, + g_queryInfo.specifiedQueryInfo.sqlCount, + g_queryInfo.specifiedQueryInfo.concurrent); + goto PARSE_OVER; + } + g_queryInfo.specifiedQueryInfo.concurrent = concurrent->valueint; + } else if (!concurrent) { + g_queryInfo.specifiedQueryInfo.concurrent = 1; + } + + cJSON* specifiedAsyncMode = cJSON_GetObjectItem(specifiedQuery, "mode"); + if (specifiedAsyncMode && specifiedAsyncMode->type == cJSON_String + && specifiedAsyncMode->valuestring != NULL) { + if (0 == strcmp("sync", specifiedAsyncMode->valuestring)) { + g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE; + } else if (0 == strcmp("async", specifiedAsyncMode->valuestring)) { + g_queryInfo.specifiedQueryInfo.asyncMode = ASYNC_MODE; + } else { + errorPrint("%s() LN%d, failed to read json, async mode input error\n", + __func__, __LINE__); + goto PARSE_OVER; + } + } else { + g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE; + } + + cJSON* interval = cJSON_GetObjectItem(specifiedQuery, "interval"); + if (interval && interval->type == cJSON_Number) { + g_queryInfo.specifiedQueryInfo.subscribeInterval = interval->valueint; + } else if (!interval) { + //printf("failed to read json, subscribe interval no found\n"); + //goto PARSE_OVER; + g_queryInfo.specifiedQueryInfo.subscribeInterval = 10000; + } + + cJSON* restart = cJSON_GetObjectItem(specifiedQuery, "restart"); + if (restart && restart->type == cJSON_String && restart->valuestring != NULL) { + if (0 == strcmp("yes", restart->valuestring)) { + g_queryInfo.specifiedQueryInfo.subscribeRestart = true; + } else if (0 == strcmp("no", restart->valuestring)) { + g_queryInfo.specifiedQueryInfo.subscribeRestart = false; + } else { + printf("ERROR: failed to read json, subscribe restart error\n"); + goto PARSE_OVER; + } + } else { + g_queryInfo.specifiedQueryInfo.subscribeRestart = true; + } + + cJSON* keepProgress = cJSON_GetObjectItem(specifiedQuery, "keepProgress"); + if (keepProgress + && keepProgress->type == cJSON_String + && keepProgress->valuestring != NULL) { + if (0 == strcmp("yes", keepProgress->valuestring)) { + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 1; + } else if (0 == strcmp("no", keepProgress->valuestring)) { + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 0; + } else { + printf("ERROR: failed to read json, subscribe keepProgress error\n"); + goto PARSE_OVER; + } + } else { + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 0; + } + + // sqls + cJSON* specifiedSqls = cJSON_GetObjectItem(specifiedQuery, "sqls"); + if (!specifiedSqls) { + g_queryInfo.specifiedQueryInfo.sqlCount = 0; + } else if (specifiedSqls->type != cJSON_Array) { + errorPrint("%s() LN%d, failed to read json, super sqls not found\n", + __func__, __LINE__); + goto PARSE_OVER; + } else { + int superSqlSize = cJSON_GetArraySize(specifiedSqls); + if (superSqlSize * g_queryInfo.specifiedQueryInfo.concurrent + > MAX_QUERY_SQL_COUNT) { + errorPrint("%s() LN%d, failed to read json, query sql(%d) * concurrent(%d) overflow, max is %d\n", + __func__, __LINE__, + superSqlSize, + g_queryInfo.specifiedQueryInfo.concurrent, + 
MAX_QUERY_SQL_COUNT); + goto PARSE_OVER; + } + + g_queryInfo.specifiedQueryInfo.sqlCount = superSqlSize; + for (int j = 0; j < superSqlSize; ++j) { + cJSON* sql = cJSON_GetArrayItem(specifiedSqls, j); + if (sql == NULL) continue; + + cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); + if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) { + printf("ERROR: failed to read json, sql not found\n"); + goto PARSE_OVER; + } + tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j], + sqlStr->valuestring, MAX_QUERY_SQL_LENGTH); + + // default value is -1, which mean infinite loop + g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1; + cJSON* endAfterConsume = + cJSON_GetObjectItem(specifiedQuery, "endAfterConsume"); + if (endAfterConsume + && endAfterConsume->type == cJSON_Number) { + g_queryInfo.specifiedQueryInfo.endAfterConsume[j] + = endAfterConsume->valueint; + } + if (g_queryInfo.specifiedQueryInfo.endAfterConsume[j] < -1) + g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1; + + g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = -1; + cJSON* resubAfterConsume = + cJSON_GetObjectItem(specifiedQuery, "resubAfterConsume"); + if ((resubAfterConsume) + && (resubAfterConsume->type == cJSON_Number) + && (resubAfterConsume->valueint >= 0)) { + g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] + = resubAfterConsume->valueint; + } + + if (g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] < -1) + g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = -1; + + cJSON *result = cJSON_GetObjectItem(sql, "result"); + if ((NULL != result) && (result->type == cJSON_String) + && (result->valuestring != NULL)) { + tstrncpy(g_queryInfo.specifiedQueryInfo.result[j], + result->valuestring, MAX_FILE_NAME_LEN); + } else if (NULL == result) { + memset(g_queryInfo.specifiedQueryInfo.result[j], + 0, MAX_FILE_NAME_LEN); + } else { + printf("ERROR: failed to read json, super query result file not found\n"); + goto PARSE_OVER; + } + } + } } - cJSON* superkeepProgress = cJSON_GetObjectItem(superQuery, "keepProgress"); - if (superkeepProgress && - superkeepProgress->type == cJSON_String - && superkeepProgress->valuestring != NULL) { - if (0 == strcmp("yes", superkeepProgress->valuestring)) { - g_queryInfo.superQueryInfo.subscribeKeepProgress = 1; - } else if (0 == strcmp("no", superkeepProgress->valuestring)) { - g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; - } else { - printf("ERROR: failed to read json, subscribe super table keepProgress error\n"); + // super_table_query + cJSON *superQuery = cJSON_GetObjectItem(root, "super_table_query"); + if (!superQuery) { + g_queryInfo.superQueryInfo.threadCnt = 1; + g_queryInfo.superQueryInfo.sqlCount = 0; + } else if (superQuery->type != cJSON_Object) { + printf("ERROR: failed to read json, sub_table_query not found\n"); + ret = true; goto PARSE_OVER; - } } else { - g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; - } + cJSON* subrate = cJSON_GetObjectItem(superQuery, "query_interval"); + if (subrate && subrate->type == cJSON_Number) { + g_queryInfo.superQueryInfo.queryInterval = subrate->valueint; + } else if (!subrate) { + g_queryInfo.superQueryInfo.queryInterval = 0; + } - // default value is -1, which mean do not resub - g_queryInfo.superQueryInfo.endAfterConsume = -1; - cJSON* superEndAfterConsume = - cJSON_GetObjectItem(superQuery, "endAfterConsume"); - if (superEndAfterConsume - && superEndAfterConsume->type == cJSON_Number) { - g_queryInfo.superQueryInfo.endAfterConsume = - superEndAfterConsume->valueint; - } - if 
(g_queryInfo.superQueryInfo.endAfterConsume < -1) + cJSON* superQueryTimes = cJSON_GetObjectItem(superQuery, "query_times"); + if (superQueryTimes && superQueryTimes->type == cJSON_Number) { + if (superQueryTimes->valueint <= 0) { + errorPrint("%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n", + __func__, __LINE__, superQueryTimes->valueint); + goto PARSE_OVER; + } + g_queryInfo.superQueryInfo.queryTimes = superQueryTimes->valueint; + } else if (!superQueryTimes) { + g_queryInfo.superQueryInfo.queryTimes = g_args.query_times; + } else { + errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + } + + cJSON* threads = cJSON_GetObjectItem(superQuery, "threads"); + if (threads && threads->type == cJSON_Number) { + if (threads->valueint <= 0) { + errorPrint("%s() LN%d, failed to read json, threads input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + + } + g_queryInfo.superQueryInfo.threadCnt = threads->valueint; + } else if (!threads) { + g_queryInfo.superQueryInfo.threadCnt = 1; + } + + //cJSON* subTblCnt = cJSON_GetObjectItem(superQuery, "childtable_count"); + //if (subTblCnt && subTblCnt->type == cJSON_Number) { + // g_queryInfo.superQueryInfo.childTblCount = subTblCnt->valueint; + //} else if (!subTblCnt) { + // g_queryInfo.superQueryInfo.childTblCount = 0; + //} + + cJSON* stblname = cJSON_GetObjectItem(superQuery, "stblname"); + if (stblname && stblname->type == cJSON_String + && stblname->valuestring != NULL) { + tstrncpy(g_queryInfo.superQueryInfo.sTblName, stblname->valuestring, + TSDB_TABLE_NAME_LEN); + } else { + errorPrint("%s() LN%d, failed to read json, super table name input error\n", + __func__, __LINE__); + goto PARSE_OVER; + } + + cJSON* superAsyncMode = cJSON_GetObjectItem(superQuery, "mode"); + if (superAsyncMode && superAsyncMode->type == cJSON_String + && superAsyncMode->valuestring != NULL) { + if (0 == strcmp("sync", superAsyncMode->valuestring)) { + g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE; + } else if (0 == strcmp("async", superAsyncMode->valuestring)) { + g_queryInfo.superQueryInfo.asyncMode = ASYNC_MODE; + } else { + errorPrint("%s() LN%d, failed to read json, async mode input error\n", + __func__, __LINE__); + goto PARSE_OVER; + } + } else { + g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE; + } + + cJSON* superInterval = cJSON_GetObjectItem(superQuery, "interval"); + if (superInterval && superInterval->type == cJSON_Number) { + if (superInterval->valueint < 0) { + errorPrint("%s() LN%d, failed to read json, interval input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + } + g_queryInfo.superQueryInfo.subscribeInterval = superInterval->valueint; + } else if (!superInterval) { + //printf("failed to read json, subscribe interval no found\n"); + //goto PARSE_OVER; + g_queryInfo.superQueryInfo.subscribeInterval = 10000; + } + + cJSON* subrestart = cJSON_GetObjectItem(superQuery, "restart"); + if (subrestart && subrestart->type == cJSON_String + && subrestart->valuestring != NULL) { + if (0 == strcmp("yes", subrestart->valuestring)) { + g_queryInfo.superQueryInfo.subscribeRestart = true; + } else if (0 == strcmp("no", subrestart->valuestring)) { + g_queryInfo.superQueryInfo.subscribeRestart = false; + } else { + printf("ERROR: failed to read json, subscribe restart error\n"); + goto PARSE_OVER; + } + } else { + g_queryInfo.superQueryInfo.subscribeRestart = true; + } + + cJSON* superkeepProgress = cJSON_GetObjectItem(superQuery, "keepProgress"); + 
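
Several options in both query sections (`restart`, `keepProgress`) are encoded as the strings "yes"/"no" rather than JSON booleans, with a per-option default when the key is absent. A hedged sketch of that convention as one helper; the function name `readYesNo` and its tri-state return are mine, not the patch's:

```c
#include <string.h>
#include "cJSON.h"   /* include path is an assumption */

/* Returns defaultValue when the key is absent, 1 for "yes", 0 for "no",
 * and -1 for anything else (the caller treats -1 as a parse error). */
static int readYesNo(cJSON *obj, const char *key, int defaultValue) {
    cJSON *item = cJSON_GetObjectItem(obj, key);
    if (!item) return defaultValue;
    if (item->type == cJSON_String && item->valuestring != NULL) {
        if (0 == strcmp("yes", item->valuestring)) return 1;
        if (0 == strcmp("no",  item->valuestring)) return 0;
    }
    return -1;
}
```
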
if (superkeepProgress && + superkeepProgress->type == cJSON_String + && superkeepProgress->valuestring != NULL) { + if (0 == strcmp("yes", superkeepProgress->valuestring)) { + g_queryInfo.superQueryInfo.subscribeKeepProgress = 1; + } else if (0 == strcmp("no", superkeepProgress->valuestring)) { + g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; + } else { + printf("ERROR: failed to read json, subscribe super table keepProgress error\n"); + goto PARSE_OVER; + } + } else { + g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; + } + + // default value is -1, which mean do not resub g_queryInfo.superQueryInfo.endAfterConsume = -1; + cJSON* superEndAfterConsume = + cJSON_GetObjectItem(superQuery, "endAfterConsume"); + if (superEndAfterConsume + && superEndAfterConsume->type == cJSON_Number) { + g_queryInfo.superQueryInfo.endAfterConsume = + superEndAfterConsume->valueint; + } + if (g_queryInfo.superQueryInfo.endAfterConsume < -1) + g_queryInfo.superQueryInfo.endAfterConsume = -1; - // default value is -1, which mean do not resub - g_queryInfo.superQueryInfo.resubAfterConsume = -1; - cJSON* superResubAfterConsume = - cJSON_GetObjectItem(superQuery, "resubAfterConsume"); - if ((superResubAfterConsume) - && (superResubAfterConsume->type == cJSON_Number) - && (superResubAfterConsume->valueint >= 0)) { - g_queryInfo.superQueryInfo.resubAfterConsume = - superResubAfterConsume->valueint; - } - if (g_queryInfo.superQueryInfo.resubAfterConsume < -1) + // default value is -1, which mean do not resub g_queryInfo.superQueryInfo.resubAfterConsume = -1; - - // supert table sqls - cJSON* superSqls = cJSON_GetObjectItem(superQuery, "sqls"); - if (!superSqls) { - g_queryInfo.superQueryInfo.sqlCount = 0; - } else if (superSqls->type != cJSON_Array) { - errorPrint("%s() LN%d: failed to read json, super sqls not found\n", - __func__, __LINE__); - goto PARSE_OVER; - } else { - int superSqlSize = cJSON_GetArraySize(superSqls); - if (superSqlSize > MAX_QUERY_SQL_COUNT) { - errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n", - __func__, __LINE__, MAX_QUERY_SQL_COUNT); - goto PARSE_OVER; - } - - g_queryInfo.superQueryInfo.sqlCount = superSqlSize; - for (int j = 0; j < superSqlSize; ++j) { - cJSON* sql = cJSON_GetArrayItem(superSqls, j); - if (sql == NULL) continue; - - cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); - if (!sqlStr || sqlStr->type != cJSON_String - || sqlStr->valuestring == NULL) { - errorPrint("%s() LN%d, failed to read json, sql not found\n", - __func__, __LINE__); - goto PARSE_OVER; + cJSON* superResubAfterConsume = + cJSON_GetObjectItem(superQuery, "resubAfterConsume"); + if ((superResubAfterConsume) + && (superResubAfterConsume->type == cJSON_Number) + && (superResubAfterConsume->valueint >= 0)) { + g_queryInfo.superQueryInfo.resubAfterConsume = + superResubAfterConsume->valueint; } - tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, - MAX_QUERY_SQL_LENGTH); + if (g_queryInfo.superQueryInfo.resubAfterConsume < -1) + g_queryInfo.superQueryInfo.resubAfterConsume = -1; - cJSON *result = cJSON_GetObjectItem(sql, "result"); - if (result != NULL && result->type == cJSON_String - && result->valuestring != NULL){ - tstrncpy(g_queryInfo.superQueryInfo.result[j], - result->valuestring, MAX_FILE_NAME_LEN); - } else if (NULL == result) { - memset(g_queryInfo.superQueryInfo.result[j], 0, MAX_FILE_NAME_LEN); - } else { - errorPrint("%s() LN%d, failed to read json, sub query result file not found\n", - __func__, __LINE__); - goto PARSE_OVER; + // supert table 
sqls + cJSON* superSqls = cJSON_GetObjectItem(superQuery, "sqls"); + if (!superSqls) { + g_queryInfo.superQueryInfo.sqlCount = 0; + } else if (superSqls->type != cJSON_Array) { + errorPrint("%s() LN%d: failed to read json, super sqls not found\n", + __func__, __LINE__); + goto PARSE_OVER; + } else { + int superSqlSize = cJSON_GetArraySize(superSqls); + if (superSqlSize > MAX_QUERY_SQL_COUNT) { + errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n", + __func__, __LINE__, MAX_QUERY_SQL_COUNT); + goto PARSE_OVER; + } + + g_queryInfo.superQueryInfo.sqlCount = superSqlSize; + for (int j = 0; j < superSqlSize; ++j) { + cJSON* sql = cJSON_GetArrayItem(superSqls, j); + if (sql == NULL) continue; + + cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); + if (!sqlStr || sqlStr->type != cJSON_String + || sqlStr->valuestring == NULL) { + errorPrint("%s() LN%d, failed to read json, sql not found\n", + __func__, __LINE__); + goto PARSE_OVER; + } + tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, + MAX_QUERY_SQL_LENGTH); + + cJSON *result = cJSON_GetObjectItem(sql, "result"); + if (result != NULL && result->type == cJSON_String + && result->valuestring != NULL){ + tstrncpy(g_queryInfo.superQueryInfo.result[j], + result->valuestring, MAX_FILE_NAME_LEN); + } else if (NULL == result) { + memset(g_queryInfo.superQueryInfo.result[j], 0, MAX_FILE_NAME_LEN); + } else { + errorPrint("%s() LN%d, failed to read json, sub query result file not found\n", + __func__, __LINE__); + goto PARSE_OVER; + } + } } - } } - } - ret = true; + ret = true; PARSE_OVER: - return ret; + return ret; } static bool getInfoFromJsonFile(char* file) { debugPrint("%s %d %s\n", __func__, __LINE__, file); - FILE *fp = fopen(file, "r"); - if (!fp) { - printf("failed to read %s, reason:%s\n", file, strerror(errno)); - return false; - } - - bool ret = false; - int maxLen = 6400000; - char *content = calloc(1, maxLen + 1); - int len = fread(content, 1, maxLen, fp); - if (len <= 0) { - free(content); - fclose(fp); - printf("failed to read %s, content is null", file); - return false; - } - - content[len] = 0; - cJSON* root = cJSON_Parse(content); - if (root == NULL) { - printf("ERROR: failed to cjson parse %s, invalid json format\n", file); - goto PARSE_OVER; - } - - cJSON* filetype = cJSON_GetObjectItem(root, "filetype"); - if (filetype && filetype->type == cJSON_String && filetype->valuestring != NULL) { - if (0 == strcasecmp("insert", filetype->valuestring)) { - g_args.test_mode = INSERT_TEST; - } else if (0 == strcasecmp("query", filetype->valuestring)) { - g_args.test_mode = QUERY_TEST; - } else if (0 == strcasecmp("subscribe", filetype->valuestring)) { - g_args.test_mode = SUBSCRIBE_TEST; - } else { - printf("ERROR: failed to read json, filetype not support\n"); - goto PARSE_OVER; + FILE *fp = fopen(file, "r"); + if (!fp) { + printf("failed to read %s, reason:%s\n", file, strerror(errno)); + return false; } - } else if (!filetype) { - g_args.test_mode = INSERT_TEST; - } else { - printf("ERROR: failed to read json, filetype not found\n"); - goto PARSE_OVER; - } - if (INSERT_TEST == g_args.test_mode) { - ret = getMetaFromInsertJsonFile(root); - } else if ((QUERY_TEST == g_args.test_mode) - || (SUBSCRIBE_TEST == g_args.test_mode)) { - ret = getMetaFromQueryJsonFile(root); - } else { - errorPrint("%s() LN%d, input json file type error! 
please input correct file type: insert or query or subscribe\n", - __func__, __LINE__); - goto PARSE_OVER; - } + bool ret = false; + int maxLen = 6400000; + char *content = calloc(1, maxLen + 1); + int len = fread(content, 1, maxLen, fp); + if (len <= 0) { + free(content); + fclose(fp); + printf("failed to read %s, content is null", file); + return false; + } + + content[len] = 0; + cJSON* root = cJSON_Parse(content); + if (root == NULL) { + printf("ERROR: failed to cjson parse %s, invalid json format\n", file); + goto PARSE_OVER; + } + + cJSON* filetype = cJSON_GetObjectItem(root, "filetype"); + if (filetype && filetype->type == cJSON_String && filetype->valuestring != NULL) { + if (0 == strcasecmp("insert", filetype->valuestring)) { + g_args.test_mode = INSERT_TEST; + } else if (0 == strcasecmp("query", filetype->valuestring)) { + g_args.test_mode = QUERY_TEST; + } else if (0 == strcasecmp("subscribe", filetype->valuestring)) { + g_args.test_mode = SUBSCRIBE_TEST; + } else { + printf("ERROR: failed to read json, filetype not support\n"); + goto PARSE_OVER; + } + } else if (!filetype) { + g_args.test_mode = INSERT_TEST; + } else { + printf("ERROR: failed to read json, filetype not found\n"); + goto PARSE_OVER; + } + + if (INSERT_TEST == g_args.test_mode) { + ret = getMetaFromInsertJsonFile(root); + } else if ((QUERY_TEST == g_args.test_mode) + || (SUBSCRIBE_TEST == g_args.test_mode)) { + ret = getMetaFromQueryJsonFile(root); + } else { + errorPrint("%s() LN%d, input json file type error! please input correct file type: insert or query or subscribe\n", + __func__, __LINE__); + goto PARSE_OVER; + } PARSE_OVER: - free(content); - cJSON_Delete(root); - fclose(fp); - return ret; + free(content); + cJSON_Delete(root); + fclose(fp); + return ret; } static int prepareSampleData() { - for (int i = 0; i < g_Dbs.dbCount; i++) { - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - if (g_Dbs.db[i].superTbls[j].tagsFile[0] != 0) { - if (readTagFromCsvFileToMem(&g_Dbs.db[i].superTbls[j]) != 0) { - return -1; + for (int i = 0; i < g_Dbs.dbCount; i++) { + for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { + if (g_Dbs.db[i].superTbls[j].tagsFile[0] != 0) { + if (readTagFromCsvFileToMem(&g_Dbs.db[i].superTbls[j]) != 0) { + return -1; + } + } } - } } - } - return 0; + return 0; } static void postFreeResource() { - tmfclose(g_fpOfInsertResult); - for (int i = 0; i < g_Dbs.dbCount; i++) { - for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { - if (0 != g_Dbs.db[i].superTbls[j].colsOfCreateChildTable) { - free(g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); - g_Dbs.db[i].superTbls[j].colsOfCreateChildTable = NULL; - } - if (0 != g_Dbs.db[i].superTbls[j].sampleDataBuf) { - free(g_Dbs.db[i].superTbls[j].sampleDataBuf); - g_Dbs.db[i].superTbls[j].sampleDataBuf = NULL; - } - if (0 != g_Dbs.db[i].superTbls[j].tagDataBuf) { - free(g_Dbs.db[i].superTbls[j].tagDataBuf); - g_Dbs.db[i].superTbls[j].tagDataBuf = NULL; - } - if (0 != g_Dbs.db[i].superTbls[j].childTblName) { - free(g_Dbs.db[i].superTbls[j].childTblName); - g_Dbs.db[i].superTbls[j].childTblName = NULL; - } + tmfclose(g_fpOfInsertResult); + for (int i = 0; i < g_Dbs.dbCount; i++) { + for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { + if (0 != g_Dbs.db[i].superTbls[j].colsOfCreateChildTable) { + free(g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); + g_Dbs.db[i].superTbls[j].colsOfCreateChildTable = NULL; + } + if (0 != g_Dbs.db[i].superTbls[j].sampleDataBuf) { + free(g_Dbs.db[i].superTbls[j].sampleDataBuf); + 
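
getInfoFromJsonFile above follows a simple shape: read the whole file into one heap buffer, hand it to cJSON_Parse, inspect the optional `filetype` key (insert by default, otherwise query or subscribe), and dispatch to the matching meta reader. A stripped-down sketch of the read-and-parse half; the helper name `loadJson` is hypothetical, while the 6400000-byte cap is the one the function above uses:

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "cJSON.h"   /* include path is an assumption */

/* Hypothetical helper mirroring getInfoFromJsonFile's read-then-parse flow.
 * The caller owns the returned tree and must free it with cJSON_Delete(). */
static cJSON *loadJson(const char *file) {
    FILE *fp = fopen(file, "r");
    if (!fp) {
        fprintf(stderr, "failed to read %s, reason:%s\n", file, strerror(errno));
        return NULL;
    }

    const int maxLen = 6400000;                 /* same cap as the function above */
    char *content = calloc(1, maxLen + 1);
    if (!content) {
        fclose(fp);
        return NULL;
    }

    int len = (int)fread(content, 1, maxLen, fp);
    fclose(fp);
    if (len <= 0) {
        free(content);
        return NULL;
    }
    content[len] = 0;

    cJSON *root = cJSON_Parse(content);         /* NULL on invalid JSON */
    free(content);                              /* the parsed tree owns its own copies */
    return root;
}
```
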
g_Dbs.db[i].superTbls[j].sampleDataBuf = NULL; + } + if (0 != g_Dbs.db[i].superTbls[j].tagDataBuf) { + free(g_Dbs.db[i].superTbls[j].tagDataBuf); + g_Dbs.db[i].superTbls[j].tagDataBuf = NULL; + } + if (0 != g_Dbs.db[i].superTbls[j].childTblName) { + free(g_Dbs.db[i].superTbls[j].childTblName); + g_Dbs.db[i].superTbls[j].childTblName = NULL; + } + } } - } } static int getRowDataFromSample( @@ -5033,30 +5044,30 @@ static int64_t generateData(char *recBuf, char **data_type, } static int prepareSampleDataForSTable(SSuperTable *superTblInfo) { - char* sampleDataBuf = NULL; + char* sampleDataBuf = NULL; - sampleDataBuf = calloc( + sampleDataBuf = calloc( superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1); - if (sampleDataBuf == NULL) { - errorPrint("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n", - __func__, __LINE__, - superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, - strerror(errno)); - return -1; - } + if (sampleDataBuf == NULL) { + errorPrint("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n", + __func__, __LINE__, + superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, + strerror(errno)); + return -1; + } - superTblInfo->sampleDataBuf = sampleDataBuf; - int ret = readSampleFromCsvFileToMem(superTblInfo); + superTblInfo->sampleDataBuf = sampleDataBuf; + int ret = readSampleFromCsvFileToMem(superTblInfo); - if (0 != ret) { - errorPrint("%s() LN%d, read sample from csv file failed.\n", - __func__, __LINE__); - tmfree(sampleDataBuf); - superTblInfo->sampleDataBuf = NULL; - return -1; - } + if (0 != ret) { + errorPrint("%s() LN%d, read sample from csv file failed.\n", + __func__, __LINE__); + tmfree(sampleDataBuf); + superTblInfo->sampleDataBuf = NULL; + return -1; + } - return 0; + return 0; } static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) @@ -5155,54 +5166,54 @@ static int32_t generateDataTailWithoutStb( uint64_t recordFrom, int64_t startTime, /* int64_t *pSamplePos, */int64_t *dataLen) { - uint64_t len = 0; - char *pstr = buffer; + uint64_t len = 0; + char *pstr = buffer; - verbosePrint("%s() LN%d batch=%d\n", __func__, __LINE__, batch); + verbosePrint("%s() LN%d batch=%d\n", __func__, __LINE__, batch); - int32_t k = 0; - for (k = 0; k < batch;) { - char data[MAX_DATA_SIZE]; - memset(data, 0, MAX_DATA_SIZE); + int32_t k = 0; + for (k = 0; k < batch;) { + char data[MAX_DATA_SIZE]; + memset(data, 0, MAX_DATA_SIZE); - int64_t retLen = 0; + int64_t retLen = 0; - char **data_type = g_args.datatype; - int lenOfBinary = g_args.len_of_binary; + char **data_type = g_args.datatype; + int lenOfBinary = g_args.len_of_binary; - if (g_args.disorderRatio) { - retLen = generateData(data, data_type, - startTime + getTSRandTail( - (int64_t) DEFAULT_TIMESTAMP_STEP, k, - g_args.disorderRatio, - g_args.disorderRange), - lenOfBinary); - } else { - retLen = generateData(data, data_type, - startTime + (int64_t) (DEFAULT_TIMESTAMP_STEP* k), - lenOfBinary); + if (g_args.disorderRatio) { + retLen = generateData(data, data_type, + startTime + getTSRandTail( + (int64_t) DEFAULT_TIMESTAMP_STEP, k, + g_args.disorderRatio, + g_args.disorderRange), + lenOfBinary); + } else { + retLen = generateData(data, data_type, + startTime + (int64_t) (DEFAULT_TIMESTAMP_STEP* k), + lenOfBinary); + } + + if (len > remainderBufLen) + break; + + pstr += sprintf(pstr, "%s", data); + k++; + len += retLen; + remainderBufLen -= retLen; + + verbosePrint("%s() LN%d len=%"PRIu64" k=%d \nbuffer=%s\n", + __func__, __LINE__, len, k, buffer); + + recordFrom ++; + + if (recordFrom >= insertRows) { + 
break; + } } - if (len > remainderBufLen) - break; - - pstr += sprintf(pstr, "%s", data); - k++; - len += retLen; - remainderBufLen -= retLen; - - verbosePrint("%s() LN%d len=%"PRIu64" k=%d \nbuffer=%s\n", - __func__, __LINE__, len, k, buffer); - - recordFrom ++; - - if (recordFrom >= insertRows) { - break; - } - } - - *dataLen = len; - return k; + *dataLen = len; + return k; } static int64_t getTSRandTail(int64_t timeStampStep, int32_t seq, @@ -5227,63 +5238,69 @@ static int32_t generateStbDataTail( int64_t remainderBufLen, int64_t insertRows, uint64_t recordFrom, int64_t startTime, int64_t *pSamplePos, int64_t *dataLen) { - uint64_t len = 0; + uint64_t len = 0; - char *pstr = buffer; + char *pstr = buffer; - bool tsRand; - if (0 == strncasecmp(superTblInfo->dataSource, "rand", strlen("rand"))) { - tsRand = true; - } else { - tsRand = false; - } - verbosePrint("%s() LN%d batch=%u buflen=%"PRId64"\n", - __func__, __LINE__, batch, remainderBufLen); - - int32_t k; - for (k = 0; k < batch;) { - char data[MAX_DATA_SIZE]; - memset(data, 0, MAX_DATA_SIZE); - - int64_t lenOfRow = 0; - - if (tsRand) { - lenOfRow = generateStbRowData(superTblInfo, data, - startTime + getTSRandTail( - superTblInfo->timeStampStep, k, - superTblInfo->disorderRatio, - superTblInfo->disorderRange) - ); + bool tsRand; + if (0 == strncasecmp(superTblInfo->dataSource, "rand", strlen("rand"))) { + tsRand = true; } else { - lenOfRow = getRowDataFromSample( - data, - (remainderBufLen < MAX_DATA_SIZE)?remainderBufLen:MAX_DATA_SIZE, - startTime + superTblInfo->timeStampStep * k, - superTblInfo, - pSamplePos); + tsRand = false; + } + verbosePrint("%s() LN%d batch=%u buflen=%"PRId64"\n", + __func__, __LINE__, batch, remainderBufLen); + + int32_t k; + for (k = 0; k < batch;) { + char data[MAX_DATA_SIZE]; + memset(data, 0, MAX_DATA_SIZE); + + int64_t lenOfRow = 0; + + if (tsRand) { + if (superTblInfo->disorderRatio > 0) { + lenOfRow = generateStbRowData(superTblInfo, data, + startTime + getTSRandTail( + superTblInfo->timeStampStep, k, + superTblInfo->disorderRatio, + superTblInfo->disorderRange) + ); + } else { + lenOfRow = generateStbRowData(superTblInfo, data, + startTime + superTblInfo->timeStampStep * k + ); + } + } else { + lenOfRow = getRowDataFromSample( + data, + (remainderBufLen < MAX_DATA_SIZE)?remainderBufLen:MAX_DATA_SIZE, + startTime + superTblInfo->timeStampStep * k, + superTblInfo, + pSamplePos); + } + + if ((lenOfRow + 1) > remainderBufLen) { + break; + } + + pstr += snprintf(pstr , lenOfRow + 1, "%s", data); + k++; + len += lenOfRow; + remainderBufLen -= lenOfRow; + + verbosePrint("%s() LN%d len=%"PRIu64" k=%u \nbuffer=%s\n", + __func__, __LINE__, len, k, buffer); + + recordFrom ++; + + if (recordFrom >= insertRows) { + break; + } } - if ((lenOfRow + 1) > remainderBufLen) { - break; - } - - pstr += snprintf(pstr , lenOfRow + 1, "%s", data); - k++; - len += lenOfRow; - remainderBufLen -= lenOfRow; - - verbosePrint("%s() LN%d len=%"PRIu64" k=%u \nbuffer=%s\n", - __func__, __LINE__, len, k, buffer); - - recordFrom ++; - - if (recordFrom >= insertRows) { - break; - } - } - - *dataLen = len; - return k; + *dataLen = len; + return k; } @@ -5291,82 +5308,82 @@ static int generateSQLHeadWithoutStb(char *tableName, char *dbName, char *buffer, int remainderBufLen) { - int len; + int len; - char headBuf[HEAD_BUFF_LEN]; + char headBuf[HEAD_BUFF_LEN]; - len = snprintf( - headBuf, - HEAD_BUFF_LEN, - "%s.%s values", - dbName, - tableName); + len = snprintf( + headBuf, + HEAD_BUFF_LEN, + "%s.%s values", + dbName, + tableName); - 
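
Both data-tail generators above share one loop shape: derive row k's timestamp from startTime + k * timeStampStep, optionally perturbed through getTSRandTail when a disorder ratio is configured (the new branch skips that call entirely when disorderRatio is 0), render the row, and stop when either the buffer budget or the requested row count runs out. A compressed sketch of that loop; `randTail` is a toy stand-in for the real getTSRandTail, and the "(ts, value)" row format is illustrative:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for getTSRandTail(): with probability disorderRatio%, pull the
 * timestamp back by up to disorderRange ms so some rows arrive out of order. */
static int64_t randTail(int64_t step, int32_t seq, int ratio, int range) {
    int64_t tail = step * seq;
    if (ratio > 0 && range > 0 && (rand() % 100) < ratio) tail -= rand() % range;
    return tail;
}

/* Appends up to `batch` rows of "(ts, value)" into buf, stopping when the
 * buffer budget runs out. Returns the number of rows actually written. */
static int32_t generateTailSketch(char *buf, int64_t budget, int32_t batch,
                                  int64_t startTime, int64_t step,
                                  int disorderRatio, int disorderRange) {
    char *p = buf;
    int32_t k;
    for (k = 0; k < batch; k++) {
        int64_t ts = (disorderRatio > 0)
            ? startTime + randTail(step, k, disorderRatio, disorderRange)
            : startTime + step * k;               /* fast path added by the patch */

        char row[64];
        int len = snprintf(row, sizeof(row), "(%lld, %d)", (long long)ts, k);
        if (len + 1 > budget) break;              /* would overflow the SQL buffer */

        p += sprintf(p, "%s", row);
        budget -= len;
    }
    return k;
}
```
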
if (len > remainderBufLen) - return -1; + if (len > remainderBufLen) + return -1; - tstrncpy(buffer, headBuf, len + 1); + tstrncpy(buffer, headBuf, len + 1); - return len; + return len; } static int generateStbSQLHead( SSuperTable* superTblInfo, - char *tableName, int32_t tableSeq, + char *tableName, int64_t tableSeq, char *dbName, char *buffer, int remainderBufLen) { - int len; + int len; - char headBuf[HEAD_BUFF_LEN]; + char headBuf[HEAD_BUFF_LEN]; - if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) { - char* tagsValBuf = NULL; - if (0 == superTblInfo->tagSource) { - tagsValBuf = generateTagVaulesForStb(superTblInfo, tableSeq); - } else { + if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) { + char* tagsValBuf = NULL; + if (0 == superTblInfo->tagSource) { + tagsValBuf = generateTagValuesForStb(superTblInfo, tableSeq); + } else { tagsValBuf = getTagValueFromTagSample( superTblInfo, tableSeq % superTblInfo->tagSampleCount); - } - if (NULL == tagsValBuf) { - errorPrint("%s() LN%d, tag buf failed to allocate memory\n", - __func__, __LINE__); - return -1; - } + } + if (NULL == tagsValBuf) { + errorPrint("%s() LN%d, tag buf failed to allocate memory\n", + __func__, __LINE__); + return -1; + } - len = snprintf( - headBuf, - HEAD_BUFF_LEN, - "%s.%s using %s.%s tags %s values", - dbName, - tableName, - dbName, - superTblInfo->sTblName, - tagsValBuf); - tmfree(tagsValBuf); + len = snprintf( + headBuf, + HEAD_BUFF_LEN, + "%s.%s using %s.%s TAGS%s values", + dbName, + tableName, + dbName, + superTblInfo->sTblName, + tagsValBuf); + tmfree(tagsValBuf); } else if (TBL_ALREADY_EXISTS == superTblInfo->childTblExists) { - len = snprintf( - headBuf, - HEAD_BUFF_LEN, - "%s.%s values", - dbName, - tableName); + len = snprintf( + headBuf, + HEAD_BUFF_LEN, + "%s.%s values", + dbName, + tableName); } else { - len = snprintf( - headBuf, - HEAD_BUFF_LEN, - "%s.%s values", - dbName, - tableName); - } + len = snprintf( + headBuf, + HEAD_BUFF_LEN, + "%s.%s values", + dbName, + tableName); + } - if (len > remainderBufLen) - return -1; + if (len > remainderBufLen) + return -1; - tstrncpy(buffer, headBuf, len + 1); + tstrncpy(buffer, headBuf, len + 1); - return len; + return len; } static int32_t generateStbInterlaceData( @@ -5380,52 +5397,52 @@ static int32_t generateStbInterlaceData( int64_t startTime, uint64_t *pRemainderBufLen) { - assert(buffer); - char *pstr = buffer; + assert(buffer); + char *pstr = buffer; - int headLen = generateStbSQLHead( - superTblInfo, - tableName, tableSeq, pThreadInfo->db_name, - pstr, *pRemainderBufLen); + int headLen = generateStbSQLHead( + superTblInfo, + tableName, tableSeq, pThreadInfo->db_name, + pstr, *pRemainderBufLen); - if (headLen <= 0) { - return 0; - } - // generate data buffer - verbosePrint("[%d] %s() LN%d i=%"PRIu64" buffer:\n%s\n", + if (headLen <= 0) { + return 0; + } + // generate data buffer + verbosePrint("[%d] %s() LN%d i=%"PRIu64" buffer:\n%s\n", pThreadInfo->threadID, __func__, __LINE__, i, buffer); - pstr += headLen; - *pRemainderBufLen -= headLen; + pstr += headLen; + *pRemainderBufLen -= headLen; - int64_t dataLen = 0; + int64_t dataLen = 0; - verbosePrint("[%d] %s() LN%d i=%"PRIu64" batchPerTblTimes=%u batchPerTbl = %u\n", + verbosePrint("[%d] %s() LN%d i=%"PRIu64" batchPerTblTimes=%u batchPerTbl = %u\n", pThreadInfo->threadID, __func__, __LINE__, i, batchPerTblTimes, batchPerTbl); - if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) { - startTime = taosGetTimestamp(pThreadInfo->time_precision); - } + if (0 == 
strncasecmp(superTblInfo->startTimestamp, "now", 3)) { + startTime = taosGetTimestamp(pThreadInfo->time_precision); + } - int32_t k = generateStbDataTail( + int32_t k = generateStbDataTail( superTblInfo, batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0, startTime, &(pThreadInfo->samplePos), &dataLen); - if (k == batchPerTbl) { - pstr += dataLen; - *pRemainderBufLen -= dataLen; - } else { - debugPrint("%s() LN%d, generated data tail: %u, not equal batch per table: %u\n", - __func__, __LINE__, k, batchPerTbl); - pstr -= headLen; - pstr[0] = '\0'; - k = 0; - } + if (k == batchPerTbl) { + pstr += dataLen; + *pRemainderBufLen -= dataLen; + } else { + debugPrint("%s() LN%d, generated data tail: %u, not equal batch per table: %u\n", + __func__, __LINE__, k, batchPerTbl); + pstr -= headLen; + pstr[0] = '\0'; + k = 0; + } - return k; + return k; } static int64_t generateInterlaceDataWithoutStb( @@ -5644,8 +5661,7 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, *ptr += bind->buffer_length; } else { - errorPrint( "No support data type: %s\n", - dataType); + errorPrint( "No support data type: %s\n", dataType); return -1; } @@ -5731,28 +5747,120 @@ static int32_t prepareStmtWithoutStb( return k; } +static int32_t prepareStbStmtBind( + char *bindArray, SSuperTable *stbInfo, bool sourceRand, + int64_t startTime, int32_t recSeq, + bool isColumn) +{ + char *bindBuffer = calloc(1, g_args.len_of_binary); + if (bindBuffer == NULL) { + errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n", + __func__, __LINE__, g_args.len_of_binary); + return -1; + } + + char data[MAX_DATA_SIZE]; + memset(data, 0, MAX_DATA_SIZE); + char *ptr = data; + + TAOS_BIND *bind; + + if (isColumn) { + for (int i = 0; i < stbInfo->columnCount + 1; i ++) { + bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * i)); + + if (i == 0) { + int64_t *bind_ts; + + bind_ts = (int64_t *)ptr; + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + if (stbInfo->disorderRatio) { + *bind_ts = startTime + getTSRandTail( + stbInfo->timeStampStep, recSeq, + stbInfo->disorderRatio, + stbInfo->disorderRange); + } else { + *bind_ts = startTime + stbInfo->timeStampStep * recSeq; + } + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_ts; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + ptr += bind->buffer_length; + } else { + int cursor = 0; + + if (sourceRand) { + if ( -1 == prepareStmtBindArrayByType( + bind, + stbInfo->columns[i-1].dataType, + stbInfo->columns[i-1].dataLen, + &ptr, + NULL)) { + free(bindBuffer); + return -1; + } + } else { + char *restStr = stbInfo->sampleDataBuf + cursor; + int lengthOfRest = strlen(restStr); + + int index = 0; + for (index = 0; index < lengthOfRest; index ++) { + if (restStr[index] == ',') { + break; + } + } + + memset(bindBuffer, 0, g_args.len_of_binary); + strncpy(bindBuffer, restStr, index); + cursor += index + 1; // skip ',' too + + if ( -1 == prepareStmtBindArrayByType( + bind, + stbInfo->columns[i-1].dataType, + stbInfo->columns[i-1].dataLen, + &ptr, + bindBuffer)) { + free(bindBuffer); + return -1; + } + } + } + } + } else { + TAOS_BIND *tag; + + for (int t = 0; t < stbInfo->tagCount; t ++) { + tag = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * t)); + if ( -1 == prepareStmtBindArrayByType( + tag, + stbInfo->tags[t].dataType, + stbInfo->tags[t].dataLen, + &ptr, + NULL)) { + free(bindBuffer); + return -1; + } + } + + } + + return 0; +} + static int32_t prepareStbStmt( SSuperTable *stbInfo, TAOS_STMT *stmt, - char *tableName, uint32_t batch, + char 
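
generateStbSQLHead above prefixes each batch either with a plain `db.tbl values` head or, for auto-created child tables, with `db.tbl using db.stb TAGS(...) values` (the patch switches the literal from `tags %s` to `TAGS%s`; the tag value string carries its own parentheses). A minimal sketch of building that head; `buildStbHead` and all names passed to it are illustrative:

```c
#include <stdio.h>

/* Builds the statement head for an auto-created child table.
 * Returns the head length, or -1 if it would not fit into buf. */
static int buildStbHead(char *buf, size_t bufLen,
                        const char *db, const char *tbl,
                        const char *stb, const char *tagsVal /* e.g. "(1,'beijing')" */) {
    int len = snprintf(buf, bufLen, "%s.%s using %s.%s TAGS%s values",
                       db, tbl, db, stb, tagsVal);
    return (len < 0 || (size_t)len >= bufLen) ? -1 : len;
}
```

The head is then followed by the row tuples produced by generateStbDataTail, so the returned length is what the callers subtract from their remaining buffer budget.
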
*tableName, + int64_t tableSeq, + uint32_t batch, uint64_t insertRows, uint64_t recordFrom, int64_t startTime, int64_t *pSamplePos) { - int ret = taos_stmt_set_tbname(stmt, tableName); - if (ret != 0) { - errorPrint("failed to execute taos_stmt_set_tbname(%s). return 0x%x. reason: %s\n", - tableName, ret, taos_errstr(NULL)); - return ret; - } - - char *bindArray = malloc(sizeof(TAOS_BIND) * (stbInfo->columnCount + 1)); - if (bindArray == NULL) { - errorPrint("%s() LN%d, Failed to allocate %d bind params\n", - __func__, __LINE__, (stbInfo->columnCount + 1)); - return -1; - } + int ret; bool sourceRand; if (0 == strncasecmp(stbInfo->dataSource, "rand", strlen("rand"))) { @@ -5761,83 +5869,68 @@ static int32_t prepareStbStmt( sourceRand = false; // from sample data file } - char *bindBuffer = malloc(g_args.len_of_binary); - if (bindBuffer == NULL) { - errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n", - __func__, __LINE__, g_args.len_of_binary); - free(bindArray); + if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) { + char* tagsValBuf = NULL; + + bool tagRand; + if (0 == stbInfo->tagSource) { + tagRand = true; + tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq); + } else { + tagRand = false; + tagsValBuf = getTagValueFromTagSample( + stbInfo, + tableSeq % stbInfo->tagSampleCount); + } + + if (NULL == tagsValBuf) { + errorPrint("%s() LN%d, tag buf failed to allocate memory\n", + __func__, __LINE__); + return -1; + } + + char *tagsArray = calloc(1, sizeof(TAOS_BIND) * stbInfo->tagCount); + if (NULL == tagsArray) { + tmfree(tagsValBuf); + errorPrint("%s() LN%d, tag buf failed to allocate memory\n", + __func__, __LINE__); + return -1; + } + + if (-1 == prepareStbStmtBind( + tagsArray, stbInfo, tagRand, -1, -1, false /* is tag */)) { + free(tagsArray); + return -1; + } + + ret = taos_stmt_set_tbname_tags(stmt, tableName, (TAOS_BIND *)tagsArray); + + tmfree(tagsValBuf); + tmfree((char *)tagsArray); + } else { + ret = taos_stmt_set_tbname(stmt, tableName); + } + + if (ret != 0) { + errorPrint("failed to execute taos_stmt_set_tbname(%s). return 0x%x. 
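
When rows come from a sample CSV rather than random data, prepareStbStmtBind above walks sampleDataBuf one comma-separated token at a time: scan to the next ',', strncpy that slice into a scratch buffer, then advance the cursor past the comma. A tiny sketch of that tokenizing step; `nextField` and its buffer handling are illustrative:

```c
#include <string.h>

/* Copies the next comma-separated field of `rest` into out (at most outLen-1
 * bytes, always NUL-terminated) and returns how far to advance the cursor. */
static int nextField(const char *rest, char *out, int outLen) {
    int i = 0;
    while (rest[i] != '\0' && rest[i] != ',') i++;

    int copy = (i < outLen - 1) ? i : outLen - 1;
    memcpy(out, rest, copy);
    out[copy] = '\0';

    return (rest[i] == ',') ? i + 1 : i;   /* skip the ',' too, as the hunk's comment says */
}
```

In the hunk above the `cursor` offset appears to be declared inside the per-column branch; a caller of a helper like this one would want to keep that offset alive across all columns of a row so that consecutive fields do not re-read the start of sampleDataBuf.
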
reason: %s\n", + tableName, ret, taos_errstr(NULL)); + return ret; + } + + char *bindArray = calloc(1, sizeof(TAOS_BIND) * (stbInfo->columnCount + 1)); + if (bindArray == NULL) { + errorPrint("%s() LN%d, Failed to allocate %d bind params\n", + __func__, __LINE__, (stbInfo->columnCount + 1)); return -1; } uint32_t k; for (k = 0; k < batch;) { /* columnCount + 1 (ts) */ - char data[MAX_DATA_SIZE]; - memset(data, 0, MAX_DATA_SIZE); - - char *ptr = data; - TAOS_BIND *bind = (TAOS_BIND *)(bindArray + 0); - - int64_t *bind_ts; - - bind_ts = (int64_t *)ptr; - bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; - if (stbInfo->disorderRatio) { - *bind_ts = startTime + getTSRandTail( - stbInfo->timeStampStep, k, - stbInfo->disorderRatio, - stbInfo->disorderRange); - } else { - *bind_ts = startTime + stbInfo->timeStampStep * k; - } - bind->buffer_length = sizeof(int64_t); - bind->buffer = bind_ts; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - - ptr += bind->buffer_length; - - int cursor = 0; - for (int i = 0; i < stbInfo->columnCount; i ++) { - bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * (i + 1))); - - if (sourceRand) { - if ( -1 == prepareStmtBindArrayByType( - bind, - stbInfo->columns[i].dataType, - stbInfo->columns[i].dataLen, - &ptr, - NULL)) { - free(bindArray); - free(bindBuffer); - return -1; - } - } else { - char *restStr = stbInfo->sampleDataBuf + cursor; - int lengthOfRest = strlen(restStr); - - int index = 0; - for (index = 0; index < lengthOfRest; index ++) { - if (restStr[index] == ',') { - break; - } - } - - memset(bindBuffer, 0, g_args.len_of_binary); - strncpy(bindBuffer, restStr, index); - cursor += index + 1; // skip ',' too - - if ( -1 == prepareStmtBindArrayByType( - bind, - stbInfo->columns[i].dataType, - stbInfo->columns[i].dataLen, - &ptr, - bindBuffer)) { - free(bindArray); - free(bindBuffer); - return -1; - } - } + if (-1 == prepareStbStmtBind(bindArray, stbInfo, sourceRand, + startTime, k, true /* is column */)) { + free(bindArray); + return -1; } taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray); // if msg > 3MB, break @@ -5855,7 +5948,6 @@ static int32_t prepareStbStmt( } } - free(bindBuffer); free(bindArray); return k; } @@ -5863,7 +5955,9 @@ static int32_t prepareStbStmt( static int32_t prepareStbStmtInterlace( SSuperTable *stbInfo, TAOS_STMT *stmt, - char *tableName, uint32_t batch, + char *tableName, + int64_t tableSeq, + uint32_t batch, uint64_t insertRows, uint64_t recordFrom, int64_t startTime, @@ -5873,7 +5967,8 @@ static int32_t prepareStbStmtInterlace( stbInfo, stmt, tableName, - g_args.num_of_RPR, + tableSeq, + batch, insertRows, 0, startTime, pSamplePos); } @@ -5881,7 +5976,9 @@ static int32_t prepareStbStmtInterlace( static int32_t prepareStbStmtProgressive( SSuperTable *stbInfo, TAOS_STMT *stmt, - char *tableName, uint32_t batch, + char *tableName, + int64_t tableSeq, + uint32_t batch, uint64_t insertRows, uint64_t recordFrom, int64_t startTime, @@ -5891,6 +5988,7 @@ static int32_t prepareStbStmtProgressive( stbInfo, stmt, tableName, + tableSeq, g_args.num_of_RPR, insertRows, recordFrom, startTime, pSamplePos); @@ -5907,29 +6005,29 @@ static int32_t generateStbProgressiveData( uint64_t recordFrom, int64_t startTime, int64_t *pSamplePos, int64_t *pRemainderBufLen) { - assert(buffer != NULL); - char *pstr = buffer; + assert(buffer != NULL); + char *pstr = buffer; - memset(buffer, 0, *pRemainderBufLen); + memset(buffer, 0, *pRemainderBufLen); - int64_t headLen = generateStbSQLHead( - superTblInfo, - tableName, tableSeq, dbName, - 
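
The reworked prepareStbStmt path drives the TAOS statement API instead of assembling SQL text: it names the target child table with taos_stmt_set_tbname, or creates it on the fly with bound tags via taos_stmt_set_tbname_tags, then binds one TAOS_BIND per column (timestamp first) and adds the row to the batch. A condensed sketch of that call sequence for one row with a single INT column; the prepared SQL text, the table and super-table names, and the trimmed error handling are assumptions, not taken from the patch:

```c
#include <string.h>
#include <stdint.h>
#include "taos.h"   /* TDengine client header */

/* Sketch: insert one row (ts, v) into child table "d0" of super table "meters",
 * creating the child table on the fly with one INT tag. Error handling trimmed. */
static int stmtInsertOneRow(TAOS *taos, int64_t ts, int32_t v, int32_t groupId) {
    TAOS_STMT *stmt = taos_stmt_init(taos);
    const char *sql = "insert into ? using meters tags(?) values(?,?)";
    if (taos_stmt_prepare(stmt, sql, (unsigned long)strlen(sql)) != 0) return -1;

    TAOS_BIND tag;
    memset(&tag, 0, sizeof(tag));
    tag.buffer_type   = TSDB_DATA_TYPE_INT;
    tag.buffer        = &groupId;
    tag.buffer_length = sizeof(groupId);
    tag.length        = &tag.buffer_length;

    /* auto-create path; an existing table would use taos_stmt_set_tbname() instead */
    if (taos_stmt_set_tbname_tags(stmt, "d0", &tag) != 0) return -1;

    TAOS_BIND cols[2];
    memset(cols, 0, sizeof(cols));
    cols[0].buffer_type   = TSDB_DATA_TYPE_TIMESTAMP;
    cols[0].buffer        = &ts;
    cols[0].buffer_length = sizeof(ts);
    cols[0].length        = &cols[0].buffer_length;
    cols[1].buffer_type   = TSDB_DATA_TYPE_INT;
    cols[1].buffer        = &v;
    cols[1].buffer_length = sizeof(v);
    cols[1].length        = &cols[1].buffer_length;

    taos_stmt_bind_param(stmt, cols);   /* one TAOS_BIND per column, ts first */
    taos_stmt_add_batch(stmt);
    int ret = taos_stmt_execute(stmt);
    taos_stmt_close(stmt);
    return ret;
}
```
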
buffer, *pRemainderBufLen); + int64_t headLen = generateStbSQLHead( + superTblInfo, + tableName, tableSeq, dbName, + buffer, *pRemainderBufLen); - if (headLen <= 0) { - return 0; - } - pstr += headLen; - *pRemainderBufLen -= headLen; + if (headLen <= 0) { + return 0; + } + pstr += headLen; + *pRemainderBufLen -= headLen; - int64_t dataLen; + int64_t dataLen; - return generateStbDataTail(superTblInfo, - g_args.num_of_RPR, pstr, *pRemainderBufLen, - insertRows, recordFrom, - startTime, - pSamplePos, &dataLen); + return generateStbDataTail(superTblInfo, + g_args.num_of_RPR, pstr, *pRemainderBufLen, + insertRows, recordFrom, + startTime, + pSamplePos, &dataLen); } static int32_t generateProgressiveDataWithoutStb( @@ -5976,472 +6074,476 @@ static void printStatPerThread(threadInfo *pThreadInfo) // sync write interlace data static void* syncWriteInterlace(threadInfo *pThreadInfo) { - debugPrint("[%d] %s() LN%d: ### interlace write\n", - pThreadInfo->threadID, __func__, __LINE__); - - int64_t insertRows; - uint32_t interlaceRows; - uint64_t maxSqlLen; - int64_t nTimeStampStep; - uint64_t insert_interval; - - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - - if (superTblInfo) { - insertRows = superTblInfo->insertRows; - - if ((superTblInfo->interlaceRows == 0) - && (g_args.interlace_rows > 0)) { - interlaceRows = g_args.interlace_rows; - } else { - interlaceRows = superTblInfo->interlaceRows; - } - maxSqlLen = superTblInfo->maxSqlLen; - nTimeStampStep = superTblInfo->timeStampStep; - insert_interval = superTblInfo->insertInterval; - } else { - insertRows = g_args.num_of_DPT; - interlaceRows = g_args.interlace_rows; - maxSqlLen = g_args.max_sql_len; - nTimeStampStep = DEFAULT_TIMESTAMP_STEP; - insert_interval = g_args.insert_interval; - } - - debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n", - pThreadInfo->threadID, __func__, __LINE__, - pThreadInfo->start_table_from, - pThreadInfo->ntables, insertRows); - - if (interlaceRows > insertRows) - interlaceRows = insertRows; - - if (interlaceRows > g_args.num_of_RPR) - interlaceRows = g_args.num_of_RPR; - - uint32_t batchPerTbl = interlaceRows; - uint32_t batchPerTblTimes; - - if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { - batchPerTblTimes = - g_args.num_of_RPR / interlaceRows; - } else { - batchPerTblTimes = 1; - } - - pThreadInfo->buffer = calloc(maxSqlLen, 1); - if (NULL == pThreadInfo->buffer) { - errorPrint( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n", - __func__, __LINE__, maxSqlLen, strerror(errno)); - return NULL; - } - - pThreadInfo->totalInsertRows = 0; - pThreadInfo->totalAffectedRows = 0; - - uint64_t st = 0; - uint64_t et = UINT64_MAX; - - uint64_t lastPrintTime = taosGetTimestampMs(); - uint64_t startTs = taosGetTimestampMs(); - uint64_t endTs; - - uint64_t tableSeq = pThreadInfo->start_table_from; - int64_t startTime = pThreadInfo->start_time; - - uint64_t generatedRecPerTbl = 0; - bool flagSleep = true; - uint64_t sleepTimeTotal = 0; - - while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { - if ((flagSleep) && (insert_interval)) { - st = taosGetTimestampMs(); - flagSleep = false; - } - // generate data - memset(pThreadInfo->buffer, 0, maxSqlLen); - uint64_t remainderBufLen = maxSqlLen; - - char *pstr = pThreadInfo->buffer; - - int len = snprintf(pstr, - strlen(STR_INSERT_INTO) + 1, "%s", STR_INSERT_INTO); - pstr += len; - remainderBufLen -= len; - - uint32_t recOfBatch = 0; - - for (uint64_t i = 0; i < batchPerTblTimes; i ++) { - char 
tableName[TSDB_TABLE_NAME_LEN]; - - getTableName(tableName, pThreadInfo, tableSeq); - if (0 == strlen(tableName)) { - errorPrint("[%d] %s() LN%d, getTableName return null\n", + debugPrint("[%d] %s() LN%d: ### interlace write\n", pThreadInfo->threadID, __func__, __LINE__); - free(pThreadInfo->buffer); - return NULL; - } - uint64_t oldRemainderLen = remainderBufLen; + int64_t insertRows; + uint32_t interlaceRows; + uint64_t maxSqlLen; + int64_t nTimeStampStep; + uint64_t insert_interval; - int32_t generated; - if (superTblInfo) { - if (superTblInfo->iface == STMT_IFACE) { -#if STMT_IFACE_ENABLED == 1 - generated = prepareStbStmtInterlace( - superTblInfo, - pThreadInfo->stmt, - tableName, - batchPerTbl, - insertRows, i, - startTime, - &(pThreadInfo->samplePos)); -#else - generated = -1; -#endif - } else { - generated = generateStbInterlaceData( - superTblInfo, - tableName, batchPerTbl, i, - batchPerTblTimes, - tableSeq, - pThreadInfo, pstr, - insertRows, - startTime, - &remainderBufLen); - } - } else { - if (g_args.iface == STMT_IFACE) { - debugPrint("[%d] %s() LN%d, tableName:%s, batch:%d startTime:%"PRId64"\n", - pThreadInfo->threadID, - __func__, __LINE__, - tableName, batchPerTbl, startTime); -#if STMT_IFACE_ENABLED == 1 - generated = prepareStmtWithoutStb( - pThreadInfo->stmt, tableName, - batchPerTbl, - insertRows, i, - startTime); -#else - generated = -1; -#endif - } else { - generated = generateInterlaceDataWithoutStb( - tableName, batchPerTbl, - tableSeq, - pThreadInfo->db_name, pstr, - insertRows, - startTime, - &remainderBufLen); - } - } + SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - debugPrint("[%d] %s() LN%d, generated records is %d\n", - pThreadInfo->threadID, __func__, __LINE__, generated); - if (generated < 0) { - errorPrint("[%d] %s() LN%d, generated records is %d\n", - pThreadInfo->threadID, __func__, __LINE__, generated); - goto free_of_interlace; - } else if (generated == 0) { - break; - } + if (superTblInfo) { + insertRows = superTblInfo->insertRows; - tableSeq ++; - recOfBatch += batchPerTbl; - - pstr += (oldRemainderLen - remainderBufLen); - pThreadInfo->totalInsertRows += batchPerTbl; - - verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n", - pThreadInfo->threadID, __func__, __LINE__, - batchPerTbl, recOfBatch); - - if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) { - // turn to first table - tableSeq = pThreadInfo->start_table_from; - generatedRecPerTbl += batchPerTbl; - - startTime = pThreadInfo->start_time - + generatedRecPerTbl * nTimeStampStep; - - flagSleep = true; - if (generatedRecPerTbl >= insertRows) - break; - - int64_t remainRows = insertRows - generatedRecPerTbl; - if ((remainRows > 0) && (batchPerTbl > remainRows)) - batchPerTbl = remainRows; - - if (pThreadInfo->ntables * batchPerTbl < g_args.num_of_RPR) - break; - } - - verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%"PRId64" insertRows=%"PRId64"\n", - pThreadInfo->threadID, __func__, __LINE__, - generatedRecPerTbl, insertRows); - - if ((g_args.num_of_RPR - recOfBatch) < batchPerTbl) - break; - } - - verbosePrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%"PRIu64"\n", - pThreadInfo->threadID, __func__, __LINE__, recOfBatch, - pThreadInfo->totalInsertRows); - verbosePrint("[%d] %s() LN%d, buffer=%s\n", - pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->buffer); - - startTs = taosGetTimestampMs(); - - if (recOfBatch == 0) { - errorPrint("[%d] %s() LN%d Failed to insert records of batch %d\n", - pThreadInfo->threadID, __func__, __LINE__, - batchPerTbl); 
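
syncWriteInterlace sizes each request from two numbers: batchPerTbl, the interlace row count clamped to insertRows and num_of_RPR, and batchPerTblTimes, how many child tables share one request (num_of_RPR / interlaceRows when the thread owns more than one table, otherwise 1). A small worked example of that arithmetic; the concrete values are made up:

```c
#include <stdio.h>
#include <stdint.h>

int main(void) {
    uint64_t num_of_RPR    = 30000;  /* records-per-request ceiling */
    uint32_t interlaceRows = 50;     /* rows taken from one table before moving on */
    uint64_t ntables       = 1000;

    uint32_t batchPerTbl      = interlaceRows;
    uint32_t batchPerTblTimes =
        (interlaceRows > 0 && ntables > 1) ? (uint32_t)(num_of_RPR / interlaceRows) : 1;

    /* One request therefore interleaves at most this many rows, drawn from
     * batchPerTblTimes different child tables in round-robin order. */
    printf("rows per request <= %u (from %u tables, %u rows each)\n",
           batchPerTbl * batchPerTblTimes, batchPerTblTimes, batchPerTbl);
    return 0;
}
```

With these values one request interleaves 50 rows from each of 600 tables, which exactly fills the num_of_RPR ceiling.
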
- if (batchPerTbl > 0) { - errorPrint("\tIf the batch is %d, the length of the SQL to insert a row must be less then %"PRId64"\n", - batchPerTbl, maxSqlLen / batchPerTbl); + if ((superTblInfo->interlaceRows == 0) + && (g_args.interlace_rows > 0)) { + interlaceRows = g_args.interlace_rows; + } else { + interlaceRows = superTblInfo->interlaceRows; } - errorPrint("\tPlease check if the buffer length(%"PRId64") or batch(%d) is set with proper value!\n", - maxSqlLen, batchPerTbl); - goto free_of_interlace; - } - int64_t affectedRows = execInsert(pThreadInfo, recOfBatch); - - endTs = taosGetTimestampMs(); - uint64_t delay = endTs - startTs; - performancePrint("%s() LN%d, insert execution time is %"PRIu64"ms\n", - __func__, __LINE__, delay); - verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n", - pThreadInfo->threadID, - __func__, __LINE__, affectedRows); - - if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; - if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; - pThreadInfo->cntDelay++; - pThreadInfo->totalDelay += delay; - - if (recOfBatch != affectedRows) { - errorPrint("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n%s\n", - pThreadInfo->threadID, __func__, __LINE__, - recOfBatch, affectedRows, pThreadInfo->buffer); - goto free_of_interlace; + maxSqlLen = superTblInfo->maxSqlLen; + nTimeStampStep = superTblInfo->timeStampStep; + insert_interval = superTblInfo->insertInterval; + } else { + insertRows = g_args.num_of_DPT; + interlaceRows = g_args.interlace_rows; + maxSqlLen = g_args.max_sql_len; + nTimeStampStep = DEFAULT_TIMESTAMP_STEP; + insert_interval = g_args.insert_interval; } - pThreadInfo->totalAffectedRows += affectedRows; + debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n", + pThreadInfo->threadID, __func__, __LINE__, + pThreadInfo->start_table_from, + pThreadInfo->ntables, insertRows); - int64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30*1000) { - printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n", + if (interlaceRows > insertRows) + interlaceRows = insertRows; + + if (interlaceRows > g_args.num_of_RPR) + interlaceRows = g_args.num_of_RPR; + + uint32_t batchPerTbl = interlaceRows; + uint32_t batchPerTblTimes; + + if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { + batchPerTblTimes = + g_args.num_of_RPR / interlaceRows; + } else { + batchPerTblTimes = 1; + } + + pThreadInfo->buffer = calloc(maxSqlLen, 1); + if (NULL == pThreadInfo->buffer) { + errorPrint( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n", + __func__, __LINE__, maxSqlLen, strerror(errno)); + return NULL; + } + + pThreadInfo->totalInsertRows = 0; + pThreadInfo->totalAffectedRows = 0; + + uint64_t st = 0; + uint64_t et = UINT64_MAX; + + uint64_t lastPrintTime = taosGetTimestampMs(); + uint64_t startTs = taosGetTimestampMs(); + uint64_t endTs; + + uint64_t tableSeq = pThreadInfo->start_table_from; + int64_t startTime = pThreadInfo->start_time; + + uint64_t generatedRecPerTbl = 0; + bool flagSleep = true; + uint64_t sleepTimeTotal = 0; + + while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { + if ((flagSleep) && (insert_interval)) { + st = taosGetTimestampMs(); + flagSleep = false; + } + // generate data + memset(pThreadInfo->buffer, 0, maxSqlLen); + uint64_t remainderBufLen = maxSqlLen; + + char *pstr = pThreadInfo->buffer; + + int len = snprintf(pstr, + strlen(STR_INSERT_INTO) + 1, "%s", STR_INSERT_INTO); + pstr 
+= len; + remainderBufLen -= len; + + uint32_t recOfBatch = 0; + + for (uint64_t i = 0; i < batchPerTblTimes; i ++) { + char tableName[TSDB_TABLE_NAME_LEN]; + + getTableName(tableName, pThreadInfo, tableSeq); + if (0 == strlen(tableName)) { + errorPrint("[%d] %s() LN%d, getTableName return null\n", + pThreadInfo->threadID, __func__, __LINE__); + free(pThreadInfo->buffer); + return NULL; + } + + uint64_t oldRemainderLen = remainderBufLen; + + int32_t generated; + if (superTblInfo) { + if (superTblInfo->iface == STMT_IFACE) { +#if STMT_IFACE_ENABLED == 1 + generated = prepareStbStmtInterlace( + superTblInfo, + pThreadInfo->stmt, + tableName, + tableSeq, + batchPerTbl, + insertRows, i, + startTime, + &(pThreadInfo->samplePos)); +#else + generated = -1; +#endif + } else { + generated = generateStbInterlaceData( + superTblInfo, + tableName, batchPerTbl, i, + batchPerTblTimes, + tableSeq, + pThreadInfo, pstr, + insertRows, + startTime, + &remainderBufLen); + } + } else { + if (g_args.iface == STMT_IFACE) { + debugPrint("[%d] %s() LN%d, tableName:%s, batch:%d startTime:%"PRId64"\n", + pThreadInfo->threadID, + __func__, __LINE__, + tableName, batchPerTbl, startTime); +#if STMT_IFACE_ENABLED == 1 + generated = prepareStmtWithoutStb( + pThreadInfo->stmt, tableName, + batchPerTbl, + insertRows, i, + startTime); +#else + generated = -1; +#endif + } else { + generated = generateInterlaceDataWithoutStb( + tableName, batchPerTbl, + tableSeq, + pThreadInfo->db_name, pstr, + insertRows, + startTime, + &remainderBufLen); + } + } + + debugPrint("[%d] %s() LN%d, generated records is %d\n", + pThreadInfo->threadID, __func__, __LINE__, generated); + if (generated < 0) { + errorPrint("[%d] %s() LN%d, generated records is %d\n", + pThreadInfo->threadID, __func__, __LINE__, generated); + goto free_of_interlace; + } else if (generated == 0) { + break; + } + + tableSeq ++; + recOfBatch += batchPerTbl; + + pstr += (oldRemainderLen - remainderBufLen); + pThreadInfo->totalInsertRows += batchPerTbl; + + verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n", + pThreadInfo->threadID, __func__, __LINE__, + batchPerTbl, recOfBatch); + + if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) { + // turn to first table + tableSeq = pThreadInfo->start_table_from; + generatedRecPerTbl += batchPerTbl; + + startTime = pThreadInfo->start_time + + generatedRecPerTbl * nTimeStampStep; + + flagSleep = true; + if (generatedRecPerTbl >= insertRows) + break; + + int64_t remainRows = insertRows - generatedRecPerTbl; + if ((remainRows > 0) && (batchPerTbl > remainRows)) + batchPerTbl = remainRows; + + if (pThreadInfo->ntables * batchPerTbl < g_args.num_of_RPR) + break; + } + + verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%"PRId64" insertRows=%"PRId64"\n", + pThreadInfo->threadID, __func__, __LINE__, + generatedRecPerTbl, insertRows); + + if ((g_args.num_of_RPR - recOfBatch) < batchPerTbl) + break; + } + + verbosePrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%"PRIu64"\n", + pThreadInfo->threadID, __func__, __LINE__, recOfBatch, + pThreadInfo->totalInsertRows); + verbosePrint("[%d] %s() LN%d, buffer=%s\n", + pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->buffer); + + startTs = taosGetTimestampMs(); + + if (recOfBatch == 0) { + errorPrint("[%d] %s() LN%d Failed to insert records of batch %d\n", + pThreadInfo->threadID, __func__, __LINE__, + batchPerTbl); + if (batchPerTbl > 0) { + errorPrint("\tIf the batch is %d, the length of the SQL to insert a row must be less then %"PRId64"\n", + batchPerTbl, 
maxSqlLen / batchPerTbl); + } + errorPrint("\tPlease check if the buffer length(%"PRId64") or batch(%d) is set with proper value!\n", + maxSqlLen, batchPerTbl); + goto free_of_interlace; + } + int64_t affectedRows = execInsert(pThreadInfo, recOfBatch); + + endTs = taosGetTimestampMs(); + uint64_t delay = endTs - startTs; + performancePrint("%s() LN%d, insert execution time is %"PRIu64"ms\n", + __func__, __LINE__, delay); + verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n", + pThreadInfo->threadID, + __func__, __LINE__, affectedRows); + + if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; + if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; + pThreadInfo->cntDelay++; + pThreadInfo->totalDelay += delay; + + if (recOfBatch != affectedRows) { + errorPrint("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n%s\n", + pThreadInfo->threadID, __func__, __LINE__, + recOfBatch, affectedRows, pThreadInfo->buffer); + goto free_of_interlace; + } + + pThreadInfo->totalAffectedRows += affectedRows; + + int64_t currentPrintTime = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n", pThreadInfo->threadID, pThreadInfo->totalInsertRows, pThreadInfo->totalAffectedRows); - lastPrintTime = currentPrintTime; - } + lastPrintTime = currentPrintTime; + } - if ((insert_interval) && flagSleep) { - et = taosGetTimestampMs(); + if ((insert_interval) && flagSleep) { + et = taosGetTimestampMs(); - if (insert_interval > (et - st) ) { - uint64_t sleepTime = insert_interval - (et -st); - performancePrint("%s() LN%d sleep: %"PRId64" ms for insert interval\n", - __func__, __LINE__, sleepTime); - taosMsleep(sleepTime); // ms - sleepTimeTotal += insert_interval; - } + if (insert_interval > (et - st) ) { + uint64_t sleepTime = insert_interval - (et -st); + performancePrint("%s() LN%d sleep: %"PRId64" ms for insert interval\n", + __func__, __LINE__, sleepTime); + taosMsleep(sleepTime); // ms + sleepTimeTotal += insert_interval; + } + } } - } free_of_interlace: - tmfree(pThreadInfo->buffer); - printStatPerThread(pThreadInfo); - return NULL; + tmfree(pThreadInfo->buffer); + printStatPerThread(pThreadInfo); + return NULL; } // sync insertion progressive data static void* syncWriteProgressive(threadInfo *pThreadInfo) { - debugPrint("%s() LN%d: ### progressive write\n", __func__, __LINE__); + debugPrint("%s() LN%d: ### progressive write\n", __func__, __LINE__); - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - uint64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; - int64_t timeStampStep = - superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP; - int64_t insertRows = + SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + uint64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; + int64_t timeStampStep = + superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP; + int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT; - verbosePrint("%s() LN%d insertRows=%"PRId64"\n", + verbosePrint("%s() LN%d insertRows=%"PRId64"\n", __func__, __LINE__, insertRows); - pThreadInfo->buffer = calloc(maxSqlLen, 1); - if (NULL == pThreadInfo->buffer) { - errorPrint( "Failed to alloc %"PRIu64" Bytes, reason:%s\n", - maxSqlLen, - strerror(errno)); - return NULL; - } - - uint64_t lastPrintTime = taosGetTimestampMs(); - uint64_t startTs = taosGetTimestampMs(); - uint64_t endTs; - - 
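
Every execInsert call above is bracketed with taosGetTimestampMs() and folded into per-thread latency counters (min, max, count, total) that printStatPerThread reports at the end of the run. A minimal sketch of that bookkeeping; the DelayStats struct and the nowMs helper are illustrative stand-ins for the threadInfo fields and taosGetTimestampMs:

```c
#include <stdint.h>
#include <sys/time.h>

/* Initialise with { UINT64_MAX, 0, 0, 0 } so the first sample sets minDelay. */
typedef struct {
    uint64_t minDelay, maxDelay, cntDelay, totalDelay;
} DelayStats;

static uint64_t nowMs(void) {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (uint64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000;
}

/* Call with the start/end timestamps that bracket one execInsert(). */
static void recordDelay(DelayStats *s, uint64_t startMs, uint64_t endMs) {
    uint64_t delay = endMs - startMs;
    if (delay > s->maxDelay) s->maxDelay = delay;
    if (delay < s->minDelay) s->minDelay = delay;
    s->cntDelay++;
    s->totalDelay += delay;   /* average = totalDelay / cntDelay when reporting */
}
```
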
pThreadInfo->totalInsertRows = 0; - pThreadInfo->totalAffectedRows = 0; - - pThreadInfo->samplePos = 0; - - for (uint64_t tableSeq = pThreadInfo->start_table_from; - tableSeq <= pThreadInfo->end_table_to; - tableSeq ++) { - int64_t start_time = pThreadInfo->start_time; - - for (uint64_t i = 0; i < insertRows;) { - char tableName[TSDB_TABLE_NAME_LEN]; - getTableName(tableName, pThreadInfo, tableSeq); - verbosePrint("%s() LN%d: tid=%d seq=%"PRId64" tableName=%s\n", - __func__, __LINE__, - pThreadInfo->threadID, tableSeq, tableName); - if (0 == strlen(tableName)) { - errorPrint("[%d] %s() LN%d, getTableName return null\n", - pThreadInfo->threadID, __func__, __LINE__); - free(pThreadInfo->buffer); + pThreadInfo->buffer = calloc(maxSqlLen, 1); + if (NULL == pThreadInfo->buffer) { + errorPrint( "Failed to alloc %"PRIu64" Bytes, reason:%s\n", + maxSqlLen, + strerror(errno)); return NULL; - } - - int64_t remainderBufLen = maxSqlLen; - char *pstr = pThreadInfo->buffer; - - int len = snprintf(pstr, - strlen(STR_INSERT_INTO) + 1, "%s", STR_INSERT_INTO); - - pstr += len; - remainderBufLen -= len; - - int32_t generated; - if (superTblInfo) { - if (superTblInfo->iface == STMT_IFACE) { -#if STMT_IFACE_ENABLED == 1 - generated = prepareStbStmtProgressive( - superTblInfo, - pThreadInfo->stmt, - tableName, - g_args.num_of_RPR, - insertRows, i, start_time, - &(pThreadInfo->samplePos)); -#else - generated = -1; -#endif - } else { - generated = generateStbProgressiveData( - superTblInfo, - tableName, tableSeq, pThreadInfo->db_name, pstr, - insertRows, i, start_time, - &(pThreadInfo->samplePos), - &remainderBufLen); - } - } else { - if (g_args.iface == STMT_IFACE) { -#if STMT_IFACE_ENABLED == 1 - generated = prepareStmtWithoutStb( - pThreadInfo->stmt, - tableName, - g_args.num_of_RPR, - insertRows, i, - start_time); -#else - generated = -1; -#endif - } else { - generated = generateProgressiveDataWithoutStb( - tableName, - /* tableSeq, */ - pThreadInfo, pstr, insertRows, - i, start_time, - /* &(pThreadInfo->samplePos), */ - &remainderBufLen); - } - } - if (generated > 0) - i += generated; - else - goto free_of_progressive; - - start_time += generated * timeStampStep; - pThreadInfo->totalInsertRows += generated; - - startTs = taosGetTimestampMs(); - - int32_t affectedRows = execInsert(pThreadInfo, generated); - - endTs = taosGetTimestampMs(); - uint64_t delay = endTs - startTs; - performancePrint("%s() LN%d, insert execution time is %"PRId64"ms\n", - __func__, __LINE__, delay); - verbosePrint("[%d] %s() LN%d affectedRows=%d\n", - pThreadInfo->threadID, - __func__, __LINE__, affectedRows); - - if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; - if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; - pThreadInfo->cntDelay++; - pThreadInfo->totalDelay += delay; - - if (affectedRows < 0) { - errorPrint("%s() LN%d, affected rows: %d\n", - __func__, __LINE__, affectedRows); - goto free_of_progressive; - } - - pThreadInfo->totalAffectedRows += affectedRows; - - int64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30*1000) { - printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n", - pThreadInfo->threadID, - pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); - lastPrintTime = currentPrintTime; - } - - if (i >= insertRows) - break; - } // num_of_DPT - - if ((g_args.verbose_print) && - (tableSeq == pThreadInfo->ntables - 1) && (superTblInfo) && - (0 == strncasecmp( - superTblInfo->dataSource, "sample", 
strlen("sample")))) { - verbosePrint("%s() LN%d samplePos=%"PRId64"\n", - __func__, __LINE__, pThreadInfo->samplePos); } - } // tableSeq + + uint64_t lastPrintTime = taosGetTimestampMs(); + uint64_t startTs = taosGetTimestampMs(); + uint64_t endTs; + + pThreadInfo->totalInsertRows = 0; + pThreadInfo->totalAffectedRows = 0; + + pThreadInfo->samplePos = 0; + + for (uint64_t tableSeq = pThreadInfo->start_table_from; + tableSeq <= pThreadInfo->end_table_to; + tableSeq ++) { + int64_t start_time = pThreadInfo->start_time; + + for (uint64_t i = 0; i < insertRows;) { + char tableName[TSDB_TABLE_NAME_LEN]; + getTableName(tableName, pThreadInfo, tableSeq); + verbosePrint("%s() LN%d: tid=%d seq=%"PRId64" tableName=%s\n", + __func__, __LINE__, + pThreadInfo->threadID, tableSeq, tableName); + if (0 == strlen(tableName)) { + errorPrint("[%d] %s() LN%d, getTableName return null\n", + pThreadInfo->threadID, __func__, __LINE__); + free(pThreadInfo->buffer); + return NULL; + } + + int64_t remainderBufLen = maxSqlLen; + char *pstr = pThreadInfo->buffer; + + int len = snprintf(pstr, + strlen(STR_INSERT_INTO) + 1, "%s", STR_INSERT_INTO); + + pstr += len; + remainderBufLen -= len; + + int32_t generated; + if (superTblInfo) { + if (superTblInfo->iface == STMT_IFACE) { +#if STMT_IFACE_ENABLED == 1 + generated = prepareStbStmtProgressive( + superTblInfo, + pThreadInfo->stmt, + tableName, + tableSeq, + g_args.num_of_RPR, + insertRows, i, start_time, + &(pThreadInfo->samplePos)); +#else + generated = -1; +#endif + } else { + generated = generateStbProgressiveData( + superTblInfo, + tableName, tableSeq, pThreadInfo->db_name, pstr, + insertRows, i, start_time, + &(pThreadInfo->samplePos), + &remainderBufLen); + } + } else { + if (g_args.iface == STMT_IFACE) { +#if STMT_IFACE_ENABLED == 1 + generated = prepareStmtWithoutStb( + pThreadInfo->stmt, + tableName, + g_args.num_of_RPR, + insertRows, i, + start_time); +#else + generated = -1; +#endif + } else { + generated = generateProgressiveDataWithoutStb( + tableName, + /* tableSeq, */ + pThreadInfo, pstr, insertRows, + i, start_time, + /* &(pThreadInfo->samplePos), */ + &remainderBufLen); + } + } + if (generated > 0) + i += generated; + else + goto free_of_progressive; + + start_time += generated * timeStampStep; + pThreadInfo->totalInsertRows += generated; + + startTs = taosGetTimestampMs(); + + int32_t affectedRows = execInsert(pThreadInfo, generated); + + endTs = taosGetTimestampMs(); + uint64_t delay = endTs - startTs; + performancePrint("%s() LN%d, insert execution time is %"PRId64"ms\n", + __func__, __LINE__, delay); + verbosePrint("[%d] %s() LN%d affectedRows=%d\n", + pThreadInfo->threadID, + __func__, __LINE__, affectedRows); + + if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; + if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; + pThreadInfo->cntDelay++; + pThreadInfo->totalDelay += delay; + + if (affectedRows < 0) { + errorPrint("%s() LN%d, affected rows: %d\n", + __func__, __LINE__, affectedRows); + goto free_of_progressive; + } + + pThreadInfo->totalAffectedRows += affectedRows; + + int64_t currentPrintTime = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n", + pThreadInfo->threadID, + pThreadInfo->totalInsertRows, + pThreadInfo->totalAffectedRows); + lastPrintTime = currentPrintTime; + } + + if (i >= insertRows) + break; + } // num_of_DPT + + if ((g_args.verbose_print) && + (tableSeq == pThreadInfo->ntables - 1) 
&& (superTblInfo) && + (0 == strncasecmp( + superTblInfo->dataSource, "sample", strlen("sample")))) { + verbosePrint("%s() LN%d samplePos=%"PRId64"\n", + __func__, __LINE__, pThreadInfo->samplePos); + } + } // tableSeq free_of_progressive: - tmfree(pThreadInfo->buffer); - printStatPerThread(pThreadInfo); - return NULL; + tmfree(pThreadInfo->buffer); + printStatPerThread(pThreadInfo); + return NULL; } static void* syncWrite(void *sarg) { - threadInfo *pThreadInfo = (threadInfo *)sarg; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + threadInfo *pThreadInfo = (threadInfo *)sarg; + SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - uint32_t interlaceRows; + setThreadName("syncWrite"); - if (superTblInfo) { - if ((superTblInfo->interlaceRows == 0) - && (g_args.interlace_rows > 0)) { - interlaceRows = g_args.interlace_rows; + uint32_t interlaceRows; + + if (superTblInfo) { + if ((superTblInfo->interlaceRows == 0) + && (g_args.interlace_rows > 0)) { + interlaceRows = g_args.interlace_rows; + } else { + interlaceRows = superTblInfo->interlaceRows; + } } else { - interlaceRows = superTblInfo->interlaceRows; + interlaceRows = g_args.interlace_rows; } - } else { - interlaceRows = g_args.interlace_rows; - } - if (interlaceRows > 0) { - // interlace mode - return syncWriteInterlace(pThreadInfo); - } else { - // progressive mode - return syncWriteProgressive(pThreadInfo); - } + if (interlaceRows > 0) { + // interlace mode + return syncWriteInterlace(pThreadInfo); + } else { + // progressive mode + return syncWriteProgressive(pThreadInfo); + } } static void callBack(void *param, TAOS_RES *res, int code) { @@ -6507,6 +6609,8 @@ static void *asyncWrite(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + setThreadName("asyncWrite"); + pThreadInfo->st = 0; pThreadInfo->et = 0; pThreadInfo->lastTs = pThreadInfo->start_time; @@ -6584,6 +6688,8 @@ static void startMultiThreadInsertData(int threads, char* db_name, } else { start_time = 1500000000000; } + debugPrint("%s() LN%d, start_time= %"PRId64"\n", + __func__, __LINE__, start_time); int64_t start = taosGetTimestampMs(); @@ -6746,7 +6852,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, exit(-1); } - char buffer[3000]; + char buffer[BUFFER_SIZE]; char *pstr = buffer; if ((superTblInfo) @@ -6905,6 +7011,7 @@ static void *readTable(void *sarg) { #if 1 threadInfo *pThreadInfo = (threadInfo *)sarg; TAOS *taos = pThreadInfo->taos; + setThreadName("readTable"); char command[BUFFER_SIZE] = "\0"; uint64_t sTime = pThreadInfo->start_time; char *tb_prefix = pThreadInfo->tb_prefix; @@ -6977,6 +7084,7 @@ static void *readMetric(void *sarg) { #if 1 threadInfo *pThreadInfo = (threadInfo *)sarg; TAOS *taos = pThreadInfo->taos; + setThreadName("readMetric"); char command[BUFFER_SIZE] = "\0"; FILE *fp = fopen(pThreadInfo->filePath, "a"); if (NULL == fp) { @@ -7153,6 +7261,8 @@ static int insertTestProcess() { static void *specifiedTableQuery(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; + setThreadName("specTableQuery"); + if (pThreadInfo->taos == NULL) { TAOS * taos = NULL; taos = taos_connect(g_queryInfo.host, @@ -7252,6 +7362,8 @@ static void *superTableQuery(void *sarg) { char sqlstr[MAX_QUERY_SQL_LENGTH]; threadInfo *pThreadInfo = (threadInfo *)sarg; + setThreadName("superTableQuery"); + if (pThreadInfo->taos == NULL) { TAOS * taos = NULL; taos = taos_connect(g_queryInfo.host, @@ -7554,6 +7666,8 @@ static void *superSubscribe(void *sarg) { TAOS_SUB* 
tsub[MAX_QUERY_SQL_COUNT] = {0}; uint64_t tsubSeq; + setThreadName("superSub"); + if (pThreadInfo->ntables > MAX_QUERY_SQL_COUNT) { errorPrint("The table number(%"PRId64") of the thread is more than max query sql count: %d\n", pThreadInfo->ntables, MAX_QUERY_SQL_COUNT); @@ -7700,6 +7814,8 @@ static void *specifiedSubscribe(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; // TAOS_SUB* tsub = NULL; + setThreadName("specSub"); + if (pThreadInfo->taos == NULL) { pThreadInfo->taos = taos_connect(g_queryInfo.host, g_queryInfo.user, @@ -7982,7 +8098,7 @@ static void initOfQueryMeta() { tstrncpy(g_queryInfo.password, TSDB_DEFAULT_PASS, MAX_PASSWORD_SIZE); } -static void setParaFromArg(){ +static void setParaFromArg() { if (g_args.host) { tstrncpy(g_Dbs.host, g_args.host, MAX_HOSTNAME_SIZE); } else { @@ -8018,10 +8134,10 @@ static void setParaFromArg(){ g_Dbs.do_aggreFunc = true; - char dataString[STRING_LEN]; + char dataString[TSDB_MAX_BYTES_PER_ROW]; char **data_type = g_args.datatype; - memset(dataString, 0, STRING_LEN); + memset(dataString, 0, TSDB_MAX_BYTES_PER_ROW); if (strcasecmp(data_type[0], "BINARY") == 0 || strcasecmp(data_type[0], "BOOL") == 0 @@ -8132,55 +8248,55 @@ static int isCommentLine(char *line) { static void querySqlFile(TAOS* taos, char* sqlFile) { - FILE *fp = fopen(sqlFile, "r"); - if (fp == NULL) { - printf("failed to open file %s, reason:%s\n", sqlFile, strerror(errno)); - return; - } - - int read_len = 0; - char * cmd = calloc(1, MAX_SQL_SIZE); - size_t cmd_len = 0; - char * line = NULL; - size_t line_len = 0; - - double t = taosGetTimestampMs(); - - while((read_len = tgetline(&line, &line_len, fp)) != -1) { - if (read_len >= MAX_SQL_SIZE) continue; - line[--read_len] = '\0'; - - if (read_len == 0 || isCommentLine(line)) { // line starts with # - continue; - } - - if (line[read_len - 1] == '\\') { - line[read_len - 1] = ' '; - memcpy(cmd + cmd_len, line, read_len); - cmd_len += read_len; - continue; - } - - memcpy(cmd + cmd_len, line, read_len); - if (0 != queryDbExec(taos, cmd, NO_INSERT_TYPE, false)) { - errorPrint("%s() LN%d, queryDbExec %s failed!\n", - __func__, __LINE__, cmd); - tmfree(cmd); - tmfree(line); - tmfclose(fp); + FILE *fp = fopen(sqlFile, "r"); + if (fp == NULL) { + printf("failed to open file %s, reason:%s\n", sqlFile, strerror(errno)); return; } - memset(cmd, 0, MAX_SQL_SIZE); - cmd_len = 0; - } - t = taosGetTimestampMs() - t; - printf("run %s took %.6f second(s)\n\n", sqlFile, t); + int read_len = 0; + char * cmd = calloc(1, TSDB_MAX_BYTES_PER_ROW); + size_t cmd_len = 0; + char * line = NULL; + size_t line_len = 0; - tmfree(cmd); - tmfree(line); - tmfclose(fp); - return; + double t = taosGetTimestampMs(); + + while((read_len = tgetline(&line, &line_len, fp)) != -1) { + if (read_len >= TSDB_MAX_BYTES_PER_ROW) continue; + line[--read_len] = '\0'; + + if (read_len == 0 || isCommentLine(line)) { // line starts with # + continue; + } + + if (line[read_len - 1] == '\\') { + line[read_len - 1] = ' '; + memcpy(cmd + cmd_len, line, read_len); + cmd_len += read_len; + continue; + } + + memcpy(cmd + cmd_len, line, read_len); + if (0 != queryDbExec(taos, cmd, NO_INSERT_TYPE, false)) { + errorPrint("%s() LN%d, queryDbExec %s failed!\n", + __func__, __LINE__, cmd); + tmfree(cmd); + tmfree(line); + tmfclose(fp); + return; + } + memset(cmd, 0, TSDB_MAX_BYTES_PER_ROW); + cmd_len = 0; + } + + t = taosGetTimestampMs() - t; + printf("run %s took %.6f second(s)\n\n", sqlFile, t); + + tmfree(cmd); + tmfree(line); + tmfclose(fp); + return; } static void 
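The reindented querySqlFile() above accumulates statements line by line: comment lines and empty lines are skipped, a trailing backslash joins the next line, and each completed statement is executed and the buffer reset. A standalone sketch of that accumulation loop which prints statements instead of calling queryDbExec; MAX_CMD is a stand-in for the real per-statement limit, and like the original it assumes each line ends in '\n' and stays under the buffer size:

```c
#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

#define MAX_CMD 4096   /* stand-in for the real per-statement buffer size */

int main(int argc, char **argv) {
    if (argc < 2) { fprintf(stderr, "usage: %s file.sql\n", argv[0]); return 1; }
    FILE *fp = fopen(argv[1], "r");
    if (!fp) { perror("fopen"); return 1; }

    char   *cmd = calloc(1, MAX_CMD);
    size_t  cmdLen = 0;
    char   *line = NULL;
    size_t  cap = 0;
    ssize_t readLen;

    while ((readLen = getline(&line, &cap, fp)) != -1) {
        if (readLen >= MAX_CMD) continue;        /* over-long line: ignored, as in the original */
        line[--readLen] = '\0';                  /* strip the trailing newline */
        if (readLen == 0 || line[0] == '#') continue;   /* empty or comment line */

        if (line[readLen - 1] == '\\') {         /* continuation: join with the next line */
            line[readLen - 1] = ' ';
            memcpy(cmd + cmdLen, line, readLen);
            cmdLen += readLen;
            continue;
        }

        memcpy(cmd + cmdLen, line, readLen);
        printf("execute: %s\n", cmd);            /* real code: queryDbExec(taos, cmd, ...) */
        memset(cmd, 0, MAX_CMD);
        cmdLen = 0;
    }

    free(cmd);
    free(line);
    fclose(fp);
    return 0;
}
```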
testMetaFile() { diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index 98521d8420..e5501b4366 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -1474,6 +1474,8 @@ static void* taosDumpOutWorkThreadFp(void *arg) STableRecord tableRecord; int fd; + setThreadName("dumpOutWorkThrd"); + char tmpBuf[4096] = {0}; sprintf(tmpBuf, ".tables.tmp.%d", pThread->threadIndex); fd = open(tmpBuf, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); @@ -2571,6 +2573,8 @@ static int taosDumpInOneFile(TAOS* taos, FILE* fp, char* fcharset, static void* taosDumpInWorkThreadFp(void *arg) { SThreadParaObj *pThread = (SThreadParaObj*)arg; + setThreadName("dumpInWorkThrd"); + for (int32_t f = 0; f < g_tsSqlFileNum; ++f) { if (f % pThread->totalThreads == pThread->threadIndex) { char *SQLFileName = g_tsDumpInSqlFiles[f]; diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c index 897c3a2f0f..7644f4d733 100644 --- a/src/mnode/src/mnodeSdb.c +++ b/src/mnode/src/mnodeSdb.c @@ -1113,6 +1113,7 @@ static void *sdbWorkerFp(void *pWorker) { void * unUsed; taosBlockSIGPIPE(); + setThreadName("sdbWorker"); while (1) { int32_t numOfMsgs = taosReadAllQitemsFromQset(tsSdbWQset, tsSdbWQall, &unUsed); diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 3631b1e005..6e5cf14b96 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -1503,6 +1503,18 @@ static int32_t mnodeChangeSuperTableColumn(SMnodeMsg *pMsg) { return TSDB_CODE_MND_FIELD_NOT_EXIST; } + // check exceed max row bytes + int32_t i; + uint32_t nLen = 0; + for (i = 0; i < pStable->numOfColumns; ++i) { + nLen += (pStable->schema[i].colId == col) ? pAlter->schema[0].bytes : pStable->schema[i].bytes; + } + if (nLen > TSDB_MAX_BYTES_PER_ROW) { + mError("msg:%p, app:%p stable:%s, change column, name:%s exceed max row bytes", pMsg, pMsg->rpcMsg.ahandle, + pStable->info.tableId, name); + return TSDB_CODE_MND_EXCEED_MAX_ROW_BYTES; + } + // update SSchema *schema = (SSchema *) (pStable->schema + col); ASSERT(schema->type == TSDB_DATA_TYPE_BINARY || schema->type == TSDB_DATA_TYPE_NCHAR); diff --git a/src/os/inc/osDef.h b/src/os/inc/osDef.h index 9176da5b8e..54a4f98254 100644 --- a/src/os/inc/osDef.h +++ b/src/os/inc/osDef.h @@ -210,6 +210,25 @@ extern "C" { #define PRIzu "zu" #endif + +#if defined(_TD_LINUX_64) || defined(_TD_LINUX_32) || defined(_TD_MIPS_64) || defined(_TD_ARM_32) || defined(_TD_ARM_64) || defined(_TD_DARWIN_64) + #if defined(_TD_DARWIN_64) + // MacOS + #if !defined(_GNU_SOURCE) + #define setThreadName(name) do { pthread_setname_np((name)); } while (0) + #else + // pthread_setname_np not defined + #define setThreadName(name) + #endif + #else + // Linux, length of name must <= 16 (the last '\0' included) + #define setThreadName(name) do { prctl(PR_SET_NAME, (name)); } while (0) + #endif +#else + // Windows + #define setThreadName(name) +#endif + #ifdef __cplusplus } #endif diff --git a/src/os/inc/osInc.h b/src/os/inc/osInc.h index 340ff34635..9b78110833 100644 --- a/src/os/inc/osInc.h +++ b/src/os/inc/osInc.h @@ -85,6 +85,7 @@ extern "C" { #include #include #include + #include #if !(defined(_ALPINE)) #include diff --git a/src/os/src/darwin/dwSemaphore.c b/src/os/src/darwin/dwSemaphore.c index 898410647a..25cb28cff1 100644 --- a/src/os/src/darwin/dwSemaphore.c +++ b/src/os/src/darwin/dwSemaphore.c @@ -41,6 +41,8 @@ static semaphore_t sem_exit; static void* sem_thread_routine(void *arg) { (void)arg; + setThreadName("sem_thrd"); + sem_port = mach_task_self(); 
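The osDef.h change above routes every thread name through a single setThreadName() macro: prctl(PR_SET_NAME, name) on Linux, where the name must fit in 16 bytes including the terminating '\0', pthread_setname_np(name) on macOS, and a no-op on Windows. A minimal Linux-only sketch (compile with -pthread) of a worker naming itself so it shows up under that name in `top -H` or /proc/&lt;pid&gt;/task/&lt;tid&gt;/comm; the thread name here is made up:

```c
/* Linux-only sketch of the prctl path used by the macro above */
#include <pthread.h>
#include <stdio.h>
#include <sys/prctl.h>

#define setThreadName(name) do { prctl(PR_SET_NAME, (name)); } while (0)

static void *worker(void *arg) {
    (void)arg;
    setThreadName("demoWorker");     /* at most 15 visible chars + '\0' on Linux */

    char buf[16] = {0};
    prctl(PR_GET_NAME, buf);         /* read the name back for demonstration */
    printf("thread name is now: %s\n", buf);
    return NULL;
}

int main(void) {
    pthread_t tid;
    pthread_create(&tid, NULL, worker, NULL);
    pthread_join(tid, NULL);
    return 0;
}
```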
kern_return_t ret = semaphore_create(sem_port, &sem_exit, SYNC_POLICY_FIFO, 0); if (ret != KERN_SUCCESS) { diff --git a/src/os/src/darwin/dwTimer.c b/src/os/src/darwin/dwTimer.c index ee1becc91a..d395a7f53f 100644 --- a/src/os/src/darwin/dwTimer.c +++ b/src/os/src/darwin/dwTimer.c @@ -32,6 +32,7 @@ static volatile int timer_stop = 0; static void* timer_routine(void *arg) { (void)arg; + setThreadName("timer"); int r = 0; struct timespec to = {0}; diff --git a/src/os/src/detail/osTimer.c b/src/os/src/detail/osTimer.c index b054f08c78..c381b3e825 100644 --- a/src/os/src/detail/osTimer.c +++ b/src/os/src/detail/osTimer.c @@ -38,6 +38,8 @@ static void *taosProcessAlarmSignal(void *tharg) { struct sigevent sevent = {{0}}; + setThreadName("alarmSignal"); + #ifdef _ALPINE sevent.sigev_notify = SIGEV_THREAD; sevent.sigev_value.sival_int = syscall(__NR_gettid); diff --git a/src/plugins/http/inc/httpParser.h b/src/plugins/http/inc/httpParser.h index fbe1ecee7e..f7b55958c8 100644 --- a/src/plugins/http/inc/httpParser.h +++ b/src/plugins/http/inc/httpParser.h @@ -100,6 +100,7 @@ typedef enum HTTP_PARSER_STATE { HTTP_PARSER_CHUNK, HTTP_PARSER_END, HTTP_PARSER_ERROR, + HTTP_PARSER_OPTIONAL_SP } HTTP_PARSER_STATE; typedef enum HTTP_AUTH_TYPE { diff --git a/src/plugins/http/src/httpParser.c b/src/plugins/http/src/httpParser.c index a68a9bd7ed..ba88a2b9cd 100644 --- a/src/plugins/http/src/httpParser.c +++ b/src/plugins/http/src/httpParser.c @@ -744,6 +744,15 @@ static int32_t httpParserOnSp(HttpParser *parser, HTTP_PARSER_STATE state, const return ok; } +static int32_t httpParserOnOptionalSp(HttpParser *parser, HTTP_PARSER_STATE state, const char c, int32_t *again) { + int32_t ok = 0; + if (c != ' ') { + *again = 1; + httpPopStack(parser); + } + return ok; +} + static int32_t httpParserOnStatusCode(HttpParser *parser, HTTP_PARSER_STATE state, const char c, int32_t *again) { HttpContext *pContext = parser->pContext; int32_t ok = 0; @@ -867,7 +876,7 @@ static int32_t httpParserOnHeader(HttpParser *parser, HTTP_PARSER_STATE state, c } httpPushStack(parser, HTTP_PARSER_CRLF); httpPushStack(parser, HTTP_PARSER_HEADER_VAL); - httpPushStack(parser, HTTP_PARSER_SP); + httpPushStack(parser, HTTP_PARSER_OPTIONAL_SP); httpPushStack(parser, HTTP_PARSER_HEADER_KEY); break; } @@ -1061,6 +1070,10 @@ static int32_t httpParseChar(HttpParser *parser, const char c, int32_t *again) { ok = httpParserOnSp(parser, state, c, again); break; } + if (state == HTTP_PARSER_OPTIONAL_SP) { + ok = httpParserOnOptionalSp(parser, state, c, again); + break; + } if (state == HTTP_PARSER_STATUS_CODE) { ok = httpParserOnStatusCode(parser, state, c, again); break; diff --git a/src/plugins/http/src/httpQueue.c b/src/plugins/http/src/httpQueue.c index 7f7ce40460..677ab0c91d 100644 --- a/src/plugins/http/src/httpQueue.c +++ b/src/plugins/http/src/httpQueue.c @@ -70,6 +70,8 @@ static void *httpProcessResultQueue(void *param) { int32_t type; void * unUsed; + setThreadName("httpResultQ"); + while (1) { if (taosReadQitemFromQset(tsHttpQset, &type, (void **)&pMsg, &unUsed) == 0) { httpDebug("qset:%p, http queue got no message from qset, exiting", tsHttpQset); diff --git a/src/plugins/http/src/httpServer.c b/src/plugins/http/src/httpServer.c index 9d98d3f113..f02859f165 100644 --- a/src/plugins/http/src/httpServer.c +++ b/src/plugins/http/src/httpServer.c @@ -117,6 +117,7 @@ static void httpProcessHttpData(void *param) { int32_t fdNum; taosSetMaskSIGPIPE(); + setThreadName("httpData"); while (1) { struct epoll_event events[HTTP_MAX_EVENTS]; @@ -208,6 
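The httpParser change above adds an HTTP_PARSER_OPTIONAL_SP state, presumably so a header value with or without a leading space after the colon is accepted: the handler consumes a space, and on any other character it pops the state and sets *again so the same character is immediately re-dispatched to the next state on the stack. A toy sketch of that "optional token" pattern with a tiny state stack; the structs and states below are invented for the example, not the real httpParser types:

```c
#include <stdio.h>

typedef enum { ST_DONE, ST_VALUE, ST_OPTIONAL_SP } State;

typedef struct {
    State stack[8];
    int   top;          /* index of the current state */
    char  value[64];
    int   vlen;
} ToyParser;

static void pop(ToyParser *p) { p->top--; }

/* returns 1 when the same character must be re-fed to the next state */
static int onOptionalSp(ToyParser *p, char c) {
    if (c == ' ') return 0;   /* space: simply consumed, as in the handler above */
    pop(p);                   /* not a space: hand this character to the next state */
    return 1;
}

static int onValue(ToyParser *p, char c) {
    if (c == '\n') { p->value[p->vlen] = '\0'; pop(p); return 0; }
    if (p->vlen < 63) p->value[p->vlen++] = c;
    return 0;
}

static void feed(ToyParser *p, char c) {
    int again = 1;
    while (again) {
        again = 0;
        switch (p->stack[p->top]) {
            case ST_OPTIONAL_SP: again = onOptionalSp(p, c); break;
            case ST_VALUE:       again = onValue(p, c);      break;
            default:             break;
        }
    }
}

int main(void) {
    const char *inputs[] = { " tdengine\n", "tdengine\n" };  /* with and without the space */
    for (int i = 0; i < 2; i++) {
        ToyParser p = { .stack = { ST_DONE, ST_VALUE, ST_OPTIONAL_SP }, .top = 2 };
        for (const char *c = inputs[i]; *c; c++) feed(&p, *c);
        printf("parsed value: '%s'\n", p.value);   /* both print 'tdengine' */
    }
    return 0;
}
```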
+209,7 @@ static void *httpAcceptHttpConnection(void *arg) { int32_t totalFds = 0; taosSetMaskSIGPIPE(); + setThreadName("httpAcceptConn"); pServer->fd = taosOpenTcpServerSocket(pServer->serverIp, pServer->serverPort); diff --git a/src/plugins/monitor/src/monMain.c b/src/plugins/monitor/src/monMain.c index 2c4a0c1a4c..960a097f5d 100644 --- a/src/plugins/monitor/src/monMain.c +++ b/src/plugins/monitor/src/monMain.c @@ -114,6 +114,7 @@ int32_t monStartSystem() { static void *monThreadFunc(void *param) { monDebug("starting to initialize monitor module ..."); + setThreadName("monThrd"); while (1) { static int32_t accessTimes = 0; diff --git a/src/plugins/mqtt/src/mqttSystem.c b/src/plugins/mqtt/src/mqttSystem.c index eacc3b1f74..e0f2f393bb 100644 --- a/src/plugins/mqtt/src/mqttSystem.c +++ b/src/plugins/mqtt/src/mqttSystem.c @@ -100,6 +100,8 @@ void mqttPublishCallback(void** unused, struct mqtt_response_publish* published) } void* mqttClientRefresher(void* client) { + setThreadName("mqttCliRefresh"); + while (tsMqttIsRuning) { mqtt_sync((struct mqtt_client*)client); taosMsleep(100); @@ -141,4 +143,4 @@ void mqttReconnectClient(struct mqtt_client* client, void** unused) { mqtt_reinit(client, sockfd, tsMqttStatus.sendbuf, tsMqttStatus.sendbufsz, tsMqttStatus.recvbuf, tsMqttStatus.recvbufsz); mqtt_connect(client, tsMqttClientId, NULL, NULL, 0, tsMqttUser, tsMqttPass, MQTT_CONNECT_CLEAN_SESSION, 400); mqtt_subscribe(client, tsMqttTopic, 0); -} \ No newline at end of file +} diff --git a/src/query/inc/qExtbuffer.h b/src/query/inc/qExtbuffer.h index b851fbb3e0..cf0e8ce31a 100644 --- a/src/query/inc/qExtbuffer.h +++ b/src/query/inc/qExtbuffer.h @@ -77,13 +77,13 @@ typedef struct tFilePagesItem { typedef struct SSchemaEx { struct SSchema field; - int16_t offset; + int32_t offset; } SSchemaEx; typedef struct SColumnModel { int32_t capacity; int32_t numOfCols; - int16_t rowSize; + int32_t rowSize; SSchemaEx *pFields; } SColumnModel; diff --git a/src/query/inc/qUtil.h b/src/query/inc/qUtil.h index b31a93fd2d..d2802d9fe0 100644 --- a/src/query/inc/qUtil.h +++ b/src/query/inc/qUtil.h @@ -66,7 +66,8 @@ static FORCE_INLINE SResultRow *getResultRow(SResultRowInfo *pResultRowInfo, int return pResultRowInfo->pResult[slot]; } -static FORCE_INLINE char *getPosInResultPage(SQueryAttr *pQueryAttr, tFilePage* page, int32_t rowOffset, int16_t offset) { +static FORCE_INLINE char* getPosInResultPage(SQueryAttr* pQueryAttr, tFilePage* page, int32_t rowOffset, + int32_t offset) { assert(rowOffset >= 0 && pQueryAttr != NULL); int32_t numOfRows = (int32_t)GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery); diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 93e6de3821..d96b260b13 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -3982,7 +3982,7 @@ void blockInfo_func(SQLFunctionCtx* pCtx) { int32_t len = *(int32_t*) pCtx->pInput; blockDistInfoFromBinary((char*)pCtx->pInput + sizeof(int32_t), len, pDist); - pDist->rowSize = (int16_t) pCtx->param[0].i64; + pDist->rowSize = (uint16_t)pCtx->param[0].i64; memcpy(pCtx->pOutput, pCtx->pInput, sizeof(int32_t) + len); @@ -4129,7 +4129,7 @@ void blockinfo_func_finalizer(SQLFunctionCtx* pCtx) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); STableBlockDist* pDist = (STableBlockDist*) GET_ROWCELL_INTERBUF(pResInfo); - pDist->rowSize = (int16_t)pCtx->param[0].i64; + pDist->rowSize = (uint16_t)pCtx->param[0].i64; generateBlockDistResult(pDist, pCtx->pOutput); // cannot set the numOfIteratedElems again 
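The qExtbuffer.h / qUtil.h changes above widen column offsets and rowSize from int16_t to int32_t. Presumably this guards against offsets accumulated over many wide (e.g. BINARY/NCHAR) result columns exceeding the 32767 limit of a signed 16-bit integer and wrapping negative; the exact limits are not stated in the patch, and the column sizes in the sketch below are made up only to show the wrap:

```c
#include <stdint.h>
#include <stdio.h>

int main(void) {
    /* hypothetical wide row: ten columns of 4000 bytes each */
    const int numCols  = 10;
    const int colBytes = 4000;

    int16_t off16 = 0;
    int32_t off32 = 0;
    for (int i = 0; i < numCols; i++) {
        off16 = (int16_t)(off16 + colBytes);  /* wraps once the sum passes 32767 */
        off32 += colBytes;
    }
    printf("int16_t offset: %d (wrapped)\n", off16);   /* negative value */
    printf("int32_t offset: %d\n", off32);             /* 40000 */
    return 0;
}
```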
since it is set during previous iteration diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 0f3d56515d..288a206491 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -3583,7 +3583,7 @@ void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pRe // Note: pResult->pos[i]->num == 0, there is only fixed number of results for each group tFilePage* bufPage = getResBufPage(pRuntimeEnv->pResultBuf, pResult->pageId); - int16_t offset = 0; + int32_t offset = 0; for (int32_t i = 0; i < numOfOutput; ++i) { pCtx[i].resultInfo = getResultCell(pResult, i, rowCellInfoOffset); @@ -3897,7 +3897,7 @@ static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pRow->pageId); - int16_t offset = 0; + int32_t offset = 0; for (int32_t j = 0; j < pBlock->info.numOfCols; ++j) { SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, j); int32_t bytes = pColInfoData->info.bytes; diff --git a/src/query/src/qTsbuf.c b/src/query/src/qTsbuf.c index 27c877fafa..825b7960de 100644 --- a/src/query/src/qTsbuf.c +++ b/src/query/src/qTsbuf.c @@ -267,6 +267,10 @@ static void writeDataToDisk(STSBuf* pTSBuf) { if (pBlock->tag.nType == TSDB_DATA_TYPE_BINARY || pBlock->tag.nType == TSDB_DATA_TYPE_NCHAR) { metaLen += (int32_t)fwrite(&pBlock->tag.nLen, 1, sizeof(pBlock->tag.nLen), pTSBuf->f); metaLen += (int32_t)fwrite(pBlock->tag.pz, 1, (size_t)pBlock->tag.nLen, pTSBuf->f); + } else if (pBlock->tag.nType == TSDB_DATA_TYPE_FLOAT) { + metaLen += (int32_t)fwrite(&pBlock->tag.nLen, 1, sizeof(pBlock->tag.nLen), pTSBuf->f); + float tfloat = (float)pBlock->tag.dKey; + metaLen += (int32_t)fwrite(&tfloat, 1, (size_t) pBlock->tag.nLen, pTSBuf->f); } else if (pBlock->tag.nType != TSDB_DATA_TYPE_NULL) { metaLen += (int32_t)fwrite(&pBlock->tag.nLen, 1, sizeof(pBlock->tag.nLen), pTSBuf->f); metaLen += (int32_t)fwrite(&pBlock->tag.i64, 1, (size_t) pBlock->tag.nLen, pTSBuf->f); @@ -351,6 +355,11 @@ STSBlock* readDataFromDisk(STSBuf* pTSBuf, int32_t order, bool decomp) { sz = fread(pBlock->tag.pz, (size_t)pBlock->tag.nLen, 1, pTSBuf->f); UNUSED(sz); + } else if (pBlock->tag.nType == TSDB_DATA_TYPE_FLOAT) { + float tfloat = 0; + sz = fread(&tfloat, (size_t) pBlock->tag.nLen, 1, pTSBuf->f); + pBlock->tag.dKey = (double)tfloat; + UNUSED(sz); } else if (pBlock->tag.nType != TSDB_DATA_TYPE_NULL) { //TODO check the return value sz = fread(&pBlock->tag.i64, (size_t) pBlock->tag.nLen, 1, pTSBuf->f); UNUSED(sz); diff --git a/src/rpc/src/rpcTcp.c b/src/rpc/src/rpcTcp.c index 029629eff0..e9feeef9d3 100644 --- a/src/rpc/src/rpcTcp.c +++ b/src/rpc/src/rpcTcp.c @@ -242,6 +242,7 @@ static void *taosAcceptTcpConnection(void *arg) { pServerObj = (SServerObj *)arg; tDebug("%s TCP server is ready, ip:0x%x:%hu", pServerObj->label, pServerObj->ip, pServerObj->port); + setThreadName("acceptTcpConn"); while (1) { socklen_t addrlen = sizeof(caddr); @@ -528,6 +529,11 @@ static void *taosProcessTcpData(void *param) { SFdObj *pFdObj; struct epoll_event events[maxEvents]; SRecvInfo recvInfo; + char name[16]; + + memset(name, 0, sizeof(name)); + snprintf(name, 16, "%s-tcpData", pThreadObj->label); + setThreadName(name); while (1) { int fdNum = epoll_wait(pThreadObj->pollFd, events, maxEvents, TAOS_EPOLL_WAIT_TIME); diff --git a/src/rpc/src/rpcUdp.c b/src/rpc/src/rpcUdp.c index 7a46dbe5c3..086a390cb8 100644 --- a/src/rpc/src/rpcUdp.c +++ b/src/rpc/src/rpcUdp.c @@ -195,6 +195,8 @@ static void *taosRecvUdpData(void *param) { 
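The qTsbuf.c change above adds a FLOAT branch when block tags are written to and read from the TS buffer file: the tag's double dKey is narrowed to a float of tag.nLen bytes on write and widened back to double on read. A hedged sketch of that round trip over a temporary file; the tVariant-style struct below is a simplified stand-in:

```c
#include <stdint.h>
#include <stdio.h>

/* stand-in for the tag variant: a double key persisted as a 4-byte float */
typedef struct {
    int32_t nLen;
    double  dKey;
} ToyTag;

int main(void) {
    ToyTag out = { .nLen = sizeof(float), .dKey = 3.25 };

    FILE *f = tmpfile();
    if (!f) return 1;

    /* write: length first, then the narrowed float (mirrors writeDataToDisk) */
    fwrite(&out.nLen, 1, sizeof(out.nLen), f);
    float tfloat = (float)out.dKey;
    fwrite(&tfloat, 1, (size_t)out.nLen, f);

    /* read: recover the length, then widen the float back to double */
    rewind(f);
    ToyTag in = {0};
    size_t sz;
    sz = fread(&in.nLen, 1, sizeof(in.nLen), f); (void)sz;
    float rfloat = 0;
    sz = fread(&rfloat, (size_t)in.nLen, 1, f);  (void)sz;
    in.dKey = (double)rfloat;

    printf("restored tag value: %f (nLen=%d)\n", in.dKey, in.nLen);
    fclose(f);
    return 0;
}
```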
tDebug("%s UDP thread is created, index:%d", pConn->label, pConn->index); char *msg = pConn->buffer; + setThreadName("recvUdpData"); + while (1) { dataLen = recvfrom(pConn->fd, pConn->buffer, RPC_MAX_UDP_SIZE, 0, (struct sockaddr *)&sourceAdd, &addLen); if (dataLen <= 0) { diff --git a/src/rpc/test/rclient.c b/src/rpc/test/rclient.c index faa6d40da3..de30114bd1 100644 --- a/src/rpc/test/rclient.c +++ b/src/rpc/test/rclient.c @@ -47,6 +47,8 @@ static int tcount = 0; static void *sendRequest(void *param) { SInfo *pInfo = (SInfo *)param; SRpcMsg rpcMsg = {0}; + + setThreadName("sendCliReq"); tDebug("thread:%d, start to send request", pInfo->index); diff --git a/src/rpc/test/rsclient.c b/src/rpc/test/rsclient.c index a152d8e4a5..3e94a56efb 100644 --- a/src/rpc/test/rsclient.c +++ b/src/rpc/test/rsclient.c @@ -39,8 +39,10 @@ static int terror = 0; static void *sendRequest(void *param) { SInfo *pInfo = (SInfo *)param; - SRpcMsg rpcMsg, rspMsg; + SRpcMsg rpcMsg, rspMsg; + setThreadName("sendSrvReq"); + tDebug("thread:%d, start to send request", pInfo->index); while ( pInfo->numOfReqs == 0 || pInfo->num < pInfo->numOfReqs) { diff --git a/src/sync/src/syncRestore.c b/src/sync/src/syncRestore.c index c0d66316cd..bf9d5201a0 100644 --- a/src/sync/src/syncRestore.c +++ b/src/sync/src/syncRestore.c @@ -263,6 +263,7 @@ static int32_t syncRestoreDataStepByStep(SSyncPeer *pPeer) { } void *syncRestoreData(void *param) { + setThreadName("syncRestoreData"); int64_t rid = (int64_t)param; SSyncPeer *pPeer = syncAcquirePeer(rid); if (pPeer == NULL) { diff --git a/src/sync/src/syncRetrieve.c b/src/sync/src/syncRetrieve.c index c86ab85499..89fdda0686 100644 --- a/src/sync/src/syncRetrieve.c +++ b/src/sync/src/syncRetrieve.c @@ -415,6 +415,7 @@ static int32_t syncRetrieveDataStepByStep(SSyncPeer *pPeer) { } void *syncRetrieveData(void *param) { + setThreadName("syncRetrievData"); int64_t rid = (int64_t)param; SSyncPeer *pPeer = syncAcquirePeer(rid); if (pPeer == NULL) { diff --git a/src/sync/src/syncTcp.c b/src/sync/src/syncTcp.c index 3ad9e9bba0..698245f9e4 100644 --- a/src/sync/src/syncTcp.c +++ b/src/sync/src/syncTcp.c @@ -195,6 +195,8 @@ static void *syncProcessTcpData(void *param) { SConnObj * pConn = NULL; struct epoll_event events[maxEvents]; + setThreadName("syncTcpData"); + void *buffer = malloc(pInfo->bufferSize); taosBlockSIGPIPE(); @@ -257,6 +259,7 @@ static void *syncAcceptPeerTcpConnection(void *argv) { SPoolInfo *pInfo = &pPool->info; taosBlockSIGPIPE(); + setThreadName("acceptTcpConn"); while (1) { struct sockaddr_in clientAddr; diff --git a/src/sync/test/syncClient.c b/src/sync/test/syncClient.c index 23ea54ee0c..303d2376ef 100644 --- a/src/sync/test/syncClient.c +++ b/src/sync/test/syncClient.c @@ -48,6 +48,8 @@ void *sendRequest(void *param) { SInfo * pInfo = (SInfo *)param; SRpcMsg rpcMsg = {0}; + setThreadName("sendCliReq"); + uDebug("thread:%d, start to send request", pInfo->index); while (pInfo->numOfReqs == 0 || pInfo->num < pInfo->numOfReqs) { diff --git a/src/sync/test/syncServer.c b/src/sync/test/syncServer.c index eeaa6a08c2..a3d0696648 100644 --- a/src/sync/test/syncServer.c +++ b/src/sync/test/syncServer.c @@ -178,6 +178,8 @@ void *processWriteQueue(void *param) { int type; void *item; + setThreadName("writeQ"); + while (1) { int ret = taosReadQitem(qhandle, &type, &item); if (ret <= 0) { diff --git a/src/tsdb/inc/tsdbMemTable.h b/src/tsdb/inc/tsdbMemTable.h index babb7024b2..67e9976c70 100644 --- a/src/tsdb/inc/tsdbMemTable.h +++ b/src/tsdb/inc/tsdbMemTable.h @@ -71,27 +71,27 @@ int 
tsdbLoadDataFromCache(STable* pTable, SSkipListIterator* pIter, TSKEY maxK TKEY* filterKeys, int nFilterKeys, bool keepDup, SMergeInfo* pMergeInfo); void* tsdbCommitData(STsdbRepo* pRepo); -static FORCE_INLINE SDataRow tsdbNextIterRow(SSkipListIterator* pIter) { +static FORCE_INLINE SMemRow tsdbNextIterRow(SSkipListIterator* pIter) { if (pIter == NULL) return NULL; SSkipListNode* node = tSkipListIterGet(pIter); if (node == NULL) return NULL; - return (SDataRow)SL_GET_NODE_DATA(node); + return (SMemRow)SL_GET_NODE_DATA(node); } static FORCE_INLINE TSKEY tsdbNextIterKey(SSkipListIterator* pIter) { - SDataRow row = tsdbNextIterRow(pIter); + SMemRow row = tsdbNextIterRow(pIter); if (row == NULL) return TSDB_DATA_TIMESTAMP_NULL; - return dataRowKey(row); + return memRowKey(row); } static FORCE_INLINE TKEY tsdbNextIterTKey(SSkipListIterator* pIter) { - SDataRow row = tsdbNextIterRow(pIter); + SMemRow row = tsdbNextIterRow(pIter); if (row == NULL) return TKEY_NULL; - return dataRowTKey(row); + return memRowTKey(row); } #endif /* _TD_TSDB_MEMTABLE_H_ */ \ No newline at end of file diff --git a/src/tsdb/inc/tsdbMeta.h b/src/tsdb/inc/tsdbMeta.h index 45bbd5a7c6..9a8de01f71 100644 --- a/src/tsdb/inc/tsdbMeta.h +++ b/src/tsdb/inc/tsdbMeta.h @@ -32,7 +32,7 @@ typedef struct STable { void* eventHandler; // TODO void* streamHandler; // TODO TSKEY lastKey; - SDataRow lastRow; + SMemRow lastRow; char* sql; void* cqhandle; SRWLatch latch; // TODO: implementa latch functions @@ -148,7 +148,7 @@ static FORCE_INLINE STSchema *tsdbGetTableTagSchema(STable *pTable) { } static FORCE_INLINE TSKEY tsdbGetTableLastKeyImpl(STable* pTable) { - ASSERT(pTable->lastRow == NULL || pTable->lastKey == dataRowKey(pTable->lastRow)); + ASSERT((pTable->lastRow == NULL) || (pTable->lastKey == memRowKey(pTable->lastRow))); return pTable->lastKey; } diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index 75b072b063..6330da6058 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -920,7 +920,8 @@ int tsdbWriteBlockImpl(STsdbRepo *pRepo, STable *pTable, SDFile *pDFile, SDataCo SDataCol * pDataCol = pDataCols->cols + ncol; SBlockCol *pBlockCol = pBlockData->cols + nColsNotAllNull; - if (isNEleNull(pDataCol, rowsToWrite)) { // all data to commit are NULL, just ignore it + // if (isNEleNull(pDataCol, rowsToWrite)) { // all data to commit are NULL, just ignore it + if (isAllRowsNull(pDataCol)) { // all data to commit are NULL, just ignore it continue; } @@ -1264,12 +1265,12 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt while (true) { key1 = (*iter >= pDataCols->numOfRows) ? 
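The SDataRow-to-SMemRow switch that starts in the headers above threads through the rest of this patch: the accessors it introduces (memRowSetType, isDataRow/isKvRow, memRowDataBody/memRowKvBody, memRowKey) suggest a row handle that wraps either a schema-ordered data row or a sparse KV row, with a type discriminator deciding how the body is read. The struct below is only a toy model of that dispatching idea, assuming nothing about TDengine's actual binary layout:

```c
#include <stdint.h>
#include <stdio.h>

typedef enum { ROW_DATA = 0, ROW_KV = 1 } RowType;

typedef struct { int64_t ts; int32_t v; }                ToyDataRow;
typedef struct { int64_t ts; int16_t colId; int32_t v; } ToyKvRow;

typedef struct {
    uint8_t type;                                  /* cf. memRowSetType / isDataRow / isKvRow */
    union { ToyDataRow data; ToyKvRow kv; } body;  /* cf. memRowDataBody / memRowKvBody */
} ToyMemRow;

static int64_t toyRowKey(const ToyMemRow *r) {
    /* both layouts expose a timestamp key, so the accessor only dispatches on the type */
    return (r->type == ROW_DATA) ? r->body.data.ts : r->body.kv.ts;
}

int main(void) {
    ToyMemRow a = { .type = ROW_DATA, .body.data = { .ts = 1600000000000LL, .v = 42 } };
    ToyMemRow b = { .type = ROW_KV,   .body.kv   = { .ts = 1600000000001LL, .colId = 2, .v = 7 } };
    printf("keys: %lld %lld\n", (long long)toyRowKey(&a), (long long)toyRowKey(&b));
    return 0;
}
```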
INT64_MAX : dataColsKeyAt(pDataCols, *iter); bool isRowDel = false; - SDataRow row = tsdbNextIterRow(pCommitIter->pIter); - if (row == NULL || dataRowKey(row) > maxKey) { + SMemRow row = tsdbNextIterRow(pCommitIter->pIter); + if (row == NULL || memRowKey(row) > maxKey) { key2 = INT64_MAX; } else { - key2 = dataRowKey(row); - isRowDel = dataRowDeleted(row); + key2 = memRowKey(row); + isRowDel = memRowDeleted(row); } if (key1 == INT64_MAX && key2 == INT64_MAX) break; @@ -1284,24 +1285,24 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt (*iter)++; } else if (key1 > key2) { if (!isRowDel) { - if (pSchema == NULL || schemaVersion(pSchema) != dataRowVersion(row)) { - pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, dataRowVersion(row)); + if (pSchema == NULL || schemaVersion(pSchema) != memRowVersion(row)) { + pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, memRowVersion(row)); ASSERT(pSchema != NULL); } - tdAppendDataRowToDataCol(row, pSchema, pTarget); + tdAppendMemRowToDataCol(row, pSchema, pTarget); } tSkipListIterNext(pCommitIter->pIter); } else { if (update) { if (!isRowDel) { - if (pSchema == NULL || schemaVersion(pSchema) != dataRowVersion(row)) { - pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, dataRowVersion(row)); + if (pSchema == NULL || schemaVersion(pSchema) != memRowVersion(row)) { + pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, memRowVersion(row)); ASSERT(pSchema != NULL); } - tdAppendDataRowToDataCol(row, pSchema, pTarget); + tdAppendMemRowToDataCol(row, pSchema, pTarget); } } else { ASSERT(!isRowDel); diff --git a/src/tsdb/src/tsdbCommitQueue.c b/src/tsdb/src/tsdbCommitQueue.c index e25014bc1e..e45ac05e97 100644 --- a/src/tsdb/src/tsdbCommitQueue.c +++ b/src/tsdb/src/tsdbCommitQueue.c @@ -158,6 +158,8 @@ static void *tsdbLoopCommit(void *arg) { STsdbRepo * pRepo = NULL; TSDB_REQ_T req; + setThreadName("tsdbCommit"); + while (true) { pthread_mutex_lock(&(pQueue->lock)); @@ -208,4 +210,4 @@ void tsdbDecCommitRef(int vgId) { int refCount = atomic_sub_fetch_32(&tsCommitQueue.refCount, 1); pthread_cond_broadcast(&(tsCommitQueue.queueNotEmpty)); tsdbDebug("vgId:%d, dec commit queue ref to %d", vgId, refCount); -} \ No newline at end of file +} diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index 09f3585b9d..8266a7c20f 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -641,7 +641,7 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea int numColumns; int32_t blockIdx; SDataStatis* pBlockStatis = NULL; - SDataRow row = NULL; + SMemRow row = NULL; // restore last column data with last schema int err = 0; @@ -657,13 +657,15 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea } } - row = taosTMalloc(dataRowMaxBytesFromSchema(pSchema)); + row = taosTMalloc(memRowMaxBytesFromSchema(pSchema)); if (row == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; err = -1; goto out; } - tdInitDataRow(row, pSchema); + + memRowSetType(row, SMEM_ROW_DATA); + tdInitDataRow(memRowDataBody(row), pSchema); // first load block index info if (tsdbLoadBlockInfo(pReadh, NULL) < 0) { @@ -720,9 +722,9 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea // OK,let's load row from backward to get not-null column for (int32_t rowId = pBlock->numOfRows - 1; rowId >= 0; rowId--) { SDataCol *pDataCol = pReadh->pDCols[0]->cols + i; - tdAppendColVal(row, tdGetColDataOfRow(pDataCol, 
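tsdbLoadAndMergeFromCache() above is a two-way merge: key1 is the next timestamp in the on-disk columns, key2 the next memtable row's key (memRowKey), whichever is smaller is appended, and on equal keys the memtable row wins only when the update option is set. A compact sketch of that merge over two sorted key arrays standing in for the data columns and the skip-list iterator:

```c
#include <stdint.h>
#include <stdio.h>

#define KEY_MAX INT64_MAX

/* merge two ascending key streams, letting "mem" override "disk"
 * on duplicate keys only when update is true */
static void mergeSketch(const int64_t *disk, int nd,
                        const int64_t *mem,  int nm, int update) {
    int i = 0, j = 0;
    while (1) {
        int64_t key1 = (i < nd) ? disk[i] : KEY_MAX;
        int64_t key2 = (j < nm) ? mem[j]  : KEY_MAX;
        if (key1 == KEY_MAX && key2 == KEY_MAX) break;

        if (key1 < key2) {
            printf("disk %lld\n", (long long)key1); i++;
        } else if (key2 < key1) {
            printf("mem  %lld\n", (long long)key2); j++;
        } else {                        /* duplicate timestamp */
            printf("%s %lld\n", update ? "mem " : "disk", (long long)key1);
            i++; j++;                   /* the losing copy is skipped */
        }
    }
}

int main(void) {
    int64_t disk[] = {10, 20, 30};
    int64_t mem[]  = {20, 25};
    mergeSketch(disk, 3, mem, 2, 1 /* update on: the mem row replaces key 20 */);
    return 0;
}
```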
rowId), pCol->type, pCol->bytes, pCol->offset); + tdAppendColVal(memRowDataBody(row), tdGetColDataOfRow(pDataCol, rowId), pCol->type, pCol->offset); //SDataCol *pDataCol = readh.pDCols[0]->cols + j; - void* value = tdGetRowDataOfCol(row, (int8_t)pCol->type, TD_DATA_ROW_HEAD_SIZE + pCol->offset); + void *value = tdGetRowDataOfCol(memRowDataBody(row), (int8_t)pCol->type, TD_DATA_ROW_HEAD_SIZE + pCol->offset); if (isNull(value, pCol->type)) { continue; } @@ -742,8 +744,8 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea // save row ts(in column 0) pDataCol = pReadh->pDCols[0]->cols + 0; pCol = schemaColAt(pSchema, 0); - tdAppendColVal(row, tdGetColDataOfRow(pDataCol, rowId), pCol->type, pCol->bytes, pCol->offset); - pLastCol->ts = dataRowKey(row); + tdAppendColVal(memRowDataBody(row), tdGetColDataOfRow(pDataCol, rowId), pCol->type, pCol->offset); + pLastCol->ts = memRowKey(row); pTable->restoreColumnNum += 1; @@ -779,18 +781,18 @@ static int tsdbRestoreLastRow(STsdbRepo *pRepo, STable *pTable, SReadH* pReadh, // Get the data in row STSchema *pSchema = tsdbGetTableSchema(pTable); - pTable->lastRow = taosTMalloc(dataRowMaxBytesFromSchema(pSchema)); + pTable->lastRow = taosTMalloc(memRowMaxBytesFromSchema(pSchema)); if (pTable->lastRow == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; return -1; } - - tdInitDataRow(pTable->lastRow, pSchema); + memRowSetType(pTable->lastRow, SMEM_ROW_DATA); + tdInitDataRow(memRowDataBody(pTable->lastRow), pSchema); for (int icol = 0; icol < schemaNCols(pSchema); icol++) { STColumn *pCol = schemaColAt(pSchema, icol); SDataCol *pDataCol = pReadh->pDCols[0]->cols + icol; - tdAppendColVal(pTable->lastRow, tdGetColDataOfRow(pDataCol, pBlock->numOfRows - 1), pCol->type, pCol->bytes, - pCol->offset); + tdAppendColVal(memRowDataBody(pTable->lastRow), tdGetColDataOfRow(pDataCol, pBlock->numOfRows - 1), pCol->type, + pCol->offset); } return 0; diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index 50b5d32177..9ac5503e5f 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -21,7 +21,7 @@ typedef struct { int32_t totalLen; int32_t len; - SDataRow row; + SMemRow row; } SSubmitBlkIter; typedef struct { @@ -36,20 +36,19 @@ static STableData *tsdbNewTableData(STsdbCfg *pCfg, STable *pTable); static void tsdbFreeTableData(STableData *pTableData); static char * tsdbGetTsTupleKey(const void *data); static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables); -static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, SDataRow row); +static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, SMemRow row); static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter); -static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter); +static SMemRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter); static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg); static int tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, int32_t *affectedrows); -static int tsdbCopyRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable, void **ppRow); +static int tsdbCopyRowToMem(STsdbRepo *pRepo, SMemRow row, STable *pTable, void **ppRow); static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter); static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock); static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable); static int tsdbInsertDataToTableImpl(STsdbRepo 
*pRepo, STable *pTable, void **rows, int rowCounter); static void tsdbFreeRows(STsdbRepo *pRepo, void **rows, int rowCounter); -static int tsdbUpdateTableLatestInfo(STsdbRepo *pRepo, STable *pTable, SDataRow row); - -static FORCE_INLINE int tsdbCheckRowRange(STsdbRepo *pRepo, STable *pTable, SDataRow row, TSKEY minKey, TSKEY maxKey, +static int tsdbUpdateTableLatestInfo(STsdbRepo *pRepo, STable *pTable, SMemRow row); +static FORCE_INLINE int tsdbCheckRowRange(STsdbRepo *pRepo, STable *pTable, SMemRow row, TSKEY minKey, TSKEY maxKey, TSKEY now); int32_t tsdbInsertData(STsdbRepo *repo, SSubmitMsg *pMsg, SShellSubmitRspMsg *pRsp) { @@ -354,7 +353,7 @@ int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey TSKEY fKey = 0; bool isRowDel = false; int filterIter = 0; - SDataRow row = NULL; + SMemRow row = NULL; SMergeInfo mInfo; if (pMergeInfo == NULL) pMergeInfo = &mInfo; @@ -365,12 +364,12 @@ int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey if (pCols) tdResetDataCols(pCols); row = tsdbNextIterRow(pIter); - if (row == NULL || dataRowKey(row) > maxKey) { + if (row == NULL || memRowKey(row) > maxKey) { rowKey = INT64_MAX; isRowDel = false; } else { - rowKey = dataRowKey(row); - isRowDel = dataRowDeleted(row); + rowKey = memRowKey(row); + isRowDel = memRowDeleted(row); } if (filterIter >= nFilterKeys) { @@ -407,12 +406,12 @@ int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey tSkipListIterNext(pIter); row = tsdbNextIterRow(pIter); - if (row == NULL || dataRowKey(row) > maxKey) { + if (row == NULL || memRowKey(row) > maxKey) { rowKey = INT64_MAX; isRowDel = false; } else { - rowKey = dataRowKey(row); - isRowDel = dataRowDeleted(row); + rowKey = memRowKey(row); + isRowDel = memRowDeleted(row); } } else { if (isRowDel) { @@ -437,12 +436,12 @@ int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey tSkipListIterNext(pIter); row = tsdbNextIterRow(pIter); - if (row == NULL || dataRowKey(row) > maxKey) { + if (row == NULL || memRowKey(row) > maxKey) { rowKey = INT64_MAX; isRowDel = false; } else { - rowKey = dataRowKey(row); - isRowDel = dataRowDeleted(row); + rowKey = memRowKey(row); + isRowDel = memRowDeleted(row); } filterIter++; @@ -548,7 +547,7 @@ static void tsdbFreeTableData(STableData *pTableData) { } } -static char *tsdbGetTsTupleKey(const void *data) { return dataRowTuple((SDataRow)data); } +static char *tsdbGetTsTupleKey(const void *data) { return memRowTuple((SMemRow)data); } static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables) { ASSERT(pMemTable->maxTables < maxTables); @@ -572,17 +571,17 @@ static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables) { return 0; } -static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, SDataRow row) { +static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, SMemRow row) { if (pCols) { - if (*ppSchema == NULL || schemaVersion(*ppSchema) != dataRowVersion(row)) { - *ppSchema = tsdbGetTableSchemaImpl(pTable, false, false, dataRowVersion(row)); + if (*ppSchema == NULL || schemaVersion(*ppSchema) != memRowVersion(row)) { + *ppSchema = tsdbGetTableSchemaImpl(pTable, false, false, memRowVersion(row)); if (*ppSchema == NULL) { ASSERT(false); return -1; } } - tdAppendDataRowToDataCol(row, *ppSchema, pCols); + tdAppendMemRowToDataCol(row, *ppSchema, pCols); } return 0; @@ -592,31 +591,32 @@ static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter 
*pIter) { if (pBlock->dataLen <= 0) return -1; pIter->totalLen = pBlock->dataLen; pIter->len = 0; - pIter->row = (SDataRow)(pBlock->data+pBlock->schemaLen); + pIter->row = (SMemRow)(pBlock->data + pBlock->schemaLen); return 0; } -static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter) { - SDataRow row = pIter->row; +static SMemRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter) { + SMemRow row = pIter->row; // firstly, get current row if (row == NULL) return NULL; - pIter->len += dataRowLen(row); - if (pIter->len >= pIter->totalLen) { + pIter->len += memRowTLen(row); + if (pIter->len >= pIter->totalLen) { // reach the end pIter->row = NULL; } else { - pIter->row = (char *)row + dataRowLen(row); + pIter->row = (char *)row + memRowTLen(row); // secondly, move to next row } return row; } -static FORCE_INLINE int tsdbCheckRowRange(STsdbRepo *pRepo, STable *pTable, SDataRow row, TSKEY minKey, TSKEY maxKey, +static FORCE_INLINE int tsdbCheckRowRange(STsdbRepo *pRepo, STable *pTable, SMemRow row, TSKEY minKey, TSKEY maxKey, TSKEY now) { - if (dataRowKey(row) < minKey || dataRowKey(row) > maxKey) { + TSKEY rowKey = memRowKey(row); + if (rowKey < minKey || rowKey > maxKey) { tsdbError("vgId:%d table %s tid %d uid %" PRIu64 " timestamp is out of range! now %" PRId64 " minKey %" PRId64 " maxKey %" PRId64 " row key %" PRId64, REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), now, minKey, maxKey, - dataRowKey(row)); + rowKey); terrno = TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE; return -1; } @@ -630,7 +630,7 @@ static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg) { SSubmitMsgIter msgIter = {0}; SSubmitBlk * pBlock = NULL; SSubmitBlkIter blkIter = {0}; - SDataRow row = NULL; + SMemRow row = NULL; TSKEY now = taosGetTimestamp(pRepo->config.precision); TSKEY minKey = now - tsTickPerDay[pRepo->config.precision] * pRepo->config.keep; TSKEY maxKey = now + tsTickPerDay[pRepo->config.precision] * pRepo->config.daysPerFile; @@ -698,7 +698,7 @@ static int tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, int32_t * int64_t points = 0; STable * pTable = NULL; SSubmitBlkIter blkIter = {0}; - SDataRow row = NULL; + SMemRow row = NULL; void * rows[TSDB_MAX_INSERT_BATCH] = {0}; int rowCounter = 0; @@ -744,10 +744,10 @@ _err: return -1; } -static int tsdbCopyRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable, void **ppRow) { +static int tsdbCopyRowToMem(STsdbRepo *pRepo, SMemRow row, STable *pTable, void **ppRow) { STsdbCfg * pCfg = &pRepo->config; - TKEY tkey = dataRowTKey(row); - TSKEY key = dataRowKey(row); + TKEY tkey = memRowTKey(row); + TSKEY key = memRowKey(row); bool isRowDelete = TKEY_IS_DELETED(tkey); if (isRowDelete) { @@ -765,15 +765,15 @@ static int tsdbCopyRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable, void } } - void *pRow = tsdbAllocBytes(pRepo, dataRowLen(row)); + void *pRow = tsdbAllocBytes(pRepo, memRowTLen(row)); if (pRow == NULL) { - tsdbError("vgId:%d failed to insert row with key %" PRId64 " to table %s while allocate %d bytes since %s", - REPO_ID(pRepo), key, TABLE_CHAR_NAME(pTable), dataRowLen(row), tstrerror(terrno)); + tsdbError("vgId:%d failed to insert row with key %" PRId64 " to table %s while allocate %" PRIu32 " bytes since %s", + REPO_ID(pRepo), key, TABLE_CHAR_NAME(pTable), memRowTLen(row), tstrerror(terrno)); return -1; } - dataRowCpy(pRow, row); - ppRow[0] = pRow; + memRowCpy(pRow, row); + ppRow[0] = pRow; // save the memory address of data rows tsdbTrace("vgId:%d a row is %s table %s tid %d uid %" PRIu64 " key %" 
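tsdbGetSubmitBlkNext() above walks a submit block as a packed sequence of variable-length rows: it returns the current row, advances by that row's total length (memRowTLen), and signals the end once the accumulated length reaches the block's dataLen. A generic sketch of iterating length-prefixed records in a flat buffer; the record layout below is invented for the example:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* toy record: a uint16_t total length followed by a payload */
typedef struct {
    const char *buf;
    int32_t     totalLen;
    int32_t     len;       /* bytes consumed so far */
} BlkIter;

static const char *blkNext(BlkIter *it, uint16_t *recLen) {
    if (it->len >= it->totalLen) return NULL;      /* reached the end of the block */
    const char *rec = it->buf + it->len;
    memcpy(recLen, rec, sizeof(*recLen));          /* first field = the record's total length */
    it->len += *recLen;                            /* advance by the record's own length */
    return rec;
}

int main(void) {
    /* build two records: payload "abcd" then payload "xyz" */
    char buf[32]; int32_t off = 0;
    uint16_t l1 = 2 + 4; memcpy(buf + off, &l1, 2); memcpy(buf + off + 2, "abcd", 4); off += l1;
    uint16_t l2 = 2 + 3; memcpy(buf + off, &l2, 2); memcpy(buf + off + 2, "xyz", 3);  off += l2;

    BlkIter  it = { .buf = buf, .totalLen = off, .len = 0 };
    uint16_t recLen;
    const char *rec;
    while ((rec = blkNext(&it, &recLen)) != NULL) {
        printf("record of %u bytes, payload %.*s\n",
               (unsigned)recLen, (int)(recLen - 2), rec + 2);
    }
    return 0;
}
```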
PRIu64, REPO_ID(pRepo), isRowDelete ? "deleted from" : "updated in", TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), @@ -932,13 +932,15 @@ static int tsdbInsertDataToTableImpl(STsdbRepo *pRepo, STable *pTable, void **ro int64_t osize = SL_SIZE(pTableData->pData); tSkipListPutBatch(pTableData->pData, rows, rowCounter); int64_t dsize = SL_SIZE(pTableData->pData) - osize; + TSKEY keyFirstRow = memRowKey(rows[0]); + TSKEY keyLastRow = memRowKey(rows[rowCounter - 1]); - if (pMemTable->keyFirst > dataRowKey(rows[0])) pMemTable->keyFirst = dataRowKey(rows[0]); - if (pMemTable->keyLast < dataRowKey(rows[rowCounter - 1])) pMemTable->keyLast = dataRowKey(rows[rowCounter - 1]); + if (pMemTable->keyFirst > keyFirstRow) pMemTable->keyFirst = keyFirstRow; + if (pMemTable->keyLast < keyLastRow) pMemTable->keyLast = keyLastRow; pMemTable->numOfRows += dsize; - if (pTableData->keyFirst > dataRowKey(rows[0])) pTableData->keyFirst = dataRowKey(rows[0]); - if (pTableData->keyLast < dataRowKey(rows[rowCounter - 1])) pTableData->keyLast = dataRowKey(rows[rowCounter - 1]); + if (pTableData->keyFirst > keyFirstRow) pTableData->keyFirst = keyFirstRow; + if (pTableData->keyLast < keyLastRow) pTableData->keyLast = keyLastRow; pTableData->numOfRows += dsize; // update table latest info @@ -954,8 +956,8 @@ static void tsdbFreeRows(STsdbRepo *pRepo, void **rows, int rowCounter) { STsdbBufPool *pBufPool = pRepo->pPool; for (int i = rowCounter - 1; i >= 0; --i) { - SDataRow row = (SDataRow)rows[i]; - int bytes = (int)dataRowLen(row); + SMemRow row = (SMemRow)rows[i]; + int bytes = (int)memRowTLen(row); if (pRepo->mem->extraBuffList == NULL) { STsdbBufBlock *pBufBlock = tsdbGetCurrBufBlock(pRepo); @@ -988,21 +990,23 @@ static void tsdbFreeRows(STsdbRepo *pRepo, void **rows, int rowCounter) { } } -static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SDataRow row) { - tsdbDebug("vgId:%d updateTableLatestColumn, %s row version:%d", REPO_ID(pRepo), pTable->name->data, dataRowVersion(row)); +static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow row) { + tsdbDebug("vgId:%d updateTableLatestColumn, %s row version:%d", REPO_ID(pRepo), pTable->name->data, + memRowVersion(row)); STSchema* pSchema = tsdbGetTableLatestSchema(pTable); if (tsdbUpdateLastColSchema(pTable, pSchema) < 0) { return; } - pSchema = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row)); + pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row)); if (pSchema == NULL) { return; } SDataCol *pLatestCols = pTable->lastCols; + bool isDataRow = isDataRow(row); for (int16_t j = 0; j < schemaNCols(pSchema); j++) { STColumn *pTCol = schemaColAt(pSchema, j); // ignore not exist colId @@ -1010,9 +1014,21 @@ static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SDataRow r if (idx == -1) { continue; } - - void* value = tdGetRowDataOfCol(row, (int8_t)pTCol->type, TD_DATA_ROW_HEAD_SIZE + pSchema->columns[j].offset); - if (isNull(value, pTCol->type)) { + + void *value = NULL; + + if (isDataRow) { + value = tdGetRowDataOfCol(memRowDataBody(row), (int8_t)pTCol->type, + TD_DATA_ROW_HEAD_SIZE + pSchema->columns[j].offset); + } else { + // SKVRow + SColIdx *pColIdx = tdGetKVRowIdxOfCol(memRowKvBody(row), pTCol->colId); + if (pColIdx) { + value = tdGetKvRowDataOfCol(memRowKvBody(row), pColIdx->offset); + } + } + + if ((value == NULL) || isNull(value, pTCol->type)) { continue; } @@ -1027,11 +1043,11 @@ static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SDataRow r 
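updateTableLatestColumn() above now fetches a column's value in two ways: for a data row the value sits at a fixed schema offset, while for a KV row it first looks up the column id in the row's (colId, offset) index and treats a miss as a NULL column. A toy sketch of that KV-row index lookup using bsearch; the structs are simplified stand-ins for SColIdx/SKVRow and the real lookup helper may differ:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* simplified stand-in for SColIdx: column id -> offset of its value in the row body */
typedef struct { int16_t colId; uint16_t offset; } ToyColIdx;

static int cmpColId(const void *key, const void *elem) {
    int16_t k = *(const int16_t *)key;
    int16_t c = ((const ToyColIdx *)elem)->colId;
    return (k > c) - (k < c);
}

/* returns the value offset, or -1 when the column is absent (treated as NULL) */
static int findColOffset(const ToyColIdx *idx, int nCols, int16_t colId) {
    const ToyColIdx *hit = bsearch(&colId, idx, nCols, sizeof(*idx), cmpColId);
    return hit ? (int)hit->offset : -1;
}

int main(void) {
    /* a KV row only carries the columns that were actually set, sorted by colId */
    ToyColIdx idx[] = { {1, 0}, {3, 8}, {7, 16} };
    printf("colId 3 -> offset %d\n", findColOffset(idx, 3, 3));   /* 8  */
    printf("colId 5 -> offset %d\n", findColOffset(idx, 3, 5));   /* -1: NULL column */
    return 0;
}
```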
memcpy(pDataCol->pData, value, pDataCol->bytes); //tsdbInfo("updateTableLatestColumn vgId:%d cache column %d for %d,%s", REPO_ID(pRepo), j, pDataCol->bytes, (char*)pDataCol->pData); - pDataCol->ts = dataRowKey(row); + pDataCol->ts = memRowKey(row); } } -static int tsdbUpdateTableLatestInfo(STsdbRepo *pRepo, STable *pTable, SDataRow row) { +static int tsdbUpdateTableLatestInfo(STsdbRepo *pRepo, STable *pTable, SMemRow row) { STsdbCfg *pCfg = &pRepo->config; // if cacheLastRow config has been reset, free the lastRow @@ -1042,31 +1058,31 @@ static int tsdbUpdateTableLatestInfo(STsdbRepo *pRepo, STable *pTable, SDataRow TSDB_WUNLOCK_TABLE(pTable); } - if (tsdbGetTableLastKeyImpl(pTable) < dataRowKey(row)) { + if (tsdbGetTableLastKeyImpl(pTable) < memRowKey(row)) { if (CACHE_LAST_ROW(pCfg) || pTable->lastRow != NULL) { - SDataRow nrow = pTable->lastRow; - if (taosTSizeof(nrow) < dataRowLen(row)) { - SDataRow orow = nrow; - nrow = taosTMalloc(dataRowLen(row)); + SMemRow nrow = pTable->lastRow; + if (taosTSizeof(nrow) < memRowTLen(row)) { + SMemRow orow = nrow; + nrow = taosTMalloc(memRowTLen(row)); if (nrow == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; return -1; } - dataRowCpy(nrow, row); + memRowCpy(nrow, row); TSDB_WLOCK_TABLE(pTable); - pTable->lastKey = dataRowKey(row); + pTable->lastKey = memRowKey(row); pTable->lastRow = nrow; TSDB_WUNLOCK_TABLE(pTable); taosTZfree(orow); } else { TSDB_WLOCK_TABLE(pTable); - pTable->lastKey = dataRowKey(row); - dataRowCpy(nrow, row); + pTable->lastKey = memRowKey(row); + memRowCpy(nrow, row); TSDB_WUNLOCK_TABLE(pTable); } } else { - pTable->lastKey = dataRowKey(row); + pTable->lastKey = memRowKey(row); } if (CACHE_LAST_NULL_COLUMN(pCfg)) { diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index 621be04e21..f233500ee9 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -787,7 +787,7 @@ static char *getTagIndexKey(const void *pData) { void * res = tdGetKVRowValOfCol(pTable->tagVal, pCol->colId); if (res == NULL) { // treat the column as NULL if we cannot find it - res = getNullValue(pCol->type); + res = (char*)getNullValue(pCol->type); } return res; } diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 1eafb5e233..0957a777e3 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -147,7 +147,7 @@ typedef struct STableGroupSupporter { static STimeWindow updateLastrowForEachGroup(STableGroupInfo *groupList); static int32_t checkForCachedLastRow(STsdbQueryHandle* pQueryHandle, STableGroupInfo *groupList); static int32_t checkForCachedLast(STsdbQueryHandle* pQueryHandle); -static int32_t tsdbGetCachedLastRow(STable* pTable, SDataRow* pRes, TSKEY* lastKey); +static int32_t tsdbGetCachedLastRow(STable* pTable, SMemRow* pRes, TSKEY* lastKey); static void changeQueryHandleForInterpQuery(TsdbQueryHandleT pHandle); static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SBlock* pBlock); @@ -734,8 +734,8 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh SSkipListNode* node = tSkipListIterGet(pCheckInfo->iter); assert(node != NULL); - SDataRow row = (SDataRow)SL_GET_NODE_DATA(node); - TSKEY key = dataRowKey(row); // first timestamp in buffer + SMemRow row = (SMemRow)SL_GET_NODE_DATA(node); + TSKEY key = memRowKey(row); // first timestamp in buffer tsdbDebug("%p uid:%" PRId64 ", tid:%d check data in mem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64 "-%" PRId64 ", lastKey:%" PRId64 ", numOfRows:%"PRId64", 0x%"PRIx64, 
pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, key, order, pMem->keyFirst, pMem->keyLast, @@ -756,8 +756,8 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh SSkipListNode* node = tSkipListIterGet(pCheckInfo->iiter); assert(node != NULL); - SDataRow row = (SDataRow)SL_GET_NODE_DATA(node); - TSKEY key = dataRowKey(row); // first timestamp in buffer + SMemRow row = (SMemRow)SL_GET_NODE_DATA(node); + TSKEY key = memRowKey(row); // first timestamp in buffer tsdbDebug("%p uid:%" PRId64 ", tid:%d check data in imem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64 "-%" PRId64 ", lastKey:%" PRId64 ", numOfRows:%"PRId64", 0x%"PRIx64, pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, key, order, pIMem->keyFirst, pIMem->keyLast, @@ -781,19 +781,19 @@ static void destroyTableMemIterator(STableCheckInfo* pCheckInfo) { tSkipListDestroyIter(pCheckInfo->iiter); } -static SDataRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order, int32_t update) { - SDataRow rmem = NULL, rimem = NULL; +static SMemRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order, int32_t update) { + SMemRow rmem = NULL, rimem = NULL; if (pCheckInfo->iter) { SSkipListNode* node = tSkipListIterGet(pCheckInfo->iter); if (node != NULL) { - rmem = (SDataRow)SL_GET_NODE_DATA(node); + rmem = (SMemRow)SL_GET_NODE_DATA(node); } } if (pCheckInfo->iiter) { SSkipListNode* node = tSkipListIterGet(pCheckInfo->iiter); if (node != NULL) { - rimem = (SDataRow)SL_GET_NODE_DATA(node); + rimem = (SMemRow)SL_GET_NODE_DATA(node); } } @@ -811,8 +811,8 @@ static SDataRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order return rimem; } - TSKEY r1 = dataRowKey(rmem); - TSKEY r2 = dataRowKey(rimem); + TSKEY r1 = memRowKey(rmem); + TSKEY r2 = memRowKey(rimem); if (r1 == r2) { // data ts are duplicated, ignore the data in mem if (!update) { @@ -891,12 +891,12 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) { initTableMemIterator(pHandle, pCheckInfo); } - SDataRow row = getSDataRowInTableMem(pCheckInfo, pHandle->order, pCfg->update); + SMemRow row = getSDataRowInTableMem(pCheckInfo, pHandle->order, pCfg->update); if (row == NULL) { return false; } - pCheckInfo->lastKey = dataRowKey(row); // first timestamp in buffer + pCheckInfo->lastKey = memRowKey(row); // first timestamp in buffer tsdbDebug("%p uid:%" PRId64", tid:%d check data in buffer from skey:%" PRId64 ", order:%d, 0x%"PRIx64, pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, pCheckInfo->lastKey, pHandle->order, pHandle->qId); @@ -1155,11 +1155,11 @@ static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SBlock* p int32_t code = TSDB_CODE_SUCCESS; /*bool hasData = */ initTableMemIterator(pQueryHandle, pCheckInfo); - SDataRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update); + SMemRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update); assert(cur->pos >= 0 && cur->pos <= binfo.rows); - TSKEY key = (row != NULL)? dataRowKey(row):TSKEY_INITIAL_VAL; + TSKEY key = (row != NULL) ? 
memRowKey(row) : TSKEY_INITIAL_VAL; if (key != TSKEY_INITIAL_VAL) { tsdbDebug("%p key in mem:%"PRId64", 0x%"PRIx64, pQueryHandle, key, pQueryHandle->qId); } else { @@ -1401,7 +1401,7 @@ int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t capacity // todo refactor, only copy one-by-one for (int32_t k = start; k < num + start; ++k) { - char* p = tdGetColDataOfRow(src, k); + const char* p = tdGetColDataOfRow(src, k); memcpy(dst, p, varDataTLen(p)); dst += bytes; } @@ -1452,88 +1452,170 @@ int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t capacity return numOfRows + num; } -static void copyOneRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfRows, SDataRow row, +static void copyOneRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfRows, SMemRow row, int32_t numOfCols, STable* pTable, STSchema* pSchema) { char* pData = NULL; - // the schema version info is embeded in SDataRow + // the schema version info is embedded in SDataRow, and use latest schema version for SKVRow int32_t numOfRowCols = 0; if (pSchema == NULL) { - pSchema = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row)); + pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row)); numOfRowCols = schemaNCols(pSchema); } else { numOfRowCols = schemaNCols(pSchema); } - - int32_t i = 0, j = 0; - while(i < numOfCols && j < numOfRowCols) { - SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i); - if (pSchema->columns[j].colId < pColInfo->info.colId) { - j++; - continue; - } - if (ASCENDING_TRAVERSE(pQueryHandle->order)) { - pData = (char*)pColInfo->pData + numOfRows * pColInfo->info.bytes; - } else { - pData = (char*)pColInfo->pData + (capacity - numOfRows - 1) * pColInfo->info.bytes; - } + int32_t i = 0; - if (pSchema->columns[j].colId == pColInfo->info.colId) { - void* value = tdGetRowDataOfCol(row, (int8_t)pColInfo->info.type, TD_DATA_ROW_HEAD_SIZE + pSchema->columns[j].offset); - switch (pColInfo->info.type) { - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - memcpy(pData, value, varDataTLen(value)); - break; - case TSDB_DATA_TYPE_NULL: - case TSDB_DATA_TYPE_BOOL: - case TSDB_DATA_TYPE_TINYINT: - case TSDB_DATA_TYPE_UTINYINT: - *(uint8_t *)pData = *(uint8_t *)value; - break; - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_USMALLINT: - *(uint16_t *)pData = *(uint16_t *)value; - break; - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_UINT: - *(uint32_t *)pData = *(uint32_t *)value; - break; - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_UBIGINT: - *(uint64_t *)pData = *(uint64_t *)value; - break; - case TSDB_DATA_TYPE_FLOAT: - SET_FLOAT_PTR(pData, value); - break; - case TSDB_DATA_TYPE_DOUBLE: - SET_DOUBLE_PTR(pData, value); - break; - case TSDB_DATA_TYPE_TIMESTAMP: - if (pColInfo->info.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { - *(TSKEY *)pData = tdGetKey(*(TKEY *)value); - } else { - *(TSKEY *)pData = *(TSKEY *)value; - } - break; - default: - memcpy(pData, value, pColInfo->info.bytes); + if (isDataRow(row)) { + SDataRow dataRow = memRowDataBody(row); + int32_t j = 0; + while (i < numOfCols && j < numOfRowCols) { + SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i); + if (pSchema->columns[j].colId < pColInfo->info.colId) { + j++; + continue; } - j++; - i++; - } else { // pColInfo->info.colId < pSchema->columns[j].colId, it is a NULL data + if (ASCENDING_TRAVERSE(pQueryHandle->order)) { + pData = (char*)pColInfo->pData + numOfRows * pColInfo->info.bytes; + } else { + pData = 
(char*)pColInfo->pData + (capacity - numOfRows - 1) * pColInfo->info.bytes; + } + + if (pSchema->columns[j].colId == pColInfo->info.colId) { + void* value = + tdGetRowDataOfCol(dataRow, (int8_t)pColInfo->info.type, TD_DATA_ROW_HEAD_SIZE + pSchema->columns[j].offset); + switch (pColInfo->info.type) { + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + memcpy(pData, value, varDataTLen(value)); + break; + case TSDB_DATA_TYPE_NULL: + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_UTINYINT: + *(uint8_t*)pData = *(uint8_t*)value; + break; + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_USMALLINT: + *(uint16_t*)pData = *(uint16_t*)value; + break; + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_UINT: + *(uint32_t*)pData = *(uint32_t*)value; + break; + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_UBIGINT: + *(uint64_t*)pData = *(uint64_t*)value; + break; + case TSDB_DATA_TYPE_FLOAT: + SET_FLOAT_PTR(pData, value); + break; + case TSDB_DATA_TYPE_DOUBLE: + SET_DOUBLE_PTR(pData, value); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + if (pColInfo->info.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + *(TSKEY*)pData = tdGetKey(*(TKEY*)value); + } else { + *(TSKEY*)pData = *(TSKEY*)value; + } + break; + default: + memcpy(pData, value, pColInfo->info.bytes); + } + + j++; + i++; + } else { // pColInfo->info.colId < pSchema->columns[j].colId, it is a NULL data + if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) { + setVardataNull(pData, pColInfo->info.type); + } else { + setNull(pData, pColInfo->info.type, pColInfo->info.bytes); + } + i++; + } + } + } else if (isKvRow(row)) { + SKVRow kvRow = memRowKvBody(row); + int32_t k = 0; + int32_t nKvRowCols = kvRowNCols(kvRow); + + while (i < numOfCols && k < nKvRowCols) { + SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i); + SColIdx* pColIdx = kvRowColIdxAt(kvRow, k); + + if (pColIdx->colId < pColInfo->info.colId) { + ++k; + continue; + } + + if (ASCENDING_TRAVERSE(pQueryHandle->order)) { + pData = (char*)pColInfo->pData + numOfRows * pColInfo->info.bytes; + } else { + pData = (char*)pColInfo->pData + (capacity - numOfRows - 1) * pColInfo->info.bytes; + } + + if (pColIdx->colId == pColInfo->info.colId) { + // offset of pColIdx for SKVRow including the TD_KV_ROW_HEAD_SIZE + void* value = tdGetKvRowDataOfCol(kvRow, pColIdx->offset); + switch (pColInfo->info.type) { + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + memcpy(pData, value, varDataTLen(value)); + break; + case TSDB_DATA_TYPE_NULL: + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_UTINYINT: + *(uint8_t*)pData = *(uint8_t*)value; + break; + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_USMALLINT: + *(uint16_t*)pData = *(uint16_t*)value; + break; + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_UINT: + *(uint32_t*)pData = *(uint32_t*)value; + break; + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_UBIGINT: + *(uint64_t*)pData = *(uint64_t*)value; + break; + case TSDB_DATA_TYPE_FLOAT: + SET_FLOAT_PTR(pData, value); + break; + case TSDB_DATA_TYPE_DOUBLE: + SET_DOUBLE_PTR(pData, value); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + if (pColInfo->info.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + *(TSKEY*)pData = tdGetKey(*(TKEY*)value); + } else { + *(TSKEY*)pData = *(TSKEY*)value; + } + break; + default: + memcpy(pData, value, pColInfo->info.bytes); + } + ++k; + ++i; + continue; + } + // If (pColInfo->info.colId < pColIdx->colId), it is NULL data if 
(pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) { setVardataNull(pData, pColInfo->info.type); } else { setNull(pData, pColInfo->info.type, pColInfo->info.bytes); } - i++; + ++i; } + } else { + ASSERT(0); } - while (i < numOfCols) { // the remain columns are all null data + while (i < numOfCols) { // the remain columns are all null data SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i); if (ASCENDING_TRAVERSE(pQueryHandle->order)) { pData = (char*)pColInfo->pData + numOfRows * pColInfo->info.bytes; @@ -1550,7 +1632,6 @@ static void copyOneRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity, i++; } } - static void moveDataToFront(STsdbQueryHandle* pQueryHandle, int32_t numOfRows, int32_t numOfCols) { if (numOfRows == 0 || ASCENDING_TRAVERSE(pQueryHandle->order)) { return; @@ -1730,12 +1811,12 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* } else if (pCheckInfo->iter != NULL || pCheckInfo->iiter != NULL) { SSkipListNode* node = NULL; do { - SDataRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update); + SMemRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update); if (row == NULL) { break; } - TSKEY key = dataRowKey(row); + TSKEY key = memRowKey(row); if ((key > pQueryHandle->window.ekey && ASCENDING_TRAVERSE(pQueryHandle->order)) || (key < pQueryHandle->window.ekey && !ASCENDING_TRAVERSE(pQueryHandle->order))) { break; @@ -1748,11 +1829,11 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* if ((key < tsArray[pos] && ASCENDING_TRAVERSE(pQueryHandle->order)) || (key > tsArray[pos] && !ASCENDING_TRAVERSE(pQueryHandle->order))) { - if (rv != dataRowVersion(row)) { - pSchema = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row)); - rv = dataRowVersion(row); - } - + if (rv != memRowVersion(row)) { + pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row)); + rv = memRowVersion(row); + } + copyOneRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row, numOfCols, pTable, pSchema); numOfRows += 1; if (cur->win.skey == TSKEY_INITIAL_VAL) { @@ -1766,11 +1847,11 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* moveToNextRowInMem(pCheckInfo); } else if (key == tsArray[pos]) { // data in buffer has the same timestamp of data in file block, ignore it if (pCfg->update) { - if (rv != dataRowVersion(row)) { - pSchema = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row)); - rv = dataRowVersion(row); + if (rv != memRowVersion(row)) { + pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row)); + rv = memRowVersion(row); } - + copyOneRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row, numOfCols, pTable, pSchema); numOfRows += 1; if (cur->win.skey == TSKEY_INITIAL_VAL) { @@ -1820,8 +1901,10 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* * copy them all to result buffer, since it may be overlapped with file data block. 
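For context on the copyOneRowFromMem() change above: an SMemRow can wrap either a dense SDataRow (every column stored at a fixed, schema-derived offset) or a sparse SKVRow (only the columns that were actually written, located through a colId/offset index), and the copy loop now dispatches on which of the two it holds, treating any column missing from a kv row as NULL. The sketch below is a simplified, self-contained model of that dispatch; the struct layouts and helper names are illustrative assumptions, not the actual SMemRow/SDataRow/SKVRow definitions.

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified model: a "mem row" is either a dense row (value at a fixed
 * schema position) or a kv row (sparse list of (colId, value) pairs).
 * These types are illustrative only, not the TDengine structures. */
typedef enum { ROW_DENSE, ROW_KV } RowType;

typedef struct { int16_t colId; int value; } KvCell;

typedef struct {
  RowType type;
  int     dense[4];   /* dense layout: dense[i] holds schema column i   */
  KvCell  kv[4];      /* kv layout: only columns that were written      */
  int     nKv;
} MemRow;

/* Returns 1 and stores the value if colId is present, 0 for "treat as NULL". */
static int getColValue(const MemRow *row, int16_t colId, int *out) {
  if (row->type == ROW_DENSE) {            /* like the isDataRow() branch */
    *out = row->dense[colId];
    return 1;
  }
  for (int k = 0; k < row->nKv; ++k) {     /* like the isKvRow() branch   */
    if (row->kv[k].colId == colId) { *out = row->kv[k].value; return 1; }
    if (row->kv[k].colId > colId) break;   /* kv cells kept sorted by colId */
  }
  return 0;                                /* missing column -> NULL       */
}

int main(void) {
  MemRow kvRow = { .type = ROW_KV, .kv = { {0, 42}, {2, 7} }, .nKv = 2 };
  int v = 0;
  printf("col 2 present=%d value=%d\n", getColValue(&kvRow, 2, &v), v);
  printf("col 1 present=%d (filled as NULL)\n", getColValue(&kvRow, 1, &v));
  return 0;
}
```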
*/ if (node == NULL || - ((dataRowKey((SDataRow)SL_GET_NODE_DATA(node)) > pQueryHandle->window.ekey) && ASCENDING_TRAVERSE(pQueryHandle->order)) || - ((dataRowKey((SDataRow)SL_GET_NODE_DATA(node)) < pQueryHandle->window.ekey) && !ASCENDING_TRAVERSE(pQueryHandle->order))) { + ((memRowKey((SMemRow)SL_GET_NODE_DATA(node)) > pQueryHandle->window.ekey) && + ASCENDING_TRAVERSE(pQueryHandle->order)) || + ((memRowKey((SMemRow)SL_GET_NODE_DATA(node)) < pQueryHandle->window.ekey) && + !ASCENDING_TRAVERSE(pQueryHandle->order))) { // no data in cache or data in cache is greater than the ekey of time window, load data from file block if (cur->win.skey == TSKEY_INITIAL_VAL) { cur->win.skey = tsArray[pos]; @@ -2407,12 +2490,12 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int STSchema* pSchema = NULL; do { - SDataRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update); + SMemRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update); if (row == NULL) { break; } - TSKEY key = dataRowKey(row); + TSKEY key = memRowKey(row); if ((key > maxKey && ASCENDING_TRAVERSE(pQueryHandle->order)) || (key < maxKey && !ASCENDING_TRAVERSE(pQueryHandle->order))) { tsdbDebug("%p key:%"PRIu64" beyond qrange:%"PRId64" - %"PRId64", no more data in buffer", pQueryHandle, key, pQueryHandle->window.skey, pQueryHandle->window.ekey); @@ -2425,9 +2508,9 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int } win->ekey = key; - if (rv != dataRowVersion(row)) { - pSchema = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row)); - rv = dataRowVersion(row); + if (rv != memRowVersion(row)) { + pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row)); + rv = memRowVersion(row); } copyOneRowFromMem(pQueryHandle, maxRowsToRead, numOfRows, row, numOfCols, pTable, pSchema); @@ -2544,7 +2627,7 @@ static bool loadCachedLastRow(STsdbQueryHandle* pQueryHandle) { SQueryFilePos* cur = &pQueryHandle->cur; - SDataRow pRow = NULL; + SMemRow pRow = NULL; TSKEY key = TSKEY_INITIAL_VAL; int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order)? 
1:-1; @@ -2954,7 +3037,7 @@ bool tsdbGetExternalRow(TsdbQueryHandleT pHandle) { * if lastRow == NULL, return TSDB_CODE_TDB_NO_CACHE_LAST_ROW * else set pRes and return TSDB_CODE_SUCCESS and save lastKey */ -int32_t tsdbGetCachedLastRow(STable* pTable, SDataRow* pRes, TSKEY* lastKey) { +int32_t tsdbGetCachedLastRow(STable* pTable, SMemRow* pRes, TSKEY* lastKey) { int32_t code = TSDB_CODE_SUCCESS; TSDB_RLOCK_TABLE(pTable); @@ -2965,7 +3048,7 @@ int32_t tsdbGetCachedLastRow(STable* pTable, SDataRow* pRes, TSKEY* lastKey) { } if (pRes) { - *pRes = tdDataRowDup(pTable->lastRow); + *pRes = tdMemRowDup(pTable->lastRow); if (*pRes == NULL) { code = TSDB_CODE_TDB_OUT_OF_MEMORY; } @@ -3002,7 +3085,7 @@ int32_t checkForCachedLastRow(STsdbQueryHandle* pQueryHandle, STableGroupInfo *g } // update the tsdb query time range - if (pQueryHandle->cachelastrow) { + if (pQueryHandle->cachelastrow != TSDB_CACHED_TYPE_NONE) { pQueryHandle->window = TSWINDOW_INITIALIZER; pQueryHandle->checkFiles = false; pQueryHandle->activeIndex = -1; // start from -1 @@ -3034,6 +3117,7 @@ STimeWindow updateLastrowForEachGroup(STableGroupInfo *groupList) { STimeWindow window = {INT64_MAX, INT64_MIN}; int32_t totalNumOfTable = 0; + SArray* emptyGroup = taosArrayInit(16, sizeof(int32_t)); // NOTE: starts from the buffer in case of descending timestamp order check data blocks size_t numOfGroups = taosArrayGetSize(groupList->pGroupList); @@ -3076,27 +3160,30 @@ STimeWindow updateLastrowForEachGroup(STableGroupInfo *groupList) { } } - taosArrayClear(pGroup); - // more than one table in each group, only one table left for each group if (keyInfo.pTable != NULL) { totalNumOfTable++; - taosArrayPush(pGroup, &keyInfo); - } else { + if (taosArrayGetSize(pGroup) == 1) { + // do nothing + } else { + taosArrayClear(pGroup); + taosArrayPush(pGroup, &keyInfo); + } + } else { // mark all the empty groups, and remove it later taosArrayDestroy(pGroup); - - taosArrayRemove(groupList->pGroupList, j); - numOfGroups -= 1; - j -= 1; + taosArrayPush(emptyGroup, &j); } } // window does not being updated, so set the original if (window.skey == INT64_MAX && window.ekey == INT64_MIN) { window = TSWINDOW_INITIALIZER; - assert(totalNumOfTable == 0 && taosArrayGetSize(groupList->pGroupList) == 0); + assert(totalNumOfTable == 0 && taosArrayGetSize(groupList->pGroupList) == numOfGroups); } + taosArrayRemoveBatch(groupList->pGroupList, TARRAY_GET_START(emptyGroup), (int32_t) taosArrayGetSize(emptyGroup)); + taosArrayDestroy(emptyGroup); + groupList->numOfTables = totalNumOfTable; return window; } @@ -3698,6 +3785,10 @@ static void* doFreeColumnInfoData(SArray* pColumnInfoData) { } static void* destroyTableCheckInfo(SArray* pTableCheckInfo) { + if (pTableCheckInfo == NULL) { + return NULL; + } + size_t size = taosArrayGetSize(pTableCheckInfo); for (int32_t i = 0; i < size; ++i) { STableCheckInfo* p = taosArrayGet(pTableCheckInfo, i); diff --git a/src/tsdb/tests/tsdbTests.cpp b/src/tsdb/tests/tsdbTests.cpp index ac254d6c34..dc804856fd 100644 --- a/src/tsdb/tests/tsdbTests.cpp +++ b/src/tsdb/tests/tsdbTests.cpp @@ -55,10 +55,10 @@ static int insertData(SInsertInfo *pInfo) { for (int j = 0; j < schemaNCols(pInfo->pSchema); j++) { STColumn *pTCol = schemaColAt(pInfo->pSchema, j); if (j == 0) { // Just for timestamp - tdAppendColVal(row, (void *)(&start_time), pTCol->type, pTCol->bytes, pTCol->offset); + tdAppendColVal(row, (void *)(&start_time), pTCol->type, pTCol->offset); } else { // For int int val = 10; - tdAppendColVal(row, (void *)(&val), pTCol->type, 
pTCol->bytes, pTCol->offset); + tdAppendColVal(row, (void *)(&val), pTCol->type, pTCol->offset); } } pBlock->dataLen += dataRowLen(row); diff --git a/src/util/CMakeLists.txt b/src/util/CMakeLists.txt index 85b15c0a4f..44808b1025 100644 --- a/src/util/CMakeLists.txt +++ b/src/util/CMakeLists.txt @@ -4,13 +4,14 @@ PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/rpc/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/sync/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/rmonotonic/inc) + AUX_SOURCE_DIRECTORY(src SRC) ADD_LIBRARY(tutil ${SRC}) TARGET_LINK_LIBRARIES(tutil pthread os lz4 z rmonotonic) IF (TD_LINUX) TARGET_LINK_LIBRARIES(tutil m rt) - # ADD_SUBDIRECTORY(tests) + ADD_SUBDIRECTORY(tests) FIND_PATH(ICONV_INCLUDE_EXIST iconv.h /usr/include/ /usr/local/include/) IF (ICONV_INCLUDE_EXIST) diff --git a/src/util/inc/tarray.h b/src/util/inc/tarray.h index 63cadf39a3..2da74eac82 100644 --- a/src/util/inc/tarray.h +++ b/src/util/inc/tarray.h @@ -52,6 +52,22 @@ void* taosArrayInit(size_t size, size_t elemSize); */ void *taosArrayAddBatch(SArray *pArray, const void *pData, int nEles); +/** + * + * @param pArray + * @param pData position array list + * @param numOfElems the number of removed position + */ +void taosArrayRemoveBatch(SArray *pArray, const int32_t* pData, int32_t numOfElems); + +/** + * + * @param pArray + * @param comparFn + * @param fp + */ +void taosArrayRemoveDuplicate(SArray *pArray, __compar_fn_t comparFn, void (*fp)(void*)); + /** * add all element from the source array list into the destination * @param pArray diff --git a/src/util/src/tarray.c b/src/util/src/tarray.c index fe529edaac..d0d126c1e4 100644 --- a/src/util/src/tarray.c +++ b/src/util/src/tarray.c @@ -83,6 +83,87 @@ void* taosArrayAddBatch(SArray* pArray, const void* pData, int nEles) { return dst; } +void taosArrayRemoveBatch(SArray *pArray, const int32_t* pData, int32_t numOfElems) { + assert(pArray != NULL && pData != NULL); + if (numOfElems <= 0) { + return; + } + + size_t size = taosArrayGetSize(pArray); + if (numOfElems >= size) { + taosArrayClear(pArray); + return; + } + + int32_t i = pData[0] + 1, j = 0; + while(i < size) { + if (j == numOfElems - 1) { + break; + } + + char* p = TARRAY_GET_ELEM(pArray, i); + if (i > pData[j] && i < pData[j + 1]) { + char* dst = TARRAY_GET_ELEM(pArray, i - (j + 1)); + memmove(dst, p, pArray->elemSize); + } else if (i == pData[j + 1]) { + j += 1; + } + + i += 1; + } + + assert(i == pData[numOfElems - 1] + 1); + + int32_t dstIndex = pData[numOfElems - 1] - numOfElems + 1; + int32_t srcIndex = pData[numOfElems - 1] + 1; + + char* dst = TARRAY_GET_ELEM(pArray, dstIndex); + char* src = TARRAY_GET_ELEM(pArray, srcIndex); + memmove(dst, src, pArray->elemSize * (pArray->size - numOfElems)); + + pArray->size -= numOfElems; +} + +void taosArrayRemoveDuplicate(SArray *pArray, __compar_fn_t comparFn, void (*fp)(void*)) { + assert(pArray); + + size_t size = pArray->size; + if (size <= 1) { + return; + } + + int32_t pos = 0; + for(int32_t i = 1; i < size; ++i) { + char* p1 = taosArrayGet(pArray, pos); + char* p2 = taosArrayGet(pArray, i); + + if (comparFn(p1, p2) == 0) { + // do nothing + } else { + if (pos + 1 != i) { + void* p = taosArrayGet(pArray, pos + 1); + if (fp != NULL) { + fp(p); + } + + taosArraySet(pArray, pos + 1, p2); + pos += 1; + } else { + pos += 1; + } + } + } + + if (fp != NULL) { + for(int32_t i = pos + 1; i < pArray->size; ++i) { + void* p = taosArrayGet(pArray, i); + fp(p); + } + } + + pArray->size = pos + 1; +} + void* taosArrayAddAll(SArray* pArray, 
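Both SArray helpers introduced above rely on ordered input: taosArrayRemoveBatch expects the list of positions to be sorted in ascending order and free of duplicates (which is how the new arrayTest.cpp drives it), while taosArrayRemoveDuplicate only collapses adjacent equal elements, so it fully de-duplicates an already sorted array and hands each discarded element to the optional fp callback. Below is a minimal stand-alone model of the batch-removal compaction on a plain C array; the function and variable names are illustrative, not the SArray API itself.

```c
#include <assert.h>
#include <stdio.h>

/* Remove the elements at the given positions (sorted ascending, unique)
 * from arr[0..n-1], compacting the survivors to the front. Returns the
 * new length. Mirrors the idea behind taosArrayRemoveBatch. */
static int removeBatch(int *arr, int n, const int *pos, int nPos) {
  int w = 0, p = 0;                 /* w: write index, p: next position to drop */
  for (int r = 0; r < n; ++r) {
    if (p < nPos && r == pos[p]) {  /* this slot is marked for removal */
      ++p;
      continue;
    }
    arr[w++] = arr[r];              /* keep the survivor */
  }
  assert(p == nPos);                /* every position was valid and in range */
  return w;
}

int main(void) {
  int a[]    = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
  int drop[] = {2, 5, 6};           /* must be ascending, unique, in range */
  int n = removeBatch(a, 10, drop, 3);
  for (int i = 0; i < n; ++i) printf("%d ", a[i]);   /* prints: 0 1 3 4 7 8 9 */
  printf("\n");
  return 0;
}
```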
const SArray* pInput) { return taosArrayAddBatch(pArray, pInput->pData, (int32_t) taosArrayGetSize(pInput)); } diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 569f9b01bd..c6ee79e101 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -656,6 +656,8 @@ void* taosCacheTimedRefresh(void *handle) { return NULL; } + setThreadName("cacheTimedRefre"); + const int32_t SLEEP_DURATION = 500; //500 ms int64_t totalTick = pCacheObj->refreshTime / SLEEP_DURATION; diff --git a/src/util/src/terror.c b/src/util/src/terror.c index e36ab94844..46a33569b2 100644 --- a/src/util/src/terror.c +++ b/src/util/src/terror.c @@ -183,6 +183,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_FIELD_ALREAY_EXIST, "Field already exists" TAOS_DEFINE_ERROR(TSDB_CODE_MND_FIELD_NOT_EXIST, "Field does not exist") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_STABLE_NAME, "Super table does not exist") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_CREATE_TABLE_MSG, "Invalid create table message") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_EXCEED_MAX_ROW_BYTES, "Exceed max row bytes") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_FUNC_NAME, "Invalid func name") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_FUNC_LEN, "Invalid func length") diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c index 45ff14ffa4..88f57e8ac2 100644 --- a/src/util/src/tlog.c +++ b/src/util/src/tlog.c @@ -178,6 +178,8 @@ static void *taosThreadToOpenNewFile(void *param) { char keepName[LOG_FILE_NAME_LEN + 20]; sprintf(keepName, "%s.%d", tsLogObj.logName, tsLogObj.flag); + setThreadName("openNewFile"); + tsLogObj.flag ^= 1; tsLogObj.lines = 0; char name[LOG_FILE_NAME_LEN + 20]; @@ -687,6 +689,8 @@ static void taosWriteLog(SLogBuff *tLogBuff) { static void *taosAsyncOutputLog(void *param) { SLogBuff *tLogBuff = (SLogBuff *)param; + + setThreadName("asyncOutputLog"); while (1) { //tsem_wait(&(tLogBuff->buffNotEmpty)); diff --git a/src/util/src/tnettest.c b/src/util/src/tnettest.c index 318a2d4860..0bab7b7e66 100644 --- a/src/util/src/tnettest.c +++ b/src/util/src/tnettest.c @@ -50,7 +50,9 @@ static void *taosNetBindUdpPort(void *sarg) { struct sockaddr_in server_addr; struct sockaddr_in clientAddr; - if ((serverSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)) < 0) { + setThreadName("netBindUdpPort"); + + if ((serverSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)) < 0) { uError("failed to create UDP socket since %s", strerror(errno)); return NULL; } @@ -106,13 +108,15 @@ static void *taosNetBindTcpPort(void *sarg) { struct sockaddr_in server_addr; struct sockaddr_in clientAddr; - STestInfo *pinfo = sarg; + STestInfo *pinfo = sarg; int32_t port = pinfo->port; SOCKET serverSocket; int32_t addr_len = sizeof(clientAddr); SOCKET client; char buffer[BUFFER_SIZE]; + setThreadName("netBindTcpPort"); + if ((serverSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)) < 0) { uError("failed to create TCP socket since %s", strerror(errno)); return NULL; diff --git a/src/util/src/tnote.c b/src/util/src/tnote.c index c6d49dfb3d..b691abc5b9 100644 --- a/src/util/src/tnote.c +++ b/src/util/src/tnote.c @@ -84,6 +84,8 @@ static void *taosThreadToOpenNewNote(void *param) { char name[NOTE_FILE_NAME_LEN * 2]; SNoteObj *pNote = (SNoteObj *)param; + setThreadName("openNewNote"); + pNote->flag ^= 1; pNote->lines = 0; sprintf(name, "%s.%d", pNote->name, pNote->flag); diff --git a/src/util/src/tsched.c b/src/util/src/tsched.c index 16142470c9..3d3dfd9899 100644 --- a/src/util/src/tsched.c +++ b/src/util/src/tsched.c @@ -122,6 +122,8 @@ void *taosProcessSchedQueue(void *scheduler) { SSchedQueue 
*pSched = (SSchedQueue *)scheduler; int ret = 0; + setThreadName("schedQ"); + while (1) { if ((ret = tsem_wait(&pSched->fullSem)) != 0) { uFatal("wait %s fullSem failed(%s)", pSched->label, strerror(errno)); @@ -234,4 +236,4 @@ void taosDumpSchedulerStatus(void *qhandle, void *tmrId) { } taosTmrReset(taosDumpSchedulerStatus, DUMP_SCHEDULER_TIME_WINDOW, pSched, pSched->pTmrCtrl, &pSched->pTimer); -} \ No newline at end of file +} diff --git a/src/util/src/tskiplist.c b/src/util/src/tskiplist.c index a88fee51d7..082b454bb5 100644 --- a/src/util/src/tskiplist.c +++ b/src/util/src/tskiplist.c @@ -681,143 +681,3 @@ static SSkipListNode *tSkipListPutImpl(SSkipList *pSkipList, void *pData, SSkipL return pNode; } - -// static int32_t tSkipListEndParQuery(SSkipList *pSkipList, SSkipListNode *pStartNode, SSkipListKey *pEndKey, -// int32_t cond, SSkipListNode ***pRes) { -// pthread_rwlock_rdlock(&pSkipList->lock); -// SSkipListNode *p = pStartNode; -// int32_t numOfRes = 0; -// -// __compar_fn_t filterComparFn = getComparFunc(pSkipList, pEndKey->nType); -// while (p != NULL) { -// int32_t ret = filterComparFn(&p->key, pEndKey); -// if (ret > 0) { -// break; -// } -// -// if (ret < 0) { -// numOfRes++; -// p = p->pForward[0]; -// } else if (ret == 0) { -// if (cond == TSDB_RELATION_LESS_EQUAL) { -// numOfRes++; -// p = p->pForward[0]; -// } else { -// break; -// } -// } -// } -// -// (*pRes) = (SSkipListNode **)malloc(POINTER_BYTES * numOfRes); -// for (int32_t i = 0; i < numOfRes; ++i) { -// (*pRes)[i] = pStartNode; -// pStartNode = pStartNode->pForward[0]; -// } -// pthread_rwlock_unlock(&pSkipList->lock); -// -// return numOfRes; -//} -// -///* -// * maybe return the copy of SSkipListNode would be better -// */ -// int32_t tSkipListGets(SSkipList *pSkipList, SSkipListKey *pKey, SSkipListNode ***pRes) { -// (*pRes) = NULL; -// -// SSkipListNode *pNode = tSkipListGet(pSkipList, pKey); -// if (pNode == NULL) { -// return 0; -// } -// -// __compar_fn_t filterComparFn = getComparFunc(pSkipList, pKey->nType); -// -// // backward check if previous nodes are with the same value. -// SSkipListNode *pPrev = pNode->pBackward[0]; -// while ((pPrev != &pSkipList->pHead) && filterComparFn(&pPrev->key, pKey) == 0) { -// pPrev = pPrev->pBackward[0]; -// } -// -// return tSkipListEndParQuery(pSkipList, pPrev->pForward[0], &pNode->key, TSDB_RELATION_LESS_EQUAL, pRes); -//} -// -// static SSkipListNode *tSkipListParQuery(SSkipList *pSkipList, SSkipListKey *pKey, int32_t cond) { -// int32_t sLevel = pSkipList->level - 1; -// int32_t ret = -1; -// -// SSkipListNode *x = &pSkipList->pHead; -// __compar_fn_t filterComparFn = getComparFunc(pSkipList, pKey->nType); -// -// pthread_rwlock_rdlock(&pSkipList->lock); -// -// if (cond == TSDB_RELATION_GREATER_EQUAL || cond == TSDB_RELATION_GREATER) { -// for (int32_t i = sLevel; i >= 0; --i) { -// while (x->pForward[i] != NULL && (ret = filterComparFn(&x->pForward[i]->key, pKey)) < 0) { -// x = x->pForward[i]; -// } -// } -// -// // backward check if previous nodes are with the same value. 
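One small but pervasive change in this patch is that long-running worker threads now call setThreadName() at the top of their entry functions (e.g. "schedQ", "cacheTimedRefre", "asyncOutputLog"), which makes them identifiable in `top -H`, gdb's `info threads`, and core dumps. The chosen names stay within 15 characters, consistent with the Linux limit of 16 bytes including the terminator. The sketch below shows the same pattern under the assumption that setThreadName is a thin wrapper over pthread_setname_np on Linux; that mapping is an illustration, not the repository's actual implementation.

```c
#define _GNU_SOURCE            /* for pthread_setname_np on glibc */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Illustrative stand-in for the setThreadName() helper used in the patch.
 * Linux accepts at most 15 characters plus the NUL terminator. */
static void setThreadNameDemo(const char *name) {
  pthread_setname_np(pthread_self(), name);
}

static void *workerFunc(void *arg) {
  (void)arg;
  setThreadNameDemo("schedQ");   /* same pattern as taosProcessSchedQueue() above */
  /* ... a real worker would loop on its queue here ... */
  sleep(1);
  return NULL;
}

int main(void) {
  pthread_t tid;
  pthread_create(&tid, NULL, workerFunc, NULL);
  pthread_join(tid, NULL);       /* while it runs, `top -H` shows "schedQ" */
  return 0;
}
```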
-// if (cond == TSDB_RELATION_GREATER_EQUAL && ret == 0) { -// SSkipListNode *pNode = x->pForward[0]; -// while ((pNode->pBackward[0] != &pSkipList->pHead) && (filterComparFn(&pNode->pBackward[0]->key, pKey) == 0)) { -// pNode = pNode->pBackward[0]; -// } -// pthread_rwlock_unlock(&pSkipList->lock); -// return pNode; -// } -// -// if (ret > 0 || cond == TSDB_RELATION_GREATER_EQUAL) { -// pthread_rwlock_unlock(&pSkipList->lock); -// return x->pForward[0]; -// } else { // cond == TSDB_RELATION_GREATER && ret == 0 -// SSkipListNode *pn = x->pForward[0]; -// while (pn != NULL && filterComparFn(&pn->key, pKey) == 0) { -// pn = pn->pForward[0]; -// } -// pthread_rwlock_unlock(&pSkipList->lock); -// return pn; -// } -// } -// -// pthread_rwlock_unlock(&pSkipList->lock); -// return NULL; -//} -// -// -// static bool removeSupport(SSkipList *pSkipList, SSkipListNode **forward, SSkipListKey *pKey) { -// __compar_fn_t filterComparFn = getComparFunc(pSkipList, pKey->nType); -// -// if (filterComparFn(&forward[0]->pForward[0]->key, pKey) == 0) { -// SSkipListNode *p = forward[0]->pForward[0]; -// doRemove(pSkipList, p, forward); -// } else { // failed to find the node of specified value,abort -// return false; -// } -// -// // compress the minimum level of skip list -// while (pSkipList->level > 0 && SL_NODE_GET_FORWARD_POINTER(pSkipList->pHead, pSkipList->level - 1) == NULL) { -// pSkipList->level -= 1; -// } -// -// return true; -//} -// -// bool tSkipListRemove(SSkipList *pSkipList, SSkipListKey *pKey) { -// SSkipListNode *forward[MAX_SKIP_LIST_LEVEL] = {0}; -// __compar_fn_t filterComparFn = getComparFunc(pSkipList, pKey->nType); -// -// pthread_rwlock_rdlock(&pSkipList->lock); -// -// SSkipListNode *x = &pSkipList->pHead; -// for (int32_t i = pSkipList->level - 1; i >= 0; --i) { -// while (x->pForward[i] != NULL && (filterComparFn(&x->pForward[i]->key, pKey) < 0)) { -// x = x->pForward[i]; -// } -// forward[i] = x; -// } -// -// bool ret = removeSupport(pSkipList, forward, pKey); -// pthread_rwlock_unlock(&pSkipList->lock); -// -// return ret; -//} diff --git a/src/util/tests/arrayTest.cpp b/src/util/tests/arrayTest.cpp new file mode 100644 index 0000000000..94b08ca6d7 --- /dev/null +++ b/src/util/tests/arrayTest.cpp @@ -0,0 +1,50 @@ +#include +#include +#include +#include + +#include "tarray.h" + +namespace { + +static void remove_batch_test() { + SArray *pa = (SArray*) taosArrayInit(4, sizeof(int32_t)); + + for(int32_t i = 0; i < 20; ++i) { + int32_t a = i; + taosArrayPush(pa, &a); + } + + SArray* delList = (SArray*)taosArrayInit(4, sizeof(int32_t)); + taosArrayRemoveBatch(pa, (const int32_t*) TARRAY_GET_START(delList), taosArrayGetSize(delList)); + EXPECT_EQ(taosArrayGetSize(pa), 20); + + int32_t a = 5; + taosArrayPush(delList, &a); + + taosArrayRemoveBatch(pa, (const int32_t*) TARRAY_GET_START(delList), taosArrayGetSize(delList)); + EXPECT_EQ(taosArrayGetSize(pa), 19); + EXPECT_EQ(*(int*)taosArrayGet(pa, 5), 6); + + taosArrayInsert(pa, 5, &a); + EXPECT_EQ(taosArrayGetSize(pa), 20); + EXPECT_EQ(*(int*)taosArrayGet(pa, 5), 5); + + taosArrayClear(delList); + + a = 6; + taosArrayPush(delList, &a); + + a = 9; + taosArrayPush(delList, &a); + + a = 14; + taosArrayPush(delList, &a); + taosArrayRemoveBatch(pa, (const int32_t*) TARRAY_GET_START(delList), taosArrayGetSize(delList)); + EXPECT_EQ(taosArrayGetSize(pa), 17); +} +} // namespace + +TEST(arrayTest, array_list_test) { + remove_batch_test(); +} diff --git a/src/util/tests/cacheTest.cpp b/src/util/tests/cacheTest.cpp index 0a4791f6a9..970f1c23a9 
100644 --- a/src/util/tests/cacheTest.cpp +++ b/src/util/tests/cacheTest.cpp @@ -5,10 +5,6 @@ #include "taos.h" #include "tcache.h" -namespace { -int32_t tsMaxMgmtConnections = 10000; -int32_t tsMaxMeterConnections = 200; -} // test cache TEST(testCase, client_cache_test) { const int32_t REFRESH_TIME_IN_SEC = 2; @@ -43,7 +39,7 @@ TEST(testCase, client_cache_test) { sleep(3); char* d = (char*) taosCacheAcquireByKey(tscMetaCache, key3, strlen(key3)); -// assert(d == NULL); + assert(d == NULL); char key5[] = "test5"; char data5[] = "data5kkkkk"; @@ -102,7 +98,7 @@ TEST(testCase, cache_resize_test) { char key[256] = {0}; char data[1024] = "abcdefghijk"; - int32_t len = strlen(data); +// int32_t len = strlen(data); uint64_t startTime = taosGetTimestampUs(); int32_t num = 10000; diff --git a/src/util/tests/skiplistTest.cpp b/src/util/tests/skiplistTest.cpp index 2203ae8e4f..dfbe0f6716 100644 --- a/src/util/tests/skiplistTest.cpp +++ b/src/util/tests/skiplistTest.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include "os.h" @@ -8,12 +9,14 @@ #include "tskiplist.h" #include "tutil.h" +#if 0 namespace { char* getkey(const void* data) { return (char*)(data); } void doubleSkipListTest() { - SSkipList* pSkipList = tSkipListCreate(10, TSDB_DATA_TYPE_DOUBLE, sizeof(double), 0, false, true, getkey); + SSkipList* pSkipList = tSkipListCreate(10, TSDB_DATA_TYPE_DOUBLE, sizeof(double), + getKeyComparFunc(TSDB_DATA_TYPE_DOUBLE), false, getkey); double doubleVal[1000] = {0}; int32_t size = 20000; @@ -25,18 +28,15 @@ void doubleSkipListTest() { doubleVal[i] = i * 0.997; } - int32_t level = 0; - int32_t size = 0; +// int32_t level = 0; + size = 0; - tSkipListNewNodeInfo(pSkipList, &level, &size); - auto d = (SSkipListNode*)calloc(1, size + sizeof(double) * 2); - d->level = level; + // tSkipListNewNodeInfo(pSkipList, &level, &size); + // auto d = (SSkipListNode*)calloc(1, size + sizeof(double) * 2); + // d->level = level; - double* key = (double*)SL_GET_NODE_KEY(pSkipList, d); - key[0] = i * 0.997; - key[1] = i * 0.997; - - tSkipListPut(pSkipList, d); + double key = 0.997; + tSkipListPut(pSkipList, &key); } printf("the first level of skip list is:\n"); @@ -70,7 +70,8 @@ void doubleSkipListTest() { } void randKeyTest() { - SSkipList* pSkipList = tSkipListCreate(10, TSDB_DATA_TYPE_INT, sizeof(int32_t), 0, false, true, getkey); + SSkipList* pSkipList = tSkipListCreate(10, TSDB_DATA_TYPE_INT, sizeof(int32_t), getKeyComparFunc(TSDB_DATA_TYPE_INT), + false, getkey); int32_t size = 200000; srand(time(NULL)); @@ -375,3 +376,5 @@ TEST(testCase, skiplist_test) { free(pKeys);*/ } + +#endif \ No newline at end of file diff --git a/src/util/tests/trefTest.c b/src/util/tests/trefTest.c index 454860410b..fe3dcab201 100644 --- a/src/util/tests/trefTest.c +++ b/src/util/tests/trefTest.c @@ -14,18 +14,18 @@ typedef struct { int refNum; int steps; int rsetId; - int64_t rid; + int64_t*rid; void **p; } SRefSpace; void iterateRefs(int rsetId) { int count = 0; - void *p = taosIterateRef(rsetId, NULL); + void *p = taosIterateRef(rsetId, 0); while (p) { // process P count++; - p = taosIterateRef(rsetId, p); + p = taosIterateRef(rsetId, (int64_t) p); } printf(" %d ", count); @@ -34,7 +34,8 @@ void iterateRefs(int rsetId) { void *addRef(void *param) { SRefSpace *pSpace = (SRefSpace *)param; int id; - int64_t rid; + + setThreadName("addRef"); for (int i=0; i < pSpace->steps; ++i) { printf("a"); @@ -51,8 +52,9 @@ void *addRef(void *param) { void *removeRef(void *param) { SRefSpace *pSpace = (SRefSpace *)param; - int id; - 
int64_t rid; + int id, code; + + setThreadName("removeRef"); for (int i=0; i < pSpace->steps; ++i) { printf("d"); @@ -71,16 +73,17 @@ void *removeRef(void *param) { void *acquireRelease(void *param) { SRefSpace *pSpace = (SRefSpace *)param; int id; - int64_t rid; + + setThreadName("acquireRelease"); for (int i=0; i < pSpace->steps; ++i) { printf("a"); id = random() % pSpace->refNum; - void *p = taosAcquireRef(pSpace->rsetId, pSpace->p[id]); + void *p = taosAcquireRef(pSpace->rsetId, (int64_t) pSpace->p[id]); if (p) { usleep(id % 5 + 1); - taosReleaseRef(pSpace->rsetId, pSpace->p[id]); + taosReleaseRef(pSpace->rsetId, (int64_t) pSpace->p[id]); } } @@ -94,6 +97,8 @@ void myfree(void *p) { void *openRefSpace(void *param) { SRefSpace *pSpace = (SRefSpace *)param; + setThreadName("openRefSpace"); + printf("c"); pSpace->rsetId = taosOpenRef(50, myfree); @@ -103,6 +108,7 @@ void *openRefSpace(void *param) { } pSpace->p = (void **) calloc(sizeof(void *), pSpace->refNum); + pSpace->rid = calloc(pSpace->refNum, sizeof(int64_t)); pthread_attr_t thattr; pthread_attr_init(&thattr); diff --git a/src/vnode/src/vnodeBackup.c b/src/vnode/src/vnodeBackup.c index a0a975be2b..801af42e0e 100644 --- a/src/vnode/src/vnodeBackup.c +++ b/src/vnode/src/vnodeBackup.c @@ -61,6 +61,8 @@ static void vnodeProcessBackupMsg(SVBackupMsg *pMsg) { } static void *vnodeBackupFunc(void *param) { + setThreadName("vnodeBackup"); + while (1) { SVBackupMsg *pMsg = NULL; if (taosReadQitemFromQset(tsVBackupQset, NULL, (void **)&pMsg, NULL) == 0) { diff --git a/src/vnode/src/vnodeWorker.c b/src/vnode/src/vnodeWorker.c index 6fb79d10fe..e94c99cbea 100644 --- a/src/vnode/src/vnodeWorker.c +++ b/src/vnode/src/vnodeWorker.c @@ -188,6 +188,8 @@ static void vnodeProcessMWorkerMsg(SVMWorkerMsg *pMsg) { } static void *vnodeMWorkerFunc(void *param) { + setThreadName("vnodeMWorker"); + while (1) { SVMWorkerMsg *pMsg = NULL; if (taosReadQitemFromQset(tsVMWorkerQset, NULL, (void **)&pMsg, NULL) == 0) { diff --git a/src/wal/src/walMgmt.c b/src/wal/src/walMgmt.c index 9bd5cdf175..45f65b2c2f 100644 --- a/src/wal/src/walMgmt.c +++ b/src/wal/src/walMgmt.c @@ -192,6 +192,7 @@ static void walFsyncAll() { static void *walThreadFunc(void *param) { int stop = 0; + setThreadName("walThrd"); while (1) { walUpdateSeq(); walFsyncAll(); diff --git a/tests/examples/lua/luaconnector.so b/tests/examples/lua/luaconnector.so new file mode 100755 index 0000000000..08bf6a6156 Binary files /dev/null and b/tests/examples/lua/luaconnector.so differ diff --git a/tests/perftest-scripts/perftest-query.sh b/tests/perftest-scripts/perftest-query.sh index bf62f401bf..bcc944dadb 100755 --- a/tests/perftest-scripts/perftest-query.sh +++ b/tests/perftest-scripts/perftest-query.sh @@ -1,5 +1,21 @@ #!/bin/bash +branch= +if [ x$1 != x ];then + branch=$1 + echo "Testing branch: $branch" +else + echo "Please enter branch name as a parameter" + exit 1 +fi +jemalloc= +if [ x$2 != x ];then + jemalloc=jemalloc + echo "Building TDengine using jemalloc" +else + echo "Building TDengine using glibc" +fi + today=`date +"%Y%m%d"` WORK_DIR=/home/ubuntu/pxiao PERFORMANCE_TEST_REPORT=$WORK_DIR/TDengine/tests/performance-test-report-$today.log @@ -40,8 +56,8 @@ function buildTDengine { git remote update > /dev/null git reset --hard HEAD - git checkout master - REMOTE_COMMIT=`git rev-parse --short remotes/origin/master` + git checkout $branch + REMOTE_COMMIT=`git rev-parse --short remotes/origin/$branch` LOCAL_COMMIT=`git rev-parse --short @` echo " LOCAL: $LOCAL_COMMIT" @@ -53,9 +69,20 @@ function 
buildTDengine { git pull > /dev/null 2>&1 LOCAL_COMMIT=`git rev-parse --short @` + if [ $jemalloc = "jemalloc" ];then + echo "git submodule update --init --recursive" + git submodule update --init --recursive + fi + cd debug rm -rf * - cmake .. > /dev/null + + if [ $jemalloc = "jemalloc" ];then + echo "cmake .. -DJEMALLOC_ENABLED=true > /dev/null" + cmake .. -DJEMALLOC_ENABLED=true > /dev/null + else + cmake .. > /dev/null + fi make && make install > /dev/null fi } @@ -95,7 +122,8 @@ function sendReport { sed -i 's/\x1b\[[0-9;]*m//g' $PERFORMANCE_TEST_REPORT BODY_CONTENT=`cat $PERFORMANCE_TEST_REPORT` - echo -e "From: \nto: ${receiver}\nsubject: Query Performace Report ${today}, commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \ + + echo -e "From: \nto: ${receiver}\nsubject: Query Performace Report ${branch} ${jemalloc} ${today}, commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \ (cat - && uuencode $PERFORMANCE_TEST_REPORT performance-test-report-$today.log) | \ /usr/sbin/ssmtp "${receiver}" && echo "Report Sent!" } diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index ac22d3ee4a..c9a41a929b 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -241,7 +241,7 @@ python3 ./test.py -f query/queryStateWindow.py python3 ./test.py -f query/nestedQuery/queryWithOrderLimit.py python3 ./test.py -f query/nestquery_last_row.py python3 ./test.py -f query/queryCnameDisplay.py - +python3 test.py -f query/nestedQuery/queryWithSpread.py #stream python3 ./test.py -f stream/metric_1.py @@ -352,7 +352,7 @@ python3 ./test.py -f alter/alter_debugFlag.py python3 ./test.py -f query/queryBetweenAnd.py python3 ./test.py -f tag_lite/alter_tag.py -python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py +# python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py python3 test.py -f tools/taosdemoAllTest/TD-4985/query-limit-offset.py python3 ./test.py -f tag_lite/drop_auto_create.py @@ -361,4 +361,6 @@ python3 test.py -f alter/alter_keep.py python3 test.py -f alter/alter_cacheLastRow.py python3 ./test.py -f query/querySession.py python3 test.py -f alter/alter_create_exception.py + +python3 ./test.py -f insert/flushwhiledrop.py #======================p4-end=============== diff --git a/tests/pytest/insert/flushwhiledrop.py b/tests/pytest/insert/flushwhiledrop.py new file mode 100644 index 0000000000..3784657117 --- /dev/null +++ b/tests/pytest/insert/flushwhiledrop.py @@ -0,0 +1,67 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import threading +from util.log import * +from util.cases import * +from util.sql import * +from time import sleep + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.numberOfRecords = 15000 + self.ts = 1601481600000 + + def run(self): + tdSql.execute('create database test cache 1 blocks 3') + tdSql.execute('use test') + tdSql.execute('create table tb(ts timestamp, c1 timestamp, c2 int, c3 bigint, c4 float, c5 double, c6 binary(8), c7 smallint, c8 tinyint, c9 bool, c10 nchar(8))') + threads = [] + t1 = threading.Thread(target=self.insertAndFlush, args=()) + threads.append(t1) + t2 = threading.Thread(target=self.drop, args=()) + threads.append(t2) + for t in threads: + t.setDaemon(True) + t.start() + for t in threads: + t.join() + + def insertAndFlush(self): + finish = 0 + currts = self.ts + + while(finish < self.numberOfRecords): + sql = "insert into tb values" + for i in range(finish, self.numberOfRecords): + sql += "(%d, 1019774612, 29931, 1442173978, 165092.468750, 1128.643179, 'MOCq1pTu', 18405, 82, 0, 'g0A6S0Fu')" % (currts + i) + finish = i + 1 + if (1048576 - len(sql)) < 16384: + break + tdSql.execute(sql) + + def drop(self): + sleep(30) + tdSql.execute('drop database test') + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/query/nestedQuery/queryWithSpread.py b/tests/pytest/query/nestedQuery/queryWithSpread.py new file mode 100644 index 0000000000..4e09b1b826 --- /dev/null +++ b/tests/pytest/query/nestedQuery/queryWithSpread.py @@ -0,0 +1,46 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + tdSql.execute('create table stb (ts timestamp, speed int) tags(t1 int);') + + for i in range(10): + tdSql.execute(f'create table tb_{i} using stb tags({i});') + tdSql.execute(f'insert into tb_{i} values(now, 0)') + tdSql.execute(f'insert into tb_{i} values(now+10s, {i})') + + tdSql.query('select max(col3) from (select spread(speed) as col3 from stb group by tbname);') + tdSql.checkData(0,0,'9.0') + tdSql.error('select max(col3) from (select spread(col3) as col3 from stb group by tbname);') + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/query/nestquery_last_row.py b/tests/pytest/query/nestquery_last_row.py index a04cb173af..3c4ada5174 100644 --- a/tests/pytest/query/nestquery_last_row.py +++ b/tests/pytest/query/nestquery_last_row.py @@ -160,22 +160,23 @@ class TDTestCase: tdSql.checkData(0,9,9.000000000) tdSql.checkData(0,10,'2020-09-13 20:26:40.009') + # incorrect result, not support nest > 2 sql = '''select last_row(*) from ((select * from table_0) union all (select * from table_1) union all (select * from table_2));''' - tdSql.query(sql) - tdSql.checkRows(1) - tdSql.checkData(0,1,self.num-1) - tdSql.checkData(0,2,self.num-1) - tdSql.checkData(0,3,self.num-1) - tdSql.checkData(0,4,self.num-1) - tdSql.checkData(0,5,'False') - tdSql.checkData(0,6,'binary.9') - tdSql.checkData(0,7,'nchar.9') - tdSql.checkData(0,8,9.00000) - tdSql.checkData(0,9,9.000000000) - tdSql.checkData(0,10,'2020-09-13 20:26:40.009') + tdSql.error(sql) + #tdSql.checkRows(1) + #tdSql.checkData(0,1,self.num-1) + #tdSql.checkData(0,2,self.num-1) + #tdSql.checkData(0,3,self.num-1) + #tdSql.checkData(0,4,self.num-1) + #tdSql.checkData(0,5,'False') + #tdSql.checkData(0,6,'binary.9') + #tdSql.checkData(0,7,'nchar.9') + #tdSql.checkData(0,8,9.00000) + #tdSql.checkData(0,9,9.000000000) + #tdSql.checkData(0,10,'2020-09-13 20:26:40.009') # bug 5055 # sql = '''select last_row(*) from @@ -189,18 +190,18 @@ class TDTestCase: ((select last_row(*) from table_0) union all (select last_row(*) from table_1) union all (select last_row(*) from table_2));''' - tdSql.query(sql) - tdSql.checkRows(1) - tdSql.checkData(0,1,self.num-1) - tdSql.checkData(0,2,self.num-1) - tdSql.checkData(0,3,self.num-1) - tdSql.checkData(0,4,self.num-1) - tdSql.checkData(0,5,'False') - tdSql.checkData(0,6,'binary.9') - tdSql.checkData(0,7,'nchar.9') - tdSql.checkData(0,8,9.00000) - tdSql.checkData(0,9,9.000000000) - tdSql.checkData(0,10,'2020-09-13 20:26:40.009') + tdSql.error(sql) + #tdSql.checkRows(1) + #tdSql.checkData(0,1,self.num-1) + #tdSql.checkData(0,2,self.num-1) + #tdSql.checkData(0,3,self.num-1) + #tdSql.checkData(0,4,self.num-1) + #tdSql.checkData(0,5,'False') + #tdSql.checkData(0,6,'binary.9') + #tdSql.checkData(0,7,'nchar.9') + #tdSql.checkData(0,8,9.00000) + #tdSql.checkData(0,9,9.000000000) + #tdSql.checkData(0,10,'2020-09-13 20:26:40.009') 
# bug 5055 # sql = '''select last_row(*) from @@ -214,18 +215,18 @@ class TDTestCase: ((select * from table_0 limit 5 offset 5) union all (select * from table_1 limit 5 offset 5) union all (select * from regular_table_1 limit 5 offset 5));''' - tdSql.query(sql) - tdSql.checkRows(1) - tdSql.checkData(0,1,self.num-1) - tdSql.checkData(0,2,self.num-1) - tdSql.checkData(0,3,self.num-1) - tdSql.checkData(0,4,self.num-1) - tdSql.checkData(0,5,'False') - tdSql.checkData(0,6,'binary.9') - tdSql.checkData(0,7,'nchar.9') - tdSql.checkData(0,8,9.00000) - tdSql.checkData(0,9,9.000000000) - tdSql.checkData(0,10,'2020-09-13 20:26:40.009') + tdSql.error(sql) + #tdSql.checkRows(1) + #tdSql.checkData(0,1,self.num-1) + #tdSql.checkData(0,2,self.num-1) + #tdSql.checkData(0,3,self.num-1) + #tdSql.checkData(0,4,self.num-1) + #tdSql.checkData(0,5,'False') + #tdSql.checkData(0,6,'binary.9') + #tdSql.checkData(0,7,'nchar.9') + #tdSql.checkData(0,8,9.00000) + #tdSql.checkData(0,9,9.000000000) + #tdSql.checkData(0,10,'2020-09-13 20:26:40.009') sql = '''select last_row(*) from @@ -260,4 +261,4 @@ class TDTestCase: tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/query/query1970YearsAf.py b/tests/pytest/query/query1970YearsAf.py index 93404afd59..a365369b21 100644 --- a/tests/pytest/query/query1970YearsAf.py +++ b/tests/pytest/query/query1970YearsAf.py @@ -187,19 +187,19 @@ class TDTestCase: "select * from t9 where t9.ts > '1969-12-31 22:00:00.000' and t9.ts <'1970-01-01 02:00:00.000' " ) tdSql.checkRows(719) - + tdSql.query( "select * from t0,t1 where t0.ts=t1.ts and t1.ts >= '1970-01-01 00:00:00.000' " ) tdSql.checkRows(680) - + tdSql.query( - "select diff(col1) from t0 where t0.ts >= '1970-01-01 00:00:00.000' " + "select diff(c1) from t0 where t0.ts >= '1970-01-01 00:00:00.000' " ) tdSql.checkRows(679) tdSql.query( - "select t0,col1 from stb2 where stb2.ts < '1970-01-01 00:00:00.000' order by ts" + "select t0,c1 from stb2 where stb2.ts < '1970-01-01 00:00:00.000' order by ts" ) tdSql.checkRows(43200) diff --git a/tests/pytest/query/queryPerformance.py b/tests/pytest/query/queryPerformance.py index 742a3c2cd1..2aa760624f 100644 --- a/tests/pytest/query/queryPerformance.py +++ b/tests/pytest/query/queryPerformance.py @@ -73,10 +73,11 @@ class taosdemoQueryPerformace: sql = "select avg(f1), max(f2), min(f3) from meters where ts <= '2017-07-15 10:40:01.000' and ts <= '2017-07-15 14:00:40.000'" tableid = 8 cursor.execute("create table if not exists %s%d using %s tags(%d, \"%s\")" % (self.tbPerfix, tableid, self.stbName, tableid, sql)) - + sql = "select last(*) from meters" tableid = 9 cursor.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql)) + cursor.close() def query(self): diff --git a/tests/pytest/table/max_table_length.py b/tests/pytest/table/max_table_length.py index ec34f3008f..366e20456d 100644 --- a/tests/pytest/table/max_table_length.py +++ b/tests/pytest/table/max_table_length.py @@ -42,7 +42,7 @@ class TDTestCase: print("==============step3") tdLog.info("check int & binary") - tdSql.error("create table anal2 (ts timestamp ,i binary(16371),j int)") + # tdSql.error("create table anal2 (ts timestamp ,i binary(16371),j int)") tdSql.execute("create table anal2 (ts timestamp ,i binary(16370),j int)") tdSql.execute("create table anal3 (ts timestamp ,i binary(16366), j int, k int)") diff --git 
a/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py index 081057f180..4edef88cf1 100644 --- a/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py +++ b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py @@ -23,7 +23,7 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) - + def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -39,7 +39,7 @@ class TDTestCase: buildPath = root[:len(root)-len("/build/bin")] break return buildPath - + def run(self): buildPath = self.getBuildPath() if (buildPath == ""): @@ -48,7 +48,7 @@ class TDTestCase: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath+ "/build/bin/" - # insert: create one or mutiple tables per sql and insert multiple rows per sql + # insert: create one or mutiple tables per sql and insert multiple rows per sql # test case for https://jira.taosdata.com:18080/browse/TD-4985 os.system("%staosdemo -f tools/taosdemoAllTest/TD-4985/query-limit-offset.json -y " % binPath) tdSql.execute("use db") @@ -56,27 +56,27 @@ class TDTestCase: tdSql.checkData(0, 0, 10000) for i in range(1000): - tdSql.execute('''insert into stb00_9999 values(%d, %d, %d,'test99.%s')''' + tdSql.execute('''insert into stb00_9999 values(%d, %d, %d,'test99.%s')''' % (1600000000000 + i, i, -10000+i, i)) - tdSql.execute('''insert into stb00_8888 values(%d, %d, %d,'test98.%s')''' + tdSql.execute('''insert into stb00_8888 values(%d, %d, %d,'test98.%s')''' % (1600000000000 + i, i, -10000+i, i)) - tdSql.execute('''insert into stb00_7777 values(%d, %d, %d,'test97.%s')''' + tdSql.execute('''insert into stb00_7777 values(%d, %d, %d,'test97.%s')''' % (1600000000000 + i, i, -10000+i, i)) - tdSql.execute('''insert into stb00_6666 values(%d, %d, %d,'test96.%s')''' + tdSql.execute('''insert into stb00_6666 values(%d, %d, %d,'test96.%s')''' % (1600000000000 + i, i, -10000+i, i)) - tdSql.execute('''insert into stb00_5555 values(%d, %d, %d,'test95.%s')''' + tdSql.execute('''insert into stb00_5555 values(%d, %d, %d,'test95.%s')''' % (1600000000000 + i, i, -10000+i, i)) - tdSql.execute('''insert into stb00_4444 values(%d, %d, %d,'test94.%s')''' + tdSql.execute('''insert into stb00_4444 values(%d, %d, %d,'test94.%s')''' % (1600000000000 + i, i, -10000+i, i)) - tdSql.execute('''insert into stb00_3333 values(%d, %d, %d,'test93.%s')''' + tdSql.execute('''insert into stb00_3333 values(%d, %d, %d,'test93.%s')''' % (1600000000000 + i, i, -10000+i, i)) - tdSql.execute('''insert into stb00_2222 values(%d, %d, %d,'test92.%s')''' + tdSql.execute('''insert into stb00_2222 values(%d, %d, %d,'test92.%s')''' % (1600000000000 + i, i, -10000+i, i)) - tdSql.execute('''insert into stb00_1111 values(%d, %d, %d,'test91.%s')''' + tdSql.execute('''insert into stb00_1111 values(%d, %d, %d,'test91.%s')''' % (1600000000000 + i, i, -10000+i, i)) - tdSql.execute('''insert into stb00_100 values(%d, %d, %d,'test90.%s')''' + tdSql.execute('''insert into stb00_100 values(%d, %d, %d,'test90.%s')''' % (1600000000000 + i, i, -10000+i, i)) - tdSql.query("select * from stb0 where col2 like 'test99%' ") + tdSql.query("select * from stb0 where c2 like 'test99%' ") tdSql.checkRows(1000) tdSql.query("select * from stb0 where tbname like 'stb00_9999' limit 10" ) tdSql.checkData(0, 1, 0) @@ -86,7 +86,7 @@ class TDTestCase: tdSql.checkData(0, 1, 5) tdSql.checkData(1, 1, 6) tdSql.checkData(2, 1, 7) - 
tdSql.query("select * from stb0 where col2 like 'test98%' ") + tdSql.query("select * from stb0 where c2 like 'test98%' ") tdSql.checkRows(1000) tdSql.query("select * from stb0 where tbname like 'stb00_8888' limit 10" ) tdSql.checkData(0, 1, 0) @@ -96,7 +96,7 @@ class TDTestCase: tdSql.checkData(0, 1, 5) tdSql.checkData(1, 1, 6) tdSql.checkData(2, 1, 7) - tdSql.query("select * from stb0 where col2 like 'test97%' ") + tdSql.query("select * from stb0 where c2 like 'test97%' ") tdSql.checkRows(1000) tdSql.query("select * from stb0 where tbname like 'stb00_7777' limit 10" ) tdSql.checkData(0, 1, 0) @@ -106,7 +106,7 @@ class TDTestCase: tdSql.checkData(0, 1, 5) tdSql.checkData(1, 1, 6) tdSql.checkData(2, 1, 7) - tdSql.query("select * from stb0 where col2 like 'test96%' ") + tdSql.query("select * from stb0 where c2 like 'test96%' ") tdSql.checkRows(1000) tdSql.query("select * from stb0 where tbname like 'stb00_6666' limit 10" ) tdSql.checkData(0, 1, 0) @@ -116,7 +116,7 @@ class TDTestCase: tdSql.checkData(0, 1, 5) tdSql.checkData(1, 1, 6) tdSql.checkData(2, 1, 7) - tdSql.query("select * from stb0 where col2 like 'test95%' ") + tdSql.query("select * from stb0 where c2 like 'test95%' ") tdSql.checkRows(1000) tdSql.query("select * from stb0 where tbname like 'stb00_5555' limit 10" ) tdSql.checkData(0, 1, 0) @@ -126,7 +126,7 @@ class TDTestCase: tdSql.checkData(0, 1, 5) tdSql.checkData(1, 1, 6) tdSql.checkData(2, 1, 7) - tdSql.query("select * from stb0 where col2 like 'test94%' ") + tdSql.query("select * from stb0 where c2 like 'test94%' ") tdSql.checkRows(1000) tdSql.query("select * from stb0 where tbname like 'stb00_4444' limit 10" ) tdSql.checkData(0, 1, 0) @@ -136,7 +136,7 @@ class TDTestCase: tdSql.checkData(0, 1, 5) tdSql.checkData(1, 1, 6) tdSql.checkData(2, 1, 7) - tdSql.query("select * from stb0 where col2 like 'test93%' ") + tdSql.query("select * from stb0 where c2 like 'test93%' ") tdSql.checkRows(1000) tdSql.query("select * from stb0 where tbname like 'stb00_3333' limit 100" ) tdSql.checkData(0, 1, 0) @@ -146,7 +146,7 @@ class TDTestCase: tdSql.checkData(0, 1, 5) tdSql.checkData(1, 1, 6) tdSql.checkData(2, 1, 7) - tdSql.query("select * from stb0 where col2 like 'test92%' ") + tdSql.query("select * from stb0 where c2 like 'test92%' ") tdSql.checkRows(1000) tdSql.query("select * from stb0 where tbname like 'stb00_2222' limit 100" ) tdSql.checkData(0, 1, 0) @@ -156,7 +156,7 @@ class TDTestCase: tdSql.checkData(0, 1, 5) tdSql.checkData(1, 1, 6) tdSql.checkData(2, 1, 7) - tdSql.query("select * from stb0 where col2 like 'test91%' ") + tdSql.query("select * from stb0 where c2 like 'test91%' ") tdSql.checkRows(1000) tdSql.query("select * from stb0 where tbname like 'stb00_1111' limit 100" ) tdSql.checkData(0, 1, 0) @@ -166,7 +166,7 @@ class TDTestCase: tdSql.checkData(0, 1, 5) tdSql.checkData(1, 1, 6) tdSql.checkData(2, 1, 7) - tdSql.query("select * from stb0 where col2 like 'test90%' ") + tdSql.query("select * from stb0 where c2 like 'test90%' ") tdSql.checkRows(1000) tdSql.query("select * from stb0 where tbname like 'stb00_100' limit 100" ) tdSql.checkData(0, 1, 0) @@ -178,10 +178,10 @@ class TDTestCase: tdSql.checkData(2, 1, 7) - os.system("rm -rf tools/taosdemoAllTest/TD-4985/query-limit-offset.py.sql") - - - + os.system("rm -rf tools/taosdemoAllTest/TD-4985/query-limit-offset.py.sql") + + + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv 
b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv new file mode 100755 index 0000000000..078c3b93df --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv @@ -0,0 +1,10 @@ +0,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +1,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +2,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 
+3,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +4,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +5,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 
+6,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +7,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +8,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 
+9,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json new file mode 100755 index 0000000000..e2b15b83a7 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json @@ -0,0 +1,112 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1, + "max_sql_len": 102400000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb_old", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb_old_", + "auto_create_table": "no", + "batch_create_tbl_num": 5, + "data_source": "sample", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv", + "tags_file": "", + "columns": [{"type": "INT","count":4000}, {"type": "BINARY", "len": 16, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + },{ + "name": "stb_new", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb_new_", + "auto_create_table": "no", + "batch_create_tbl_num": 5, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/sample.csv", + "tags_file": "", + "columns": [{"type": "DOUBLE","count":1020}], 
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + },{ + "name": "stb_int", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb_int_", + "auto_create_table": "no", + "batch_create_tbl_num": 5, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/sample.csv", + "tags_file": "", + "columns": [{"type": "int","count":1020}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py new file mode 100755 index 0000000000..78bd0c7e60 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py @@ -0,0 +1,137 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # insert: create one or mutiple tables per sql and insert multiple rows per sql + # test case for https://jira.taosdata.com:18080/browse/TD-5213 + os.system("%staosdemo -f tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json -y " % binPath) + tdSql.execute("use db") + tdSql.query("select count (tbname) from stb_old") + tdSql.checkData(0, 0, 10) + + # tdSql.query("select * from stb_old") + # tdSql.checkRows(10) + # tdSql.checkCols(1024) + + # tdSql.query("select count (tbname) from stb_new") + # tdSql.checkData(0, 0, 10) + + # tdSql.query("select * from stb_new") + # tdSql.checkRows(10) + # tdSql.checkCols(4096) + + # tdLog.info("stop dnode to commit data to disk") + # tdDnodes.stop(1) + # tdDnodes.start(1) + + #regular table + sql = "create table tb(ts timestamp, " + for i in range(1022): + sql += "c%d binary(14), " % (i + 1) + sql += "c1023 binary(22))" + tdSql.execute(sql) + + for i in range(4): + sql = "insert into tb 
values(%d, " + for j in range(1022): + str = "'%s', " % self.get_random_string(14) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i)) + + time.sleep(10) + tdSql.query("select count(*) from tb") + tdSql.checkData(0, 0, 4) + + tdDnodes.stop(1) + tdDnodes.start(1) + + time.sleep(1) + tdSql.query("select count(*) from tb") + tdSql.checkData(0, 0, 4) + + + sql = "create table tb1(ts timestamp, " + for i in range(4094): + sql += "c%d binary(14), " % (i + 1) + sql += "c4095 binary(22))" + tdSql.execute(sql) + + for i in range(4): + sql = "insert into tb1 values(%d, " + for j in range(4094): + str = "'%s', " % self.get_random_string(14) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i)) + + time.sleep(10) + tdSql.query("select count(*) from tb1") + tdSql.checkData(0, 0, 4) + + tdDnodes.stop(1) + tdDnodes.start(1) + + time.sleep(1) + tdSql.query("select count(*) from tb1") + tdSql.checkData(0, 0, 4) + + + + #os.system("rm -rf tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py.sql") + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py index a4c4b37b50..c37ab8a5e3 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py @@ -23,7 +23,7 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) - + def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -39,7 +39,7 @@ class TDTestCase: buildPath = root[:len(root)-len("/build/bin")] break return buildPath - + def run(self): buildPath = self.getBuildPath() if (buildPath == ""): @@ -48,7 +48,7 @@ class TDTestCase: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath+ "/build/bin/" - # insert: create one or mutiple tables per sql and insert multiple rows per sql + # insert: create one or mutiple tables per sql and insert multiple rows per sql os.system("%staosdemo -f tools/taosdemoAllTest/insert-1s1tnt1r.json -y " % binPath) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") @@ -62,7 +62,7 @@ class TDTestCase: tdSql.query("select count(*) from stb01_1") tdSql.checkData(0, 0, 200) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 200000) + tdSql.checkData(0, 0, 200000) # restful connector insert data os.system("%staosdemo -f tools/taosdemoAllTest/insertRestful.json -y " % binPath) @@ -81,7 +81,7 @@ class TDTestCase: tdSql.checkData(0, 0, 200) - # insert: create mutiple tables per sql and insert one rows per sql . + # insert: create mutiple tables per sql and insert one rows per sql . 
os.system("%staosdemo -f tools/taosdemoAllTest/insert-1s1tntmr.json -y " % binPath) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") @@ -89,34 +89,34 @@ class TDTestCase: tdSql.query("select count (tbname) from stb1") tdSql.checkData(0, 0, 20) tdSql.query("select count(*) from stb00_0") - tdSql.checkData(0, 0, 10000) + tdSql.checkData(0, 0, 10000) tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 100000) + tdSql.checkData(0, 0, 100000) tdSql.query("select count(*) from stb01_0") - tdSql.checkData(0, 0, 20000) + tdSql.checkData(0, 0, 20000) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 400000) + tdSql.checkData(0, 0, 400000) - # insert: using parament "insert_interval to controls spped of insert. + # insert: using parament "insert_interval to controls spped of insert. # but We need to have accurate methods to control the speed, such as getting the speed value, checking the count and so on。 os.system("%staosdemo -f tools/taosdemoAllTest/insert-interval-speed.json -y" % binPath) tdSql.execute("use db") tdSql.query("show stables") tdSql.checkData(0, 4, 100) tdSql.query("select count(*) from stb00_0") - tdSql.checkData(0, 0, 20000) + tdSql.checkData(0, 0, 20000) tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 2000000) + tdSql.checkData(0, 0, 2000000) tdSql.query("show stables") tdSql.checkData(1, 4, 100) tdSql.query("select count(*) from stb01_0") - tdSql.checkData(0, 0, 20000) + tdSql.checkData(0, 0, 20000) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 2000000) - + tdSql.checkData(0, 0, 2000000) + # spend 2min30s for 3 testcases. # insert: drop and child_table_exists combination test - # insert: using parament "childtable_offset and childtable_limit" to control table'offset point and offset + # insert: using parament "childtable_offset and childtable_limit" to control table'offset point and offset os.system("%staosdemo -f tools/taosdemoAllTest/insert-nodbnodrop.json -y" % binPath) tdSql.error("show dbno.stables") os.system("%staosdemo -f tools/taosdemoAllTest/insert-newdb.json -y" % binPath) @@ -128,41 +128,41 @@ class TDTestCase: tdSql.query("select count (tbname) from stb2") tdSql.checkData(0, 0, 7) tdSql.query("select count (tbname) from stb3") - tdSql.checkData(0, 0, 8) + tdSql.checkData(0, 0, 8) tdSql.query("select count (tbname) from stb4") - tdSql.checkData(0, 0, 8) + tdSql.checkData(0, 0, 8) os.system("%staosdemo -f tools/taosdemoAllTest/insert-offset.json -y" % binPath) - tdSql.execute("use db") + tdSql.execute("use db") tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 50) + tdSql.checkData(0, 0, 50) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 240) + tdSql.checkData(0, 0, 240) tdSql.query("select count(*) from stb2") - tdSql.checkData(0, 0, 220) + tdSql.checkData(0, 0, 220) tdSql.query("select count(*) from stb3") tdSql.checkData(0, 0, 180) tdSql.query("select count(*) from stb4") tdSql.checkData(0, 0, 160) os.system("%staosdemo -f tools/taosdemoAllTest/insert-newtable.json -y" % binPath) - tdSql.execute("use db") + tdSql.execute("use db") tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 150) + tdSql.checkData(0, 0, 150) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 360) + tdSql.checkData(0, 0, 360) tdSql.query("select count(*) from stb2") - tdSql.checkData(0, 0, 360) + tdSql.checkData(0, 0, 360) tdSql.query("select count(*) from stb3") tdSql.checkData(0, 0, 340) tdSql.query("select count(*) from stb4") tdSql.checkData(0, 
0, 400) os.system("%staosdemo -f tools/taosdemoAllTest/insert-renewdb.json -y" % binPath) - tdSql.execute("use db") + tdSql.execute("use db") tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 50) + tdSql.checkData(0, 0, 50) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 120) + tdSql.checkData(0, 0, 120) tdSql.query("select count(*) from stb2") - tdSql.checkData(0, 0, 140) + tdSql.checkData(0, 0, 140) tdSql.query("select count(*) from stb3") tdSql.checkData(0, 0, 160) tdSql.query("select count(*) from stb4") @@ -170,59 +170,59 @@ class TDTestCase: # insert: let parament in json file is illegal, it'll expect error. - tdSql.execute("drop database if exists db") + tdSql.execute("drop database if exists db") os.system("%staosdemo -f tools/taosdemoAllTest/insertColumnsAndTagNumLarge1024.json -y " % binPath) tdSql.error("use db") - tdSql.execute("drop database if exists db") + tdSql.execute("drop database if exists db") os.system("%staosdemo -f tools/taosdemoAllTest/insertSigcolumnsNum1024.json -y " % binPath) tdSql.error("select * from db.stb0") - tdSql.execute("drop database if exists db") + tdSql.execute("drop database if exists db") os.system("%staosdemo -f tools/taosdemoAllTest/insertColumnsAndTagNum1024.json -y " % binPath) tdSql.query("select count(*) from db.stb0") - tdSql.checkData(0, 0, 10000) - tdSql.execute("drop database if exists db") + tdSql.checkData(0, 0, 10000) + tdSql.execute("drop database if exists db") os.system("%staosdemo -f tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json -y " % binPath) tdSql.query("select count(*) from db.stb0") tdSql.checkRows(0) - tdSql.execute("drop database if exists db") + tdSql.execute("drop database if exists db") os.system("%staosdemo -f tools/taosdemoAllTest/insertColumnsNum0.json -y " % binPath) - tdSql.execute("use db") + tdSql.execute("use db") tdSql.query("show stables like 'stb0%' ") tdSql.checkData(0, 2, 11) - tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/insertTagsNumLarge128.json -y " % binPath) - tdSql.error("use db1") - tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar16384.json -y " % binPath) - tdSql.query("select count(*) from db.stb0") + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/insertTagsNumLarge128.json -y " % binPath) + tdSql.error("use db1") + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar16384.json -y " % binPath) + tdSql.query("select count(*) from db.stb0") tdSql.checkRows(1) - tdSql.query("select count(*) from db.stb1") + tdSql.query("select count(*) from db.stb1") tdSql.checkRows(1) tdSql.error("select * from db.stb3") tdSql.error("select * from db.stb2") - tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/insertNumOfrecordPerReq0.json -y " % binPath) - tdSql.error("select count(*) from db.stb0") - tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/insertNumOfrecordPerReqless0.json -y " % binPath) - tdSql.error("use db") - tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/insertChildTab0.json -y " % binPath) - tdSql.error("use db") - tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/insertChildTabLess0.json -y " % binPath) - tdSql.error("use db") - 
tdSql.execute("drop database if exists blf") - os.system("%staosdemo -f tools/taosdemoAllTest/insertTimestepMulRowsLargeint16.json -y " % binPath) - tdSql.execute("use blf") - tdSql.query("select ts from blf.p_0_topics_7 limit 262800,1") + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/insertNumOfrecordPerReq0.json -y " % binPath) + tdSql.error("select count(*) from db.stb0") + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/insertNumOfrecordPerReqless0.json -y " % binPath) + tdSql.error("use db") + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/insertChildTab0.json -y " % binPath) + tdSql.error("use db") + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/insertChildTabLess0.json -y " % binPath) + tdSql.error("use db") + tdSql.execute("drop database if exists blf") + os.system("%staosdemo -f tools/taosdemoAllTest/insertTimestepMulRowsLargeint16.json -y " % binPath) + tdSql.execute("use blf") + tdSql.query("select ts from blf.p_0_topics_7 limit 262800,1") tdSql.checkData(0, 0, "2020-03-31 12:00:00.000") tdSql.query("select first(ts) from blf.p_0_topics_2") tdSql.checkData(0, 0, "2019-10-01 00:00:00") - tdSql.query("select last(ts) from blf.p_0_topics_6 ") + tdSql.query("select last(ts) from blf.p_0_topics_6 ") tdSql.checkData(0, 0, "2020-09-29 23:59:00") - os.system("%staosdemo -f tools/taosdemoAllTest/insertMaxNumPerReq.json -y " % binPath) - tdSql.execute("use db") + os.system("%staosdemo -f tools/taosdemoAllTest/insertMaxNumPerReq.json -y " % binPath) + tdSql.execute("use db") tdSql.query("select count(*) from stb0") tdSql.checkData(0, 0, 5000000) tdSql.query("select count(*) from stb1") @@ -230,7 +230,7 @@ class TDTestCase: - # insert: timestamp and step + # insert: timestamp and step os.system("%staosdemo -f tools/taosdemoAllTest/insert-timestep.json -y " % binPath) tdSql.execute("use db") tdSql.query("show stables") @@ -239,13 +239,13 @@ class TDTestCase: tdSql.query("select count (tbname) from stb1") tdSql.checkData(0, 0, 20) tdSql.query("select last(ts) from db.stb00_0") - tdSql.checkData(0, 0, "2020-10-01 00:00:00.019000") + tdSql.checkData(0, 0, "2020-10-01 00:00:00.019000") tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 200) + tdSql.checkData(0, 0, 200) tdSql.query("select last(ts) from db.stb01_0") - tdSql.checkData(0, 0, "2020-11-01 00:00:00.190000") + tdSql.checkData(0, 0, "2020-11-01 00:00:00.190000") tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 400) + tdSql.checkData(0, 0, 400) # # insert: disorder_ratio os.system("%staosdemo -f tools/taosdemoAllTest/insert-disorder.json -g 2>&1 -y " % binPath) @@ -255,14 +255,14 @@ class TDTestCase: tdSql.query("select count (tbname) from stb1") tdSql.checkData(0, 0, 1) tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 10) + tdSql.checkData(0, 0, 10) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 10) + tdSql.checkData(0, 0, 10) # insert: sample json os.system("%staosdemo -f tools/taosdemoAllTest/insert-sample.json -y " % binPath) tdSql.execute("use dbtest123") - tdSql.query("select col2 from stb0") + tdSql.query("select c2 from stb0") tdSql.checkData(0, 0, 2147483647) tdSql.query("select * from stb1 where t1=-127") tdSql.checkRows(20) @@ -271,13 +271,13 @@ class TDTestCase: tdSql.query("select * from stb1 where t2=126") tdSql.checkRows(10) - # insert: test interlace parament + # insert: test 
interlace parament os.system("%staosdemo -f tools/taosdemoAllTest/insert-interlace-row.json -y " % binPath) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") tdSql.checkData(0, 0, 100) tdSql.query("select count (*) from stb0") - tdSql.checkData(0, 0, 15000) + tdSql.checkData(0, 0, 15000) # # insert: auto_create diff --git a/tests/pytest/tools/taosdemoPerformance.py b/tests/pytest/tools/taosdemoPerformance.py index 90d2c52c15..c8293ee31f 100644 --- a/tests/pytest/tools/taosdemoPerformance.py +++ b/tests/pytest/tools/taosdemoPerformance.py @@ -19,7 +19,6 @@ import json from util.log import tdLog from util.sql import tdSql - class taosdemoPerformace: def __init__(self, commitID, dbName): self.commitID = commitID @@ -123,7 +122,7 @@ class taosdemoPerformace: return buildPath def insertData(self): - tdSql.prepare() + buildPath = self.getBuildPath() if (buildPath == ""): tdLog.exit("taosdemo not found!") diff --git a/tests/pytest/tools/taosdemoTest.py b/tests/pytest/tools/taosdemoTest.py index 4cae8dfd3c..5662881031 100644 --- a/tests/pytest/tools/taosdemoTest.py +++ b/tests/pytest/tools/taosdemoTest.py @@ -59,11 +59,11 @@ class TDTestCase: tdSql.checkData(0, 0, self.numberOfTables * self.numberOfRecords) tdSql.query( - "select sum(col1) from test.meters interval(1h) sliding(30m)") + "select sum(c1) from test.meters interval(1h) sliding(30m)") tdSql.checkRows(2) tdSql.query( - "select apercentile(col1, 1) from test.meters interval(100s)") + "select apercentile(c1, 1) from test.meters interval(100s)") tdSql.checkRows(1) tdSql.error("select loc, count(loc) from test.meters") diff --git a/tests/pytest/tools/taosdumpTest.py b/tests/pytest/tools/taosdumpTest.py index b2c9eb3ec1..0dfc42f331 100644 --- a/tests/pytest/tools/taosdumpTest.py +++ b/tests/pytest/tools/taosdumpTest.py @@ -105,7 +105,6 @@ class TDTestCase: # 6--days,7--keep0,keep1,keep, 12--block, isCommunity = self.checkCommunity() - print("iscommunity: %d" % isCommunity) for i in range(len(dbresult)): if dbresult[i][0] == 'db': diff --git a/tests/pytest/wal/sdbComp.py b/tests/pytest/wal/sdbComp.py index 56b18c49eb..428fbc9a14 100644 --- a/tests/pytest/wal/sdbComp.py +++ b/tests/pytest/wal/sdbComp.py @@ -26,11 +26,11 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) - + def getBuildPath(self): global selfPath selfPath = os.path.dirname(os.path.realpath(__file__)) - + if ("community" in selfPath): projPath = selfPath[:selfPath.find("community")] else: @@ -43,7 +43,7 @@ class TDTestCase: buildPath = root[:len(root)-len("/build/bin")] break return buildPath - + def run(self): # set path para @@ -62,9 +62,9 @@ class TDTestCase: os.system("rm -rf %s/sim/dnode1/data/mnode_bak/" % testPath) tdSql.execute("drop database if exists db2") os.system("%staosdemo -f wal/insertDataDb1.json -y " % binPath) - tdSql.execute("drop database if exists db1") + tdSql.execute("drop database if exists db1") os.system("%staosdemo -f wal/insertDataDb2.json -y " % binPath) - tdSql.execute("drop table if exists db2.stb0") + tdSql.execute("drop table if exists db2.stb0") os.system("%staosdemo -f wal/insertDataDb2Newstab.json -y " % binPath) query_pid1 = int(subprocess.getstatusoutput('ps aux|grep taosd |grep -v "grep"|awk \'{print $2}\'')[1]) print(query_pid1) @@ -72,14 +72,14 @@ class TDTestCase: tdSql.execute("drop table if exists stb1_0") tdSql.execute("drop table if exists stb1_1") tdSql.execute("insert into stb0_0 
values(1614218412000,8637,78.861045,'R','bf3')(1614218422000,8637,98.861045,'R','bf3')")
-        tdSql.execute("alter table db2.stb0 add column col4 int")
-        tdSql.execute("alter table db2.stb0 drop column col2")
-        tdSql.execute("alter table db2.stb0 add tag t3 int;")
+        tdSql.execute("alter table db2.stb0 add column c4 int")
+        tdSql.execute("alter table db2.stb0 drop column c2")
+        tdSql.execute("alter table db2.stb0 add tag t3 int;")
         tdSql.execute("alter table db2.stb0 drop tag t1")
-        tdSql.execute("create table if not exists stb2_0 (ts timestamp, col0 int, col1 float) ")
+        tdSql.execute("create table if not exists stb2_0 (ts timestamp, c0 int, c1 float) ")
         tdSql.execute("insert into stb2_0 values(1614218412000,8637,78.861045)")
-        tdSql.execute("alter table stb2_0 add column col2 binary(4)")
-        tdSql.execute("alter table stb2_0 drop column col1")
+        tdSql.execute("alter table stb2_0 add column c2 binary(4)")
+        tdSql.execute("alter table stb2_0 drop column c1")
         tdSql.execute("insert into stb2_0 values(1614218422000,8638,'R')")

         # stop taosd and compact wal file
@@ -87,9 +87,9 @@ class TDTestCase:
         sleep(10)
         os.system("nohup %s/taosd --compact-mnode-wal -c %s/sim/dnode1/cfg/ & " %(binPath,testPath) )
         sleep(5)
-        assert os.path.exists(walFilePath) , "%s is not generated, compact didn't take effect " % walFilePath
+        assert os.path.exists(walFilePath) , "%s is not generated, compact didn't take effect " % walFilePath

-        # use new wal file to start taosd
+        # use new wal file to start taosd
         tdDnodes.start(1)
         sleep(5)
         tdSql.execute("reset query cache")
@@ -108,14 +108,14 @@ class TDTestCase:
         tdSql.checkData(0, 0, 2)
         tdSql.query("select count(*) from stb2_0")
         tdSql.checkData(0, 0, 2)
-
+
         # delete useless file
         testcaseFilename = os.path.split(__file__)[-1]
         os.system("rm -rf ./insert_res.txt")
-        os.system("rm -rf wal/%s.sql" % testcaseFilename )
-
-
-
+        os.system("rm -rf wal/%s.sql" % testcaseFilename)
+
+
+
     def stop(self):
         tdSql.close()
         tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/robust/makefile b/tests/robust/makefile
new file mode 100644
index 0000000000..c98469498f
--- /dev/null
+++ b/tests/robust/makefile
@@ -0,0 +1,8 @@
+NAME=robust
+CFLAGS = -O3 -g -Wall -Wno-deprecated -fPIC -Wno-unused-result -Wconversion -Wno-char-subscripts -D_REENTRANT -Wno-format -DLINUX -msse4.2 -Wno-unused-function -D_M_X64 -std=gnu99
+
+all:
+	gcc $(CFLAGS) ./$(NAME).c -o $(NAME) -ltaos -lpthread
+
+clean:
+	rm $(NAME)
\ No newline at end of file
diff --git a/tests/robust/robust.c b/tests/robust/robust.c
new file mode 100644
index 0000000000..3488ca1b9b
--- /dev/null
+++ b/tests/robust/robust.c
@@ -0,0 +1,260 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <sys/time.h>
+#include <taos.h>
+
+typedef struct {
+  char querySQL[256];
+  int client;
+  int stable;
+  int table;
+  unsigned interval;
+  int insert;
+  int create;
+  int query;
+} ProArgs;
+
+typedef struct {
+  pthread_t pid;
+  int threadId;
+  void* taos;
+} ThreadObj;
+
+static ProArgs arguments;
+void parseArg(int argc, char *argv[]);
+void create();
+void createImp(void *param);
+void start();
+void insertImp(void *param);
+void queryImp(void *param);
+
+int64_t getTimeStampMs() {
+  struct timeval systemTime;
+  gettimeofday(&systemTime, NULL);
+  return (int64_t)systemTime.tv_sec * 1000L + (int64_t)systemTime.tv_usec / 1000;
+}
+
+void parseArg(int argc, char *argv[]) {
+  arguments.stable = 100000;
+  arguments.table = 50;
+  arguments.client = 16;
+  arguments.interval = 1;
+  sprintf(arguments.querySQL, "select count(*) from");
+  for (int i = 1; i < argc; ++i) {
+    if (strcmp(argv[i], "-stable") == 0) {
+      if (i < argc - 1) {
+        arguments.stable = atoi(argv[++i]);
+      } else {
+        fprintf(stderr, "'-stable' requires a parameter, default:%d\n", arguments.stable);
+      }
+    } else if (strcmp(argv[i], "-table") == 0) {
+      if (i < argc - 1) {
+        arguments.table = atoi(argv[++i]);
+      } else {
+        fprintf(stderr, "'-table' requires a parameter, default:%d\n", arguments.table);
+      }
+    } else if (strcmp(argv[i], "-client") == 0) {
+      if (i < argc - 1) {
+        arguments.client = atoi(argv[++i]);
+      } else {
+        fprintf(stderr, "'-client' requires a parameter, default:%d\n", arguments.client);
+      }
+    } else if (strcmp(argv[i], "-sql") == 0) {
+      if (i < argc - 1) {
+        strcpy(arguments.querySQL, argv[++i]);
+      } else {
+        fprintf(stderr, "'-sql' requires a parameter, default:%s\n", arguments.querySQL);
+      }
+    } else if (strcmp(argv[i], "-insert") == 0) {
+      arguments.insert = 1;
+    } else if (strcmp(argv[i], "-create") == 0) {
+      arguments.create = 1;
+    } else if (strcmp(argv[i], "-query") == 0) {
+      arguments.query = 1;
+    }
+  }
+}
+
+void create() {
+  int64_t st = getTimeStampMs();
+  void *taos = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0);
+  if (taos == NULL) {
+    printf("TDengine error: failed to connect\n");
+    exit(EXIT_FAILURE);
+  }
+  TAOS_RES *result = taos_query(taos, "drop database if exists db");
+  if (result == NULL || taos_errno(result) != 0) {
+    printf("In line:%d, failed to execute sql, reason:%s\n", __LINE__, taos_errstr(result));
+    taos_free_result(result);
+    exit(EXIT_FAILURE);
+  }
+  taos_free_result(result);
+  int64_t elapsed = getTimeStampMs() - st;
+  printf("--- spend %ld ms to drop database db\n", elapsed);
+  st = getTimeStampMs();
+  result = taos_query(taos, "create database if not exists db");
+  if (result == NULL || taos_errno(result) != 0) {
+    printf("In line:%d, failed to execute sql, reason:%s\n", __LINE__, taos_errstr(result));
+    taos_free_result(result);
+    exit(EXIT_FAILURE);
+  }
+  taos_free_result(result);
+  elapsed = getTimeStampMs() - st;
+  printf("--- spend %ld ms to create database db\n", elapsed);
+  st = getTimeStampMs();
+  start();
+  printf("--- Spend %ld ms to create %d super tables and each %d tables\n", getTimeStampMs() - st, arguments.stable, arguments.table);
+}
+
+void createImp(void *param) {
+  char command[256] = "\0";
+  int sqlLen = 0;
+  int count = 0;
+  char *sql = calloc(1, 1024*1024);
+  ThreadObj *pThread = (ThreadObj *)param;
+  printf("Thread %d start create super table s%d to s%d\n", pThread->threadId, (pThread->threadId - 1) * arguments.stable / arguments.client, pThread->threadId * arguments.stable / arguments.client - 1);
+  for (int i = (pThread->threadId - 1) * arguments.stable / arguments.client; i < pThread->threadId * arguments.stable / arguments.client; i++) {
+    sprintf(command, "create table if not exists db.s%d(ts timestamp, c1 int, c2 int, c3 int) TAGS (id int)", i);
+    TAOS_RES *result = taos_query(pThread->taos, command);
+    if (result == NULL || taos_errno(result) != 0) {
+      printf("In line:%d, failed to execute sql:%s, reason:%s\n", __LINE__, command, taos_errstr(result));
+      taos_free_result(result);
+      return;
+    }
+    taos_free_result(result);
+    result = taos_query(pThread->taos, "use db");
+    if (result == NULL || taos_errno(result) != 0) {
+      printf("In line:%d, failed to execute sql, reason:%s\n", __LINE__, taos_errstr(result));
+      taos_free_result(result);
+      return;
+    }
+    taos_free_result(result);
+    /* batch the child tables of this super table into one "create table" statement */
+    sqlLen = sprintf(sql, "create table if not exists ");
+    count = 0;
+    for (int j = 0; j < arguments.table; j++) {
+      sqlLen += sprintf(sql + sqlLen, " s%d_%d USING s%d TAGS (%d)", i, j, i, j);
+      if ((j + 1) % arguments.table == 0) {
+        result = taos_query(pThread->taos, sql);
+        if (result == NULL || taos_errno(result) != 0) {
+          printf("In line:%d, failed to execute sql, reason:%s\n", __LINE__, taos_errstr(result));
+          taos_free_result(result);
+          return;
+        }
+        taos_free_result(result);
+        printf("Thread %d created table s%d_%d to s%d_%d\n", pThread->threadId, i, count, i, j);
+        count = j;
+        sqlLen = sprintf(sql, "create table if not exists ");
+      }
+    }
+  }
+}
+
+void start() {
+  ThreadObj *threads = calloc((size_t)arguments.client, sizeof(ThreadObj));
+  for (int i = 0; i < arguments.client; i++) {
+    ThreadObj *pthread = threads + i;
+    pthread_attr_t thattr;
+    pthread->threadId = i + 1;
+    pthread->taos = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0);
+    pthread_attr_init(&thattr);
+    pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
+    if (arguments.create) {
+      pthread_create(&pthread->pid, &thattr, (void *(*)(void *))createImp, pthread);
+    } else if (arguments.insert) {
+      pthread_create(&pthread->pid, &thattr, (void *(*)(void *))insertImp, pthread);
+    } else if (arguments.query) {
+      pthread_create(&pthread->pid, &thattr, (void *(*)(void *))queryImp, pthread);
+    }
+  }
+  for (int i = 0; i < arguments.client; i++) {
+    pthread_join(threads[i].pid, NULL);
+  }
+  free(threads);
+}
+
+void insertImp(void *param) {
+  int count = 0;
+  ThreadObj *pThread = (ThreadObj *)param;
+  printf("Thread %d start insert data\n", pThread->threadId);
+  int sqlLen = 0;
+  char *sql = calloc(1, 1024*1024);
+  TAOS_RES *result = taos_query(pThread->taos, "use db");
+  if (result == NULL || taos_errno(result) != 0) {
+    printf("In line:%d, failed to execute sql, reason:%s\n", __LINE__, taos_errstr(result));
+    taos_free_result(result);
+    return;
+  }
+  taos_free_result(result);
+  int64_t time = getTimeStampMs();
+  while (true) {
+    sqlLen = sprintf(sql, "insert into");
+    for (int i = (pThread->threadId - 1) * arguments.stable / arguments.client; i < pThread->threadId * arguments.stable / arguments.client; i++) {
+      for (int j = 0; j < arguments.table; j++) {
+        sqlLen += sprintf(sql + sqlLen, " s%d_%d values (%ld, %d, %d, %d)", i, j, time, rand(), rand(), rand());
+        count++;
+        /* flush when the 1 MB sql buffer is nearly full, or on the thread's last super table */
+        if ( (1048576 - sqlLen) < 49151 || i == (pThread->threadId * arguments.stable / arguments.client - 1)) {
+          result = taos_query(pThread->taos, sql);
+          printf("Thread %d already insert %d rows\n", pThread->threadId, count);
+          if (result == NULL || taos_errno(result) != 0) {
+            printf("In line:%d, failed to execute sql, reason:%s\n", __LINE__, taos_errstr(result));
+            taos_free_result(result);
+            return;
+          }
+          taos_free_result(result);
+          sqlLen = sprintf(sql, "insert into");
+        }
+      }
+    }
+    time += 1000*arguments.interval;
+  }
+}
+
+void queryImp(void *param) {
+  ThreadObj *pThread = (ThreadObj *)param;
+  printf("Thread %d start query\n", pThread->threadId);
+  int sqlLen = 0;
+  char *sql = calloc(1, 1024*1024);
+  TAOS_RES *result = taos_query(pThread->taos, "use db");
+  if (result == NULL || taos_errno(result) != 0) {
+    printf("In line:%d, failed to execute sql, reason:%s\n", __LINE__, taos_errstr(result));
+    taos_free_result(result);
+    return;
+  }
+  taos_free_result(result);
+  while (true) {
+    for (int i = (pThread->threadId - 1) * arguments.stable / arguments.client; i < pThread->threadId * arguments.stable / arguments.client; i++) {
+      sqlLen = sprintf(sql, "%s", arguments.querySQL);
+      sqlLen += sprintf(sql + sqlLen, " s%d", i);
+      result = taos_query(pThread->taos, sql);
+      if (result == NULL || taos_errno(result) != 0) {
+        printf("In line:%d, failed to execute sql: %s, reason:%s\n", __LINE__, sql, taos_errstr(result));
+        taos_free_result(result);
+        return;
+      }
+      int num_fields = taos_field_count(result);
+      TAOS_FIELD *fields = taos_fetch_fields(result);
+      TAOS_ROW row = NULL;
+      while ((row = taos_fetch_row(result))) {
+        char temp[16000] = {0};
+        int len = taos_print_row(temp, row, fields, num_fields);
+        len += sprintf(temp + len, "\n");
+        printf("Thread %d query result: %s\n", pThread->threadId, temp);
+      }
+      taos_free_result(result);
+      sleep(arguments.interval);
+    }
+  }
+
+}
+
+int main(int argc, char *argv[]) {
+  parseArg(argc, argv);
+  if (arguments.insert || arguments.query || arguments.create) {
+    start();
+  } else {
+    printf("choose one argument: \n1) -create\n2) -insert\n3) -query\n");
+  }
+}
\ No newline at end of file
diff --git a/tests/script/sh/prepare_udf.sh b/tests/script/sh/prepare_udf.sh
index ecfc53660e..a856b96c98 100644
--- a/tests/script/sh/prepare_udf.sh
+++ b/tests/script/sh/prepare_udf.sh
@@ -5,6 +5,7 @@ echo -n "" > /tmp/empty
 dd if=/dev/zero bs=3584 of=/tmp/big count=1
 
 rm -rf /tmp/sum_double.so /tmp/add_one.so
+touch /tmp/normal
 gcc -g -O0 -fPIC -shared sh/sum_double.c -o /tmp/sum_double.so
 gcc -g -O0 -fPIC -shared sh/add_one.c -o /tmp/add_one.so
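For reference, the new tests/robust tool added above is driven entirely by the flags handled in parseArg(), and its makefile links against the installed TDengine client library. A minimal usage sketch follows; the flag values are illustrative only, and a taosd instance reachable on 127.0.0.1 with the default root/taosdata account is assumed, as hard-coded in robust.c:

```sh
# build the benchmark (links against -ltaos and -lpthread)
cd tests/robust && make

# create 1000 super tables s0..s999, each with 50 child tables, using 16 client threads
./robust -create -stable 1000 -table 50 -client 16

# continuously insert random rows into the same layout
./robust -insert -stable 1000 -table 50 -client 16

# loop a query per super table; -sql supplies the prefix and the tool appends " s<N>"
./robust -query -stable 1000 -table 50 -client 16 -sql "select count(*) from"
```

Note that main() dispatches straight to start(), so the `db` database is expected to exist before -create is used; the create() helper that drops and re-creates it is present in the source but not wired to any flag.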