diff --git a/cmake/cmake.define b/cmake/cmake.define index aeab39cab4..e5ef08acb8 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -14,6 +14,25 @@ MESSAGE(STATUS "Project binary files output path: " ${PROJECT_BINARY_DIR}) MESSAGE(STATUS "Project executable files output path: " ${EXECUTABLE_OUTPUT_PATH}) MESSAGE(STATUS "Project library files output path: " ${LIBRARY_OUTPUT_PATH}) +find_package(Git QUIET) +if(GIT_FOUND AND EXISTS "${TD_SOURCE_DIR}/.git") +# Update submodules as needed + option(GIT_SUBMODULE "Check submodules during build" ON) + if(GIT_SUBMODULE) + message(STATUS "Submodule update") + execute_process(COMMAND cd ${TD_SOURCE_DIR} && ${GIT_EXECUTABLE} submodule update --init --recursive + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + RESULT_VARIABLE GIT_SUBMOD_RESULT) + if(NOT GIT_SUBMOD_RESULT EQUAL "0") + message(WARNING "git submodule update --init --recursive failed with ${GIT_SUBMOD_RESULT}, please checkout submodules") + endif() + endif() +endif() + +if(NOT EXISTS "${TD_SOURCE_DIR}/tools/taos-tools/CMakeLists.txt") + message(WARNING "The submodules were not downloaded! GIT_SUBMODULE was turned off or failed. Please update submodules manually if you need build them.") +endif() + if (NOT DEFINED TD_GRANT) SET(TD_GRANT FALSE) endif() @@ -47,7 +66,7 @@ ENDIF () IF (TD_WINDOWS) MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}") SET(COMMON_FLAGS "/W3 /D_WIN32") - + SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO") # IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900)) # SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18") # ENDIF () diff --git a/include/common/systable.h b/include/common/systable.h index bd8aae998f..506bdcfa8a 100644 --- a/include/common/systable.h +++ b/include/common/systable.h @@ -23,7 +23,6 @@ extern "C" { #define TDENGINE_SYSTABLE_H #define TSDB_INFORMATION_SCHEMA_DB "information_schema" -#define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema" #define TSDB_INS_TABLE_DNODES "dnodes" #define TSDB_INS_TABLE_MNODES "mnodes" #define TSDB_INS_TABLE_MODULES "modules" @@ -44,27 +43,27 @@ extern "C" { #define TSDB_INS_TABLE_VNODES "vnodes" #define TSDB_INS_TABLE_CONFIGS "configs" -#define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema" -#define TSDB_PERFS_TABLE_SMAS "smas" -#define TSDB_PERFS_TABLE_SUBSCRIBES "subscribes" -#define TSDB_PERFS_TABLE_CONNECTIONS "connections" -#define TSDB_PERFS_TABLE_QUERIES "queries" -#define TSDB_PERFS_TABLE_TOPICS "topics" -#define TSDB_PERFS_TABLE_CONSUMERS "consumers" -#define TSDB_PERFS_TABLE_SUBSCRIPTIONS "subscriptions" -#define TSDB_PERFS_TABLE_OFFSETS "offsets" -#define TSDB_PERFS_TABLE_TRANS "trans" -#define TSDB_PERFS_TABLE_STREAMS "streams" +#define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema" +#define TSDB_PERFS_TABLE_SMAS "smas" +#define TSDB_PERFS_TABLE_SUBSCRIBES "subscribes" +#define TSDB_PERFS_TABLE_CONNECTIONS "connections" +#define TSDB_PERFS_TABLE_QUERIES "queries" +#define TSDB_PERFS_TABLE_TOPICS "topics" +#define TSDB_PERFS_TABLE_CONSUMERS "consumers" +#define TSDB_PERFS_TABLE_SUBSCRIPTIONS "subscriptions" +#define TSDB_PERFS_TABLE_OFFSETS "offsets" +#define TSDB_PERFS_TABLE_TRANS "trans" +#define TSDB_PERFS_TABLE_STREAMS "streams" typedef struct SSysDbTableSchema { - const char *name; + const char* name; const int32_t type; const int32_t bytes; } SSysDbTableSchema; typedef struct SSysTableMeta { - const char *name; - const SSysDbTableSchema *schema; + const char* name; + const SSysDbTableSchema* schema; const int32_t colNum; } SSysTableMeta; diff --git 
a/include/common/tdatablock.h b/include/common/tdatablock.h index 19b108dcbf..cea40f4785 100644 --- a/include/common/tdatablock.h +++ b/include/common/tdatablock.h @@ -56,11 +56,11 @@ SEpSet getEpSet_s(SCorEpSet* pEpSet); #define colDataSetNotNull_f(bm_, r_) \ do { \ - BMCharPos(bm_, r_) &= ~(1u << (7u - BitPos(r_))); \ + BMCharPos(bm_, r_) &= ~(1u << (7u - BitPos(r_))); \ } while (0) -#define colDataIsNull_var(pColumnInfoData, row) (pColumnInfoData->varmeta.offset[row] == -1) -#define colDataSetNull_var(pColumnInfoData, row) (pColumnInfoData->varmeta.offset[row] = -1) +#define colDataIsNull_var(pColumnInfoData, row) (pColumnInfoData->varmeta.offset[row] == -1) +#define colDataSetNull_var(pColumnInfoData, row) (pColumnInfoData->varmeta.offset[row] = -1) #define BitmapLen(_n) (((_n) + ((1 << NBIT) - 1)) >> NBIT) @@ -187,8 +187,8 @@ static FORCE_INLINE void colDataAppendDouble(SColumnInfoData* pColumnInfoData, u } int32_t colDataAppend(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, bool isNull); -int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, uint32_t numOfRow1, int32_t* capacity, const SColumnInfoData* pSource, - uint32_t numOfRow2); +int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, uint32_t numOfRow1, int32_t* capacity, + const SColumnInfoData* pSource, uint32_t numOfRow2); int32_t colDataAssign(SColumnInfoData* pColumnInfoData, const SColumnInfoData* pSource, int32_t numOfRows); int32_t blockDataUpdateTsWindow(SSDataBlock* pDataBlock); @@ -230,9 +230,9 @@ SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData); void blockDebugShowData(const SArray* dataBlocks); int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks, STSchema* pTSchema, int32_t vgId, - tb_uid_t uid, tb_uid_t suid); + tb_uid_t uid, tb_uid_t suid); -SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pSchema); +SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pSchema, bool createTb, int64_t suid, int32_t vgId); static FORCE_INLINE int32_t blockGetEncodeSize(const SSDataBlock* pBlock) { return blockDataGetSerialMetaSize(pBlock) + blockDataGetSize(pBlock); diff --git a/include/common/tglobal.h b/include/common/tglobal.h index f253d31963..da5158abb5 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -17,8 +17,8 @@ #define _TD_COMMON_GLOBAL_H_ #include "tarray.h" -#include "tdef.h" #include "tconfig.h" +#include "tdef.h" #ifdef __cplusplus extern "C" { @@ -121,15 +121,16 @@ extern char tsCompressor[]; extern int32_t tsDiskCfgNum; extern SDiskCfg tsDiskCfg[]; -// internal -extern int32_t tsTransPullupMs; -extern int32_t tsMaRebalanceMs; +// internal +extern int32_t tsTransPullupInterval; +extern int32_t tsMqRebalanceInterval; #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize) -int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDir, const char **envCmd, const char *envFile, - char *apolloUrl, SArray *pArgs, bool tsc); -int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile, char *apolloUrl, SArray *pArgs, bool tsc); +int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDir, const char **envCmd, + const char *envFile, char *apolloUrl, SArray *pArgs, bool tsc); +int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile, char *apolloUrl, SArray *pArgs, + bool tsc); void taosCleanupCfg(); void taosCfgDynamicOptions(const char *option, const char 
*value); void taosAddDataDir(int32_t index, char *v1, int32_t level, int32_t primary); diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 7636d9b9d0..72418148b0 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -190,6 +190,8 @@ typedef struct SRetention { int8_t keepUnit; } SRetention; +#define RETENTION_VALID(r) (((r)->freq > 0) && ((r)->keep > 0)) + #pragma pack(push, 1) // null-terminated string instead of char array to avoid too many memory consumption in case of more than 1M tableMeta diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h index 36bef5e85a..8e918c40f9 100644 --- a/include/common/tmsgdef.h +++ b/include/common/tmsgdef.h @@ -204,7 +204,6 @@ enum { TD_DEF_MSG_TYPE(TDMT_VND_DROP_SMA, "vnode-drop-sma", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT_RSMA, "vnode-submit-rsma", SSubmitReq, SSubmitRsp) - // sync integration TD_DEF_MSG_TYPE(TDMT_VND_SYNC_TIMEOUT, "vnode-sync-timeout", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_SYNC_PING, "vnode-sync-ping", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_SYNC_PING_REPLY, "vnode-sync-ping-reply", NULL, NULL) diff --git a/include/libs/function/tudf.h b/include/libs/function/tudf.h index d59a7c23f7..bdccd29acf 100644 --- a/include/libs/function/tudf.h +++ b/include/libs/function/tudf.h @@ -44,7 +44,8 @@ enum { UDFC_CODE_PIPE_READ_ERR = -2, UDFC_CODE_CONNECT_PIPE_ERR = -3, UDFC_CODE_LOAD_UDF_FAILURE = -4, - UDFC_CODE_INVALID_STATE = -5 + UDFC_CODE_INVALID_STATE = -5, + UDFC_CODE_NO_PIPE = -6, }; typedef void *UdfcFuncHandle; @@ -140,6 +141,44 @@ typedef int32_t (*TUdfDestroyFunc)(); #define UDF_MEMORY_EXP_GROWTH 1.5 +#define udfColDataIsNull_var(pColumn, row) ((pColumn->colData.varLenCol.varOffsets)[row] == -1) +#define udfColDataIsNull_f(pColumn, row) ((BMCharPos(pColumn->colData.fixLenCol.nullBitmap, row) & (1u << (7u - BitPos(row)))) == (1u << (7u - BitPos(row)))) +#define udfColDataSetNull_f(pColumn, row) \ + do { \ + BMCharPos(pColumn->colData.fixLenCol.nullBitmap, row) |= (1u << (7u - BitPos(row))); \ + } while (0) + +#define udfColDataSetNotNull_f(pColumn, r_) \ + do { \ + BMCharPos(pColumn->colData.fixLenCol.nullBitmap, r_) &= ~(1u << (7u - BitPos(r_))); \ + } while (0) +#define udfColDataSetNull_var(pColumn, row) ((pColumn->colData.varLenCol.varOffsets)[row] = -1) + + +static FORCE_INLINE char* udfColDataGetData(const SUdfColumn* pColumn, int32_t row) { + if (IS_VAR_DATA_TYPE(pColumn->colMeta.type)) { + return pColumn->colData.varLenCol.payload + pColumn->colData.varLenCol.varOffsets[row]; + } else { + return pColumn->colData.fixLenCol.data + pColumn->colMeta.bytes * row; + } +} + +static FORCE_INLINE bool udfColDataIsNull(const SUdfColumn* pColumn, int32_t row) { + if (IS_VAR_DATA_TYPE(pColumn->colMeta.type)) { + if (pColumn->colMeta.type == TSDB_DATA_TYPE_JSON) { + if (udfColDataIsNull_var(pColumn, row)) { + return true; + } + char* data = udfColDataGetData(pColumn, row); + return (*data == TSDB_DATA_TYPE_NULL); + } else { + return udfColDataIsNull_var(pColumn, row); + } + } else { + return udfColDataIsNull_f(pColumn, row); + } +} + static FORCE_INLINE int32_t udfColEnsureCapacity(SUdfColumn* pColumn, int32_t newCapacity) { SUdfColumnMeta *meta = &pColumn->colMeta; SUdfColumnData *data = &pColumn->colData; @@ -186,17 +225,22 @@ static FORCE_INLINE int32_t udfColEnsureCapacity(SUdfColumn* pColumn, int32_t ne return TSDB_CODE_SUCCESS; } -static FORCE_INLINE int32_t udfColSetRow(SUdfColumn* pColumn, uint32_t currentRow, const char* pData, bool isNull) { +static FORCE_INLINE void 
udfColDataSetNull(SUdfColumn* pColumn, int32_t row) { + udfColEnsureCapacity(pColumn, row+1); + if (IS_VAR_DATA_TYPE(pColumn->colMeta.type)) { + udfColDataSetNull_var(pColumn, row); + } else { + udfColDataSetNull_f(pColumn, row); + } +} + +static FORCE_INLINE int32_t udfColDataSet(SUdfColumn* pColumn, uint32_t currentRow, const char* pData, bool isNull) { SUdfColumnMeta *meta = &pColumn->colMeta; SUdfColumnData *data = &pColumn->colData; udfColEnsureCapacity(pColumn, currentRow+1); bool isVarCol = IS_VAR_DATA_TYPE(meta->type); if (isNull) { - if (isVarCol) { - data->varLenCol.varOffsets[currentRow] = -1; - } else { - colDataSetNull_f(data->fixLenCol.nullBitmap, currentRow); - } + udfColDataSetNull(pColumn, currentRow); } else { if (!isVarCol) { colDataSetNotNull_f(data->fixLenCol.nullBitmap, currentRow); diff --git a/include/libs/nodes/nodes.h b/include/libs/nodes/nodes.h index 34ccea2898..ad8a472d08 100644 --- a/include/libs/nodes/nodes.h +++ b/include/libs/nodes/nodes.h @@ -164,7 +164,6 @@ typedef enum ENodeType { QUERY_NODE_SHOW_TOPICS_STMT, QUERY_NODE_SHOW_CONSUMERS_STMT, QUERY_NODE_SHOW_SUBSCRIBES_STMT, - QUERY_NODE_SHOW_TRANS_STMT, QUERY_NODE_SHOW_SMAS_STMT, QUERY_NODE_SHOW_CONFIGS_STMT, QUERY_NODE_SHOW_CONNECTIONS_STMT, diff --git a/include/libs/parser/parser.h b/include/libs/parser/parser.h index 7c9602734b..998d45aee1 100644 --- a/include/libs/parser/parser.h +++ b/include/libs/parser/parser.h @@ -105,7 +105,7 @@ int32_t qCreateSName(SName* pName, const char* pTableName, int32_t acctId, char* void* smlInitHandle(SQuery *pQuery); void smlDestroyHandle(void *pHandle); -int32_t smlBindData(void *handle, SArray *tags, SArray *colsFormat, SHashObj *colsHash, SArray *cols, bool format, STableMeta *pTableMeta, char *tableName, char *msgBuf, int16_t msgBufLen); +int32_t smlBindData(void *handle, SArray *tags, SArray *colsFormat, SArray *colsSchema, SArray *cols, bool format, STableMeta *pTableMeta, char *tableName, char *msgBuf, int16_t msgBufLen); int32_t smlBuildOutput(void* handle, SHashObj* pVgHash); #ifdef __cplusplus diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index d7df976e1a..e277622c40 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -25,6 +25,8 @@ extern "C" { #ifndef _TSTREAM_H_ #define _TSTREAM_H_ +typedef struct SStreamTask SStreamTask; + enum { STREAM_TASK_STATUS__RUNNING = 1, STREAM_TASK_STATUS__STOP, @@ -69,20 +71,24 @@ typedef struct { SUseDbRsp dbInfo; } STaskDispatcherShuffle; +typedef void FTbSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data); + typedef struct { - int8_t reserved; + int64_t stbUid; SSchemaWrapper* pSchemaWrapper; // not applicable to encoder and decoder + void* vnode; + FTbSink* tbSinkFunc; STSchema* pTSchema; SHashObj* pHash; // groupId to tbuid } STaskSinkTb; -typedef void FSmaHandle(void* vnode, int64_t smaId, const SArray* data); +typedef void FSmaSink(void* vnode, int64_t smaId, const SArray* data); typedef struct { int64_t smaId; // following are not applicable to encoder and decoder - FSmaHandle* smaHandle; + FSmaSink* smaSink; } STaskSinkSma; typedef struct { @@ -115,7 +121,7 @@ enum { TASK_SINK__FETCH, }; -typedef struct { +struct SStreamTask { int64_t streamId; int32_t taskId; int8_t status; @@ -150,8 +156,7 @@ typedef struct { // application storage void* ahandle; - -} SStreamTask; +}; SStreamTask* tNewSStreamTask(int64_t streamId); int32_t tEncodeSStreamTask(SEncoder* pEncoder, const SStreamTask* pTask); diff --git a/include/util/taoserror.h 
b/include/util/taoserror.h index 4e843aeb59..6a57b8d53e 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -411,7 +411,6 @@ int32_t* taosGetErrno(); #define TSDB_CODE_SYN_INVALID_MSGLEN TAOS_DEF_ERROR_CODE(0, 0x0909) #define TSDB_CODE_SYN_INVALID_MSGTYPE TAOS_DEF_ERROR_CODE(0, 0x090A) -// sync integration #define TSDB_CODE_SYN_NOT_LEADER TAOS_DEF_ERROR_CODE(0, 0x0910) #define TSDB_CODE_SYN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x09FF) diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index 5f9138bb43..2a0e85092b 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -37,8 +37,8 @@ typedef enum { typedef struct { char sTableName[TSDB_TABLE_NAME_LEN]; - SHashObj *tags; - SHashObj *fields; + SArray *tags; + SArray *fields; } SCreateSTableActionInfo; typedef struct { @@ -78,14 +78,17 @@ typedef struct { // colsFormat store cols formated, for quick parse, if info->formatData is true SArray *colsFormat; // elements are SArray - // cols & colsColumn store cols un formated + // cols store cols un formated SArray *cols; // elements are SHashObj for find by key quickly - SHashObj *columnsHash; // elements are , just for judge if key exists quickly. } SSmlTableInfo; typedef struct { - SHashObj *tagHash; + SArray *tags; // save the origin order to create table + SHashObj *tagHash; // elements are + + SArray *cols; SHashObj *fieldHash; + STableMeta *tableMeta; } SSmlSTableMeta; @@ -113,6 +116,8 @@ typedef struct { int32_t affectedRows; SSmlMsgBuf msgBuf; + SHashObj *dumplicateKey; // for dumplicate key + SArray *colsContainer; // for cols parse, if is dataFormat == false } SSmlHandle; //================================================================================================= @@ -143,8 +148,8 @@ static int32_t smlBuildInvalidDataMsg(SSmlMsgBuf* pBuf, const char *msg1, const } static int smlCompareKv(const void* p1, const void* p2) { - SSmlKv* kv1 = (SSmlKv*)p1; - SSmlKv* kv2 = (SSmlKv*)p2; + SSmlKv* kv1 = *(SSmlKv**)p1; + SSmlKv* kv2 = *(SSmlKv**)p2; int32_t kvLen1 = kv1->keyLen; int32_t kvLen2 = kv2->keyLen; int32_t res = strncasecmp(kv1->key, kv2->key, TMIN(kvLen1, kvLen2)); @@ -174,8 +179,9 @@ static void smlBuildChildTableName(SSmlTableInfo *tags) { tMD5Update(&context, (uint8_t *)keyJoined, (uint32_t)len); tMD5Final(&context); uint64_t digest1 = *(uint64_t*)(context.digest); - uint64_t digest2 = *(uint64_t*)(context.digest + 8); - snprintf(tags->childTableName, TSDB_TABLE_NAME_LEN, "t_%016"PRIx64"%016"PRIx64, digest1, digest2); + //uint64_t digest2 = *(uint64_t*)(context.digest + 8); + //snprintf(tags->childTableName, TSDB_TABLE_NAME_LEN, "t_%016"PRIx64"%016"PRIx64, digest1, digest2); + snprintf(tags->childTableName, TSDB_TABLE_NAME_LEN, "t_%016"PRIx64, digest1); taosStringBuilderDestroy(&sb); tags->uid = digest1; } @@ -350,37 +356,26 @@ static int32_t smlApplySchemaAction(SSmlHandle* info, SSchemaAction* action) { int n = sprintf(result, "create stable %s (", action->createSTable.sTableName); char* pos = result + n; int freeBytes = capacity - n; - size_t size = taosHashGetSize(action->createSTable.fields); - SArray *cols = taosArrayInit(size, POINTER_BYTES); - SSmlKv **kv = taosHashIterate(action->createSTable.fields, NULL); - while(kv){ - if(strncmp((*kv)->key, TS, strlen(TS)) == 0 && (*kv)->type == TSDB_DATA_TYPE_TIMESTAMP){ - taosArrayInsert(cols, 0, kv); - }else{ - taosArrayPush(cols, kv); - } - kv = taosHashIterate(action->createSTable.fields, kv); - } + SArray *cols = action->createSTable.fields; for(int i = 0; 
i < taosArrayGetSize(cols); i++){ - SSmlKv *kvNew = taosArrayGetP(cols, i); - smlBuildColumnDescription(kvNew, pos, freeBytes, &outBytes); + SSmlKv *kv = taosArrayGetP(cols, i); + smlBuildColumnDescription(kv, pos, freeBytes, &outBytes); pos += outBytes; freeBytes -= outBytes; *pos = ','; ++pos; --freeBytes; } - taosArrayDestroy(cols); --pos; ++freeBytes; outBytes = snprintf(pos, freeBytes, ") tags ("); pos += outBytes; freeBytes -= outBytes; - kv = taosHashIterate(action->createSTable.tags, NULL); - while(kv){ - smlBuildColumnDescription(*kv, pos, freeBytes, &outBytes); + cols = action->createSTable.tags; + for(int i = 0; i < taosArrayGetSize(cols); i++){ + SSmlKv *kv = taosArrayGetP(cols, i); + smlBuildColumnDescription(kv, pos, freeBytes, &outBytes); pos += outBytes; freeBytes -= outBytes; *pos = ','; ++pos; --freeBytes; - kv = taosHashIterate(action->createSTable.tags, kv); } pos--; ++freeBytes; outBytes = snprintf(pos, freeBytes, ")"); @@ -419,7 +414,7 @@ static int32_t smlModifyDBSchemas(SSmlHandle* info) { SSmlSTableMeta** tableMetaSml = taosHashIterate(info->superTables, NULL); while (tableMetaSml) { - SSmlSTableMeta* cTablePoints = *tableMetaSml; + SSmlSTableMeta* sTableData = *tableMetaSml; STableMeta *pTableMeta = NULL; SEpSet ep = getEpSet_s(&info->taos->pAppInfo->mgmtEp); @@ -436,8 +431,8 @@ static int32_t smlModifyDBSchemas(SSmlHandle* info) { SSchemaAction schemaAction = {0}; schemaAction.action = SCHEMA_ACTION_CREATE_STABLE; memcpy(schemaAction.createSTable.sTableName, superTable, superTableLen); - schemaAction.createSTable.tags = cTablePoints->tagHash; - schemaAction.createSTable.fields = cTablePoints->fieldHash; + schemaAction.createSTable.tags = sTableData->tags; + schemaAction.createSTable.fields = sTableData->cols; code = smlApplySchemaAction(info, &schemaAction); if (code != 0) { uError("SML:0x%"PRIx64" smlApplySchemaAction failed. 
can not create %s", info->id, schemaAction.createSTable.sTableName); @@ -454,7 +449,7 @@ static int32_t smlModifyDBSchemas(SSmlHandle* info) { uError("SML:0x%"PRIx64" load table meta error: %s", info->id, tstrerror(code)); return code; } - cTablePoints->tableMeta = pTableMeta; + sTableData->tableMeta = pTableMeta; tableMetaSml = taosHashIterate(info->superTables, tableMetaSml); } @@ -1034,7 +1029,7 @@ static int32_t smlParseString(const char* sql, SSmlLineInfo *elements, SSmlMsgBu return TSDB_CODE_SUCCESS; } -static int32_t smlParseCols(const char* data, int32_t len, SArray *cols, bool isTag, SSmlMsgBuf *msg){ +static int32_t smlParseCols(const char* data, int32_t len, SArray *cols, bool isTag, SHashObj *dumplicateKey, SSmlMsgBuf *msg){ if(isTag && len == 0){ SSmlKv *kv = taosMemoryCalloc(sizeof(SSmlKv), 1); kv->key = TAG; @@ -1062,6 +1057,13 @@ static int32_t smlParseCols(const char* data, int32_t len, SArray *cols, bool is return TSDB_CODE_SML_INVALID_DATA; } + if(taosHashGet(dumplicateKey, key, keyLen)){ + smlBuildInvalidDataMsg(msg, "dumplicate key", key); + return TSDB_CODE_SML_INVALID_DATA; + }else{ + taosHashPut(dumplicateKey, key, keyLen, key, CHAR_BYTES); + } + // parse value i++; const char *value = data + i; @@ -1295,14 +1297,19 @@ static bool smlUpdateMeta(SSmlSTableMeta* tableMeta, SArray *tags, SArray *cols, SSmlKv *kv = taosArrayGetP(tags, i); ASSERT(kv->type == TSDB_DATA_TYPE_NCHAR); - SSmlKv **value = taosHashGet(tableMeta->tagHash, kv->key, kv->keyLen); - if(value){ + uint8_t *index = taosHashGet(tableMeta->tagHash, kv->key, kv->keyLen); + if(index){ + SSmlKv **value = taosArrayGet(tableMeta->tags, *index); ASSERT((*value)->type == TSDB_DATA_TYPE_NCHAR); if(kv->valueLen > (*value)->valueLen){ // tags type is nchar *value = kv; } }else{ - taosHashPut(tableMeta->tagHash, kv->key, kv->keyLen, &kv, POINTER_BYTES); + size_t tmp = taosArrayGetSize(tableMeta->tags); + ASSERT(tmp <= UINT8_MAX); + uint8_t size = tmp; + taosArrayPush(tableMeta->tags, &kv); + taosHashPut(tableMeta->tagHash, kv->key, kv->keyLen, &size, CHAR_BYTES); } } } @@ -1310,8 +1317,10 @@ static bool smlUpdateMeta(SSmlSTableMeta* tableMeta, SArray *tags, SArray *cols, if(cols){ for (int i = 1; i < taosArrayGetSize(cols); ++i) { //jump timestamp SSmlKv *kv = taosArrayGetP(cols, i); - SSmlKv **value = taosHashGet(tableMeta->fieldHash, kv->key, kv->keyLen); - if(value){ + + int16_t *index = taosHashGet(tableMeta->fieldHash, kv->key, kv->keyLen); + if(index){ + SSmlKv **value = taosArrayGet(tableMeta->cols, *index); if(kv->type != (*value)->type){ smlBuildInvalidDataMsg(msg, "the type is not the same like before", kv->key); return false; @@ -1323,7 +1332,11 @@ static bool smlUpdateMeta(SSmlSTableMeta* tableMeta, SArray *tags, SArray *cols, } } }else{ - taosHashPut(tableMeta->fieldHash, kv->key, kv->keyLen, &kv, POINTER_BYTES); + size_t tmp = taosArrayGetSize(tableMeta->cols); + ASSERT(tmp <= INT16_MAX); + int16_t size = tmp; + taosArrayPush(tableMeta->cols, &kv); + taosHashPut(tableMeta->fieldHash, kv->key, kv->keyLen, &size, SHORT_BYTES); } } } @@ -1332,16 +1345,18 @@ static bool smlUpdateMeta(SSmlSTableMeta* tableMeta, SArray *tags, SArray *cols, static void smlInsertMeta(SSmlSTableMeta* tableMeta, SArray *tags, SArray *cols){ if(tags){ - for (int i = 0; i < taosArrayGetSize(tags); ++i) { + for (uint8_t i = 0; i < taosArrayGetSize(tags); ++i) { SSmlKv *kv = taosArrayGetP(tags, i); - taosHashPut(tableMeta->tagHash, kv->key, kv->keyLen, &kv, POINTER_BYTES); + taosArrayPush(tableMeta->tags, &kv); + 
taosHashPut(tableMeta->tagHash, kv->key, kv->keyLen, &i, CHAR_BYTES); } } if(cols){ - for (int i = 0; i < taosArrayGetSize(cols); ++i) { + for (int16_t i = 0; i < taosArrayGetSize(cols); ++i) { SSmlKv *kv = taosArrayGetP(cols, i); - taosHashPut(tableMeta->fieldHash, kv->key, kv->keyLen, &kv, POINTER_BYTES); + taosArrayPush(tableMeta->cols, &kv); + taosHashPut(tableMeta->fieldHash, kv->key, kv->keyLen, &i, SHORT_BYTES); } } } @@ -1364,12 +1379,6 @@ static SSmlTableInfo* smlBuildTableInfo(bool format){ uError("SML:smlParseLine failed to allocate memory"); goto cleanup; } - - tag->columnsHash = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); - if (tag->columnsHash == NULL) { - uError("SML:smlParseLine failed to allocate memory"); - goto cleanup; - } } tag->tags = taosArrayInit(16, POINTER_BYTES); @@ -1399,7 +1408,6 @@ static void smlDestroyBuildTableInfo(SSmlTableInfo *tag, bool format){ } taosHashCleanup(kvHash); } - taosHashCleanup(tag->columnsHash); } taosArrayDestroy(tag->tags); taosMemoryFreeClear(tag); @@ -1408,23 +1416,20 @@ static void smlDestroyBuildTableInfo(SSmlTableInfo *tag, bool format){ static int32_t smlDealCols(SSmlTableInfo* oneTable, bool dataFormat, SArray *cols){ if(dataFormat){ taosArrayPush(oneTable->colsFormat, &cols); - }else{ - SHashObj *kvHash = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); - if(!kvHash){ - uError("SML:smlDealCols failed to allocate memory"); - return TSDB_CODE_TSC_OUT_OF_MEMORY; - } - for(size_t i = 0; i < taosArrayGetSize(cols); i++){ - SSmlKv *kv = taosArrayGetP(cols, i); - taosHashPut(kvHash, kv->key, kv->keyLen, &kv, POINTER_BYTES); // todo key need escape, like \=, because find by schema name later - - if(taosHashGet(oneTable->columnsHash, kv->key, kv->keyLen) != NULL){ - continue; - } - taosHashPut(oneTable->columnsHash, kv->key, kv->keyLen, &kv, POINTER_BYTES); - } - taosArrayPush(oneTable->cols, &kvHash); + return TSDB_CODE_SUCCESS; } + + SHashObj *kvHash = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + if(!kvHash){ + uError("SML:smlDealCols failed to allocate memory"); + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + for(size_t i = 0; i < taosArrayGetSize(cols); i++){ + SSmlKv *kv = taosArrayGetP(cols, i); + taosHashPut(kvHash, kv->key, kv->keyLen, &kv, POINTER_BYTES); // todo key need escape, like \=, because find by schema name later + } + taosArrayPush(oneTable->cols, &kvHash); + return TSDB_CODE_SUCCESS; } @@ -1444,6 +1449,18 @@ static SSmlSTableMeta* smlBuildSTableMeta(){ uError("SML:smlBuildSTableMeta failed to allocate memory"); goto cleanup; } + + meta->tags = taosArrayInit(32, POINTER_BYTES); + if (meta->tags == NULL) { + uError("SML:smlBuildSTableMeta failed to allocate memory"); + goto cleanup; + } + + meta->cols = taosArrayInit(32, POINTER_BYTES); + if (meta->cols == NULL) { + uError("SML:smlBuildSTableMeta failed to allocate memory"); + goto cleanup; + } return meta; cleanup: @@ -1454,6 +1471,8 @@ cleanup: static void smlDestroySTableMeta(SSmlSTableMeta *meta){ taosHashCleanup(meta->tagHash); taosHashCleanup(meta->fieldHash); + taosArrayDestroy(meta->tags); + taosArrayDestroy(meta->cols); taosMemoryFree(meta->tableMeta); } @@ -1465,10 +1484,15 @@ static int32_t smlParseLine(SSmlHandle* info, const char* sql) { return ret; } - SArray *cols = taosArrayInit(16, POINTER_BYTES); - if (cols == NULL) { - uError("SML:0x%"PRIx64" smlParseLine failed to allocate memory", info->id); - return 
TSDB_CODE_TSC_OUT_OF_MEMORY; + SArray *cols = NULL; + if(info->dataFormat){ // if dataFormat, cols need new memory to save data + cols = taosArrayInit(16, POINTER_BYTES); + if (cols == NULL) { + uError("SML:0x%"PRIx64" smlParseLine failed to allocate memory", info->id); + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + }else{ // if dataFormat is false, cols do not need to save data, there is another new memory to save data + cols = info->colsContainer; } ret = smlParseTS(info, elements.timestamp, elements.timestampLen, cols); @@ -1476,7 +1500,7 @@ static int32_t smlParseLine(SSmlHandle* info, const char* sql) { uError("SML:0x%"PRIx64" smlParseTS failed", info->id); return ret; } - ret = smlParseCols(elements.cols, elements.colsLen, cols, false, &info->msgBuf); + ret = smlParseCols(elements.cols, elements.colsLen, cols, false, info->dumplicateKey, &info->msgBuf); if(ret != TSDB_CODE_SUCCESS){ uError("SML:0x%"PRIx64" smlParseCols parse cloums fields failed", info->id); return ret; @@ -1500,46 +1524,51 @@ static int32_t smlParseLine(SSmlHandle* info, const char* sql) { return ret; } }else{ - SSmlTableInfo *tag = smlBuildTableInfo(info->dataFormat); - if(!tag){ + SSmlTableInfo *tinfo = smlBuildTableInfo(info->dataFormat); + if(!tinfo){ return TSDB_CODE_TSC_OUT_OF_MEMORY; } - ret = smlDealCols(tag, info->dataFormat, cols); + ret = smlDealCols(tinfo, info->dataFormat, cols); if(ret != TSDB_CODE_SUCCESS){ return ret; } - ret = smlParseCols(elements.tags, elements.tagsLen, tag->tags, true, &info->msgBuf); + ret = smlParseCols(elements.tags, elements.tagsLen, tinfo->tags, true, info->dumplicateKey, &info->msgBuf); if(ret != TSDB_CODE_SUCCESS){ uError("SML:0x%"PRIx64" smlParseCols parse tag fields failed", info->id); return ret; } - if(taosArrayGetSize(tag->tags) > TSDB_MAX_TAGS){ + if(taosArrayGetSize(tinfo->tags) > TSDB_MAX_TAGS){ smlBuildInvalidDataMsg(&info->msgBuf, "too many tags than 128", NULL); return TSDB_CODE_SML_INVALID_DATA; } - tag->sTableName = elements.measure; - tag->sTableNameLen = elements.measureLen; - smlBuildChildTableName(tag); - uDebug("SML:0x%"PRIx64" child table name: %s", info->id, tag->childTableName); + tinfo->sTableName = elements.measure; + tinfo->sTableNameLen = elements.measureLen; + smlBuildChildTableName(tinfo); + uDebug("SML:0x%"PRIx64" child table name: %s", info->id, tinfo->childTableName); SSmlSTableMeta** tableMeta = taosHashGet(info->superTables, elements.measure, elements.measureLen); if(tableMeta){ // update meta - ret = smlUpdateMeta(*tableMeta, tag->tags, cols, &info->msgBuf); + ret = smlUpdateMeta(*tableMeta, tinfo->tags, cols, &info->msgBuf); if(!ret){ uError("SML:0x%"PRIx64" smlUpdateMeta failed", info->id); return TSDB_CODE_SML_INVALID_DATA; } }else{ SSmlSTableMeta *meta = smlBuildSTableMeta(); - smlInsertMeta(meta, tag->tags, cols); + smlInsertMeta(meta, tinfo->tags, cols); taosHashPut(info->superTables, elements.measure, elements.measureLen, &meta, POINTER_BYTES); } - taosHashPut(info->childTables, elements.measure, elements.measureTagsLen, &tag, POINTER_BYTES); + taosHashPut(info->childTables, elements.measure, elements.measureTagsLen, &tinfo, POINTER_BYTES); } + + if(!info->dataFormat){ + taosArrayClear(info->colsContainer); + } + taosHashClear(info->dumplicateKey); return TSDB_CODE_SUCCESS; } @@ -1568,6 +1597,7 @@ static void smlDestroyInfo(SSmlHandle* info){ // destroy info->pVgHash taosHashCleanup(info->pVgHash); + taosHashCleanup(info->dumplicateKey); taosMemoryFreeClear(info); } @@ -1614,8 +1644,17 @@ static SSmlHandle* smlBuildSmlInfo(TAOS* taos, 
SRequestObj* request, SMLProtocol info->superTables = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); info->pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); + info->dumplicateKey = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + if(!dataFormat){ + info->colsContainer = taosArrayInit(32, POINTER_BYTES); + if(NULL == info->colsContainer){ + uError("SML:0x%"PRIx64" create info failed", info->id); + goto cleanup; + } + } if(NULL == info->exec || NULL == info->childTables - || NULL == info->superTables || NULL == info->pVgHash){ + || NULL == info->superTables || NULL == info->pVgHash + || NULL == info->dumplicateKey){ uError("SML:0x%"PRIx64" create info failed", info->id); goto cleanup; } @@ -1651,7 +1690,7 @@ static int32_t smlInsertData(SSmlHandle* info) { (*pMeta)->tableMeta->vgId = vg.vgId; (*pMeta)->tableMeta->uid = tableData->uid; // one table merge data block together according uid - code = smlBindData(info->exec, tableData->tags, tableData->colsFormat, tableData->columnsHash, + code = smlBindData(info->exec, tableData->tags, tableData->colsFormat, (*pMeta)->cols, tableData->cols, info->dataFormat, (*pMeta)->tableMeta, tableData->childTableName, info->msgBuf.buf, info->msgBuf.len); if(code != TSDB_CODE_SUCCESS){ return code; @@ -1730,7 +1769,7 @@ TAOS_RES* taos_schemaless_insert(TAOS* taos, char* lines[], int numLines, int pr return NULL; } - SSmlHandle* info = smlBuildSmlInfo(taos, request, protocol, precision, false); + SSmlHandle* info = smlBuildSmlInfo(taos, request, protocol, precision, true); if(!info){ return (TAOS_RES*)request; } diff --git a/source/client/test/smlTest.cpp b/source/client/test/smlTest.cpp index 5f0f188b0b..6a4823b855 100644 --- a/source/client/test/smlTest.cpp +++ b/source/client/test/smlTest.cpp @@ -190,17 +190,21 @@ TEST(testCase, smlParseCols_Error_Test) { "c=-3.402823466e+39u64", "c=-339u64", "c=18446744073709551616u64", + "c=1,c=2" }; + SHashObj *dumplicateKey = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); for(int i = 0; i < sizeof(data)/sizeof(data[0]); i++){ char msg[256] = {0}; SSmlMsgBuf msgBuf; msgBuf.buf = msg; msgBuf.len = 256; int32_t len = strlen(data[i]); - int32_t ret = smlParseCols(data[i], len, NULL, false, &msgBuf); + int32_t ret = smlParseCols(data[i], len, NULL, false, dumplicateKey, &msgBuf); ASSERT_NE(ret, TSDB_CODE_SUCCESS); + taosHashClear(dumplicateKey); } + taosHashCleanup(dumplicateKey); } TEST(testCase, smlParseCols_tag_Test) { @@ -211,11 +215,12 @@ TEST(testCase, smlParseCols_tag_Test) { SArray *cols = taosArrayInit(16, POINTER_BYTES); ASSERT_NE(cols, NULL); + SHashObj *dumplicateKey = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); const char *data = "cbin=\"passit hello,c=2\",cnch=L\"iisdfsf\",cbool=false,cf64=4.31f64,cf32_=8.32,cf32=8.23f32,ci8=-34i8,cu8=89u8,ci16=233i16,cu16=898u16,ci32=98289i32,cu32=12323u32,ci64=-89238i64,ci=989i,cu64=8989323u64,cbooltrue=true,cboolt=t,cboolf=f,cnch_=l\"iuwq\""; int32_t len = strlen(data); - int32_t ret = smlParseCols(data, len, cols, true, &msgBuf); + int32_t ret = smlParseCols(data, len, cols, true, dumplicateKey, &msgBuf); ASSERT_EQ(ret, TSDB_CODE_SUCCESS); int32_t size = taosArrayGetSize(cols); ASSERT_EQ(size, 19); @@ -239,10 +244,14 @@ TEST(testCase, smlParseCols_tag_Test) { taosMemoryFree(kv); taosArrayClear(cols); + + + // test tag is null data = "t=3e"; len = 0; 
memset(msgBuf.buf, 0, msgBuf.len); - ret = smlParseCols(data, len, cols, true, &msgBuf); + taosHashClear(dumplicateKey); + ret = smlParseCols(data, len, cols, true, dumplicateKey, &msgBuf); ASSERT_EQ(ret, TSDB_CODE_SUCCESS); size = taosArrayGetSize(cols); ASSERT_EQ(size, 1); @@ -255,6 +264,9 @@ TEST(testCase, smlParseCols_tag_Test) { ASSERT_EQ(kv->valueLen, strlen(TAG)); ASSERT_EQ(strncasecmp(kv->value, TAG, strlen(TAG)), 0); taosMemoryFree(kv); + + taosArrayDestroy(cols); + taosHashCleanup(dumplicateKey); } TEST(testCase, smlParseCols_Test) { @@ -266,9 +278,11 @@ TEST(testCase, smlParseCols_Test) { SArray *cols = taosArrayInit(16, POINTER_BYTES); ASSERT_NE(cols, NULL); + SHashObj *dumplicateKey = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + const char *data = "cbin=\"passit hello,c=2\",cnch=L\"iisdfsf\",cbool=false,cf64=4.31f64,cf32_=8.32,cf32=8.23f32,ci8=-34i8,cu8=89u8,ci16=233i16,cu16=898u16,ci32=98289i32,cu32=12323u32,ci64=-89238i64,ci=989i,cu64=8989323u64,cbooltrue=true,cboolt=t,cboolf=f,cnch_=l\"iuwq\""; int32_t len = strlen(data); - int32_t ret = smlParseCols(data, len, cols, false, &msgBuf); + int32_t ret = smlParseCols(data, len, cols, false, dumplicateKey, &msgBuf); ASSERT_EQ(ret, TSDB_CODE_SUCCESS); int32_t size = taosArrayGetSize(cols); ASSERT_EQ(size, 19); @@ -450,6 +464,7 @@ TEST(testCase, smlParseCols_Test) { taosMemoryFree(kv); taosArrayDestroy(cols); + taosHashCleanup(dumplicateKey); } TEST(testCase, smlParseLine_Test) { @@ -468,17 +483,47 @@ TEST(testCase, smlParseLine_Test) { SSmlHandle *info = smlBuildSmlInfo(taos, request, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS, true); ASSERT_NE(info, NULL); - const char *sql[3] = { - "readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 load_capacity=1500,fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation=124,velocity=0,heading=221,grade=0,fuel_consumption=25 1451606400000000000", + const char *sql[9] = { + "readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 load_capacity=1500,fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation=124,velocity=0,heading=221,grade=0 1451606400000000000", + "readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 load_capacity=1500,fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation=124,velocity=0,heading=221,grade=0,fuel_consumption=25 1451607400000000000", + "readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 load_capacity=1500,fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation=124,heading=221,grade=0,fuel_consumption=25 1451608400000000000", + "readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation=124,velocity=0,heading=221,grade=0,fuel_consumption=25 1451609400000000000", + "readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 fuel_consumption=25,grade=0 1451619400000000000", "readings,name=truck_1,fleet=South,driver=Albert,model=F-150,device_version=v1.5 load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=72.45258,longitude=68.83761,elevation=255,velocity=0,heading=181,grade=0,fuel_consumption=25 1451606400000000000", - "readings,name=truck_2,fleet=North,driver=Derek,model=F-150,device_version=v1.5 
load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=24.5208,longitude=28.09377,elevation=428,velocity=0,heading=304,grade=0,fuel_consumption=25 1451606400000000000" + "readings,name=truck_2,driver=Derek,model=F-150,device_version=v1.5 load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=24.5208,longitude=28.09377,elevation=428,velocity=0,heading=304,grade=0,fuel_consumption=25 1451606400000000000", + "readings,name=truck_2,fleet=North,driver=Derek,model=F-150 load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=24.5208,longitude=28.09377,elevation=428,velocity=0,heading=304,grade=0,fuel_consumption=25 1451609400000000000", + "readings,fleet=South,name=truck_0,driver=Trish,model=H-2,device_version=v2.3 fuel_consumption=25,grade=0 1451629400000000000" }; - smlInsertLines(info, sql, 3); + smlInsertLines(info, sql, 9); // for (int i = 0; i < 3; i++) { // smlParseLine(info, sql[i]); // } } +TEST(testCase, smlParseLine_error_Test) { + TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0); + ASSERT_NE(taos, NULL); + + TAOS_RES* pRes = taos_query(taos, "create database if not exists sml_db"); + taos_free_result(pRes); + + pRes = taos_query(taos, "use sml_db"); + taos_free_result(pRes); + + SRequestObj *request = createRequest(taos, NULL, NULL, TSDB_SQL_INSERT); + ASSERT_NE(request, NULL); + + SSmlHandle *info = smlBuildSmlInfo(taos, request, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS, true); + ASSERT_NE(info, NULL); + + const char *sql[2] = { + "measure,t1=3 c1=8", + "measure,t2=3 c1=8u8" + }; + int ret = smlInsertLines(info, sql, 2); + ASSERT_NE(ret, 0); +} + // TEST(testCase, smlParseTS_Test) { // char msg[256] = {0}; // SSmlMsgBuf msgBuf; diff --git a/source/common/src/systable.c b/source/common/src/systable.c index 51f924280a..fba14bbaf5 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -211,6 +211,7 @@ static const SSysDbTableSchema transSchema[] = { {.name = "stage", .bytes = TSDB_TRANS_STAGE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "type", .bytes = TSDB_TRANS_TYPE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "failed_times", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, {.name = "last_exec_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, {.name = "last_error", .bytes = (TSDB_TRANS_ERROR_LEN - 1) + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, }; diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 2c17f2c2fc..0d0bbb07be 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -363,9 +363,9 @@ int32_t blockDataMerge(SSDataBlock* pDest, const SSDataBlock* pSrc, SArray* pInd for (int32_t i = 0; i < pDest->info.numOfCols; ++i) { int32_t mapIndex = i; -// if (pIndexMap) { -// mapIndex = *(int32_t*)taosArrayGet(pIndexMap, i); -// } + // if (pIndexMap) { + // mapIndex = *(int32_t*)taosArrayGet(pIndexMap, i); + // } SColumnInfoData* pCol2 = taosArrayGet(pDest->pDataBlock, i); SColumnInfoData* pCol1 = taosArrayGet(pSrc->pDataBlock, mapIndex); @@ -1596,7 +1596,8 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks return TSDB_CODE_SUCCESS; } -SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema) { +SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, bool createTb, int64_t suid, + int32_t vgId) { SSubmitReq* 
ret = NULL; // cal size @@ -1608,13 +1609,37 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema) { // TODO min int32_t rowSize = pDataBlock->info.rowSize; int32_t maxLen = TD_ROW_MAX_BYTES_FROM_SCHEMA(pTSchema); - cap += sizeof(SSubmitBlk) + rows * maxLen; + int32_t schemaLen = 0; + + if (createTb) { + SVCreateTbReq createTbReq = {0}; + createTbReq.name = "a"; + createTbReq.flags = 0; + createTbReq.type = TSDB_CHILD_TABLE; + createTbReq.ctb.suid = htobe64(suid); + + SKVRowBuilder kvRowBuilder = {0}; + if (tdInitKVRowBuilder(&kvRowBuilder) < 0) { + ASSERT(0); + } + tdAddColToKVRow(&kvRowBuilder, 1, &pDataBlock->info.groupId, sizeof(uint64_t)); + createTbReq.ctb.pTag = tdGetKVRowFromBuilder(&kvRowBuilder); + tdDestroyKVRowBuilder(&kvRowBuilder); + + int32_t code; + tEncodeSize(tEncodeSVCreateTbReq, &createTbReq, schemaLen, code); + if (code < 0) return NULL; + } + + cap += sizeof(SSubmitBlk) + schemaLen + rows * maxLen; } // assign data - ret = taosMemoryCalloc(1, cap); + ret = taosMemoryCalloc(1, cap + 46); + ret = POINTER_SHIFT(ret, 46); + ret->header.vgId = vgId; ret->version = htonl(1); - ret->length = htonl(cap - sizeof(SSubmitReq)); + ret->length = sizeof(SSubmitReq); ret->numOfBlocks = htonl(sz); void* submitBlk = POINTER_SHIFT(ret, sizeof(SSubmitReq)); @@ -1623,19 +1648,47 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema) { SSubmitBlk* blkHead = submitBlk; blkHead->numOfRows = htons(pDataBlock->info.rows); - blkHead->schemaLen = 0; blkHead->sversion = htonl(pTSchema->version); // TODO - blkHead->suid = 0; - blkHead->uid = htobe64(pDataBlock->info.uid); + blkHead->suid = htobe64(suid); + // uid is assigned by vnode + blkHead->uid = 0; int32_t rows = pDataBlock->info.rows; /*int32_t maxLen = TD_ROW_MAX_BYTES_FROM_SCHEMA(pTSchema);*/ /*blkHead->dataLen = htonl(rows * maxLen);*/ blkHead->dataLen = 0; - void* blockData = POINTER_SHIFT(submitBlk, sizeof(SSubmitBlk)); - STSRow* rowData = blockData; + void* blockData = POINTER_SHIFT(submitBlk, sizeof(SSubmitBlk)); + + int32_t schemaLen = 0; + if (createTb) { + SVCreateTbReq createTbReq = {0}; + createTbReq.name = "a"; + createTbReq.flags = 0; + createTbReq.type = TSDB_CHILD_TABLE; + createTbReq.ctb.suid = suid; + + SKVRowBuilder kvRowBuilder = {0}; + if (tdInitKVRowBuilder(&kvRowBuilder) < 0) { + ASSERT(0); + } + tdAddColToKVRow(&kvRowBuilder, 1, &pDataBlock->info.groupId, sizeof(uint64_t)); + createTbReq.ctb.pTag = tdGetKVRowFromBuilder(&kvRowBuilder); + tdDestroyKVRowBuilder(&kvRowBuilder); + + int32_t code; + tEncodeSize(tEncodeSVCreateTbReq, &createTbReq, schemaLen, code); + if (code < 0) return NULL; + + SEncoder encoder = {0}; + tEncoderInit(&encoder, blockData, schemaLen); + if (tEncodeSVCreateTbReq(&encoder, &createTbReq) < 0) return NULL; + tEncoderClear(&encoder); + } + blkHead->schemaLen = htonl(schemaLen); + + STSRow* rowData = POINTER_SHIFT(blockData, schemaLen); for (int32_t j = 0; j < rows; j++) { SRowBuilder rb = {0}; @@ -1653,10 +1706,14 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema) { rowData = POINTER_SHIFT(rowData, rowLen); blkHead->dataLen += rowLen; } - int32_t len = blkHead->dataLen; - blkHead->dataLen = htonl(len); - blkHead = POINTER_SHIFT(blkHead, len); + int32_t dataLen = blkHead->dataLen; + blkHead->dataLen = htonl(dataLen); + + ret->length += sizeof(SSubmitBlk) + schemaLen + dataLen; + blkHead = POINTER_SHIFT(blkHead, schemaLen + dataLen); + /*submitBlk = blkHead;*/ } + ret->length = htonl(ret->length); return ret; } diff 
--git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 87a9c521af..c878109711 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -170,8 +170,8 @@ uint32_t tsCurRange = 100; // range char tsCompressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRESSOR // internal -int32_t tsTransPullupMs = 6000; -int32_t tsMaRebalanceMs = 2000; +int32_t tsTransPullupInterval = 6; +int32_t tsMqRebalanceInterval = 2; void taosAddDataDir(int32_t index, char *v1, int32_t level, int32_t primary) { tstrncpy(tsDiskCfg[index].dir, v1, TSDB_FILENAME_LEN); @@ -438,6 +438,9 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddString(pCfg, "telemetryServer", tsTelemServer, 0) != 0) return -1; if (cfgAddInt32(pCfg, "telemetryPort", tsTelemPort, 1, 65056, 0) != 0) return -1; + if (cfgAddInt32(pCfg, "transPullupInterval", tsTransPullupInterval, 1, 10000, 1) != 0) return -1; + if (cfgAddInt32(pCfg, "mqRebalanceInterval", tsMqRebalanceInterval, 1, 10000, 1) != 0) return -1; + return 0; } @@ -575,6 +578,9 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tstrncpy(tsTelemServer, cfgGetItem(pCfg, "telemetryServer")->str, TSDB_FQDN_LEN); tsTelemPort = (uint16_t)cfgGetItem(pCfg, "telemetryPort")->i32; + tsTransPullupInterval = cfgGetItem(pCfg, "transPullupInterval")->i32; + tsMqRebalanceInterval = cfgGetItem(pCfg, "mqRebalanceInterval")->i32; + if (tsQueryBufferSize >= 0) { tsQueryBufferSizeBytes = tsQueryBufferSize * 1048576UL; } diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 129143593e..56bb93faa4 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -56,7 +56,6 @@ int32_t tGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock) { ASSERT(0); } - SSubmitBlk *pSubmitBlk = (SSubmitBlk *)POINTER_SHIFT(pIter->pMsg, pIter->len); pIter->len += (sizeof(SSubmitBlk) + pIter->dataLen + pIter->schemaLen); ASSERT(pIter->len > 0); } @@ -4013,4 +4012,4 @@ int32_t tDecodeSVSubmitReq(SDecoder *pCoder, SVSubmitReq *pReq) { tEndDecode(pCoder); return 0; -} \ No newline at end of file +} diff --git a/source/dnode/mgmt/implement/src/dmEps.c b/source/dnode/mgmt/implement/src/dmEps.c index 853c238316..f5c9a1d91b 100644 --- a/source/dnode/mgmt/implement/src/dmEps.c +++ b/source/dnode/mgmt/implement/src/dmEps.c @@ -51,7 +51,7 @@ int32_t dmReadEps(SDnode *pDnode) { pDnode->data.dnodeEps = taosArrayInit(1, sizeof(SDnodeEp)); if (pDnode->data.dnodeEps == NULL) { dError("failed to calloc dnodeEp array since %s", strerror(errno)); - goto PRASE_DNODE_OVER; + goto _OVER; } snprintf(file, sizeof(file), "%s%sdnode.json", pDnode->wrappers[DNODE].path, TD_DIRSEP); @@ -59,53 +59,53 @@ int32_t dmReadEps(SDnode *pDnode) { if (pFile == NULL) { // dDebug("file %s not exist", file); code = 0; - goto PRASE_DNODE_OVER; + goto _OVER; } len = (int32_t)taosReadFile(pFile, content, maxLen); if (len <= 0) { dError("failed to read %s since content is null", file); - goto PRASE_DNODE_OVER; + goto _OVER; } content[len] = 0; root = cJSON_Parse(content); if (root == NULL) { dError("failed to read %s since invalid json format", file); - goto PRASE_DNODE_OVER; + goto _OVER; } cJSON *dnodeId = cJSON_GetObjectItem(root, "dnodeId"); if (!dnodeId || dnodeId->type != cJSON_Number) { dError("failed to read %s since dnodeId not found", file); - goto PRASE_DNODE_OVER; + goto _OVER; } pDnode->data.dnodeId = dnodeId->valueint; cJSON *clusterId = cJSON_GetObjectItem(root, "clusterId"); if (!clusterId || clusterId->type != cJSON_String) { dError("failed to read %s since clusterId 
not found", file); - goto PRASE_DNODE_OVER; + goto _OVER; } pDnode->data.clusterId = atoll(clusterId->valuestring); cJSON *dropped = cJSON_GetObjectItem(root, "dropped"); if (!dropped || dropped->type != cJSON_Number) { dError("failed to read %s since dropped not found", file); - goto PRASE_DNODE_OVER; + goto _OVER; } pDnode->data.dropped = dropped->valueint; cJSON *dnodes = cJSON_GetObjectItem(root, "dnodes"); if (!dnodes || dnodes->type != cJSON_Array) { dError("failed to read %s since dnodes not found", file); - goto PRASE_DNODE_OVER; + goto _OVER; } int32_t numOfDnodes = cJSON_GetArraySize(dnodes); if (numOfDnodes <= 0) { dError("failed to read %s since numOfDnodes:%d invalid", file, numOfDnodes); - goto PRASE_DNODE_OVER; + goto _OVER; } for (int32_t i = 0; i < numOfDnodes; ++i) { @@ -117,7 +117,7 @@ int32_t dmReadEps(SDnode *pDnode) { cJSON *did = cJSON_GetObjectItem(node, "id"); if (!did || did->type != cJSON_Number) { dError("failed to read %s since dnodeId not found", file); - goto PRASE_DNODE_OVER; + goto _OVER; } dnodeEp.id = did->valueint; @@ -125,14 +125,14 @@ int32_t dmReadEps(SDnode *pDnode) { cJSON *dnodeFqdn = cJSON_GetObjectItem(node, "fqdn"); if (!dnodeFqdn || dnodeFqdn->type != cJSON_String || dnodeFqdn->valuestring == NULL) { dError("failed to read %s since dnodeFqdn not found", file); - goto PRASE_DNODE_OVER; + goto _OVER; } tstrncpy(dnodeEp.ep.fqdn, dnodeFqdn->valuestring, TSDB_FQDN_LEN); cJSON *dnodePort = cJSON_GetObjectItem(node, "port"); if (!dnodePort || dnodePort->type != cJSON_Number) { dError("failed to read %s since dnodePort not found", file); - goto PRASE_DNODE_OVER; + goto _OVER; } dnodeEp.ep.port = dnodePort->valueint; @@ -140,7 +140,7 @@ int32_t dmReadEps(SDnode *pDnode) { cJSON *isMnode = cJSON_GetObjectItem(node, "isMnode"); if (!isMnode || isMnode->type != cJSON_Number) { dError("failed to read %s since isMnode not found", file); - goto PRASE_DNODE_OVER; + goto _OVER; } dnodeEp.isMnode = isMnode->valueint; @@ -151,7 +151,7 @@ int32_t dmReadEps(SDnode *pDnode) { dDebug("succcessed to read file %s", file); dmPrintEps(pDnode); -PRASE_DNODE_OVER: +_OVER: if (content != NULL) taosMemoryFree(content); if (root != NULL) cJSON_Delete(root); if (pFile != NULL) taosCloseFile(&pFile); @@ -176,7 +176,7 @@ PRASE_DNODE_OVER: int32_t dmWriteEps(SDnode *pDnode) { char file[PATH_MAX] = {0}; - char realfile[PATH_MAX]; + char realfile[PATH_MAX] = {0}; snprintf(file, sizeof(file), "%s%sdnode.json.bak", pDnode->wrappers[DNODE].path, TD_DIRSEP); snprintf(realfile, sizeof(realfile), "%s%sdnode.json", pDnode->wrappers[DNODE].path, TD_DIRSEP); diff --git a/source/dnode/mgmt/implement/src/dmHandle.c b/source/dnode/mgmt/implement/src/dmHandle.c index ca1b943fb2..32205b337c 100644 --- a/source/dnode/mgmt/implement/src/dmHandle.c +++ b/source/dnode/mgmt/implement/src/dmHandle.c @@ -241,7 +241,11 @@ static int32_t dmSpawnUdfd(SDnode *pDnode) { strncpy(path, tsProcPath, strlen(tsProcPath)); taosDirName(path); } +#ifdef WINDOWS + strcat(path, "udfd.exe"); +#else strcat(path, "/udfd"); +#endif char* argsUdfd[] = {path, "-c", configDir, NULL}; options.args = argsUdfd; options.file = path; diff --git a/source/dnode/mgmt/implement/src/dmWorker.c b/source/dnode/mgmt/implement/src/dmWorker.c index b19c2ab36b..72b2111591 100644 --- a/source/dnode/mgmt/implement/src/dmWorker.c +++ b/source/dnode/mgmt/implement/src/dmWorker.c @@ -105,12 +105,13 @@ void dmStopMonitorThread(SDnode *pDnode) { } static void dmProcessMgmtQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) { - SDnode * pDnode = 
pInfo->ahandle; - SRpcMsg *pRpc = &pMsg->rpcMsg; - int32_t code = -1; + SDnode *pDnode = pInfo->ahandle; + + int32_t code = -1; + tmsg_t msgType = pMsg->rpcMsg.msgType; dTrace("msg:%p, will be processed in dnode-mgmt queue", pMsg); - switch (pRpc->msgType) { + switch (msgType) { case TDMT_DND_CONFIG_DNODE: code = dmProcessConfigReq(pDnode, pMsg); break; @@ -148,9 +149,14 @@ static void dmProcessMgmtQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) { break; } - if (pRpc->msgType & 1u) { - if (code != 0) code = terrno; - SRpcMsg rsp = {.handle = pRpc->handle, .ahandle = pRpc->ahandle, .code = code, .refId = pRpc->refId}; + if (msgType & 1u) { + if (code != 0 && terrno != 0) code = terrno; + SRpcMsg rsp = { + .handle = pMsg->rpcMsg.handle, + .ahandle = pMsg->rpcMsg.ahandle, + .code = code, + .refId = pMsg->rpcMsg.refId, + }; rpcSendResponse(&rsp); } @@ -160,7 +166,13 @@ static void dmProcessMgmtQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) { } int32_t dmStartWorker(SDnode *pDnode) { - SSingleWorkerCfg cfg = {.min = 1, .max = 1, .name = "dnode-mgmt", .fp = (FItem)dmProcessMgmtQueue, .param = pDnode}; + SSingleWorkerCfg cfg = { + .min = 1, + .max = 1, + .name = "dnode-mgmt", + .fp = (FItem)dmProcessMgmtQueue, + .param = pDnode, + }; if (tSingleWorkerInit(&pDnode->data.mgmtWorker, &cfg) != 0) { dError("failed to start dnode-mgmt worker since %s", terrstr()); return -1; diff --git a/source/dnode/mgmt/interface/inc/dmDef.h b/source/dnode/mgmt/interface/inc/dmDef.h index 087892e741..2e8ad982d8 100644 --- a/source/dnode/mgmt/interface/inc/dmDef.h +++ b/source/dnode/mgmt/interface/inc/dmDef.h @@ -41,6 +41,8 @@ #include "monitor.h" #include "sync.h" +#include "libs/function/function.h" + #ifdef __cplusplus extern "C" { #endif diff --git a/source/dnode/mgmt/interface/src/dmEnv.c b/source/dnode/mgmt/interface/src/dmEnv.c index 92a6e018cb..2c836714ce 100644 --- a/source/dnode/mgmt/interface/src/dmEnv.c +++ b/source/dnode/mgmt/interface/src/dmEnv.c @@ -55,6 +55,7 @@ void dmCleanup() { monCleanup(); syncCleanUp(); walCleanUp(); + udfcClose(); taosStopCacheRefreshWorker(); dInfo("dnode env is cleaned up"); } diff --git a/source/dnode/mgmt/mgmt_bnode/src/bmWorker.c b/source/dnode/mgmt/mgmt_bnode/src/bmWorker.c index 230fa23674..d3204039e6 100644 --- a/source/dnode/mgmt/mgmt_bnode/src/bmWorker.c +++ b/source/dnode/mgmt/mgmt_bnode/src/bmWorker.c @@ -18,7 +18,11 @@ static void bmSendErrorRsp(SNodeMsg *pMsg, int32_t code) { SRpcMsg rpcRsp = { - .handle = pMsg->rpcMsg.handle, .ahandle = pMsg->rpcMsg.ahandle, .code = code, .refId = pMsg->rpcMsg.refId}; + .handle = pMsg->rpcMsg.handle, + .ahandle = pMsg->rpcMsg.ahandle, + .code = code, + .refId = pMsg->rpcMsg.refId, + }; tmsgSendRsp(&rpcRsp); dTrace("msg:%p, is freed", pMsg); @@ -103,7 +107,7 @@ static void bmProcessWriteQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO } int32_t bmProcessWriteMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { - SBnodeMgmt * pMgmt = pWrapper->pMgmt; + SBnodeMgmt *pMgmt = pWrapper->pMgmt; SMultiWorker *pWorker = &pMgmt->writeWorker; dTrace("msg:%p, put into worker:%s", pMsg, pWorker->name); @@ -112,7 +116,7 @@ int32_t bmProcessWriteMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { } int32_t bmProcessMonitorMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { - SBnodeMgmt * pMgmt = pWrapper->pMgmt; + SBnodeMgmt *pMgmt = pWrapper->pMgmt; SSingleWorker *pWorker = &pMgmt->monitorWorker; dTrace("msg:%p, put into worker:%s", pMsg, pWorker->name); @@ -121,7 +125,12 @@ int32_t bmProcessMonitorMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { } int32_t 
bmStartWorker(SBnodeMgmt *pMgmt) { - SMultiWorkerCfg cfg = {.max = 1, .name = "bnode-write", .fp = (FItems)bmProcessWriteQueue, .param = pMgmt}; + SMultiWorkerCfg cfg = { + .max = 1, + .name = "bnode-write", + .fp = (FItems)bmProcessWriteQueue, + .param = pMgmt, + }; if (tMultiWorkerInit(&pMgmt->writeWorker, &cfg) != 0) { dError("failed to start bnode-write worker since %s", terrstr()); return -1; @@ -129,7 +138,12 @@ int32_t bmStartWorker(SBnodeMgmt *pMgmt) { if (tsMultiProcess) { SSingleWorkerCfg mCfg = { - .min = 1, .max = 1, .name = "bnode-monitor", .fp = (FItem)bmProcessMonitorQueue, .param = pMgmt}; + .min = 1, + .max = 1, + .name = "bnode-monitor", + .fp = (FItem)bmProcessMonitorQueue, + .param = pMgmt, + }; if (tSingleWorkerInit(&pMgmt->monitorWorker, &mCfg) != 0) { dError("failed to start bnode-monitor worker since %s", terrstr()); return -1; diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmFile.c b/source/dnode/mgmt/mgmt_mnode/src/mmFile.c index 75c48e79eb..83c832a41e 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmFile.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmFile.c @@ -22,7 +22,7 @@ int32_t mmReadFile(SMnodeMgmt *pMgmt, bool *pDeployed) { int32_t maxLen = 4096; char *content = taosMemoryCalloc(1, maxLen + 1); cJSON *root = NULL; - char file[PATH_MAX]; + char file[PATH_MAX] = {0}; TdFilePtr pFile = NULL; snprintf(file, sizeof(file), "%s%smnode.json", pMgmt->path, TD_DIRSEP); @@ -30,39 +30,39 @@ int32_t mmReadFile(SMnodeMgmt *pMgmt, bool *pDeployed) { if (pFile == NULL) { // dDebug("file %s not exist", file); code = 0; - goto PRASE_MNODE_OVER; + goto _OVER; } len = (int32_t)taosReadFile(pFile, content, maxLen); if (len <= 0) { dError("failed to read %s since content is null", file); - goto PRASE_MNODE_OVER; + goto _OVER; } content[len] = 0; root = cJSON_Parse(content); if (root == NULL) { dError("failed to read %s since invalid json format", file); - goto PRASE_MNODE_OVER; + goto _OVER; } cJSON *deployed = cJSON_GetObjectItem(root, "deployed"); if (!deployed || deployed->type != cJSON_Number) { dError("failed to read %s since deployed not found", file); - goto PRASE_MNODE_OVER; + goto _OVER; } *pDeployed = deployed->valueint; cJSON *mnodes = cJSON_GetObjectItem(root, "mnodes"); if (!mnodes || mnodes->type != cJSON_Array) { dError("failed to read %s since nodes not found", file); - goto PRASE_MNODE_OVER; + goto _OVER; } pMgmt->replica = cJSON_GetArraySize(mnodes); if (pMgmt->replica <= 0 || pMgmt->replica > TSDB_MAX_REPLICA) { dError("failed to read %s since mnodes size %d invalid", file, pMgmt->replica); - goto PRASE_MNODE_OVER; + goto _OVER; } for (int32_t i = 0; i < pMgmt->replica; ++i) { @@ -74,21 +74,21 @@ int32_t mmReadFile(SMnodeMgmt *pMgmt, bool *pDeployed) { cJSON *id = cJSON_GetObjectItem(node, "id"); if (!id || id->type != cJSON_Number) { dError("failed to read %s since id not found", file); - goto PRASE_MNODE_OVER; + goto _OVER; } pReplica->id = id->valueint; cJSON *fqdn = cJSON_GetObjectItem(node, "fqdn"); if (!fqdn || fqdn->type != cJSON_String || fqdn->valuestring == NULL) { dError("failed to read %s since fqdn not found", file); - goto PRASE_MNODE_OVER; + goto _OVER; } tstrncpy(pReplica->fqdn, fqdn->valuestring, TSDB_FQDN_LEN); cJSON *port = cJSON_GetObjectItem(node, "port"); if (!port || port->type != cJSON_Number) { dError("failed to read %s since port not found", file); - goto PRASE_MNODE_OVER; + goto _OVER; } pReplica->port = port->valueint; } @@ -96,7 +96,7 @@ int32_t mmReadFile(SMnodeMgmt *pMgmt, bool *pDeployed) { code = 0; dDebug("succcessed to read file 
%s, deployed:%d", file, *pDeployed); -PRASE_MNODE_OVER: +_OVER: if (content != NULL) taosMemoryFree(content); if (root != NULL) cJSON_Delete(root); if (pFile != NULL) taosCloseFile(&pFile); diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmInt.c b/source/dnode/mgmt/mgmt_mnode/src/mmInt.c index db69b62e58..0bf846b7fc 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmInt.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmInt.c @@ -161,9 +161,7 @@ static int32_t mmOpen(SMgmtWrapper *pWrapper) { SMnodeOpt option = {0}; if (!deployed) { dInfo("mnode start to deploy"); - // if (pWrapper->procType == DND_PROC_CHILD) { - pWrapper->pDnode->data.dnodeId = 1; - // } + pWrapper->pDnode->data.dnodeId = 1; mmBuildOptionForDeploy(pMgmt, &option); } else { dInfo("mnode start to open"); diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c b/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c index 1f27b314e2..aac5bbc16a 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c @@ -17,42 +17,48 @@ #include "mmInt.h" static inline void mmSendRsp(SNodeMsg *pMsg, int32_t code) { - SRpcMsg rsp = {.handle = pMsg->rpcMsg.handle, - .ahandle = pMsg->rpcMsg.ahandle, - .refId = pMsg->rpcMsg.refId, - .code = code, - .pCont = pMsg->pRsp, - .contLen = pMsg->rspLen}; + SRpcMsg rsp = { + .handle = pMsg->rpcMsg.handle, + .ahandle = pMsg->rpcMsg.ahandle, + .refId = pMsg->rpcMsg.refId, + .code = code, + .pCont = pMsg->pRsp, + .contLen = pMsg->rspLen, + }; tmsgSendRsp(&rsp); } static void mmProcessQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) { SMnodeMgmt *pMgmt = pInfo->ahandle; + int32_t code = -1; + tmsg_t msgType = pMsg->rpcMsg.msgType; dTrace("msg:%p, get from mnode queue", pMsg); - SRpcMsg *pRpc = &pMsg->rpcMsg; - int32_t code = -1; - if (pMsg->rpcMsg.msgType == TDMT_DND_ALTER_MNODE) { - code = mmProcessAlterReq(pMgmt, pMsg); - } else if (pMsg->rpcMsg.msgType == TDMT_MON_MM_INFO) { - code = mmProcessGetMonMmInfoReq(pMgmt->pWrapper, pMsg); - } else if (pMsg->rpcMsg.msgType == TDMT_MON_MM_LOAD) { - code = mmProcessGetMnodeLoadsReq(pMgmt->pWrapper, pMsg); - } else { - pMsg->pNode = pMgmt->pMnode; - code = mndProcessMsg(pMsg); + switch (msgType) { + case TDMT_DND_ALTER_MNODE: + code = mmProcessAlterReq(pMgmt, pMsg); + break; + case TDMT_MON_MM_INFO: + code = mmProcessGetMonMmInfoReq(pMgmt->pWrapper, pMsg); + break; + case TDMT_MON_MM_LOAD: + code = mmProcessGetMnodeLoadsReq(pMgmt->pWrapper, pMsg); + break; + default: + pMsg->pNode = pMgmt->pMnode; + code = mndProcessMsg(pMsg); } - if (pRpc->msgType & 1U) { - if (pRpc->handle != NULL && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { + if (msgType & 1U) { + if (pMsg->rpcMsg.handle != NULL && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { if (code != 0 && terrno != 0) code = terrno; mmSendRsp(pMsg, code); } } dTrace("msg:%p, is freed, result:0x%04x:%s", pMsg, code & 0XFFFF, tstrerror(code)); - rpcFreeCont(pRpc->pCont); + rpcFreeCont(pMsg->rpcMsg.pCont); taosFreeQitem(pMsg); } @@ -78,38 +84,38 @@ static void mmProcessQueryQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) { taosFreeQitem(pMsg); } -static void mmPutMsgToWorker(SSingleWorker *pWorker, SNodeMsg *pMsg) { +static void mmPutNodeMsgToWorker(SSingleWorker *pWorker, SNodeMsg *pMsg) { dTrace("msg:%p, put into worker %s", pMsg, pWorker->name); taosWriteQitem(pWorker->queue, pMsg); } int32_t mmProcessWriteMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { SMnodeMgmt *pMgmt = pWrapper->pMgmt; - mmPutMsgToWorker(&pMgmt->writeWorker, pMsg); + mmPutNodeMsgToWorker(&pMgmt->writeWorker, pMsg); return 0; } int32_t 
mmProcessSyncMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { SMnodeMgmt *pMgmt = pWrapper->pMgmt; - mmPutMsgToWorker(&pMgmt->syncWorker, pMsg); + mmPutNodeMsgToWorker(&pMgmt->syncWorker, pMsg); return 0; } int32_t mmProcessReadMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { SMnodeMgmt *pMgmt = pWrapper->pMgmt; - mmPutMsgToWorker(&pMgmt->readWorker, pMsg); + mmPutNodeMsgToWorker(&pMgmt->readWorker, pMsg); return 0; } int32_t mmProcessQueryMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { SMnodeMgmt *pMgmt = pWrapper->pMgmt; - mmPutMsgToWorker(&pMgmt->queryWorker, pMsg); + mmPutNodeMsgToWorker(&pMgmt->queryWorker, pMsg); return 0; } int32_t mmProcessMonitorMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { SMnodeMgmt *pMgmt = pWrapper->pMgmt; - mmPutMsgToWorker(&pMgmt->monitorWorker, pMsg); + mmPutNodeMsgToWorker(&pMgmt->monitorWorker, pMsg); return 0; } @@ -144,40 +150,62 @@ int32_t mmPutMsgToSyncQueue(SMgmtWrapper *pWrapper, SRpcMsg *pRpc) { } int32_t mmStartWorker(SMnodeMgmt *pMgmt) { - SSingleWorkerCfg qCfg = {.min = tsNumOfMnodeQueryThreads, - .max = tsNumOfMnodeQueryThreads, - .name = "mnode-query", - .fp = (FItem)mmProcessQueryQueue, - .param = pMgmt}; + SSingleWorkerCfg qCfg = { + .min = tsNumOfMnodeQueryThreads, + .max = tsNumOfMnodeQueryThreads, + .name = "mnode-query", + .fp = (FItem)mmProcessQueryQueue, + .param = pMgmt, + }; if (tSingleWorkerInit(&pMgmt->queryWorker, &qCfg) != 0) { dError("failed to start mnode-query worker since %s", terrstr()); return -1; } - SSingleWorkerCfg rCfg = {.min = tsNumOfMnodeReadThreads, - .max = tsNumOfMnodeReadThreads, - .name = "mnode-read", - .fp = (FItem)mmProcessQueue, - .param = pMgmt}; + SSingleWorkerCfg rCfg = { + .min = tsNumOfMnodeReadThreads, + .max = tsNumOfMnodeReadThreads, + .name = "mnode-read", + .fp = (FItem)mmProcessQueue, + .param = pMgmt, + }; if (tSingleWorkerInit(&pMgmt->readWorker, &rCfg) != 0) { dError("failed to start mnode-read worker since %s", terrstr()); return -1; } - SSingleWorkerCfg wCfg = {.min = 1, .max = 1, .name = "mnode-write", .fp = (FItem)mmProcessQueue, .param = pMgmt}; + SSingleWorkerCfg wCfg = { + .min = 1, + .max = 1, + .name = "mnode-write", + .fp = (FItem)mmProcessQueue, + .param = pMgmt, + }; if (tSingleWorkerInit(&pMgmt->writeWorker, &wCfg) != 0) { dError("failed to start mnode-write worker since %s", terrstr()); return -1; } - SSingleWorkerCfg sCfg = {.min = 1, .max = 1, .name = "mnode-sync", .fp = (FItem)mmProcessQueue, .param = pMgmt}; + SSingleWorkerCfg sCfg = { + .min = 1, + .max = 1, + .name = "mnode-sync", + .fp = (FItem)mmProcessQueue, + .param = pMgmt, + }; if (tSingleWorkerInit(&pMgmt->syncWorker, &sCfg) != 0) { dError("failed to start mnode mnode-sync worker since %s", terrstr()); return -1; } if (tsMultiProcess) { - SSingleWorkerCfg mCfg = {.min = 1, .max = 1, .name = "mnode-monitor", .fp = (FItem)mmProcessQueue, .param = pMgmt}; + SSingleWorkerCfg mCfg = { + .min = 1, + .max = 1, + .name = "mnode-monitor", + .fp = (FItem)mmProcessQueue, + .param = pMgmt, + }; if (tSingleWorkerInit(&pMgmt->monitorWorker, &mCfg) != 0) { dError("failed to start mnode mnode-monitor worker since %s", terrstr()); return -1; diff --git a/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c b/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c index da85ee64a8..965d35cb3e 100644 --- a/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c +++ b/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c @@ -17,12 +17,14 @@ #include "qmInt.h" static inline void qmSendRsp(SNodeMsg *pMsg, int32_t code) { - SRpcMsg rsp = {.handle = pMsg->rpcMsg.handle, - .ahandle = 
pMsg->rpcMsg.ahandle, - .refId = pMsg->rpcMsg.refId, - .code = code, - .pCont = pMsg->pRsp, - .contLen = pMsg->rspLen}; + SRpcMsg rsp = { + .handle = pMsg->rpcMsg.handle, + .ahandle = pMsg->rpcMsg.ahandle, + .refId = pMsg->rpcMsg.refId, + .code = code, + .pCont = pMsg->pRsp, + .contLen = pMsg->rspLen, + }; tmsgSendRsp(&rsp); } @@ -145,22 +147,26 @@ int32_t qmGetQueueSize(SMgmtWrapper *pWrapper, int32_t vgId, EQueueType qtype) { } int32_t qmStartWorker(SQnodeMgmt *pMgmt) { - SSingleWorkerCfg queryCfg = {.min = tsNumOfVnodeQueryThreads, - .max = tsNumOfVnodeQueryThreads, - .name = "qnode-query", - .fp = (FItem)qmProcessQueryQueue, - .param = pMgmt}; + SSingleWorkerCfg queryCfg = { + .min = tsNumOfVnodeQueryThreads, + .max = tsNumOfVnodeQueryThreads, + .name = "qnode-query", + .fp = (FItem)qmProcessQueryQueue, + .param = pMgmt, + }; if (tSingleWorkerInit(&pMgmt->queryWorker, &queryCfg) != 0) { dError("failed to start qnode-query worker since %s", terrstr()); return -1; } - SSingleWorkerCfg fetchCfg = {.min = tsNumOfQnodeFetchThreads, - .max = tsNumOfQnodeFetchThreads, - .name = "qnode-fetch", - .fp = (FItem)qmProcessFetchQueue, - .param = pMgmt}; + SSingleWorkerCfg fetchCfg = { + .min = tsNumOfQnodeFetchThreads, + .max = tsNumOfQnodeFetchThreads, + .name = "qnode-fetch", + .fp = (FItem)qmProcessFetchQueue, + .param = pMgmt, + }; if (tSingleWorkerInit(&pMgmt->fetchWorker, &fetchCfg) != 0) { dError("failed to start qnode-fetch worker since %s", terrstr()); @@ -169,7 +175,12 @@ int32_t qmStartWorker(SQnodeMgmt *pMgmt) { if (tsMultiProcess) { SSingleWorkerCfg mCfg = { - .min = 1, .max = 1, .name = "qnode-monitor", .fp = (FItem)qmProcessMonitorQueue, .param = pMgmt}; + .min = 1, + .max = 1, + .name = "qnode-monitor", + .fp = (FItem)qmProcessMonitorQueue, + .param = pMgmt, + }; if (tSingleWorkerInit(&pMgmt->monitorWorker, &mCfg) != 0) { dError("failed to start qnode-monitor worker since %s", terrstr()); return -1; diff --git a/source/dnode/mgmt/mgmt_snode/src/smWorker.c b/source/dnode/mgmt/mgmt_snode/src/smWorker.c index 25872aec55..2ae439bbd6 100644 --- a/source/dnode/mgmt/mgmt_snode/src/smWorker.c +++ b/source/dnode/mgmt/mgmt_snode/src/smWorker.c @@ -17,12 +17,14 @@ #include "smInt.h" static inline void smSendRsp(SNodeMsg *pMsg, int32_t code) { - SRpcMsg rsp = {.handle = pMsg->rpcMsg.handle, - .ahandle = pMsg->rpcMsg.ahandle, - .refId = pMsg->rpcMsg.refId, - .code = code, - .pCont = pMsg->pRsp, - .contLen = pMsg->rspLen}; + SRpcMsg rsp = { + .handle = pMsg->rpcMsg.handle, + .ahandle = pMsg->rpcMsg.ahandle, + .refId = pMsg->rpcMsg.refId, + .code = code, + .pCont = pMsg->pRsp, + .contLen = pMsg->rspLen, + }; tmsgSendRsp(&rsp); } @@ -90,7 +92,12 @@ int32_t smStartWorker(SSnodeMgmt *pMgmt) { return -1; } - SMultiWorkerCfg cfg = {.max = 1, .name = "snode-unique", .fp = smProcessUniqueQueue, .param = pMgmt}; + SMultiWorkerCfg cfg = { + .max = 1, + .name = "snode-unique", + .fp = smProcessUniqueQueue, + .param = pMgmt, + }; if (tMultiWorkerInit(pUniqueWorker, &cfg) != 0) { dError("failed to start snode-unique worker since %s", terrstr()); return -1; @@ -101,11 +108,13 @@ int32_t smStartWorker(SSnodeMgmt *pMgmt) { } } - SSingleWorkerCfg cfg = {.min = tsNumOfSnodeSharedThreads, - .max = tsNumOfSnodeSharedThreads, - .name = "snode-shared", - .fp = (FItem)smProcessSharedQueue, - .param = pMgmt}; + SSingleWorkerCfg cfg = { + .min = tsNumOfSnodeSharedThreads, + .max = tsNumOfSnodeSharedThreads, + .name = "snode-shared", + .fp = (FItem)smProcessSharedQueue, + .param = pMgmt, + }; if 
(tSingleWorkerInit(&pMgmt->sharedWorker, &cfg)) { dError("failed to start snode shared-worker since %s", terrstr()); @@ -114,7 +123,12 @@ int32_t smStartWorker(SSnodeMgmt *pMgmt) { if (tsMultiProcess) { SSingleWorkerCfg mCfg = { - .min = 1, .max = 1, .name = "snode-monitor", .fp = (FItem)smProcessMonitorQueue, .param = pMgmt}; + .min = 1, + .max = 1, + .name = "snode-monitor", + .fp = (FItem)smProcessMonitorQueue, + .param = pMgmt, + }; if (tSingleWorkerInit(&pMgmt->monitorWorker, &mCfg) != 0) { dError("failed to start snode-monitor worker since %s", terrstr()); return -1; @@ -150,7 +164,7 @@ static FORCE_INLINE int32_t smGetSWTypeFromMsg(SRpcMsg *pMsg) { } int32_t smProcessMgmtMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { - SSnodeMgmt * pMgmt = pWrapper->pMgmt; + SSnodeMgmt *pMgmt = pWrapper->pMgmt; SMultiWorker *pWorker = taosArrayGetP(pMgmt->uniqueWorkers, 0); if (pWorker == NULL) { terrno = TSDB_CODE_INVALID_MSG; @@ -163,7 +177,7 @@ int32_t smProcessMgmtMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { } int32_t smProcessMonitorMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { - SSnodeMgmt * pMgmt = pWrapper->pMgmt; + SSnodeMgmt *pMgmt = pWrapper->pMgmt; SSingleWorker *pWorker = &pMgmt->monitorWorker; dTrace("msg:%p, put into worker:%s", pMsg, pWorker->name); @@ -172,7 +186,7 @@ int32_t smProcessMonitorMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { } int32_t smProcessUniqueMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { - SSnodeMgmt * pMgmt = pWrapper->pMgmt; + SSnodeMgmt *pMgmt = pWrapper->pMgmt; int32_t index = smGetSWIdFromMsg(&pMsg->rpcMsg); SMultiWorker *pWorker = taosArrayGetP(pMgmt->uniqueWorkers, index); if (pWorker == NULL) { @@ -186,7 +200,7 @@ int32_t smProcessUniqueMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { } int32_t smProcessSharedMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { - SSnodeMgmt * pMgmt = pWrapper->pMgmt; + SSnodeMgmt *pMgmt = pWrapper->pMgmt; SSingleWorker *pWorker = &pMgmt->sharedWorker; dTrace("msg:%p, put into worker:%s", pMsg, pWorker->name); diff --git a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h index 4b42f97f53..51b3860461 100644 --- a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h +++ b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h @@ -29,15 +29,15 @@ typedef struct SVnodesMgmt { SHashObj *hash; SRWLatch latch; SVnodesStat state; + const char *path; + SDnode *pDnode; + SMgmtWrapper *pWrapper; STfs *pTfs; SQWorkerPool queryPool; SQWorkerPool fetchPool; SWWorkerPool syncPool; SWWorkerPool writePool; SWWorkerPool mergePool; - const char *path; - SDnode *pDnode; - SMgmtWrapper *pWrapper; SSingleWorker mgmtWorker; SSingleWorker monitorWorker; } SVnodesMgmt; @@ -95,9 +95,9 @@ int32_t vmProcessGetVnodeLoadsReq(SMgmtWrapper *pWrapper, SNodeMsg *pReq); void vmGetVnodeLoads(SMgmtWrapper *pWrapper, SMonVloadInfo *pInfo); // vmFile.c -int32_t vmGetVnodesFromFile(SVnodesMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *numOfVnodes); -int32_t vmWriteVnodesToFile(SVnodesMgmt *pMgmt); -SVnodeObj **vmGetVnodesFromHash(SVnodesMgmt *pMgmt, int32_t *numOfVnodes); +int32_t vmGetVnodeListFromFile(SVnodesMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *numOfVnodes); +int32_t vmWriteVnodeListToFile(SVnodesMgmt *pMgmt); +SVnodeObj **vmGetVnodeListFromHash(SVnodesMgmt *pMgmt, int32_t *numOfVnodes); // vmWorker.c int32_t vmStartWorker(SVnodesMgmt *pMgmt); @@ -105,10 +105,12 @@ void vmStopWorker(SVnodesMgmt *pMgmt); int32_t vmAllocQueue(SVnodesMgmt *pMgmt, SVnodeObj *pVnode); void vmFreeQueue(SVnodesMgmt *pMgmt, SVnodeObj *pVnode); -int32_t 
vmPutMsgToSyncQueue(SMgmtWrapper *pWrapper, SRpcMsg *pRpc); // sync integration +int32_t vmPutMsgToWriteQueue(SMgmtWrapper *pWrapper, SRpcMsg *pMsg); +int32_t vmPutMsgToSyncQueue(SMgmtWrapper *pWrapper, SRpcMsg *pMsg); +int32_t vmPutMsgToApplyQueue(SMgmtWrapper *pWrapper, SRpcMsg *pMsg); int32_t vmPutMsgToQueryQueue(SMgmtWrapper *pWrapper, SRpcMsg *pMsg); int32_t vmPutMsgToFetchQueue(SMgmtWrapper *pWrapper, SRpcMsg *pMsg); -int32_t vmPutMsgToApplyQueue(SMgmtWrapper *pWrapper, SRpcMsg *pMsg); +int32_t vmPutMsgToMergeQueue(SMgmtWrapper *pWrapper, SRpcMsg *pMsg); int32_t vmGetQueueSize(SMgmtWrapper *pWrapper, int32_t vgId, EQueueType qtype); int32_t vmProcessWriteMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg); diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c index 7e00a022b2..f251dd120e 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c @@ -16,7 +16,7 @@ #define _DEFAULT_SOURCE #include "vmInt.h" -SVnodeObj **vmGetVnodesFromHash(SVnodesMgmt *pMgmt, int32_t *numOfVnodes) { +SVnodeObj **vmGetVnodeListFromHash(SVnodesMgmt *pMgmt, int32_t *numOfVnodes) { taosRLockLatch(&pMgmt->latch); int32_t num = 0; @@ -44,14 +44,14 @@ SVnodeObj **vmGetVnodesFromHash(SVnodesMgmt *pMgmt, int32_t *numOfVnodes) { return pVnodes; } -int32_t vmGetVnodesFromFile(SVnodesMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *numOfVnodes) { +int32_t vmGetVnodeListFromFile(SVnodesMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *numOfVnodes) { int32_t code = TSDB_CODE_INVALID_JSON_FORMAT; int32_t len = 0; int32_t maxLen = 30000; char *content = taosMemoryCalloc(1, maxLen + 1); cJSON *root = NULL; FILE *fp = NULL; - char file[PATH_MAX]; + char file[PATH_MAX] = {0}; SWrapperCfg *pCfgs = NULL; TdFilePtr pFile = NULL; @@ -61,26 +61,26 @@ int32_t vmGetVnodesFromFile(SVnodesMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *n if (pFile == NULL) { dDebug("file %s not exist", file); code = 0; - goto PRASE_VNODE_OVER; + goto _OVER; } len = (int32_t)taosReadFile(pFile, content, maxLen); if (len <= 0) { dError("failed to read %s since content is null", file); - goto PRASE_VNODE_OVER; + goto _OVER; } content[len] = 0; root = cJSON_Parse(content); if (root == NULL) { dError("failed to read %s since invalid json format", file); - goto PRASE_VNODE_OVER; + goto _OVER; } cJSON *vnodes = cJSON_GetObjectItem(root, "vnodes"); if (!vnodes || vnodes->type != cJSON_Array) { dError("failed to read %s since vnodes not found", file); - goto PRASE_VNODE_OVER; + goto _OVER; } int32_t vnodesNum = cJSON_GetArraySize(vnodes); @@ -88,7 +88,7 @@ int32_t vmGetVnodesFromFile(SVnodesMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *n pCfgs = taosMemoryCalloc(vnodesNum, sizeof(SWrapperCfg)); if (pCfgs == NULL) { dError("failed to read %s since out of memory", file); - goto PRASE_VNODE_OVER; + goto _OVER; } for (int32_t i = 0; i < vnodesNum; ++i) { @@ -98,7 +98,7 @@ int32_t vmGetVnodesFromFile(SVnodesMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *n cJSON *vgId = cJSON_GetObjectItem(vnode, "vgId"); if (!vgId || vgId->type != cJSON_Number) { dError("failed to read %s since vgId not found", file); - goto PRASE_VNODE_OVER; + goto _OVER; } pCfg->vgId = vgId->valueint; snprintf(pCfg->path, sizeof(pCfg->path), "%s%svnode%d", pMgmt->path, TD_DIRSEP, pCfg->vgId); @@ -106,28 +106,28 @@ int32_t vmGetVnodesFromFile(SVnodesMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *n cJSON *dropped = cJSON_GetObjectItem(vnode, "dropped"); if (!dropped || dropped->type != cJSON_Number) { dError("failed to read %s since 
dropped not found", file); - goto PRASE_VNODE_OVER; + goto _OVER; } pCfg->dropped = dropped->valueint; cJSON *vgVersion = cJSON_GetObjectItem(vnode, "vgVersion"); if (!vgVersion || vgVersion->type != cJSON_Number) { dError("failed to read %s since vgVersion not found", file); - goto PRASE_VNODE_OVER; + goto _OVER; } pCfg->vgVersion = vgVersion->valueint; cJSON *dbUid = cJSON_GetObjectItem(vnode, "dbUid"); if (!dbUid || dbUid->type != cJSON_String) { dError("failed to read %s since dbUid not found", file); - goto PRASE_VNODE_OVER; + goto _OVER; } pCfg->dbUid = atoll(dbUid->valuestring); cJSON *db = cJSON_GetObjectItem(vnode, "db"); if (!db || db->type != cJSON_String) { dError("failed to read %s since db not found", file); - goto PRASE_VNODE_OVER; + goto _OVER; } tstrncpy(pCfg->db, db->valuestring, TSDB_DB_FNAME_LEN); } @@ -139,7 +139,7 @@ int32_t vmGetVnodesFromFile(SVnodesMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *n code = 0; dInfo("succcessed to read file %s", file); -PRASE_VNODE_OVER: +_OVER: if (content != NULL) taosMemoryFree(content); if (root != NULL) cJSON_Delete(root); if (pFile != NULL) taosCloseFile(&pFile); @@ -148,7 +148,7 @@ PRASE_VNODE_OVER: return code; } -int32_t vmWriteVnodesToFile(SVnodesMgmt *pMgmt) { +int32_t vmWriteVnodeListToFile(SVnodesMgmt *pMgmt) { char file[PATH_MAX]; char realfile[PATH_MAX]; snprintf(file, sizeof(file), "%s%svnodes.json.bak", pMgmt->path, TD_DIRSEP); @@ -162,7 +162,7 @@ int32_t vmWriteVnodesToFile(SVnodesMgmt *pMgmt) { } int32_t numOfVnodes = 0; - SVnodeObj **pVnodes = vmGetVnodesFromHash(pMgmt, &numOfVnodes); + SVnodeObj **pVnodes = vmGetVnodeListFromHash(pMgmt, &numOfVnodes); int32_t len = 0; int32_t maxLen = 65536; diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c index d3003b59c4..0196dd0cec 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c @@ -16,12 +16,37 @@ #define _DEFAULT_SOURCE #include "vmInt.h" +void vmGetVnodeLoads(SMgmtWrapper *pWrapper, SMonVloadInfo *pInfo) { + SVnodesMgmt *pMgmt = pWrapper->pMgmt; + + pInfo->pVloads = taosArrayInit(pMgmt->state.totalVnodes, sizeof(SVnodeLoad)); + if (pInfo->pVloads == NULL) return; + + taosRLockLatch(&pMgmt->latch); + + void *pIter = taosHashIterate(pMgmt->hash, NULL); + while (pIter) { + SVnodeObj **ppVnode = pIter; + if (ppVnode == NULL || *ppVnode == NULL) continue; + + SVnodeObj *pVnode = *ppVnode; + SVnodeLoad vload = {0}; + vnodeGetLoad(pVnode->pImpl, &vload); + taosArrayPush(pInfo->pVloads, &vload); + pIter = taosHashIterate(pMgmt->hash, pIter); + } + + taosRUnLockLatch(&pMgmt->latch); +} + void vmGetMonitorInfo(SMgmtWrapper *pWrapper, SMonVmInfo *pInfo) { SVnodesMgmt *pMgmt = pWrapper->pMgmt; SMonVloadInfo vloads = {0}; vmGetVnodeLoads(pWrapper, &vloads); - if (vloads.pVloads == NULL) return; + + SArray *pVloads = vloads.pVloads; + if (pVloads == NULL) return; int32_t totalVnodes = 0; int32_t masterNum = 0; @@ -31,8 +56,8 @@ void vmGetMonitorInfo(SMgmtWrapper *pWrapper, SMonVmInfo *pInfo) { int64_t numOfBatchInsertReqs = 0; int64_t numOfBatchInsertSuccessReqs = 0; - for (int32_t i = 0; i < taosArrayGetSize(vloads.pVloads); ++i) { - SVnodeLoad *pLoad = taosArrayGet(vloads.pVloads, i); + for (int32_t i = 0; i < taosArrayGetSize(pVloads); ++i) { + SVnodeLoad *pLoad = taosArrayGet(pVloads, i); numOfSelectReqs += pLoad->numOfSelectReqs; numOfInsertReqs += pLoad->numOfInsertReqs; numOfInsertSuccessReqs += pLoad->numOfInsertSuccessReqs; @@ -49,9 +74,16 @@ void 
vmGetMonitorInfo(SMgmtWrapper *pWrapper, SMonVmInfo *pInfo) { pInfo->vstat.numOfInsertSuccessReqs = numOfInsertSuccessReqs - pMgmt->state.numOfInsertSuccessReqs; pInfo->vstat.numOfBatchInsertReqs = numOfBatchInsertReqs - pMgmt->state.numOfBatchInsertReqs; pInfo->vstat.numOfBatchInsertSuccessReqs = numOfBatchInsertSuccessReqs - pMgmt->state.numOfBatchInsertSuccessReqs; - pMgmt->state = pInfo->vstat; + pMgmt->state.totalVnodes = totalVnodes; + pMgmt->state.masterNum = masterNum; + pMgmt->state.numOfSelectReqs = numOfSelectReqs; + pMgmt->state.numOfInsertReqs = numOfInsertReqs; + pMgmt->state.numOfInsertSuccessReqs = numOfInsertSuccessReqs; + pMgmt->state.numOfBatchInsertReqs = numOfBatchInsertReqs; + pMgmt->state.numOfBatchInsertSuccessReqs = numOfBatchInsertSuccessReqs; - taosArrayDestroy(vloads.pVloads); + tfsGetMonitorInfo(pMgmt->pTfs, &pInfo->tfs); + taosArrayDestroy(pVloads); } int32_t vmProcessGetMonVmInfoReq(SMgmtWrapper *pWrapper, SNodeMsg *pReq) { @@ -106,12 +138,13 @@ static void vmGenerateVnodeCfg(SCreateVnodeReq *pCreate, SVnodeCfg *pCfg) { memcpy(pCfg, &vnodeCfgDefault, sizeof(SVnodeCfg)); pCfg->vgId = pCreate->vgId; - strcpy(pCfg->dbname, pCreate->db); + tstrncpy(pCfg->dbname, pCreate->db, sizeof(pCfg->dbname)); + pCfg->dbId = pCreate->dbUid; pCfg->isWeak = true; pCfg->tsdbCfg.days = 10; - pCfg->tsdbCfg.keep2 = 3650; pCfg->tsdbCfg.keep0 = 3650; pCfg->tsdbCfg.keep1 = 3650; + pCfg->tsdbCfg.keep2 = 3650; for (size_t i = 0; i < taosArrayGetSize(pCreate->pRetensions); ++i) { memcpy(&pCfg->tsdbCfg.retentions[i], taosArrayGet(pCreate->pRetensions, i), sizeof(SRetention)); } @@ -120,30 +153,30 @@ static void vmGenerateVnodeCfg(SCreateVnodeReq *pCreate, SVnodeCfg *pCfg) { pCfg->hashEnd = pCreate->hashEnd; pCfg->hashMethod = pCreate->hashMethod; - // sync integration pCfg->syncCfg.myIndex = pCreate->selfIndex; pCfg->syncCfg.replicaNum = pCreate->replica; - memset(&(pCfg->syncCfg.nodeInfo), 0, sizeof(pCfg->syncCfg.nodeInfo)); + memset(&pCfg->syncCfg.nodeInfo, 0, sizeof(pCfg->syncCfg.nodeInfo)); for (int i = 0; i < pCreate->replica; ++i) { - (pCfg->syncCfg.nodeInfo)[i].nodePort = (pCreate->replicas)[i].port; - snprintf((pCfg->syncCfg.nodeInfo)[i].nodeFqdn, sizeof((pCfg->syncCfg.nodeInfo)[i].nodeFqdn), "%s", - (pCreate->replicas)[i].fqdn); + pCfg->syncCfg.nodeInfo[i].nodePort = pCreate->replicas[i].port; + snprintf(pCfg->syncCfg.nodeInfo[i].nodeFqdn, sizeof(pCfg->syncCfg.nodeInfo[i].nodeFqdn), "%s", + pCreate->replicas[i].fqdn); } } static void vmGenerateWrapperCfg(SVnodesMgmt *pMgmt, SCreateVnodeReq *pCreate, SWrapperCfg *pCfg) { - memcpy(pCfg->db, pCreate->db, TSDB_DB_FNAME_LEN); - pCfg->dbUid = pCreate->dbUid; - pCfg->dropped = 0; - snprintf(pCfg->path, sizeof(pCfg->path), "%s%svnode%d", pMgmt->path, TD_DIRSEP, pCreate->vgId); pCfg->vgId = pCreate->vgId; pCfg->vgVersion = pCreate->vgVersion; + pCfg->dropped = 0; + pCfg->dbUid = pCreate->dbUid; + tstrncpy(pCfg->db, pCreate->db, TSDB_DB_FNAME_LEN); + snprintf(pCfg->path, sizeof(pCfg->path), "%s%svnode%d", pMgmt->path, TD_DIRSEP, pCreate->vgId); } int32_t vmProcessCreateVnodeReq(SVnodesMgmt *pMgmt, SNodeMsg *pMsg) { SRpcMsg *pReq = &pMsg->rpcMsg; SCreateVnodeReq createReq = {0}; - char path[TSDB_FILENAME_LEN]; + int32_t code = -1; + char path[TSDB_FILENAME_LEN] = {0}; if (tDeserializeSCreateVnodeReq(pReq->pCont, pReq->contLen, &createReq) != 0) { terrno = TSDB_CODE_INVALID_MSG; @@ -160,14 +193,13 @@ int32_t vmProcessCreateVnodeReq(SVnodesMgmt *pMgmt, SNodeMsg *pMsg) { SVnodeObj *pVnode = vmAcquireVnode(pMgmt, createReq.vgId); if (pVnode != 
NULL) { - tFreeSCreateVnodeReq(&createReq); dDebug("vgId:%d, already exist", createReq.vgId); + tFreeSCreateVnodeReq(&createReq); vmReleaseVnode(pMgmt, pVnode); terrno = TSDB_CODE_NODE_ALREADY_DEPLOYED; return -1; } - // create vnode snprintf(path, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, vnodeCfg.vgId); if (vnodeCreate(path, &vnodeCfg, pMgmt->pTfs) < 0) { tFreeSCreateVnodeReq(&createReq); @@ -177,49 +209,44 @@ int32_t vmProcessCreateVnodeReq(SVnodesMgmt *pMgmt, SNodeMsg *pMsg) { SMsgCb msgCb = pMgmt->pDnode->data.msgCb; msgCb.pWrapper = pMgmt->pWrapper; + msgCb.queueFps[WRITE_QUEUE] = vmPutMsgToWriteQueue; + msgCb.queueFps[SYNC_QUEUE] = vmPutMsgToSyncQueue; + msgCb.queueFps[APPLY_QUEUE] = vmPutMsgToApplyQueue; msgCb.queueFps[QUERY_QUEUE] = vmPutMsgToQueryQueue; msgCb.queueFps[FETCH_QUEUE] = vmPutMsgToFetchQueue; - msgCb.queueFps[APPLY_QUEUE] = vmPutMsgToApplyQueue; - msgCb.queueFps[SYNC_QUEUE] = vmPutMsgToSyncQueue; // sync integration + msgCb.queueFps[MERGE_QUEUE] = vmPutMsgToMergeQueue; msgCb.qsizeFp = vmGetQueueSize; SVnode *pImpl = vnodeOpen(path, pMgmt->pTfs, msgCb); if (pImpl == NULL) { dError("vgId:%d, failed to create vnode since %s", createReq.vgId, terrstr()); - tFreeSCreateVnodeReq(&createReq); - return -1; + goto _OVER; } - int32_t code = vmOpenVnode(pMgmt, &wrapperCfg, pImpl); + code = vmOpenVnode(pMgmt, &wrapperCfg, pImpl); if (code != 0) { - tFreeSCreateVnodeReq(&createReq); dError("vgId:%d, failed to open vnode since %s", createReq.vgId, terrstr()); - vnodeClose(pImpl); - vnodeDestroy(path, pMgmt->pTfs); - terrno = code; - return code; + goto _OVER; } code = vnodeStart(pImpl); if (code != 0) { - tFreeSCreateVnodeReq(&createReq); dError("vgId:%d, failed to start sync since %s", createReq.vgId, terrstr()); - vnodeClose(pImpl); - vnodeDestroy(path, pMgmt->pTfs); - terrno = code; - return code; + goto _OVER; } - code = vmWriteVnodesToFile(pMgmt); + code = vmWriteVnodeListToFile(pMgmt); + if (code != 0) goto _OVER; + +_OVER: if (code != 0) { - tFreeSCreateVnodeReq(&createReq); vnodeClose(pImpl); vnodeDestroy(path, pMgmt->pTfs); - terrno = code; - return code; } - return 0; + tFreeSCreateVnodeReq(&createReq); + terrno = code; + return code; } int32_t vmProcessDropVnodeReq(SVnodesMgmt *pMgmt, SNodeMsg *pMsg) { @@ -241,14 +268,14 @@ int32_t vmProcessDropVnodeReq(SVnodesMgmt *pMgmt, SNodeMsg *pMsg) { } pVnode->dropped = 1; - if (vmWriteVnodesToFile(pMgmt) != 0) { + if (vmWriteVnodeListToFile(pMgmt) != 0) { pVnode->dropped = 0; vmReleaseVnode(pMgmt, pVnode); return -1; } vmCloseVnode(pMgmt, pVnode); - vmWriteVnodesToFile(pMgmt); + vmWriteVnodeListToFile(pMgmt); return 0; } @@ -285,7 +312,7 @@ void vmInitMsgHandle(SMgmtWrapper *pWrapper) { dmSetMsgHandle(pWrapper, TDMT_VND_CANCEL_SMA, vmProcessWriteMsg, DEFAULT_HANDLE); dmSetMsgHandle(pWrapper, TDMT_VND_DROP_SMA, vmProcessWriteMsg, DEFAULT_HANDLE); dmSetMsgHandle(pWrapper, TDMT_VND_SUBMIT_RSMA, vmProcessWriteMsg, DEFAULT_HANDLE); - dmSetMsgHandle(pWrapper, TDMT_VND_MQ_VG_CHANGE, (NodeMsgFp)vmProcessWriteMsg, DEFAULT_HANDLE); + dmSetMsgHandle(pWrapper, TDMT_VND_MQ_VG_CHANGE, vmProcessWriteMsg, DEFAULT_HANDLE); dmSetMsgHandle(pWrapper, TDMT_VND_CONSUME, vmProcessFetchMsg, DEFAULT_HANDLE); dmSetMsgHandle(pWrapper, TDMT_VND_TASK_DEPLOY, vmProcessWriteMsg, DEFAULT_HANDLE); dmSetMsgHandle(pWrapper, TDMT_VND_QUERY_HEARTBEAT, vmProcessFetchMsg, DEFAULT_HANDLE); @@ -298,14 +325,13 @@ void vmInitMsgHandle(SMgmtWrapper *pWrapper) { dmSetMsgHandle(pWrapper, TDMT_DND_CREATE_VNODE, vmProcessMgmtMsg, DEFAULT_HANDLE); dmSetMsgHandle(pWrapper, 
TDMT_DND_DROP_VNODE, vmProcessMgmtMsg, DEFAULT_HANDLE); - // sync integration - dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_TIMEOUT, (NodeMsgFp)vmProcessSyncMsg, DEFAULT_HANDLE); - dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_PING, (NodeMsgFp)vmProcessSyncMsg, DEFAULT_HANDLE); - dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_PING_REPLY, (NodeMsgFp)vmProcessSyncMsg, DEFAULT_HANDLE); - dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_CLIENT_REQUEST, (NodeMsgFp)vmProcessSyncMsg, DEFAULT_HANDLE); - dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_CLIENT_REQUEST_REPLY, (NodeMsgFp)vmProcessSyncMsg, DEFAULT_HANDLE); - dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_REQUEST_VOTE, (NodeMsgFp)vmProcessSyncMsg, DEFAULT_HANDLE); - dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_REQUEST_VOTE_REPLY, (NodeMsgFp)vmProcessSyncMsg, DEFAULT_HANDLE); - dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_APPEND_ENTRIES, (NodeMsgFp)vmProcessSyncMsg, DEFAULT_HANDLE); - dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_APPEND_ENTRIES_REPLY, (NodeMsgFp)vmProcessSyncMsg, DEFAULT_HANDLE); + dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_TIMEOUT, vmProcessSyncMsg, DEFAULT_HANDLE); + dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_PING, vmProcessSyncMsg, DEFAULT_HANDLE); + dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_PING_REPLY, vmProcessSyncMsg, DEFAULT_HANDLE); + dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_CLIENT_REQUEST, vmProcessSyncMsg, DEFAULT_HANDLE); + dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_CLIENT_REQUEST_REPLY, vmProcessSyncMsg, DEFAULT_HANDLE); + dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_REQUEST_VOTE, vmProcessSyncMsg, DEFAULT_HANDLE); + dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_REQUEST_VOTE_REPLY, vmProcessSyncMsg, DEFAULT_HANDLE); + dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_APPEND_ENTRIES, vmProcessSyncMsg, DEFAULT_HANDLE); + dmSetMsgHandle(pWrapper, TDMT_VND_SYNC_APPEND_ENTRIES_REPLY, vmProcessSyncMsg, DEFAULT_HANDLE); } diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index 3088c5dea4..ab4174857a 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -15,7 +15,6 @@ #define _DEFAULT_SOURCE #include "vmInt.h" -#include "libs/function/function.h" SVnodeObj *vmAcquireVnode(SVnodesMgmt *pMgmt, int32_t vgId) { SVnodeObj *pVnode = NULL; @@ -55,14 +54,14 @@ int32_t vmOpenVnode(SVnodesMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) { pVnode->vgId = pCfg->vgId; pVnode->refCount = 0; + pVnode->vgVersion = pCfg->vgVersion; pVnode->dropped = 0; pVnode->accessState = TSDB_VN_ALL_ACCCESS; - pVnode->pWrapper = pMgmt->pWrapper; - pVnode->pImpl = pImpl; - pVnode->vgVersion = pCfg->vgVersion; pVnode->dbUid = pCfg->dbUid; pVnode->db = tstrdup(pCfg->db); pVnode->path = tstrdup(pCfg->path); + pVnode->pImpl = pImpl; + pVnode->pWrapper = pMgmt->pWrapper; if (pVnode->path == NULL || pVnode->db == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -78,14 +77,11 @@ int32_t vmOpenVnode(SVnodesMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) { int32_t code = taosHashPut(pMgmt->hash, &pVnode->vgId, sizeof(int32_t), &pVnode, sizeof(SVnodeObj *)); taosWUnLockLatch(&pMgmt->latch); - if (code != 0) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - } return code; } void vmCloseVnode(SVnodesMgmt *pMgmt, SVnodeObj *pVnode) { - char path[TSDB_FILENAME_LEN]; + char path[TSDB_FILENAME_LEN] = {0}; taosWLockLatch(&pMgmt->latch); taosHashRemove(pMgmt->hash, &pVnode->vgId, sizeof(int32_t)); @@ -98,6 +94,7 @@ void vmCloseVnode(SVnodesMgmt *pMgmt, SVnodeObj *pVnode) { while (!taosQueueEmpty(pVnode->pApplyQ)) taosMsleep(10); while (!taosQueueEmpty(pVnode->pQueryQ)) taosMsleep(10); while 
(!taosQueueEmpty(pVnode->pFetchQ)) taosMsleep(10); + while (!taosQueueEmpty(pVnode->pMergeQ)) taosMsleep(10); vmFreeQueue(pMgmt, pVnode); vnodeClose(pVnode->pImpl); @@ -116,7 +113,7 @@ void vmCloseVnode(SVnodesMgmt *pMgmt, SVnodeObj *pVnode) { taosMemoryFree(pVnode); } -static void *vmOpenVnodeFunc(void *param) { +static void *vmOpenVnodeInThread(void *param) { SVnodeThread *pThread = param; SVnodesMgmt *pMgmt = pThread->pMgmt; SDnode *pDnode = pMgmt->pDnode; @@ -135,10 +132,12 @@ static void *vmOpenVnodeFunc(void *param) { SMsgCb msgCb = pMgmt->pDnode->data.msgCb; msgCb.pWrapper = pMgmt->pWrapper; + msgCb.queueFps[WRITE_QUEUE] = vmPutMsgToWriteQueue; + msgCb.queueFps[SYNC_QUEUE] = vmPutMsgToSyncQueue; + msgCb.queueFps[APPLY_QUEUE] = vmPutMsgToApplyQueue; msgCb.queueFps[QUERY_QUEUE] = vmPutMsgToQueryQueue; msgCb.queueFps[FETCH_QUEUE] = vmPutMsgToFetchQueue; - msgCb.queueFps[APPLY_QUEUE] = vmPutMsgToApplyQueue; - msgCb.queueFps[SYNC_QUEUE] = vmPutMsgToSyncQueue; // sync integration + msgCb.queueFps[MERGE_QUEUE] = vmPutMsgToMergeQueue; msgCb.qsizeFp = vmGetQueueSize; snprintf(path, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, pCfg->vgId); SVnode *pImpl = vnodeOpen(path, pMgmt->pTfs, msgCb); @@ -147,12 +146,10 @@ static void *vmOpenVnodeFunc(void *param) { pThread->failed++; } else { vmOpenVnode(pMgmt, pCfg, pImpl); - //vnodeStart(pImpl); dDebug("vgId:%d, is opened by thread:%d", pCfg->vgId, pThread->threadIndex); pThread->opened++; + atomic_add_fetch_32(&pMgmt->state.openVnodes, 1); } - - atomic_add_fetch_32(&pMgmt->state.openVnodes, 1); } dDebug("thread:%d, total vnodes:%d, opened:%d failed:%d", pThread->threadIndex, pThread->vnodeNum, pThread->opened, @@ -162,29 +159,24 @@ static void *vmOpenVnodeFunc(void *param) { static int32_t vmOpenVnodes(SVnodesMgmt *pMgmt) { SDnode *pDnode = pMgmt->pDnode; - taosInitRWLatch(&pMgmt->latch); pMgmt->hash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); if (pMgmt->hash == NULL) { - dError("failed to init vnode hash"); terrno = TSDB_CODE_OUT_OF_MEMORY; + dError("failed to init vnode hash since %s", terrstr()); return -1; } SWrapperCfg *pCfgs = NULL; int32_t numOfVnodes = 0; - if (vmGetVnodesFromFile(pMgmt, &pCfgs, &numOfVnodes) != 0) { + if (vmGetVnodeListFromFile(pMgmt, &pCfgs, &numOfVnodes) != 0) { dInfo("failed to get vnode list from disk since %s", terrstr()); return -1; } pMgmt->state.totalVnodes = numOfVnodes; -#if 0 - int32_t threadNum = tsNumOfCores; -#else - int32_t threadNum = 1; -#endif + int32_t threadNum = 1; // tsNumOfCores; int32_t vnodesPerThread = numOfVnodes / threadNum + 1; SVnodeThread *threads = taosMemoryCalloc(threadNum, sizeof(SVnodeThread)); @@ -209,7 +201,7 @@ static int32_t vmOpenVnodes(SVnodesMgmt *pMgmt) { TdThreadAttr thAttr; taosThreadAttrInit(&thAttr); taosThreadAttrSetDetachState(&thAttr, PTHREAD_CREATE_JOINABLE); - if (taosThreadCreate(&pThread->thread, &thAttr, vmOpenVnodeFunc, pThread) != 0) { + if (taosThreadCreate(&pThread->thread, &thAttr, vmOpenVnodeInThread, pThread) != 0) { dError("thread:%d, failed to create thread to open vnode, reason:%s", pThread->threadIndex, strerror(errno)); } @@ -239,7 +231,7 @@ static void vmCloseVnodes(SVnodesMgmt *pMgmt) { dInfo("start to close all vnodes"); int32_t numOfVnodes = 0; - SVnodeObj **pVnodes = vmGetVnodesFromHash(pMgmt, &numOfVnodes); + SVnodeObj **pVnodes = vmGetVnodeListFromHash(pMgmt, &numOfVnodes); for (int32_t i = 0; i < numOfVnodes; ++i) { vmCloseVnode(pMgmt, pVnodes[i]); @@ -266,12 +258,9 @@ static void 
vmCleanup(SMgmtWrapper *pWrapper) { vmStopWorker(pMgmt); vnodeCleanup(); tfsClose(pMgmt->pTfs); - // walCleanUp(); taosMemoryFree(pMgmt); pWrapper->pMgmt = NULL; - // syncCleanUp(); - udfcClose(); dInfo("vnode-mgmt is cleaned up"); } @@ -312,7 +301,6 @@ static int32_t vmInit(SMgmtWrapper *pWrapper) { } dmReportStartup(pDnode, "vnode-wal", "initialized"); - // sync integration if (syncInit() != 0) { dError("failed to open sync since %s", terrstr()); return -1; @@ -380,23 +368,7 @@ static int32_t vmStart(SMgmtWrapper *pWrapper) { } static void vmStop(SMgmtWrapper *pWrapper) { -#if 0 - dDebug("vnode-mgmt start to stop"); - SVnodesMgmt *pMgmt = pWrapper->pMgmt; - taosRLockLatch(&pMgmt->latch); - - void *pIter = taosHashIterate(pMgmt->hash, NULL); - while (pIter) { - SVnodeObj **ppVnode = pIter; - if (ppVnode == NULL || *ppVnode == NULL) continue; - - SVnodeObj *pVnode = *ppVnode; - vnodeStop(pVnode->pImpl); - pIter = taosHashIterate(pMgmt->hash, pIter); - } - - taosRUnLockLatch(&pMgmt->latch); -#endif + // process inside the vnode } void vmSetMgmtFp(SMgmtWrapper *pWrapper) { @@ -412,25 +384,3 @@ void vmSetMgmtFp(SMgmtWrapper *pWrapper) { pWrapper->fp = mgmtFp; } -void vmGetVnodeLoads(SMgmtWrapper *pWrapper, SMonVloadInfo *pInfo) { - SVnodesMgmt *pMgmt = pWrapper->pMgmt; - - pInfo->pVloads = taosArrayInit(pMgmt->state.totalVnodes, sizeof(SVnodeLoad)); - if (pInfo->pVloads == NULL) return; - - taosRLockLatch(&pMgmt->latch); - - void *pIter = taosHashIterate(pMgmt->hash, NULL); - while (pIter) { - SVnodeObj **ppVnode = pIter; - if (ppVnode == NULL || *ppVnode == NULL) continue; - - SVnodeObj *pVnode = *ppVnode; - SVnodeLoad vload = {0}; - vnodeGetLoad(pVnode->pImpl, &vload); - taosArrayPush(pInfo->pVloads, &vload); - pIter = taosHashIterate(pMgmt->hash, pIter); - } - - taosRUnLockLatch(&pMgmt->latch); -} diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c index 1858359078..6cfcf67a99 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c @@ -14,29 +14,30 @@ */ #define _DEFAULT_SOURCE - #include "vmInt.h" #include "qworker.h" #include "sync.h" #include "syncTools.h" -static inline void vmSendRsp(SMgmtWrapper *pWrapper, SNodeMsg *pMsg, int32_t code) { - SRpcMsg rsp = {.handle = pMsg->rpcMsg.handle, - .ahandle = pMsg->rpcMsg.ahandle, - .refId = pMsg->rpcMsg.refId, - .code = code, - .pCont = pMsg->pRsp, - .contLen = pMsg->rspLen}; +static inline void vmSendRsp(SNodeMsg *pMsg, int32_t code) { + SRpcMsg rsp = { + .handle = pMsg->rpcMsg.handle, + .ahandle = pMsg->rpcMsg.ahandle, + .refId = pMsg->rpcMsg.refId, + .code = code, + .pCont = pMsg->pRsp, + .contLen = pMsg->rspLen, + }; tmsgSendRsp(&rsp); } -static void vmProcessMgmtQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) { +static void vmProcessMgmtMonitorQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) { SVnodesMgmt *pMgmt = pInfo->ahandle; int32_t code = -1; tmsg_t msgType = pMsg->rpcMsg.msgType; - dTrace("msg:%p, will be processed in vnode-m queue", pMsg); + dTrace("msg:%p, will be processed in vnode-mgmt/monitor queue", pMsg); switch (msgType) { case TDMT_MON_VM_INFO: @@ -58,12 +59,12 @@ static void vmProcessMgmtQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) { break; default: terrno = TSDB_CODE_MSG_NOT_PROCESSED; - dError("msg:%p, not processed in vnode-mgmt queue", pMsg); + dError("msg:%p, not processed in vnode-mgmt/monitor queue", pMsg); } if (msgType & 1u) { if (code != 0 && terrno != 0) code = terrno; - vmSendRsp(pMgmt->pWrapper, pMsg, code); + vmSendRsp(pMsg, 
code); } dTrace("msg:%p, is freed, result:0x%04x:%s", pMsg, code & 0XFFFF, tstrerror(code)); @@ -77,7 +78,9 @@ static void vmProcessQueryQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) { dTrace("msg:%p, will be processed in vnode-query queue", pMsg); int32_t code = vnodeProcessQueryMsg(pVnode->pImpl, &pMsg->rpcMsg); if (code != 0) { - vmSendRsp(pVnode->pWrapper, pMsg, code); + if (terrno != 0) code = terrno; + vmSendRsp(pMsg, code); + dTrace("msg:%p, is freed, result:0x%04x:%s", pMsg, code & 0XFFFF, tstrerror(code)); rpcFreeCont(pMsg->rpcMsg.pCont); taosFreeQitem(pMsg); @@ -90,7 +93,9 @@ static void vmProcessFetchQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) { dTrace("msg:%p, will be processed in vnode-fetch queue", pMsg); int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, &pMsg->rpcMsg, pInfo); if (code != 0) { - vmSendRsp(pVnode->pWrapper, pMsg, code); + if (terrno != 0) code = terrno; + vmSendRsp(pMsg, code); + dTrace("msg:%p, is freed, result:0x%04x:%s", pMsg, code & 0XFFFF, tstrerror(code)); rpcFreeCont(pMsg->rpcMsg.pCont); taosFreeQitem(pMsg); @@ -114,32 +119,10 @@ static void vmProcessWriteQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO dTrace("msg:%p, will be processed in vnode-write queue", pMsg); if (taosArrayPush(pArray, &pMsg) == NULL) { dTrace("msg:%p, failed to process since %s", pMsg, terrstr()); - vmSendRsp(pVnode->pWrapper, pMsg, TSDB_CODE_OUT_OF_MEMORY); + vmSendRsp(pMsg, TSDB_CODE_OUT_OF_MEMORY); } } -#if 0 - int64_t version; - - vnodePreprocessWriteReqs(pVnode->pImpl, pArray, &version); - - numOfMsgs = taosArrayGetSize(pArray); - for (int32_t i = 0; i < numOfMsgs; i++) { - SNodeMsg *pMsg = *(SNodeMsg **)taosArrayGet(pArray, i); - SRpcMsg *pRpc = &pMsg->rpcMsg; - - rsp.pCont = NULL; - rsp.contLen = 0; - rsp.code = 0; - rsp.handle = pRpc->handle; - rsp.ahandle = pRpc->ahandle; - rsp.refId = pRpc->refId; - - int32_t code = vnodeProcessWriteReq(pVnode->pImpl, pRpc, version++, &rsp); - tmsgSendRsp(&rsp); - } -#else - // sync integration response for (int i = 0; i < taosArrayGetSize(pArray); i++) { SNodeMsg *pMsg; SRpcMsg *pRpc; @@ -174,7 +157,6 @@ static void vmProcessWriteQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO assert(0); } } -#endif for (int32_t i = 0; i < numOfMsgs; i++) { SNodeMsg *pMsg = *(SNodeMsg **)taosArrayGet(pArray, i); @@ -192,9 +174,6 @@ static void vmProcessApplyQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO SRpcMsg rsp; for (int32_t i = 0; i < numOfMsgs; ++i) { -#if 1 - // sync integration - taosGetQitem(qall, (void **)&pMsg); // init response rpc msg @@ -225,7 +204,9 @@ static void vmProcessApplyQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO rsp.refId = pMsg->rpcMsg.refId; tmsgSendRsp(&rsp); } -#endif + + rpcFreeCont(pMsg->rpcMsg.pCont); + taosFreeQitem(pMsg); } } @@ -239,6 +220,9 @@ static void vmProcessSyncQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOf // todo SRpcMsg *pRsp = NULL; (void)vnodeProcessSyncReq(pVnode->pImpl, &pMsg->rpcMsg, &pRsp); + + rpcFreeCont(pMsg->rpcMsg.pCont); + taosFreeQitem(pMsg); } } @@ -252,7 +236,9 @@ static void vmProcessMergeQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO dTrace("msg:%p, will be processed in vnode-merge queue", pMsg); int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, &pMsg->rpcMsg, pInfo); if (code != 0) { - vmSendRsp(pVnode->pWrapper, pMsg, code); + if (terrno != 0) code = terrno; + vmSendRsp(pMsg, code); + dTrace("msg:%p, is freed, result:0x%04x:%s", pMsg, code & 0XFFFF, tstrerror(code)); rpcFreeCont(pMsg->rpcMsg.pCont); taosFreeQitem(pMsg); @@ -263,16 +249,17 @@ 
static void vmProcessMergeQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO static int32_t vmPutNodeMsgToQueue(SVnodesMgmt *pMgmt, SNodeMsg *pMsg, EQueueType qtype) { SRpcMsg *pRpc = &pMsg->rpcMsg; SMsgHead *pHead = pRpc->pCont; + int32_t code = 0; + pHead->contLen = ntohl(pHead->contLen); pHead->vgId = ntohl(pHead->vgId); SVnodeObj *pVnode = vmAcquireVnode(pMgmt, pHead->vgId); if (pVnode == NULL) { dError("vgId:%d, failed to write msg:%p to vnode-queue since %s", pHead->vgId, pMsg, terrstr()); - return terrno; + return terrno != 0 ? terrno : -1; } - int32_t code = 0; switch (qtype) { case QUERY_QUEUE: dTrace("msg:%p, type:%s will be written into vnode-query queue", pMsg, TMSG_INFO(pRpc->msgType)); @@ -332,7 +319,7 @@ int32_t vmProcessMergeMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { int32_t vmProcessMgmtMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { SVnodesMgmt *pMgmt = pWrapper->pMgmt; SSingleWorker *pWorker = &pMgmt->mgmtWorker; - dTrace("msg:%p, will be written to vnode-mgmt queue, worker:%s", pMsg, pWorker->name); + dTrace("msg:%p, will be put into vnode-mgmt queue, worker:%s", pMsg, pWorker->name); taosWriteQitem(pWorker->queue, pMsg); return 0; } @@ -341,7 +328,7 @@ int32_t vmProcessMonitorMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { SVnodesMgmt *pMgmt = pWrapper->pMgmt; SSingleWorker *pWorker = &pMgmt->monitorWorker; - dTrace("msg:%p, put into worker:%s", pMsg, pWorker->name); + dTrace("msg:%p, will be put into vnode-monitor queue, worker:%s", pMsg, pWorker->name); taosWriteQitem(pWorker->queue, pMsg); return 0; } @@ -356,13 +343,15 @@ static int32_t vmPutRpcMsgToQueue(SMgmtWrapper *pWrapper, SRpcMsg *pRpc, EQueueT SNodeMsg *pMsg = taosAllocateQitem(sizeof(SNodeMsg)); int32_t code = 0; - if (pMsg == NULL) { - code = -1; - } else { + if (pMsg != NULL) { dTrace("msg:%p, is created, type:%s", pMsg, TMSG_INFO(pRpc->msgType)); pMsg->rpcMsg = *pRpc; // if (pMsg->rpcMsg.handle != NULL) assert(pMsg->rpcMsg.refId != 0); switch (qtype) { + case WRITE_QUEUE: + dTrace("msg:%p, will be put into vnode-write queue", pMsg); + taosWriteQitem(pVnode->pWriteQ, pMsg); + break; case QUERY_QUEUE: dTrace("msg:%p, will be put into vnode-query queue", pMsg); taosWriteQitem(pVnode->pQueryQ, pMsg); @@ -379,7 +368,7 @@ static int32_t vmPutRpcMsgToQueue(SMgmtWrapper *pWrapper, SRpcMsg *pRpc, EQueueT dTrace("msg:%p, will be put into vnode-merge queue", pMsg); taosWriteQitem(pVnode->pMergeQ, pMsg); break; - case SYNC_QUEUE: // sync integration + case SYNC_QUEUE: dTrace("msg:%p, will be put into vnode-sync queue", pMsg); taosWriteQitem(pVnode->pSyncQ, pMsg); break; @@ -389,10 +378,23 @@ static int32_t vmPutRpcMsgToQueue(SMgmtWrapper *pWrapper, SRpcMsg *pRpc, EQueueT break; } } + vmReleaseVnode(pMgmt, pVnode); return code; } +int32_t vmPutMsgToWriteQueue(SMgmtWrapper *pWrapper, SRpcMsg *pRpc) { + return vmPutRpcMsgToQueue(pWrapper, pRpc, WRITE_QUEUE); +} + +int32_t vmPutMsgToSyncQueue(SMgmtWrapper *pWrapper, SRpcMsg *pRpc) { + return vmPutRpcMsgToQueue(pWrapper, pRpc, SYNC_QUEUE); +} + +int32_t vmPutMsgToApplyQueue(SMgmtWrapper *pWrapper, SRpcMsg *pRpc) { + return vmPutRpcMsgToQueue(pWrapper, pRpc, APPLY_QUEUE); +} + int32_t vmPutMsgToQueryQueue(SMgmtWrapper *pWrapper, SRpcMsg *pRpc) { return vmPutRpcMsgToQueue(pWrapper, pRpc, QUERY_QUEUE); } @@ -401,30 +403,15 @@ int32_t vmPutMsgToFetchQueue(SMgmtWrapper *pWrapper, SRpcMsg *pRpc) { return vmPutRpcMsgToQueue(pWrapper, pRpc, FETCH_QUEUE); } -int32_t vmPutMsgToApplyQueue(SMgmtWrapper *pWrapper, SRpcMsg *pRpc) { - return vmPutRpcMsgToQueue(pWrapper, pRpc, 
APPLY_QUEUE); -} - int32_t vmPutMsgToMergeQueue(SMgmtWrapper *pWrapper, SRpcMsg *pRpc) { return vmPutRpcMsgToQueue(pWrapper, pRpc, MERGE_QUEUE); } -// sync integration -int32_t vmPutMsgToSyncQueue(SMgmtWrapper *pWrapper, SRpcMsg *pRpc) { - return vmPutRpcMsgToQueue(pWrapper, pRpc, SYNC_QUEUE); -} - int32_t vmGetQueueSize(SMgmtWrapper *pWrapper, int32_t vgId, EQueueType qtype) { int32_t size = -1; SVnodeObj *pVnode = vmAcquireVnode(pWrapper->pMgmt, vgId); if (pVnode != NULL) { switch (qtype) { - case QUERY_QUEUE: - size = taosQueueSize(pVnode->pQueryQ); - break; - case FETCH_QUEUE: - size = taosQueueSize(pVnode->pFetchQ); - break; case WRITE_QUEUE: size = taosQueueSize(pVnode->pWriteQ); break; @@ -434,6 +421,12 @@ int32_t vmGetQueueSize(SMgmtWrapper *pWrapper, int32_t vgId, EQueueType qtype) { case APPLY_QUEUE: size = taosQueueSize(pVnode->pApplyQ); break; + case QUERY_QUEUE: + size = taosQueueSize(pVnode->pQueryQ); + break; + case FETCH_QUEUE: + size = taosQueueSize(pVnode->pFetchQ); + break; case MERGE_QUEUE: size = taosQueueSize(pVnode->pMergeQ); break; @@ -447,14 +440,14 @@ int32_t vmGetQueueSize(SMgmtWrapper *pWrapper, int32_t vgId, EQueueType qtype) { int32_t vmAllocQueue(SVnodesMgmt *pMgmt, SVnodeObj *pVnode) { pVnode->pWriteQ = tWWorkerAllocQueue(&pMgmt->writePool, pVnode, (FItems)vmProcessWriteQueue); - pVnode->pApplyQ = tWWorkerAllocQueue(&pMgmt->writePool, pVnode, (FItems)vmProcessApplyQueue); - pVnode->pMergeQ = tWWorkerAllocQueue(&pMgmt->mergePool, pVnode, (FItems)vmProcessMergeQueue); pVnode->pSyncQ = tWWorkerAllocQueue(&pMgmt->syncPool, pVnode, (FItems)vmProcessSyncQueue); - pVnode->pFetchQ = tQWorkerAllocQueue(&pMgmt->fetchPool, pVnode, (FItem)vmProcessFetchQueue); + pVnode->pApplyQ = tWWorkerAllocQueue(&pMgmt->writePool, pVnode, (FItems)vmProcessApplyQueue); pVnode->pQueryQ = tQWorkerAllocQueue(&pMgmt->queryPool, pVnode, (FItem)vmProcessQueryQueue); + pVnode->pFetchQ = tQWorkerAllocQueue(&pMgmt->fetchPool, pVnode, (FItem)vmProcessFetchQueue); + pVnode->pMergeQ = tWWorkerAllocQueue(&pMgmt->mergePool, pVnode, (FItems)vmProcessMergeQueue); - if (pVnode->pApplyQ == NULL || pVnode->pWriteQ == NULL || pVnode->pSyncQ == NULL || pVnode->pFetchQ == NULL || - pVnode->pQueryQ == NULL || pVnode->pMergeQ == NULL) { + if (pVnode->pWriteQ == NULL || pVnode->pSyncQ == NULL || pVnode->pApplyQ == NULL || pVnode->pQueryQ == NULL || + pVnode->pFetchQ == NULL || pVnode->pMergeQ == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; } @@ -464,17 +457,17 @@ int32_t vmAllocQueue(SVnodesMgmt *pMgmt, SVnodeObj *pVnode) { } void vmFreeQueue(SVnodesMgmt *pMgmt, SVnodeObj *pVnode) { + tWWorkerFreeQueue(&pMgmt->writePool, pVnode->pWriteQ); + tWWorkerFreeQueue(&pMgmt->syncPool, pVnode->pSyncQ); + tWWorkerFreeQueue(&pMgmt->writePool, pVnode->pApplyQ); tQWorkerFreeQueue(&pMgmt->queryPool, pVnode->pQueryQ); tQWorkerFreeQueue(&pMgmt->fetchPool, pVnode->pFetchQ); - tWWorkerFreeQueue(&pMgmt->writePool, pVnode->pWriteQ); - tWWorkerFreeQueue(&pMgmt->writePool, pVnode->pApplyQ); tWWorkerFreeQueue(&pMgmt->mergePool, pVnode->pMergeQ); - tWWorkerFreeQueue(&pMgmt->syncPool, pVnode->pSyncQ); pVnode->pWriteQ = NULL; - pVnode->pApplyQ = NULL; pVnode->pSyncQ = NULL; - pVnode->pFetchQ = NULL; + pVnode->pApplyQ = NULL; pVnode->pQueryQ = NULL; + pVnode->pFetchQ = NULL; pVnode->pMergeQ = NULL; dDebug("vgId:%d, vnode queue is freed", pVnode->vgId); } @@ -497,17 +490,23 @@ int32_t vmStartWorker(SVnodesMgmt *pMgmt) { pWPool->max = tsNumOfVnodeWriteThreads; if (tWWorkerInit(pWPool) != 0) return -1; - pWPool = &pMgmt->syncPool; 
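[Editor's note] The vmStartWorker hunk above (like the matching ones in bmWorker.c, mmWorker.c, qmWorker.c and smWorker.c) rewrites every inline worker config as a multi-line designated initializer and stops reusing pWPool for the sync and merge pools. A minimal sketch of the resulting pattern is shown below; it is illustrative only and not part of the patch. The helper name startMgmtWorker and the include line are assumptions; only SSingleWorkerCfg, tSingleWorkerInit, vmProcessMgmtMonitorQueue, dError and terrstr are taken from the patch itself.

#include "tworker.h"  /* assumed: the repo header that declares SSingleWorkerCfg/tSingleWorkerInit */

/* Illustrative sketch (not part of the patch): one designated-initializer cfg per worker,
 * init checked immediately, failure reported through terrstr(). */
static int32_t startMgmtWorker(SVnodesMgmt *pMgmt) {
  SSingleWorkerCfg cfg = {
      .min = 1,
      .max = 1,
      .name = "vnode-mgmt",
      .fp = (FItem)vmProcessMgmtMonitorQueue,
      .param = pMgmt,
  };
  if (tSingleWorkerInit(&pMgmt->mgmtWorker, &cfg) != 0) {
    dError("failed to start vnode-mgmt worker since %s", terrstr());
    return -1;
  }
  return 0;
}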
- pWPool->name = "vnode-sync"; - pWPool->max = tsNumOfVnodeSyncThreads; - if (tWWorkerInit(pWPool) != 0) return -1; + SWWorkerPool *pSPool = &pMgmt->syncPool; + pSPool->name = "vnode-sync"; + pSPool->max = tsNumOfVnodeSyncThreads; + if (tWWorkerInit(pSPool) != 0) return -1; - pWPool = &pMgmt->mergePool; - pWPool->name = "vnode-merge"; - pWPool->max = tsNumOfVnodeMergeThreads; - if (tWWorkerInit(pWPool) != 0) return -1; + SWWorkerPool *pMPool = &pMgmt->mergePool; + pMPool->name = "vnode-merge"; + pMPool->max = tsNumOfVnodeMergeThreads; + if (tWWorkerInit(pMPool) != 0) return -1; - SSingleWorkerCfg cfg = {.min = 1, .max = 1, .name = "vnode-mgmt", .fp = (FItem)vmProcessMgmtQueue, .param = pMgmt}; + SSingleWorkerCfg cfg = { + .min = 1, + .max = 1, + .name = "vnode-mgmt", + .fp = (FItem)vmProcessMgmtMonitorQueue, + .param = pMgmt, + }; if (tSingleWorkerInit(&pMgmt->mgmtWorker, &cfg) != 0) { dError("failed to start vnode-mgmt worker since %s", terrstr()); return -1; @@ -515,7 +514,12 @@ int32_t vmStartWorker(SVnodesMgmt *pMgmt) { if (tsMultiProcess) { SSingleWorkerCfg mCfg = { - .min = 1, .max = 1, .name = "vnode-monitor", .fp = (FItem)vmProcessMgmtQueue, .param = pMgmt}; + .min = 1, + .max = 1, + .name = "vnode-monitor", + .fp = (FItem)vmProcessMgmtMonitorQueue, + .param = pMgmt, + }; if (tSingleWorkerInit(&pMgmt->monitorWorker, &mCfg) != 0) { dError("failed to start mnode vnode-monitor worker since %s", terrstr()); return -1; @@ -529,10 +533,10 @@ int32_t vmStartWorker(SVnodesMgmt *pMgmt) { void vmStopWorker(SVnodesMgmt *pMgmt) { tSingleWorkerCleanup(&pMgmt->monitorWorker); tSingleWorkerCleanup(&pMgmt->mgmtWorker); - tQWorkerCleanup(&pMgmt->fetchPool); - tQWorkerCleanup(&pMgmt->queryPool); tWWorkerCleanup(&pMgmt->writePool); tWWorkerCleanup(&pMgmt->syncPool); + tQWorkerCleanup(&pMgmt->queryPool); + tQWorkerCleanup(&pMgmt->fetchPool); tWWorkerCleanup(&pMgmt->mergePool); dDebug("vnode workers are closed"); } diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h index 98d29a6ca5..2751e0752e 100644 --- a/source/dnode/mnode/impl/inc/mndDef.h +++ b/source/dnode/mnode/impl/inc/mndDef.h @@ -574,6 +574,7 @@ typedef struct { char sourceDb[TSDB_DB_FNAME_LEN]; char targetDb[TSDB_DB_FNAME_LEN]; char targetSTbName[TSDB_TABLE_FNAME_LEN]; + int64_t targetStbUid; int64_t createTime; int64_t updateTime; int64_t uid; diff --git a/source/dnode/mnode/impl/src/mndDef.c b/source/dnode/mnode/impl/src/mndDef.c index a143371089..a2c628b8a1 100644 --- a/source/dnode/mnode/impl/src/mndDef.c +++ b/source/dnode/mnode/impl/src/mndDef.c @@ -416,6 +416,9 @@ int32_t tEncodeSStreamObj(SEncoder *pEncoder, const SStreamObj *pObj) { /*int32_t outputNameSz = 0;*/ if (tEncodeCStr(pEncoder, pObj->name) < 0) return -1; if (tEncodeCStr(pEncoder, pObj->sourceDb) < 0) return -1; + if (tEncodeCStr(pEncoder, pObj->targetDb) < 0) return -1; + if (tEncodeCStr(pEncoder, pObj->targetSTbName) < 0) return -1; + if (tEncodeI64(pEncoder, pObj->targetStbUid) < 0) return -1; if (tEncodeI64(pEncoder, pObj->createTime) < 0) return -1; if (tEncodeI64(pEncoder, pObj->updateTime) < 0) return -1; if (tEncodeI64(pEncoder, pObj->uid) < 0) return -1; @@ -465,6 +468,9 @@ int32_t tEncodeSStreamObj(SEncoder *pEncoder, const SStreamObj *pObj) { int32_t tDecodeSStreamObj(SDecoder *pDecoder, SStreamObj *pObj) { if (tDecodeCStrTo(pDecoder, pObj->name) < 0) return -1; if (tDecodeCStrTo(pDecoder, pObj->sourceDb) < 0) return -1; + if (tDecodeCStrTo(pDecoder, pObj->targetDb) < 0) return -1; + if (tDecodeCStrTo(pDecoder, 
pObj->targetSTbName) < 0) return -1; + if (tDecodeI64(pDecoder, &pObj->targetStbUid) < 0) return -1; if (tDecodeI64(pDecoder, &pObj->createTime) < 0) return -1; if (tDecodeI64(pDecoder, &pObj->updateTime) < 0) return -1; if (tDecodeI64(pDecoder, &pObj->uid) < 0) return -1; @@ -529,4 +535,4 @@ void *tDecodeSMqOffsetObj(void *buf, SMqOffsetObj *pOffset) { buf = taosDecodeStringTo(buf, pOffset->key); buf = taosDecodeFixedI64(buf, &pOffset->offset); return buf; -} \ No newline at end of file +} diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index a55cf41262..e522be0629 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -335,7 +335,6 @@ static int32_t mndProcessStatusReq(SNodeMsg *pReq) { } bool roleChanged = false; for (int32_t vg = 0; vg < pVgroup->replica; ++vg) { - // sync integration if (pVgroup->vnodeGid[vg].dnodeId == statusReq.dnodeId) { if (pVgroup->vnodeGid[vg].role != pVload->syncState) { roleChanged = true; diff --git a/source/dnode/mnode/impl/src/mndScheduler.c b/source/dnode/mnode/impl/src/mndScheduler.c index 5bfa70e76d..824f031004 100644 --- a/source/dnode/mnode/impl/src/mndScheduler.c +++ b/source/dnode/mnode/impl/src/mndScheduler.c @@ -204,6 +204,7 @@ int32_t mndAddShuffledSinkToStream(SMnode* pMnode, STrans* pTrans, SStreamObj* p pTask->smaSink.smaId = pStream->smaId; } else { pTask->sinkType = TASK_SINK__TABLE; + pTask->tbSink.stbUid = pStream->targetStbUid; pTask->tbSink.pSchemaWrapper = tCloneSSchemaWrapper(&pStream->outputSchema); ASSERT(pTask->tbSink.pSchemaWrapper); } @@ -244,9 +245,10 @@ int32_t mndAddFixedSinkToStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStr pTask->smaSink.smaId = pStream->smaId; } else { pTask->sinkType = TASK_SINK__TABLE; + pTask->tbSink.stbUid = pStream->targetStbUid; pTask->tbSink.pSchemaWrapper = tCloneSSchemaWrapper(&pStream->outputSchema); } - // + // dispatch pTask->dispatchType = TASK_DISPATCH__NONE; @@ -319,6 +321,7 @@ int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream) { pTask->smaSink.smaId = pStream->smaId; } else { pTask->sinkType = TASK_SINK__TABLE; + pTask->tbSink.stbUid = pStream->targetStbUid; pTask->tbSink.pSchemaWrapper = tCloneSSchemaWrapper(&pStream->outputSchema); } #endif diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 2541232a53..8c1557b73d 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -360,6 +360,8 @@ static int32_t mndCreateStbForStream(SMnode *pMnode, STrans *pTrans, const SStre goto _OVER; } + stbObj.uid = pStream->targetStbUid; + if (mndAddStbToTrans(pMnode, pTrans, pDb, &stbObj) < 0) goto _OVER; return 0; @@ -379,6 +381,7 @@ static int32_t mndCreateStream(SMnode *pMnode, SNodeMsg *pReq, SCMCreateStreamRe streamObj.createTime = taosGetTimestampMs(); streamObj.updateTime = streamObj.createTime; streamObj.uid = mndGenerateUid(pCreate->name, strlen(pCreate->name)); + streamObj.targetStbUid = mndGenerateUid(pCreate->targetStbFullName, TSDB_TABLE_FNAME_LEN); streamObj.dbUid = pDb->uid; streamObj.version = 1; streamObj.sql = pCreate->sql; diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c index e57e9a0461..5085de8610 100644 --- a/source/dnode/mnode/impl/src/mndTrans.c +++ b/source/dnode/mnode/impl/src/mndTrans.c @@ -967,7 +967,7 @@ static void mndTransResetActions(SMnode *pMnode, STrans *pTrans, SArray *pArray) pAction->msgSent = 0; 
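[Editor's note] Recapping the SStreamObj serialization change a few hunks above (mndDef.c): the new targetDb, targetSTbName and targetStbUid fields are written by tEncodeSStreamObj and read back by tDecodeSStreamObj, and the two sides must stay in the same order and position or every field after them is mis-aligned on decode. A minimal sketch of that paired pattern follows; it is illustrative only and not part of the patch. The helper names encodeStreamTarget/decodeStreamTarget are hypothetical; the field names and the tEncode*/tDecode* calls come from the patch.

/* Illustrative sketch (not part of the patch): keep encode and decode symmetric. */
static int32_t encodeStreamTarget(SEncoder *pEncoder, const SStreamObj *pObj) {
  if (tEncodeCStr(pEncoder, pObj->targetDb) < 0) return -1;
  if (tEncodeCStr(pEncoder, pObj->targetSTbName) < 0) return -1;
  if (tEncodeI64(pEncoder, pObj->targetStbUid) < 0) return -1;
  return 0;
}

static int32_t decodeStreamTarget(SDecoder *pDecoder, SStreamObj *pObj) {
  if (tDecodeCStrTo(pDecoder, pObj->targetDb) < 0) return -1;
  if (tDecodeCStrTo(pDecoder, pObj->targetSTbName) < 0) return -1;
  if (tDecodeI64(pDecoder, &pObj->targetStbUid) < 0) return -1;
  return 0;
}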
pAction->msgReceived = 0; pAction->errCode = 0; - mDebug("trans:%d, action:%d is reset and will be re-executed", pTrans->id, action); + mDebug("trans:%d, action:%d execute status is reset", pTrans->id, action); } } @@ -1043,7 +1043,7 @@ static int32_t mndTransExecuteActions(SMnode *pMnode, STrans *pTrans, SArray *pA return errCode; } } else { - mDebug("trans:%d, %d of %d actions executing", pTrans->id, numOfReceived, numOfActions); + mDebug("trans:%d, %d of %d actions executed", pTrans->id, numOfReceived, numOfActions); return TSDB_CODE_MND_ACTION_IN_PROGRESS; } } @@ -1405,15 +1405,18 @@ static int32_t mndRetrieveTrans(SNodeMsg *pReq, SShowObj *pShow, SSDataBlock *pB colDataAppend(pColInfo, numOfRows, (const char *)dbname, false); char type[TSDB_TRANS_TYPE_LEN + VARSTR_HEADER_SIZE] = {0}; - STR_WITH_MAXSIZE_TO_VARSTR(dbname, mndTransType(pTrans->type), pShow->pMeta->pSchemas[cols].bytes); + STR_WITH_MAXSIZE_TO_VARSTR(type, mndTransType(pTrans->type), pShow->pMeta->pSchemas[cols].bytes); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, (const char *)type, false); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataAppend(pColInfo, numOfRows, (const char *)&pTrans->failedTimes, false); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, (const char *)&pTrans->lastExecTime, false); char lastError[TSDB_TRANS_ERROR_LEN + VARSTR_HEADER_SIZE] = {0}; - STR_WITH_MAXSIZE_TO_VARSTR(dbname, pTrans->lastError, pShow->pMeta->pSchemas[cols].bytes); + STR_WITH_MAXSIZE_TO_VARSTR(lastError, pTrans->lastError, pShow->pMeta->pSchemas[cols].bytes); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, (const char *)lastError, false); diff --git a/source/dnode/mnode/impl/src/mnode.c b/source/dnode/mnode/impl/src/mnode.c index e2814e95f0..690399f099 100644 --- a/source/dnode/mnode/impl/src/mnode.c +++ b/source/dnode/mnode/impl/src/mnode.c @@ -65,7 +65,7 @@ static void mndPullupTrans(void *param, void *tmrId) { tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); } - taosTmrReset(mndPullupTrans, tsTransPullupMs, pMnode, pMnode->timer, &pMnode->transTimer); + taosTmrReset(mndPullupTrans, tsTransPullupInterval * 1000, pMnode, pMnode->timer, &pMnode->transTimer); } static void mndCalMqRebalance(void *param, void *tmrId) { @@ -81,7 +81,7 @@ static void mndCalMqRebalance(void *param, void *tmrId) { tmsgPutToQueue(&pMnode->msgCb, READ_QUEUE, &rpcMsg); } - taosTmrReset(mndCalMqRebalance, tsMaRebalanceMs, pMnode, pMnode->timer, &pMnode->mqTimer); + taosTmrReset(mndCalMqRebalance, tsMqRebalanceInterval * 1000, pMnode, pMnode->timer, &pMnode->mqTimer); } static void mndPullupTelem(void *param, void *tmrId) { @@ -103,12 +103,12 @@ static int32_t mndInitTimer(SMnode *pMnode) { return -1; } - if (taosTmrReset(mndPullupTrans, tsTransPullupMs, pMnode, pMnode->timer, &pMnode->transTimer)) { + if (taosTmrReset(mndPullupTrans, tsTransPullupInterval * 1000, pMnode, pMnode->timer, &pMnode->transTimer)) { terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; } - if (taosTmrReset(mndCalMqRebalance, tsMaRebalanceMs, pMnode, pMnode->timer, &pMnode->mqTimer)) { + if (taosTmrReset(mndCalMqRebalance, tsMqRebalanceInterval * 1000, pMnode, pMnode->timer, &pMnode->mqTimer)) { terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; } diff --git a/source/dnode/mnode/impl/test/trans/trans2.cpp b/source/dnode/mnode/impl/test/trans/trans2.cpp index b3cbcb6898..974c86b423 100644 --- a/source/dnode/mnode/impl/test/trans/trans2.cpp +++ 
b/source/dnode/mnode/impl/test/trans/trans2.cpp @@ -58,7 +58,7 @@ class MndTestTrans2 : public ::testing::Test { strcpy(opt.replicas[0].fqdn, "localhost"); opt.msgCb = msgCb; - tsTransPullupMs = 1000; + tsTransPullupInterval = 1; const char *mnodepath = "/tmp/mnode_test_trans"; taosRemoveDir(mnodepath); diff --git a/source/dnode/mnode/sdb/src/sdbFile.c b/source/dnode/mnode/sdb/src/sdbFile.c index f8bd14813c..e9037a7b11 100644 --- a/source/dnode/mnode/sdb/src/sdbFile.c +++ b/source/dnode/mnode/sdb/src/sdbFile.c @@ -310,7 +310,7 @@ static int32_t sdbWriteFileImp(SSdb *pSdb) { code = taosFsyncFile(pFile); if (code != 0) { code = TAOS_SYSTEM_ERROR(errno); - mError("failed to write file:%s since %s", tmpfile, tstrerror(code)); + mError("failed to sync file:%s since %s", tmpfile, tstrerror(code)); } } diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index f4e12d5f71..ebf49c644b 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -137,28 +137,18 @@ struct STsdbCfg { int8_t update; int8_t compression; int8_t slLevel; - int32_t days; int32_t minRows; int32_t maxRows; - int32_t keep0; - int32_t keep1; - int32_t keep2; - // TODO: save to tsdb cfg file - int8_t type; // ETsdbType + int32_t days; // just for save config, don't use in tsdbRead/tsdbCommit/..., and use STsdbKeepCfg in STsdb instead + int32_t keep0; // just for save config, don't use in tsdbRead/tsdbCommit/..., and use STsdbKeepCfg in STsdb instead + int32_t keep1; // just for save config, don't use in tsdbRead/tsdbCommit/..., and use STsdbKeepCfg in STsdb instead + int32_t keep2; // just for save config, don't use in tsdbRead/tsdbCommit/..., and use STsdbKeepCfg in STsdb instead SRetention retentions[TSDB_RETENTION_MAX]; }; -typedef enum { - TSDB_TYPE_TSDB = 0, // TSDB - TSDB_TYPE_TSMA = 1, // TSMA - TSDB_TYPE_RSMA_L0 = 2, // RSMA Level 0 - TSDB_TYPE_RSMA_L1 = 3, // RSMA Level 1 - TSDB_TYPE_RSMA_L2 = 4, // RSMA Level 2 -} ETsdbType; - struct SVnodeCfg { int32_t vgId; - char dbname[TSDB_DB_NAME_LEN]; + char dbname[TSDB_DB_FNAME_LEN]; uint64_t dbId; int32_t szPage; int32_t szCache; @@ -167,7 +157,7 @@ struct SVnodeCfg { bool isWeak; STsdbCfg tsdbCfg; SWalCfg walCfg; - SSyncCfg syncCfg; // sync integration + SSyncCfg syncCfg; uint32_t hashBegin; uint32_t hashEnd; int8_t hashMethod; diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h index 58003a97d7..b8cbb2d997 100644 --- a/source/dnode/vnode/src/inc/tsdb.h +++ b/source/dnode/vnode/src/inc/tsdb.h @@ -70,9 +70,10 @@ struct SSmaEnvs { struct STsdb { char *path; SVnode *pVnode; + TdThreadMutex mutex; bool repoLocked; int8_t level; // retention level - TdThreadMutex mutex; + STsdbKeepCfg keepCfg; STsdbMemTable *mem; STsdbMemTable *imem; SRtn rtn; @@ -185,6 +186,7 @@ struct STsdbFS { #define REPO_ID(r) TD_VID((r)->pVnode) #define REPO_CFG(r) (&(r)->pVnode->config.tsdbCfg) +#define REPO_KEEP_CFG(r) (&(r)->keepCfg) #define REPO_LEVEL(r) ((r)->level) #define REPO_FS(r) ((r)->fs) #define REPO_META(r) ((r)->pVnode->pMeta) @@ -830,7 +832,7 @@ typedef struct { #define TSDB_FS_ITER_FORWARD TSDB_ORDER_ASC #define TSDB_FS_ITER_BACKWARD TSDB_ORDER_DESC -STsdbFS *tsdbNewFS(const STsdbCfg *pCfg); +STsdbFS *tsdbNewFS(const STsdbKeepCfg *pCfg); void *tsdbFreeFS(STsdbFS *pfs); int tsdbOpenFS(STsdb *pRepo); void tsdbCloseFS(STsdb *pRepo); diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index 5c7dddd98f..986b2740f3 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ 
b/source/dnode/vnode/src/inc/vnodeInt.h @@ -60,6 +60,7 @@ typedef struct SQWorker SQHandle; #define VNODE_TQ_DIR "tq" #define VNODE_WAL_DIR "wal" #define VNODE_TSMA_DIR "tsma" +#define VNODE_RSMA0_DIR "tsdb" #define VNODE_RSMA1_DIR "rsma1" #define VNODE_RSMA2_DIR "rsma2" @@ -155,6 +156,22 @@ struct SVnodeInfo { SVState state; }; +typedef enum { + TSDB_TYPE_TSDB = 0, // TSDB + TSDB_TYPE_TSMA = 1, // TSMA + TSDB_TYPE_RSMA_L0 = 2, // RSMA Level 0 + TSDB_TYPE_RSMA_L1 = 3, // RSMA Level 1 + TSDB_TYPE_RSMA_L2 = 4, // RSMA Level 2 +} ETsdbType; + +typedef struct { + int8_t precision; // precision always be used with below keep cfgs + int32_t days; + int32_t keep0; + int32_t keep1; + int32_t keep2; +} STsdbKeepCfg; + struct SVnode { char* path; SVnodeCfg config; @@ -177,10 +194,11 @@ struct SVnode { SQHandle* pQuery; }; -#define VND_TSDB(vnd) ((vnd)->pTsdb) -#define VND_RSMA0(vnd) ((vnd)->pTsdb) -#define VND_RSMA1(vnd) ((vnd)->pRSma1) -#define VND_RSMA2(vnd) ((vnd)->pRSma2) +#define VND_TSDB(vnd) ((vnd)->pTsdb) +#define VND_RSMA0(vnd) ((vnd)->pTsdb) +#define VND_RSMA1(vnd) ((vnd)->pRSma1) +#define VND_RSMA2(vnd) ((vnd)->pRSma2) +#define VND_RETENTIONS(vnd) (&(vnd)->config.tsdbCfg.retentions) struct STbUidStore { tb_uid_t suid; diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 3c140cc668..7663047429 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -141,16 +141,19 @@ int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq) { goto _err; } - // preprocess req - pReq->uid = tGenIdPI64(); - pReq->ctime = taosGetTimestampMs(); - // validate req metaReaderInit(&mr, pMeta, 0); if (metaGetTableEntryByName(&mr, pReq->name) == 0) { + pReq->uid = mr.me.uid; + if (pReq->type == TSDB_CHILD_TABLE) { + pReq->ctb.suid = mr.me.ctbEntry.suid; + } terrno = TSDB_CODE_TDB_TABLE_ALREADY_EXIST; metaReaderClear(&mr); return -1; + } else { + pReq->uid = tGenIdPI64(); + pReq->ctime = taosGetTimestampMs(); } metaReaderClear(&mr); diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 4dea691374..1292fed4fc 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -886,25 +886,48 @@ int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) { } } -int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int32_t parallel) { - if (pTask->execType == TASK_EXEC__NONE) return 0; +void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) { + const SArray* pRes = (const SArray*)data; + SVnode* pVnode = (SVnode*)vnode; - pTask->exec.numOfRunners = parallel; - pTask->exec.runners = taosMemoryCalloc(parallel, sizeof(SStreamRunner)); - if (pTask->exec.runners == NULL) { - return -1; + ASSERT(pTask->tbSink.pTSchema); + SSubmitReq* pReq = tdBlockToSubmit(pRes, pTask->tbSink.pTSchema, true, pTask->tbSink.stbUid, pVnode->config.vgId); + /*tPrintFixedSchemaSubmitReq(pReq, pTask->tbSink.pTSchema);*/ + // build write msg + SRpcMsg msg = { + .msgType = TDMT_VND_SUBMIT, + .pCont = pReq, + .contLen = ntohl(pReq->length), + }; + + ASSERT(tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) == 0); +} + +int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int32_t parallel) { + if (pTask->execType != TASK_EXEC__NONE) { + // expand runners + pTask->exec.numOfRunners = parallel; + pTask->exec.runners = taosMemoryCalloc(parallel, sizeof(SStreamRunner)); + if (pTask->exec.runners == NULL) { + return -1; + } + for (int32_t i = 0; i < parallel; i++) { + STqReadHandle* 
pStreamReader = tqInitSubmitMsgScanner(pTq->pVnode->pMeta); + SReadHandle handle = { + .reader = pStreamReader, + .meta = pTq->pVnode->pMeta, + }; + pTask->exec.runners[i].inputHandle = pStreamReader; + pTask->exec.runners[i].executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle); + ASSERT(pTask->exec.runners[i].executor); + } } - for (int32_t i = 0; i < parallel; i++) { - STqReadHandle* pStreamReader = tqInitSubmitMsgScanner(pTq->pVnode->pMeta); - SReadHandle handle = { - .reader = pStreamReader, - .meta = pTq->pVnode->pMeta, - .pMsgCb = &pTq->pVnode->msgCb, - }; - pTask->exec.runners[i].inputHandle = pStreamReader; - pTask->exec.runners[i].executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle); - ASSERT(pTask->exec.runners[i].executor); + + if (pTask->sinkType == TASK_SINK__TABLE) { + pTask->tbSink.vnode = pTq->pVnode; + pTask->tbSink.tbSinkFunc = tqTableSink; } + return 0; } @@ -928,7 +951,7 @@ int32_t tqProcessTaskDeploy(STQ* pTq, char* msg, int32_t msgLen) { // sink pTask->ahandle = pTq->pVnode; if (pTask->sinkType == TASK_SINK__SMA) { - pTask->smaSink.smaHandle = smaHandleRes; + pTask->smaSink.smaSink = smaHandleRes; } else if (pTask->sinkType == TASK_SINK__TABLE) { ASSERT(pTask->tbSink.pSchemaWrapper); ASSERT(pTask->tbSink.pSchemaWrapper->pSchema); diff --git a/source/dnode/vnode/src/tsdb/tsdbCommit.c b/source/dnode/vnode/src/tsdb/tsdbCommit.c index f0b1baf1da..6c0df33d05 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCommit.c +++ b/source/dnode/vnode/src/tsdb/tsdbCommit.c @@ -210,7 +210,7 @@ int tsdbCommit(STsdb *pRepo) { } void tsdbGetRtnSnap(STsdb *pRepo, SRtn *pRtn) { - STsdbCfg *pCfg = REPO_CFG(pRepo); + STsdbKeepCfg *pCfg = REPO_KEEP_CFG(pRepo); TSKEY minKey, midKey, maxKey, now; now = taosGetTimestamp(pCfg->precision); @@ -304,9 +304,9 @@ static void tsdbSeekCommitIter(SCommitH *pCommith, TSKEY key) { } static int tsdbNextCommitFid(SCommitH *pCommith) { - STsdb *pRepo = TSDB_COMMIT_REPO(pCommith); - STsdbCfg *pCfg = REPO_CFG(pRepo); - int fid = TSDB_IVLD_FID; + STsdb *pRepo = TSDB_COMMIT_REPO(pCommith); + STsdbKeepCfg *pCfg = REPO_KEEP_CFG(pRepo); + int fid = TSDB_IVLD_FID; for (int i = 0; i < pCommith->niters; i++) { SCommitIter *pIter = pCommith->iters + i; @@ -337,8 +337,8 @@ static void tsdbDestroyCommitH(SCommitH *pCommith) { } static int tsdbCommitToFile(SCommitH *pCommith, SDFileSet *pSet, int fid) { - STsdb *pRepo = TSDB_COMMIT_REPO(pCommith); - STsdbCfg *pCfg = REPO_CFG(pRepo); + STsdb *pRepo = TSDB_COMMIT_REPO(pCommith); + STsdbKeepCfg *pCfg = REPO_KEEP_CFG(pRepo); ASSERT(pSet == NULL || pSet->fid == fid); diff --git a/source/dnode/vnode/src/tsdb/tsdbFS.c b/source/dnode/vnode/src/tsdb/tsdbFS.c index 6eda476b65..52b466d0f6 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFS.c +++ b/source/dnode/vnode/src/tsdb/tsdbFS.c @@ -191,7 +191,7 @@ static int tsdbAddDFileSetToStatus(SFSStatus *pStatus, const SDFileSet *pSet) { } // ================== STsdbFS -STsdbFS *tsdbNewFS(const STsdbCfg *pCfg) { +STsdbFS *tsdbNewFS(const STsdbKeepCfg *pCfg) { int keep = pCfg->keep2; int days = pCfg->days; int maxFSet = TSDB_MAX_FSETS(keep, days); diff --git a/source/dnode/vnode/src/tsdb/tsdbMemTable2.c b/source/dnode/vnode/src/tsdb/tsdbMemTable2.c index e401782747..952ccfda9c 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMemTable2.c +++ b/source/dnode/vnode/src/tsdb/tsdbMemTable2.c @@ -76,6 +76,8 @@ struct SMemSkipListCurosr { #define SL_HEAD_NODE_FORWARD(n, l) SL_NODE_FORWARD(n, l) #define SL_TAIL_NODE_BACKWARD(n, l) SL_NODE_FORWARD(n, l) +static int8_t 
tsdbMemSkipListRandLevel(SMemSkipList *pSl); + // SMemTable int32_t tsdbMemTableCreate2(STsdb *pTsdb, SMemTable **ppMemTb) { SMemTable *pMemTb = NULL; @@ -176,20 +178,19 @@ int32_t tsdbInsertData2(SMemTable *pMemTb, int64_t version, const SVSubmitBlk *p // do insert data to SMemData SMemSkipListCurosr slc = {0}; - const uint8_t *p = pSubmitBlk->pData; - const uint8_t *pt; const STSRow *pRow; - uint64_t szRow; + uint32_t szRow; SDecoder decoder = {0}; - // tCoderInit(&coder, TD_LITTLE_ENDIAN, pSubmitBlk->pData, pSubmitBlk->nData, TD_DECODER); + tDecoderInit(&decoder, pSubmitBlk->pData, pSubmitBlk->nData); for (;;) { - // if (tDecodeIsEnd(&coder)) break; + if (tDecodeIsEnd(&decoder)) break; + + if (tDecodeBinary(&decoder, (const uint8_t **)&pRow, &szRow) < 0) { + terrno = TSDB_CODE_INVALID_MSG; + return -1; + } - // if (tDecodeBinary(&coder, (const uint8_t **)&pRow, &szRow) < 0) { - // terrno = TSDB_CODE_INVALID_MSG; - // return -1; - // } // check the row (todo) // // move the cursor to position to write (todo) @@ -197,11 +198,16 @@ int32_t tsdbInsertData2(SMemTable *pMemTb, int64_t version, const SVSubmitBlk *p // tsdbMemSkipListCursorMoveTo(&slc, pTSRow, version, &c); // ASSERT(c); - // // encode row - // int8_t level = tsdbMemSkipListRandLevel(&pMemData->sl); - // int32_t tsize = SL_NODE_SIZE(level) + sizeof(version) + (p - pt); - // pSlNode = vnodeBufPoolMalloc(pPool, tsize); - // pSlNode->level = level; + // encode row + int8_t level = tsdbMemSkipListRandLevel(&pMemData->sl); + int32_t tsize = SL_NODE_SIZE(level) + sizeof(version) + (0 /*todo*/); + SMemSkipListNode *pNode = vnodeBufPoolMalloc(pPool, tsize); + if (pNode == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return -1; + } + + pNode->level = level; // uint8_t *pData = SL_NODE_DATA(pSlNode); // *(int64_t *)pData = version; @@ -215,7 +221,7 @@ int32_t tsdbInsertData2(SMemTable *pMemTb, int64_t version, const SVSubmitBlk *p if (pRow->ts < pMemData->minKey) pMemData->minKey = pRow->ts; if (pRow->ts > pMemData->maxKey) pMemData->maxKey = pRow->ts; } - // tCoderClear(&coder); + tDecoderClear(&decoder); // tsdbMemSkipListCursorClose(&slc); // update status @@ -228,4 +234,19 @@ int32_t tsdbInsertData2(SMemTable *pMemTb, int64_t version, const SVSubmitBlk *p if (pMemTb->maxVer == -1 || pMemTb->maxVer < version) pMemTb->maxVer = version; return 0; +} + +static int8_t tsdbMemSkipListRandLevel(SMemSkipList *pSl) { + int8_t level = 1; + int8_t tlevel; + const uint32_t factor = 4; + + if (pSl->size) { + tlevel = TMIN(pSl->maxLevel, pSl->level + 1); + while ((taosRandR(&pSl->seed) % factor) == 0 && level < tlevel) { + level++; + } + } + + return level; } \ No newline at end of file diff --git a/source/dnode/vnode/src/tsdb/tsdbOpen.c b/source/dnode/vnode/src/tsdb/tsdbOpen.c index e18c01dc01..807ee95b03 100644 --- a/source/dnode/vnode/src/tsdb/tsdbOpen.c +++ b/source/dnode/vnode/src/tsdb/tsdbOpen.c @@ -15,7 +15,30 @@ #include "tsdb.h" -static int tsdbOpenImpl(SVnode *pVnode, int8_t type, STsdb **ppTsdb, const char *dir, int8_t level); +#define TSDB_OPEN_RSMA_IMPL(v, l) \ + do { \ + SRetention *r = VND_RETENTIONS(v)[0]; \ + if (RETENTION_VALID(r)) { \ + return tsdbOpenImpl((v), type, &VND_RSMA##l(v), VNODE_RSMA##l##_DIR, TSDB_RETENTION_L##l); \ + } \ + } while (0) + +#define TSDB_SET_KEEP_CFG(l) \ + do { \ + SRetention *r = &pCfg->retentions[l]; \ + pKeepCfg->keep2 = convertTimeFromPrecisionToUnit(r->keep, pCfg->precision, TIME_UNIT_MINUTE); \ + pKeepCfg->keep0 = pKeepCfg->keep2; \ + pKeepCfg->keep1 = pKeepCfg->keep2; \ + pKeepCfg->days = 
tsdbEvalDays(r, pCfg->precision); \ + } while (0) + +#define RETENTION_DAYS_SPLIT_RATIO 10 +#define RETENTION_DAYS_SPLIT_MIN 1 +#define RETENTION_DAYS_SPLIT_MAX 30 + +static int32_t tsdbSetKeepCfg(STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int8_t type); +static int32_t tsdbEvalDays(SRetention *r, int8_t precision); +static int32_t tsdbOpenImpl(SVnode *pVnode, int8_t type, STsdb **ppTsdb, const char *dir, int8_t level); int tsdbOpen(SVnode *pVnode, int8_t type) { switch (type) { @@ -25,11 +48,63 @@ int tsdbOpen(SVnode *pVnode, int8_t type) { ASSERT(0); break; case TSDB_TYPE_RSMA_L0: - return tsdbOpenImpl(pVnode, type, &VND_RSMA0(pVnode), VNODE_TSDB_DIR, TSDB_RETENTION_L0); + TSDB_OPEN_RSMA_IMPL(pVnode, 0); + break; case TSDB_TYPE_RSMA_L1: - return tsdbOpenImpl(pVnode, type, &VND_RSMA1(pVnode), VNODE_RSMA1_DIR, TSDB_RETENTION_L1); + TSDB_OPEN_RSMA_IMPL(pVnode, 1); + break; case TSDB_TYPE_RSMA_L2: - return tsdbOpenImpl(pVnode, type, &VND_RSMA2(pVnode), VNODE_RSMA2_DIR, TSDB_RETENTION_L2); + TSDB_OPEN_RSMA_IMPL(pVnode, 2); + break; + default: + ASSERT(0); + break; + } + return 0; +} + +static int32_t tsdbEvalDays(SRetention *r, int8_t precision) { + int32_t keepDays = convertTimeFromPrecisionToUnit(r->keep, precision, TIME_UNIT_DAY); + int32_t freqDays = convertTimeFromPrecisionToUnit(r->freq, precision, TIME_UNIT_DAY); + + int32_t days = keepDays / RETENTION_DAYS_SPLIT_RATIO; + if (days <= RETENTION_DAYS_SPLIT_MIN) { + days = RETENTION_DAYS_SPLIT_MIN; + if (days < freqDays) { + days = freqDays + 1; + } + } else { + if (days > RETENTION_DAYS_SPLIT_MAX) { + days = RETENTION_DAYS_SPLIT_MAX; + } + if (days < freqDays) { + days = freqDays + 1; + } + } + return days * 1440; +} + +static int32_t tsdbSetKeepCfg(STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int8_t type) { + pKeepCfg->precision = pCfg->precision; + switch (type) { + case TSDB_TYPE_TSDB: + pKeepCfg->days = pCfg->days; + pKeepCfg->keep0 = pCfg->keep0; + pKeepCfg->keep1 = pCfg->keep1; + pKeepCfg->keep2 = pCfg->keep2; + break; + case TSDB_TYPE_TSMA: + ASSERT(0); + break; + case TSDB_TYPE_RSMA_L0: + TSDB_SET_KEEP_CFG(0); + break; + case TSDB_TYPE_RSMA_L1: + TSDB_SET_KEEP_CFG(1); + break; + case TSDB_TYPE_RSMA_L2: + TSDB_SET_KEEP_CFG(2); + break; default: ASSERT(0); break; @@ -38,16 +113,16 @@ int tsdbOpen(SVnode *pVnode, int8_t type) { } /** - * @brief - * - * @param pVnode - * @param type - * @param ppTsdb - * @param dir + * @brief + * + * @param pVnode + * @param type + * @param ppTsdb + * @param dir * @param level retention level - * @return int + * @return int */ -int tsdbOpenImpl(SVnode *pVnode, int8_t type, STsdb **ppTsdb, const char *dir, int8_t level) { +int32_t tsdbOpenImpl(SVnode *pVnode, int8_t type, STsdb **ppTsdb, const char *dir, int8_t level) { STsdb *pTsdb = NULL; int slen = 0; @@ -62,13 +137,13 @@ int tsdbOpenImpl(SVnode *pVnode, int8_t type, STsdb **ppTsdb, const char *dir, i } pTsdb->path = (char *)&pTsdb[1]; - sprintf(pTsdb->path, "%s%s%s%s%s", tfsGetPrimaryPath(pVnode->pTfs), TD_DIRSEP, pVnode->path, TD_DIRSEP, - dir); + sprintf(pTsdb->path, "%s%s%s%s%s", tfsGetPrimaryPath(pVnode->pTfs), TD_DIRSEP, pVnode->path, TD_DIRSEP, dir); pTsdb->pVnode = pVnode; pTsdb->level = level; pTsdb->repoLocked = false; taosThreadMutexInit(&pTsdb->mutex, NULL); - pTsdb->fs = tsdbNewFS(REPO_CFG(pTsdb)); + tsdbSetKeepCfg(REPO_KEEP_CFG(pTsdb), REPO_CFG(pTsdb), type); + pTsdb->fs = tsdbNewFS(REPO_KEEP_CFG(pTsdb)); // create dir (TODO: use tfsMkdir) taosMkDir(pTsdb->path); diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c 
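/*
 * Illustration only, not part of the patch: the tsdbEvalDays() helper added above in
 * tsdbOpen.c derives each RSMA level's file span from its retention. Roughly, days is
 * keep / RETENTION_DAYS_SPLIT_RATIO, clamped to [RETENTION_DAYS_SPLIT_MIN,
 * RETENTION_DAYS_SPLIT_MAX], bumped to freq + 1 if it falls below the aggregation
 * frequency, and returned in minutes. Worked through with hypothetical values of
 * keep = 365 days and freq = 1 day:
 *
 *   days = 365 / 10 = 36      -> capped at RETENTION_DAYS_SPLIT_MAX = 30
 *   days >= freqDays, so no bump to freq + 1
 *   return 30 * 1440 = 43200  (minutes per file set)
 */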
b/source/dnode/vnode/src/tsdb/tsdbRead.c index 0b9f21dbcf..9294718550 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -320,7 +320,7 @@ static bool emptyQueryTimewindow(STsdbReadHandle* pTsdbReadHandle) { // Update the query time window according to the data time to live(TTL) information, in order to avoid to return // the expired data to client, even it is queried already. static int64_t getEarliestValidTimestamp(STsdb* pTsdb) { - STsdbCfg* pCfg = REPO_CFG(pTsdb); + STsdbKeepCfg* pCfg = REPO_KEEP_CFG(pTsdb); int64_t now = taosGetTimestamp(pCfg->precision); return now - (tsTickPerDay[pCfg->precision] * pCfg->keep2) + 1; // needs to add one tick @@ -879,14 +879,14 @@ static TSKEY extractFirstTraverseKey(STableCheckInfo* pCheckInfo, int32_t order, } } - -static STSRow* getSRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order, int32_t update, STSRow** extraRow, TDRowVerT maxVer) { +static STSRow* getSRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order, int32_t update, STSRow** extraRow, + TDRowVerT maxVer) { STSRow *rmem = NULL, *rimem = NULL; if (pCheckInfo->iter) { SSkipListNode* node = tSkipListIterGet(pCheckInfo->iter); if (node != NULL) { rmem = (STSRow*)SL_GET_NODE_DATA(node); -#if 0 // TODO: skiplist refactor +#if 0 // TODO: skiplist refactor if (TD_ROW_VER(rmem) > maxVer) { rmem = NULL; } @@ -898,7 +898,7 @@ static STSRow* getSRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order, int SSkipListNode* node = tSkipListIterGet(pCheckInfo->iiter); if (node != NULL) { rimem = (STSRow*)SL_GET_NODE_DATA(node); -#if 0 // TODO: skiplist refactor +#if 0 // TODO: skiplist refactor if (TD_ROW_VER(rimem) > maxVer) { rimem = NULL; } @@ -1677,7 +1677,7 @@ static int32_t mergeTwoRowFromMem(STsdbReadHandle* pTsdbReadHandle, int32_t capa colIdOfRow2 = tdKvRowColIdAt(row2, k); } - if (colIdOfRow1 < colIdOfRow2) { // the most probability + if (colIdOfRow1 < colIdOfRow2) { // the most probability if (colIdOfRow1 < pColInfo->info.colId) { ++j; continue; @@ -1720,7 +1720,7 @@ static int32_t mergeTwoRowFromMem(STsdbReadHandle* pTsdbReadHandle, int32_t capa ++(*curRow); } ++nResult; - } else if (update){ + } else if (update) { mergeOption = 2; } else { mergeOption = 0; @@ -1741,7 +1741,7 @@ static int32_t mergeTwoRowFromMem(STsdbReadHandle* pTsdbReadHandle, int32_t capa ++(*curRow); } ++nResult; - } else if(update) { + } else if (update) { mergeOption = 2; } else { mergeOption = 0; @@ -1985,7 +1985,7 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf return; } else if (pCheckInfo->iter != NULL || pCheckInfo->iiter != NULL) { SSkipListNode* node = NULL; - TSKEY lastRowKey = TSKEY_INITIAL_VAL; + TSKEY lastKeyAppend = TSKEY_INITIAL_VAL; do { STSRow* row2 = NULL; @@ -2018,9 +2018,9 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf rv2 = TD_ROW_SVER(row2); } - numOfRows += mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols, - pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastRowKey); - // numOfRows += 1; + numOfRows += + mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols, + pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend); if (cur->win.skey == TSKEY_INITIAL_VAL) { cur->win.skey = key; } @@ -2028,7 +2028,6 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf cur->win.ekey = key; cur->lastKey = key + step; cur->mixBlock = true; - 
moveToNextRowInMem(pCheckInfo); } else if (key == tsArray[pos]) { // data in buffer has the same timestamp of data in file block, ignore it #if 0 @@ -2064,7 +2063,11 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf } #endif if (TD_SUPPORT_UPDATE(pCfg->update)) { - doCopyRowsFromFileBlock(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, curRow, pos, pos); + if (lastKeyAppend != key) { + lastKeyAppend = key; + ++curRow; + } + numOfRows = doCopyRowsFromFileBlock(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, curRow, pos, pos); if (rv1 != TD_ROW_SVER(row1)) { // pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1)); @@ -2074,10 +2077,10 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf // pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2)); rv2 = TD_ROW_SVER(row2); } + numOfRows += + mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols, + pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend); - numOfRows += mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols, - pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastRowKey); - // ++numOfRows; if (cur->win.skey == TSKEY_INITIAL_VAL) { cur->win.skey = key; } @@ -2118,11 +2121,19 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf int32_t qstart = 0, qend = 0; getQualifiedRowsPos(pTsdbReadHandle, pos, end, numOfRows, &qstart, &qend); - numOfRows = doCopyRowsFromFileBlock(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, numOfRows, qstart, qend); + if ((lastKeyAppend != TSKEY_INITIAL_VAL) && + (lastKeyAppend != (ASCENDING_TRAVERSE(pTsdbReadHandle->order) ? tsArray[qstart] : tsArray[qend]))) { + ++curRow; + } + numOfRows = doCopyRowsFromFileBlock(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, curRow, qstart, qend); pos += (qend - qstart + 1) * step; + if (numOfRows > 0) { + curRow = numOfRows - 1; + } cur->win.ekey = ASCENDING_TRAVERSE(pTsdbReadHandle->order) ? 
tsArray[qend] : tsArray[qstart]; cur->lastKey = cur->win.ekey + step; + lastKeyAppend = cur->win.ekey; } } while (numOfRows < pTsdbReadHandle->outputCapacity); @@ -2425,8 +2436,8 @@ static int32_t getFirstFileDataBlock(STsdbReadHandle* pTsdbReadHandle, bool* exi int32_t numOfBlocks = 0; int32_t numOfTables = (int32_t)taosArrayGetSize(pTsdbReadHandle->pTableCheckInfo); - STsdbCfg* pCfg = REPO_CFG(pTsdbReadHandle->pTsdb); - STimeWindow win = TSWINDOW_INITIALIZER; + STsdbKeepCfg* pCfg = REPO_KEEP_CFG(pTsdbReadHandle->pTsdb); + STimeWindow win = TSWINDOW_INITIALIZER; while (true) { tsdbRLockFS(REPO_FS(pTsdbReadHandle->pTsdb)); @@ -2531,8 +2542,8 @@ int32_t tsdbGetFileBlocksDistInfo(tsdbReaderT* queryHandle, STableBlockDistInfo* // find the start data block in file pTsdbReadHandle->locateStart = true; - STsdbCfg* pCfg = REPO_CFG(pTsdbReadHandle->pTsdb); - int32_t fid = getFileIdFromKey(pTsdbReadHandle->window.skey, pCfg->days, pCfg->precision); + STsdbKeepCfg* pCfg = REPO_KEEP_CFG(pTsdbReadHandle->pTsdb); + int32_t fid = getFileIdFromKey(pTsdbReadHandle->window.skey, pCfg->days, pCfg->precision); tsdbRLockFS(pFileHandle); tsdbFSIterInit(&pTsdbReadHandle->fileIter, pFileHandle, pTsdbReadHandle->order); @@ -2632,8 +2643,8 @@ static int32_t getDataBlocksInFiles(STsdbReadHandle* pTsdbReadHandle, bool* exis // find the start data block in file if (!pTsdbReadHandle->locateStart) { pTsdbReadHandle->locateStart = true; - STsdbCfg* pCfg = REPO_CFG(pTsdbReadHandle->pTsdb); - int32_t fid = getFileIdFromKey(pTsdbReadHandle->window.skey, pCfg->days, pCfg->precision); + STsdbKeepCfg* pCfg = REPO_KEEP_CFG(pTsdbReadHandle->pTsdb); + int32_t fid = getFileIdFromKey(pTsdbReadHandle->window.skey, pCfg->days, pCfg->precision); tsdbRLockFS(pFileHandle); tsdbFSIterInit(&pTsdbReadHandle->fileIter, pFileHandle, pTsdbReadHandle->order); @@ -2732,7 +2743,6 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int STSchema* pSchema = NULL; TSKEY lastRowKey = TSKEY_INITIAL_VAL; - do { STSRow* row = getSRowInTableMem(pCheckInfo, pTsdbReadHandle->order, pCfg->update, NULL, TD_VER_MAX); if (row == NULL) { @@ -2757,8 +2767,8 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int pSchema = metaGetTbTSchema(REPO_META(pTsdbReadHandle->pTsdb), pCheckInfo->tableId, 0); rv = TD_ROW_SVER(row); } - numOfRows += mergeTwoRowFromMem(pTsdbReadHandle, maxRowsToRead, &curRows, row, NULL, numOfCols, pCheckInfo->tableId, pSchema, - NULL, pCfg->update, &lastRowKey); + numOfRows += mergeTwoRowFromMem(pTsdbReadHandle, maxRowsToRead, &curRows, row, NULL, numOfCols, pCheckInfo->tableId, + pSchema, NULL, pCfg->update, &lastRowKey); if (numOfRows >= maxRowsToRead) { moveToNextRowInMem(pCheckInfo); @@ -2767,7 +2777,7 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int } while (moveToNextRowInMem(pCheckInfo)); - taosMemoryFreeClear(pSchema); // free the STSChema + taosMemoryFreeClear(pSchema); // free the STSChema assert(numOfRows <= maxRowsToRead); @@ -2895,8 +2905,8 @@ static bool loadCachedLastRow(STsdbReadHandle* pTsdbReadHandle) { // if (ret != TSDB_CODE_SUCCESS) { // return false; // } - mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, pRow, NULL, numOfCols, pCheckInfo->tableId, - NULL, NULL, true, &lastRowKey); + mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, pRow, NULL, numOfCols, + pCheckInfo->tableId, NULL, NULL, true, &lastRowKey); taosMemoryFreeClear(pRow); // update the last key value @@ -3465,7 
+3475,7 @@ void tsdbRetrieveDataBlockInfo(tsdbReaderT* pTsdbReadHandle, SDataBlockInfo* pDa pDataBlockInfo->rows = cur->rows; pDataBlockInfo->window = cur->win; -// ASSERT(pDataBlockInfo->numOfCols >= (int32_t)(QH_GET_NUM_OF_COLS(pHandle)); + // ASSERT(pDataBlockInfo->numOfCols >= (int32_t)(QH_GET_NUM_OF_COLS(pHandle)); } /* diff --git a/source/dnode/vnode/src/tsdb/tsdbSma.c b/source/dnode/vnode/src/tsdb/tsdbSma.c index 6d396e6438..32051c2de4 100644 --- a/source/dnode/vnode/src/tsdb/tsdbSma.c +++ b/source/dnode/vnode/src/tsdb/tsdbSma.c @@ -1013,8 +1013,8 @@ static int32_t tsdbSetTSmaDataFile(STSmaWriteH *pSmaH, int64_t indexUid, int32_t * @return int32_t */ static int32_t tsdbGetTSmaDays(STsdb *pTsdb, int64_t interval, int32_t storageLevel) { - STsdbCfg *pCfg = REPO_CFG(pTsdb); - int32_t daysPerFile = pCfg->days; + STsdbKeepCfg *pCfg = REPO_KEEP_CFG(pTsdb); + int32_t daysPerFile = pCfg->days; if (storageLevel == SMA_STORAGE_LEVEL_TSDB) { int32_t days = SMA_STORAGE_TSDB_TIMES * (interval / tsTickPerDay[pCfg->precision]); diff --git a/source/dnode/vnode/src/tsdb/tsdbWrite.c b/source/dnode/vnode/src/tsdb/tsdbWrite.c index 88b637bc24..5a7892a750 100644 --- a/source/dnode/vnode/src/tsdb/tsdbWrite.c +++ b/source/dnode/vnode/src/tsdb/tsdbWrite.c @@ -60,7 +60,7 @@ static int tsdbScanAndConvertSubmitMsg(STsdb *pTsdb, SSubmitReq *pMsg) { SSubmitBlk *pBlock = NULL; SSubmitBlkIter blkIter = {0}; STSRow *row = NULL; - STsdbCfg *pCfg = REPO_CFG(pTsdb); + STsdbKeepCfg *pCfg = REPO_KEEP_CFG(pTsdb); TSKEY now = taosGetTimestamp(pCfg->precision); TSKEY minKey = now - tsTickPerDay[pCfg->precision] * pCfg->keep2; TSKEY maxKey = now + tsTickPerDay[pCfg->precision] * pCfg->days; diff --git a/source/dnode/vnode/src/vnd/vnodeCfg.c b/source/dnode/vnode/src/vnd/vnodeCfg.c index 5e21abb404..32866d7469 100644 --- a/source/dnode/vnode/src/vnd/vnodeCfg.c +++ b/source/dnode/vnode/src/vnd/vnodeCfg.c @@ -97,7 +97,6 @@ int vnodeEncodeConfig(const void *pObj, SJson *pJson) { if (tjsonAddIntegerToObject(pJson, "hashEnd", pCfg->hashEnd) < 0) return -1; if (tjsonAddIntegerToObject(pJson, "hashMethod", pCfg->hashMethod) < 0) return -1; - // sync integration if (tjsonAddIntegerToObject(pJson, "syncCfg.replicaNum", pCfg->syncCfg.replicaNum) < 0) return -1; if (tjsonAddIntegerToObject(pJson, "syncCfg.myIndex", pCfg->syncCfg.myIndex) < 0) return -1; SJson *pNodeInfoArr = tjsonCreateArray(); @@ -157,7 +156,6 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) { if (tjsonGetNumberValue(pJson, "hashEnd", pCfg->hashEnd) < 0) return -1; if (tjsonGetNumberValue(pJson, "hashMethod", pCfg->hashMethod) < 0) return -1; - // sync integration if (tjsonGetNumberValue(pJson, "syncCfg.replicaNum", pCfg->syncCfg.replicaNum) < 0) return -1; if (tjsonGetNumberValue(pJson, "syncCfg.myIndex", pCfg->syncCfg.myIndex) < 0) return -1; diff --git a/source/dnode/vnode/src/vnd/vnodeQuery.c b/source/dnode/vnode/src/vnd/vnodeQuery.c index 7e80eacf8f..3e869650bf 100644 --- a/source/dnode/vnode/src/vnd/vnodeQuery.c +++ b/source/dnode/vnode/src/vnd/vnodeQuery.c @@ -124,8 +124,7 @@ _exit: int32_t vnodeGetLoad(SVnode *pVnode, SVnodeLoad *pLoad) { pLoad->vgId = TD_VID(pVnode); - // pLoad->syncState = TAOS_SYNC_STATE_LEADER; - pLoad->syncState = syncGetMyRole(pVnode->sync); // sync integration + pLoad->syncState = syncGetMyRole(pVnode->sync); pLoad->numOfTables = metaGetTbNum(pVnode->pMeta); pLoad->numOfTimeSeries = 400; pLoad->totalStorage = 300; diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 
d9231361be..c1e0a202a8 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -198,7 +198,6 @@ void smaHandleRes(void *pVnode, int64_t smaId, const SArray *data) { tsdbInsertTSmaData(((SVnode *)pVnode)->pTsdb, smaId, (const char *)data); } -// sync integration int vnodeProcessSyncReq(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) { if (syncEnvIsStart()) { SSyncNode *pSyncNode = syncNodeAcquire(pVnode->sync); diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c index 26393394e9..1260f9a3e7 100644 --- a/source/dnode/vnode/src/vnd/vnodeSync.c +++ b/source/dnode/vnode/src/vnd/vnodeSync.c @@ -14,12 +14,6 @@ */ #include "vnd.h" -// #include "sync.h" -// #include "syncTools.h" -// #include "tmsgcb.h" -// #include "vnodeInt.h" - -// sync integration int32_t vnodeSyncOpen(SVnode *pVnode, char *path) { SSyncInfo syncInfo; diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h index 9f834fd659..e62729a051 100644 --- a/source/libs/executor/inc/executil.h +++ b/source/libs/executor/inc/executil.h @@ -15,8 +15,9 @@ #ifndef TDENGINE_QUERYUTIL_H #define TDENGINE_QUERYUTIL_H -#include "tcommon.h" +#include #include "tbuffer.h" +#include "tcommon.h" #include "tpagedbuf.h" #define SET_RES_WINDOW_KEY(_k, _ori, _len, _uid) \ @@ -56,9 +57,9 @@ typedef struct SResultRow { bool endInterp; // the time window end timestamp has done the interpolation already. bool closed; // this result status: closed or opened uint32_t numOfRows; // number of rows of current time window - struct SResultRowEntryInfo* pEntryInfo; // For each result column, there is a resultInfo STimeWindow win; - char *key; // start key of current result row + struct SResultRowEntryInfo pEntryInfo[]; // For each result column, there is a resultInfo +// char *key; // start key of current result row } SResultRow; typedef struct SResultRowPosition { diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index 58673b3677..4dcabcbc19 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -709,7 +709,7 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SExprInfo* SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResultBlock, SExecTaskInfo* pTaskInfo); -SOperatorInfo* createJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SNode* pOnCondition, SExecTaskInfo* pTaskInfo); +SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SNode* pOnCondition, SExecTaskInfo* pTaskInfo); SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, SExprInfo* pExpr, int32_t numOfOutput, SSDataBlock* pResBlock, SArray* pColMatchInfo, STableGroupInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo); #if 0 diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 3283ae2b55..763dcef790 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -157,8 +157,6 @@ void clearResultRow(STaskRuntimeEnv *pRuntimeEnv, SResultRow *pResultRow) { pResultRow->pageId = -1; pResultRow->offset = -1; pResultRow->closed = false; - - taosMemoryFreeClear(pResultRow->key); pResultRow->win = TSWINDOW_INITIALIZER; } diff --git 
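/*
 * Illustration only, not part of the patch: the SResultRow change in executil.h above
 * replaces the pEntryInfo pointer (and the separate key buffer) with a flexible array
 * member, so a result row and its per-column entry slots come from a single
 * allocation. A minimal, generic sketch of that pattern; the names and sizes below
 * are hypothetical:
 *
 *   typedef struct { int32_t numOfRows; char entries[]; } SFlexRow;
 *   int32_t numOfCols = 4, entrySize = 32;  (hypothetical sizes)
 *   SFlexRow *pRow = taosMemoryCalloc(1, sizeof(SFlexRow) + numOfCols * entrySize);
 *   entries[] begins right after the fixed-size header, so there is no second
 *   allocation to free and no interior pointer to fix up when the row is copied
 *   into the paged result buffer.
 */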
a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 32c7016534..59c6a7f959 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -388,6 +388,7 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR // allocate a new buffer page prepareResultListBuffer(pResultRowInfo, pTaskInfo->env); if (pResult == NULL) { + ASSERT(pSup->resultRowSize > 0); pResult = getNewResultRow_rv(pResultBuf, groupId, pSup->resultRowSize); initResultRow(pResult); @@ -1152,7 +1153,7 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, pCtx->resDataInfo.interBufSize = env.calcMemSize; } else if (pExpr->pExpr->nodeType == QUERY_NODE_COLUMN || pExpr->pExpr->nodeType == QUERY_NODE_OPERATOR || pExpr->pExpr->nodeType == QUERY_NODE_VALUE) { - // for simple column, the intermediate buffer needs to hold one element. + // for simple column, the result buffer needs to hold at least one element. pCtx->resDataInfo.interBufSize = pFunct->resSchema.bytes; } @@ -1872,7 +1873,7 @@ static void updateTableQueryInfoForReverseScan(STableQueryInfo* pTableQueryInfo) } void initResultRow(SResultRow* pResultRow) { - pResultRow->pEntryInfo = (struct SResultRowEntryInfo*)((char*)pResultRow + sizeof(SResultRow)); +// pResultRow->pEntryInfo = (struct SResultRowEntryInfo*)((char*)pResultRow + sizeof(SResultRow)); } /* @@ -1884,7 +1885,7 @@ void initResultRow(SResultRow* pResultRow) { * offset[0] offset[1] offset[2] */ // TODO refactor: some function move away -void setFunctionResultOutput(SOptrBasicInfo* pInfo, SAggSupporter* pSup, int32_t stage, SExecTaskInfo* pTaskInfo) { +void setFunctionResultOutput(SOptrBasicInfo* pInfo, SAggSupporter* pSup, int32_t stage, int32_t numOfExprs, SExecTaskInfo* pTaskInfo) { SqlFunctionCtx* pCtx = pInfo->pCtx; SSDataBlock* pDataBlock = pInfo->pRes; int32_t* rowCellInfoOffset = pInfo->rowCellInfoOffset; @@ -1897,6 +1898,7 @@ void setFunctionResultOutput(SOptrBasicInfo* pInfo, SAggSupporter* pSup, int32_t SResultRow* pRow = doSetResultOutBufByKey(pSup->pResultBuf, pResultRowInfo, (char*)&tid, sizeof(tid), true, groupId, pTaskInfo, false, pSup); + ASSERT(pDataBlock->info.numOfCols == numOfExprs); for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) { struct SResultRowEntryInfo* pEntry = getResultCell(pRow, i, rowCellInfoOffset); cleanupResultRowEntry(pEntry); @@ -3604,7 +3606,7 @@ SOperatorInfo* createSortedMergeOperatorInfo(SOperatorInfo** downstream, int32_t goto _error; } - setFunctionResultOutput(&pInfo->binfo, &pInfo->aggSup, MAIN_SCAN, pTaskInfo); + setFunctionResultOutput(&pInfo->binfo, &pInfo->aggSup, MAIN_SCAN, num, pTaskInfo); code = initGroupCol(pExprInfo, num, pGroupInfo, pInfo); if (code != TSDB_CODE_SUCCESS) { goto _error; @@ -4197,12 +4199,22 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n pAggSup->keyBuf = taosMemoryCalloc(1, keyBufSize + POINTER_BYTES + sizeof(int64_t)); pAggSup->pResultRowHashTable = taosHashInit(10, hashFn, true, HASH_NO_LOCK); - if (pAggSup->keyBuf == NULL /*|| pAggSup->pResultRowArrayList == NULL || pAggSup->pResultRowListSet == NULL*/ || - pAggSup->pResultRowHashTable == NULL) { + if (pAggSup->keyBuf == NULL || pAggSup->pResultRowHashTable == NULL) { return TSDB_CODE_OUT_OF_MEMORY; } - int32_t code = createDiskbasedBuf(&pAggSup->pResultBuf, 4096, 4096 * 256, pKey, "/tmp/"); + uint32_t defaultPgsz = 4096; + while(defaultPgsz < pAggSup->resultRowSize*4) { + defaultPgsz <<= 1u; + } + + // at least 
four pages need to be in buffer + int32_t defaultBufsz = 4096 * 256; + if (defaultBufsz <= defaultPgsz) { + defaultBufsz = defaultPgsz * 4; + } + + int32_t code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, "/tmp/"); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -4342,6 +4354,10 @@ void destroyBasicOperatorInfo(void* param, int32_t numOfOutput) { doDestroyBasicInfo(pInfo, numOfOutput); } +void destroyMergeJoinOperator(void* param, int32_t numOfOutput) { + SJoinOperatorInfo* pJoinOperator = (SJoinOperatorInfo*) param; +} + void destroyAggOperatorInfo(void* param, int32_t numOfOutput) { SAggOperatorInfo* pInfo = (SAggOperatorInfo*)param; doDestroyBasicInfo(&pInfo->binfo, numOfOutput); @@ -4405,7 +4421,7 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SExprInfo* p initResultSizeInfo(pOperator, numOfRows); initAggInfo(&pInfo->binfo, &pInfo->aggSup, pExprInfo, numOfCols, pResBlock, keyBufSize, pTaskInfo->id.str); - setFunctionResultOutput(&pInfo->binfo, &pInfo->aggSup, MAIN_SCAN, pTaskInfo); + setFunctionResultOutput(&pInfo->binfo, &pInfo->aggSup, MAIN_SCAN, numOfCols, pTaskInfo); pInfo->pPseudoColInfo = setRowTsColumnOutputInfo(pInfo->binfo.pCtx, numOfCols); pOperator->name = "ProjectOperator"; @@ -4918,7 +4934,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc); SExprInfo* pExprInfo = createExprInfo(pJoinNode->pTargets, NULL, &num); - pOptr = createJoinOperatorInfo(ops, size, pExprInfo, num, pResBlock, pJoinNode->pOnConditions, pTaskInfo); + pOptr = createMergeJoinOperatorInfo(ops, size, pExprInfo, num, pResBlock, pJoinNode->pOnConditions, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_FILL == type) { SFillPhysiNode* pFillNode = (SFillPhysiNode*)pPhyNode; SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc); @@ -5490,7 +5506,7 @@ static SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator) { return (pRes->info.rows > 0) ? pRes : NULL; } -SOperatorInfo* createJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SExprInfo* pExprInfo, +SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SNode* pOnCondition, SExecTaskInfo* pTaskInfo) { SJoinOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SJoinOperatorInfo)); @@ -5516,7 +5532,7 @@ SOperatorInfo* createJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOf setJoinColumnInfo(&pInfo->rightCol, (SColumnNode*)pNode->pRight); pOperator->fpSet = - createOperatorFpSet(operatorDummyOpenFn, doMergeJoin, NULL, NULL, destroyBasicOperatorInfo, NULL, NULL, NULL); + createOperatorFpSet(operatorDummyOpenFn, doMergeJoin, NULL, NULL, destroyMergeJoinOperator, NULL, NULL, NULL); int32_t code = appendDownstream(pOperator, pDownstream, numOfDownstream); if (code != TSDB_CODE_SUCCESS) { goto _error; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 49a5b16a88..819340ec6c 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -38,7 +38,8 @@ #define SWITCH_ORDER(n) (((n) = ((n) == TSDB_ORDER_ASC) ? 
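/*
 * Illustration only, not part of the patch: the doInitAggInfoSup() change above stops
 * hard-coding a 4096-byte page for the disk-based result buffer. The page size is
 * doubled until at least four result rows fit, and the total buffer size is kept at
 * no less than four pages. Worked through with a hypothetical resultRowSize of 3000
 * bytes:
 *
 *   defaultPgsz : 4096 -> 8192 -> 16384   (first doubling that reaches 4 * 3000)
 *   defaultBufsz: 4096 * 256 = 1048576    (already larger than one page, unchanged)
 */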
TSDB_ORDER_DESC : TSDB_ORDER_ASC)) static int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capacity); -static int32_t buildDbTableInfoBlock(const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta, size_t size, const char* dbName); +static int32_t buildDbTableInfoBlock(const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta, size_t size, + const char* dbName); static void switchCtxOrder(SqlFunctionCtx* pCtx, int32_t numOfOutput) { for (int32_t i = 0; i < numOfOutput; ++i) { @@ -159,7 +160,8 @@ static bool overlapWithTimeWindow(SInterval* pInterval, SDataBlockInfo* pBlockIn return false; } -static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableScanInfo, SSDataBlock* pBlock, uint32_t* status) { +static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableScanInfo, SSDataBlock* pBlock, + uint32_t* status) { SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; STableScanInfo* pInfo = pOperator->info; @@ -189,7 +191,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca } else if (*status == FUNC_DATA_REQUIRED_STATIS_LOAD) { pCost->loadBlockStatis += 1; - bool allColumnsHaveAgg = true; + bool allColumnsHaveAgg = true; SColumnDataAgg** pColAgg = NULL; tsdbRetrieveDataBlockStatisInfo(pTableScanInfo->dataReader, &pColAgg, &allColumnsHaveAgg); @@ -261,7 +263,7 @@ static void prepareForDescendingScan(STableScanInfo* pTableScanInfo, SqlFunction static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) { STableScanInfo* pTableScanInfo = pOperator->info; - SSDataBlock* pBlock = pTableScanInfo->pResBlock; + SSDataBlock* pBlock = pTableScanInfo->pResBlock; while (tsdbNextDataBlock(pTableScanInfo->dataReader)) { if (isTaskKilled(pOperator->pTaskInfo)) { @@ -344,7 +346,8 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { pTableScanInfo->scanFlag = REPEAT_SCAN; qDebug("%s start to repeat descending order scan data blocks due to query func required, qrange:%" PRId64 - "-%" PRId64, GET_TASKID(pTaskInfo), pTaskInfo->window.skey, pTaskInfo->window.ekey); + "-%" PRId64, + GET_TASKID(pTaskInfo), pTaskInfo->window.skey, pTaskInfo->window.ekey); // do prepare for the next round table scan operation tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond); @@ -373,22 +376,22 @@ SOperatorInfo* createTableScanOperatorInfo(void* pDataReader, SQueryTableDataCon pInfo->cond = *pCond; pInfo->scanInfo = (SScanInfo){.numOfAsc = scanInfo[0], .numOfDesc = scanInfo[1]}; - pInfo->interval = *pInterval; - pInfo->sampleRatio = sampleRatio; + pInfo->interval = *pInterval; + pInfo->sampleRatio = sampleRatio; pInfo->dataBlockLoadFlag = dataLoadFlag; - pInfo->pResBlock = pResBlock; - pInfo->pFilterNode = pCondition; - pInfo->dataReader = pDataReader; - pInfo->scanFlag = MAIN_SCAN; - pInfo->pColMatchInfo = pColMatchInfo; + pInfo->pResBlock = pResBlock; + pInfo->pFilterNode = pCondition; + pInfo->dataReader = pDataReader; + pInfo->scanFlag = MAIN_SCAN; + pInfo->pColMatchInfo = pColMatchInfo; - pOperator->name = "TableScanOperator"; // for dubug purpose - pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN; - pOperator->blocking = false; - pOperator->status = OP_NOT_OPENED; - pOperator->info = pInfo; - pOperator->numOfExprs = numOfOutput; - pOperator->pTaskInfo = pTaskInfo; + pOperator->name = "TableScanOperator"; // for dubug purpose + pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN; + pOperator->blocking = false; + pOperator->status = OP_NOT_OPENED; + pOperator->info = pInfo; + 
pOperator->numOfExprs = numOfOutput; + pOperator->pTaskInfo = pTaskInfo; pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doTableScan, NULL, NULL, NULL, NULL, NULL, NULL); @@ -404,17 +407,17 @@ SOperatorInfo* createTableScanOperatorInfo(void* pDataReader, SQueryTableDataCon SOperatorInfo* createTableSeqScanOperatorInfo(void* pReadHandle, SExecTaskInfo* pTaskInfo) { STableScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STableScanInfo)); - SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); + SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); - pInfo->dataReader = pReadHandle; -// pInfo->prevGroupId = -1; + pInfo->dataReader = pReadHandle; + // pInfo->prevGroupId = -1; - pOperator->name = "TableSeqScanOperator"; + pOperator->name = "TableSeqScanOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN; - pOperator->blocking = false; - pOperator->status = OP_NOT_OPENED; - pOperator->info = pInfo; - pOperator->pTaskInfo = pTaskInfo; + pOperator->blocking = false; + pOperator->status = OP_NOT_OPENED; + pOperator->info = pInfo; + pOperator->pTaskInfo = pTaskInfo; pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doTableScanImpl, NULL, NULL, NULL, NULL, NULL, NULL); return pOperator; @@ -514,18 +517,18 @@ static void doClearBufferedBlocks(SStreamBlockScanInfo* pInfo) { static SSDataBlock* getUpdateDataBlock(SStreamBlockScanInfo* pInfo) { SColumnInfoData* pColDataInfo = taosArrayGet(pInfo->pRes->pDataBlock, pInfo->primaryTsIndex); - TSKEY* ts = (TSKEY*)pColDataInfo->pData; + TSKEY* ts = (TSKEY*)pColDataInfo->pData; for (int32_t i = 0; i < pInfo->pRes->info.rows; i++) { if (updateInfoIsUpdated(pInfo->pUpdateInfo, pInfo->pRes->info.uid, ts[i])) { - taosArrayPush(pInfo->tsArray, ts+i); + taosArrayPush(pInfo->tsArray, ts + i); } } if (taosArrayGetSize(pInfo->tsArray) > 0) { - //TODO(liuyao) get from tsdb - // SSDataBlock* p = createOneDataBlock(pInfo->pRes, true); - // p->info.type = STREAM_INVERT; - // taosArrayClear(pInfo->tsArray); - // return p; + // TODO(liuyao) get from tsdb + // SSDataBlock* p = createOneDataBlock(pInfo->pRes, true); + // p->info.type = STREAM_INVERT; + // taosArrayClear(pInfo->tsArray); + // return p; return NULL; } return NULL; @@ -535,7 +538,7 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { // NOTE: this operator does never check if current status is done or not SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SStreamBlockScanInfo* pInfo = pOperator->info; - int32_t rows = 0; + int32_t rows = 0; pTaskInfo->code = pOperator->fpSet._openFn(pOperator); if (pTaskInfo->code != TSDB_CODE_SUCCESS || pOperator->status == OP_EXEC_DONE) { @@ -571,7 +574,7 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { int32_t numOfRows = 0; int16_t outputCol = 0; - int32_t code = tqRetrieveDataBlock(&pCols, pInfo->readerHandle, &groupId, &uid, &numOfRows, &outputCol); + int32_t code = tqRetrieveDataBlock(&pCols, pInfo->readerHandle, &groupId, &uid, &numOfRows, &outputCol); if (code != TSDB_CODE_SUCCESS || numOfRows == 0) { pTaskInfo->code = code; @@ -652,8 +655,9 @@ SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, SSDataBlock* SArray* pColIds = taosArrayInit(4, sizeof(int16_t)); for (int32_t i = 0; i < numOfOutput; ++i) { - int16_t* id = taosArrayGet(pColList, i); - taosArrayPush(pColIds, id); + SColMatchInfo* id = taosArrayGet(pColList, i); + int16_t colId = id->colId; + taosArrayPush(pColIds, &colId); } pInfo->pColMatchInfo = pColList; @@ -678,8 +682,8 @@ 
SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, SSDataBlock* return NULL; } - pInfo->primaryTsIndex = 0; //TODO(liuyao) get it from physical plan - pInfo->pUpdateInfo = updateInfoInit(60000, 0, 100); //TODO(liuyao) get it from physical plan + pInfo->primaryTsIndex = 0; // TODO(liuyao) get it from physical plan + pInfo->pUpdateInfo = updateInfoInit(60000, 0, 100); // TODO(liuyao) get it from physical plan if (pInfo->pUpdateInfo == NULL) { taosMemoryFreeClear(pInfo); taosMemoryFreeClear(pOperator); @@ -687,25 +691,26 @@ SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, SSDataBlock* } pInfo->readerHandle = streamReadHandle; - pInfo->pRes = pResBlock; - pInfo->pCondition = pCondition; + pInfo->pRes = pResBlock; + pInfo->pCondition = pCondition; - pOperator->name = "StreamBlockScanOperator"; - pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN; - pOperator->blocking = false; - pOperator->status = OP_NOT_OPENED; - pOperator->info = pInfo; - pOperator->numOfExprs = pResBlock->info.numOfCols; - pOperator->fpSet._openFn = operatorDummyOpenFn; + pOperator->name = "StreamBlockScanOperator"; + pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN; + pOperator->blocking = false; + pOperator->status = OP_NOT_OPENED; + pOperator->info = pInfo; + pOperator->numOfExprs = pResBlock->info.numOfCols; + pOperator->fpSet._openFn = operatorDummyOpenFn; pOperator->fpSet.getNextFn = doStreamBlockScan; - pOperator->fpSet.closeFn = operatorDummyCloseFn; + pOperator->fpSet.closeFn = operatorDummyCloseFn; pOperator->pTaskInfo = pTaskInfo; - pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamBlockScan, NULL, NULL, operatorDummyCloseFn, NULL, NULL, NULL); + pOperator->fpSet = + createOperatorFpSet(operatorDummyOpenFn, doStreamBlockScan, NULL, NULL, operatorDummyCloseFn, NULL, NULL, NULL); return pOperator; - _error: +_error: taosMemoryFreeClear(pInfo); taosMemoryFreeClear(pOperator); return NULL; @@ -774,7 +779,7 @@ static void getDBNameFromCondition(SNode* pCondition, const char* dbName) { if (NULL == pCondition) { return; } - nodesWalkExpr(pCondition, getDBNameFromConditionWalker, (char*) dbName); + nodesWalkExpr(pCondition, getDBNameFromConditionWalker, (char*)dbName); } static int32_t loadSysTableCallback(void* param, const SDataBuf* pMsg, int32_t code) { @@ -809,7 +814,7 @@ static SSDataBlock* doFilterResult(SSysTableScanInfo* pInfo) { code = filterSetDataFromSlotId(filter, &param1); int8_t* rowRes = NULL; - bool keep = filterExecute(filter, pInfo->pRes, &rowRes, NULL, param1.numOfCols); + bool keep = filterExecute(filter, pInfo->pRes, &rowRes, NULL, param1.numOfCols); filterFreeInfo(filter); SSDataBlock* px = createOneDataBlock(pInfo->pRes, false); @@ -853,13 +858,13 @@ static SSDataBlock* doFilterResult(SSysTableScanInfo* pInfo) { static SSDataBlock* buildSysTableMetaBlock() { SSDataBlock* pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock)); - size_t size = 0; - const SSysTableMeta *pMeta = NULL; + size_t size = 0; + const SSysTableMeta* pMeta = NULL; getInfosDbMeta(&pMeta, &size); int32_t index = 0; - for(int32_t i = 0; i < size; ++i) { - if(strcmp(pMeta[i].name, TSDB_INS_TABLE_USER_TABLES) == 0) { + for (int32_t i = 0; i < size; ++i) { + if (strcmp(pMeta[i].name, TSDB_INS_TABLE_USER_TABLES) == 0) { index = i; break; } @@ -867,7 +872,7 @@ static SSDataBlock* buildSysTableMetaBlock() { pBlock->pDataBlock = taosArrayInit(pBlock->info.numOfCols, sizeof(SColumnInfoData)); - for(int32_t i = 0; i < pMeta[index].colNum; ++i) { + for (int32_t i = 0; i
< pMeta[index].colNum; ++i) { SColumnInfoData colInfoData = {0}; colInfoData.info.colId = i + 1; colInfoData.info.type = pMeta[index].schema[i].type; @@ -1091,7 +1096,7 @@ int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capacity) { SSDataBlock* p = buildSysTableMetaBlock(); blockDataEnsureCapacity(p, capacity); - size_t size = 0; + size_t size = 0; const SSysTableMeta* pSysDbTableMeta = NULL; getInfosDbMeta(&pSysDbTableMeta, &size); @@ -1100,18 +1105,19 @@ int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capacity) { getPerfDbMeta(&pSysDbTableMeta, &size); p->info.rows = buildDbTableInfoBlock(p, pSysDbTableMeta, size, TSDB_PERFORMANCE_SCHEMA_DB); - relocateColumnData(pInfo->pRes, pInfo->scanCols, p->pDataBlock); -// blockDataDestroy(p); todo handle memory leak + relocateColumnData(pInfo->pRes, pInfo->scanCols, p->pDataBlock); + // blockDataDestroy(p); todo handle memory leak pInfo->pRes->info.rows = p->info.rows; return p->info.rows; } -int32_t buildDbTableInfoBlock(const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta, size_t size, const char* dbName) { - char n[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; +int32_t buildDbTableInfoBlock(const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta, size_t size, + const char* dbName) { + char n[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; int32_t numOfRows = p->info.rows; - for(int32_t i = 0; i < size; ++i) { + for (int32_t i = 0; i < size; ++i) { const SSysTableMeta* pm = &pSysDbTableMeta[i]; SColumnInfoData* pColInfoData = taosArrayGet(p->pDataBlock, 0); @@ -1132,7 +1138,7 @@ int32_t buildDbTableInfoBlock(const SSDataBlock* p, const SSysTableMeta* pSysDbT pColInfoData = taosArrayGet(p->pDataBlock, 3); colDataAppend(pColInfoData, numOfRows, (char*)&pm->colNum, false); - for(int32_t j = 4; j <= 8; ++j) { + for (int32_t j = 4; j <= 8; ++j) { pColInfoData = taosArrayGet(p->pDataBlock, j); colDataAppendNULL(pColInfoData, numOfRows); } @@ -1160,18 +1166,18 @@ SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSDataBlock* pRe return NULL; } - pInfo->accountId = accountId; + pInfo->accountId = accountId; pInfo->showRewrite = showRewrite; - pInfo->pRes = pResBlock; - pInfo->pCondition = pCondition; - pInfo->scanCols = colList; + pInfo->pRes = pResBlock; + pInfo->pCondition = pCondition; + pInfo->scanCols = colList; initResultSizeInfo(pOperator, 4096); tNameAssign(&pInfo->name, pName); const char* name = tNameGetTableName(&pInfo->name); if (strncasecmp(name, TSDB_INS_TABLE_USER_TABLES, TSDB_TABLE_FNAME_LEN) == 0) { - pInfo->readHandle = *(SReadHandle*) readHandle; + pInfo->readHandle = *(SReadHandle*)readHandle; blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity); } else { tsem_init(&pInfo->ready, 0, 0); @@ -1179,14 +1185,14 @@ SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSDataBlock* pRe pInfo->readHandle = *(SReadHandle*)readHandle; } - pOperator->name = "SysTableScanOperator"; + pOperator->name = "SysTableScanOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN; - pOperator->blocking = false; - pOperator->status = OP_NOT_OPENED; - pOperator->info = pInfo; - pOperator->numOfExprs = pResBlock->info.numOfCols; - pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doSysTableScan, NULL, NULL, destroySysScanOperator, - NULL, NULL, NULL); + pOperator->blocking = false; + pOperator->status = OP_NOT_OPENED; + pOperator->info = pInfo; + pOperator->numOfExprs = pResBlock->info.numOfCols; + pOperator->fpSet = + 
createOperatorFpSet(operatorDummyOpenFn, doSysTableScan, NULL, NULL, destroySysScanOperator, NULL, NULL, NULL); pOperator->pTaskInfo = pTaskInfo; return pOperator; @@ -1333,26 +1339,27 @@ static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) { } SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, SExprInfo* pExpr, int32_t numOfOutput, - SSDataBlock* pResBlock, SArray* pColMatchInfo, STableGroupInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo) { + SSDataBlock* pResBlock, SArray* pColMatchInfo, + STableGroupInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo) { STagScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STagScanInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { goto _error; } - pInfo->pTableGroups = pTableGroupInfo; - pInfo->pColMatchInfo = pColMatchInfo; - pInfo->pRes = pResBlock; - pInfo->readHandle = *pReadHandle; - pInfo->curPos = 0; - pOperator->name = "TagScanOperator"; + pInfo->pTableGroups = pTableGroupInfo; + pInfo->pColMatchInfo = pColMatchInfo; + pInfo->pRes = pResBlock; + pInfo->readHandle = *pReadHandle; + pInfo->curPos = 0; + pOperator->name = "TagScanOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN; - pOperator->blocking = false; - pOperator->status = OP_NOT_OPENED; - pOperator->info = pInfo; - pOperator->pExpr = pExpr; - pOperator->numOfExprs = numOfOutput; - pOperator->pTaskInfo = pTaskInfo; + pOperator->blocking = false; + pOperator->status = OP_NOT_OPENED; + pOperator->info = pInfo; + pOperator->pExpr = pExpr; + pOperator->numOfExprs = numOfOutput; + pOperator->pTaskInfo = pTaskInfo; initResultSizeInfo(pOperator, 4096); blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity); diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 5c609303ff..ebde1c7997 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -1,7 +1,7 @@ -#include "ttime.h" -#include "tdatablock.h" #include "executorimpl.h" #include "functionMgt.h" +#include "tdatablock.h" +#include "ttime.h" typedef enum SResultTsInterpType { RESULT_ROW_START_INTERP = 1, @@ -545,7 +545,6 @@ static void setResultRowInterpo(SResultRow* pResult, SResultTsInterpType type) { } } - static void doWindowBorderInterpolation(SOperatorInfo* pOperatorInfo, SSDataBlock* pBlock, SqlFunctionCtx* pCtx, SResultRow* pResult, STimeWindow* win, int32_t startPos, int32_t forwardStep, int32_t order, bool timeWindowInterpo) { @@ -759,10 +758,10 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) { return TSDB_CODE_SUCCESS; } - SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SIntervalAggOperatorInfo* pInfo = pOperator->info; - int32_t order = TSDB_ORDER_ASC; + int32_t order = TSDB_ORDER_ASC; SOperatorInfo* downstream = pOperator->pDownstream[0]; while (1) { @@ -808,7 +807,7 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) { } static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorInfo* pInfo, SSDataBlock* pBlock) { - SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SColumnInfoData* pStateColInfoData = taosArrayGet(pBlock->pDataBlock, pInfo->colIndex); int64_t gid = pBlock->info.groupId; @@ -932,7 +931,7 @@ static SSDataBlock* doStateWindowAgg(SOperatorInfo* pOperator) { static SSDataBlock* doBuildIntervalResult(SOperatorInfo* pOperator) { 
SIntervalAggOperatorInfo* pInfo = pOperator->info; - SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; if (pOperator->status == OP_EXEC_DONE) { return NULL; @@ -981,10 +980,10 @@ static void finalizeUpdatedResult(int32_t numOfOutput, SDiskbasedBuf* pBuf, SArr } } static void setInverFunction(SqlFunctionCtx* pCtx, int32_t num, EStreamType type) { - for ( int i = 0; i < num; i++) { + for (int i = 0; i < num; i++) { if (type == STREAM_INVERT) { fmSetInvertFunc(pCtx[i].functionId, &(pCtx[i].fpSet)); - } else if (type == STREAM_NORMAL){ + } else if (type == STREAM_NORMAL) { fmSetNormalFunc(pCtx[i].functionId, &(pCtx[i].fpSet)); } } @@ -992,7 +991,7 @@ static void setInverFunction(SqlFunctionCtx* pCtx, int32_t num, EStreamType type static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { SIntervalAggOperatorInfo* pInfo = pOperator->info; - int32_t order = TSDB_ORDER_ASC; + int32_t order = TSDB_ORDER_ASC; if (pOperator->status == OP_EXEC_DONE) { return NULL; @@ -1038,7 +1037,8 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity); doBuildResultDatablock(&pInfo->binfo, &pInfo->groupResInfo, pOperator->pExpr, pInfo->aggSup.pResultBuf); - ASSERT(pInfo->binfo.pRes->info.rows > 0); + // TODO: remove for stream + /*ASSERT(pInfo->binfo.pRes->info.rows > 0);*/ pOperator->status = OP_RES_TO_RETURN; return pInfo->binfo.pRes->info.rows == 0 ? NULL : pInfo->binfo.pRes; @@ -1070,17 +1070,17 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* STimeWindowAggSupp* pTwAggSupp, const STableGroupInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo) { SIntervalAggOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SIntervalAggOperatorInfo)); - SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); + SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { goto _error; } - pInfo->order = TSDB_ORDER_ASC; - pInfo->interval = *pInterval; + pInfo->order = TSDB_ORDER_ASC; + pInfo->interval = *pInterval; // pInfo->execModel = OPTR_EXEC_MODEL_STREAM; - pInfo->execModel = pTaskInfo->execModel; - pInfo->win = pTaskInfo->window; - pInfo->twAggSup = *pTwAggSupp; + pInfo->execModel = pTaskInfo->execModel; + pInfo->win = pTaskInfo->window; + pInfo->twAggSup = *pTwAggSupp; pInfo->primaryTsIndex = primaryTsSlotId; size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES; @@ -1099,14 +1099,14 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* initResultRowInfo(&pInfo->binfo.resultRowInfo, (int32_t)1); - pOperator->name = "TimeIntervalAggOperator"; + pOperator->name = "TimeIntervalAggOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_INTERVAL; - pOperator->blocking = true; - pOperator->status = OP_NOT_OPENED; - pOperator->pExpr = pExprInfo; - pOperator->pTaskInfo = pTaskInfo; - pOperator->numOfExprs = numOfCols; - pOperator->info = pInfo; + pOperator->blocking = true; + pOperator->status = OP_NOT_OPENED; + pOperator->pExpr = pExprInfo; + pOperator->pTaskInfo = pTaskInfo; + pOperator->numOfExprs = numOfCols; + pOperator->info = pInfo; pOperator->fpSet = createOperatorFpSet(doOpenIntervalAgg, doBuildIntervalResult, doStreamIntervalAgg, NULL, destroyIntervalOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL); @@ -1118,7 +1118,7 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* return pOperator; - 
_error: +_error: destroyIntervalOperatorInfo(pInfo, numOfCols); taosMemoryFreeClear(pInfo); taosMemoryFreeClear(pOperator); @@ -1131,7 +1131,7 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExpr STimeWindowAggSupp* pTwAggSupp, const STableGroupInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo) { SIntervalAggOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SIntervalAggOperatorInfo)); - SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); + SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { goto _error; } @@ -1177,7 +1177,7 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExpr return pOperator; - _error: +_error: destroyIntervalOperatorInfo(pInfo, numOfCols); taosMemoryFreeClear(pInfo); taosMemoryFreeClear(pOperator); @@ -1321,7 +1321,7 @@ static SSDataBlock* doAllIntervalAgg(SOperatorInfo* pOperator) { return pSliceInfo->binfo.pRes; } - int32_t order = TSDB_ORDER_ASC; + int32_t order = TSDB_ORDER_ASC; SOperatorInfo* downstream = pOperator->pDownstream[0]; while (1) { @@ -1379,7 +1379,7 @@ SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SExprInfo* int32_t code = appendDownstream(pOperator, &downstream, 1); return pOperator; - _error: +_error: taosMemoryFree(pInfo); taosMemoryFree(pOperator); pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY; @@ -1402,18 +1402,18 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInf initAggInfo(&pInfo->binfo, &pInfo->aggSup, pExpr, numOfCols, pResBlock, keyBufSize, pTaskInfo->id.str); initResultRowInfo(&pInfo->binfo.resultRowInfo, 8); - pInfo->twAggSup = *pTwAggSup; + pInfo->twAggSup = *pTwAggSup; initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window); - pInfo->tsSlotId = tsSlotId; - pOperator->name = "StateWindowOperator"; + pInfo->tsSlotId = tsSlotId; + pOperator->name = "StateWindowOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW; pOperator->blocking = true; - pOperator->status = OP_NOT_OPENED; - pOperator->pExpr = pExpr; - pOperator->numOfExprs = numOfCols; - pOperator->pTaskInfo = pTaskInfo; - pOperator->info = pInfo; + pOperator->status = OP_NOT_OPENED; + pOperator->pExpr = pExpr; + pOperator->numOfExprs = numOfCols; + pOperator->pTaskInfo = pTaskInfo; + pOperator->info = pInfo; pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStateWindowAgg, NULL, NULL, destroyStateWindowOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL); @@ -1421,7 +1421,7 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInf int32_t code = appendDownstream(pOperator, &downstream, 1); return pOperator; - _error: +_error: pTaskInfo->code = TSDB_CODE_SUCCESS; return NULL; } @@ -1432,8 +1432,8 @@ void destroySWindowOperatorInfo(void* param, int32_t numOfOutput) { } SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, - SSDataBlock* pResBlock, int64_t gap, int32_t tsSlotId, STimeWindowAggSupp* pTwAggSupp, - SExecTaskInfo* pTaskInfo) { + SSDataBlock* pResBlock, int64_t gap, int32_t tsSlotId, + STimeWindowAggSupp* pTwAggSupp, SExecTaskInfo* pTaskInfo) { SSessionAggOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SSessionAggOperatorInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { @@ -1453,18 +1453,18 @@ SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SExprInfo 
initResultRowInfo(&pInfo->binfo.resultRowInfo, 8); initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window); - pInfo->tsSlotId = tsSlotId; - pInfo->gap = gap; - pInfo->binfo.pRes = pResBlock; - pInfo->winSup.prevTs = INT64_MIN; - pInfo->reptScan = false; - pOperator->name = "SessionWindowAggOperator"; + pInfo->tsSlotId = tsSlotId; + pInfo->gap = gap; + pInfo->binfo.pRes = pResBlock; + pInfo->winSup.prevTs = INT64_MIN; + pInfo->reptScan = false; + pOperator->name = "SessionWindowAggOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW; pOperator->blocking = true; - pOperator->status = OP_NOT_OPENED; - pOperator->pExpr = pExprInfo; - pOperator->numOfExprs = numOfCols; - pOperator->info = pInfo; + pOperator->status = OP_NOT_OPENED; + pOperator->pExpr = pExprInfo; + pOperator->numOfExprs = numOfCols; + pOperator->info = pInfo; pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doSessionWindowAgg, NULL, NULL, destroySWindowOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL); @@ -1473,7 +1473,7 @@ SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SExprInfo code = appendDownstream(pOperator, &downstream, 1); return pOperator; - _error: +_error: if (pInfo != NULL) { destroySWindowOperatorInfo(pInfo, numOfCols); } @@ -1482,4 +1482,4 @@ SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SExprInfo taosMemoryFreeClear(pOperator); pTaskInfo->code = code; return NULL; -} \ No newline at end of file +} diff --git a/source/libs/function/CMakeLists.txt b/source/libs/function/CMakeLists.txt index c31cabda19..7a4cd80922 100644 --- a/source/libs/function/CMakeLists.txt +++ b/source/libs/function/CMakeLists.txt @@ -36,7 +36,7 @@ target_link_libraries( PRIVATE os util common nodes function ) -add_library(udf1 MODULE test/udf1.c) +add_library(udf1 STATIC MODULE test/udf1.c) target_include_directories( udf1 PUBLIC @@ -50,7 +50,7 @@ target_include_directories( target_link_libraries( udf1 PUBLIC os) -add_library(udf2 MODULE test/udf2.c) +add_library(udf2 STATIC MODULE test/udf2.c) target_include_directories( udf2 PUBLIC diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 9e2bdea7d5..5aa1b63c79 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -266,6 +266,20 @@ static int32_t translateFirstLast(SFunctionNode* pFunc, char* pErrBuf, int32_t l return TSDB_CODE_SUCCESS; } +static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + int32_t paraLen = LIST_LENGTH(pFunc->pParameterList); + if (paraLen == 0 || paraLen > 2) { + return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); + } + + SExprNode* p1 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0); + if (!IS_NUMERIC_TYPE(p1->resType.type)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + pFunc->node.resType = p1->resType; + return TSDB_CODE_SUCCESS; +} + static int32_t translateLength(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { if (1 != LIST_LENGTH(pFunc->pParameterList)) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); @@ -617,7 +631,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "diff", .type = FUNCTION_TYPE_DIFF, .classification = FUNC_MGT_NONSTANDARD_SQL_FUNC | FUNC_MGT_TIMELINE_FUNC, - .translateFunc = translateInOutNum, + .translateFunc = translateDiff, .getEnvFunc = getDiffFuncEnv, .initFunc = diffFunctionSetup, .processFunc = diffFunction, diff --git 
a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c
index 1cb17ca493..9c1601b61a 100644
--- a/source/libs/function/src/builtinsimpl.c
+++ b/source/libs/function/src/builtinsimpl.c
@@ -75,7 +75,7 @@ typedef struct SPercentileInfo {
 typedef struct SDiffInfo {
   bool hasPrev;
   bool includeNull;
-  bool ignoreNegative;
+  bool ignoreNegative; // replace the ignore with case when
   bool firstOutput;
   union {
     int64_t i64;
@@ -1419,248 +1419,192 @@ bool diffFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResInfo) {
   SDiffInfo* pDiffInfo = GET_ROWCELL_INTERBUF(pResInfo);
   pDiffInfo->hasPrev = false;
   pDiffInfo->prev.i64 = 0;
-  pDiffInfo->ignoreNegative = false; // TODO set correct param
+  pDiffInfo->ignoreNegative = pCtx->param[1].param.i; // TODO set correct param
   pDiffInfo->includeNull = false;
   pDiffInfo->firstOutput = false;
   return true;
 }
+static void doSetPrevVal(SDiffInfo* pDiffInfo, int32_t type, const char* pv) {
+  switch(type) {
+    case TSDB_DATA_TYPE_BOOL:
+    case TSDB_DATA_TYPE_TINYINT:
+      pDiffInfo->prev.i64 = *(int8_t*) pv; break;
+    case TSDB_DATA_TYPE_INT:
+      pDiffInfo->prev.i64 = *(int32_t*) pv; break;
+    case TSDB_DATA_TYPE_SMALLINT:
+      pDiffInfo->prev.i64 = *(int16_t*) pv; break;
+    case TSDB_DATA_TYPE_BIGINT:
+      pDiffInfo->prev.i64 = *(int64_t*) pv; break;
+    case TSDB_DATA_TYPE_FLOAT:
+      pDiffInfo->prev.d64 = *(float *) pv; break;
+    case TSDB_DATA_TYPE_DOUBLE:
+      pDiffInfo->prev.d64 = *(double*) pv; break;
+    default:
+      ASSERT(0);
+  }
+}
+
+static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SColumnInfoData* pOutput, int32_t pos, int32_t order) {
+  int32_t factor = (order == TSDB_ORDER_ASC)? 1:-1;
+  switch (type) {
+    case TSDB_DATA_TYPE_INT: {
+      int32_t v = *(int32_t*)pv;
+      int32_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null
+      if (delta < 0 && pDiffInfo->ignoreNegative) {
+        colDataSetNull_f(pOutput->nullbitmap, pos);
+      } else {
+        colDataAppendInt32(pOutput, pos, &delta);
+      }
+      pDiffInfo->prev.i64 = v;
+      break;
+    }
+    case TSDB_DATA_TYPE_BOOL:
+    case TSDB_DATA_TYPE_TINYINT: {
+      int8_t v = *(int8_t*)pv;
+      int8_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null
+      if (delta < 0 && pDiffInfo->ignoreNegative) {
+        colDataSetNull_f(pOutput->nullbitmap, pos);
+      } else {
+        colDataAppendInt8(pOutput, pos, &delta);
+      }
+      pDiffInfo->prev.i64 = v;
+      break;
+    }
+    case TSDB_DATA_TYPE_SMALLINT: {
+      int16_t v = *(int16_t*)pv;
+      int16_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null
+      if (delta < 0 && pDiffInfo->ignoreNegative) {
+        colDataSetNull_f(pOutput->nullbitmap, pos);
+      } else {
+        colDataAppendInt16(pOutput, pos, &delta);
+      }
+      pDiffInfo->prev.i64 = v;
+      break;
+    }
+    case TSDB_DATA_TYPE_BIGINT: {
+      int64_t v = *(int64_t*)pv;
+      int64_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null
+      if (delta < 0 && pDiffInfo->ignoreNegative) {
+        colDataSetNull_f(pOutput->nullbitmap, pos);
+      } else {
+        colDataAppendInt64(pOutput, pos, &delta);
+      }
+      pDiffInfo->prev.i64 = v;
+      break;
+    }
+    case TSDB_DATA_TYPE_FLOAT: {
+      float v = *(float*)pv;
+      float delta = factor*(v - pDiffInfo->prev.d64); // direct previous may be null
+      if (delta < 0 && pDiffInfo->ignoreNegative) {
+        colDataSetNull_f(pOutput->nullbitmap, pos);
+      } else {
+        colDataAppendFloat(pOutput, pos, &delta);
+      }
+      pDiffInfo->prev.d64 = v;
+      break;
+    }
+    case TSDB_DATA_TYPE_DOUBLE: {
+      double v = *(double*)pv;
+      double delta = factor*(v - pDiffInfo->prev.d64); // direct previous may be null
+      if (delta <
0 && pDiffInfo->ignoreNegative) { + colDataSetNull_f(pOutput->nullbitmap, pos); + } else { + colDataAppendDouble(pOutput, pos, &delta); + } + pDiffInfo->prev.d64 = v; + break; + } + default: + ASSERT(0); + } + } + int32_t diffFunction(SqlFunctionCtx* pCtx) { SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); SDiffInfo* pDiffInfo = GET_ROWCELL_INTERBUF(pResInfo); SInputColumnInfoData* pInput = &pCtx->input; - SColumnInfoData* pInputCol = pInput->pData[0]; - - bool isFirstBlock = (pDiffInfo->hasPrev == false); - int32_t numOfElems = 0; + SColumnInfoData* pInputCol = pInput->pData[0]; SColumnInfoData* pTsOutput = pCtx->pTsOutput; - TSKEY* tsList = (int64_t*)pInput->pPTS->pData; + int32_t numOfElems = 0; + TSKEY* tsList = (int64_t*)pInput->pPTS->pData; int32_t startOffset = pCtx->offset; - switch (pInputCol->info.type) { - case TSDB_DATA_TYPE_INT: { - SColumnInfoData* pOutput = (SColumnInfoData*)pCtx->pOutput; - if (pCtx->order == TSDB_ORDER_ASC) { - for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; i += 1) { - int32_t pos = startOffset + (isFirstBlock ? (numOfElems - 1) : numOfElems); - if (colDataIsNull_f(pInputCol->nullbitmap, i)) { - if (pDiffInfo->includeNull) { - colDataSetNull_f(pOutput->nullbitmap, pos); - if (tsList != NULL) { - colDataAppendInt64(pTsOutput, pos, &tsList[i]); - } - numOfElems += 1; - } - continue; + SColumnInfoData* pOutput = (SColumnInfoData*)pCtx->pOutput; + + if (pCtx->order == TSDB_ORDER_ASC) { + for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; i += 1) { + int32_t pos = startOffset + numOfElems; + + if (colDataIsNull_f(pInputCol->nullbitmap, i)) { + if (pDiffInfo->includeNull) { + colDataSetNull_f(pOutput->nullbitmap, pos); + if (tsList != NULL) { + colDataAppendInt64(pTsOutput, pos, &tsList[i]); } - int32_t v = *(int32_t*)colDataGetData(pInputCol, i); - if (pDiffInfo->hasPrev) { - int32_t delta = (int32_t)(v - pDiffInfo->prev.i64); // direct previous may be null - if (delta < 0 && pDiffInfo->ignoreNegative) { - colDataSetNull_f(pOutput->nullbitmap, pos); - } else { - colDataAppendInt32(pOutput, pos, &delta); - } - - if (pTsOutput != NULL) { - colDataAppendInt64(pTsOutput, pos, &tsList[i]); - } - } - - pDiffInfo->prev.i64 = v; - pDiffInfo->hasPrev = true; - numOfElems++; + numOfElems += 1; } + continue; + } + + char* pv = colDataGetData(pInputCol, i); + + if (pDiffInfo->hasPrev) { + doHandleDiff(pDiffInfo, pInputCol->info.type, pv, pOutput, pos, pCtx->order); + if (pTsOutput != NULL) { + colDataAppendInt64(pTsOutput, pos, &tsList[i]); + } + + numOfElems++; } else { - for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; i += 1) { - int32_t v = *(int32_t*)colDataGetData(pInputCol, i); - int32_t pos = startOffset + numOfElems; + doSetPrevVal(pDiffInfo, pInputCol->info.type, pv); + } - // there is a row of previous data block to be handled in the first place. 
- if (pDiffInfo->hasPrev) { - int32_t delta = (int32_t)(pDiffInfo->prev.i64 - v); // direct previous may be null - if (delta < 0 && pDiffInfo->ignoreNegative) { - colDataSetNull_f(pOutput->nullbitmap, pos); - } else { - colDataAppendInt32(pOutput, pos, &delta); - } + pDiffInfo->hasPrev = true; + } + } else { + for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; i += 1) { + int32_t pos = startOffset + numOfElems; - if (pTsOutput != NULL) { - colDataAppendInt64(pTsOutput, pos, &pDiffInfo->prevTs); - } - pDiffInfo->hasPrev = false; + if (colDataIsNull_f(pInputCol->nullbitmap, i)) { + if (pDiffInfo->includeNull) { + colDataSetNull_f(pOutput->nullbitmap, pos); + if (tsList != NULL) { + colDataAppendInt64(pTsOutput, pos, &tsList[i]); } - // it is not the last row of current block - if (i < pInput->numOfRows + pInput->startRowIndex - 1) { - int32_t next = *(int32_t*)colDataGetData(pInputCol, i + 1); - - int32_t delta = v - next; // direct previous may be null - colDataAppendInt32(pOutput, pos, &delta); - - if (pTsOutput != NULL) { - colDataAppendInt64(pTsOutput, pos, &tsList[i]); - } - } else { - pDiffInfo->prev.i64 = v; - if (pTsOutput != NULL) { - pDiffInfo->prevTs = tsList[i]; - } - pDiffInfo->hasPrev = true; - } - numOfElems++; + numOfElems += 1; } - + continue; } - break; - } - case TSDB_DATA_TYPE_BIGINT: { - SColumnInfoData* pOutput = (SColumnInfoData*)pCtx->pOutput; - for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; i += 1) { - if (colDataIsNull_f(pInputCol->nullbitmap, i)) { - continue; + char* pv = colDataGetData(pInputCol, i); + + // there is a row of previous data block to be handled in the first place. + if (pDiffInfo->hasPrev) { + doHandleDiff(pDiffInfo, pInputCol->info.type, pv, pOutput, pos, pCtx->order); + if (pTsOutput != NULL) { + colDataAppendInt64(pTsOutput, pos, &pDiffInfo->prevTs); } - int32_t v = 0; - if (pDiffInfo->hasPrev) { - v = *(int64_t*)colDataGetData(pInputCol, i); - int64_t delta = (int64_t)(v - pDiffInfo->prev.i64); // direct previous may be null - if (pDiffInfo->ignoreNegative) { - continue; - } - - // *(pOutput++) = delta; - // *pTimestamp = (tsList != NULL)? tsList[i]:0; - // - // pOutput += 1; - // pTimestamp += 1; - } - - pDiffInfo->prev.i64 = v; - pDiffInfo->hasPrev = true; numOfElems++; + } else { + doSetPrevVal(pDiffInfo, pInputCol->info.type, pv); } - break; - } -#if 0 - case TSDB_DATA_TYPE_DOUBLE: { - double *pData = (double *)data; - double *pOutput = (double *)pCtx->pOutput; - for (; i < pCtx->size && i >= 0; i += step) { - if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) { - continue; - } - if ((pDiffInfo->ignoreNegative) && (pData[i] < 0)) { - continue; - } - - if (pDiffInfo->hasPrev) { // initial value is not set yet - SET_DOUBLE_VAL(pOutput, pData[i] - pDiffInfo->d64Prev); // direct previous may be null - *pTimestamp = (tsList != NULL)? 
tsList[i]:0; - pOutput += 1; - pTimestamp += 1; - } - - pDiffInfo->d64Prev = pData[i]; - pDiffInfo->hasPrev = true; - numOfElems++; + pDiffInfo->hasPrev = true; + if (pTsOutput != NULL) { + pDiffInfo->prevTs = tsList[i]; } - break; } - case TSDB_DATA_TYPE_FLOAT: { - float *pData = (float *)data; - float *pOutput = (float *)pCtx->pOutput; - - for (; i < pCtx->size && i >= 0; i += step) { - if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) { - continue; - } - if ((pDiffInfo->ignoreNegative) && (pData[i] < 0)) { - continue; - } - - if (pDiffInfo->hasPrev) { // initial value is not set yet - *pOutput = (float)(pData[i] - pDiffInfo->d64Prev); // direct previous may be null - *pTimestamp = (tsList != NULL)? tsList[i]:0; - pOutput += 1; - pTimestamp += 1; - } - - pDiffInfo->d64Prev = pData[i]; - pDiffInfo->hasPrev = true; - numOfElems++; - } - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - int16_t *pData = (int16_t *)data; - int16_t *pOutput = (int16_t *)pCtx->pOutput; - - for (; i < pCtx->size && i >= 0; i += step) { - if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) { - continue; - } - if ((pDiffInfo->ignoreNegative) && (pData[i] < 0)) { - continue; - } - - if (pDiffInfo->hasPrev) { // initial value is not set yet - *pOutput = (int16_t)(pData[i] - pDiffInfo->i64Prev); // direct previous may be null - *pTimestamp = (tsList != NULL)? tsList[i]:0; - pOutput += 1; - pTimestamp += 1; - } - - pDiffInfo->i64Prev = pData[i]; - pDiffInfo->hasPrev = true; - numOfElems++; - } - break; - } - - case TSDB_DATA_TYPE_TINYINT: { - int8_t *pData = (int8_t *)data; - int8_t *pOutput = (int8_t *)pCtx->pOutput; - - for (; i < pCtx->size && i >= 0; i += step) { - if (pCtx->hasNull && isNull((char *)&pData[i], pCtx->inputType)) { - continue; - } - if ((pDiffInfo->ignoreNegative) && (pData[i] < 0)) { - continue; - } - - if (pDiffInfo->hasPrev) { // initial value is not set yet - *pOutput = (int8_t)(pData[i] - pDiffInfo->i64Prev); // direct previous may be null - *pTimestamp = (tsList != NULL)? tsList[i]:0; - pOutput += 1; - pTimestamp += 1; - } - - pDiffInfo->i64Prev = pData[i]; - pDiffInfo->hasPrev = true; - numOfElems++; - } - break; - } -#endif - default: - break; - // qError("error input type"); } // initial value is not set yet - if (numOfElems <= 0) { - return 0; - } else { - return (isFirstBlock) ? 
numOfElems - 1 : numOfElems; - } + return numOfElems; } bool getTopBotFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) { diff --git a/source/libs/function/src/taggfunction.c b/source/libs/function/src/taggfunction.c index c26342bfa8..4d80b88a3a 100644 --- a/source/libs/function/src/taggfunction.c +++ b/source/libs/function/src/taggfunction.c @@ -19,7 +19,6 @@ #include "thash.h" #include "ttypes.h" -//#include "tfill.h" #include "function.h" #include "taggfunction.h" #include "tbuffer.h" @@ -27,7 +26,6 @@ #include "thistogram.h" #include "tpercentile.h" #include "ttszip.h" -//#include "queryLog.h" #include "tdatablock.h" #include "tudf.h" diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c index 0a99ef61ce..75b6aeaae9 100644 --- a/source/libs/function/src/tudf.c +++ b/source/libs/function/src/tudf.c @@ -127,7 +127,7 @@ enum { int64_t gUdfTaskSeqNum = 0; typedef struct SUdfdProxy { - char udfdPipeName[UDF_LISTEN_PIPE_NAME_LEN]; + char udfdPipeName[PATH_MAX + UDF_LISTEN_PIPE_NAME_LEN + 2]; uv_barrier_t gUdfInitBarrier; uv_loop_t gUdfdLoop; @@ -146,15 +146,15 @@ typedef struct SUdfdProxy { SUdfdProxy gUdfdProxy = {0}; -typedef struct SUdfUvSession { +typedef struct SClientUdfUvSession { SUdfdProxy *udfc; int64_t severHandle; - uv_pipe_t *udfSvcPipe; + uv_pipe_t *udfUvPipe; int8_t outputType; int32_t outputLen; int32_t bufSize; -} SUdfUvSession; +} SClientUdfUvSession; typedef struct SClientUvTaskNode { SUdfdProxy *udfc; @@ -177,7 +177,7 @@ typedef struct SClientUvTaskNode { typedef struct SClientUdfTask { int8_t type; - SUdfUvSession *session; + SClientUdfUvSession *session; int32_t errCode; @@ -209,6 +209,7 @@ typedef struct SClientUvConn { uv_pipe_t *pipe; QUEUE taskQueue; SClientConnBuf readBuf; + SClientUdfUvSession *session; } SClientUvConn; enum { @@ -223,9 +224,15 @@ int32_t getUdfdPipeName(char* pipeName, int32_t size) { size_t dnodeIdSize = sizeof(dnodeId); int32_t err = uv_os_getenv(UDF_DNODE_ID_ENV_NAME, dnodeId, &dnodeIdSize); if (err != 0) { + fnError("get dnode id from env. error: %s.", uv_err_name(err)); dnodeId[0] = '1'; } +#ifdef _WIN32 snprintf(pipeName, size, "%s%s", UDF_LISTEN_PIPE_NAME_PREFIX, dnodeId); +#else + snprintf(pipeName, size, "%s/%s%s", tsDataDir, UDF_LISTEN_PIPE_NAME_PREFIX, dnodeId); +#endif + fnInfo("get dnode id from env. dnode id: %s. pipe path: %s", dnodeId, pipeName); return 0; } @@ -617,18 +624,17 @@ void onUdfcPipeClose(uv_handle_t *handle) { QUEUE* h = QUEUE_HEAD(&conn->taskQueue); SClientUvTaskNode *task = QUEUE_DATA(h, SClientUvTaskNode, connTaskQueue); task->errCode = 0; - uv_sem_post(&task->taskSem); QUEUE_REMOVE(&task->procTaskQueue); + uv_sem_post(&task->taskSem); } - + conn->session->udfUvPipe = NULL; taosMemoryFree(conn->readBuf.buf); taosMemoryFree(conn); taosMemoryFree((uv_pipe_t *) handle); - } -int32_t udfcGetUvTaskResponseResult(SClientUdfTask *task, SClientUvTaskNode *uvTask) { - fnDebug("udfc get uv task result. task: %p", task); +int32_t udfcGetUdfTaskResultFromUvTask(SClientUdfTask *task, SClientUvTaskNode *uvTask) { + fnDebug("udfc get uv task result. 
task: %p, uvTask: %p", task, uvTask); if (uvTask->type == UV_TASK_REQ_RSP) { if (uvTask->rspBuf.base != NULL) { SUdfResponse rsp; @@ -748,8 +754,8 @@ void udfcUvHandleRsp(SClientUvConn *conn) { if (taskFound) { taskFound->rspBuf = uv_buf_init(connBuf->buf, connBuf->len); QUEUE_REMOVE(&taskFound->connTaskQueue); - uv_sem_post(&taskFound->taskSem); QUEUE_REMOVE(&taskFound->procTaskQueue); + uv_sem_post(&taskFound->taskSem); } else { fnError("no task is waiting for the response."); } @@ -764,14 +770,12 @@ void udfcUvHandleError(SClientUvConn *conn) { QUEUE* h = QUEUE_HEAD(&conn->taskQueue); SClientUvTaskNode *task = QUEUE_DATA(h, SClientUvTaskNode, connTaskQueue); task->errCode = UDFC_CODE_PIPE_READ_ERR; - uv_sem_post(&task->taskSem); + QUEUE_REMOVE(&task->connTaskQueue); QUEUE_REMOVE(&task->procTaskQueue); + uv_sem_post(&task->taskSem); } - uv_close((uv_handle_t *) conn->pipe, NULL); - taosMemoryFree(conn->pipe); - taosMemoryFree(conn->readBuf.buf); - taosMemoryFree(conn); + uv_close((uv_handle_t *) conn->pipe, onUdfcPipeClose); } void onUdfcRead(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf) { @@ -788,9 +792,9 @@ void onUdfcRead(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf) { } if (nread < 0) { - fnError("udfc client pipe %p read error: %s", client, uv_strerror(nread)); + fnError("udfc client pipe %p read error: %zd, %s.", client, nread, uv_strerror(nread)); if (nread == UV_EOF) { - fnError("udfc client pipe %p closed", client); + fnError("\tudfc client pipe %p closed", client); } udfcUvHandleError(conn); } @@ -823,14 +827,14 @@ void onUdfClientConnect(uv_connect_t *connect, int status) { QUEUE_REMOVE(&uvTask->procTaskQueue); } -int32_t createUdfcUvTask(SClientUdfTask *task, int8_t uvTaskType, SClientUvTaskNode **pUvTask) { +int32_t udfcCreateUvTask(SClientUdfTask *task, int8_t uvTaskType, SClientUvTaskNode **pUvTask) { SClientUvTaskNode *uvTask = taosMemoryCalloc(1, sizeof(SClientUvTaskNode)); uvTask->type = uvTaskType; uvTask->udfc = task->session->udfc; if (uvTaskType == UV_TASK_CONNECT) { } else if (uvTaskType == UV_TASK_REQ_RSP) { - uvTask->pipe = task->session->udfSvcPipe; + uvTask->pipe = task->session->udfUvPipe; SUdfRequest request; request.type = task->type; request.seqNum = atomic_fetch_add_64(&gUdfTaskSeqNum, 1); @@ -855,7 +859,7 @@ int32_t createUdfcUvTask(SClientUdfTask *task, int8_t uvTaskType, SClientUvTaskN uvTask->reqBuf = uv_buf_init(bufBegin, bufLen); uvTask->seqNum = request.seqNum; } else if (uvTaskType == UV_TASK_DISCONNECT) { - uvTask->pipe = task->session->udfSvcPipe; + uvTask->pipe = task->session->udfUvPipe; } uv_sem_init(&uvTask->taskSem, 0); @@ -863,7 +867,7 @@ int32_t createUdfcUvTask(SClientUdfTask *task, int8_t uvTaskType, SClientUvTaskN return 0; } -int32_t queueUvUdfTask(SClientUvTaskNode *uvTask) { +int32_t udfcQueueUvTask(SClientUvTaskNode *uvTask) { fnTrace("queue uv task to event loop, task: %d, %p", uvTask->type, uvTask); SUdfdProxy *udfc = uvTask->udfc; uv_mutex_lock(&udfc->gUdfTaskQueueMutex); @@ -872,12 +876,13 @@ int32_t queueUvUdfTask(SClientUvTaskNode *uvTask) { uv_async_send(&udfc->gUdfLoopTaskAync); uv_sem_wait(&uvTask->taskSem); + fnInfo("udfc uv task finished. task: %d, %p", uvTask->type, uvTask); uv_sem_destroy(&uvTask->taskSem); return 0; } -int32_t startUvUdfTask(SClientUvTaskNode *uvTask) { +int32_t udfcStartUvTask(SClientUvTaskNode *uvTask) { fnTrace("event loop start uv task. 
task: %d, %p", uvTask->type, uvTask); switch (uvTask->type) { case UV_TASK_CONNECT: { @@ -885,7 +890,7 @@ int32_t startUvUdfTask(SClientUvTaskNode *uvTask) { uv_pipe_init(&uvTask->udfc->gUdfdLoop, pipe, 0); uvTask->pipe = pipe; - SClientUvConn *conn = taosMemoryMalloc(sizeof(SClientUvConn)); + SClientUvConn *conn = taosMemoryCalloc(1, sizeof(SClientUvConn)); conn->pipe = pipe; conn->readBuf.len = 0; conn->readBuf.cap = 0; @@ -933,13 +938,14 @@ void udfClientAsyncCb(uv_async_t *async) { QUEUE* h = QUEUE_HEAD(&wq); QUEUE_REMOVE(h); SClientUvTaskNode *task = QUEUE_DATA(h, SClientUvTaskNode, recvTaskQueue); - startUvUdfTask(task); + udfcStartUvTask(task); QUEUE_INSERT_TAIL(&udfc->gUvProcTaskQueue, &task->procTaskQueue); } } void cleanUpUvTasks(SUdfdProxy *udfc) { + fnDebug("clean up uv tasks") QUEUE wq; uv_mutex_lock(&udfc->gUdfTaskQueueMutex); @@ -956,7 +962,6 @@ void cleanUpUvTasks(SUdfdProxy *udfc) { uv_sem_post(&task->taskSem); } - // TODO: deal with tasks that are waiting result. while (!QUEUE_EMPTY(&udfc->gUvProcTaskQueue)) { QUEUE* h = QUEUE_HEAD(&udfc->gUvProcTaskQueue); QUEUE_REMOVE(h); @@ -999,7 +1004,7 @@ int32_t udfcOpen() { return 0; } SUdfdProxy *proxy = &gUdfdProxy; - getUdfdPipeName(proxy->udfdPipeName, UDF_LISTEN_PIPE_NAME_LEN); + getUdfdPipeName(proxy->udfdPipeName, sizeof(proxy->udfdPipeName)); proxy->gUdfcState = UDFC_STATE_STARTNG; uv_barrier_init(&proxy->gUdfInitBarrier, 2); uv_thread_create(&proxy->gUdfLoopThread, constructUdfService, proxy); @@ -1027,14 +1032,16 @@ int32_t udfcClose() { return 0; } -int32_t udfcRunUvTask(SClientUdfTask *task, int8_t uvTaskType) { +int32_t udfcRunUdfUvTask(SClientUdfTask *task, int8_t uvTaskType) { SClientUvTaskNode *uvTask = NULL; - createUdfcUvTask(task, uvTaskType, &uvTask); - queueUvUdfTask(uvTask); - udfcGetUvTaskResponseResult(task, uvTask); + udfcCreateUvTask(task, uvTaskType, &uvTask); + udfcQueueUvTask(uvTask); + udfcGetUdfTaskResultFromUvTask(task, uvTask); if (uvTaskType == UV_TASK_CONNECT) { - task->session->udfSvcPipe = uvTask->pipe; + task->session->udfUvPipe = uvTask->pipe; + SClientUvConn *conn = uvTask->pipe->data; + conn->session = task->session; } taosMemoryFree(uvTask); uvTask = NULL; @@ -1046,22 +1053,22 @@ int32_t setupUdf(char udfName[], UdfcFuncHandle *funcHandle) { if (gUdfdProxy.gUdfcState != UDFC_STATE_READY) { return UDFC_CODE_INVALID_STATE; } - SClientUdfTask *task = taosMemoryMalloc(sizeof(SClientUdfTask)); + SClientUdfTask *task = taosMemoryCalloc(1,sizeof(SClientUdfTask)); task->errCode = 0; - task->session = taosMemoryMalloc(sizeof(SUdfUvSession)); + task->session = taosMemoryCalloc(1, sizeof(SClientUdfUvSession)); task->session->udfc = &gUdfdProxy; task->type = UDF_TASK_SETUP; SUdfSetupRequest *req = &task->_setup.req; memcpy(req->udfName, udfName, TSDB_FUNC_NAME_LEN); - int32_t errCode = udfcRunUvTask(task, UV_TASK_CONNECT); + int32_t errCode = udfcRunUdfUvTask(task, UV_TASK_CONNECT); if (errCode != 0) { fnError("failed to connect to pipe. udfName: %s, pipe: %s", udfName, (&gUdfdProxy)->udfdPipeName); return UDFC_CODE_CONNECT_PIPE_ERR; } - udfcRunUvTask(task, UV_TASK_REQ_RSP); + udfcRunUdfUvTask(task, UV_TASK_REQ_RSP); SUdfSetupResponse *rsp = &task->_setup.rsp; task->session->severHandle = rsp->udfHandle; @@ -1082,10 +1089,14 @@ int32_t setupUdf(char udfName[], UdfcFuncHandle *funcHandle) { int32_t callUdf(UdfcFuncHandle handle, int8_t callType, SSDataBlock *input, SUdfInterBuf *state, SUdfInterBuf *state2, SSDataBlock* output, SUdfInterBuf *newState) { fnTrace("udfc call udf. 
callType: %d, funcHandle: %p", callType, handle); - - SClientUdfTask *task = taosMemoryMalloc(sizeof(SClientUdfTask)); + SClientUdfUvSession *session = (SClientUdfUvSession *) handle; + if (session->udfUvPipe == NULL) { + fnError("No pipe to udfd"); + return UDFC_CODE_NO_PIPE; + } + SClientUdfTask *task = taosMemoryCalloc(1, sizeof(SClientUdfTask)); task->errCode = 0; - task->session = (SUdfUvSession *) handle; + task->session = (SClientUdfUvSession *) handle; task->type = UDF_TASK_CALL; SUdfCallRequest *req = &task->_call.req; @@ -1117,7 +1128,7 @@ int32_t callUdf(UdfcFuncHandle handle, int8_t callType, SSDataBlock *input, SUdf } } - udfcRunUvTask(task, UV_TASK_REQ_RSP); + udfcRunUdfUvTask(task, UV_TASK_REQ_RSP); if (task->errCode != 0) { fnError("call udf failure. err: %d", task->errCode); @@ -1145,9 +1156,10 @@ int32_t callUdf(UdfcFuncHandle handle, int8_t callType, SSDataBlock *input, SUdf break; } } - } + }; + int err = task->errCode; taosMemoryFree(task); - return task->errCode; + return err; } int32_t callUdfAggInit(UdfcFuncHandle handle, SUdfInterBuf *interBuf) { @@ -1188,28 +1200,36 @@ int32_t callUdfScalarFunc(UdfcFuncHandle handle, SScalarParam *input, int32_t nu convertScalarParamToDataBlock(input, numOfCols, &inputBlock); SSDataBlock resultBlock = {0}; int32_t err = callUdf(handle, callType, &inputBlock, NULL, NULL, &resultBlock, NULL); - convertDataBlockToScalarParm(&resultBlock, output); + if (err == 0) { + convertDataBlockToScalarParm(&resultBlock, output); + } return err; } int32_t teardownUdf(UdfcFuncHandle handle) { fnInfo("tear down udf. udf func handle: %p", handle); - SClientUdfTask *task = taosMemoryMalloc(sizeof(SClientUdfTask)); + SClientUdfUvSession *session = (SClientUdfUvSession *) handle; + if (session->udfUvPipe == NULL) { + fnError("pipe to udfd does not exist"); + return UDFC_CODE_NO_PIPE; + } + + SClientUdfTask *task = taosMemoryCalloc(1, sizeof(SClientUdfTask)); task->errCode = 0; - task->session = (SUdfUvSession *) handle; + task->session = session; task->type = UDF_TASK_TEARDOWN; SUdfTeardownRequest *req = &task->_teardown.req; req->udfHandle = task->session->severHandle; - udfcRunUvTask(task, UV_TASK_REQ_RSP); + udfcRunUdfUvTask(task, UV_TASK_REQ_RSP); SUdfTeardownResponse *rsp = &task->_teardown.rsp; int32_t err = task->errCode; - udfcRunUvTask(task, UV_TASK_DISCONNECT); + udfcRunUdfUvTask(task, UV_TASK_DISCONNECT); taosMemoryFree(task->session); taosMemoryFree(task); @@ -1219,7 +1239,7 @@ int32_t teardownUdf(UdfcFuncHandle handle) { //memory layout |---SUdfAggRes----|-----final result-----|---inter result----| typedef struct SUdfAggRes { - SUdfUvSession *session; + SClientUdfUvSession *session; int8_t finalResNum; int8_t interResNum; char* finalResBuf; @@ -1239,10 +1259,12 @@ bool udfAggInit(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo* pResult return false; } UdfcFuncHandle handle; - if (setupUdf((char*)pCtx->udfName, &handle) != 0) { + int32_t udfCode = 0; + if ((udfCode = setupUdf((char*)pCtx->udfName, &handle)) != 0) { + fnError("udfAggInit error. step setupUdf. 
udf code: %d", udfCode); return false; } - SUdfUvSession *session = (SUdfUvSession *)handle; + SClientUdfUvSession *session = (SClientUdfUvSession *)handle; SUdfAggRes *udfRes = (SUdfAggRes*)GET_ROWCELL_INTERBUF(pResultCellInfo); int32_t envSize = sizeof(SUdfAggRes) + session->outputLen + session->bufSize; memset(udfRes, 0, envSize); @@ -1250,9 +1272,10 @@ bool udfAggInit(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo* pResult udfRes->finalResBuf = (char*)udfRes + sizeof(SUdfAggRes); udfRes->interResBuf = (char*)udfRes + sizeof(SUdfAggRes) + session->outputLen; - udfRes->session = (SUdfUvSession *)handle; + udfRes->session = (SClientUdfUvSession *)handle; SUdfInterBuf buf = {0}; - if (callUdfAggInit(handle, &buf) != 0) { + if ((udfCode = callUdfAggInit(handle, &buf)) != 0) { + fnError("udfAggInit error. step callUdfAggInit. udf code: %d", udfCode); return false; } udfRes->interResNum = buf.numOfResult; @@ -1265,7 +1288,7 @@ int32_t udfAggProcess(struct SqlFunctionCtx *pCtx) { int32_t numOfCols = pInput->numOfInputCols; SUdfAggRes* udfRes = (SUdfAggRes *)GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); - SUdfUvSession *session = udfRes->session; + SClientUdfUvSession *session = udfRes->session; udfRes->finalResBuf = (char*)udfRes + sizeof(SUdfAggRes); udfRes->interResBuf = (char*)udfRes + sizeof(SUdfAggRes) + session->outputLen; @@ -1296,26 +1319,28 @@ int32_t udfAggProcess(struct SqlFunctionCtx *pCtx) { .numOfResult = udfRes->interResNum}; SUdfInterBuf newState = {0}; - callUdfAggProcess(session, inputBlock, &state, &newState); - - udfRes->interResNum = newState.numOfResult; - memcpy(udfRes->interResBuf, newState.buf, newState.bufLen); - + int32_t udfCode = callUdfAggProcess(session, inputBlock, &state, &newState); + if (udfCode != 0) { + fnError("udfAggProcess error. code: %d", udfCode); + newState.numOfResult = 0; + } else { + udfRes->interResNum = newState.numOfResult; + memcpy(udfRes->interResBuf, newState.buf, newState.bufLen); + } if (newState.numOfResult == 1 || state.numOfResult == 1) { GET_RES_INFO(pCtx)->numOfRes = 1; } blockDataDestroy(inputBlock); - taosArrayDestroy(tempBlock.pDataBlock); taosMemoryFree(newState.buf); - return 0; + return TSDB_CODE_SUCCESS; } int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock) { SUdfAggRes* udfRes = (SUdfAggRes *)GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); - SUdfUvSession *session = udfRes->session; + SClientUdfUvSession *session = udfRes->session; udfRes->finalResBuf = (char*)udfRes + sizeof(SUdfAggRes); udfRes->interResBuf = (char*)udfRes + sizeof(SUdfAggRes) + session->outputLen; @@ -1324,15 +1349,22 @@ int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock) { SUdfInterBuf state = {.buf = udfRes->interResBuf, .bufLen = session->bufSize, .numOfResult = udfRes->interResNum}; - callUdfAggFinalize(session, &state, &resultBuf); - - udfRes->finalResBuf = resultBuf.buf; - udfRes->finalResNum = resultBuf.numOfResult; - - teardownUdf(session); - - if (resultBuf.numOfResult == 1) { - GET_RES_INFO(pCtx)->numOfRes = 1; + int32_t udfCallCode= 0; + udfCallCode= callUdfAggFinalize(session, &state, &resultBuf); + if (udfCallCode!= 0) { + fnError("udfAggFinalize error. callUdfAggFinalize step. 
udf code:%d", udfCallCode); + GET_RES_INFO(pCtx)->numOfRes = 0; + } else { + memcpy(udfRes->finalResBuf, resultBuf.buf, session->outputLen); + udfRes->finalResNum = resultBuf.numOfResult; + GET_RES_INFO(pCtx)->numOfRes = udfRes->finalResNum; } + + int32_t code = teardownUdf(session); + if (code != 0) { + fnError("udfAggFinalize error. teardownUdf step. udf code: %d", code); + } + return functionFinalizeWithResultBuf(pCtx, pBlock, udfRes->finalResBuf); + } \ No newline at end of file diff --git a/source/libs/function/src/udfd.c b/source/libs/function/src/udfd.c index f5e4a9c6e6..7695598fb8 100644 --- a/source/libs/function/src/udfd.c +++ b/source/libs/function/src/udfd.c @@ -30,7 +30,7 @@ typedef struct SUdfdContext { uv_loop_t *loop; uv_pipe_t ctrlPipe; uv_signal_t intrSignal; - char listenPipeName[UDF_LISTEN_PIPE_NAME_LEN]; + char listenPipeName[PATH_MAX + UDF_LISTEN_PIPE_NAME_LEN + 2]; uv_pipe_t listeningPipe; void *clientRpc; @@ -652,7 +652,7 @@ static int32_t udfdUvInit() { uv_pipe_open(&global.ctrlPipe, 0); uv_read_start((uv_stream_t *)&global.ctrlPipe, udfdCtrlAllocBufCb, udfdCtrlReadCb); - getUdfdPipeName(global.listenPipeName, UDF_LISTEN_PIPE_NAME_LEN); + getUdfdPipeName(global.listenPipeName, sizeof(global.listenPipeName)); removeListeningPipe(); @@ -696,6 +696,7 @@ static int32_t udfdRun() { fnInfo("udfd stopped. result: %s, code: %d", uv_err_name(code), code); int codeClose = uv_loop_close(global.loop); fnDebug("uv loop close. result: %s", uv_err_name(codeClose)); + removeListeningPipe(); udfdCloseClientRpc(); uv_mutex_destroy(&global.udfsMutex); taosHashCleanup(global.udfsHash); diff --git a/source/libs/function/test/udf1.c b/source/libs/function/test/udf1.c index 4384d326cb..e58c9cc00a 100644 --- a/source/libs/function/test/udf1.c +++ b/source/libs/function/test/udf1.c @@ -26,11 +26,18 @@ int32_t udf1(SUdfDataBlock* block, SUdfColumn *resultCol) { SUdfColumnData *resultData = &resultCol->colData; resultData->numOfRows = block->numOfRows; - SUdfColumnData *srcData = &block->udfCols[0]->colData; - for (int32_t i = 0; i < resultData->numOfRows; ++i) { - int32_t luckyNum = 88; - udfColSetRow(resultCol, i, (char*)&luckyNum, false); + int j = 0; + for (; j < block->numOfCols; ++j) { + if (udfColDataIsNull(block->udfCols[j], i)) { + udfColDataSetNull(resultCol, i); + break; + } + } + if ( j == block->numOfCols) { + int32_t luckyNum = 88; + udfColDataSet(resultCol, i, (char *)&luckyNum, false); + } } return 0; diff --git a/source/libs/function/test/udf2.c b/source/libs/function/test/udf2.c index 69ed515d2b..be485bc905 100644 --- a/source/libs/function/test/udf2.c +++ b/source/libs/function/test/udf2.c @@ -26,24 +26,34 @@ int32_t udf2_start(SUdfInterBuf *buf) { int32_t udf2(SUdfDataBlock* block, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf) { int64_t sumSquares = *(int64_t*)interBuf->buf; + int8_t numOutput = 0; for (int32_t i = 0; i < block->numOfCols; ++i) { for (int32_t j = 0; j < block->numOfRows; ++j) { SUdfColumn* col = block->udfCols[i]; - //TODO: check the bitmap for null value - int32_t* rows = (int32_t*)col->colData.fixLenCol.data; - sumSquares += rows[j] * rows[j]; + if (udfColDataIsNull(col, j)) { + continue; + } + + char* cell = udfColDataGetData(col, j); + int32_t num = *(int32_t*)cell; + sumSquares += num * num; + numOutput = 1; } } - *(int64_t*)(newInterBuf->buf) = sumSquares; - newInterBuf->bufLen = sizeof(int64_t); - //TODO: if all null value, numOfResult = 0; - newInterBuf->numOfResult = 1; + if (numOutput == 1) { + *(int64_t*)(newInterBuf->buf) = sumSquares; + 
newInterBuf->bufLen = sizeof(int64_t); + } + newInterBuf->numOfResult = numOutput; return 0; } int32_t udf2_finish(SUdfInterBuf* buf, SUdfInterBuf *resultData) { - //TODO: check numOfResults; + if (buf->numOfResult == 0) { + resultData->numOfResult = 0; + return 0; + } int64_t sumSquares = *(int64_t*)(buf->buf); *(double*)(resultData->buf) = sqrt(sumSquares); resultData->bufLen = sizeof(double); diff --git a/source/libs/index/src/indexCache.c b/source/libs/index/src/indexCache.c index b3ae7b7dbe..929f33909e 100644 --- a/source/libs/index/src/indexCache.c +++ b/source/libs/index/src/indexCache.c @@ -30,6 +30,7 @@ static void indexMemUnRef(MemTable* tbl); static void indexCacheTermDestroy(CacheTerm* ct); static int32_t indexCacheTermCompare(const void* l, const void* r); +static int32_t indexCacheJsonTermCompare(const void* l, const void* r); static char* indexCacheTermGet(const void* pData); static MemTable* indexInternalCacheCreate(int8_t type); @@ -63,6 +64,7 @@ typedef enum { MATCH, CONTINUE, BREAK } TExeCond; typedef TExeCond (*_cache_range_compare)(void* a, void* b, int8_t type); static TExeCond tDoCommpare(__compar_fn_t func, int8_t comType, void* a, void* b) { + // optime later int32_t ret = func(a, b); switch (comType) { case QUERY_LESS_THAN: { @@ -242,6 +244,7 @@ static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* term, SIdxTempResul break; } CacheTerm* c = (CacheTerm*)SL_GET_NODE_DATA(node); + if (0 == strcmp(c->colVal, pCt->colVal)) { if (c->operaType == ADD_VALUE) { INDEX_MERGE_ADD_DEL(tr->deled, tr->added, c->uid) @@ -311,6 +314,7 @@ static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTe } char* key = indexCacheTermGet(pCt); + // SSkipListIterator* iter = tSkipListCreateIter(mem->mem); SSkipListIterator* iter = tSkipListCreateIterFromVal(mem->mem, key, TSDB_DATA_TYPE_BINARY, TSDB_ORDER_ASC); while (tSkipListIterNext(iter)) { SSkipListNode* node = tSkipListIterGet(iter); @@ -318,6 +322,10 @@ static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTe break; } CacheTerm* c = (CacheTerm*)SL_GET_NODE_DATA(node); + printf("json val: %s\n", c->colVal); + if (0 != strncmp(c->colVal, term->colName, term->nColName)) { + continue; + } TExeCond cond = cmpFn(c->colVal + skip, term->colVal, dType); if (cond == MATCH) { @@ -598,24 +606,11 @@ int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTempResult* result indexMemRef(imm); taosThreadMutexUnlock(&pCache->mtx); - // SIndexTerm* term = query->term; - // EIndexQueryType qtype = query->qType; - - // bool isJson = INDEX_TYPE_CONTAIN_EXTERN_TYPE(term->colType, TSDB_DATA_TYPE_JSON); - // char* p = term->colVal; - // if (isJson) { - // p = indexPackJsonData(term); - //} - // CacheTerm ct = {.colVal = p, .version = atomic_load_32(&pCache->version)}; - int ret = indexQueryMem(mem, query, result, s); if (ret == 0 && *s != kTypeDeletion) { // continue search in imm ret = indexQueryMem(imm, query, result, s); } - // if (isJson) { - // taosMemoryFreeClear(p); - //} indexMemUnRef(mem); indexMemUnRef(imm); @@ -682,14 +677,52 @@ static int32_t indexCacheTermCompare(const void* l, const void* r) { return cmp; } +static int indexFindCh(char* a, char c) { + char* p = a; + while (*p != 0 && *p++ != c) { + } + return p - a; +} +static int indexCacheJsonTermCompareImpl(char* a, char* b) { + int alen = indexFindCh(a, '&'); + int blen = indexFindCh(b, '&'); + + int cmp = strncmp(a, b, MIN(alen, blen)); + if (cmp == 0) { + cmp = alen - blen; + if (cmp != 0) { + return cmp; + } + cmp = *(a + alen) 
- *(b + blen); + if (cmp != 0) { + return cmp; + } + alen += 2; + blen += 2; + cmp = strcmp(a + alen, b + blen); + } + return cmp; +} +static int32_t indexCacheJsonTermCompare(const void* l, const void* r) { + CacheTerm* lt = (CacheTerm*)l; + CacheTerm* rt = (CacheTerm*)r; + // compare colVal + int cmp = indexCacheJsonTermCompareImpl(lt->colVal, rt->colVal); + if (cmp == 0) { + return rt->version - lt->version; + } + return cmp; +} static MemTable* indexInternalCacheCreate(int8_t type) { - type = INDEX_TYPE_CONTAIN_EXTERN_TYPE(type, TSDB_DATA_TYPE_JSON) ? TSDB_DATA_TYPE_BINARY : type; + int ttype = INDEX_TYPE_CONTAIN_EXTERN_TYPE(type, TSDB_DATA_TYPE_JSON) ? TSDB_DATA_TYPE_BINARY : type; + int32_t (*cmpFn)(const void* l, const void* r) = + INDEX_TYPE_CONTAIN_EXTERN_TYPE(type, TSDB_DATA_TYPE_JSON) ? indexCacheJsonTermCompare : indexCacheTermCompare; MemTable* tbl = taosMemoryCalloc(1, sizeof(MemTable)); indexMemRef(tbl); - if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { - tbl->mem = tSkipListCreate(MAX_SKIP_LIST_LEVEL, type, MAX_INDEX_KEY_LEN, indexCacheTermCompare, SL_ALLOW_DUP_KEY, - indexCacheTermGet); + if (ttype == TSDB_DATA_TYPE_BINARY || ttype == TSDB_DATA_TYPE_NCHAR) { + tbl->mem = + tSkipListCreate(MAX_SKIP_LIST_LEVEL, ttype, MAX_INDEX_KEY_LEN, cmpFn, SL_ALLOW_DUP_KEY, indexCacheTermGet); } return tbl; } diff --git a/source/libs/index/test/jsonUT.cc b/source/libs/index/test/jsonUT.cc index f789d23136..e5692b98f9 100644 --- a/source/libs/index/test/jsonUT.cc +++ b/source/libs/index/test/jsonUT.cc @@ -129,7 +129,7 @@ TEST_F(JsonEnv, testWriteMillonData) { SIndexMultiTerm* terms = indexMultiTermCreate(); indexMultiTermAdd(terms, term); - for (size_t i = 0; i < 1000000; i++) { + for (size_t i = 0; i < 1000; i++) { tIndexJsonPut(index, terms, i); } indexMultiTermDestroy(terms); @@ -148,4 +148,36 @@ TEST_F(JsonEnv, testWriteMillonData) { assert(100 == taosArrayGetSize(result)); indexMultiTermQueryDestroy(mq); } + { + { + std::string colName("test"); + std::string colVal("ab"); + + SIndexMultiTermQuery* mq = indexMultiTermQueryCreate(MUST); + SIndexTerm* q = indexTermCreate(1, ADD_VALUE, TSDB_DATA_TYPE_BINARY, colName.c_str(), colName.size(), + colVal.c_str(), colVal.size()); + + SArray* result = taosArrayInit(1, sizeof(uint64_t)); + indexMultiTermQueryAdd(mq, q, QUERY_GREATER_THAN); + tIndexJsonSearch(index, mq, result); + assert(0 == taosArrayGetSize(result)); + indexMultiTermQueryDestroy(mq); + } + { + { + std::string colName("test"); + std::string colVal("ab"); + + SIndexMultiTermQuery* mq = indexMultiTermQueryCreate(MUST); + SIndexTerm* q = indexTermCreate(1, ADD_VALUE, TSDB_DATA_TYPE_BINARY, colName.c_str(), colName.size(), + colVal.c_str(), colVal.size()); + + SArray* result = taosArrayInit(1, sizeof(uint64_t)); + indexMultiTermQueryAdd(mq, q, QUERY_GREATER_EQUAL); + tIndexJsonSearch(index, mq, result); + assert(100 == taosArrayGetSize(result)); + indexMultiTermQueryDestroy(mq); + } + } + } } diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 5dcacc4354..507cd79411 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -168,8 +168,6 @@ const char* nodesNodeName(ENodeType type) { return "ShowConsumersStmt"; case QUERY_NODE_SHOW_SUBSCRIBES_STMT: return "ShowSubscribesStmt"; - case QUERY_NODE_SHOW_TRANS_STMT: - return "ShowTransStmt"; case QUERY_NODE_SHOW_SMAS_STMT: return "ShowSmasStmt"; case QUERY_NODE_SHOW_CONFIGS_STMT: @@ -1972,10 +1970,10 @@ static int32_t 
datumToJson(const void* pObj, SJson* pJson) { code = tjsonAddDoubleToObject(pJson, jkValueDatum, pNode->datum.d); break; case TSDB_DATA_TYPE_NCHAR: { - //cJSON only support utf-8 encoding. Convert memory content to hex string. - char *buf = taosMemoryCalloc(varDataLen(pNode->datum.p) * 2 + 1, sizeof(char)); + // cJSON only support utf-8 encoding. Convert memory content to hex string. + char* buf = taosMemoryCalloc(varDataLen(pNode->datum.p) * 2 + 1, sizeof(char)); code = taosHexEncode(varDataVal(pNode->datum.p), buf, varDataLen(pNode->datum.p)); - if(code != TSDB_CODE_SUCCESS) { + if (code != TSDB_CODE_SUCCESS) { taosMemoryFree(buf); return TSDB_CODE_TSC_INVALID_VALUE; } @@ -2086,7 +2084,7 @@ static int32_t jsonToDatum(const SJson* pJson, void* pObj) { } varDataSetLen(pNode->datum.p, pNode->node.resType.bytes); if (TSDB_DATA_TYPE_NCHAR == pNode->node.resType.type) { - char *buf = taosMemoryCalloc(1, pNode->node.resType.bytes * 2 + VARSTR_HEADER_SIZE + 1); + char* buf = taosMemoryCalloc(1, pNode->node.resType.bytes * 2 + VARSTR_HEADER_SIZE + 1); if (NULL == buf) { code = TSDB_CODE_OUT_OF_MEMORY; break; diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index 4ae502b0d8..eeb069383f 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -190,7 +190,6 @@ SNodeptr nodesMakeNode(ENodeType type) { case QUERY_NODE_SHOW_TOPICS_STMT: case QUERY_NODE_SHOW_CONSUMERS_STMT: case QUERY_NODE_SHOW_SUBSCRIBES_STMT: - case QUERY_NODE_SHOW_TRANS_STMT: case QUERY_NODE_SHOW_SMAS_STMT: case QUERY_NODE_SHOW_CONFIGS_STMT: case QUERY_NODE_SHOW_QUERIES_STMT: diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index e36d0d9a54..e8ac562072 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -1300,9 +1300,10 @@ SNode* createDropStreamStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const } SNode* createKillStmt(SAstCreateContext* pCxt, ENodeType type, const SToken* pId) { - SNode* pStmt = nodesMakeNode(type); + SKillStmt* pStmt = nodesMakeNode(type); CHECK_OUT_OF_MEM(pStmt); - return pStmt; + pStmt->targetId = strtol(pId->z, NULL, 10); + return (SNode*)pStmt; } SNode* createMergeVgroupStmt(SAstCreateContext* pCxt, const SToken* pVgId1, const SToken* pVgId2) { diff --git a/source/libs/parser/src/parAuthenticator.c b/source/libs/parser/src/parAuthenticator.c index c343c934ea..8f686cefce 100644 --- a/source/libs/parser/src/parAuthenticator.c +++ b/source/libs/parser/src/parAuthenticator.c @@ -23,12 +23,17 @@ typedef struct SAuthCxt { static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt); -static int32_t checkAuth(SParseContext* pCxt, const char* dbName, AUTH_TYPE type) { +static int32_t checkAuth(SParseContext* pCxt, const char* pDbName, AUTH_TYPE type) { if (pCxt->isSuperUser) { return TSDB_CODE_SUCCESS; } + SName name; + tNameSetDbName(&name, pCxt->acctId, pDbName, strlen(pDbName)); + char dbFname[TSDB_DB_FNAME_LEN] = {0}; + tNameGetFullDbName(&name, dbFname); bool pass = false; - int32_t code = catalogChkAuth(pCxt->pCatalog, pCxt->pTransporter, &pCxt->mgmtEpSet, pCxt->pUser, dbName, type, &pass); + int32_t code = + catalogChkAuth(pCxt->pCatalog, pCxt->pTransporter, &pCxt->mgmtEpSet, pCxt->pUser, dbFname, type, &pass); return TSDB_CODE_SUCCESS == code ? (pass ? 
TSDB_CODE_SUCCESS : TSDB_CODE_PAR_PERMISSION_DENIED) : code; } @@ -130,7 +135,6 @@ static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) { case QUERY_NODE_SHOW_TOPICS_STMT: case QUERY_NODE_SHOW_CONSUMERS_STMT: case QUERY_NODE_SHOW_SUBSCRIBES_STMT: - case QUERY_NODE_SHOW_TRANS_STMT: case QUERY_NODE_SHOW_SMAS_STMT: case QUERY_NODE_SHOW_CONFIGS_STMT: case QUERY_NODE_SHOW_CONNECTIONS_STMT: diff --git a/source/libs/parser/src/parInsert.c b/source/libs/parser/src/parInsert.c index 05d247c037..11dfe60015 100644 --- a/source/libs/parser/src/parInsert.c +++ b/source/libs/parser/src/parInsert.c @@ -1549,7 +1549,7 @@ typedef struct SmlExecHandle { SQuery* pQuery; } SSmlExecHandle; -static int32_t smlBoundColumns(SArray *cols, SParsedDataColInfo* pColList, SSchema* pSchema) { +static int32_t smlBoundColumnData(SArray *cols, SParsedDataColInfo* pColList, SSchema* pSchema) { col_id_t nCols = pColList->numOfCols; pColList->numOfBound = 0; @@ -1620,7 +1620,7 @@ static int32_t smlBoundColumns(SArray *cols, SParsedDataColInfo* pColList, SSche return TSDB_CODE_SUCCESS; } -static int32_t smlBoundTags(SArray *cols, SKVRowBuilder *tagsBuilder, SParsedDataColInfo* tags, SSchema* pSchema, SKVRow *row, SMsgBuf *msg) { +static int32_t smlBuildTagRow(SArray *cols, SKVRowBuilder *tagsBuilder, SParsedDataColInfo* tags, SSchema* pSchema, SKVRow *row, SMsgBuf *msg) { if (tdInitKVRowBuilder(tagsBuilder) < 0) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -1642,20 +1642,20 @@ static int32_t smlBoundTags(SArray *cols, SKVRowBuilder *tagsBuilder, SParsedDat return TSDB_CODE_SUCCESS; } -int32_t smlBindData(void *handle, SArray *tags, SArray *colsFormat, SHashObj *colsHash, SArray *cols, bool format, +int32_t smlBindData(void *handle, SArray *tags, SArray *colsFormat, SArray *colsSchema, SArray *cols, bool format, STableMeta *pTableMeta, char *tableName, char *msgBuf, int16_t msgBufLen) { SMsgBuf pBuf = {.buf = msgBuf, .len = msgBufLen}; SSmlExecHandle *smlHandle = (SSmlExecHandle *)handle; SSchema* pTagsSchema = getTableTagSchema(pTableMeta); setBoundColumnInfo(&smlHandle->tags, pTagsSchema, getNumOfTags(pTableMeta)); - int ret = smlBoundColumns(tags, &smlHandle->tags, pTagsSchema); + int ret = smlBoundColumnData(tags, &smlHandle->tags, pTagsSchema); if(ret != TSDB_CODE_SUCCESS){ buildInvalidOperationMsg(&pBuf, "bound tags error"); return ret; } SKVRow row = NULL; - ret = smlBoundTags(tags, &smlHandle->tagsBuilder, &smlHandle->tags, pTagsSchema, &row, &pBuf); + ret = smlBuildTagRow(tags, &smlHandle->tagsBuilder, &smlHandle->tags, pTagsSchema, &row, &pBuf); if(ret != TSDB_CODE_SUCCESS){ return ret; } @@ -1673,21 +1673,7 @@ int32_t smlBindData(void *handle, SArray *tags, SArray *colsFormat, SHashObj *co SSchema* pSchema = getTableColumnSchema(pTableMeta); - - if(format){ - ret = smlBoundColumns(taosArrayGetP(colsFormat, 0), &pDataBlock->boundColumnInfo, pSchema); - }else{ - SArray *columns = taosArrayInit(16, POINTER_BYTES); - void **p1 = taosHashIterate(colsHash, NULL); - while (p1) { - SSmlKv* kv = *p1; - taosArrayPush(columns, &kv); - p1 = taosHashIterate(colsHash, p1); - } - ret = smlBoundColumns(columns, &pDataBlock->boundColumnInfo, pSchema); - taosArrayDestroy(columns); - } - + ret = smlBoundColumnData(colsSchema, &pDataBlock->boundColumnInfo, pSchema); if(ret != TSDB_CODE_SUCCESS){ buildInvalidOperationMsg(&pBuf, "bound cols error"); return ret; @@ -1712,14 +1698,16 @@ int32_t smlBindData(void *handle, SArray *tags, SArray *colsFormat, SHashObj *co STSRow* row = (STSRow*)(pDataBlock->pData + pDataBlock->size); // skip the 
SSubmitBlk header tdSRowResetBuf(pBuilder, row); void *rowData = NULL; + size_t rowDataSize = 0; if(format){ rowData = taosArrayGetP(colsFormat, r); + rowDataSize = taosArrayGetSize(rowData); }else{ rowData = taosArrayGetP(cols, r); } // 1. set the parsed value from sql string - for (int c = 0; c < spd->numOfBound; ++c) { + for (int c = 0, j = 0; c < spd->numOfBound; ++c) { SSchema* pColSchema = &pSchema[spd->boundColumns[c] - 1]; param.schema = pColSchema; @@ -1727,23 +1715,27 @@ int32_t smlBindData(void *handle, SArray *tags, SArray *colsFormat, SHashObj *co SSmlKv *kv = NULL; if(format){ - kv = taosArrayGetP(rowData, c); - if (!kv){ - char msg[64] = {0}; - sprintf(msg, "cols num not the same like before:%d", r); - return buildInvalidOperationMsg(&pBuf, msg); + if(j < rowDataSize){ + kv = taosArrayGetP(rowData, j); + if (rowDataSize != spd->numOfBound && (kv->keyLen != strlen(pColSchema->name) || strncmp(kv->key, pColSchema->name, kv->keyLen) != 0)){ + kv = NULL; + }else{ + j++; + } } }else{ void **p =taosHashGet(rowData, pColSchema->name, strlen(pColSchema->name)); - kv = *p; + if(p) kv = *p; } - if (kv->length == 0) { + if (!kv || kv->length == 0) { MemRowAppend(&pBuf, NULL, 0, ¶m); } else { int32_t colLen = pColSchema->bytes; if (IS_VAR_DATA_TYPE(pColSchema->type)) { colLen = kv->length; + } else if(pColSchema->type == TSDB_DATA_TYPE_TIMESTAMP){ + kv->i = convertTimePrecision(kv->i, TSDB_TIME_PRECISION_NANO, pTableMeta->tableInfo.precision); } MemRowAppend(&pBuf, &(kv->value), colLen, ¶m); diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index faf920ead0..1bd7e28c74 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -3127,6 +3127,13 @@ static int32_t translateCreateFunction(STranslateContext* pCxt, SCreateFunctionS return code; } +static int32_t translateDropFunction(STranslateContext* pCxt, SDropFunctionStmt* pStmt) { + SDropFuncReq req = {0}; + strcpy(req.name, pStmt->funcName); + req.igNotExists = pStmt->ignoreNotExists; + return buildCmdMsg(pCxt, TDMT_MND_DROP_FUNC, (FSerializeFunc)tSerializeSDropFuncReq, &req); +} + static int32_t translateGrant(STranslateContext* pCxt, SGrantStmt* pStmt) { SAlterUserReq req = {0}; if (PRIVILEGE_TYPE_TEST_MASK(pStmt->privileges, PRIVILEGE_TYPE_ALL) || @@ -3266,6 +3273,9 @@ static int32_t translateQuery(STranslateContext* pCxt, SNode* pNode) { case QUERY_NODE_CREATE_FUNCTION_STMT: code = translateCreateFunction(pCxt, (SCreateFunctionStmt*)pNode); break; + case QUERY_NODE_DROP_FUNCTION_STMT: + code = translateDropFunction(pCxt, (SDropFunctionStmt*)pNode); + break; case QUERY_NODE_GRANT_STMT: code = translateGrant(pCxt, (SGrantStmt*)pNode); break; @@ -4121,6 +4131,7 @@ static int32_t rewriteQuery(STranslateContext* pCxt, SQuery* pQuery) { case QUERY_NODE_SHOW_QUERIES_STMT: case QUERY_NODE_SHOW_CLUSTER_STMT: case QUERY_NODE_SHOW_TOPICS_STMT: + case QUERY_NODE_SHOW_TRANSACTIONS_STMT: code = rewriteShow(pCxt, pQuery); break; case QUERY_NODE_CREATE_TABLE_STMT: diff --git a/source/libs/parser/test/mockCatalog.cpp b/source/libs/parser/test/mockCatalog.cpp index a82fbfce26..7297e4e93a 100644 --- a/source/libs/parser/test/mockCatalog.cpp +++ b/source/libs/parser/test/mockCatalog.cpp @@ -100,6 +100,14 @@ void generateInformationSchema(MockCatalogService* mcs) { } } +void generatePerformanceSchema(MockCatalogService* mcs) { + { + ITableBuilder& builder = mcs->createTableBuilder("performance_schema", "trans", TSDB_SYSTEM_TABLE, 1) + .addColumn("id", 
TSDB_DATA_TYPE_INT); + builder.done(); + } +} + /* * Table:t1 * Field | Type | DataType | Bytes | @@ -244,6 +252,7 @@ void initMetaDataEnv() { void generateMetaData() { generateInformationSchema(mockCatalogService.get()); + generatePerformanceSchema(mockCatalogService.get()); generateTestT1(mockCatalogService.get()); generateTestST1(mockCatalogService.get()); mockCatalogService->showTables(); diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c index f5fab814ff..5231890821 100644 --- a/source/libs/scalar/src/scalar.c +++ b/source/libs/scalar/src/scalar.c @@ -347,10 +347,21 @@ int32_t sclExecFunction(SFunctionNode *node, SScalarCtx *ctx, SScalarParam *outp if (fmIsUserDefinedFunc(node->funcId)) { UdfcFuncHandle udfHandle = NULL; - SCL_ERR_JRET(setupUdf(node->functionName, &udfHandle)); + code = setupUdf(node->functionName, &udfHandle); + if (code != 0) { + sclError("fmExecFunction error. setupUdf. function name: %s, code:%d", node->functionName, code); + goto _return; + } code = callUdfScalarFunc(udfHandle, params, paramNum, output); - teardownUdf(udfHandle); - SCL_ERR_JRET(code); + if (code != 0) { + sclError("fmExecFunction error. callUdfScalarFunc. function name: %s, udf code:%d", node->functionName, code); + goto _return; + } + code = teardownUdf(udfHandle); + if (code != 0) { + sclError("fmExecFunction error. teardownUdf. function name: %s, udf code:%d", node->functionName, code); + goto _return; + } } else { SScalarFuncExecFuncs ffpSet = {0}; code = fmGetScalarFuncExecFuncs(node->funcId, &ffpSet); diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index 77caf67a2b..0161323e37 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -432,7 +432,7 @@ int32_t concatFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOu if (pInput[i].numOfRows == 1) { inputLen += (pInputData[i]->varmeta.length - VARSTR_HEADER_SIZE) * factor * (numOfRows - numOfNulls); } else { - inputLen += pInputData[i]->varmeta.length - (numOfRows - numOfNulls) * VARSTR_HEADER_SIZE; + inputLen += (pInputData[i]->varmeta.length - (numOfRows - numOfNulls) * VARSTR_HEADER_SIZE) * factor; } } @@ -510,7 +510,7 @@ int32_t concatWsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *p } else if (pInput[i].numOfRows == 1) { inputLen += (pInputData[i]->varmeta.length - VARSTR_HEADER_SIZE) * (numOfRows - numOfNulls) * factor; } else { - inputLen += pInputData[i]->varmeta.length - (numOfRows - numOfNulls) * VARSTR_HEADER_SIZE; + inputLen += (pInputData[i]->varmeta.length - (numOfRows - numOfNulls) * VARSTR_HEADER_SIZE) * factor; } } @@ -709,10 +709,6 @@ int32_t castFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutp int16_t outputType = GET_PARAM_TYPE(&pOutput[0]); int64_t outputLen = GET_PARAM_BYTES(&pOutput[0]); - if (IS_VAR_DATA_TYPE(outputType)) { - outputLen += VARSTR_HEADER_SIZE; - } - char *outputBuf = taosMemoryCalloc(outputLen * pInput[0].numOfRows, 1); char *output = outputBuf; @@ -826,7 +822,7 @@ int32_t castFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutp varDataSetLen(output, len); } //for constant conversion, need to set proper length of pOutput description - if (len < outputLen - VARSTR_HEADER_SIZE) { + if (len < outputLen) { pOutput->columnData->info.bytes = len; } break; diff --git a/source/libs/stream/src/tstream.c b/source/libs/stream/src/tstream.c index 707e153a0c..237f673a47 100644 --- a/source/libs/stream/src/tstream.c +++ b/source/libs/stream/src/tstream.c @@ 
-150,14 +150,14 @@ int32_t streamExecTask(SStreamTask* pTask, SMsgCb* pMsgCb, const void* input, in pRes = (SArray*)input; } + if (pRes == NULL || taosArrayGetSize(pRes) == 0) return 0; + // sink if (pTask->sinkType == TASK_SINK__TABLE) { /*blockDebugShowData(pRes);*/ - ASSERT(pTask->tbSink.pTSchema); - SSubmitReq* pReq = tdBlockToSubmit(pRes, pTask->tbSink.pTSchema); - tPrintFixedSchemaSubmitReq(pReq, pTask->tbSink.pTSchema); + pTask->tbSink.tbSinkFunc(pTask, pTask->tbSink.vnode, 0, pRes); } else if (pTask->sinkType == TASK_SINK__SMA) { - pTask->smaSink.smaHandle(pTask->ahandle, pTask->smaSink.smaId, pRes); + pTask->smaSink.smaSink(pTask->ahandle, pTask->smaSink.smaId, pRes); // } else if (pTask->sinkType == TASK_SINK__FETCH) { // @@ -276,7 +276,7 @@ int32_t tEncodeSStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) { } if (pTask->sinkType == TASK_SINK__TABLE) { - /*if (tEncodeI8(pEncoder, pTask->tbSink.reserved) < 0) return -1;*/ + if (tEncodeI64(pEncoder, pTask->tbSink.stbUid) < 0) return -1; if (tEncodeSSchemaWrapper(pEncoder, pTask->tbSink.pSchemaWrapper) < 0) return -1; } else if (pTask->sinkType == TASK_SINK__SMA) { if (tEncodeI64(pEncoder, pTask->smaSink.smaId) < 0) return -1; @@ -321,7 +321,7 @@ int32_t tDecodeSStreamTask(SDecoder* pDecoder, SStreamTask* pTask) { } if (pTask->sinkType == TASK_SINK__TABLE) { - /*if (tDecodeI8(pDecoder, &pTask->tbSink.reserved) < 0) return -1;*/ + if (tDecodeI64(pDecoder, &pTask->tbSink.stbUid) < 0) return -1; pTask->tbSink.pSchemaWrapper = taosMemoryCalloc(1, sizeof(SSchemaWrapper)); if (pTask->tbSink.pSchemaWrapper == NULL) return -1; if (tDecodeSSchemaWrapper(pDecoder, pTask->tbSink.pSchemaWrapper) < 0) return -1; diff --git a/source/libs/sync/src/syncCommit.c b/source/libs/sync/src/syncCommit.c index 8b54b0a090..5febb9a14c 100644 --- a/source/libs/sync/src/syncCommit.c +++ b/source/libs/sync/src/syncCommit.c @@ -72,6 +72,8 @@ void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) { "pSyncNode->pRaftStore->currentTerm:%lu", pEntry->term, pSyncNode->pRaftStore->currentTerm); } + + syncEntryDestory(pEntry); } } diff --git a/source/libs/transport/src/transSrv.c b/source/libs/transport/src/transSrv.c index 27efbcda53..7f8ad150f0 100644 --- a/source/libs/transport/src/transSrv.c +++ b/source/libs/transport/src/transSrv.c @@ -67,6 +67,7 @@ typedef struct SSrvMsg { typedef struct SWorkThrdObj { TdThread thread; + uv_connect_t connect_req; uv_pipe_t* pipe; uv_os_fd_t fd; uv_loop_t* loop; @@ -87,8 +88,10 @@ typedef struct SServerObj { // work thread info int workerIdx; int numOfThreads; + int numOfWorkerReady; SWorkThrdObj** pThreadObj; + uv_pipe_t pipeListen; uv_pipe_t** pipe; uint32_t ip; uint32_t port; @@ -161,7 +164,7 @@ static void* transWorkerThread(void* arg); static void* transAcceptThread(void* arg); // add handle loop -static bool addHandleToWorkloop(void* arg); +static bool addHandleToWorkloop(SWorkThrdObj* pThrd,char *pipeName); static bool addHandleToAcceptloop(void* arg); #define CONN_SHOULD_RELEASE(conn, head) \ @@ -577,6 +580,12 @@ void uvOnAcceptCb(uv_stream_t* stream, int status) { uv_tcp_init(pObj->loop, cli); if (uv_accept(stream, (uv_stream_t*)cli) == 0) { + if (pObj->numOfWorkerReady < pObj->numOfThreads) { + tError("worker-threads are not ready for all, need %d instead of %d.", pObj->numOfThreads, pObj->numOfWorkerReady); + uv_close((uv_handle_t*)cli, NULL); + return; + } + uv_write_t* wr = (uv_write_t*)taosMemoryMalloc(sizeof(uv_write_t)); wr->data = cli; uv_buf_t buf = uv_buf_init((char*)notify, strlen(notify)); @@ -672,15 
+681,21 @@ void* transAcceptThread(void* arg) { return NULL; } -static bool addHandleToWorkloop(void* arg) { - SWorkThrdObj* pThrd = arg; +void uvOnPipeConnectionCb(uv_connect_t *connect, int status) { + if (status != 0) { + return; + } + SWorkThrdObj* pThrd = container_of(connect, SWorkThrdObj, connect_req); + uv_read_start((uv_stream_t*)pThrd->pipe, uvAllocConnBufferCb, uvOnConnectionCb); +} +static bool addHandleToWorkloop(SWorkThrdObj* pThrd,char *pipeName) { pThrd->loop = (uv_loop_t*)taosMemoryMalloc(sizeof(uv_loop_t)); if (0 != uv_loop_init(pThrd->loop)) { return false; } uv_pipe_init(pThrd->loop, pThrd->pipe, 1); - uv_pipe_open(pThrd->pipe, pThrd->fd); + // int r = uv_pipe_open(pThrd->pipe, pThrd->fd); pThrd->pipe->data = pThrd; @@ -691,7 +706,8 @@ static bool addHandleToWorkloop(void* arg) { QUEUE_INIT(&pThrd->conn); pThrd->asyncPool = transCreateAsyncPool(pThrd->loop, 5, pThrd, uvWorkerAsyncCb); - uv_read_start((uv_stream_t*)pThrd->pipe, uvAllocConnBufferCb, uvOnConnectionCb); + uv_pipe_connect(&pThrd->connect_req, pThrd->pipe, pipeName, uvOnPipeConnectionCb); + // uv_read_start((uv_stream_t*)pThrd->pipe, uvAllocConnBufferCb, uvOnConnectionCb); return true; } @@ -802,12 +818,32 @@ static void uvDestroyConn(uv_handle_t* handle) { uv_walk(thrd->loop, uvWalkCb, NULL); } } +static void uvPipeListenCb(uv_stream_t* handle, int status) { + ASSERT(status == 0); + + SServerObj* srv = container_of(handle, SServerObj, pipeListen); + uv_pipe_t* pipe = &(srv->pipe[srv->numOfWorkerReady][0]); + ASSERT(0 == uv_pipe_init(srv->loop, pipe, 1)); + ASSERT(0 == uv_accept((uv_stream_t*)&srv->pipeListen, (uv_stream_t*)pipe)); + + ASSERT(1 == uv_is_readable((uv_stream_t*)pipe)); + ASSERT(1 == uv_is_writable((uv_stream_t*)pipe)); + ASSERT(0 == uv_is_closing((uv_handle_t*)pipe)); + + srv->numOfWorkerReady++; + + // ASSERT(0 == uv_listen((uv_stream_t*)&ctx.send.tcp, 512, uvOnAcceptCb)); + + // r = uv_read_start((uv_stream_t*)&ctx.channel, alloc_cb, read_cb); + // ASSERT(r == 0); +} void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads, void* fp, void* shandle) { SServerObj* srv = taosMemoryCalloc(1, sizeof(SServerObj)); srv->loop = (uv_loop_t*)taosMemoryMalloc(sizeof(uv_loop_t)); srv->numOfThreads = numOfThreads; srv->workerIdx = 0; + srv->numOfWorkerReady = 0; srv->pThreadObj = (SWorkThrdObj**)taosMemoryCalloc(srv->numOfThreads, sizeof(SWorkThrdObj*)); srv->pipe = (uv_pipe_t**)taosMemoryCalloc(srv->numOfThreads, sizeof(uv_pipe_t*)); srv->ip = ip; @@ -817,6 +853,16 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads, taosThreadOnce(&transModuleInit, uvInitEnv); transSrvInst++; + char pipeName[64]; + assert(0 == uv_pipe_init(srv->loop, &srv->pipeListen, 0)); +#ifdef WINDOWS + snprintf(pipeName, sizeof(pipeName), "\\\\?\\pipe\\trans.rpc\\%p-%lu", taosSafeRand(), GetCurrentProcessId()); +#else + snprintf(pipeName, sizeof(pipeName), ".trans.rpc\\%08X-%lu", taosSafeRand(), taosGetSelfPthreadId()); +#endif + assert(0 == uv_pipe_bind(&srv->pipeListen, pipeName)); + assert(0 == uv_listen((uv_stream_t*)&srv->pipeListen, SOMAXCONN, uvPipeListenCb)); + for (int i = 0; i < srv->numOfThreads; i++) { SWorkThrdObj* thrd = (SWorkThrdObj*)taosMemoryCalloc(1, sizeof(SWorkThrdObj)); thrd->pTransInst = shandle; @@ -826,17 +872,22 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads, srv->pipe[i] = (uv_pipe_t*)taosMemoryCalloc(2, sizeof(uv_pipe_t)); - uv_os_sock_t fds[2]; - if (uv_socketpair(SOCK_STREAM, 0, fds, UV_NONBLOCK_PIPE, 
UV_NONBLOCK_PIPE) != 0) { - goto End; - } - uv_pipe_init(srv->loop, &(srv->pipe[i][0]), 1); - uv_pipe_open(&(srv->pipe[i][0]), fds[1]); // init write + // #ifdef WINDOWS + // uv_file fds[2]; + // if (uv_pipe(fds, UV_READABLE_PIPE|UV_WRITABLE_PIPE|UV_NONBLOCK_PIPE, UV_READABLE_PIPE|UV_WRITABLE_PIPE|UV_NONBLOCK_PIPE) != 0) { + // #else + // uv_os_sock_t fds[2]; + // if (uv_socketpair(SOCK_STREAM, 0, fds, UV_NONBLOCK_PIPE, UV_NONBLOCK_PIPE) != 0) { + // #endif + // goto End; + // } + // uv_pipe_init(srv->loop, &(srv->pipe[i][0]), 1); + // uv_pipe_open(&(srv->pipe[i][0]), fds[1]); // init write - thrd->fd = fds[0]; + // thrd->fd = fds[0]; thrd->pipe = &(srv->pipe[i][1]); // init read - if (false == addHandleToWorkloop(thrd)) { + if (false == addHandleToWorkloop(thrd,pipeName)) { goto End; } int err = taosThreadCreate(&(thrd->thread), NULL, transWorkerThread, (void*)(thrd)); diff --git a/source/os/src/osDir.c b/source/os/src/osDir.c index 19e4defafc..72654d0084 100644 --- a/source/os/src/osDir.c +++ b/source/os/src/osDir.c @@ -26,7 +26,6 @@ typedef struct TdDirEntry { WIN32_FIND_DATA findFileData; } TdDirEntry; - typedef struct TdDir { TdDirEntry dirEntry; HANDLE hFind; @@ -59,7 +58,7 @@ void wordfree(wordexp_t *pwordexp) {} #include typedef struct dirent dirent; -typedef struct DIR TdDir; +typedef struct DIR TdDir; typedef struct dirent TdDirEntry; #endif @@ -78,14 +77,14 @@ void taosRemoveDir(const char *dirname) { taosRemoveDir(filename); } else { (void)taosRemoveFile(filename); - //printf("file:%s is removed\n", filename); + // printf("file:%s is removed\n", filename); } } taosCloseDir(&pDir); rmdir(dirname); - //printf("dir:%s is removed\n", dirname); + // printf("dir:%s is removed\n", dirname); return; } @@ -102,8 +101,8 @@ int32_t taosMkDir(const char *dirname) { int32_t taosMulMkDir(const char *dirname) { if (dirname == NULL) return -1; - char *temp = strdup(dirname); - char *pos = temp; + char * temp = strdup(dirname); + char * pos = temp; int32_t code = 0; if (strncmp(temp, "/", 1) == 0) { @@ -111,8 +110,8 @@ int32_t taosMulMkDir(const char *dirname) { } else if (strncmp(temp, "./", 2) == 0) { pos += 2; } - - for ( ; *pos != '\0'; pos++) { + + for (; *pos != '\0'; pos++) { if (*pos == '/') { *pos = '\0'; code = mkdir(temp, 0755); @@ -123,7 +122,7 @@ int32_t taosMulMkDir(const char *dirname) { *pos = '/'; } } - + if (*(pos - 1) != '/') { code = mkdir(temp, 0755); if (code < 0 && errno != EEXIST) { @@ -145,7 +144,7 @@ void taosRemoveOldFiles(const char *dirname, int32_t keepDays) { TdDirPtr pDir = taosOpenDir(dirname); if (pDir == NULL) return; - int64_t sec = taosGetTimestampSec(); + int64_t sec = taosGetTimestampSec(); TdDirEntryPtr de = NULL; while ((de = taosReadDir(pDir)) != NULL) { @@ -173,9 +172,9 @@ void taosRemoveOldFiles(const char *dirname, int32_t keepDays) { int32_t days = (int32_t)(TABS(sec - fileSec) / 86400 + 1); if (days > keepDays) { (void)taosRemoveFile(filename); - //printf("file:%s is removed, days:%d keepDays:%d", filename, days, keepDays); + // printf("file:%s is removed, days:%d keepDays:%d", filename, days, keepDays); } else { - //printf("file:%s won't be removed, days:%d keepDays:%d", filename, days, keepDays); + // printf("file:%s won't be removed, days:%d keepDays:%d", filename, days, keepDays); } } } @@ -187,7 +186,7 @@ void taosRemoveOldFiles(const char *dirname, int32_t keepDays) { int32_t taosExpandDir(const char *dirname, char *outname, int32_t maxlen) { wordexp_t full_path; if (0 != wordexp(dirname, &full_path, 0)) { - //printf("failed to expand path:%s 
since %s", dirname, strerror(errno)); + // printf("failed to expand path:%s since %s", dirname, strerror(errno)); wordfree(&full_path); return -1; } @@ -204,7 +203,7 @@ int32_t taosExpandDir(const char *dirname, char *outname, int32_t maxlen) { int32_t taosRealPath(char *dirname, char *realPath, int32_t maxlen) { char tmp[PATH_MAX] = {0}; #ifdef WINDOWS - if (_fullpath(dirname, tmp, maxlen) != NULL) { + if (_fullpath(tmp, dirname, maxlen) != NULL) { #else if (realpath(dirname, tmp) != NULL) { #endif @@ -228,9 +227,9 @@ bool taosIsDir(const char *dirname) { return false; } -char* taosDirName(char *name) { +char *taosDirName(char *name) { #ifdef WINDOWS - char Drive1[MAX_PATH], Dir1[MAX_PATH]; + char Drive1[MAX_PATH], Dir1[MAX_PATH]; _splitpath(name, Drive1, Dir1, NULL, NULL); size_t dirNameLen = strlen(Drive1) + strlen(Dir1); if (dirNameLen > 0) { @@ -242,13 +241,13 @@ char* taosDirName(char *name) { #endif } -char* taosDirEntryBaseName(char *name) { +char *taosDirEntryBaseName(char *name) { #ifdef WINDOWS char Filename1[MAX_PATH], Ext1[MAX_PATH]; _splitpath(name, NULL, NULL, Filename1, Ext1); return name + (strlen(name) - strlen(Filename1) - strlen(Ext1)); #else - return (char*)basename(name); + return (char *)basename(name); #endif } @@ -258,8 +257,8 @@ TdDirPtr taosOpenDir(const char *dirname) { } #ifdef WINDOWS - char szFind[MAX_PATH]; //这是要找的 - HANDLE hFind; + char szFind[MAX_PATH]; //这是要找的 + HANDLE hFind; TdDirPtr pDir = taosMemoryMalloc(sizeof(TdDir)); @@ -275,7 +274,6 @@ TdDirPtr taosOpenDir(const char *dirname) { #else return (TdDirPtr)opendir(dirname); #endif - } TdDirEntryPtr taosReadDir(TdDirPtr pDir) { @@ -286,9 +284,9 @@ TdDirEntryPtr taosReadDir(TdDirPtr pDir) { if (!FindNextFile(pDir->hFind, &(pDir->dirEntry.findFileData))) { return NULL; } - return (TdDirEntryPtr)&(pDir->dirEntry.findFileData); + return (TdDirEntryPtr) & (pDir->dirEntry.findFileData); #else - return (TdDirEntryPtr)readdir((DIR*)pDir); + return (TdDirEntryPtr)readdir((DIR *)pDir); #endif } @@ -299,18 +297,18 @@ bool taosDirEntryIsDir(TdDirEntryPtr pDirEntry) { #ifdef WINDOWS return (pDirEntry->findFileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0; #else - return (((dirent*)pDirEntry)->d_type & DT_DIR) != 0; + return (((dirent *)pDirEntry)->d_type & DT_DIR) != 0; #endif } -char* taosGetDirEntryName(TdDirEntryPtr pDirEntry) { +char *taosGetDirEntryName(TdDirEntryPtr pDirEntry) { if (pDirEntry == NULL) { return NULL; } #ifdef WINDOWS return pDirEntry->findFileData.cFileName; #else - return ((dirent*)pDirEntry)->d_name; + return ((dirent *)pDirEntry)->d_name; #endif } @@ -324,7 +322,7 @@ int32_t taosCloseDir(TdDirPtr *ppDir) { *ppDir = NULL; return 0; #else - closedir((DIR*)*ppDir); + closedir((DIR *)*ppDir); *ppDir = NULL; return 0; #endif diff --git a/source/os/src/osFile.c b/source/os/src/osFile.c index d378b5234a..ab68c69b8d 100644 --- a/source/os/src/osFile.c +++ b/source/os/src/osFile.c @@ -543,7 +543,7 @@ int32_t taosFsyncFile(TdFilePtr pFile) { HANDLE h = (HANDLE)_get_osfhandle(pFile->fd); - return FlushFileBuffers(h); + return !FlushFileBuffers(h); #else if (pFile == NULL) { return 0; diff --git a/source/os/src/osSocket.c b/source/os/src/osSocket.c index f3da490f36..9e29f44ed3 100644 --- a/source/os/src/osSocket.c +++ b/source/os/src/osSocket.c @@ -28,6 +28,7 @@ #else #include #include +#include #include #include #include @@ -638,6 +639,73 @@ int32_t taosKeepTcpAlive(TdSocketPtr pSocket) { return 0; } +int taosGetLocalIp(const char *eth, char *ip) { +#if defined(WINDOWS) + // DO NOTHAING + 
return 0; +#else + int fd; + struct ifreq ifr; + struct sockaddr_in sin; + + fd = socket(AF_INET, SOCK_DGRAM, 0); + if (-1 == fd) { + return -1; + } + strncpy(ifr.ifr_name, eth, IFNAMSIZ); + ifr.ifr_name[IFNAMSIZ - 1] = 0; + + if (ioctl(fd, SIOCGIFADDR, &ifr) < 0) { + taosCloseSocketNoCheck1(fd); + return -1; + } + memcpy(&sin, &ifr.ifr_addr, sizeof(sin)); + snprintf(ip, 64, "%s", inet_ntoa(sin.sin_addr)); + taosCloseSocketNoCheck1(fd); +#endif + return 0; +} +int taosValidIp(uint32_t ip) { +#if defined(WINDOWS) + // DO NOTHING + return 0; +#else + int ret = -1; + int fd; + + struct ifconf ifconf; + + char buf[512] = {0}; + ifconf.ifc_len = 512; + ifconf.ifc_buf = buf; + + if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) < 0) { + return -1; + } + + ioctl(fd, SIOCGIFCONF, &ifconf); + struct ifreq *ifreq = (struct ifreq *)ifconf.ifc_buf; + for (int i = (ifconf.ifc_len / sizeof(struct ifreq)); i > 0; i--) { + char ip_str[64] = {0}; + if (ifreq->ifr_flags == AF_INET) { + ret = taosGetLocalIp(ifreq->ifr_name, ip_str); + if (ret != 0) { + break; + } + ret = -1; + if (ip == (uint32_t)taosInetAddr(ip_str)) { + ret = 0; + break; + } + ifreq++; + } + } + taosCloseSocketNoCheck1(fd); + return ret; +#endif + return 0; +} + bool taosValidIpAndPort(uint32_t ip, uint16_t port) { struct sockaddr_in serverAdd; SocketFd fd; @@ -677,13 +745,8 @@ bool taosValidIpAndPort(uint32_t ip, uint16_t port) { taosCloseSocket(&pSocket); return false; } - if (listen(pSocket->fd, 1024) < 0) { - // printf("listen tcp server socket failed, 0x%x:%hu(%s)", ip, port, strerror(errno)); - taosCloseSocket(&pSocket); - return false; - } taosCloseSocket(&pSocket); - return true; + return 0 == taosValidIp(ip) ? true : false; } TdSocketServerPtr taosOpenTcpServerSocket(uint32_t ip, uint16_t port) { struct sockaddr_in serverAdd; diff --git a/source/os/src/osSysinfo.c b/source/os/src/osSysinfo.c index 3c3612854c..348424b372 100644 --- a/source/os/src/osSysinfo.c +++ b/source/os/src/osSysinfo.c @@ -869,11 +869,15 @@ SysNameInfo taosGetSysNameInfo() { SysNameInfo info = {0}; DWORD dwVersion = GetVersion(); - tstrncpy(info.sysname, getenv("OS"), sizeof(info.sysname)); - tstrncpy(info.nodename, getenv("COMPUTERNAME"), sizeof(info.nodename)); + char *tmp = NULL; + tmp = getenv("OS"); + if (tmp != NULL) tstrncpy(info.sysname, tmp, sizeof(info.sysname)); + tmp = getenv("COMPUTERNAME"); + if (tmp != NULL) tstrncpy(info.nodename, tmp, sizeof(info.nodename)); sprintf_s(info.release, sizeof(info.release), "%d", dwVersion & 0x0F); sprintf_s(info.version, sizeof(info.release), "%d", (dwVersion >> 8) & 0x0F); - tstrncpy(info.machine, getenv("PROCESSOR_ARCHITECTURE"), sizeof(info.machine)); + tmp = getenv("PROCESSOR_ARCHITECTURE"); + if (tmp != NULL) tstrncpy(info.machine, tmp, sizeof(info.machine)); return info; #elif defined(_TD_DARWIN_64) diff --git a/source/util/test/encodeTest.cpp b/source/util/test/encodeTest.cpp index 038926021e..974677d26c 100644 --- a/source/util/test/encodeTest.cpp +++ b/source/util/test/encodeTest.cpp @@ -443,4 +443,4 @@ TEST(td_encode_test, compound_struct_encode_test) { #pragma GCC diagnostic pop -#endif \ No newline at end of file +#endif diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index b95e822df3..f2b9e0caab 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -59,6 +59,12 @@ # ---- table ./test.sh -f tsim/table/basic1.sim +# ---- tstream +./test.sh -f tsim/tstream/basic0.sim + +# ---- transaction + ./test.sh -f tsim/trans/create_db.sim + # ---- tmq ./test.sh 
-f tsim/tmq/basic1.sim ./test.sh -f tsim/tmq/basic2.sim diff --git a/tests/script/tsim/query/udf.sim b/tests/script/tsim/query/udf.sim index c76569b40f..cabb88ea09 100644 --- a/tests/script/tsim/query/udf.sim +++ b/tests/script/tsim/query/udf.sim @@ -64,6 +64,47 @@ if $data00 != 1.414213562 then return -1 endi -#sql drop function udf1; -#sql drop function udf2; -system sh/exec.sh -n dnode1 -s stop -x SIGKILL +sql insert into t2 values(now+2s, 1, null)(now+3s, null, 2); +sql select udf1(f1, f2) from t2; +print $rows , $data00 , $data10 , $data20 , $data30 +if $rows != 4 then + return -1 +endi +if $data00 != 88 then + return -1 +endi +if $data10 != 88 then + return -1 +endi + +if $data20 != NULL then + return -1 +endi + +if $data30 != NULL then + return -1 +endi + +sql select udf2(f1, f2) from t2; +print $rows, $data00 +if $rows != 1 then + return -1 +endi +if $data00 != 2.645751311 then + return -1 +endi +sql drop function udf1; +sql show functions; +if $rows != 1 then + return -1 +endi +if $data00 != @udf2@ then + return -1 + endi +sql drop function udf2; +sql show functions; +if $rows != 0 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGTERM diff --git a/tests/script/tsim/trans/create_db.sim b/tests/script/tsim/trans/create_db.sim new file mode 100644 index 0000000000..0db5add88a --- /dev/null +++ b/tests/script/tsim/trans/create_db.sim @@ -0,0 +1,166 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/cfg.sh -n dnode1 -c transPullupInterval -v 1 +system sh/cfg.sh -n dnode2 -c transPullupInterval -v 1 +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start +sql connect + +print =============== show dnodes +sql show dnodes; +if $rows != 1 then + return -1 +endi + +if $data00 != 1 then + return -1 +endi + +sql show mnodes; +if $rows != 1 then + return -1 +endi + +if $data00 != 1 then + return -1 +endi + +if $data02 != LEADER then + return -1 +endi + +print =============== create dnodes +sql create dnode $hostname port 7200 +sleep 2000 + +sql show dnodes; +if $rows != 2 then + return -1 +endi + +if $data00 != 1 then + return -1 +endi + +if $data10 != 2 then + return -1 +endi + +print =============== kill dnode2 +system sh/exec.sh -n dnode2 -s stop -x SIGINT + +print =============== create database +sql show transactions +if $rows != 0 then + return -1 +endi + +sql_error create database d1 vgroups 2; + +print =============== show transactions +sql show transactions +if $rows != 1 then + return -1 +endi + +if $data[0][0] != 2 then + return -1 +endi + +if $data[0][2] != undoAction then + return -1 +endi + +if $data[0][3] != d1 then + return -1 +endi + +if $data[0][4] != create-db then + return -1 +endi + +if $data[0][7] != @Unable to establish connection@ then + return -1 +endi + +sql_error create database d1 vgroups 2; + +print =============== start dnode2 +system sh/exec.sh -n dnode2 -s start +sleep 3000 + +sql show transactions +if $rows != 0 then + return -1 +endi + +sql create database d1 vgroups 2; + +print =============== kill dnode2 +system sh/exec.sh -n dnode2 -s stop -x SIGINT + +print =============== create database +sql show transactions +if $rows != 0 then + return -1 +endi + +sql_error create database d2 vgroups 2; + +print =============== show transactions +sql show transactions +if $rows != 1 then + return -1 +endi + +if $data[0][0] != 4 then + return -1 +endi + +if $data[0][2] != undoAction then + return -1 +endi + +if $data[0][3] != d2 then + return -1 +endi + +if $data[0][4] != 
create-db then + return -1 +endi + +if $data[0][7] != @Unable to establish connection@ then + return -1 +endi + +sql_error create database d2 vgroups 2; + +print =============== kill transaction +sql kill transaction 4; +sleep 2000 + +sql show transactions +if $rows != 0 then + return -1 +endi + +print =============== start dnode2 +system sh/exec.sh -n dnode2 -s start +sleep 3000 + +sql show transactions +if $rows != 0 then + return -1 +endi + +sql create database d2 vgroups 2; +sql_error kill transaction 1; +sql_error kill transaction 2; +sql_error kill transaction 3; +sql_error kill transaction 4; +sql_error kill transaction 5; + +return +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/tstream/basic0.sim b/tests/script/tsim/tstream/basic0.sim new file mode 100644 index 0000000000..2a1bd14531 --- /dev/null +++ b/tests/script/tsim/tstream/basic0.sim @@ -0,0 +1,139 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print =============== create database +sql create database d0 vgroups 1 +sql show databases +if $rows != 3 then + return -1 +endi + +print $data00 $data01 $data02 + +sql use d0 + +print =============== create super table, include column type for count/sum/min/max/first +sql create table if not exists stb (ts timestamp, k int) tags (a int) + +sql show stables +if $rows != 1 then + return -1 +endi + +print =============== create child table +sql create table ct1 using stb tags(1000) +sql create table ct2 using stb tags(2000) +sql create table ct3 using stb tags(3000) + +sql show tables +if $rows != 3 then + return -1 +endi + +sql create stream s1 into outstb as select _wstartts, min(k), max(k), sum(k) as sum_alias from ct1 interval(10m) + +sql show stables +if $rows != 2 then + return -1 +endi + +print =============== insert data + +sql insert into ct1 values('2022-05-08 03:42:00.000', 234) +sleep 100 + +#=================================================================== +print =============== query data from child table + +sql select `_wstartts`,`min(k)`,`max(k)`,sum_alias from outstb +print rows: $rows +print $data00 $data01 $data02 $data03 +if $rows != 1 then + return -1 +endi + +if $data01 != 234 then + return -1 +endi + +if $data02 != 234 then + return -1 +endi + +if $data03 != 234 then + return -1 +endi + +#=================================================================== +print =============== insert data + +sql insert into ct1 values('2022-05-08 03:43:00.000', -111) +sleep 100 + +#=================================================================== +print =============== query data from child table + +sql select `_wstartts`,`min(k)`,`max(k)`,sum_alias from outstb +print rows: $rows +print $data00 $data01 $data02 $data03 +if $rows != 1 then + return -1 +endi + +if $data01 != -111 then + return -1 +endi + +if $data02 != 234 then + return -1 +endi + +if $data03 != 123 then + return -1 +endi + +#=================================================================== +print =============== insert data + +sql insert into ct1 values('2022-05-08 03:53:00.000', 789) +sleep 100 + +#=================================================================== +print =============== query data from child table + +sql select `_wstartts`,`min(k)`,`max(k)`,sum_alias from outstb +print rows: $rows +print $data00 $data01 $data02 $data03 +print $data10 $data11 $data12 $data13 +if $rows != 2 then + return -1 +endi + +if 
$data01 != -111 then + return -1 +endi + +if $data02 != 234 then + return -1 +endi + +if $data03 != 123 then + return -1 +endi + +if $data11 != 789 then + return -1 +endi + +if $data12 != 789 then + return -1 +endi + +if $data13 != 789 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/system-test/0-others/taosdMonitor.py b/tests/system-test/0-others/taosdMonitor.py new file mode 100644 index 0000000000..a3d3b05204 --- /dev/null +++ b/tests/system-test/0-others/taosdMonitor.py @@ -0,0 +1,311 @@ +import taos +import sys +import time +import socket +import pexpect +import os +import http.server +import gzip +import threading +import json + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +telemetryPort = '6043' + + +def telemetryInfoCheck(infoDict=''): + + hostname = socket.gethostname() + serverPort = 7080 + + if "ts" not in infoDict or len(infoDict["ts"]) == 0: + tdLog.exit("ts is null!") + + if "dnode_id" not in infoDict or infoDict["dnode_id"] != 1: + tdLog.exit("dnode_id is null!") + + if "dnode_ep" not in infoDict: + tdLog.exit("dnode_ep is null!") + + if "cluster_id" not in infoDict: + tdLog.exit("cluster_id is null!") + + if "protocol" not in infoDict or infoDict["protocol"] != 1: + tdLog.exit("protocol is null!") + + if "cluster_info" not in infoDict : + tdLog.exit("cluster_info is null!") + + # cluster_info ==================================== + + if "first_ep" not in infoDict["cluster_info"] or infoDict["cluster_info"]["first_ep"] == None: + tdLog.exit("first_ep is null!") + + if "first_ep_dnode_id" not in infoDict["cluster_info"] or infoDict["cluster_info"]["first_ep_dnode_id"] != 1: + tdLog.exit("first_ep_dnode_id is null!") + + if "version" not in infoDict["cluster_info"] or infoDict["cluster_info"]["version"] == None: + tdLog.exit("first_ep_dnode_id is null!") + + if "master_uptime" not in infoDict["cluster_info"] or infoDict["cluster_info"]["master_uptime"] == None: + tdLog.exit("master_uptime is null!") + + if "monitor_interval" not in infoDict["cluster_info"] or infoDict["cluster_info"]["monitor_interval"] !=5: + tdLog.exit("monitor_interval is null!") + + if "vgroups_total" not in infoDict["cluster_info"] or infoDict["cluster_info"]["vgroups_total"] < 0: + tdLog.exit("vgroups_total is null!") + + if "vgroups_alive" not in infoDict["cluster_info"] or infoDict["cluster_info"]["vgroups_alive"] < 0: + tdLog.exit("vgroups_alive is null!") + + if "connections_total" not in infoDict["cluster_info"] or infoDict["cluster_info"]["connections_total"] < 0 : + tdLog.exit("connections_total is null!") + + if "dnodes" not in infoDict["cluster_info"] or infoDict["cluster_info"]["dnodes"] == None : + tdLog.exit("dnodes is null!") + + dnodes_info = { "dnode_id": 1,"dnode_ep": f"{hostname}:{serverPort}","status":"ready"} + + for k ,v in dnodes_info.items(): + if k not in infoDict["cluster_info"]["dnodes"][0] or v != infoDict["cluster_info"]["dnodes"][0][k] : + tdLog.exit("dnodes info is null!") + + mnodes_info = { "mnode_id":1, "mnode_ep":f"{hostname}:{serverPort}","role": "LEADER" } + + for k ,v in mnodes_info.items(): + if k not in infoDict["cluster_info"]["mnodes"][0] or v != infoDict["cluster_info"]["mnodes"][0][k] : + tdLog.exit("mnodes info is null!") + + # vgroup_infos ==================================== + + if "vgroup_infos" not in infoDict or infoDict["vgroup_infos"]== None: + tdLog.exit("vgroup_infos is null!") + + vgroup_infos_nums = len(infoDict["vgroup_infos"]) + + for index in 
range(vgroup_infos_nums): + if "vgroup_id" not in infoDict["vgroup_infos"][index] or infoDict["vgroup_infos"][index]["vgroup_id"]<0: + tdLog.exit("vgroup_id is null!") + if "database_name" not in infoDict["vgroup_infos"][index] or len(infoDict["vgroup_infos"][index]["database_name"]) < 0: + tdLog.exit("database_name is null!") + if "tables_num" not in infoDict["vgroup_infos"][index] or infoDict["vgroup_infos"][index]["tables_num"]!= 0: + tdLog.exit("tables_num is null!") + if "status" not in infoDict["vgroup_infos"][index] or len(infoDict["vgroup_infos"][index]["status"]) < 0 : + tdLog.exit("status is null!") + if "vnodes" not in infoDict["vgroup_infos"][index] or infoDict["vgroup_infos"][index]["vnodes"] ==None : + tdLog.exit("vnodes is null!") + if "dnode_id" not in infoDict["vgroup_infos"][index]["vnodes"][0] or infoDict["vgroup_infos"][index]["vnodes"][0]["dnode_id"] < 0 : + tdLog.exit("vnodes is null!") + + # grant_info ==================================== + + if "grant_info" not in infoDict or infoDict["grant_info"]== None: + tdLog.exit("grant_info is null!") + + if "expire_time" not in infoDict["grant_info"] or not infoDict["grant_info"]["expire_time"] > 0: + tdLog.exit("expire_time is null!") + + if "timeseries_used" not in infoDict["grant_info"] or not infoDict["grant_info"]["timeseries_used"] > 0: + tdLog.exit("timeseries_used is null!") + + if "timeseries_total" not in infoDict["grant_info"] or not infoDict["grant_info"]["timeseries_total"] > 0: + tdLog.exit("timeseries_total is null!") + + # dnode_info ==================================== + + if "dnode_info" not in infoDict or infoDict["dnode_info"]== None: + tdLog.exit("dnode_info is null!") + + dnode_infos = ['uptime', 'cpu_engine', 'cpu_system', 'cpu_cores', 'mem_engine', 'mem_system', 'mem_total', 'disk_engine', + 'disk_used', 'disk_total', 'net_in', 'net_out', 'io_read', 'io_write', 'io_read_disk', 'io_write_disk', 'req_select', + 'req_select_rate', 'req_insert', 'req_insert_success', 'req_insert_rate', 'req_insert_batch', 'req_insert_batch_success', + 'req_insert_batch_rate', 'errors', 'vnodes_num', 'masters', 'has_mnode', 'has_qnode', 'has_snode', 'has_bnode'] + for elem in dnode_infos: + if elem not in infoDict["dnode_info"] or infoDict["dnode_info"][elem] < 0: + tdLog.exit(f"{elem} is null!") + + # dnode_info ==================================== + + if "disk_infos" not in infoDict or infoDict["disk_infos"]== None: + tdLog.exit("disk_infos is null!") + + # bug for data_dir + if "datadir" not in infoDict["disk_infos"] or len(infoDict["disk_infos"]["datadir"]) <=0 : + tdLog.exit("datadir is null!") + + if "name" not in infoDict["disk_infos"]["datadir"][0] or len(infoDict["disk_infos"]["datadir"][0]["name"]) <= 0: + tdLog.exit("name is null!") + + if "level" not in infoDict["disk_infos"]["datadir"][0] or infoDict["disk_infos"]["datadir"][0]["level"] < 0: + tdLog.exit("level is null!") + + if "avail" not in infoDict["disk_infos"]["datadir"][0] or infoDict["disk_infos"]["datadir"][0]["avail"] <= 0: + tdLog.exit("avail is null!") + + if "used" not in infoDict["disk_infos"]["datadir"][0] or infoDict["disk_infos"]["datadir"][0]["used"] <= 0: + tdLog.exit("used is null!") + + if "total" not in infoDict["disk_infos"]["datadir"][0] or infoDict["disk_infos"]["datadir"][0]["total"] <= 0: + tdLog.exit("total is null!") + + + if "logdir" not in infoDict["disk_infos"] or infoDict["disk_infos"]["logdir"]== None: + tdLog.exit("logdir is null!") + + if "name" not in infoDict["disk_infos"]["logdir"] or 
len(infoDict["disk_infos"]["logdir"]["name"]) <= 0: + tdLog.exit("name is null!") + + if "avail" not in infoDict["disk_infos"]["logdir"] or infoDict["disk_infos"]["logdir"]["avail"] <= 0: + tdLog.exit("avail is null!") + + if "used" not in infoDict["disk_infos"]["logdir"] or infoDict["disk_infos"]["logdir"]["used"] <= 0: + tdLog.exit("used is null!") + + if "total" not in infoDict["disk_infos"]["logdir"] or infoDict["disk_infos"]["logdir"]["total"] <= 0: + tdLog.exit("total is null!") + + + + if "tempdir" not in infoDict["disk_infos"] or infoDict["disk_infos"]["tempdir"]== None: + tdLog.exit("tempdir is null!") + + if "name" not in infoDict["disk_infos"]["tempdir"] or len(infoDict["disk_infos"]["tempdir"]["name"]) <= 0: + tdLog.exit("name is null!") + + if "avail" not in infoDict["disk_infos"]["tempdir"] or infoDict["disk_infos"]["tempdir"]["avail"] <= 0: + tdLog.exit("avail is null!") + + if "used" not in infoDict["disk_infos"]["tempdir"] or infoDict["disk_infos"]["tempdir"]["used"] <= 0: + tdLog.exit("used is null!") + + if "total" not in infoDict["disk_infos"]["tempdir"] or infoDict["disk_infos"]["tempdir"]["total"] <= 0: + tdLog.exit("total is null!") + + + # log_infos ==================================== + + if "log_infos" not in infoDict or infoDict["log_infos"]== None: + tdLog.exit("log_infos is null!") + + if "logs" not in infoDict["log_infos"] or len(infoDict["log_infos"]["logs"])!= 10: + tdLog.exit("logs is null!") + + if "ts" not in infoDict["log_infos"]["logs"][0] or len(infoDict["log_infos"]["logs"][0]["ts"]) <= 10: + tdLog.exit("ts is null!") + + if "level" not in infoDict["log_infos"]["logs"][0] or infoDict["log_infos"]["logs"][0]["level"] not in ["error" ,"info" , "debug" ,"trace"]: + tdLog.exit("level is null!") + + if "content" not in infoDict["log_infos"]["logs"][0] or len(infoDict["log_infos"]["logs"][0]["ts"]) <= 1: + tdLog.exit("content is null!") + + if "summary" not in infoDict["log_infos"] or len(infoDict["log_infos"]["summary"])!= 4: + tdLog.exit("summary is null!") + + + if "total" not in infoDict["log_infos"]["summary"][0] or infoDict["log_infos"]["summary"][0]["total"] < 0 : + tdLog.exit("total is null!") + + if "level" not in infoDict["log_infos"]["summary"][0] or infoDict["log_infos"]["summary"][0]["level"] not in ["error" ,"info" , "debug" ,"trace"]: + tdLog.exit("level is null!") + +class RequestHandlerImpl(http.server.BaseHTTPRequestHandler): + def do_GET(self): + """ + process GET request + """ + + def do_POST(self): + """ + process POST request + """ + contentEncoding = self.headers["Content-Encoding"] + + if contentEncoding == 'gzip': + req_body = self.rfile.read(int(self.headers["Content-Length"])) + plainText = gzip.decompress(req_body).decode() + else: + plainText = self.rfile.read(int(self.headers["Content-Length"])).decode() + + print(plainText) + # 1. send response code and header + self.send_response(200) + self.send_header("Content-Type", "text/html; charset=utf-8") + self.end_headers() + + # 2. send response content + #self.wfile.write(("Hello World: " + req_body + "\n").encode("utf-8")) + + # 3. check request body info + infoDict = json.loads(plainText) + #print("================") + # print(infoDict) + telemetryInfoCheck(infoDict) + + # 4. 
shutdown the server and exit case + assassin = threading.Thread(target=httpServer.shutdown) + assassin.daemon = True + assassin.start() + print ("==== shutdown http server ====") + +class TDTestCase: + hostname = socket.gethostname() + serverPort = '7080' + rpcDebugFlagVal = '143' + clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + clientCfgDict["serverPort"] = serverPort + clientCfgDict["firstEp"] = hostname + ':' + serverPort + clientCfgDict["secondEp"] = hostname + ':' + serverPort + clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal + clientCfgDict["fqdn"] = hostname + + updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + updatecfgDict["clientCfg"] = clientCfgDict + updatecfgDict["serverPort"] = serverPort + updatecfgDict["firstEp"] = hostname + ':' + serverPort + updatecfgDict["secondEp"] = hostname + ':' + serverPort + updatecfgDict["fqdn"] = hostname + + updatecfgDict["monitorFqdn"] = hostname + updatecfgDict["monitorPort"] = '6043' + updatecfgDict["monitor"] = '1' + updatecfgDict["monitorInterval"] = "5" + updatecfgDict["monitorMaxLogs"] = "10" + updatecfgDict["monitorComp"] = "1" + + print ("===================: ", updatecfgDict) + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + + def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring + tdSql.prepare() + # time.sleep(2) + vgroups = "30" + sql = "create database db3 vgroups " + vgroups + tdSql.query(sql) + + # loop to wait request + httpServer.serve_forever() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +# create http server: bing ip/port , and request processor +serverAddress = ("", int(telemetryPort)) +httpServer = http.server.HTTPServer(serverAddress, RequestHandlerImpl) + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) + + + + + diff --git a/tests/system-test/1-insert/insertWithMoreVgroup.py b/tests/system-test/1-insert/insertWithMoreVgroup.py index a7d17bc41e..b583ee93e8 100644 --- a/tests/system-test/1-insert/insertWithMoreVgroup.py +++ b/tests/system-test/1-insert/insertWithMoreVgroup.py @@ -12,6 +12,7 @@ # -*- coding: utf-8 -*- import sys +import os import threading import multiprocessing as mp from numpy.lib.function_base import insert @@ -66,14 +67,19 @@ class TDTestCase: # run case def run(self): - # test base case - self.test_case1() - tdLog.debug(" LIMIT test_case1 ............ [OK]") + # # test base case + # self.test_case1() + # tdLog.debug(" LIMIT test_case1 ............ [OK]") - # test advance case + # test case # self.test_case2() # tdLog.debug(" LIMIT test_case2 ............ [OK]") + # test case + self.test_case3() + tdLog.debug(" LIMIT test_case3 ............ [OK]") + + # stop def stop(self): tdSql.close() @@ -115,11 +121,12 @@ class TDTestCase: return cur def new_create_tables(self,dbname,vgroups,stbname,tcountStart,tcountStop): - host = "chenhaoran02" + host = "localhost" buildPath = self.getBuildPath() config = buildPath+ "../sim/dnode1/cfg/" tsql=self.newcur(host,config) + tsql.execute("drop database if exists %s"%dbname) tsql.execute("create database %s vgroups %d"%(dbname,vgroups)) tsql.execute("use %s" %dbname) tsql.execute("create stable %s(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)"%stbname) @@ -182,7 +189,52 @@ class TDTestCase: tdLog.debug("INSERT TABLE DATA ............ 
[OK]") return + def taosBench(self,jsonFile): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + taosBenchbin = buildPath+ "/build/bin/taosBenchmark" + os.system("%s -f %s -y " %(taosBenchbin,jsonFile)) + + return + def taosBenchCreate(self,dbname,stbname,vgroups,threadNumbers,count): + # count=50000 + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + taosBenchbin = buildPath+ "/build/bin/taosBenchmark" + # insert: create one or mutiple tables per sql and insert multiple rows per sql + tdSql.execute("drop database if exists %s"%dbname) + + tdSql.execute("create database %s vgroups %d"%(dbname,vgroups)) + tdSql.execute("use %s" %dbname) + + threads = [] + # threadNumbers=2 + for i in range(threadNumbers): + jsonfile="1-insert/Vgroups%d%d.json"%(vgroups,i) + os.system("cp -f 1-insert/manyVgroups.json %s"%(jsonfile)) + os.system("sed -i 's/\"name\": \"db\",/\"name\": \"%s%d\",/g' %s"%(dbname,i,jsonfile)) + os.system("sed -i 's/\"childtable_count\": 300000,/\"childtable_count\": %d,/g' %s "%(count,jsonfile)) + os.system("sed -i 's/\"name\": \"stb1\",/\"name\": \"%s%d\",/g' %s "%(stbname,i,jsonfile)) + os.system("sed -i 's/\"childtable_prefix\": \"stb1_\",/\"childtable_prefix\": \"%s%d_\",/g' %s "%(stbname,i,jsonfile)) + threads.append(mp.Process(target=self.taosBench, args=("%s"%jsonfile,))) + start_time = time.time() + for tr in threads: + tr.start() + for tr in threads: + tr.join() + end_time = time.time() + + spendTime=end_time-start_time + speedCreate=count/spendTime + tdLog.debug("spent %.2fs to create 1 stable and %d table, create speed is %.2f table/s... [OK]"% (spendTime,count,speedCreate)) + return # test case1 base def test_case1(self): tdLog.debug("-----create database and tables test------- ") @@ -284,6 +336,12 @@ class TDTestCase: return + def test_case3(self): + + self.taosBenchCreate("db1", "stb1", 1, 2, 1*50000) + + return + # # add case with filename # diff --git a/tests/system-test/1-insert/manyVgroups.json b/tests/system-test/1-insert/manyVgroups.json new file mode 100644 index 0000000000..df6f1163e8 --- /dev/null +++ b/tests/system-test/1-insert/manyVgroups.json @@ -0,0 +1,76 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos/", + "host": "test216", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 8, + "thread_count_create_tbl": 8, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100000, + "num_of_records_per_req": 100000, + "databases": [ + { + "dbinfo": { + "name": "db", + "drop": "yes", + "vgroups": 1 + }, + "super_tables": [ + { + "name": "stb1", + "child_table_exists": "no", + "childtable_count": 300000, + "childtable_prefix": "stb1_", + "auto_create_table": "no", + "batch_create_tbl_num": 50000, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 0, + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 10000000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "sample_format": "csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [ + { + "type": "INT" + }, + { + "type": "DOUBLE", + "count": 100 + }, + { + "type": "BINARY", + "len": 400, + "count": 10 + }, + { + "type": "nchar", + "len": 200, + "count": 20 + } + ], + "tags": [ + { + "type": "TINYINT", + "count": 2 + }, + { + "type": "BINARY", + "len": 16, + "count": 2 + } + 
] + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/system-test/1-insert/mutipythonnodebugtaosd.py b/tests/system-test/1-insert/mutipythonnodebugtaosd.py new file mode 100644 index 0000000000..73d70b4348 --- /dev/null +++ b/tests/system-test/1-insert/mutipythonnodebugtaosd.py @@ -0,0 +1,299 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + + +import sys +import os +selfPath = os.path.dirname(os.path.realpath(__file__)) +utilPath="%s/../../pytest/"%selfPath +import threading +import multiprocessing as mp +from numpy.lib.function_base import insert +import taos +sys.path.append(utilPath) +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np +import datetime as dt +import time +# constant define +WAITS = 5 # wait seconds + +class TDTestCase: + # + # --------------- main frame ------------------- + # + + def caseDescription(self): + ''' + limit and offset keyword function test cases; + case1: limit offset base function test + case2: offset return valid + ''' + return + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + # init + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + # tdSql.init(conn.cursor()) + # tdSql.prepare() + # self.create_tables(); + self.ts = 1500000000000 + + + # run case + def run(self): + + # test base case + self.test_case1() + tdLog.debug(" LIMIT test_case1 ............ [OK]") + + # test advance case + # self.test_case2() + # tdLog.debug(" LIMIT test_case2 ............ [OK]") + + # stop + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + # --------------- case ------------------- + + # create tables + def create_tables(self,dbname,stbname,count): + tdSql.execute("use %s" %dbname) + tdSql.execute("create stable %s(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)"%stbname) + pre_create = "create table" + sql = pre_create + tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname)) + # print(time.time()) + exeStartTime=time.time() + for i in range(count): + sql += " %s_%d using %s tags(%d)"%(stbname,i,stbname,i+1) + if i >0 and i%3000 == 0: + tdSql.execute(sql) + sql = pre_create + # print(time.time()) + # end sql + if sql != pre_create: + tdSql.execute(sql) + exeEndTime=time.time() + spendTime=exeEndTime-exeStartTime + speedCreate=count/spendTime + tdLog.debug("spent %.2fs to create 1 stable and %d table, create speed is %.2f table/s... 
[OK]"% (spendTime,count,speedCreate)) + return + + def newcur(self,host,cfg): + user = "root" + password = "taosdata" + port =6030 + con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) + cur=con.cursor() + print(cur) + return cur + + def new_create_tables(self,dbname,vgroups,stbname,tcountStart,tcountStop): + host = "127.0.0.1" + buildPath = self.getBuildPath() + config = buildPath+ "../sim/dnode1/cfg/" + + tsql=self.newcur(host,config) + tsql.execute("drop database if exists %s" %(dbname)) + tsql.execute("create database if not exists %s vgroups %d"%(dbname,vgroups)) + tsql.execute("use %s" %dbname) + tsql.execute("create stable %s(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)"%stbname) + + pre_create = "create table" + sql = pre_create + tcountStop=int(tcountStop) + tcountStart=int(tcountStart) + count=tcountStop-tcountStart + + tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname)) + # print(time.time()) + exeStartTime=time.time() + # print(type(tcountStop),type(tcountStart)) + for i in range(tcountStart,tcountStop): + sql += " %s_%d using %s tags(%d)"%(stbname,i,stbname,i+1) + if i >0 and i%20000 == 0: + # print(sql) + tsql.execute(sql) + sql = pre_create + # print(time.time()) + # end sql + if sql != pre_create: + # print(sql) + tsql.execute(sql) + exeEndTime=time.time() + spendTime=exeEndTime-exeStartTime + speedCreate=count/spendTime + # tdLog.debug("spent %.2fs to create 1 stable and %d table, create speed is %.2f table/s... [OK]"% (spendTime,count,speedCreate)) + return + + + + # insert data + def insert_data(self, dbname, stbname, ts_start, tcountStart,tcountStop,rowCount): + tdSql.execute("use %s" %dbname) + pre_insert = "insert into " + sql = pre_insert + tcount=tcountStop-tcountStart + allRows=tcount*rowCount + tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbname, allRows)) + exeStartTime=time.time() + for i in range(tcountStart,tcountStop): + sql += " %s_%d values "%(stbname,i) + for j in range(rowCount): + sql += "(%d, %d, 'taos_%d') "%(ts_start + j*1000, j, j) + if j >0 and j%5000 == 0: + # print(sql) + tdSql.execute(sql) + sql = "insert into %s_%d values " %(stbname,i) + # end sql + if sql != pre_insert: + # print(sql) + tdSql.execute(sql) + exeEndTime=time.time() + spendTime=exeEndTime-exeStartTime + speedInsert=allRows/spendTime + # tdLog.debug("spent %.2fs to INSERT %d rows , insert rate is %.2f rows/s... [OK]"% (spendTime,allRows,speedInsert)) + + tdLog.debug("INSERT TABLE DATA ............ 
[OK]") + return + + + # test case1 base + def test_case1(self): + tdLog.debug("-----create database and tables test------- ") + # tdSql.execute("drop database if exists db1") + # tdSql.execute("drop database if exists db4") + # tdSql.execute("drop database if exists db6") + # tdSql.execute("drop database if exists db8") + # tdSql.execute("drop database if exists db12") + # tdSql.execute("drop database if exists db16") + + #create database and tables; + + # tdSql.execute("create database db11 vgroups 1") + # # self.create_tables("db1", "stb1", 30*10000) + # tdSql.execute("use db1") + # tdSql.execute("create stable stb1(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)") + + # tdSql.execute("create database db12 vgroups 1") + # # self.create_tables("db1", "stb1", 30*10000) + # tdSql.execute("use db1") + + # t1 = threading.Thread(target=self.new_create_tables("db1", "stb1", 15*10000), args=(1,)) + # t2 = threading.Thread(target=self.new_create_tables("db1", "stb1", 15*10000), args=(2,)) + # t1 = mp.Process(target=self.new_create_tables, args=("db1", "stb1", 0,count/2,)) + # t2 = mp.Process(target=self.new_create_tables, args=("db1", "stb1", count/2,count,)) + + count=500000 + vgroups=1 + threads = [] + threadNumbers=2 + for i in range(threadNumbers): + threads.append(mp.Process(target=self.new_create_tables, args=("db1%d"%i, vgroups, "stb1", 0,count,))) + start_time = time.time() + for tr in threads: + tr.start() + for tr in threads: + tr.join() + end_time = time.time() + spendTime=end_time-start_time + speedCreate=count/spendTime + tdLog.debug("spent %.2fs to create 1 stable and %d table, create speed is %.2f table/s... [OK]"% (spendTime,count,speedCreate)) + # self.new_create_tables("db1", "stb1", 15*10000) + # self.new_create_tables("db1", "stb1", 15*10000) + + # tdSql.execute("create database db4 vgroups 4") + # self.create_tables("db4", "stb4", 30*10000) + + # tdSql.execute("create database db6 vgroups 6") + # self.create_tables("db6", "stb6", 30*10000) + + # tdSql.execute("create database db8 vgroups 8") + # self.create_tables("db8", "stb8", 30*10000) + + # tdSql.execute("create database db12 vgroups 12") + # self.create_tables("db12", "stb12", 30*10000) + + # tdSql.execute("create database db16 vgroups 16") + # self.create_tables("db16", "stb16", 30*10000) + return + + # test case2 base:insert data + def test_case2(self): + + tdLog.debug("-----insert data test------- ") + # drop database + tdSql.execute("drop database if exists db1") + tdSql.execute("drop database if exists db4") + tdSql.execute("drop database if exists db6") + tdSql.execute("drop database if exists db8") + tdSql.execute("drop database if exists db12") + tdSql.execute("drop database if exists db16") + + #create database and tables; + + tdSql.execute("create database db1 vgroups 1") + self.create_tables("db1", "stb1", 1*100) + self.insert_data("db1", "stb1", self.ts, 1*50,1*10000) + + + tdSql.execute("create database db4 vgroups 4") + self.create_tables("db4", "stb4", 1*100) + self.insert_data("db4", "stb4", self.ts, 1*100,1*10000) + + tdSql.execute("create database db6 vgroups 6") + self.create_tables("db6", "stb6", 1*100) + self.insert_data("db6", "stb6", self.ts, 1*100,1*10000) + + tdSql.execute("create database db8 vgroups 8") + self.create_tables("db8", "stb8", 1*100) + self.insert_data("db8", "stb8", self.ts, 1*100,1*10000) + + tdSql.execute("create database db12 vgroups 12") + self.create_tables("db12", "stb12", 1*100) + self.insert_data("db12", "stb12", self.ts, 1*100,1*10000) + + tdSql.execute("create database 
db16 vgroups 16") + self.create_tables("db16", "stb16", 1*100) + self.insert_data("db16", "stb16", self.ts, 1*100,1*10000) + + return + +# +# add case with filename +# +# tdCases.addWindows(__file__, TDTestCase()) +# tdCases.addLinux(__file__, TDTestCase()) +case=TDTestCase() +case.test_case1() \ No newline at end of file diff --git a/tests/system-test/2-query/cast.py b/tests/system-test/2-query/cast.py index 0e849410b7..e07ab95f45 100644 --- a/tests/system-test/2-query/cast.py +++ b/tests/system-test/2-query/cast.py @@ -20,7 +20,7 @@ class TDTestCase: __sql = f"select cast({col_name} as bigint), {col_name} from {tbname}" tdSql.query(sql=__sql) data_tb_col = [result[1] for result in tdSql.queryResult] - for i in range(len(tdSql.queryRows)): + for i in range(tdSql.queryRows): tdSql.checkData( i, 0, None ) if data_tb_col[i] is None else tdSql.checkData( i, 0, int(data_tb_col[i]) ) def __range_to_bigint(self,cols,tables): @@ -32,7 +32,7 @@ class TDTestCase: __sql = f"select cast({col_name} as timestamp), {col_name} from {tbname}" tdSql.query(sql=__sql) data_tb_col = [result[1] for result in tdSql.queryResult] - for i in range(len(tdSql.queryRows)): + for i in range(tdSql.queryRows): if data_tb_col[i] is None: tdSql.checkData( i, 0 , None ) if col_name not in ["c2", "double"] or tbname != "t1" or i != 10: @@ -597,37 +597,37 @@ class TDTestCase: tdLog.printNoPrefix("==========step39: cast constant operation to bigint, expect change to int ") tdSql.query("select cast(12121.23323131 as bigint) as b from ct4") - ( tdSql.checkData(i, 0, 12121) for i in range(len(tdSql.queryRows) ) ) + ( tdSql.checkData(i, 0, 12121) for i in range(tdSql.queryRows) ) tdSql.query("select cast(12121.23323131 as binary(16)) as b from ct4") - ( tdSql.checkData(i, 0, '12121.233231') for i in range(len(tdSql.queryRows)) ) + ( tdSql.checkData(i, 0, '12121.233231') for i in range(tdSql.queryRows) ) tdSql.query("select cast(12121.23323131 as binary(2)) as b from ct4") - ( tdSql.checkData(i, 0, '12') for i in range(len(tdSql.queryRows) ) ) + ( tdSql.checkData(i, 0, '12') for i in range(tdSql.queryRows) ) tdSql.query("select cast(12121.23323131 as nchar(16)) as b from ct4") - ( tdSql.checkData(i, 0, '12121.233231') for i in range(len(tdSql.queryRows) ) ) + ( tdSql.checkData(i, 0, '12121.233231') for i in range(tdSql.queryRows) ) tdSql.query("select cast(12121.23323131 as nchar(2)) as b from ct4") - ( tdSql.checkData(i, 0, '12') for i in range(len(tdSql.queryRows) ) ) + ( tdSql.checkData(i, 0, '12') for i in range(tdSql.queryRows) ) tdSql.query("select cast(12121.23323131 + 321.876897998 as bigint) as b from ct4") - ( tdSql.checkData(i, 0, 12443) for i in range(len(tdSql.queryRows) ) ) + ( tdSql.checkData(i, 0, 12443) for i in range(tdSql.queryRows) ) tdSql.query("select cast(12121.23323131 + 321.876897998 as binary(16)) as b from ct4") - ( tdSql.checkData(i, 0, '12443.110129') for i in range(len(tdSql.queryRows)) ) + ( tdSql.checkData(i, 0, '12443.110129') for i in range(tdSql.queryRows) ) tdSql.query("select cast(12121.23323131 + 321.876897998 as binary(3)) as b from ct4") - ( tdSql.checkData(i, 0, '124') for i in range(len(tdSql.queryRows) ) ) + ( tdSql.checkData(i, 0, '124') for i in range(tdSql.queryRows) ) tdSql.query("select cast(12121.23323131 + 321.876897998 as nchar(16)) as b from ct4") - ( tdSql.checkData(i, 0, '12443.110129') for i in range(len(tdSql.queryRows)) ) + ( tdSql.checkData(i, 0, '12443.110129') for i in range(tdSql.queryRows) ) tdSql.query("select cast(12121.23323131 + 321.876897998 as nchar(3)) as b from 
ct4") - ( tdSql.checkData(i, 0, '124') for i in range(len(tdSql.queryRows) ) ) + ( tdSql.checkData(i, 0, '124') for i in range(tdSql.queryRows) ) tdSql.query("select cast(12121.23323131 + 'test~!@`#$%^&*()}{][;><.,' as bigint) as b from ct4") - ( tdSql.checkData(i, 0, 12121) for i in range(len(tdSql.queryRows) ) ) + ( tdSql.checkData(i, 0, 12121) for i in range(tdSql.queryRows) ) tdSql.query("select cast(12121.23323131 + 'test~!@`#$%^&*()}{][;><.,' as binary(16)) as b from ct4") - ( tdSql.checkData(i, 0, '12121.233231') for i in range(len(tdSql.queryRows)) ) + ( tdSql.checkData(i, 0, '12121.233231') for i in range(tdSql.queryRows) ) tdSql.query("select cast(12121.23323131 + 'test~!@`#$%^&*()}{][;><.,' as binary(2)) as b from ct4") - ( tdSql.checkData(i, 0, '12') for i in range(len(tdSql.queryRows) ) ) + ( tdSql.checkData(i, 0, '12') for i in range(tdSql.queryRows) ) tdSql.query("select cast(12121.23323131 + 'test~!@`#$%^&*()}{][;><.,' as nchar(16)) as b from ct4") - ( tdSql.checkData(i, 0, '12121.233231') for i in range(len(tdSql.queryRows) ) ) + ( tdSql.checkData(i, 0, '12121.233231') for i in range(tdSql.queryRows) ) tdSql.query("select cast(12121.23323131 + 'test~!@`#$%^&*()}{][;><.,' as nchar(2)) as b from ct4") - ( tdSql.checkData(i, 0, '12') for i in range(len(tdSql.queryRows) ) ) + ( tdSql.checkData(i, 0, '12') for i in range(tdSql.queryRows) ) tdLog.printNoPrefix("==========step40: error cast condition, should return error ") tdSql.error("select cast(c1 as int) as b from ct4") diff --git a/tests/system-test/2-query/char_length.py b/tests/system-test/2-query/char_length.py index e78db3b8b0..97d5a5f59a 100644 --- a/tests/system-test/2-query/char_length.py +++ b/tests/system-test/2-query/char_length.py @@ -232,13 +232,13 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - # tdDnodes.stop(1) - # tdDnodes.start(1) + tdDnodes.stop(1) + tdDnodes.start(1) - # tdSql.execute("use db") + tdSql.execute("use db") - # tdLog.printNoPrefix("==========step4:after wal, all check again ") - # self.all_test() + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() def stop(self): tdSql.close() diff --git a/tests/system-test/2-query/concat.py b/tests/system-test/2-query/concat.py new file mode 100644 index 0000000000..1167b444d2 --- /dev/null +++ b/tests/system-test/2-query/concat.py @@ -0,0 +1,287 @@ +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + + +PRIMARY_COL = "ts" + +INT_COL = "c1" +BINT_COL = "c2" +SINT_COL = "c3" +TINT_COL = "c4" +FLOAT_COL = "c5" +DOUBLE_COL = "c6" +BOOL_COL = "c7" + +BINARY_COL = "c8" +NCHAR_COL = "c9" +TS_COL = "c10" + +NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ] +CHAR_COL = [ BINARY_COL, NCHAR_COL, ] +BOOLEAN_COL = [ BOOL_COL, ] +TS_TYPE_COL = [ TS_COL, ] + + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + + def __concat_condition(self): # sourcery skip: extract-method + concat_condition = [] + for char_col in CHAR_COL: + concat_condition.extend( + ( + char_col, + f"upper( {char_col} )", + ) + ) + concat_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL) + concat_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL ) + concat_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + concat_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) 
)" for bool_col in BOOLEAN_COL ) + concat_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL ) + # concat_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL ) + concat_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL ) + + for num_col in NUM_COL: + concat_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + concat_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL) + + concat_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL ) + + concat_condition.append('''"test1234!@#$%^&*():'> 0 " + return "" + + def __concat_num(self, concat_lists, num): + return [ concat_lists[i] for i in range(num) ] + + + def __group_condition(self, col, having = ""): + return f" group by {col} having {having}" if having else f" group by {col} " + + def __concat_check(self, tbname, num): + concat_condition = self.__concat_condition() + for i in range(len(concat_condition) - num + 1 ): + condition = self.__concat_num(concat_condition[i:], num) + concat_filter = f"concat( {','.join( condition ) }) " + where_condition = self.__where_condition(condition[0]) + # group_having = self.__group_condition(condition[0], having=f"{condition[0]} is not null " ) + concat_group_having = self.__group_condition(concat_filter, having=f"{concat_filter} is not null " ) + # group_no_having= self.__group_condition(condition[0] ) + concat_group_no_having= self.__group_condition(concat_filter) + groups = ["", concat_group_having, concat_group_no_having] + + if num > 8 or num < 2 : + [tdSql.error(f"select concat( {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ] + break + + tdSql.query(f"select {','.join(condition)} from {tbname} ") + rows = tdSql.queryRows + concat_data = [] + for m in range(rows): + concat_data.append("".join(tdSql.queryResult[m])) if tdSql.getData(m, 0) else concat_data.append(None) + tdSql.query(f"select concat( {','.join( condition ) }) from {tbname} ") + tdSql.checkRows(rows) + for j in range(tdSql.queryRows): + assert tdSql.getData(j, 0) in concat_data + + [ tdSql.query(f"select concat( {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ] + + + def __concat_err_check(self,tbname): + sqls = [] + + for char_col in CHAR_COL: + sqls.extend( + ( + f"select concat( {char_col} ) from {tbname} ", + f"select concat(ceil( {char_col} )) from {tbname} ", + f"select {char_col} from {tbname} group by concat( {char_col} ) ", + ) + ) + + sqls.extend( f"select concat( {char_col} , {num_col} ) from {tbname} " for num_col in NUM_COL ) + sqls.extend( f"select concat( {char_col} , {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL ) + sqls.extend( f"select concat( {char_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL ) + + sqls.extend( f"select concat( {ts_col}, {bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL ) + sqls.extend( f"select concat( {num_col} , {ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL) + sqls.extend( f"select concat( {num_col} , {bool_col} ) from {tbname} " for num_col in NUM_COL for bool_col in BOOLEAN_COL) + sqls.extend( f"select concat( {num_col} , {num_col} ) from {tbname} " for num_col in NUM_COL for num_col in NUM_COL) + sqls.extend( 
f"select concat( {ts_col}, {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL for ts_col in TS_TYPE_COL ) + sqls.extend( f"select concat( {bool_col}, {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col in BOOLEAN_COL ) + + sqls.extend( f"select concat( {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL ) + sqls.extend( f"select concat({char_col}, 11) from {tbname} " for char_col in CHAR_COL ) + sqls.extend( f"select concat({num_col}, '1') from {tbname} " for num_col in NUM_COL ) + sqls.extend( f"select concat({ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL ) + sqls.extend( f"select concat({bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL ) + sqls.extend( f"select concat({char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL ) + sqls.extend( + ( + f"select concat() from {tbname} ", + f"select concat(*) from {tbname} ", + f"select concat(ccccccc) from {tbname} ", + f"select concat(111) from {tbname} ", + ) + ) + + return sqls + + def __test_current(self): # sourcery skip: use-itertools-product + tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") + tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + for tb in tbname: + for i in range(2,8): + self.__concat_check(tb,i) + tdLog.printNoPrefix(f"==========current sql condition check in {tb}, col num: {i} over==========") + + def __test_error(self): + tdLog.printNoPrefix("==========err sql condition check , must return error==========") + tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + + for tb in tbname: + for errsql in self.__concat_err_check(tb): + tdSql.error(sql=errsql) + self.__concat_check(tb,1) + self.__concat_check(tb,9) + tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========") + + + def all_test(self): + self.__test_current() + self.__test_error() + + + def __create_tb(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + create_stb_sql = f'''create table stb1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) tags (t1 int) + ''' + create_ntb_sql = f'''create table t1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) + ''' + tdSql.execute(create_stb_sql) + tdSql.execute(create_ntb_sql) + + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + def __insert_data(self, rows): + now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) + for i in range(rows): + tdSql.execute( + f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f'''insert into ct1 values 
+ ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) + ''' + ) + + tdSql.execute( + f'''insert into ct4 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} + ) + ( + { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} + ) + ''' + ) + + tdSql.execute( + f'''insert into ct2 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, + { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, + { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + for i in range(rows): + insert_data = f'''insert into t1 values + ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, + "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) + ''' + tdSql.execute(insert_data) + tdSql.execute( + f'''insert into t1 values + ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, + "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, + "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + def run(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + self.__create_tb() + + tdLog.printNoPrefix("==========step2:insert data") + self.rows = 10 + self.__insert_data(self.rows) + + tdLog.printNoPrefix("==========step3:all check") + self.all_test() + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) 
+tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/concat_ws.py b/tests/system-test/2-query/concat_ws.py new file mode 100644 index 0000000000..876a1c8805 --- /dev/null +++ b/tests/system-test/2-query/concat_ws.py @@ -0,0 +1,287 @@ +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + + +PRIMARY_COL = "ts" + +INT_COL = "c1" +BINT_COL = "c2" +SINT_COL = "c3" +TINT_COL = "c4" +FLOAT_COL = "c5" +DOUBLE_COL = "c6" +BOOL_COL = "c7" + +BINARY_COL = "c8" +NCHAR_COL = "c9" +TS_COL = "c10" + +NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ] +CHAR_COL = [ BINARY_COL, NCHAR_COL, ] +BOOLEAN_COL = [ BOOL_COL, ] +TS_TYPE_COL = [ TS_COL, ] + + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + + def __concat_ws_condition(self): # sourcery skip: extract-method + concat_ws_condition = [] + for char_col in CHAR_COL: + concat_ws_condition.extend( + ( + char_col, + f"upper( {char_col} )", + ) + ) + concat_ws_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL) + concat_ws_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL ) + concat_ws_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + concat_ws_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + concat_ws_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL ) + # concat_ws_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL ) + concat_ws_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL ) + + for num_col in NUM_COL: + concat_ws_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + concat_ws_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL) + + concat_ws_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL ) + + concat_ws_condition.append('''"test1234!@#$%^&*():'> 0 " + return "" + + def __concat_ws_num(self, concat_ws_lists, num): + return [ concat_ws_lists[i] for i in range(num) ] + + + def __group_condition(self, col, having = ""): + return f" group by {col} having {having}" if having else f" group by {col} " + + def __concat_ws_check(self, tbname, num): + concat_ws_condition = self.__concat_ws_condition() + for i in range(len(concat_ws_condition) - num + 1 ): + condition = self.__concat_ws_num(concat_ws_condition[i:], num) + concat_ws_filter = f"concat_ws('_', {','.join( condition ) }) " + where_condition = self.__where_condition(condition[0]) + # group_having = self.__group_condition(condition[0], having=f"{condition[0]} is not null " ) + concat_ws_group_having = self.__group_condition(concat_ws_filter, having=f"{concat_ws_filter} is not null " ) + # group_no_having= self.__group_condition(condition[0] ) + concat_ws_group_no_having= self.__group_condition(concat_ws_filter) + groups = ["", concat_ws_group_having, concat_ws_group_no_having] + + if num > 8 or num < 2 : + [tdSql.error(f"select concat_ws('_', {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ] + break + + tdSql.query(f"select {','.join(condition)} from {tbname} ") + rows = tdSql.queryRows + concat_ws_data = [] + for 
m in range(rows): + concat_ws_data.append("_".join(tdSql.queryResult[m])) if tdSql.getData(m, 0) else concat_ws_data.append(None) + tdSql.query(f"select concat_ws('_', {','.join( condition ) }) from {tbname} ") + tdSql.checkRows(rows) + for j in range(tdSql.queryRows): + assert tdSql.getData(j, 0) in concat_ws_data + + [ tdSql.query(f"select concat_ws('_', {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ] + + + def __concat_ws_err_check(self,tbname): + sqls = [] + + for char_col in CHAR_COL: + sqls.extend( + ( + f"select concat_ws('_', {char_col} ) from {tbname} ", + f"select concat_ws('_', ceil( {char_col} )) from {tbname} ", + f"select {char_col} from {tbname} group by concat_ws('_', {char_col} ) ", + ) + ) + + sqls.extend( f"select concat_ws('_', {char_col} , {num_col} ) from {tbname} " for num_col in NUM_COL ) + sqls.extend( f"select concat_ws('_', {char_col} , {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL ) + sqls.extend( f"select concat_ws('_', {char_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL ) + + sqls.extend( f"select concat_ws('_', {ts_col}, {bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL ) + sqls.extend( f"select concat_ws('_', {num_col} , {ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL) + sqls.extend( f"select concat_ws('_', {num_col} , {bool_col} ) from {tbname} " for num_col in NUM_COL for bool_col in BOOLEAN_COL) + sqls.extend( f"select concat_ws('_', {num_col} , {num_col} ) from {tbname} " for num_col in NUM_COL for num_col in NUM_COL) + sqls.extend( f"select concat_ws('_', {ts_col}, {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL for ts_col in TS_TYPE_COL ) + sqls.extend( f"select concat_ws('_', {bool_col}, {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col in BOOLEAN_COL ) + + sqls.extend( f"select concat_ws('_', {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL ) + sqls.extend( f"select concat_ws('_', {char_col}, 11) from {tbname} " for char_col in CHAR_COL ) + sqls.extend( f"select concat_ws('_', {num_col}, '1') from {tbname} " for num_col in NUM_COL ) + sqls.extend( f"select concat_ws('_', {ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL ) + sqls.extend( f"select concat_ws('_', {bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL ) + sqls.extend( f"select concat_ws('_', {char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL ) + sqls.extend( + ( + f"select concat_ws('_', ) from {tbname} ", + f"select concat_ws('_', *) from {tbname} ", + f"select concat_ws('_', ccccccc) from {tbname} ", + f"select concat_ws('_', 111) from {tbname} ", + ) + ) + + return sqls + + def __test_current(self): # sourcery skip: use-itertools-product + tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") + tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + for tb in tbname: + for i in range(2,8): + self.__concat_ws_check(tb,i) + tdLog.printNoPrefix(f"==========current sql condition check in {tb}, col num: {i} over==========") + + def __test_error(self): + tdLog.printNoPrefix("==========err sql condition check , must return error==========") + tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + + for tb in tbname: + for errsql in self.__concat_ws_err_check(tb): + tdSql.error(sql=errsql) + self.__concat_ws_check(tb,1) + self.__concat_ws_check(tb,9) + tdLog.printNoPrefix(f"==========err sql condition check in {tb} 
over==========") + + + def all_test(self): + self.__test_current() + self.__test_error() + + + def __create_tb(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + create_stb_sql = f'''create table stb1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) tags (t1 int) + ''' + create_ntb_sql = f'''create table t1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) + ''' + tdSql.execute(create_stb_sql) + tdSql.execute(create_ntb_sql) + + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + def __insert_data(self, rows): + now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) + for i in range(rows): + tdSql.execute( + f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f'''insert into ct1 values + ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) + ''' + ) + + tdSql.execute( + f'''insert into ct4 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} + ) + ( + { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} + ) + ''' + ) + + tdSql.execute( + f'''insert into ct2 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, + { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, + { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", 
"nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + for i in range(rows): + insert_data = f'''insert into t1 values + ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, + "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) + ''' + tdSql.execute(insert_data) + tdSql.execute( + f'''insert into t1 values + ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, + "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, + "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + def run(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + self.__create_tb() + + tdLog.printNoPrefix("==========step2:insert data") + self.rows = 10 + self.__insert_data(self.rows) + + tdLog.printNoPrefix("==========step3:all check") + self.all_test() + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/join.py b/tests/system-test/2-query/join.py index a39bc21946..289dd3d62d 100644 --- a/tests/system-test/2-query/join.py +++ b/tests/system-test/2-query/join.py @@ -99,6 +99,7 @@ class TDTestCase: if not join_flag : tdSql.error(sql=sql) + break if len(tblist) == 2: if "ct1" in tblist or "t1" in tblist: self.__join_current(sql, checkrows) @@ -111,42 +112,9 @@ class TDTestCase: if len(tblist) > 2 or len(tblist) < 1: tdSql.error(sql=sql) - # def __join_err_check(self,tbname): - # sqls = [] - - # for un_char_col in NUM_COL: - # sqls.extend( - # ( - # f"select length( {un_char_col} ) from {tbname} ", - # f"select length(ceil( {un_char_col} )) from {tbname} ", - # f"select {un_char_col} from {tbname} group by length( {un_char_col} ) ", - # ) - # ) - - # sqls.extend( f"select length( {un_char_col} + {un_char_col_2} ) from {tbname} " for un_char_col_2 in NUM_COL ) - # sqls.extend( f"select length( {un_char_col} + {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL ) - - # sqls.extend( f"select {char_col} from {tbname} group by length( {char_col} ) " for char_col in CHAR_COL) - # sqls.extend( f"select length( {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL ) - # sqls.extend( f"select length( {char_col} + {ts_col} ) from {tbname} " for char_col in NUM_COL for ts_col in TS_TYPE_COL) - # sqls.extend( f"select length( {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL ) - # sqls.extend( f"select upper({char_col}, 11) from {tbname} " for char_col in CHAR_COL ) - # sqls.extend( f"select upper({char_col}) from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL ) - # sqls.extend( - # ( - # f"select length() from {tbname} ", - # f"select length(*) from {tbname} ", - # 
f"select length(ccccccc) from {tbname} ", - # f"select length(111) from {tbname} ", - # f"select length(c8, 11) from {tbname} ", - # ) - # ) - - # return sqls - def __join_current(self, sql, checkrows): tdSql.query(sql=sql) - tdSql.checkRows(checkrows) + # tdSql.checkRows(checkrows) def __test_current(self): @@ -197,10 +165,10 @@ class TDTestCase: tbname = ["ct1", "ct2", "ct4", "t1"] - for tb in tbname: - for errsql in self.__length_err_check(tb): - tdSql.error(sql=errsql) - tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========") + # for tb in tbname: + # for errsql in self.__join_err_check(tb): + # tdSql.error(sql=errsql) + # tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========") def all_test(self): @@ -319,13 +287,13 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - # tdDnodes.stop(1) - # tdDnodes.start(1) + tdDnodes.stop(1) + tdDnodes.start(1) - # tdSql.execute("use db") + tdSql.execute("use db") - # tdLog.printNoPrefix("==========step4:after wal, all check again ") - # self.all_test() + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() def stop(self): tdSql.close() diff --git a/tests/system-test/2-query/length.py b/tests/system-test/2-query/length.py index 083bc62c9a..ed604c41ae 100644 --- a/tests/system-test/2-query/length.py +++ b/tests/system-test/2-query/length.py @@ -233,13 +233,13 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - # tdDnodes.stop(1) - # tdDnodes.start(1) + tdDnodes.stop(1) + tdDnodes.start(1) - # tdSql.execute("use db") + tdSql.execute("use db") - # tdLog.printNoPrefix("==========step4:after wal, all check again ") - # self.all_test() + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() def stop(self): tdSql.close() diff --git a/tests/system-test/2-query/lower.py b/tests/system-test/2-query/lower.py index 5445c37b8a..0917fb63fc 100644 --- a/tests/system-test/2-query/lower.py +++ b/tests/system-test/2-query/lower.py @@ -59,12 +59,9 @@ class TDTestCase: groups = ["", group_having, group_no_having] for group_condition in groups: - tdSql.query(f"select {condition} from {tbname} {where_condition} {group_condition} ") - datas = [tdSql.getData(i,0) for i in range(tdSql.queryRows)] - lower_data = [ str(data).lower() if data else None for data in datas ] - tdSql.query(f"select lower( {condition} ) from {tbname} {where_condition} {group_condition}") - for i in range(len(lower_data)): - tdSql.checkData(i, 0, lower_data[i] ) if lower_data[i] else tdSql.checkData(i, 0, None) + tdSql.query(f"select lower( {condition} ), {condition} from {tbname} {where_condition} {group_condition}") + for i in range(tdSql.queryRows): + tdSql.checkData(i, 0, str(tdSql.getData(i, 1)).lower() ) if tdSql.getData(i, 1) else tdSql.checkData(i, 0, None) def __lower_err_check(self,tbname): sqls = [] @@ -230,13 +227,13 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - # tdDnodes.stop(1) - # tdDnodes.start(1) + tdDnodes.stop(1) + tdDnodes.start(1) - # tdSql.execute("use db") + tdSql.execute("use db") - # tdLog.printNoPrefix("==========step4:after wal, all check again ") - # self.all_test() + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() def stop(self): tdSql.close() diff --git a/tests/system-test/2-query/ltrim.py b/tests/system-test/2-query/ltrim.py new file mode 100644 index 0000000000..15f40a09c3 --- /dev/null +++ 
b/tests/system-test/2-query/ltrim.py @@ -0,0 +1,267 @@ +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + + +PRIMARY_COL = "ts" + +INT_COL = "c1" +BINT_COL = "c2" +SINT_COL = "c3" +TINT_COL = "c4" +FLOAT_COL = "c5" +DOUBLE_COL = "c6" +BOOL_COL = "c7" + +BINARY_COL = "c8" +NCHAR_COL = "c9" +TS_COL = "c10" + +NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ] +CHAR_COL = [ BINARY_COL, NCHAR_COL, ] +BOOLEAN_COL = [ BOOL_COL, ] +TS_TYPE_COL = [ TS_COL, ] + + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + + def __ltrim_condition(self): # sourcery skip: extract-method + ltrim_condition = [] + for char_col in CHAR_COL: + ltrim_condition.extend( + ( + char_col, + f"upper( {char_col} )", + ) + ) + ltrim_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL) + ltrim_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL ) + ltrim_condition.extend( f"concat( cast( {char_col} + {num_col} as binary(16) ), {char_col}) " for num_col in NUM_COL ) + ltrim_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + ltrim_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + ltrim_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL ) + # ltrim_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL ) + ltrim_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL ) + ltrim_condition.extend( f"concat( {char_col}, {char_col_2} ) " for char_col_2 in CHAR_COL ) + + for num_col in NUM_COL: + ltrim_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + ltrim_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL) + + ltrim_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL ) + + ltrim_condition.append(''' " test1234!@#$%^&*() :'> 0 " + return "" + + def __group_condition(self, col, having = ""): + return f" group by {col} having {having}" if having else f" group by {col} " + + def __ltrim_check(self, tbname): + ltrim_condition = self.__ltrim_condition() + for condition in ltrim_condition: + where_condition = self.__where_condition(condition) + ltrim_group_having = self.__group_condition(condition, having=f"{condition} is not null " ) + ltrim_group_no_having= self.__group_condition(condition) + groups = ["", ltrim_group_having, ltrim_group_no_having] + + tdSql.query(f"select ltrim( {condition}) , {condition} from {tbname} ") + for j in range(tdSql.queryRows): + tdSql.checkData(j,0, tdSql.getData(j,1).lstrip()) if tdSql.getData(j,1) else tdSql.checkData(j, 0, None) + + [ tdSql.query(f"select ltrim({condition}) from {tbname} {where_condition} {group} ") for group in groups ] + + + def __ltrim_err_check(self,tbname): + sqls = [] + + for num_col in NUM_COL: + sqls.extend( + ( + f"select ltrim( {num_col} ) from {tbname} ", + f"select ltrim(ceil( {num_col} )) from {tbname} ", + f"select {num_col} from {tbname} group by ltrim( {num_col} ) ", + ) + ) + + sqls.extend( f"select ltrim( {char_col} , {num_col} ) from {tbname} " for char_col in CHAR_COL ) + sqls.extend( f"select ltrim( {num_col} , {ts_col} ) from {tbname} " for ts_col in 
TS_TYPE_COL ) + sqls.extend( f"select ltrim( {num_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL ) + + sqls.extend( f"select ltrim( {ts_col}+{bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL ) + sqls.extend( f"select ltrim( {num_col}+{ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL) + sqls.extend( f"select ltrim( {num_col}+ {bool_col} ) from {tbname} " for num_col in NUM_COL for bool_col in BOOLEAN_COL) + sqls.extend( f"select ltrim( {num_col}+ {num_col} ) from {tbname} " for num_col in NUM_COL for num_col in NUM_COL) + sqls.extend( f"select ltrim( {ts_col}+{ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL for ts_col in TS_TYPE_COL ) + sqls.extend( f"select ltrim( {bool_col}+ {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col in BOOLEAN_COL ) + + sqls.extend( f"select ltrim( {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL ) + sqls.extend( f"select ltrim({num_col}, '1') from {tbname} " for num_col in NUM_COL ) + sqls.extend( f"select ltrim({ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL ) + sqls.extend( f"select ltrim({bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL ) + sqls.extend( f"select ltrim({char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL ) + sqls.extend( + ( + f"select ltrim() from {tbname} ", + f"select ltrim(*) from {tbname} ", + f"select ltrim(ccccccc) from {tbname} ", + f"select ltrim(111) from {tbname} ", + ) + ) + + return sqls + + def __test_current(self): # sourcery skip: use-itertools-product + tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") + tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + for tb in tbname: + self.__ltrim_check(tb) + tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========") + + def __test_error(self): + tdLog.printNoPrefix("==========err sql condition check , must return error==========") + tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + + for tb in tbname: + for errsql in self.__ltrim_err_check(tb): + tdSql.error(sql=errsql) + tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========") + + + def all_test(self): + self.__test_current() + self.__test_error() + + + def __create_tb(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + create_stb_sql = f'''create table stb1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) tags (t1 int) + ''' + create_ntb_sql = f'''create table t1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) + ''' + tdSql.execute(create_stb_sql) + tdSql.execute(create_ntb_sql) + + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + def __insert_data(self, rows): + now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) + for i in range(rows): + tdSql.execute( + f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct4 values ( { now_time - i * 7776000000 
}, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f'''insert into ct1 values + ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) + ''' + ) + + tdSql.execute( + f'''insert into ct4 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} + ) + ( + { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} + ) + ''' + ) + + tdSql.execute( + f'''insert into ct2 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, + { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, + { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + for i in range(rows): + insert_data = f'''insert into t1 values + ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, + "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) + ''' + tdSql.execute(insert_data) + tdSql.execute( + f'''insert into t1 values + ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, + "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, + "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + def run(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + self.__create_tb() + + tdLog.printNoPrefix("==========step2:insert data") + 
self.rows = 10 + self.__insert_data(self.rows) + + tdLog.printNoPrefix("==========step3:all check") + self.all_test() + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/query_cols_tags_and_or.py b/tests/system-test/2-query/query_cols_tags_and_or.py new file mode 100644 index 0000000000..55881db149 --- /dev/null +++ b/tests/system-test/2-query/query_cols_tags_and_or.py @@ -0,0 +1,2135 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +from util.common import tdCom +import random +class TDTestCase: + def init(self, conn, logSql): + ## add for TD-6672 + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def insertData(self, tb_name): + # insert_sql_list = [f'insert into {tb_name} values ("2021-01-01 12:00:00", 1, 1, 1, 3, 1.1, 1.1, "binary", "nchar", true, 1)', + # f'insert into {tb_name} values ("2021-01-05 12:00:00", 2, 2, 1, 3, 1.1, 1.1, "binary", "nchar", true, 2)', + # f'insert into {tb_name} values ("2021-01-07 12:00:00", 1, 3, 1, 2, 1.1, 1.1, "binary", "nchar", true, 3)', + # f'insert into {tb_name} values ("2021-01-09 12:00:00", 1, 2, 4, 3, 1.1, 1.1, "binary", "nchar", true, 4)', + # f'insert into {tb_name} values ("2021-01-11 12:00:00", 1, 2, 5, 5, 1.1, 1.1, "binary", "nchar", true, 5)', + # f'insert into {tb_name} values ("2021-01-13 12:00:00", 1, 2, 1, 3, 6.6, 1.1, "binary", "nchar", true, 6)', + # f'insert into {tb_name} values ("2021-01-15 12:00:00", 1, 2, 1, 3, 1.1, 7.7, "binary", "nchar", true, 7)', + # f'insert into {tb_name} values ("2021-01-17 12:00:00", 1, 2, 1, 3, 1.1, 1.1, "binary8", "nchar", true, 8)', + # f'insert into {tb_name} values ("2021-01-19 12:00:00", 1, 2, 1, 3, 1.1, 1.1, "binary", "nchar9", true, 9)', + # f'insert into {tb_name} values ("2021-01-21 12:00:00", 1, 2, 1, 3, 1.1, 1.1, "binary", "nchar", false, 10)', + # f'insert into {tb_name} values ("2021-01-23 12:00:00", 1, 3, 1, 3, 1.1, 1.1, Null, Null, false, 11)' + # ] + insert_sql_list = [f'insert into {tb_name} values ("2021-01-01 12:00:00", 1, 1, 1, 3, 1.1, 1.1, "binary", "nchar", true, 1, 2, 3, 4)', + f'insert into {tb_name} values ("2021-01-05 12:00:00", 2, 2, 1, 3, 1.1, 1.1, "binary", "nchar", true, 2, 3, 4, 5)', + f'insert into {tb_name} values ("2021-01-07 12:00:00", 1, 3, 1, 2, 1.1, 1.1, "binary", "nchar", true, 3, 4, 5, 6)', + f'insert into {tb_name} values ("2021-01-09 12:00:00", 1, 2, 4, 3, 1.1, 1.1, "binary", "nchar", true, 4, 5, 6, 7)', + f'insert into {tb_name} values ("2021-01-11 12:00:00", 1, 2, 5, 5, 1.1, 1.1, "binary", "nchar", true, 5, 6, 7, 8)', + f'insert into {tb_name} values ("2021-01-13 12:00:00", 1, 2, 1, 3, 6.6, 1.1, "binary", "nchar", true, 6, 7, 8, 9)', + f'insert into {tb_name} values 
("2021-01-15 12:00:00", 1, 2, 1, 3, 1.1, 7.7, "binary", "nchar", true, 7, 9, 9, 10)', + f'insert into {tb_name} values ("2021-01-17 12:00:00", 1, 2, 1, 3, 1.1, 1.1, "binary8", "nchar", true, 8, 9, 10, 11)', + f'insert into {tb_name} values ("2021-01-19 12:00:00", 1, 2, 1, 3, 1.1, 1.1, "binary", "nchar9", true, 9, 10, 11, 12)', + f'insert into {tb_name} values ("2021-01-21 12:00:00", 1, 2, 1, 3, 1.1, 1.1, "binary", "nchar", false, 10, 11, 12, 13)', + f'insert into {tb_name} values ("2021-01-23 12:00:00", 1, 3, 1, 3, 1.1, 1.1, Null, Null, false, 11, 12, 13, 14)' + ] + for sql in insert_sql_list: + tdSql.execute(sql) + + def initTb(self): + tdCom.cleanTb() + tb_name = tdCom.getLongName(8, "letters") + tdSql.execute( + f"CREATE TABLE {tb_name} (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned)") + # f"CREATE TABLE {tb_name} (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 int)") + self.insertData(tb_name) + return tb_name + + def initStb(self, count=5): + tdCom.cleanTb() + tb_name = tdCom.getLongName(8, "letters") + tdSql.execute( + f"CREATE TABLE {tb_name} (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 binary(100), t8 nchar(200), t9 bool, t10 tinyint unsigned, t11 smallint unsigned, t12 int unsigned, t13 bigint unsigned)") + for i in range(1, count+1): + tdSql.execute( + f'CREATE TABLE {tb_name}_sub_{i} using {tb_name} tags ({i}, {i}, {i}, {i}, {i}.{i}, {i}.{i}, "binary{i}", "nchar{i}", true, {i}, {i}, {i}, {i})') + self.insertData(f'{tb_name}_sub_{i}') + return tb_name + + def initTwoStb(self): + tdCom.cleanTb() + tb_name = tdCom.getLongName(8, "letters") + tb_name1 = f'{tb_name}1' + tb_name2 = f'{tb_name}2' + tdSql.execute( + f"CREATE TABLE {tb_name1} (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 int) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 binary(100), t8 nchar(200), t9 bool, t10 int)") + tdSql.execute( + f"CREATE TABLE {tb_name2} (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 int) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 binary(100), t8 nchar(200), t9 bool, t10 int)") + tdSql.execute( + f'CREATE TABLE {tb_name1}_sub using {tb_name1} tags (1, 1, 1, 1, 1.1, 1.1, "binary1", "nchar1", true, 1)') + tdSql.execute( + f'CREATE TABLE {tb_name2}_sub using {tb_name2} tags (1, 1, 1, 1, 1.1, 1.1, "binary1", "nchar1", true, 1)') + self.insertData(f'{tb_name1}_sub') + self.insertData(f'{tb_name2}_sub') + return tb_name + + def queryLastC10(self, query_sql, multi=False): + if multi: + res = tdSql.query(query_sql.replace('c10', 'last(*)'), True) + else: + res = tdSql.query(query_sql.replace('*', 'last(*)'), True) + return int(res[0][-4]) + + def queryTsCol(self, tb_name): + # ts and ts + query_sql = f'select * from {tb_name} where ts > "2021-01-11 12:00:00" or ts < "2021-01-13 12:00:00"' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) + + query_sql = f'select * from {tb_name} where ts 
>= "2021-01-11 12:00:00" and ts <= "2021-01-13 12:00:00"' + tdSql.query(query_sql) + # tdSql.checkRows(2) + # tdSql.checkEqual(self.queryLastC10(query_sql), 6) + + ## ts or and tinyint col + query_sql = f'select * from {tb_name} where ts > "2021-01-11 12:00:00" or c1 = 2' + tdSql.error(query_sql) + + query_sql = f'select * from {tb_name} where ts <= "2021-01-11 12:00:00" and c1 != 2' + tdSql.query(query_sql) + tdSql.checkRows(4) + tdSql.checkEqual(self.queryLastC10(query_sql), 5) + + ## ts or and smallint col + query_sql = f'select * from {tb_name} where ts <> "2021-01-11 12:00:00" or c2 = 10' + tdSql.error(query_sql) + + query_sql = f'select * from {tb_name} where ts <= "2021-01-11 12:00:00" and c2 <= 1' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 1) + + ## ts or and int col + query_sql = f'select * from {tb_name} where ts >= "2021-01-11 12:00:00" or c3 = 4' + tdSql.error(query_sql) + + query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00" and c3 = 4' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 4) + + ## ts or and big col + query_sql = f'select * from {tb_name} where ts is Null or c4 = 5' + tdSql.error(query_sql) + + query_sql = f'select * from {tb_name} where ts is not Null and c4 = 2' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 3) + + ## ts or and float col + query_sql = f'select * from {tb_name} where ts between "2021-01-17 12:00:00" and "2021-01-23 12:00:00" or c5 = 6.6' + tdSql.error(query_sql) + + query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00" and c5 = 1.1' + tdSql.query(query_sql) + tdSql.checkRows(4) + tdSql.checkEqual(self.queryLastC10(query_sql), 4) + + ## ts or and double col + query_sql = f'select * from {tb_name} where ts between "2021-01-17 12:00:00" and "2021-01-23 12:00:00" or c6 = 7.7' + tdSql.error(query_sql) + + query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00" and c6 = 1.1' + tdSql.query(query_sql) + tdSql.checkRows(4) + tdSql.checkEqual(self.queryLastC10(query_sql), 4) + + ## ts or and binary col + query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00" or c7 like "binary_"' + tdSql.error(query_sql) + + query_sql = f'select * from {tb_name} where ts <= "2021-01-11 12:00:00" and c7 in ("binary")' + tdSql.query(query_sql) + tdSql.checkRows(5) + tdSql.checkEqual(self.queryLastC10(query_sql), 5) + + ## ts or and nchar col + query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00" or c8 like "nchar%"' + tdSql.error(query_sql) + + query_sql = f'select * from {tb_name} where ts >= "2021-01-11 12:00:00" and c8 is Null' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) + + ## ts or and bool col + query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00" or c9=false' + tdSql.error(query_sql) + + query_sql = f'select * from {tb_name} where ts >= "2021-01-11 12:00:00" and c9=true' + tdSql.query(query_sql) + tdSql.checkRows(5) + tdSql.checkEqual(self.queryLastC10(query_sql), 9) + + ## multi cols + query_sql = f'select * from {tb_name} where ts > "2021-01-03 12:00:00" and c1 != 2 and c2 >= 2 and c3 <> 4 and c4 < 4 and c5 > 1 and c6 >= 1.1 and c7 is not Null and c8 = "nchar" and c9=false' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 10) + + def queryTsTag(self, tb_name): + ## ts and tinyint col + query_sql = f'select * from {tb_name} 
where ts <= "2021-01-11 12:00:00" and t1 != 2' + tdSql.query(query_sql) + tdSql.checkRows(20) + + ## ts and smallint col + query_sql = f'select * from {tb_name} where ts <= "2021-01-11 12:00:00" and t2 <= 1' + tdSql.query(query_sql) + tdSql.checkRows(5) + + ## ts or and int col + query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00" and t3 = 4' + tdSql.query(query_sql) + tdSql.checkRows(4) + + ## ts or and big col + query_sql = f'select * from {tb_name} where ts is not Null and t4 = 2' + tdSql.query(query_sql) + tdSql.checkRows(11) + + ## ts or and float col + query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00" and t5 = 1.1' + tdSql.query(query_sql) + tdSql.checkRows(4) + + ## ts or and double col + query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00" and t6 = 1.1' + tdSql.query(query_sql) + tdSql.checkRows(4) + tdSql.checkEqual(self.queryLastC10(query_sql), 4) + + ## ts or and binary col + query_sql = f'select * from {tb_name} where ts <= "2021-01-11 12:00:00" and t7 in ("binary1")' + tdSql.query(query_sql) + tdSql.checkRows(5) + + ## ts or and nchar col + query_sql = f'select * from {tb_name} where ts >= "2021-01-11 12:00:00" and t8 is not Null' + tdSql.query(query_sql) + tdSql.checkRows(35) + + ## ts or and bool col + query_sql = f'select * from {tb_name} where ts >= "2021-01-11 12:00:00" and t9=true' + tdSql.query(query_sql) + tdSql.checkRows(35) + + ## multi cols + query_sql = f'select * from {tb_name} where ts > "2021-01-03 12:00:00" and t1 != 2 and t2 >= 2 and t3 <> 4 and t4 < 4 and t5 > 1 and t6 >= 1.1 and t7 is not Null and t8 = "nchar3" and t9=true' + tdSql.query(query_sql) + tdSql.checkRows(10) + + def queryTsColTag(self, tb_name): + ## ts and tinyint col tag + query_sql = f'select * from {tb_name} where ts <= "2021-01-11 12:00:00" and c1 >= 2 and t1 != 2' + tdSql.query(query_sql) + tdSql.checkRows(4) + + ## ts and smallint col tag + query_sql = f'select * from {tb_name} where ts <= "2021-01-11 12:00:00" and c2 >=3 and t2 <= 1' + tdSql.query(query_sql) + tdSql.checkRows(1) + + ## ts or and int col tag + query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00" and c3 < 3 and t3 = 4' + tdSql.query(query_sql) + tdSql.checkRows(3) + + ## ts or and big col tag + query_sql = f'select * from {tb_name} where ts is not Null and c4 <> 1 and t4 = 2' + tdSql.query(query_sql) + tdSql.checkRows(11) + + ## ts or and float col tag + query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00" and c5 is not Null and t5 = 1.1' + tdSql.query(query_sql) + tdSql.checkRows(4) + + ## ts or and double col tag + query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00"and c6 = 1.1 and t6 = 1.1' + tdSql.query(query_sql) + tdSql.checkRows(4) + tdSql.checkEqual(self.queryLastC10(query_sql), 4) + + ## ts or and binary col tag + query_sql = f'select * from {tb_name} where ts <= "2021-01-11 12:00:00" and c7 is Null and t7 in ("binary1")' + tdSql.query(query_sql) + tdSql.checkRows(0) + + ## ts or and nchar col tag + query_sql = f'select * from {tb_name} where ts >= "2021-01-11 12:00:00" and c8 like "nch%" and t8 is not Null' + tdSql.query(query_sql) + tdSql.checkRows(30) + + ## ts or and bool col tag + query_sql = f'select * from {tb_name} where ts >= "2021-01-11 12:00:00" and c9=false and t9=true' + tdSql.query(query_sql) + tdSql.checkRows(10) + + ## multi cols tag + query_sql = f'select * from {tb_name} where ts > "2021-01-03 12:00:00" and c1 = 1 and c2 != 3 and c3 <= 2 and c4 >= 2 and c5 in (1.2, 1.1) and c6 < 2.2 and 
c7 like "bina%" and c8 is not Null and c9 = true and t1 != 2 and t2 >= 2 and t3 <> 4 and t4 < 4 and t5 > 1 and t6 >= 1.1 and t7 is not Null and t8 = "nchar3" and t9=true'
+        tdSql.query(query_sql)
+        tdSql.checkRows(2)
+
+    def queryTinyintCol(self, tb_name, check_elm=None):
+        select_elm = "*" if check_elm is None else check_elm
+        # >
+        query_sql = f'select {select_elm} from {tb_name} where c1 > 1'
+        tdSql.query(query_sql)
+        tdSql.checkRows(1)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 2) if select_elm == "*" else False
+        # >=
+        query_sql = f'select {select_elm} from {tb_name} where c1 >= 2'
+        tdSql.query(query_sql)
+        tdSql.checkRows(1)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 2) if select_elm == "*" else False
+        # <
+        query_sql = f'select {select_elm} from {tb_name} where c1 < 2'
+        tdSql.query(query_sql)
+        tdSql.checkRows(10)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
+        # <=
+        query_sql = f'select {select_elm} from {tb_name} where c1 <= 2'
+        tdSql.query(query_sql)
+        tdSql.checkRows(11)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
+        # =
+        query_sql = f'select {select_elm} from {tb_name} where c1 = 2'
+        tdSql.query(query_sql)
+        tdSql.checkRows(1)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 2) if select_elm == "*" else False
+        # !=
+        query_sql = f'select {select_elm} from {tb_name} where c1 != 1'
+        tdSql.query(query_sql)
+        tdSql.checkRows(1)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 2) if select_elm == "*" else False
+        # <>
+        query_sql = f'select {select_elm} from {tb_name} where c1 <> 2'
+        tdSql.query(query_sql)
+        tdSql.checkRows(10)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
+        # is null
+        query_sql = f'select {select_elm} from {tb_name} where c1 is null'
+        tdSql.query(query_sql)
+        tdSql.checkRows(0)
+        # is not null
+        query_sql = f'select {select_elm} from {tb_name} where c1 is not null'
+        tdSql.query(query_sql)
+        tdSql.checkRows(11)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
+        # between and
+        query_sql = f'select {select_elm} from {tb_name} where c1 between 2 and 3'
+        tdSql.query(query_sql)
+        tdSql.checkRows(1)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 2) if select_elm == "*" else False
+        # not between and
+        query_sql = f'select {select_elm} from {tb_name} where c1 not between 2 and 3'
+        tdSql.query(query_sql)
+        tdSql.checkRows(10)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
+        # in
+        query_sql = f'select {select_elm} from {tb_name} where c1 in (2, 3)'
+        tdSql.query(query_sql)
+        tdSql.checkRows(1)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 2) if select_elm == "*" else False
+        # not in
+        query_sql = f'select {select_elm} from {tb_name} where c1 not in (2, 3)'
+        tdSql.query(query_sql)
+        tdSql.checkRows(10)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
+        # and
+        query_sql = f'select {select_elm} from {tb_name} where c1 > 0 and c1 >= 1 and c1 < 2 and c1 <= 3 and c1 =1 and c1 != 5 and c1 <> 4 and c1 is not null and c1 between 1 and 2 and c1 not between 2 and 3 and c1 in (1,2) and c1 not in (2, 3)'
+        tdSql.query(query_sql)
+        tdSql.checkRows(10)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
+        # or
+        query_sql = f'select {select_elm} from {tb_name} where c1 > 2 or c1 >= 3 or c1 < 1 or c1 <= 0 or c1 =2 or c1 != 1 or c1 <> 1 or c1 is null or c1 between 2 and 3 and c1 not between 1 and 1 and c1 in (2, 3) and c1 not in (1, 2)'
+        tdSql.query(query_sql)
+        tdSql.checkRows(1)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 2) if select_elm == "*" else False
+        # and or
+        query_sql = f'select {select_elm} from {tb_name} where c1 > 2 and c1 >= 3 or c1 < 1 or c1 <= 0 or c1 =2 or c1 != 1 or c1 <> 1 and c1 is null or c1 between 2 and 3 and c1 not between 1 and 1 and c1 in (2, 3) and c1 not in (1, 2)'
+        tdSql.query(query_sql)
+        tdSql.checkRows(1)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 2) if select_elm == "*" else False
+        query_sql = f'select c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13 from {tb_name} where c1 > 2 and c1 >= 3 or c1 < 1 or c1 <= 0 or c1 =2 or c1 != 1 or c1 <> 1 and c1 is null or c1 between 2 and 3 and c1 not between 1 and 1 and c1 in (2, 3) and c1 not in (1, 2)'
+        res = tdSql.query(query_sql)
+        tdSql.checkRows(1)
+
+    def queryUtinyintCol(self, tb_name, check_elm=None):
+        select_elm = "*" if check_elm is None else check_elm
+        # >
+        query_sql = f'select {select_elm} from {tb_name} where c10 > 10'
+        tdSql.query(query_sql)
+        tdSql.checkRows(1)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
+        # >=
+        query_sql = f'select {select_elm} from {tb_name} where c10 >= 10'
+        tdSql.query(query_sql)
+        tdSql.checkRows(2)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
+        # <
+        query_sql = f'select {select_elm} from {tb_name} where c10 < 2'
+        tdSql.query(query_sql)
+        tdSql.checkRows(1)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 1) if select_elm == "*" else False
+        # <=
+        query_sql = f'select {select_elm} from {tb_name} where c10 <= 2'
+        tdSql.query(query_sql)
+        tdSql.checkRows(2)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 2) if select_elm == "*" else False
+        # =
+        query_sql = f'select {select_elm} from {tb_name} where c10 = 2'
+        tdSql.query(query_sql)
+        tdSql.checkRows(1)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 2) if select_elm == "*" else False
+        # !=
+        query_sql = f'select {select_elm} from {tb_name} where c10 != 11'
+        tdSql.query(query_sql)
+        tdSql.checkRows(10)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 10) if select_elm == "*" else False
+        # <>
+        query_sql = f'select {select_elm} from {tb_name} where c10 <> 2'
+        tdSql.query(query_sql)
+        tdSql.checkRows(10)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
+        # is null
+        query_sql = f'select {select_elm} from {tb_name} where c10 is null'
+        tdSql.query(query_sql)
+        tdSql.checkRows(0)
+        # is not null
+        query_sql = f'select {select_elm} from {tb_name} where c10 is not null'
+        tdSql.query(query_sql)
+        tdSql.checkRows(11)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
+        # between and
+        query_sql = f'select {select_elm} from {tb_name} where c10 between 2 and 4'
+        tdSql.query(query_sql)
+        tdSql.checkRows(3)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 4) if select_elm == "*" else False
+        # not between and
+        query_sql = f'select {select_elm} from {tb_name} where c10 not between 2 and 4'
+        tdSql.query(query_sql)
+        tdSql.checkRows(8)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
+        # in
+        query_sql = f'select {select_elm} from {tb_name} where c10 in (2, 3)'
+        tdSql.query(query_sql)
+        tdSql.checkRows(2)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 3) if select_elm == "*" else False
+        # not in
+        query_sql = f'select {select_elm} from {tb_name} where c10 not in (2, 3)'
+        tdSql.query(query_sql)
+        tdSql.checkRows(9)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
+        # and
+        query_sql = f'select {select_elm} from {tb_name} where c10 > 0 and c10 >= 1 and c10 < 2 and c10 <= 3 and c10 =1 and c10 != 5 and c10 <> 4 and c10 is not null and c10 between 1 and 2 and c10 not between 2 and 3 and c10 in (1,2) and c10 not in (2, 3)'
+        tdSql.query(query_sql)
+        tdSql.checkRows(1)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 1) if select_elm == "*" else False
+        # or
+        query_sql = f'select {select_elm} from {tb_name} where c10 > 2 or c10 >= 3 or c10 < 1 or c10 <= 0 or c10 =2 or c10 != 1 or c10 <> 1 or c10 is null or c10 between 2 and 3 or c10 not between 1 and 1 or c10 in (2, 3) or c10 not in (1, 2)'
+        tdSql.query(query_sql)
+        tdSql.checkRows(10)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
+        # and or
+        query_sql = f'select {select_elm} from {tb_name} where c10 > 2 and c10 >= 3 or c10 < 1 or c10 <= 0 or c10 =2 or c10 != 1 or c10 <> 1 and c10 is null or c10 between 2 and 3 and c10 not between 1 and 1 and c10 in (2, 3) and c10 not in (1, 2)'
+        tdSql.query(query_sql)
+        tdSql.checkRows(10)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
+        query_sql = f'select c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13 from {tb_name} where c10 > 2 and c10 >= 3 or c10 < 1 or c10 <= 0 or c10 =2 or c10 != 1 or c10 <> 1 and c10 is null or c10 between 2 and 3 and c10 not between 1 and 1 and c10 in (2, 3) and c10 not in (1, 2)'
+        res = tdSql.query(query_sql)
+        tdSql.checkRows(10)
+
+    def querySmallintCol(self, tb_name, check_elm=None):
+        select_elm = "*" if check_elm is None else check_elm
+        # >
+        query_sql = f'select {select_elm} from {tb_name} where c2 > 2'
+        tdSql.query(query_sql)
+        tdSql.checkRows(2)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
+        # >=
+        query_sql = f'select {select_elm} from {tb_name} where c2 >= 3'
+        tdSql.query(query_sql)
+        tdSql.checkRows(2)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
+        # <
+        query_sql = f'select {select_elm} from {tb_name} where c2 < 3'
+        tdSql.query(query_sql)
+        tdSql.checkRows(9)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 10) if select_elm == "*" else False
+        # <=
+        query_sql = f'select {select_elm} from {tb_name} where c2 <= 3'
+        tdSql.query(query_sql)
+        tdSql.checkRows(11)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
+        # =
+        query_sql = f'select {select_elm} from {tb_name} where c2 = 3'
+        tdSql.query(query_sql)
+        tdSql.checkRows(2)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
+        # !=
+        query_sql = f'select {select_elm} from {tb_name} where c2 != 1'
+        tdSql.query(query_sql)
+        tdSql.checkRows(10)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
+        # <>
+        query_sql = f'select {select_elm} from {tb_name} where c2 <> 2'
+        tdSql.query(query_sql)
+        tdSql.checkRows(3)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
+        # is null
+        query_sql = f'select {select_elm} from {tb_name} where c2 is null'
+        tdSql.query(query_sql)
+        tdSql.checkRows(0)
+        # is not null
+        query_sql = f'select {select_elm} from {tb_name} where c2 is not null'
+        tdSql.query(query_sql)
+        tdSql.checkRows(11)
+        tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
+        # between and
+        query_sql = f'select {select_elm} from {tb_name} where c2 between 2 and 3'
tdSql.query(query_sql) + tdSql.checkRows(10) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # not between and + query_sql = f'select {select_elm} from {tb_name} where c2 not between 2 and 3' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 1) if select_elm == "*" else False + # in + query_sql = f'select {select_elm} from {tb_name} where c2 in (2, 3)' + tdSql.query(query_sql) + tdSql.checkRows(10) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # not in + query_sql = f'select {select_elm} from {tb_name} where c2 not in (2, 3)' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 1) if select_elm == "*" else False + # and + query_sql = f'select {select_elm} from {tb_name} where c2 > 0 and c2 >= 1 and c2 < 4 and c2 <= 3 and c2 != 2 and c2 <> 2 and c2 = 3 and c2 is not null and c2 between 2 and 3 and c2 not between 1 and 2 and c2 in (2,3) and c2 not in (1,2)' + tdSql.query(query_sql) + tdSql.checkRows(2) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # or + query_sql = f'select {select_elm} from {tb_name} where c2 > 4 or c2 >= 3 or c2 < 1 or c2 <= 0 or c2 != 2 or c2 <> 2 or c2 = 3 or c2 is null or c2 between 3 and 4 or c2 not between 1 and 3 or c2 in (3,4) or c2 not in (1,2)' + tdSql.query(query_sql) + tdSql.checkRows(3) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # and or + query_sql = f'select {select_elm} from {tb_name} where c2 > 0 and c2 >= 1 or c2 < 4 and c2 <= 3 and c2 != 1 and c2 <> 2 and c2 = 3 or c2 is not null and c2 between 2 and 3 and c2 not between 1 and 2 and c2 in (2,3) and c2 not in (1,2)' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + query_sql = f'select c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13 from {tb_name} where c2 > 0 and c2 >= 1 or c2 < 4 and c2 <= 3 and c2 != 1 and c2 <> 2 and c2 = 3 or c2 is not null and c2 between 2 and 3 and c2 not between 1 and 2 and c2 in (2,3) and c2 not in (1,2)' + tdSql.query(query_sql) + tdSql.checkRows(11) + + def queryUsmallintCol(self, tb_name, check_elm=None): + select_elm = "*" if check_elm is None else check_elm + # > + query_sql = f'select {select_elm} from {tb_name} where c11 > 11' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # >= + query_sql = f'select {select_elm} from {tb_name} where c11 >= 11' + tdSql.query(query_sql) + tdSql.checkRows(2) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # < + query_sql = f'select {select_elm} from {tb_name} where c11 < 3' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 1) if select_elm == "*" else False + # <= + query_sql = f'select {select_elm} from {tb_name} where c11 <= 3' + tdSql.query(query_sql) + tdSql.checkRows(2) + tdSql.checkEqual(self.queryLastC10(query_sql), 2) if select_elm == "*" else False + # = + query_sql = f'select {select_elm} from {tb_name} where c11 = 3' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 2) if select_elm == "*" else False + # != + query_sql = f'select {select_elm} from {tb_name} where c11 != 1' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" 
else False + # <> + query_sql = f'select {select_elm} from {tb_name} where c11 <> 2' + tdSql.query(query_sql) + tdSql.checkRows(10) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # is null + query_sql = f'select {select_elm} from {tb_name} where c11 is null' + tdSql.query(query_sql) + tdSql.checkRows(0) + # is not null + query_sql = f'select {select_elm} from {tb_name} where c11 is not null' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # between and + query_sql = f'select {select_elm} from {tb_name} where c11 between 2 and 3' + tdSql.query(query_sql) + tdSql.checkRows(2) + tdSql.checkEqual(self.queryLastC10(query_sql), 2) if select_elm == "*" else False + # not between and + query_sql = f'select {select_elm} from {tb_name} where c11 not between 2 and 3' + tdSql.query(query_sql) + tdSql.checkRows(9) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # in + query_sql = f'select {select_elm} from {tb_name} where c11 in (2, 3)' + tdSql.query(query_sql) + tdSql.checkRows(2) + tdSql.checkEqual(self.queryLastC10(query_sql), 2) if select_elm == "*" else False + # not in + query_sql = f'select {select_elm} from {tb_name} where c11 not in (2, 3)' + tdSql.query(query_sql) + tdSql.checkRows(9) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # and + query_sql = f'select {select_elm} from {tb_name} where c11 > 0 and c11 >= 1 and c11 < 4 and c11 <= 3 and c11 != 2 and c11 <> 2 and c11 = 3 and c11 is not null and c11 between 2 and 3 and c11 not between 1 and 2 and c11 in (2,3) and c11 not in (1,2)' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 2) if select_elm == "*" else False + # or + query_sql = f'select {select_elm} from {tb_name} where c11 > 4 or c11 >= 3 or c11 < 1 or c11 <= 0 or c11 != 2 or c11 <> 2 or c11 = 3 or c11 is null or c11 between 3 and 4 or c11 not between 1 and 3 or c11 in (3,4) or c11 not in (1,2)' + tdSql.query(query_sql) + tdSql.checkRows(10) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # and or + query_sql = f'select {select_elm} from {tb_name} where c11 > 0 and c11 >= 1 or c11 < 4 and c11 <= 3 and c11 != 1 and c11 <> 2 and c11 = 3 or c11 is not null and c11 between 2 and 3 and c11 not between 1 and 2 and c11 in (2,3) and c11 not in (1,2)' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + query_sql = f'select c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13 from {tb_name} where c2 > 0 and c2 >= 1 or c2 < 4 and c2 <= 3 and c2 != 1 and c2 <> 2 and c2 = 3 or c2 is not null and c2 between 2 and 3 and c2 not between 1 and 2 and c2 in (2,3) and c2 not in (1,2)' + tdSql.query(query_sql) + tdSql.checkRows(11) + + def queryIntCol(self, tb_name, check_elm=None): + select_elm = "*" if check_elm is None else check_elm + # > + query_sql = f'select {select_elm} from {tb_name} where c3 > 4' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 5) if select_elm == "*" else False + # >= + query_sql = f'select {select_elm} from {tb_name} where c3 >= 4' + tdSql.query(query_sql) + tdSql.checkRows(2) + tdSql.checkEqual(self.queryLastC10(query_sql), 5) if select_elm == "*" else False + # < + query_sql = f'select {select_elm} from {tb_name} where c3 < 4' + tdSql.query(query_sql) + tdSql.checkRows(9) + 
tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # <= + query_sql = f'select {select_elm} from {tb_name} where c3 <= 4' + tdSql.query(query_sql) + tdSql.checkRows(10) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # = + query_sql = f'select {select_elm} from {tb_name} where c3 = 5' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 5) if select_elm == "*" else False + # != + query_sql = f'select {select_elm} from {tb_name} where c3 != 5' + tdSql.query(query_sql) + tdSql.checkRows(10) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # <> + query_sql = f'select {select_elm} from {tb_name} where c3 <> 1' + tdSql.query(query_sql) + tdSql.checkRows(2) + tdSql.checkEqual(self.queryLastC10(query_sql), 5) if select_elm == "*" else False + # is null + query_sql = f'select {select_elm} from {tb_name} where c3 is null' + tdSql.query(query_sql) + tdSql.checkRows(0) + # is not null + query_sql = f'select {select_elm} from {tb_name} where c3 is not null' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # between and + query_sql = f'select {select_elm} from {tb_name} where c3 between 1 and 2' + tdSql.query(query_sql) + tdSql.checkRows(9) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # not between and + query_sql = f'select {select_elm} from {tb_name} where c3 not between 1 and 2' + tdSql.query(query_sql) + tdSql.checkRows(2) + tdSql.checkEqual(self.queryLastC10(query_sql), 5) if select_elm == "*" else False + # in + query_sql = f'select {select_elm} from {tb_name} where c3 in (1, 2)' + tdSql.query(query_sql) + tdSql.checkRows(9) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # not in + query_sql = f'select {select_elm} from {tb_name} where c3 not in (2, 3)' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # and + query_sql = f'select {select_elm} from {tb_name} where c3 > 0 and c3 >= 1 and c3 < 5 and c3 <= 4 and c3 != 2 and c3 <> 2 and c3 = 4 and c3 is not null and c3 between 2 and 4 and c3 not between 1 and 2 and c3 in (2,4) and c3 not in (1,2)' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 4) if select_elm == "*" else False + # or + query_sql = f'select {select_elm} from {tb_name} where c3 > 4 or c3 >= 3 or c3 < 1 or c3 <= 0 or c3 != 1 or c3 <> 1 or c3 = 4 or c3 is null or c3 between 3 and 4 or c3 not between 1 and 3 or c3 in (3,4) or c3 not in (1,2)' + tdSql.query(query_sql) + tdSql.checkRows(2) + tdSql.checkEqual(self.queryLastC10(query_sql), 5) if select_elm == "*" else False + # and or + query_sql = f'select {select_elm} from {tb_name} where c3 > 0 and c3 >= 1 or c3 < 5 and c3 <= 4 and c3 != 2 and c3 <> 2 and c3 = 4 or c3 is not null and c3 between 2 and 4 and c3 not between 1 and 2 and c3 in (2,4) and c3 not in (1,2)' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + query_sql = f'select c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13 from {tb_name} where c3 > 0 and c3 >= 1 or c3 < 5 and c3 <= 4 and c3 != 2 and c3 <> 2 and c3 = 4 or c3 is not null and c3 between 2 and 4 and c3 not between 1 and 2 and c3 in (2,4) and c3 not in (1,2)' + tdSql.query(query_sql) + 
tdSql.checkRows(11) + + def queryUintCol(self, tb_name, check_elm=None): + select_elm = "*" if check_elm is None else check_elm + # > + query_sql = f'select {select_elm} from {tb_name} where c12 > 12' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # >= + query_sql = f'select {select_elm} from {tb_name} where c12 >= 12' + tdSql.query(query_sql) + tdSql.checkRows(2) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # < + query_sql = f'select {select_elm} from {tb_name} where c12 < 4' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 1) if select_elm == "*" else False + # <= + query_sql = f'select {select_elm} from {tb_name} where c12 <= 4' + tdSql.query(query_sql) + tdSql.checkRows(2) + tdSql.checkEqual(self.queryLastC10(query_sql), 2) if select_elm == "*" else False + # = + query_sql = f'select {select_elm} from {tb_name} where c12 = 5' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 3) if select_elm == "*" else False + # != + query_sql = f'select {select_elm} from {tb_name} where c12 != 5' + tdSql.query(query_sql) + tdSql.checkRows(10) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # <> + query_sql = f'select {select_elm} from {tb_name} where c12 <> 1' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # is null + query_sql = f'select {select_elm} from {tb_name} where c12 is null' + tdSql.query(query_sql) + tdSql.checkRows(0) + # is not null + query_sql = f'select {select_elm} from {tb_name} where c12 is not null' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # between and + query_sql = f'select {select_elm} from {tb_name} where c12 between 2 and 3' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 1) if select_elm == "*" else False + # not between and + query_sql = f'select {select_elm} from {tb_name} where c12 not between 1 and 2' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # in + query_sql = f'select {select_elm} from {tb_name} where c12 in (3, 2)' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 1) if select_elm == "*" else False + # not in + query_sql = f'select {select_elm} from {tb_name} where c12 not in (2, 3)' + tdSql.query(query_sql) + tdSql.checkRows(10) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # and + query_sql = f'select {select_elm} from {tb_name} where c12 > 0 and c12 >= 1 and c12 < 5 and c12 <= 4 and c12 != 2 and c12 <> 2 and c12 = 4 and c12 is not null and c12 between 2 and 4 and c12 not between 1 and 2 and c12 in (2,4) and c12 not in (1,2)' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 2) if select_elm == "*" else False + # or + query_sql = f'select {select_elm} from {tb_name} where c12 > 4 or c12 >= 3 or c12 < 1 or c12 <= 0 or c12 != 1 or c12 <> 1 or c12 = 4 or c12 is null or c12 between 3 and 4 or c12 not between 1 and 3 or c12 in (3,4) or c12 not in (1,2)' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else 
False + # and or + query_sql = f'select {select_elm} from {tb_name} where c12 > 0 and c12 >= 1 or c12 < 5 and c12 <= 4 and c12 != 2 and c12 <> 2 and c12 = 4 or c12 is not null and c12 between 2 and 4 and c12 not between 1 and 2 and c12 in (2,4) and c12 not in (1,2)' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + query_sql = f'select c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13 from {tb_name} where c3 > 0 and c3 >= 1 or c3 < 5 and c3 <= 4 and c3 != 2 and c3 <> 2 and c3 = 4 or c3 is not null and c3 between 2 and 4 and c3 not between 1 and 2 and c3 in (2,4) and c3 not in (1,2)' + tdSql.query(query_sql) + tdSql.checkRows(11) + + def queryBigintCol(self, tb_name, check_elm=None): + select_elm = "*" if check_elm is None else check_elm + # > + query_sql = f'select {select_elm} from {tb_name} where c4 > 4' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 5) if select_elm == "*" else False + # >= + query_sql = f'select {select_elm} from {tb_name} where c4 >= 4' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 5) if select_elm == "*" else False + # < + query_sql = f'select {select_elm} from {tb_name} where c4 < 4' + tdSql.query(query_sql) + tdSql.checkRows(10) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # <= + query_sql = f'select {select_elm} from {tb_name} where c4 <= 3' + tdSql.query(query_sql) + tdSql.checkRows(10) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # = + query_sql = f'select {select_elm} from {tb_name} where c4 = 5' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 5) if select_elm == "*" else False + # != + query_sql = f'select {select_elm} from {tb_name} where c4 != 5' + tdSql.query(query_sql) + tdSql.checkRows(10) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # <> + query_sql = f'select {select_elm} from {tb_name} where c4 <> 3' + tdSql.query(query_sql) + tdSql.checkRows(2) + tdSql.checkEqual(self.queryLastC10(query_sql), 5) if select_elm == "*" else False + # is null + query_sql = f'select {select_elm} from {tb_name} where c4 is null' + tdSql.query(query_sql) + tdSql.checkRows(0) + # is not null + query_sql = f'select {select_elm} from {tb_name} where c4 is not null' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # between and + query_sql = f'select {select_elm} from {tb_name} where c4 between 4 and 5' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 5) if select_elm == "*" else False + # not between and + query_sql = f'select {select_elm} from {tb_name} where c4 not between 1 and 3' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 5) if select_elm == "*" else False + # in + query_sql = f'select {select_elm} from {tb_name} where c4 in (1, 5)' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 5) if select_elm == "*" else False + # not in + query_sql = f'select {select_elm} from {tb_name} where c4 not in (2, 3)' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 5) if select_elm == "*" else False + # and + query_sql = f'select {select_elm} from {tb_name} where c4 > 0 and c4 >= 1 
and c4 < 6 and c4 <= 5 and c4 != 2 and c4 <> 2 and c4 = 5 and c4 is not null and c4 between 2 and 5 and c4 not between 1 and 2 and c4 in (2,5) and c4 not in (1,2)' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 5) if select_elm == "*" else False + # or + query_sql = f'select {select_elm} from {tb_name} where c4 > 5 or c4 >= 4 or c4 < 1 or c4 <= 0 or c4 != 3 or c4 <> 3 or c4 = 5 or c4 is null or c4 between 4 and 5 or c4 not between 1 and 3 or c4 in (4,5) or c4 not in (1,3)' + tdSql.query(query_sql) + tdSql.checkRows(2) + tdSql.checkEqual(self.queryLastC10(query_sql), 5) if select_elm == "*" else False + # and or + query_sql = f'select {select_elm} from {tb_name} where c4 > 0 and c4 >= 1 or c4 < 5 and c4 <= 4 and c4 != 2 and c4 <> 2 and c4 = 4 or c4 is not null and c4 between 2 and 4 and c4 not between 1 and 2 and c4 in (2,4) and c4 not in (1,2)' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + query_sql = f'select c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13 from {tb_name} where c4 > 0 and c4 >= 1 or c4 < 5 and c4 <= 4 and c4 != 2 and c4 <> 2 and c4 = 4 or c4 is not null and c4 between 2 and 4 and c4 not between 1 and 2 and c4 in (2,4) and c4 not in (1,2)' + tdSql.query(query_sql) + tdSql.checkRows(11) + + def queryUbigintCol(self, tb_name, check_elm=None): + select_elm = "*" if check_elm is None else check_elm + # > + query_sql = f'select {select_elm} from {tb_name} where c13 > 4' + tdSql.query(query_sql) + tdSql.checkRows(10) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # >= + query_sql = f'select {select_elm} from {tb_name} where c13 >= 4' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # < + query_sql = f'select {select_elm} from {tb_name} where c13 < 5' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 1) if select_elm == "*" else False + # <= + query_sql = f'select {select_elm} from {tb_name} where c13 <= 4' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 1) if select_elm == "*" else False + # = + query_sql = f'select {select_elm} from {tb_name} where c13 = 5' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 2) if select_elm == "*" else False + # != + query_sql = f'select {select_elm} from {tb_name} where c13 != 5' + tdSql.query(query_sql) + tdSql.checkRows(10) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # <> + query_sql = f'select {select_elm} from {tb_name} where c13 <> 3' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # is null + query_sql = f'select {select_elm} from {tb_name} where c13 is null' + tdSql.query(query_sql) + tdSql.checkRows(0) + # is not null + query_sql = f'select {select_elm} from {tb_name} where c13 is not null' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # between and + query_sql = f'select {select_elm} from {tb_name} where c13 between 4 and 5' + tdSql.query(query_sql) + tdSql.checkRows(2) + tdSql.checkEqual(self.queryLastC10(query_sql), 2) if select_elm == "*" else False + # not between and + query_sql = f'select {select_elm} from {tb_name} where c13 not 
between 1 and 3' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # in + query_sql = f'select {select_elm} from {tb_name} where c13 in (1, 5)' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 2) if select_elm == "*" else False + # not in + query_sql = f'select {select_elm} from {tb_name} where c13 not in (2, 6)' + tdSql.query(query_sql) + tdSql.checkRows(10) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # and + query_sql = f'select {select_elm} from {tb_name} where c13 > 0 and c13 >= 1 and c13 < 6 and c13 <= 5 and c13 != 2 and c13 <> 2 and c13 = 5 and c13 is not null and c13 between 2 and 5 and c13 not between 1 and 2 and c13 in (2,5) and c13 not in (1,2)' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 2) if select_elm == "*" else False + # or + query_sql = f'select {select_elm} from {tb_name} where c13 > 5 or c13 >= 4 or c13 < 1 or c13 <= 0 or c13 != 3 or c13 <> 3 or c13 = 5 or c13 is null or c13 between 4 and 5 or c13 not between 1 and 3 or c13 in (4,5) or c13 not in (1,3)' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # and or + query_sql = f'select {select_elm} from {tb_name} where c13 > 0 and c13 >= 1 or c13 < 5 and c13 <= 4 and c13 != 2 and c13 <> 2 and c13 = 4 or c13 is not null and c13 between 2 and 4 and c13 not between 1 and 2 and c13 in (2,4) and c13 not in (1,2)' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + query_sql = f'select c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13 from {tb_name} where c4 > 0 and c4 >= 1 or c4 < 5 and c4 <= 4 and c4 != 2 and c4 <> 2 and c4 = 4 or c4 is not null and c4 between 2 and 4 and c4 not between 1 and 2 and c4 in (2,4) and c4 not in (1,2)' + tdSql.query(query_sql) + tdSql.checkRows(11) + + def queryFloatCol(self, tb_name, check_elm=None): + select_elm = "*" if check_elm is None else check_elm + # > + query_sql = f'select {select_elm} from {tb_name} where c5 > 1.1' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 6) if select_elm == "*" else False + # >= + query_sql = f'select {select_elm} from {tb_name} where c5 >= 1.1' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # < + query_sql = f'select {select_elm} from {tb_name} where c5 < 1.2' + tdSql.query(query_sql) + tdSql.checkRows(10) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # <= + query_sql = f'select {select_elm} from {tb_name} where c5 <= 6.6' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # = + query_sql = f'select {select_elm} from {tb_name} where c5 = 6.6' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 6) if select_elm == "*" else False + # != + query_sql = f'select {select_elm} from {tb_name} where c5 != 1.1' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 6) if select_elm == "*" else False + # <> + query_sql = f'select {select_elm} from {tb_name} where c5 <> 3' + tdSql.query(query_sql) + tdSql.checkRows(11) + 
tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # is null + query_sql = f'select {select_elm} from {tb_name} where c5 is null' + tdSql.query(query_sql) + tdSql.checkRows(0) + # is not null + query_sql = f'select {select_elm} from {tb_name} where c5 is not null' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # between and + query_sql = f'select {select_elm} from {tb_name} where c5 between 4 and 6.6' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 6) if select_elm == "*" else False + # not between and + query_sql = f'select {select_elm} from {tb_name} where c5 not between 2 and 3' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # in + query_sql = f'select {select_elm} from {tb_name} where c5 in (1, 6.6)' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 6) if select_elm == "*" else False + # not in + query_sql = f'select {select_elm} from {tb_name} where c5 not in (2, 3)' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # and + query_sql = f'select {select_elm} from {tb_name} where c5 > 0 and c5 >= 1 and c5 < 7 and c5 <= 6.6 and c5 != 2 and c5 <> 2 and c5 = 6.6 and c5 is not null and c5 between 2 and 6.6 and c5 not between 1 and 2 and c5 in (2,6.6) and c5 not in (1,2)' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 6) if select_elm == "*" else False + # or + query_sql = f'select {select_elm} from {tb_name} where c5 > 6 or c5 >= 6.6 or c5 < 1 or c5 <= 0 or c5 != 1.1 or c5 <> 1.1 or c5 = 5 or c5 is null or c5 between 4 and 5 or c5 not between 1 and 3 or c5 in (4,5) or c5 not in (1.1,3)' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 6) if select_elm == "*" else False + # and or + query_sql = f'select {select_elm} from {tb_name} where c5 > 0 and c5 >= 1 or c5 < 5 and c5 <= 6.6 and c5 != 2 and c5 <> 2 and c5 = 4 or c5 is not null and c5 between 2 and 4 and c5 not between 1 and 2 and c5 in (2,4) and c5 not in (1,2)' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + query_sql = f'select c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13 from {tb_name} where c5 > 0 and c5 >= 1 or c5 < 5 and c5 <= 6.6 and c5 != 2 and c5 <> 2 and c5 = 4 or c5 is not null and c5 between 2 and 4 and c5 not between 1 and 2 and c5 in (2,4) and c5 not in (1,2)' + tdSql.query(query_sql) + tdSql.checkRows(11) + + def queryDoubleCol(self, tb_name, check_elm=None): + select_elm = "*" if check_elm is None else check_elm + # > + query_sql = f'select {select_elm} from {tb_name} where c6 > 1.1' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 7) if select_elm == "*" else False + # >= + query_sql = f'select {select_elm} from {tb_name} where c6 >= 1.1' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # < + query_sql = f'select {select_elm} from {tb_name} where c6 < 1.2' + tdSql.query(query_sql) + tdSql.checkRows(10) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # <= + query_sql = f'select {select_elm} from 
{tb_name} where c6 <= 7.7' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # = + query_sql = f'select {select_elm} from {tb_name} where c6 = 7.7' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 7) if select_elm == "*" else False + # != + query_sql = f'select {select_elm} from {tb_name} where c6 != 1.1' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 7) if select_elm == "*" else False + # <> + query_sql = f'select {select_elm} from {tb_name} where c6 <> 3' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # is null + query_sql = f'select {select_elm} from {tb_name} where c6 is null' + tdSql.query(query_sql) + tdSql.checkRows(0) + # is not null + query_sql = f'select {select_elm} from {tb_name} where c6 is not null' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # between and + query_sql = f'select {select_elm} from {tb_name} where c6 between 4 and 7.7' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 7) if select_elm == "*" else False + # not between and + query_sql = f'select {select_elm} from {tb_name} where c6 not between 2 and 3' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # in + query_sql = f'select {select_elm} from {tb_name} where c6 in (1, 7.7)' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 7) if select_elm == "*" else False + # not in + query_sql = f'select {select_elm} from {tb_name} where c6 not in (2, 3)' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # and + query_sql = f'select {select_elm} from {tb_name} where c6 > 0 and c6 >= 1 and c6 < 8 and c6 <= 7.7 and c6 != 2 and c6 <> 2 and c6 = 7.7 and c6 is not null and c6 between 2 and 7.7 and c6 not between 1 and 2 and c6 in (2,7.7) and c6 not in (1,2)' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 7) if select_elm == "*" else False + # or + query_sql = f'select {select_elm} from {tb_name} where c6 > 7 or c6 >= 7.7 or c6 < 1 or c6 <= 0 or c6 != 1.1 or c6 <> 1.1 or c6 = 5 or c6 is null or c6 between 4 and 5 or c6 not between 1 and 3 or c6 in (4,5) or c6 not in (1.1,3)' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 7) if select_elm == "*" else False + # and or + query_sql = f'select {select_elm} from {tb_name} where c6 > 0 and c6 >= 1 or c6 < 5 and c6 <= 7.7 and c6 != 2 and c6 <> 2 and c6 = 4 or c6 is not null and c6 between 2 and 4 and c6 not between 1 and 2 and c6 in (2,4) and c6 not in (1,2)' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + query_sql = f'select c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13 from {tb_name} where c6 > 0 and c6 >= 1 or c6 < 5 and c6 <= 7.7 and c6 != 2 and c6 <> 2 and c6 = 4 or c6 is not null and c6 between 2 and 4 and c6 not between 1 and 2 and c6 in (2,4) and c6 not in (1,2)' + tdSql.query(query_sql) + tdSql.checkRows(11) + + def queryBinaryCol(self, tb_name, check_elm=None): + select_elm = "*" if check_elm is 
None else check_elm + # > + query_sql = f'select {select_elm} from {tb_name} where c7 > "binary"' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 8) if select_elm == "*" else False + # >= + query_sql = f'select {select_elm} from {tb_name} where c7 >= "binary8"' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 8) if select_elm == "*" else False + # < + query_sql = f'select {select_elm} from {tb_name} where c7 < "binary8"' + tdSql.query(query_sql) + tdSql.checkRows(9) + tdSql.checkEqual(self.queryLastC10(query_sql), 10) if select_elm == "*" else False + # <= + query_sql = f'select {select_elm} from {tb_name} where c7 <= "binary8"' + tdSql.query(query_sql) + tdSql.checkRows(10) + tdSql.checkEqual(self.queryLastC10(query_sql), 10) if select_elm == "*" else False + # = + query_sql = f'select {select_elm} from {tb_name} where c7 = "binary8"' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 8) if select_elm == "*" else False + # != + query_sql = f'select {select_elm} from {tb_name} where c7 != "binary"' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 8) if select_elm == "*" else False + # <> + query_sql = f'select {select_elm} from {tb_name} where c7 <> "binary8"' + tdSql.query(query_sql) + tdSql.checkRows(9) + tdSql.checkEqual(self.queryLastC10(query_sql), 10) if select_elm == "*" else False + # is null + query_sql = f'select {select_elm} from {tb_name} where c7 is null' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # is not null + query_sql = f'select {select_elm} from {tb_name} where c7 is not null' + tdSql.query(query_sql) + tdSql.checkRows(10) + tdSql.checkEqual(self.queryLastC10(query_sql), 10) if select_elm == "*" else False + # between and + query_sql = f'select {select_elm} from {tb_name} where c7 between "bi" and "binary7"' + tdSql.query(query_sql) + tdSql.checkRows(9) + tdSql.checkEqual(self.queryLastC10(query_sql), 10) if select_elm == "*" else False + # not between and + query_sql = f'select {select_elm} from {tb_name} where c7 not between "bi" and "binary7"' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 8) if select_elm == "*" else False + # in + query_sql = f'select {select_elm} from {tb_name} where c7 in ("binar", "binary8")' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 8) if select_elm == "*" else False + # not in + query_sql = f'select {select_elm} from {tb_name} where c7 not in ("bi", "binary8")' + tdSql.query(query_sql) + tdSql.checkRows(9) + tdSql.checkEqual(self.queryLastC10(query_sql), 10) if select_elm == "*" else False + # match + query_sql = f'select {select_elm} from {tb_name} where c7 match "binary[28]"' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 8) if select_elm == "*" else False + # nmatch + query_sql = f'select {select_elm} from {tb_name} where c7 nmatch "binary[28]"' + tdSql.query(query_sql) + tdSql.checkRows(9) + tdSql.checkEqual(self.queryLastC10(query_sql), 10) if select_elm == "*" else False + + # ! 
bug TD-15324 not in + # query_sql = f'select {select_elm} from {tb_name} where c7 not in (1, "binary8")' + # tdSql.query(query_sql) + # tdSql.checkRows(9) + # tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # and + query_sql = f'select {select_elm} from {tb_name} where c7 > "binary" and c7 >= "binary8" and c7 < "binary9" and c7 <= "binary8" and c7 != "binary" and c7 <> "333" and c7 = "binary8" and c7 is not null and c7 between "binary" and "binary8" and c7 not between 1 and 2 and c7 in ("binary","binary8") and c7 not in ("binary") and c7 match "binary[28]" and c7 nmatch "binary[2]"' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 8) if select_elm == "*" else False + # or + query_sql = f'select {select_elm} from {tb_name} where c7 > "binary" or c7 >= "binary8" or c7 < "binary" or c7 <= "binar" or c7 != "binary" or c7 <> "binary" or c7 = 5 or c7 is null or c7 between 4 and 5 or c7 not between "binary" and "binary7" or c7 in ("binary2222") or c7 not in ("binary") or c7 match "binary[28]" or c7 nmatch "binary"' + tdSql.query(query_sql) + tdSql.checkRows(2) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # and or + query_sql = f'select {select_elm} from {tb_name} where c7 > "binary" and c7 >= "binary8" or c7 < "binary9" and c7 <= "binary" and c7 != 2 and c7 <> 2 and c7 = 4 or c7 is not null and c7 between 2 and 4 and c7 not between 1 and 2 and c7 in (2,4) and c7 not in (1,2) or c7 match "binary[28]" or c7 nmatch "binary"' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + query_sql = f'select c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13 from {tb_name} where c7 > "binary" and c7 >= "binary8" or c7 < "binary9" and c7 <= "binary" and c7 != 2 and c7 <> 2 and c7 = 4 or c7 is not null and c7 between 2 and 4 and c7 not between 1 and 2 and c7 in (2,4) and c7 not in (1,2) or c7 match "binary[28]" or c7 nmatch "binary"' + tdSql.query(query_sql) + tdSql.checkRows(11) + + def queryNcharCol(self, tb_name, check_elm=None): + select_elm = "*" if check_elm is None else check_elm + # > + query_sql = f'select {select_elm} from {tb_name} where c8 > "nchar"' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 9) if select_elm == "*" else False + # >= + query_sql = f'select {select_elm} from {tb_name} where c8 >= "nchar9"' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 9) if select_elm == "*" else False + # < + query_sql = f'select {select_elm} from {tb_name} where c8 < "nchar9"' + tdSql.query(query_sql) + tdSql.checkRows(9) + tdSql.checkEqual(self.queryLastC10(query_sql), 10) if select_elm == "*" else False + # <= + query_sql = f'select {select_elm} from {tb_name} where c8 <= "nchar9"' + tdSql.query(query_sql) + tdSql.checkRows(10) + tdSql.checkEqual(self.queryLastC10(query_sql), 10) if select_elm == "*" else False + # = + query_sql = f'select {select_elm} from {tb_name} where c8 = "nchar9"' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 9) if select_elm == "*" else False + # != + query_sql = f'select {select_elm} from {tb_name} where c8 != "nchar"' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 9) if select_elm == "*" else False + # <> + query_sql = f'select {select_elm} from {tb_name} where c8 <> "nchar9"' + 
tdSql.query(query_sql) + tdSql.checkRows(9) + tdSql.checkEqual(self.queryLastC10(query_sql), 10) if select_elm == "*" else False + # is null + query_sql = f'select {select_elm} from {tb_name} where c8 is null' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # is not null + query_sql = f'select {select_elm} from {tb_name} where c8 is not null' + tdSql.query(query_sql) + tdSql.checkRows(10) + tdSql.checkEqual(self.queryLastC10(query_sql), 10) if select_elm == "*" else False + # between and + query_sql = f'select {select_elm} from {tb_name} where c8 between "na" and "nchar8"' + tdSql.query(query_sql) + tdSql.checkRows(9) + tdSql.checkEqual(self.queryLastC10(query_sql), 10) if select_elm == "*" else False + # not between and + query_sql = f'select {select_elm} from {tb_name} where c8 not between "na" and "nchar8"' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 9) if select_elm == "*" else False + # ! bug TD-15240 in + # query_sql = f'select {select_elm} from {tb_name} where c8 in ("ncha", "nchar9")' + # tdSql.query(query_sql) + # tdSql.checkRows(1) + # tdSql.checkEqual(self.queryLastC10(query_sql), 9) if select_elm == "*" else False + # not in + # query_sql = f'select {select_elm} from {tb_name} where c8 not in ("na", "nchar9")' + # tdSql.query(query_sql) + # tdSql.checkRows(9) + # tdSql.checkEqual(self.queryLastC10(query_sql), 10) if select_elm == "*" else False + # ! bug TD-15324 not in + # query_sql = f'select {select_elm} from {tb_name} where c8 not in (1, "nchar9")' + # tdSql.query(query_sql) + # tdSql.checkRows(9) + # tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # match + query_sql = f'select {select_elm} from {tb_name} where c8 match "nchar[19]"' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 9) if select_elm == "*" else False + # nmatch + query_sql = f'select {select_elm} from {tb_name} where c8 nmatch "nchar[19]"' + tdSql.query(query_sql) + tdSql.checkRows(9) + tdSql.checkEqual(self.queryLastC10(query_sql), 10) if select_elm == "*" else False + + # and + # query_sql = f'select {select_elm} from {tb_name} where c8 > "nchar" and c8 >= "nchar8" and c8 < "nchar9" and c8 <= "nchar8" and c8 != "nchar" and c8 <> "333" and c8 = "nchar8" and c8 is not null and c8 between "nchar" and "nchar8" and c8 not between 1 and 2 and c8 in ("nchar","nchar8") and c8 not in ("nchar")' + # tdSql.query(query_sql) + # tdSql.checkRows(1) + # tdSql.checkEqual(self.queryLastC10(query_sql), 8) if select_elm == "*" else False + # # or + # query_sql = f'select {select_elm} from {tb_name} where c8 > "nchar" or c8 >= "nchar8" or c8 < "nchar" or c8 <= "binar" or c8 != "nchar" or c8 <> "nchar" or c8 = 5 or c8 is null or c8 between 4 and 5 or c8 not between "nchar" and "nchar7" or c8 in ("nchar2222") or c8 not in ("nchar")' + # tdSql.query(query_sql) + # tdSql.checkRows(2) + # tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # # and or + # query_sql = f'select {select_elm} from {tb_name} where c8 > "nchar" and c8 >= "nchar8" or c8 < "nchar9" and c8 <= "nchar" and c8 != 2 and c8 <> 2 and c8 = 4 or c8 is not null and c8 between 2 and 4 and c8 not between 1 and 2 and c8 in (2,4) and c8 not in (1,2)' + # tdSql.query(query_sql) + # tdSql.checkRows(11) + # tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # query_sql = f'select 
c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13 from {tb_name} where c8 > "nchar" and c8 >= "nchar8" or c8 < "nchar9" and c8 <= "nchar" and c8 != 2 and c8 <> 2 and c8 = 4 or c8 is not null and c8 between 2 and 4 and c8 not between 1 and 2 and c8 in (2,4) and c8 not in (1,2)' + # tdSql.query(query_sql) + # tdSql.checkRows(11) + + def queryBoolCol(self, tb_name, check_elm=None): + select_elm = "*" if check_elm is None else check_elm + # = + query_sql = f'select {select_elm} from {tb_name} where c9 = false' + tdSql.query(query_sql) + tdSql.checkRows(2) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # != + query_sql = f'select {select_elm} from {tb_name} where c9 != false' + tdSql.query(query_sql) + tdSql.checkRows(9) + tdSql.checkEqual(self.queryLastC10(query_sql), 9) if select_elm == "*" else False + # <> + query_sql = f'select {select_elm} from {tb_name} where c9 <> true' + tdSql.query(query_sql) + tdSql.checkRows(2) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # is null + query_sql = f'select {select_elm} from {tb_name} where c9 is null' + tdSql.query(query_sql) + tdSql.checkRows(0) + # is not null + query_sql = f'select {select_elm} from {tb_name} where c9 is not null' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # in + query_sql = f'select {select_elm} from {tb_name} where c9 in ("binar", false)' + tdSql.query(query_sql) + tdSql.checkRows(2) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # # not in + query_sql = f'select {select_elm} from {tb_name} where c9 not in (true)' + tdSql.query(query_sql) + tdSql.checkRows(2) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + + # # and + query_sql = f'select {select_elm} from {tb_name} where c9 = true and c9 != "false" and c9 <> "binary" and c9 is not null and c9 in ("binary", true) and c9 not in ("binary")' + tdSql.query(query_sql) + tdSql.checkRows(9) + tdSql.checkEqual(self.queryLastC10(query_sql), 9) if select_elm == "*" else False + # # or + query_sql = f'select {select_elm} from {tb_name} where c9 = true or c9 != "false" or c9 <> "binary" or c9 = "true" or c9 is not null or c9 in ("binary", true) or c9 not in ("binary")' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + # # and or + query_sql = f'select {select_elm} from {tb_name} where c9 = true and c9 != "false" or c9 <> "binary" or c9 = "true" and c9 is not null or c9 in ("binary", true) or c9 not in ("binary")' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + query_sql = f'select c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13 from {tb_name} where c9 > "binary" and c9 >= "binary8" or c9 < "binary9" and c9 <= "binary" and c9 != 2 and c9 <> 2 and c9 = 4 or c9 is not null and c9 between 2 and 4 and c9 not between 1 and 2 and c9 in (2,4) and c9 not in (1,2) or c9 match "binary[28]" or c9 nmatch "binary"' + tdSql.query(query_sql) + tdSql.checkRows(11) + + def queryFullColType(self, tb_name, check_elm=None): + select_elm = "*" if check_elm is None else check_elm + ## != or and + query_sql = f'select {select_elm} from {tb_name} where c1 != 1 or c2 = 3' + tdSql.query(query_sql) + tdSql.checkRows(3) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + + query_sql 
= f'select {select_elm} from {tb_name} where c1 != 1 and c2 = 2' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 2) if select_elm == "*" else False + + ## <> or and + query_sql = f'select {select_elm} from {tb_name} where c1 <> 1 or c3 = 3' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 2) if select_elm == "*" else False + + query_sql = f'select {select_elm} from {tb_name} where c1 <> 2 and c3 = 4' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 4) if select_elm == "*" else False + + ## >= or and + query_sql = f'select {select_elm} from {tb_name} where c1 >= 2 or c3 = 4' + tdSql.query(query_sql) + tdSql.checkRows(2) + tdSql.checkEqual(self.queryLastC10(query_sql), 4) if select_elm == "*" else False + + query_sql = f'select {select_elm} from {tb_name} where c1 >= 2 and c3 = 1' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 2) if select_elm == "*" else False + + ## <= or and + query_sql = f'select {select_elm} from {tb_name} where c1 <= 1 or c3 = 4' + tdSql.query(query_sql) + tdSql.checkRows(10) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + + query_sql = f'select {select_elm} from {tb_name} where c1 <= 1 and c3 = 4' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 4) if select_elm == "*" else False + + ## <> or and is Null + query_sql = f'select {select_elm} from {tb_name} where c1 <> 1 or c7 is Null' + tdSql.query(query_sql) + tdSql.checkRows(2) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + + query_sql = f'select {select_elm} from {tb_name} where c1 <> 2 and c7 is Null' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + + ## > or and is not Null + query_sql = f'select {select_elm} from {tb_name} where c2 > 2 or c8 is not Null' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + + query_sql = f'select {select_elm} from {tb_name} where c2 > 2 and c8 is not Null' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 3) if select_elm == "*" else False + + ## > or < or >= or <= or != or <> or = Null + query_sql = f'select {select_elm} from {tb_name} where c1 > 1 or c2 < 2 or c3 >= 4 or c4 <= 2 or c5 != 1.1 or c6 <> 1.1 or c7 is Null' + tdSql.query(query_sql) + tdSql.checkRows(8) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + + query_sql = f'select {select_elm} from {tb_name} where c1 = 1 and c2 > 1 and c3 >= 1 and c4 <= 5 and c5 != 6.6 and c6 <> 7.7 and c7 is Null' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + + ## tiny small int big or + query_sql = f'select {select_elm} from {tb_name} where c1 = 2 or c2 = 3 or c3 = 4 or c4 = 5' + tdSql.query(query_sql) + tdSql.checkRows(5) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + + query_sql = f'select {select_elm} from {tb_name} where c1 = 1 and c2 = 2 and c3 = 1 and c4 = 3' + tdSql.query(query_sql) + tdSql.checkRows(5) + tdSql.checkEqual(self.queryLastC10(query_sql), 10) if select_elm == "*" else False + + ## float double binary nchar bool or + query_sql = f'select 
{select_elm} from {tb_name} where c5=6.6 or c6=7.7 or c7="binary8" or c8="nchar9" or c9=false' + tdSql.query(query_sql) + tdSql.checkRows(6) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + + query_sql = f'select {select_elm} from {tb_name} where c5=1.1 and c6=7.7 and c7="binary" and c8="nchar" and c9=true' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 7) if select_elm == "*" else False + + ## all types or + query_sql = f'select {select_elm} from {tb_name} where c1=2 or c2=3 or c3=4 or c4=5 or c5=6.6 or c6=7.7 or c7 nmatch "binary[134]" or c8="nchar9" or c9=false' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False + + query_sql = f'select {select_elm} from {tb_name} where c1=1 and c2=2 and c3=1 and c4=3 and c5=1.1 and c6=1.1 and c7 match "binary[28]" and c8 in ("nchar") and c9=true' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 8) if select_elm == "*" else False + + query_sql = f'select {select_elm} from {tb_name} where c1=1 and c2=2 or c3=1 and c4=3 and c5=1.1 and c6=1.1 and c7 match "binary[28]" and c8 in ("nchar") and c9=true' + tdSql.query(query_sql) + tdSql.checkRows(7) + tdSql.checkEqual(self.queryLastC10(query_sql), 10) if select_elm == "*" else False + + def queryFullTagType(self, tb_name): + ## != or and + query_sql = f'select * from {tb_name} where t1 != 1 or t2 = 3' + tdSql.query(query_sql) + tdSql.checkRows(44) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) + + query_sql = f'select * from {tb_name} where t1 != 1 and t2 = 2' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) + + ## <> or and + query_sql = f'select * from {tb_name} where t1 <> 1 or t3 = 3' + tdSql.query(query_sql) + tdSql.checkRows(44) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) + + query_sql = f'select * from {tb_name} where t1 <> 2 and t3 = 4' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) + + ## >= or and + query_sql = f'select * from {tb_name} where t1 >= 2 or t3 = 4' + tdSql.query(query_sql) + tdSql.checkRows(44) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) + + query_sql = f'select * from {tb_name} where t1 >= 1 and t3 = 1' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) + + ## <= or and + query_sql = f'select * from {tb_name} where t1 <= 1 or t3 = 4' + tdSql.query(query_sql) + tdSql.checkRows(22) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) + + query_sql = f'select * from {tb_name} where t1 <= 3 and t3 = 2' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) + + ## <> or and is Null + query_sql = f'select * from {tb_name} where t1 <> 1 or t7 is Null' + tdSql.query(query_sql) + tdSql.checkRows(44) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) + + query_sql = f'select * from {tb_name} where t1 <> 2 and t7 is not Null' + tdSql.query(query_sql) + tdSql.checkRows(44) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) + + ## > or and is not Null + query_sql = f'select * from {tb_name} where t2 > 2 or t8 is not Null' + tdSql.query(query_sql) + tdSql.checkRows(55) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) + + query_sql = f'select * from {tb_name} where t2 > 2 and t8 is not Null' + tdSql.query(query_sql) + tdSql.checkRows(33) + 
tdSql.checkEqual(self.queryLastC10(query_sql), 11) + + ## > or < or >= or <= or != or <> or = Null + query_sql = f'select * from {tb_name} where t1 > 1 or t2 < 2 or t3 >= 4 or t4 <= 2 or t5 != 1.1 or t6 <> 1.1 or t7 is Null' + tdSql.query(query_sql) + tdSql.checkRows(55) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) + + query_sql = f'select * from {tb_name} where t1 >= 1 and t2 > 1 and t3 >= 1 and t4 <= 5 and t5 != 6.6 and t6 <> 7.7 and t7 is not Null' + tdSql.query(query_sql) + tdSql.checkRows(44) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) + + ## tiny small int big or and + query_sql = f'select * from {tb_name} where t1 = 2 or t2 = 3 or t3 = 4 or t4 = 5' + tdSql.query(query_sql) + tdSql.checkRows(44) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) + + query_sql = f'select * from {tb_name} where t1 = 1 and t2 = 2 and t3 = 1 and t4 = 3' + tdSql.query(query_sql) + tdSql.checkRows(0) + + ## float double binary nchar bool or and + query_sql = f'select * from {tb_name} where t5=2.2 or t6=7.7 or t7="binary8" or t8="nchar9" or t9=false' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) + + query_sql = f'select * from {tb_name} where t5=2.2 and t6=2.2 and t7="binary2" and t8="nchar2" and t9=true' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) + + ## all types or and + query_sql = f'select * from {tb_name} where t1=2 or t2=3 or t3=4 or t4=5 or t5=6.6 or t6=7.7 or t7 nmatch "binary[134]" or t8="nchar9" or t9=false' + tdSql.query(query_sql) + tdSql.checkRows(44) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) + + query_sql = f'select * from {tb_name} where t1=1 and t2=1 and t3>=1 and t4!=2 and t5=1.1 and t6=1.1 and t7 match "binary[18]" and t8 in ("nchar1") and t9=true' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) + + query_sql = f'select * from {tb_name} where t1=1 and t2=1 or t3>=1 and t4!=2 and t5=1.1 and t6=1.1 and t7 match "binary[18]" and t8 in ("nchar1") and t9=true' + tdSql.query(query_sql) + tdSql.checkRows(11) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) + + def queryColMultiExpression(self, tb_name): + ## condition_A and condition_B or condition_C (> < >=) + query_sql = f'select * from {tb_name} where c1 > 2 and c2 < 4 or c3 >= 4' + tdSql.query(query_sql) + tdSql.checkRows(2) + tdSql.checkEqual(self.queryLastC10(query_sql), 5) + + ## (condition_A and condition_B) or condition_C (<= != <>) + query_sql = f'select * from {tb_name} where (c1 <= 1 and c2 != 2) or c4 <> 3' + tdSql.query(query_sql) + tdSql.checkRows(4) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) + + ## condition_A and (condition_B or condition_C) (Null not Null) + query_sql = f'select * from {tb_name} where c1 is not Null and (c6 = 7.7 or c8 is Null)' + tdSql.query(query_sql) + tdSql.checkRows(2) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) + + ## condition_A or condition_B and condition_C (> < >=) + query_sql = f'select * from {tb_name} where c1 > 2 or c2 < 4 and c3 >= 4' + tdSql.query(query_sql) + tdSql.checkRows(2) + tdSql.checkEqual(self.queryLastC10(query_sql), 5) + + ## (condition_A or condition_B) and condition_C (<= != <>) + query_sql = f'select * from {tb_name} where (c1 <= 1 or c2 != 2) and c4 <> 3' + tdSql.query(query_sql) + tdSql.checkRows(2) + tdSql.checkEqual(self.queryLastC10(query_sql), 5) + + ## condition_A or (condition_B and condition_C) (Null not Null) + query_sql = f'select * from {tb_name} where c6 >= 
7.7 or (c1 is not Null and c3 =5)' + tdSql.query(query_sql) + tdSql.checkRows(2) + tdSql.checkEqual(self.queryLastC10(query_sql), 7) + + ## condition_A or (condition_B and condition_C) or condition_D (> != < Null) + query_sql = f'select * from {tb_name} where c1 != 1 or (c2 >2 and c3 < 1) or c7 is Null' + tdSql.query(query_sql) + tdSql.checkRows(2) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) + + ## condition_A and (condition_B or condition_C) and condition_D (>= = <= not Null) + query_sql = f'select * from {tb_name} where c4 >= 4 and (c1 = 2 or c5 <= 1.1) and c7 is not Null' + tdSql.query(query_sql) + tdSql.checkRows(1) + tdSql.checkEqual(self.queryLastC10(query_sql), 5) + + ## (condition_A and condition_B) or (condition_C or condition_D) (Null >= > =) + query_sql = f'select * from {tb_name} where (c8 is Null and c1 >= 1) or (c3 > 3 or c4 =2)' + tdSql.query(query_sql) + tdSql.checkRows(4) + tdSql.checkEqual(self.queryLastC10(query_sql), 11) + + ## (condition_A or condition_B) or condition_C or (condition_D and condition_E) (>= <= = not Null <>) + query_sql = f'select * from {tb_name} where (c1 >= 2 or c2 <= 1) or c3 = 4 or (c7 is not Null and c6 <> 1.1)' + tdSql.query(query_sql) + tdSql.checkRows(4) + tdSql.checkEqual(self.queryLastC10(query_sql), 7) + + ## condition_A or (condition_B and condition_C) or (condition_D and condition_E) and condition_F + query_sql = f'select * from {tb_name} where c1 != 1 or (c2 <= 1 and c3 <4) or (c3 >= 4 or c7 is not Null) and c9 <> true' + tdSql.query(query_sql) + tdSql.checkRows(3) + tdSql.checkEqual(self.queryLastC10(query_sql), 10) + + ## (condition_A or (condition_B and condition_C) or (condition_D and condition_E)) and condition_F + query_sql = f'select * from {tb_name} where (c1 != 1 or (c2 <= 2 and c3 >= 4) or (c3 >= 4 or c7 is not Null)) and c9 != false' + tdSql.query(query_sql) + tdSql.checkRows(9) + tdSql.checkEqual(self.queryLastC10(query_sql), 9) + + ## (condition_A or condition_B) or (condition_C or condition_D) and (condition_E or condition_F or condition_G) + query_sql = f'select * from {tb_name} where c1 != 1 or (c2 <= 3 and c3 > 4) and c3 <= 5 and (c7 is not Null and c9 != false)' + tdSql.query(query_sql) + tdSql.checkRows(2) + tdSql.checkEqual(self.queryLastC10(query_sql), 5) + + def queryTagMultiExpression(self, tb_name): + ## condition_A and condition_B or condition_C (> < >=) + query_sql = f'select * from {tb_name} where t1 > 2 and t2 < 4 or t3 >= 4' + tdSql.query(query_sql) + tdSql.checkRows(33) + + ## (condition_A and condition_B) or condition_C (<= != <>) + query_sql = f'select * from {tb_name} where (t1 <= 1 and t2 != 2) or t4 <> 3' + tdSql.query(query_sql) + tdSql.checkRows(44) + + ## condition_A and (condition_B or condition_C) (Null not Null) + query_sql = f'select * from {tb_name} where t1 is not Null and (t6 = 7.7 or t8 is not Null)' + tdSql.query(query_sql) + tdSql.checkRows(55) + + ## condition_A or condition_B and condition_C (> < >=) + query_sql = f'select * from {tb_name} where t1 > 2 or t2 < 4 and t3 >= 4' + tdSql.query(query_sql) + tdSql.checkRows(33) + + ## (condition_A or condition_B) and condition_C (<= != <>) + query_sql = f'select * from {tb_name} where (t1 <= 1 or t2 != 2) and t4 <> 3' + tdSql.query(query_sql) + tdSql.checkRows(33) + + ## condition_A or (condition_B and condition_C) (Null not Null) + query_sql = f'select * from {tb_name} where t6 >= 7.7 or (t1 is not Null and t3 =5)' + tdSql.query(query_sql) + tdSql.checkRows(11) + + ## condition_A or (condition_B and condition_C) or condition_D (> != < 
Null) + query_sql = f'select * from {tb_name} where t1 != 1 or (t2 >2 and t3 < 1) or t7 is Null' + tdSql.query(query_sql) + tdSql.checkRows(44) + + ## condition_A and (condition_B or condition_C) and condition_D (>= = <= not Null) + query_sql = f'select * from {tb_name} where t4 >= 2 and (t1 = 2 or t5 <= 1.1) and t7 is not Null' + tdSql.query(query_sql) + tdSql.checkRows(11) + + ## (condition_A and condition_B) or (condition_C or condition_D) (Null >= > =) + query_sql = f'select * from {tb_name} where (t8 is Null and t1 >= 1) or (t3 > 3 or t4 =2)' + tdSql.query(query_sql) + tdSql.checkRows(33) + + ## (condition_A or condition_B) or condition_C or (condition_D and condition_E) (>= <= = not Null <>) + query_sql = f'select * from {tb_name} where (t1 >= 2 or t2 <= 1) or t3 = 4 or (t7 is not Null and t6 <> 1.1)' + tdSql.query(query_sql) + tdSql.checkRows(55) + + ## condition_A or (condition_B and condition_C) or (condition_D and condition_E) and condition_F + query_sql = f'select * from {tb_name} where t1 != 1 or (t2 <= 1 and t3 <4) or (t3 >= 4 or t7 is not Null) and t9 <> true' + tdSql.query(query_sql) + tdSql.checkRows(55) + + ## (condition_A or (condition_B and condition_C) or (condition_D and condition_E)) and condition_F + query_sql = f'select * from {tb_name} where (t1 != 1 or (t2 <= 2 and t3 >= 4) or (t3 >= 4 or t7 is not Null)) and t9 != false' + tdSql.query(query_sql) + tdSql.checkRows(55) + + ## (condition_A or condition_B) or (condition_C or condition_D) and (condition_E or condition_F or condition_G) + query_sql = f'select * from {tb_name} where t1 != 1 or (t2 <= 3 and t3 > 4) and t3 <= 5 and (t7 is not Null and t9 != false)' + tdSql.query(query_sql) + tdSql.checkRows(44) + + def queryColPreCal(self, tb_name): + ## avg sum condition_A or/and condition_B + query_sql = f'select avg(c3), sum(c3) from {tb_name} where c10 = 5 or c8 is Null' + res = tdSql.query(query_sql, True)[0] + tdSql.checkEqual(int(res[0]), 3) + tdSql.checkEqual(int(res[1]), 6) + query_sql = f'select avg(c3), sum(c3) from {tb_name} where c6 = 1.1 and c8 is not Null' + res = tdSql.query(query_sql, True)[0] + tdSql.checkEqual(int(res[1]), 16) + + ## avg sum condition_A or/and condition_B or/and condition_C + query_sql = f'select avg(c3), sum(c3) from {tb_name} where c10 = 4 or c8 is Null or c9 = false ' + res = tdSql.query(query_sql, True)[0] + tdSql.checkEqual(int(res[0]), 2) + tdSql.checkEqual(int(res[1]), 6) + query_sql = f'select avg(c3), sum(c3) from {tb_name} where c6 = 1.1 and c8 is not Null and c9 = false ' + res = tdSql.query(query_sql, True)[0] + tdSql.checkEqual(int(res[0]), 1) + tdSql.checkEqual(int(res[1]), 1) + query_sql = f'select avg(c3), sum(c3) from {tb_name} where c6 = 1.1 and c8 is not Null or c9 = false ' + res = tdSql.query(query_sql, True)[0] + tdSql.checkEqual(int(res[1]), 17) + query_sql = f'select avg(c3), sum(c3) from {tb_name} where c6 = 1.1 or c8 is not Null and c9 = false ' + res = tdSql.query(query_sql, True)[0] + tdSql.checkEqual(int(res[1]), 17) + + ## count avg sum condition_A or/and condition_B or/and condition_C interval + query_sql = f'select count(*), avg(c3), sum(c3) from {tb_name} where c10 = 4 or c8 is Null or c9 = false interval(16d)' + res = tdSql.query(query_sql, True) + tdSql.checkRows(2) + tdSql.checkEqual(int(res[0][1]), 1) + tdSql.checkEqual(int(res[0][2]), 4) + tdSql.checkEqual(int(res[0][3]), 4) + tdSql.checkEqual(int(res[1][1]), 2) + tdSql.checkEqual(int(res[1][2]), 1) + tdSql.checkEqual(int(res[1][3]), 2) + query_sql = f'select count(*), avg(c3), sum(c3) from 
{tb_name} where c6 = 1.1 and c8 is not Null and c9 = false interval(16d)' + res = tdSql.query(query_sql, True) + tdSql.checkRows(1) + tdSql.checkEqual(int(res[0][1]), 1) + tdSql.checkEqual(int(res[0][2]), 1) + tdSql.checkEqual(int(res[0][3]), 1) + + ## count avg sum condition_A or condition_B or in and like or condition_C interval + query_sql = f'select count(*), sum(c3) from {tb_name} where c10 = 4 or c8 is Null or c2 in (1, 2) and c7 like "binary_" or c1 <> 1 interval(16d)' + res = tdSql.query(query_sql, True) + tdSql.checkRows(2) + tdSql.checkEqual(int(res[0][1]), 2) + tdSql.checkEqual(int(res[0][2]), 5) + tdSql.checkEqual(int(res[1][1]), 2) + tdSql.checkEqual(int(res[1][2]), 2) + + def queryTagPreCal(self, tb_name): + ## avg sum condition_A or/and condition_B + query_sql = f'select avg(c3), sum(c3) from {tb_name} where t10 = 5 or t8 is Null' + res = tdSql.query(query_sql, True)[0] + tdSql.checkEqual(int(res[0]), 1) + tdSql.checkEqual(int(res[1]), 18) + query_sql = f'select avg(c3), sum(c3) from {tb_name} where t6 = 1.1 and t8 is not Null' + res = tdSql.query(query_sql, True)[0] + tdSql.checkEqual(int(res[1]), 18) + + ## avg sum condition_A or/and condition_B or/and condition_C + query_sql = f'select avg(c3), sum(c3) from {tb_name} where t10 = 4 or t8 is Null or t9 = true ' + res = tdSql.query(query_sql, True)[0] + tdSql.checkEqual(int(res[0]), 1) + tdSql.checkEqual(int(res[1]), 90) + query_sql = f'select avg(c3), sum(c3) from {tb_name} where t6 = 1.1 and t8 is not Null and t9 = true ' + res = tdSql.query(query_sql, True)[0] + tdSql.checkEqual(int(res[0]), 1) + tdSql.checkEqual(int(res[1]), 18) + query_sql = f'select avg(c3), sum(c3) from {tb_name} where t6 = 1.1 and t8 is not Null or t9 = true ' + res = tdSql.query(query_sql, True)[0] + tdSql.checkEqual(int(res[1]), 90) + query_sql = f'select avg(c3), sum(c3) from {tb_name} where t6 = 1.1 or t8 is not Null and t9 = true ' + res = tdSql.query(query_sql, True)[0] + tdSql.checkEqual(int(res[1]), 90) + + ## count avg sum condition_A or/and condition_B or/and condition_C interval + query_sql = f'select count(*), avg(c3), sum(c3) from {tb_name} where t10 = 4 or t8 is Null or t9 = true interval(16d)' + res = tdSql.query(query_sql, True) + tdSql.checkRows(2) + tdSql.checkEqual(int(res[0][1]), 25) + tdSql.checkEqual(int(res[0][2]), 2) + tdSql.checkEqual(int(res[0][3]), 60) + tdSql.checkEqual(int(res[1][1]), 30) + tdSql.checkEqual(int(res[1][2]), 1) + tdSql.checkEqual(int(res[1][3]), 30) + query_sql = f'select count(*), avg(c3), sum(c3) from {tb_name} where t6 = 1.1 and t8 is not Null and t9 = true interval(16d)' + res = tdSql.query(query_sql, True) + tdSql.checkRows(2) + tdSql.checkEqual(int(res[0][1]), 5) + tdSql.checkEqual(int(res[0][2]), 2) + tdSql.checkEqual(int(res[0][3]), 12) + tdSql.checkEqual(int(res[1][1]), 6) + tdSql.checkEqual(int(res[1][2]), 1) + tdSql.checkEqual(int(res[1][3]), 6) + + ## count avg sum condition_A or condition_B or in and like or condition_C interval + query_sql = f'select count(*), sum(c3) from {tb_name} where t10 = 4 or t8 is Null or t2 in (1, 2) and t7 like "binary_" or t1 <> 1 interval(16d)' + res = tdSql.query(query_sql, True) + tdSql.checkRows(2) + tdSql.checkEqual(int(res[0][1]), 25) + tdSql.checkEqual(int(res[0][2]), 60) + tdSql.checkEqual(int(res[1][1]), 30) + tdSql.checkEqual(int(res[1][2]), 30) + + def queryMultiTb(self, tb_name): + ## select from (condition_A or condition_B) + query_sql = f'select c10 from (select * from {tb_name} where c1 >1 or c2 >=3)' + res = tdSql.query(query_sql, True) + 
tdSql.checkRows(3) + tdSql.checkEqual(int(res[2][0]), 11) + + ## select from (condition_A or condition_B) where condition_A or condition_B + query_sql = f'select c10 from (select * from {tb_name} where c1 >1 or c2 >=3) where c1 =2 or c4 = 2' + res = tdSql.query(query_sql, True) + tdSql.checkRows(2) + tdSql.checkEqual(int(res[1][0]), 3) + + ## select from (condition_A or condition_B and like and in) where condition_A or condition_B or like and in + query_sql = f'select c10 from (select * from {tb_name} where c1 >1 or c2 = 2 and c7 like "binar_" and c4 in (3, 5)) where c1 != 2 or c3 = 1 or c8 like "ncha_" and c9 in (true)' + res = tdSql.query(query_sql, True) + tdSql.checkRows(7) + tdSql.checkEqual(int(res[6][0]), 10) + + ## select count avg sum from (condition_A or condition_B and like and in) where condition_A or condition_B or like and in interval + query_sql = f'select count(*), avg(c6), sum(c3) from (select * from {tb_name} where c1 >1 or c2 = 2 and c7 like "binar_" and c4 in (3, 5)) where c1 != 2 or c3 = 1 or c8 like "ncha_" and c9 in (true) interval(8d)' + res = tdSql.query(query_sql, True) + tdSql.checkRows(3) + tdSql.checkEqual(int(res[0][1]), 3) + tdSql.checkEqual(int(res[0][2]), 1) + tdSql.checkEqual(int(res[0][3]), 10) + tdSql.checkEqual(int(res[1][1]), 3) + tdSql.checkEqual(int(res[1][2]), 3) + tdSql.checkEqual(int(res[1][3]), 3) + tdSql.checkEqual(int(res[2][1]), 1) + tdSql.checkEqual(int(res[2][2]), 1) + tdSql.checkEqual(int(res[2][3]), 1) + + ## cname + query_sql = f'select c10 from (select * from {tb_name} where c1 >1 or c2 = 2 and c7 like "binar_" and c4 in (3, 5)) a where a.c1 != 2 or a.c3 = 1 or a.c8 like "ncha_" and a.c9 in (true)' + res = tdSql.query(query_sql, True) + tdSql.checkRows(7) + tdSql.checkEqual(int(res[6][0]), 10) + + ## multi cname + query_sql = f'select b.c10 from (select * from {tb_name} where c9 = true or c2 = 2) a, (select * from {tb_name} where c7 like "binar_" or c4 in (3, 5)) b where a.ts = b.ts' + res = tdSql.query(query_sql, True) + tdSql.checkRows(10) + tdSql.checkEqual(int(res[9][0]), 10) + + def queryMultiTbWithTag(self, tb_name): + ## select count avg sum from (condition_A or condition_B and like and in) where condition_A or condition_B or condition_tag_C or condition_tag_D or like and in interval + query_sql = f'select count(*), avg(c6), sum(c3) from (select * from {tb_name} where c1 >1 or c2 = 2 and c7 like "binar_" and c4 in (3, 5)) where c1 != 2 or c3 = 1 or t1=2 or t1=3 or c8 like "ncha_" and c9 in (true) interval(8d)' + res = tdSql.query(query_sql, True) + tdSql.checkRows(3) + tdSql.checkEqual(int(res[0][1]), 17) + tdSql.checkEqual(int(res[0][2]), 1) + tdSql.checkEqual(int(res[0][3]), 38) + tdSql.checkEqual(int(res[1][1]), 10) + tdSql.checkEqual(int(res[1][2]), 2) + tdSql.checkEqual(int(res[1][3]), 17) + tdSql.checkEqual(int(res[2][1]), 8) + tdSql.checkEqual(int(res[2][2]), 1) + tdSql.checkEqual(int(res[2][3]), 15) + + ## select count avg sum from (condition_A and condition_B and and line and in and ts and condition_tag_A and condition_tag_B and between) where condition_C orr condition_D or condition_tag_C or condition_tag_D or like and in interval + query_sql = f'select count(*), avg(c6), sum(c3) from (select * from {tb_name} where c1 >= 1 and c2 = 2 and c7 like "binar_" and c4 in (3, 5) and ts > "2021-01-11 12:00:00" and t1 < 2 and t1 > 0 and c6 between 0 and 7) where c1 != 2 or c3 = 1 or t1=2 or t1=3 or c8 like "ncha_" and c9 in (true) interval(8d)' + res = tdSql.query(query_sql, True) + tdSql.checkRows(2) + 
tdSql.checkEqual(int(res[0][1]), 2) + tdSql.checkEqual(int(res[0][2]), 1) + tdSql.checkEqual(int(res[0][3]), 2) + tdSql.checkEqual(int(res[1][1]), 1) + tdSql.checkEqual(int(res[1][2]), 1) + tdSql.checkEqual(int(res[1][3]), 1) + + def queryJoin(self, tb_name): + ## between tss tag + query_sql = f'select stb1.ts, stb2.ts, stb1.t1, stb1.c10 from {tb_name}1 stb1, {tb_name}2 stb2 where stb1.ts = stb2.ts and stb1.ts <= "2021-01-07 12:00:00" and stb2.ts < "2021-01-07 12:00:00" and stb1.t1 = stb2.t1' + res = tdSql.query(query_sql, True) + tdSql.checkRows(2) + tdSql.checkEqual(str(res[0][0]), "2021-01-01 12:00:00") + tdSql.checkEqual(str(res[1][1]), "2021-01-05 12:00:00") + ## between ts tag col + query_sql = f'select stb1.t1, stb2.t1, stb1.c1, stb2.c2 from {tb_name}1 stb1, {tb_name}2 stb2 where stb1.ts = stb2.ts and stb1.t1 = stb2.t1 and stb2.c2 <= 2 and stb1.c1 > 0' + res = tdSql.query(query_sql, True) + tdSql.checkRows(9) + ## between ts tags + query_sql = f'select stb1.t1, stb2.t1, stb1.c1, stb2.c2 from {tb_name}1 stb1, {tb_name}2 stb2 where stb1.ts = stb2.ts and stb1.t1 = stb2.t1 and stb1.t1 = 1 ' + res = tdSql.query(query_sql, True) + tdSql.checkRows(11) + ## between ts tag tbnames + query_sql = f'select stb1.t1, stb2.t1, stb1.c1, stb2.c2 from {tb_name}1 stb1, {tb_name}2 stb2 where stb1.ts = stb2.ts and stb1.t1 = stb2.t1 and stb1.tbname is not Null' + res = tdSql.query(query_sql, True) + tdSql.checkRows(11) + ## between ts col tag tbname + query_sql = f'select stb1.tbname, stb1.t1, stb2.t1, stb1.c1, stb2.c2 from {tb_name}1 stb1, {tb_name}2 stb2 where stb1.ts = stb2.ts and stb1.t1 = stb2.t1 and stb1.tbname is not Null and stb1.c2 = 3' + res = tdSql.query(query_sql, True) + tdSql.checkRows(2) + query_sql = f'select stb1.tbname, stb1.*, stb2.tbname, stb1.* from {tb_name}1 stb1, {tb_name}2 stb2 where stb1.ts = stb2.ts and stb1.t1 = stb2.t1 and (stb1.t2 != 1 or stb1.t3 <= 1) and (stb2.tbname like "{tb_name}%" or stb2.tbname is Null ) and stb1.tbname is not Null and stb2.c2 = 3' + res = tdSql.query(query_sql, True) + tdSql.checkRows(2) + + def checkColType(self, tb_name, check_elm): + self.queryTinyintCol(tb_name, check_elm) + self.queryUtinyintCol(tb_name, check_elm) + self.querySmallintCol(tb_name, check_elm) + self.queryUsmallintCol(tb_name, check_elm) + self.queryIntCol(tb_name, check_elm) + self.queryUintCol(tb_name, check_elm) + self.queryBigintCol(tb_name, check_elm) + self.queryUbigintCol(tb_name, check_elm) + self.queryFloatCol(tb_name, check_elm) + self.queryDoubleCol(tb_name, check_elm) + self.queryBinaryCol(tb_name, check_elm) + self.queryNcharCol(tb_name, check_elm) + self.queryBoolCol(tb_name, check_elm) + self.queryFullColType(tb_name, check_elm) + + def checkTbColTypeOperator(self, check_elm): + ''' + Ordinary table full column type and operator + ''' + tb_name = self.initTb() + self.checkColType(tb_name, check_elm) + + def checkStbColTypeOperator(self, check_elm): + ''' + Super table full column type and operator + ''' + stb_name = self.initStb(count=1) + tb_name = stb_name + "_sub_1" + if check_elm is None: + self.checkColType(tb_name, check_elm) + else: + self.checkColType(stb_name, check_elm) + + def checkStbTagTypeOperator(self): + ''' + Super table full tag type and operator + ''' + tb_name = self.initStb() + self.queryFullTagType(tb_name) + + def checkTbTsCol(self): + ''' + Ordinary table ts and col check + ''' + tb_name = self.initTb() + self.queryTsCol(tb_name) + + def checkStbTsTol(self): + tb_name = self.initStb() + self.queryTsCol(f'{tb_name}_sub_1') + + def 
checkStbTsTag(self): + tb_name = self.initStb() + self.queryTsTag(tb_name) + + def checkStbTsColTag(self): + tb_name = self.initStb() + self.queryTsColTag(tb_name) + + def checkTbMultiExpression(self): + ''' + Ordinary table multiExpression + ''' + tb_name = self.initTb() + self.queryColMultiExpression(tb_name) + + def checkStbMultiExpression(self): + ''' + Super table multiExpression + ''' + tb_name = self.initStb() + self.queryColMultiExpression(f'{tb_name}_sub_1') + self.queryTagMultiExpression(tb_name) + + def checkTbPreCal(self): + ''' + Ordinary table precal + ''' + tb_name = self.initTb() + self.queryColPreCal(tb_name) + + def checkStbPreCal(self): + ''' + Super table precal + ''' + tb_name = self.initStb() + self.queryColPreCal(f'{tb_name}_sub_1') + self.queryTagPreCal(tb_name) + + def checkMultiTb(self): + ''' + test "or" in multi ordinary table + ''' + tb_name = self.initTb() + self.queryMultiTb(tb_name) + + def checkMultiStb(self): + ''' + test "or" in multi super table + ''' + tb_name = self.initStb() + self.queryMultiTb(f'{tb_name}_sub_1') + + def checkMultiTbWithTag(self): + ''' + test Multi tb with tag + ''' + tb_name = self.initStb() + self.queryMultiTbWithTag(tb_name) + + def checkMultiStbJoin(self): + ''' + join test + ''' + tb_name = self.initTwoStb() + self.queryJoin(tb_name) + + def run(self): + tdSql.prepare() + column_name = random.choice(["c1", "c2", "c3", "c4", "c5", "c6", "c7", "c8", "c9", "c10", "c11", "c12", "c13"]) + for check_elm in [None, column_name]: + self.checkTbColTypeOperator(check_elm) + self.checkStbColTypeOperator(check_elm) + # self.checkStbTagTypeOperator() + # self.checkTbTsCol() + # self.checkStbTsTol() + # self.checkStbTsTag() + # self.checkStbTsColTag() + self.checkTbMultiExpression() + # self.checkStbMultiExpression() + # self.checkTbPreCal() + # self.checkStbPreCal() + # self.checkMultiTb() + # self.checkMultiStb() + # self.checkMultiTbWithTag() + # self.checkMultiStbJoin() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/rtrim.py b/tests/system-test/2-query/rtrim.py new file mode 100644 index 0000000000..30624792cc --- /dev/null +++ b/tests/system-test/2-query/rtrim.py @@ -0,0 +1,267 @@ +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + + +PRIMARY_COL = "ts" + +INT_COL = "c1" +BINT_COL = "c2" +SINT_COL = "c3" +TINT_COL = "c4" +FLOAT_COL = "c5" +DOUBLE_COL = "c6" +BOOL_COL = "c7" + +BINARY_COL = "c8" +NCHAR_COL = "c9" +TS_COL = "c10" + +NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ] +CHAR_COL = [ BINARY_COL, NCHAR_COL, ] +BOOLEAN_COL = [ BOOL_COL, ] +TS_TYPE_COL = [ TS_COL, ] + + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + + def __rtrim_condition(self): # sourcery skip: extract-method + rtrim_condition = [] + for char_col in CHAR_COL: + rtrim_condition.extend( + ( + char_col, + f"upper( {char_col} )", + ) + ) + rtrim_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL) + rtrim_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL ) + rtrim_condition.extend( f"concat( cast( {char_col} + {num_col} as binary(16) ), {char_col}) " for num_col in NUM_COL ) + rtrim_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + 
rtrim_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + rtrim_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL ) + # rtrim_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL ) + rtrim_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL ) + rtrim_condition.extend( f"concat( {char_col}, {char_col_2} ) " for char_col_2 in CHAR_COL ) + + for num_col in NUM_COL: + rtrim_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + rtrim_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL ) + + rtrim_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL ) + + rtrim_condition.append(''' " test1234!@#$%^&*() :'> 0 " + return "" + + def __group_condition(self, col, having = ""): + return f" group by {col} having {having}" if having else f" group by {col} " + + def __rtrim_check(self, tbname): + rtrim_condition = self.__rtrim_condition() + for condition in rtrim_condition: + where_condition = self.__where_condition(condition) + rtrim_group_having = self.__group_condition(condition, having=f"{condition} is not null " ) + rtrim_group_no_having= self.__group_condition(condition) + groups = ["", rtrim_group_having, rtrim_group_no_having] + + tdSql.query(f"select rtrim( {condition}) , {condition} from {tbname} ") + for j in range(tdSql.queryRows): + tdSql.checkData(j,0, tdSql.getData(j,1).rstrip()) if tdSql.getData(j,1) else tdSql.checkData(j, 0, None) + + [ tdSql.query(f"select rtrim({condition}) from {tbname} {where_condition} {group} ") for group in groups ] + + + def __rtrim_err_check(self,tbname): + sqls = [] + + for num_col in NUM_COL: + sqls.extend( + ( + f"select rtrim( {num_col} ) from {tbname} ", + f"select rtrim(ceil( {num_col} )) from {tbname} ", + f"select {num_col} from {tbname} group by rtrim( {num_col} ) ", + ) + ) + + sqls.extend( f"select rtrim( {char_col} , {num_col} ) from {tbname} " for char_col in CHAR_COL ) + sqls.extend( f"select rtrim( {num_col} , {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL ) + sqls.extend( f"select rtrim( {num_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL ) + + sqls.extend( f"select rtrim( {ts_col}+{bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL ) + sqls.extend( f"select rtrim( {num_col}+{ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL) + sqls.extend( f"select rtrim( {num_col}+ {bool_col} ) from {tbname} " for num_col in NUM_COL for bool_col in BOOLEAN_COL) + sqls.extend( f"select rtrim( {num_col}+ {num_col} ) from {tbname} " for num_col in NUM_COL for num_col in NUM_COL) + sqls.extend( f"select rtrim( {ts_col}+{ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL for ts_col in TS_TYPE_COL ) + sqls.extend( f"select rtrim( {bool_col}+ {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col in BOOLEAN_COL ) + + sqls.extend( f"select rtrim( {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL ) + sqls.extend( f"select rtrim({num_col}, '1') from {tbname} " for num_col in NUM_COL ) + sqls.extend( f"select rtrim({ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL ) + sqls.extend( f"select rtrim({bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL ) + 
sqls.extend( f"select rtrim({char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL ) + sqls.extend( + ( + f"select rtrim() from {tbname} ", + f"select rtrim(*) from {tbname} ", + f"select rtrim(ccccccc) from {tbname} ", + f"select rtrim(111) from {tbname} ", + ) + ) + + return sqls + + def __test_current(self): # sourcery skip: use-itertools-product + tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") + tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + for tb in tbname: + self.__rtrim_check(tb) + tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========") + + def __test_error(self): + tdLog.printNoPrefix("==========err sql condition check , must return error==========") + tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + + for tb in tbname: + for errsql in self.__rtrim_err_check(tb): + tdSql.error(sql=errsql) + tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========") + + + def all_test(self): + self.__test_current() + self.__test_error() + + + def __create_tb(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + create_stb_sql = f'''create table stb1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) tags (t1 int) + ''' + create_ntb_sql = f'''create table t1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) + ''' + tdSql.execute(create_stb_sql) + tdSql.execute(create_ntb_sql) + + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + def __insert_data(self, rows): + now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) + for i in range(rows): + tdSql.execute( + f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f'''insert into ct1 values + ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) + ''' + ) + + tdSql.execute( + f'''insert into ct4 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} + ) + ( + { now_time + 2592000000 }, 
{pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} + ) + ''' + ) + + tdSql.execute( + f'''insert into ct2 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, + { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, + { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + for i in range(rows): + insert_data = f'''insert into t1 values + ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, + "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) + ''' + tdSql.execute(insert_data) + tdSql.execute( + f'''insert into t1 values + ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, + "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, + "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + def run(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + self.__create_tb() + + tdLog.printNoPrefix("==========step2:insert data") + self.rows = 10 + self.__insert_data(self.rows) + + tdLog.printNoPrefix("==========step3:all check") + self.all_test() + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/substr.py b/tests/system-test/2-query/substr.py new file mode 100644 index 0000000000..e78606826b --- /dev/null +++ b/tests/system-test/2-query/substr.py @@ -0,0 +1,271 @@ +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + + +PRIMARY_COL = "ts" + +INT_COL = "c1" +BINT_COL = "c2" +SINT_COL = "c3" +TINT_COL = "c4" +FLOAT_COL = "c5" +DOUBLE_COL = "c6" +BOOL_COL = "c7" + +BINARY_COL = "c8" +NCHAR_COL = "c9" +TS_COL = "c10" + +NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ] +CHAR_COL = [ BINARY_COL, NCHAR_COL, ] +BOOLEAN_COL = [ BOOL_COL, ] +TS_TYPE_COL = [ TS_COL, ] + + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug(f"start to excute 
{__file__}") + tdSql.init(conn.cursor()) + + def __substr_condition(self): # sourcery skip: extract-method + substr_condition = [] + for char_col in CHAR_COL: + substr_condition.extend( + ( + char_col, + f"upper( {char_col} )", + ) + ) + substr_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL) + substr_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL ) + substr_condition.extend( f"concat( cast( {char_col} + {num_col} as binary(16) ), {char_col}) " for num_col in NUM_COL ) + substr_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + substr_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + substr_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL ) + # substr_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL ) + substr_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL ) + substr_condition.extend( f"concat( {char_col}, {char_col_2} ) " for char_col_2 in CHAR_COL ) + + for num_col in NUM_COL: + substr_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + substr_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL) + + substr_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL ) + + substr_condition.append(''' " test1234!@#$%^&*() :'> 0 " + return "" + + def __group_condition(self, col, having = ""): + return f" group by {col} having {having}" if having else f" group by {col} " + + def __substr_check(self, tbname,pos, lens=None): + substr_condition = self.__substr_condition() + for condition in substr_condition: + where_condition = self.__where_condition(condition) + substr_group_having = self.__group_condition(condition, having=f"{condition} is not null " ) + substr_group_no_having= self.__group_condition(condition) + groups = ["", substr_group_having, substr_group_no_having] + + if pos < 1: + tdSql.error(f"select substr( {condition}, {pos}, {lens}) , {condition} from {tbname} ") + + tdSql.query(f"select substr( {condition}, {pos}, {lens}) , {condition} from {tbname} ") + for j in range(tdSql.queryRows): + tdSql.checkData(j,0, tdSql.getData(j,1)[pos-1:lens]) if tdSql.getData(j,1) else tdSql.checkData(j, 0, None) + + [ tdSql.query(f"select substr({condition}, {pos}, {lens}) from {tbname} {where_condition} {group} ") for group in groups ] + + + def __substr_err_check(self,tbname): + sqls = [] + + for num_col in NUM_COL: + sqls.extend( + ( + f"select substr( {num_col} ) from {tbname} ", + f"select substr(ceil( {num_col} )) from {tbname} ", + f"select {num_col} from {tbname} group by substr( {num_col} ) ", + ) + ) + + sqls.extend( f"select substr( {char_col} + {num_col} ) from {tbname} " for char_col in CHAR_COL ) + sqls.extend( f"select substr( {num_col} , {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL ) + sqls.extend( f"select substr( {num_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL ) + + sqls.extend( f"select substr( {ts_col}+{bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL ) + sqls.extend( f"select substr( {num_col}+{ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL) + sqls.extend( f"select substr( {num_col}+ {bool_col} ) from {tbname} " for 
num_col in NUM_COL for bool_col in BOOLEAN_COL) + sqls.extend( f"select substr( {num_col}+ {num_col} ) from {tbname} " for num_col in NUM_COL for num_col in NUM_COL) + sqls.extend( f"select substr( {ts_col}+{ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL for ts_col in TS_TYPE_COL ) + sqls.extend( f"select substr( {bool_col}+ {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col in BOOLEAN_COL ) + + sqls.extend( f"select substr( {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL ) + sqls.extend( f"select substr({num_col}, '1') from {tbname} " for num_col in NUM_COL ) + sqls.extend( f"select substr({ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL ) + sqls.extend( f"select substr({bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL ) + sqls.extend( f"select substr({char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL ) + sqls.extend( + ( + f"select substr() from {tbname} ", + f"select substr(*) from {tbname} ", + f"select substr(ccccccc) from {tbname} ", + f"select substr(111) from {tbname} ", + ) + ) + + return sqls + + def __test_current(self): # sourcery skip: use-itertools-product + tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") + tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + for tb in tbname: + self.__substr_check(tb, 1, 6) + tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========") + + def __test_error(self): + tdLog.printNoPrefix("==========err sql condition check , must return error==========") + tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + + for tb in tbname: + for errsql in self.__substr_err_check(tb): + tdSql.error(sql=errsql) + self.__substr_check(tb, 0, 6) + tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========") + + + def all_test(self): + self.__test_current() + self.__test_error() + + + def __create_tb(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + create_stb_sql = f'''create table stb1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) tags (t1 int) + ''' + create_ntb_sql = f'''create table t1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) + ''' + tdSql.execute(create_stb_sql) + tdSql.execute(create_ntb_sql) + + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + def __insert_data(self, rows): + now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) + for i in range(rows): + tdSql.execute( + f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + 
tdSql.execute( + f'''insert into ct1 values + ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) + ''' + ) + + tdSql.execute( + f'''insert into ct4 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} + ) + ( + { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} + ) + ''' + ) + + tdSql.execute( + f'''insert into ct2 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, + { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, + { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + for i in range(rows): + insert_data = f'''insert into t1 values + ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, + "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) + ''' + tdSql.execute(insert_data) + tdSql.execute( + f'''insert into t1 values + ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, + "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, + "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + def run(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + self.__create_tb() + + tdLog.printNoPrefix("==========step2:insert data") + self.rows = 10 + self.__insert_data(self.rows) + + tdLog.printNoPrefix("==========step3:all check") + self.all_test() + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + 
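The "limit" rows that __insert_data writes into ct1/ct2/ct4 and t1 use constants chosen to sit just inside the ranges of the numeric column types, which is why values such as pow(2,31)-pow(2,15) or 3.3 * pow(10,38) appear without explanation. A minimal sketch of the intent, assuming the usual INT/BIGINT/SMALLINT/TINYINT/FLOAT/DOUBLE limits; the exact margins below the maxima are the test author's choice and the constant names here are illustrative only:

INT_MAX    = 2**31 - 1            # c1 int:    2**31 - 2**15 stays below this
BIGINT_MAX = 2**63 - 1            # c2 bigint: 2**63 - 2**30 stays below this
SMALLINT_MAX, TINYINT_MAX = 32767, 127   # c3 / c4 use the exact maxima
FLOAT_MAX_APPROX  = 3.4e38        # c5 float:  3.3 * 10**38 is just inside
DOUBLE_MAX_APPROX = 1.7e308       # c6 double: 1.3 * 10**308 is just inside

assert 2**31 - 2**15 < INT_MAX and 2**63 - 2**30 < BIGINT_MAX
assert 3.3e38 < FLOAT_MAX_APPROX and 1.3e308 < DOUBLE_MAX_APPROX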
+tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/upper.py b/tests/system-test/2-query/upper.py index 3c3fddfb45..bb485161dd 100644 --- a/tests/system-test/2-query/upper.py +++ b/tests/system-test/2-query/upper.py @@ -59,12 +59,9 @@ class TDTestCase: groups = ["", group_having, group_no_having] for group_condition in groups: - tdSql.query(f"select {condition} from {tbname} {where_condition} {group_condition} ") - datas = [tdSql.getData(i,0) for i in range(tdSql.queryRows)] - upper_data = [ str(data).upper() if data else None for data in datas ] - tdSql.query(f"select upper( {condition} ) from {tbname} {where_condition} {group_condition}") - for i in range(len(upper_data)): - tdSql.checkData(i, 0, upper_data[i] ) if upper_data[i] else tdSql.checkData(i, 0, None) + tdSql.query(f"select upper( {condition} ), {condition} from {tbname} {where_condition} {group_condition}") + for i in range(tdSql.queryRows): + tdSql.checkData(i, 0, str(tdSql.getData(i, 1)).upper() ) if tdSql.getData(i, 1) else tdSql.checkData(i, 0, None) def __upper_err_check(self,tbname): sqls = [] @@ -229,13 +226,13 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - # tdDnodes.stop(1) - # tdDnodes.start(1) + tdDnodes.stop(1) + tdDnodes.start(1) - # tdSql.execute("use db") + tdSql.execute("use db") - # tdLog.printNoPrefix("==========step4:after wal, all check again ") - # self.all_test() + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() def stop(self): tdSql.close() diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index cb9d472116..f713f707cb 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -6,11 +6,20 @@ python3 ./test.py -f 0-others/taosShell.py python3 ./test.py -f 0-others/taosShellError.py python3 ./test.py -f 0-others/taosShellNetChk.py python3 ./test.py -f 0-others/telemetry.py - +python3 ./test.py -f 0-others/taosdMonitor.py #python3 ./test.py -f 2-query/between.py python3 ./test.py -f 2-query/distinct.py python3 ./test.py -f 2-query/varchar.py +python3 ./test.py -f 2-query/ltrim.py +python3 ./test.py -f 2-query/rtrim.py +python3 ./test.py -f 2-query/length.py +python3 ./test.py -f 2-query/char_length.py +python3 ./test.py -f 2-query/upper.py +python3 ./test.py -f 2-query/lower.py +python3 ./test.py -f 2-query/join.py +# python3 ./test.py -f 2-query/concat.py # after wal ,crash occured +# python3 ./test.py -f 2-query/concat_ws.py python3 ./test.py -f 2-query/timezone.py python3 ./test.py -f 2-query/Now.py @@ -23,8 +32,7 @@ python3 ./test.py -f 2-query/last.py python3 ./test.py -f 2-query/To_unixtimestamp.py python3 ./test.py -f 2-query/timetruncate.py -python3 ./test.py -f 2-query/Timediff.py -# python3 ./test.py -f 2-query/diff.py +# python3 ./test.py -f 2-query/Timediff.py #python3 ./test.py -f 2-query/cast.py
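The new rtrim.py and substr.py tests, like the reworked check in the upper.py hunk above, all validate a SQL scalar string function the same way: select the function result and its raw argument in a single statement, then compare each row of column 0 against a pure-Python reference applied to column 1, treating a NULL argument as a NULL result. A minimal sketch of that shared pattern, assuming the tdSql helper exposed by util.sql in this repo; check_scalar_func and the example table/column names are hypothetical and not part of the patch:

from util.sql import *   # provides the shared tdSql helper used throughout these tests

def check_scalar_func(tbname, expr, sql_func, py_ref):
    # py_ref is the Python reference, e.g. lambda s: s.rstrip() for rtrim
    tdSql.query(f"select {sql_func}( {expr} ), {expr} from {tbname}")
    for i in range(tdSql.queryRows):
        raw = tdSql.getData(i, 1)
        # a NULL (or empty) argument is expected to produce a NULL result
        expected = py_ref(str(raw)) if raw else None
        tdSql.checkData(i, 0, expected)

# usage sketch, against a hypothetical table t1 with a binary column c8:
# check_scalar_func("t1", "c8", "rtrim", lambda s: s.rstrip())
# check_scalar_func("t1", "c8", "upper", lambda s: s.upper())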