diff --git a/cmake/bdb_CMakeLists.txt.in.bak b/cmake/bdb_CMakeLists.txt.in
similarity index 100%
rename from cmake/bdb_CMakeLists.txt.in.bak
rename to cmake/bdb_CMakeLists.txt.in
diff --git a/cmake/cmake.define b/cmake/cmake.define
index 0eb5206aab..4e27ff5f47 100644
--- a/cmake/cmake.define
+++ b/cmake/cmake.define
@@ -14,25 +14,6 @@ MESSAGE(STATUS "Project binary files output path: " ${PROJECT_BINARY_DIR})
MESSAGE(STATUS "Project executable files output path: " ${EXECUTABLE_OUTPUT_PATH})
MESSAGE(STATUS "Project library files output path: " ${LIBRARY_OUTPUT_PATH})
-find_package(Git QUIET)
-if(GIT_FOUND AND EXISTS "${TD_SOURCE_DIR}/.git")
-# Update submodules as needed
- option(GIT_SUBMODULE "Check submodules during build" ON)
- if(GIT_SUBMODULE)
- message(STATUS "Submodule update")
- execute_process(COMMAND cd ${TD_SOURCE_DIR} && ${GIT_EXECUTABLE} submodule update --init --recursive
- WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
- RESULT_VARIABLE GIT_SUBMOD_RESULT)
- if(NOT GIT_SUBMOD_RESULT EQUAL "0")
- message(WARNING "git submodule update --init --recursive failed with ${GIT_SUBMOD_RESULT}, please checkout submodules")
- endif()
- endif()
-endif()
-
-if(NOT EXISTS "${TD_SOURCE_DIR}/tools/taos-tools/CMakeLists.txt")
- message(WARNING "The submodules were not downloaded! GIT_SUBMODULE was turned off or failed. Please update submodules manually if you need build them.")
-endif()
-
if (NOT DEFINED TD_GRANT)
SET(TD_GRANT FALSE)
endif()
diff --git a/cmake/cmake.options b/cmake/cmake.options
index a60f5c7282..d83ab49fd5 100644
--- a/cmake/cmake.options
+++ b/cmake/cmake.options
@@ -78,6 +78,12 @@ option(
OFF
)
+option(
+ BUILD_WITH_BDB
+ "If build with BDB"
+ OFF
+)
+
option(
BUILD_WITH_LUCENE
"If build with lucene"
diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt
index 14a85ee4f6..97bfcfb8c0 100644
--- a/contrib/CMakeLists.txt
+++ b/contrib/CMakeLists.txt
@@ -78,9 +78,9 @@ if(${BUILD_WITH_UV})
endif(${BUILD_WITH_UV})
# bdb
-#if(${BUILD_WITH_BDB})
- #cat("${TD_SUPPORT_DIR}/bdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
-#endif(${BUILD_WITH_BDB})
+if(${BUILD_WITH_BDB})
+ cat("${TD_SUPPORT_DIR}/bdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
+endif(${BUILD_WITH_BDB})
# sqlite
if(${BUILD_WITH_SQLITE})
diff --git a/contrib/test/CMakeLists.txt b/contrib/test/CMakeLists.txt
index 740488b39b..eacaeb9524 100644
--- a/contrib/test/CMakeLists.txt
+++ b/contrib/test/CMakeLists.txt
@@ -7,9 +7,9 @@ if(${BUILD_WITH_LUCENE})
add_subdirectory(lucene)
endif(${BUILD_WITH_LUCENE})
-#if(${BUILD_WITH_BDB})
- #add_subdirectory(bdb)
-#endif(${BUILD_WITH_BDB})
+if(${BUILD_WITH_BDB})
+ add_subdirectory(bdb)
+endif(${BUILD_WITH_BDB})
if(${BUILD_WITH_SQLITE})
add_subdirectory(sqlite)
diff --git a/include/common/taosdef.h b/include/common/taosdef.h
index e1f8832edf..72d2c142d2 100644
--- a/include/common/taosdef.h
+++ b/include/common/taosdef.h
@@ -37,7 +37,8 @@ typedef enum {
TSDB_STREAM_TABLE = 4, // table created from stream computing
TSDB_TEMP_TABLE = 5, // temp table created by nest query
TSDB_SYSTEM_TABLE = 6,
- TSDB_TABLE_MAX = 7
+ TSDB_TSMA_TABLE = 7, // time-range-wise sma
+ TSDB_TABLE_MAX = 8
} ETableType;
typedef enum {
diff --git a/include/common/tmsg.h b/include/common/tmsg.h
index ae21986c56..544af9b6ee 100644
--- a/include/common/tmsg.h
+++ b/include/common/tmsg.h
@@ -1670,6 +1670,7 @@ typedef struct SVDropStbReq {
int32_t tEncodeSVDropStbReq(SEncoder* pCoder, const SVDropStbReq* pReq);
int32_t tDecodeSVDropStbReq(SDecoder* pCoder, SVDropStbReq* pReq);
+// TDMT_VND_CREATE_TABLE ==============
#define TD_CREATE_IF_NOT_EXISTS 0x1
typedef struct SVCreateTbReq {
int32_t flags;
@@ -1759,6 +1760,43 @@ typedef struct {
int32_t tEncodeSVDropTbBatchRsp(SEncoder* pCoder, const SVDropTbBatchRsp* pRsp);
int32_t tDecodeSVDropTbBatchRsp(SDecoder* pCoder, SVDropTbBatchRsp* pRsp);
+// TDMT_VND_ALTER_TABLE =====================
+typedef struct {
+ const char* tbName;
+ int8_t action;
+ const char* colName;
+ // TSDB_ALTER_TABLE_ADD_COLUMN
+ int8_t type;
+ int8_t flags;
+ int32_t bytes;
+ // TSDB_ALTER_TABLE_DROP_COLUMN
+ // TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES
+ int32_t colModBytes;
+ // TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME
+ const char* colNewName;
+ // TSDB_ALTER_TABLE_UPDATE_TAG_VAL
+ const char* tagName;
+ int8_t isNull;
+ uint32_t nTagVal;
+ const uint8_t* pTagVal;
+ // TSDB_ALTER_TABLE_UPDATE_OPTIONS
+ int8_t updateTTL;
+ int32_t newTTL;
+ int8_t updateComment;
+ const char* newComment;
+} SVAlterTbReq;
+
+int32_t tEncodeSVAlterTbReq(SEncoder* pEncoder, const SVAlterTbReq* pReq);
+int32_t tDecodeSVAlterTbReq(SDecoder* pDecoder, SVAlterTbReq* pReq);
+
+typedef struct {
+ int32_t code;
+} SVAlterTbRsp;
+
+int32_t tEncodeSVAlterTbRsp(SEncoder* pEncoder, const SVAlterTbRsp* pRsp);
+int32_t tDecodeSVAlterTbRsp(SDecoder* pDecoder, SVAlterTbRsp* pRsp);
+// ======================
+
typedef struct {
SMsgHead head;
int64_t uid;
@@ -2160,26 +2198,23 @@ int32_t tSerializeSMDropSmaReq(void* buf, int32_t bufLen, SMDropSmaReq* pReq);
int32_t tDeserializeSMDropSmaReq(void* buf, int32_t bufLen, SMDropSmaReq* pReq);
typedef struct {
- int8_t version; // for compatibility(default 0)
- int8_t intervalUnit; // MACRO: TIME_UNIT_XXX
- int8_t slidingUnit; // MACRO: TIME_UNIT_XXX
- int8_t timezoneInt; // sma data expired if timezone changes.
- char indexName[TSDB_INDEX_NAME_LEN];
- int32_t exprLen;
- int32_t tagsFilterLen;
- int64_t indexUid;
- tb_uid_t tableUid; // super/child/common table uid
- int64_t interval;
- int64_t offset; // use unit by precision of DB
- int64_t sliding;
- char* expr; // sma expression
- char* tagsFilter;
+ int8_t version; // for compatibility(default 0)
+ int8_t intervalUnit; // MACRO: TIME_UNIT_XXX
+ int8_t slidingUnit; // MACRO: TIME_UNIT_XXX
+ int8_t timezoneInt; // sma data expired if timezone changes.
+ char indexName[TSDB_INDEX_NAME_LEN];
+ int32_t exprLen;
+ int32_t tagsFilterLen;
+ int64_t indexUid;
+ tb_uid_t tableUid; // super/child/common table uid
+ int64_t interval;
+ int64_t offset; // use unit by precision of DB
+ int64_t sliding;
+ const char* expr; // sma expression
+ const char* tagsFilter;
} STSma; // Time-range-wise SMA
-typedef struct {
- int64_t ver; // use a general definition
- STSma tSma;
-} SVCreateTSmaReq;
+typedef STSma SVCreateTSmaReq;
typedef struct {
int8_t type; // 0 status report, 1 update data
@@ -2188,7 +2223,6 @@ typedef struct {
} STSmaMsg;
typedef struct {
- int64_t ver; // use a general definition
int64_t indexUid;
char indexName[TSDB_INDEX_NAME_LEN];
} SVDropTSmaReq;
@@ -2197,28 +2231,21 @@ typedef struct {
int tmp; // TODO: to avoid compile error
} SVCreateTSmaRsp, SVDropTSmaRsp;
+#if 0
int32_t tSerializeSVCreateTSmaReq(void** buf, SVCreateTSmaReq* pReq);
void* tDeserializeSVCreateTSmaReq(void* buf, SVCreateTSmaReq* pReq);
int32_t tSerializeSVDropTSmaReq(void** buf, SVDropTSmaReq* pReq);
void* tDeserializeSVDropTSmaReq(void* buf, SVDropTSmaReq* pReq);
+#endif
-// RSma: Rollup SMA
-typedef struct {
- int64_t interval;
- int32_t retention; // unit: day
- uint16_t days; // unit: day
- int8_t intervalUnit;
-} SSmaParams;
+int32_t tEncodeSVCreateTSmaReq(SEncoder* pCoder, const SVCreateTSmaReq* pReq);
+int32_t tDecodeSVCreateTSmaReq(SDecoder* pCoder, SVCreateTSmaReq* pReq);
+int32_t tEncodeSVDropTSmaReq(SEncoder* pCoder, const SVDropTSmaReq* pReq);
+int32_t tDecodeSVDropTSmaReq(SDecoder* pCoder, SVDropTSmaReq* pReq);
typedef struct {
- STSma tsma;
- float xFilesFactor;
- SArray* smaParams; // SSmaParams
-} SRSma;
-
-typedef struct {
- uint32_t number;
- STSma* tSma;
+ int32_t number;
+ STSma* tSma;
} STSmaWrapper;
static FORCE_INLINE void tdDestroyTSma(STSma* pSma) {
@@ -2245,96 +2272,26 @@ static FORCE_INLINE void* tdFreeTSmaWrapper(STSmaWrapper* pSW) {
return NULL;
}
-static FORCE_INLINE int32_t tEncodeTSma(void** buf, const STSma* pSma) {
- int32_t tlen = 0;
+int32_t tEncodeSVCreateTSmaReq(SEncoder* pCoder, const SVCreateTSmaReq* pReq);
+int32_t tDecodeSVCreateTSmaReq(SDecoder* pCoder, SVCreateTSmaReq* pReq);
- tlen += taosEncodeFixedI8(buf, pSma->version);
- tlen += taosEncodeFixedI8(buf, pSma->intervalUnit);
- tlen += taosEncodeFixedI8(buf, pSma->slidingUnit);
- tlen += taosEncodeFixedI8(buf, pSma->timezoneInt);
- tlen += taosEncodeString(buf, pSma->indexName);
- tlen += taosEncodeFixedI32(buf, pSma->exprLen);
- tlen += taosEncodeFixedI32(buf, pSma->tagsFilterLen);
- tlen += taosEncodeFixedI64(buf, pSma->indexUid);
- tlen += taosEncodeFixedI64(buf, pSma->tableUid);
- tlen += taosEncodeFixedI64(buf, pSma->interval);
- tlen += taosEncodeFixedI64(buf, pSma->offset);
- tlen += taosEncodeFixedI64(buf, pSma->sliding);
+int32_t tEncodeTSma(SEncoder* pCoder, const STSma* pSma);
+int32_t tDecodeTSma(SDecoder* pCoder, STSma* pSma);
- if (pSma->exprLen > 0) {
- tlen += taosEncodeString(buf, pSma->expr);
+static int32_t tEncodeTSmaWrapper(SEncoder* pEncoder, const STSmaWrapper* pReq) {
+ if (tEncodeI32(pEncoder, pReq->number) < 0) return -1;
+ for (int32_t i = 0; i < pReq->number; ++i) {
+ tEncodeTSma(pEncoder, pReq->tSma + i);
}
-
- if (pSma->tagsFilterLen > 0) {
- tlen += taosEncodeString(buf, pSma->tagsFilter);
- }
-
- return tlen;
+ return 0;
}
-static FORCE_INLINE int32_t tEncodeTSmaWrapper(void** buf, const STSmaWrapper* pSW) {
- int32_t tlen = 0;
-
- tlen += taosEncodeFixedU32(buf, pSW->number);
- for (uint32_t i = 0; i < pSW->number; ++i) {
- tlen += tEncodeTSma(buf, pSW->tSma + i);
+static int32_t tDecodeTSmaWrapper(SDecoder* pDecoder, STSmaWrapper* pReq) {
+ if (tDecodeI32(pDecoder, &pReq->number) < 0) return -1;
+ for (int32_t i = 0; i < pReq->number; ++i) {
+ tDecodeTSma(pDecoder, pReq->tSma + i);
}
- return tlen;
-}
-
-static FORCE_INLINE void* tDecodeTSma(void* buf, STSma* pSma) {
- buf = taosDecodeFixedI8(buf, &pSma->version);
- buf = taosDecodeFixedI8(buf, &pSma->intervalUnit);
- buf = taosDecodeFixedI8(buf, &pSma->slidingUnit);
- buf = taosDecodeFixedI8(buf, &pSma->timezoneInt);
- buf = taosDecodeStringTo(buf, pSma->indexName);
- buf = taosDecodeFixedI32(buf, &pSma->exprLen);
- buf = taosDecodeFixedI32(buf, &pSma->tagsFilterLen);
- buf = taosDecodeFixedI64(buf, &pSma->indexUid);
- buf = taosDecodeFixedI64(buf, &pSma->tableUid);
- buf = taosDecodeFixedI64(buf, &pSma->interval);
- buf = taosDecodeFixedI64(buf, &pSma->offset);
- buf = taosDecodeFixedI64(buf, &pSma->sliding);
-
- if (pSma->exprLen > 0) {
- if ((buf = taosDecodeString(buf, &pSma->expr)) == NULL) {
- tdDestroyTSma(pSma);
- return NULL;
- }
- } else {
- pSma->expr = NULL;
- }
-
- if (pSma->tagsFilterLen > 0) {
- if ((buf = taosDecodeString(buf, &pSma->tagsFilter)) == NULL) {
- tdDestroyTSma(pSma);
- return NULL;
- }
- } else {
- pSma->tagsFilter = NULL;
- }
-
- return buf;
-}
-
-static FORCE_INLINE void* tDecodeTSmaWrapper(void* buf, STSmaWrapper* pSW) {
- buf = taosDecodeFixedU32(buf, &pSW->number);
-
- pSW->tSma = (STSma*)taosMemoryCalloc(pSW->number, sizeof(STSma));
- if (pSW->tSma == NULL) {
- return NULL;
- }
-
- for (uint32_t i = 0; i < pSW->number; ++i) {
- if ((buf = tDecodeTSma(buf, pSW->tSma + i)) == NULL) {
- for (uint32_t j = i; j >= 0; --i) {
- tdDestroyTSma(pSW->tSma + j);
- }
- taosMemoryFree(pSW->tSma);
- return NULL;
- }
- }
- return buf;
+ return 0;
}
typedef struct {
@@ -2574,6 +2531,14 @@ static FORCE_INLINE void tDeleteSMqAskEpRsp(SMqAskEpRsp* pRsp) {
taosArrayDestroyEx(pRsp->topics, (void (*)(void*))tDeleteSMqSubTopicEp);
}
+typedef struct {
+ void* data;
+} SStreamDispatchReq;
+
+typedef struct {
+ int8_t status;
+} SStreamDispatchRsp;
+
#define TD_AUTO_CREATE_TABLE 0x1
typedef struct {
int64_t suid;
diff --git a/include/common/ttime.h b/include/common/ttime.h
index 3de0b98d85..cd704bb1f7 100644
--- a/include/common/ttime.h
+++ b/include/common/ttime.h
@@ -59,10 +59,11 @@ static FORCE_INLINE int64_t taosGetTimestamp(int32_t precision) {
* precision == TSDB_TIME_PRECISION_NANO, it returns timestamp in nanosecond.
*/
static FORCE_INLINE int64_t taosGetTimestampToday(int32_t precision) {
- int64_t factor = (precision == TSDB_TIME_PRECISION_MILLI) ? 1000 :
- (precision == TSDB_TIME_PRECISION_MICRO) ? 1000000 : 1000000000;
- time_t t = taosTime(NULL);
- struct tm * tm= taosLocalTime(&t, NULL);
+ int64_t factor = (precision == TSDB_TIME_PRECISION_MILLI) ? 1000
+ : (precision == TSDB_TIME_PRECISION_MICRO) ? 1000000
+ : 1000000000;
+ time_t t = taosTime(NULL);
+ struct tm* tm = taosLocalTime(&t, NULL);
tm->tm_hour = 0;
tm->tm_min = 0;
tm->tm_sec = 0;
@@ -79,13 +80,13 @@ int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* durati
int32_t taosParseTime(const char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t dayligth);
void deltaToUtcInitOnce();
-char getPrecisionUnit(int32_t precision);
+char getPrecisionUnit(int32_t precision);
int64_t convertTimePrecision(int64_t time, int32_t fromPrecision, int32_t toPrecision);
int64_t convertTimeFromPrecisionToUnit(int64_t time, int32_t fromPrecision, char toUnit);
-int32_t convertStringToTimestamp(int16_t type, char *inputData, int64_t timePrec, int64_t *timeVal);
+int32_t convertStringToTimestamp(int16_t type, char* inputData, int64_t timePrec, int64_t* timeVal);
-void taosFormatUtcTime(char *buf, int32_t bufLen, int64_t time, int32_t precision);
+void taosFormatUtcTime(char* buf, int32_t bufLen, int64_t time, int32_t precision);
#ifdef __cplusplus
}
diff --git a/include/libs/function/function.h b/include/libs/function/function.h
index 8d0b93dde2..616aec8c02 100644
--- a/include/libs/function/function.h
+++ b/include/libs/function/function.h
@@ -309,7 +309,7 @@ void qAddUdfInfo(uint64_t id, struct SUdfInfo* pUdfInfo);
void qRemoveUdfInfo(uint64_t id, struct SUdfInfo* pUdfInfo);
/**
- * create udfd proxy, called once in process that call setupUdf/callUdfxxx/teardownUdf
+ * create udfd proxy, called once in process that call doSetupUdf/callUdfxxx/doTeardownUdf
* @return error code
*/
int32_t udfcOpen();
diff --git a/include/libs/function/tudf.h b/include/libs/function/tudf.h
index b5c38e14f4..6a98138c6c 100644
--- a/include/libs/function/tudf.h
+++ b/include/libs/function/tudf.h
@@ -39,16 +39,6 @@ extern "C" {
//======================================================================================
//begin API to taosd and qworker
-typedef void *UdfcFuncHandle;
-
-/**
- * setup udf
- * @param udf, in
- * @param handle, out
- * @return error code
- */
-int32_t setupUdf(char udfName[], UdfcFuncHandle *handle);
-
typedef struct SUdfColumnMeta {
int16_t type;
int32_t bytes;
@@ -95,32 +85,44 @@ typedef struct SUdfInterBuf {
char* buf;
int8_t numOfResult; //zero or one
} SUdfInterBuf;
+typedef void *UdfcFuncHandle;
+/**
+ * setup udf
+ * @param udf, in
+ * @param funcHandle, out
+ * @return error code
+ */
+int32_t doSetupUdf(char udfName[], UdfcFuncHandle *funcHandle);
// output: interBuf
-int32_t callUdfAggInit(UdfcFuncHandle handle, SUdfInterBuf *interBuf);
+int32_t doCallUdfAggInit(UdfcFuncHandle handle, SUdfInterBuf *interBuf);
// input: block, state
// output: newState
-int32_t callUdfAggProcess(UdfcFuncHandle handle, SSDataBlock *block, SUdfInterBuf *state, SUdfInterBuf *newState);
+int32_t doCallUdfAggProcess(UdfcFuncHandle handle, SSDataBlock *block, SUdfInterBuf *state, SUdfInterBuf *newState);
// input: interBuf
// output: resultData
-int32_t callUdfAggFinalize(UdfcFuncHandle handle, SUdfInterBuf *interBuf, SUdfInterBuf *resultData);
+int32_t doCallUdfAggFinalize(UdfcFuncHandle handle, SUdfInterBuf *interBuf, SUdfInterBuf *resultData);
// input: interbuf1, interbuf2
// output: resultBuf
-int32_t callUdfAggMerge(UdfcFuncHandle handle, SUdfInterBuf *interBuf1, SUdfInterBuf *interBuf2, SUdfInterBuf *resultBuf);
+int32_t doCallUdfAggMerge(UdfcFuncHandle handle, SUdfInterBuf *interBuf1, SUdfInterBuf *interBuf2, SUdfInterBuf *resultBuf);
// input: block
// output: resultData
-int32_t callUdfScalarFunc(UdfcFuncHandle handle, SScalarParam *input, int32_t numOfCols, SScalarParam *output);
+int32_t doCallUdfScalarFunc(UdfcFuncHandle handle, SScalarParam *input, int32_t numOfCols, SScalarParam *output);
/**
* tearn down udf
* @param handle
* @return
*/
-int32_t teardownUdf(UdfcFuncHandle handle);
+int32_t doTeardownUdf(UdfcFuncHandle handle);
bool udfAggGetEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
bool udfAggInit(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo* pResultCellInfo);
int32_t udfAggProcess(struct SqlFunctionCtx *pCtx);
int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock);
+
+int32_t callUdfScalarFunc(char *udfName, SScalarParam *input, int32_t numOfCols, SScalarParam *output);
+
+int32_t cleanUpUdfs();
// end API to taosd and qworker
//=============================================================================================================================
// begin API to UDF writer.
diff --git a/include/libs/qcom/query.h b/include/libs/qcom/query.h
index c390f67153..711db65e97 100644
--- a/include/libs/qcom/query.h
+++ b/include/libs/qcom/query.h
@@ -51,14 +51,12 @@ typedef struct STableComInfo {
} STableComInfo;
typedef struct SIndexMeta {
-
#ifdef WINDOWS
size_t avoidCompilationErrors;
#endif
} SIndexMeta;
-
/*
* ASSERT(sizeof(SCTableMeta) == 24)
* ASSERT(tableType == TSDB_CHILD_TABLE)
@@ -95,7 +93,7 @@ typedef struct SDBVgInfo {
int32_t vgVersion;
int8_t hashMethod;
int32_t numOfTable; // DB's table num, unit is TSDB_TABLE_NUM_UNIT
- SHashObj *vgHash; //key:vgId, value:SVgroupInfo
+ SHashObj* vgHash; // key:vgId, value:SVgroupInfo
} SDBVgInfo;
typedef struct SUseDbOutput {
@@ -135,7 +133,7 @@ typedef struct SMsgSendInfo {
} SMsgSendInfo;
typedef struct SQueryNodeStat {
- int32_t tableNum; // vg table number, unit is TSDB_TABLE_NUM_UNIT
+ int32_t tableNum; // vg table number, unit is TSDB_TABLE_NUM_UNIT
} SQueryNodeStat;
int32_t initTaskQueue();
@@ -172,7 +170,7 @@ const SSchema* tGetTbnameColumnSchema();
bool tIsValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags);
int32_t queryCreateTableMetaFromMsg(STableMetaRsp* msg, bool isSuperTable, STableMeta** pMeta);
-char *jobTaskStatusStr(int32_t status);
+char* jobTaskStatusStr(int32_t status);
SSchema createSchema(int8_t type, int32_t bytes, col_id_t colId, const char* name);
@@ -184,62 +182,87 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
#define SET_META_TYPE_TABLE(t) (t) = META_TYPE_TABLE
#define SET_META_TYPE_BOTH_TABLE(t) (t) = META_TYPE_BOTH_TABLE
-#define NEED_CLIENT_RM_TBLMETA_ERROR(_code) ((_code) == TSDB_CODE_PAR_TABLE_NOT_EXIST || (_code) == TSDB_CODE_VND_TB_NOT_EXIST)
-#define NEED_CLIENT_REFRESH_VG_ERROR(_code) ((_code) == TSDB_CODE_VND_HASH_MISMATCH || (_code) == TSDB_CODE_VND_INVALID_VGROUP_ID)
+#define NEED_CLIENT_RM_TBLMETA_ERROR(_code) \
+ ((_code) == TSDB_CODE_PAR_TABLE_NOT_EXIST || (_code) == TSDB_CODE_VND_TB_NOT_EXIST)
+#define NEED_CLIENT_REFRESH_VG_ERROR(_code) \
+ ((_code) == TSDB_CODE_VND_HASH_MISMATCH || (_code) == TSDB_CODE_VND_INVALID_VGROUP_ID)
#define NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code) ((_code) == TSDB_CODE_TDB_TABLE_RECREATED)
-#define NEED_CLIENT_HANDLE_ERROR(_code) (NEED_CLIENT_RM_TBLMETA_ERROR(_code) || NEED_CLIENT_REFRESH_VG_ERROR(_code) || NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code))
+#define NEED_CLIENT_HANDLE_ERROR(_code) \
+ (NEED_CLIENT_RM_TBLMETA_ERROR(_code) || NEED_CLIENT_REFRESH_VG_ERROR(_code) || \
+ NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code))
-#define NEED_SCHEDULER_RETRY_ERROR(_code) ((_code) == TSDB_CODE_RPC_REDIRECT || (_code) == TSDB_CODE_RPC_NETWORK_UNAVAIL)
+#define NEED_SCHEDULER_RETRY_ERROR(_code) \
+ ((_code) == TSDB_CODE_RPC_REDIRECT || (_code) == TSDB_CODE_RPC_NETWORK_UNAVAIL)
#define REQUEST_MAX_TRY_TIMES 5
-#define qFatal(...) \
- do { \
- if (qDebugFlag & DEBUG_FATAL) { \
- taosPrintLog("QRY FATAL ", DEBUG_FATAL, qDebugFlag, __VA_ARGS__); \
- } \
+#define qFatal(...) \
+ do { \
+ if (qDebugFlag & DEBUG_FATAL) { \
+ taosPrintLog("QRY FATAL ", DEBUG_FATAL, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
+ } \
} while (0)
-#define qError(...) \
- do { \
- if (qDebugFlag & DEBUG_ERROR) { \
- taosPrintLog("QRY ERROR ", DEBUG_ERROR, qDebugFlag, __VA_ARGS__); \
- } \
+#define qError(...) \
+ do { \
+ if (qDebugFlag & DEBUG_ERROR) { \
+ taosPrintLog("QRY ERROR ", DEBUG_ERROR, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
+ } \
} while (0)
-#define qWarn(...) \
- do { \
- if (qDebugFlag & DEBUG_WARN) { \
- taosPrintLog("QRY WARN ", DEBUG_WARN, qDebugFlag, __VA_ARGS__); \
- } \
+#define qWarn(...) \
+ do { \
+ if (qDebugFlag & DEBUG_WARN) { \
+ taosPrintLog("QRY WARN ", DEBUG_WARN, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
+ } \
} while (0)
-#define qInfo(...) \
- do { \
- if (qDebugFlag & DEBUG_INFO) { \
- taosPrintLog("QRY ", DEBUG_INFO, qDebugFlag, __VA_ARGS__); \
- } \
+#define qInfo(...) \
+ do { \
+ if (qDebugFlag & DEBUG_INFO) { \
+ taosPrintLog("QRY ", DEBUG_INFO, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
+ } \
} while (0)
-#define qDebug(...) \
- do { \
- if (qDebugFlag & DEBUG_DEBUG) { \
- taosPrintLog("QRY ", DEBUG_DEBUG, qDebugFlag, __VA_ARGS__); \
- } \
+#define qDebug(...) \
+ do { \
+ if (qDebugFlag & DEBUG_DEBUG) { \
+ taosPrintLog("QRY ", DEBUG_DEBUG, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
+ } \
} while (0)
-#define qTrace(...) \
- do { \
- if (qDebugFlag & DEBUG_TRACE) { \
- taosPrintLog("QRY ", DEBUG_TRACE, qDebugFlag, __VA_ARGS__); \
- } \
+#define qTrace(...) \
+ do { \
+ if (qDebugFlag & DEBUG_TRACE) { \
+ taosPrintLog("QRY ", DEBUG_TRACE, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
+ } \
} while (0)
-#define qDebugL(...) \
- do { \
- if (qDebugFlag & DEBUG_DEBUG) { \
- taosPrintLongString("QRY ", DEBUG_DEBUG, qDebugFlag, __VA_ARGS__); \
- } \
+#define qDebugL(...) \
+ do { \
+ if (qDebugFlag & DEBUG_DEBUG) { \
+ taosPrintLongString("QRY ", DEBUG_DEBUG, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
+ } \
} while (0)
-#define QRY_ERR_RET(c) do { int32_t _code = c; if (_code != TSDB_CODE_SUCCESS) { terrno = _code; return _code; } } while (0)
-#define QRY_RET(c) do { int32_t _code = c; if (_code != TSDB_CODE_SUCCESS) { terrno = _code; } return _code; } while (0)
-#define QRY_ERR_JRET(c) do { code = c; if (code != TSDB_CODE_SUCCESS) { terrno = code; goto _return; } } while (0)
-
+#define QRY_ERR_RET(c) \
+ do { \
+ int32_t _code = c; \
+ if (_code != TSDB_CODE_SUCCESS) { \
+ terrno = _code; \
+ return _code; \
+ } \
+ } while (0)
+#define QRY_RET(c) \
+ do { \
+ int32_t _code = c; \
+ if (_code != TSDB_CODE_SUCCESS) { \
+ terrno = _code; \
+ } \
+ return _code; \
+ } while (0)
+#define QRY_ERR_JRET(c) \
+ do { \
+ code = c; \
+ if (code != TSDB_CODE_SUCCESS) { \
+ terrno = code; \
+ goto _return; \
+ } \
+ } while (0)
#ifdef __cplusplus
}
diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h
index 56e6a39ce8..4460327b88 100644
--- a/include/libs/stream/tstream.h
+++ b/include/libs/stream/tstream.h
@@ -13,6 +13,7 @@
* along with this program. If not, see .
*/
+#include "os.h"
#include "tdatablock.h"
#include "tmsg.h"
#include "tmsgcb.h"
@@ -29,8 +30,23 @@ extern "C" {
typedef struct SStreamTask SStreamTask;
enum {
- STREAM_TASK_STATUS__RUNNING = 1,
- STREAM_TASK_STATUS__STOP,
+ TASK_STATUS__IDLE = 1,
+ TASK_STATUS__EXECUTING,
+ TASK_STATUS__CLOSING,
+};
+
+enum {
+ TASK_INPUT_STATUS__NORMAL = 1,
+ TASK_INPUT_STATUS__BLOCKED,
+ TASK_INPUT_STATUS__RECOVER,
+ TASK_INPUT_STATUS__STOP,
+ TASK_INPUT_STATUS__FAILED,
+};
+
+enum {
+ TASK_OUTPUT_STATUS__NORMAL = 1,
+ TASK_OUTPUT_STATUS__WAIT,
+ TASK_OUTPUT_STATUS__BLOCKED,
};
enum {
@@ -38,10 +54,64 @@ enum {
STREAM_CREATED_BY__SMA,
};
+enum {
+ STREAM_INPUT__DATA_SUBMIT = 1,
+ STREAM_INPUT__DATA_BLOCK,
+ STREAM_INPUT__CHECKPOINT,
+};
+
typedef struct {
- int32_t nodeId; // 0 for snode
- SEpSet epSet;
-} SStreamTaskEp;
+ int8_t type;
+
+ int32_t sourceVg;
+ int64_t sourceVer;
+
+ int32_t* dataRef;
+ SSubmitReq* data;
+} SStreamDataSubmit;
+
+typedef struct {
+ int8_t type;
+
+ int32_t sourceVg;
+ int64_t sourceVer;
+
+ SArray* blocks; // SArray
+} SStreamDataBlock;
+
+typedef struct {
+ int8_t type;
+} SStreamCheckpoint;
+
+static FORCE_INLINE SStreamDataSubmit* streamDataSubmitNew(SSubmitReq* pReq) {
+ SStreamDataSubmit* pDataSubmit = (SStreamDataSubmit*)taosMemoryCalloc(1, sizeof(SStreamDataSubmit));
+ if (pDataSubmit == NULL) return NULL;
+ pDataSubmit->data = pReq;
+ pDataSubmit->dataRef = (int32_t*)taosMemoryMalloc(sizeof(int32_t));
+ if (pDataSubmit->data == NULL) goto FAIL;
+ *pDataSubmit->dataRef = 1;
+ return pDataSubmit;
+FAIL:
+ taosMemoryFree(pDataSubmit);
+ return NULL;
+}
+
+static FORCE_INLINE void streamDataSubmitRefInc(SStreamDataSubmit* pDataSubmit) {
+ //
+ atomic_add_fetch_32(pDataSubmit->dataRef, 1);
+}
+
+static FORCE_INLINE void streamDataSubmitRefDec(SStreamDataSubmit* pDataSubmit) {
+ int32_t ref = atomic_sub_fetch_32(pDataSubmit->dataRef, 1);
+ ASSERT(ref >= 0);
+ if (ref == 0) {
+ taosMemoryFree(pDataSubmit->data);
+ taosMemoryFree(pDataSubmit->dataRef);
+ }
+}
+
+int32_t streamDataBlockEncode(void** buf, const SStreamDataBlock* pOutput);
+void* streamDataBlockDecode(const void* buf, SStreamDataBlock* pInput);
typedef struct {
void* inputHandle;
@@ -122,9 +192,15 @@ enum {
TASK_SINK__FETCH,
};
+enum {
+ TASK_INPUT_TYPE__SUMBIT_BLOCK = 1,
+ TASK_INPUT_TYPE__DATA_BLOCK,
+};
+
struct SStreamTask {
int64_t streamId;
int32_t taskId;
+ int8_t inputType;
int8_t status;
int8_t sourceType;
@@ -155,9 +231,13 @@ struct SStreamTask {
STaskDispatcherShuffle shuffleDispatcher;
};
- // msg buffer
- int32_t memUsed;
+ int8_t inputStatus;
+ int8_t outputStatus;
+
STaosQueue* inputQ;
+ STaosQall* inputQAll;
+ STaosQueue* outputQ;
+ STaosQall* outputQAll;
// application storage
void* ahandle;
@@ -199,10 +279,16 @@ typedef struct {
SArray* res; // SArray
} SStreamSinkReq;
-int32_t streamEnqueueData(SStreamTask* pTask, const void* input, int32_t inputType);
+int32_t streamEnqueueDataSubmit(SStreamTask* pTask, SStreamDataSubmit* input);
+int32_t streamEnqueueDataBlk(SStreamTask* pTask, SStreamDataBlock* input);
+int32_t streamDequeueOutput(SStreamTask* pTask, void** output);
int32_t streamExecTask(SStreamTask* pTask, SMsgCb* pMsgCb, const void* input, int32_t inputType, int32_t workId);
+int32_t streamTaskRun(SStreamTask* pTask);
+
+int32_t streamTaskHandleInput(SStreamTask* pTask, void* data);
+
#ifdef __cplusplus
}
#endif
diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h
index ca10465ed4..551e0fc7b8 100644
--- a/include/libs/sync/sync.h
+++ b/include/libs/sync/sync.h
@@ -144,6 +144,7 @@ int32_t syncInit();
void syncCleanUp();
int64_t syncOpen(const SSyncInfo* pSyncInfo);
void syncStart(int64_t rid);
+void syncStartStandBy(int64_t rid);
void syncStop(int64_t rid);
int32_t syncReconfig(int64_t rid, const SSyncCfg* pSyncCfg);
ESyncState syncGetMyRole(int64_t rid);
diff --git a/include/os/osAtomic.h b/include/os/osAtomic.h
index e2a122a0fe..8600992d68 100644
--- a/include/os/osAtomic.h
+++ b/include/os/osAtomic.h
@@ -23,92 +23,92 @@ extern "C" {
// If the error is in a third-party library, place this header file under the third-party library header file.
// When you want to use this feature, you should find or add the same function in the following section.
#ifndef ALLOW_FORBID_FUNC
- #define __atomic_load_n __ATOMIC_LOAD_N_FUNC_TAOS_FORBID
- #define __atomic_store_n __ATOMIC_STORE_N_FUNC_TAOS_FORBID
- #define __atomic_exchange_n __ATOMIC_EXCHANGE_N_FUNC_TAOS_FORBID
- #define __sync_val_compare_and_swap __SYNC_VAL_COMPARE_AND_SWAP_FUNC_TAOS_FORBID
- #define __atomic_add_fetch __ATOMIC_ADD_FETCH_FUNC_TAOS_FORBID
- #define __atomic_fetch_add __ATOMIC_FETCH_ADD_FUNC_TAOS_FORBID
- #define __atomic_sub_fetch __ATOMIC_SUB_FETCH_FUNC_TAOS_FORBID
- #define __atomic_fetch_sub __ATOMIC_FETCH_SUB_FUNC_TAOS_FORBID
- #define __atomic_and_fetch __ATOMIC_AND_FETCH_FUNC_TAOS_FORBID
- #define __atomic_fetch_and __ATOMIC_FETCH_AND_FUNC_TAOS_FORBID
- #define __atomic_or_fetch __ATOMIC_OR_FETCH_FUNC_TAOS_FORBID
- #define __atomic_fetch_or __ATOMIC_FETCH_OR_FUNC_TAOS_FORBID
- #define __atomic_xor_fetch __ATOMIC_XOR_FETCH_FUNC_TAOS_FORBID
- #define __atomic_fetch_xor __ATOMIC_FETCH_XOR_FUNC_TAOS_FORBID
+#define __atomic_load_n __ATOMIC_LOAD_N_FUNC_TAOS_FORBID
+#define __atomic_store_n __ATOMIC_STORE_N_FUNC_TAOS_FORBID
+#define __atomic_exchange_n __ATOMIC_EXCHANGE_N_FUNC_TAOS_FORBID
+#define __sync_val_compare_and_swap __SYNC_VAL_COMPARE_AND_SWAP_FUNC_TAOS_FORBID
+#define __atomic_add_fetch __ATOMIC_ADD_FETCH_FUNC_TAOS_FORBID
+#define __atomic_fetch_add __ATOMIC_FETCH_ADD_FUNC_TAOS_FORBID
+#define __atomic_sub_fetch __ATOMIC_SUB_FETCH_FUNC_TAOS_FORBID
+#define __atomic_fetch_sub __ATOMIC_FETCH_SUB_FUNC_TAOS_FORBID
+#define __atomic_and_fetch __ATOMIC_AND_FETCH_FUNC_TAOS_FORBID
+#define __atomic_fetch_and __ATOMIC_FETCH_AND_FUNC_TAOS_FORBID
+#define __atomic_or_fetch __ATOMIC_OR_FETCH_FUNC_TAOS_FORBID
+#define __atomic_fetch_or __ATOMIC_FETCH_OR_FUNC_TAOS_FORBID
+#define __atomic_xor_fetch __ATOMIC_XOR_FETCH_FUNC_TAOS_FORBID
+#define __atomic_fetch_xor __ATOMIC_FETCH_XOR_FUNC_TAOS_FORBID
#endif
-int8_t atomic_load_8(int8_t volatile *ptr);
+int8_t atomic_load_8(int8_t volatile *ptr);
int16_t atomic_load_16(int16_t volatile *ptr);
int32_t atomic_load_32(int32_t volatile *ptr);
int64_t atomic_load_64(int64_t volatile *ptr);
-void* atomic_load_ptr(void *ptr);
-void atomic_store_8(int8_t volatile *ptr, int8_t val);
-void atomic_store_16(int16_t volatile *ptr, int16_t val);
-void atomic_store_32(int32_t volatile *ptr, int32_t val);
-void atomic_store_64(int64_t volatile *ptr, int64_t val);
-void atomic_store_ptr(void *ptr, void *val);
-int8_t atomic_exchange_8(int8_t volatile *ptr, int8_t val);
+void *atomic_load_ptr(void *ptr);
+void atomic_store_8(int8_t volatile *ptr, int8_t val);
+void atomic_store_16(int16_t volatile *ptr, int16_t val);
+void atomic_store_32(int32_t volatile *ptr, int32_t val);
+void atomic_store_64(int64_t volatile *ptr, int64_t val);
+void atomic_store_ptr(void *ptr, void *val);
+int8_t atomic_exchange_8(int8_t volatile *ptr, int8_t val);
int16_t atomic_exchange_16(int16_t volatile *ptr, int16_t val);
int32_t atomic_exchange_32(int32_t volatile *ptr, int32_t val);
int64_t atomic_exchange_64(int64_t volatile *ptr, int64_t val);
-void* atomic_exchange_ptr(void *ptr, void *val);
-int8_t atomic_val_compare_exchange_8(int8_t volatile *ptr, int8_t oldval, int8_t newval);
+void *atomic_exchange_ptr(void *ptr, void *val);
+int8_t atomic_val_compare_exchange_8(int8_t volatile *ptr, int8_t oldval, int8_t newval);
int16_t atomic_val_compare_exchange_16(int16_t volatile *ptr, int16_t oldval, int16_t newval);
int32_t atomic_val_compare_exchange_32(int32_t volatile *ptr, int32_t oldval, int32_t newval);
int64_t atomic_val_compare_exchange_64(int64_t volatile *ptr, int64_t oldval, int64_t newval);
-void* atomic_val_compare_exchange_ptr(void *ptr, void *oldval, void *newval);
-int8_t atomic_add_fetch_8(int8_t volatile *ptr, int8_t val);
+void *atomic_val_compare_exchange_ptr(void *ptr, void *oldval, void *newval);
+int8_t atomic_add_fetch_8(int8_t volatile *ptr, int8_t val);
int16_t atomic_add_fetch_16(int16_t volatile *ptr, int16_t val);
int32_t atomic_add_fetch_32(int32_t volatile *ptr, int32_t val);
int64_t atomic_add_fetch_64(int64_t volatile *ptr, int64_t val);
-void* atomic_add_fetch_ptr(void *ptr, void *val);
-int8_t atomic_fetch_add_8(int8_t volatile *ptr, int8_t val);
+void *atomic_add_fetch_ptr(void *ptr, void *val);
+int8_t atomic_fetch_add_8(int8_t volatile *ptr, int8_t val);
int16_t atomic_fetch_add_16(int16_t volatile *ptr, int16_t val);
int32_t atomic_fetch_add_32(int32_t volatile *ptr, int32_t val);
int64_t atomic_fetch_add_64(int64_t volatile *ptr, int64_t val);
-void* atomic_fetch_add_ptr(void *ptr, void *val);
-int8_t atomic_sub_fetch_8(int8_t volatile *ptr, int8_t val);
+void *atomic_fetch_add_ptr(void *ptr, void *val);
+int8_t atomic_sub_fetch_8(int8_t volatile *ptr, int8_t val);
int16_t atomic_sub_fetch_16(int16_t volatile *ptr, int16_t val);
int32_t atomic_sub_fetch_32(int32_t volatile *ptr, int32_t val);
int64_t atomic_sub_fetch_64(int64_t volatile *ptr, int64_t val);
-void* atomic_sub_fetch_ptr(void *ptr, void *val);
-int8_t atomic_fetch_sub_8(int8_t volatile *ptr, int8_t val);
+void *atomic_sub_fetch_ptr(void *ptr, void *val);
+int8_t atomic_fetch_sub_8(int8_t volatile *ptr, int8_t val);
int16_t atomic_fetch_sub_16(int16_t volatile *ptr, int16_t val);
int32_t atomic_fetch_sub_32(int32_t volatile *ptr, int32_t val);
int64_t atomic_fetch_sub_64(int64_t volatile *ptr, int64_t val);
-void* atomic_fetch_sub_ptr(void *ptr, void *val);
-int8_t atomic_and_fetch_8(int8_t volatile *ptr, int8_t val);
+void *atomic_fetch_sub_ptr(void *ptr, void *val);
+int8_t atomic_and_fetch_8(int8_t volatile *ptr, int8_t val);
int16_t atomic_and_fetch_16(int16_t volatile *ptr, int16_t val);
int32_t atomic_and_fetch_32(int32_t volatile *ptr, int32_t val);
int64_t atomic_and_fetch_64(int64_t volatile *ptr, int64_t val);
-void* atomic_and_fetch_ptr(void *ptr, void *val);
-int8_t atomic_fetch_and_8(int8_t volatile *ptr, int8_t val);
+void *atomic_and_fetch_ptr(void *ptr, void *val);
+int8_t atomic_fetch_and_8(int8_t volatile *ptr, int8_t val);
int16_t atomic_fetch_and_16(int16_t volatile *ptr, int16_t val);
int32_t atomic_fetch_and_32(int32_t volatile *ptr, int32_t val);
int64_t atomic_fetch_and_64(int64_t volatile *ptr, int64_t val);
-void* atomic_fetch_and_ptr(void *ptr, void *val);
-int8_t atomic_or_fetch_8(int8_t volatile *ptr, int8_t val);
+void *atomic_fetch_and_ptr(void *ptr, void *val);
+int8_t atomic_or_fetch_8(int8_t volatile *ptr, int8_t val);
int16_t atomic_or_fetch_16(int16_t volatile *ptr, int16_t val);
int32_t atomic_or_fetch_32(int32_t volatile *ptr, int32_t val);
int64_t atomic_or_fetch_64(int64_t volatile *ptr, int64_t val);
-void* atomic_or_fetch_ptr(void *ptr, void *val);
-int8_t atomic_fetch_or_8(int8_t volatile *ptr, int8_t val);
+void *atomic_or_fetch_ptr(void *ptr, void *val);
+int8_t atomic_fetch_or_8(int8_t volatile *ptr, int8_t val);
int16_t atomic_fetch_or_16(int16_t volatile *ptr, int16_t val);
int32_t atomic_fetch_or_32(int32_t volatile *ptr, int32_t val);
int64_t atomic_fetch_or_64(int64_t volatile *ptr, int64_t val);
-void* atomic_fetch_or_ptr(void *ptr, void *val);
-int8_t atomic_xor_fetch_8(int8_t volatile *ptr, int8_t val);
+void *atomic_fetch_or_ptr(void *ptr, void *val);
+int8_t atomic_xor_fetch_8(int8_t volatile *ptr, int8_t val);
int16_t atomic_xor_fetch_16(int16_t volatile *ptr, int16_t val);
int32_t atomic_xor_fetch_32(int32_t volatile *ptr, int32_t val);
int64_t atomic_xor_fetch_64(int64_t volatile *ptr, int64_t val);
-void* atomic_xor_fetch_ptr(void *ptr, void *val);
-int8_t atomic_fetch_xor_8(int8_t volatile *ptr, int8_t val);
+void *atomic_xor_fetch_ptr(void *ptr, void *val);
+int8_t atomic_fetch_xor_8(int8_t volatile *ptr, int8_t val);
int16_t atomic_fetch_xor_16(int16_t volatile *ptr, int16_t val);
int32_t atomic_fetch_xor_32(int32_t volatile *ptr, int32_t val);
int64_t atomic_fetch_xor_64(int64_t volatile *ptr, int64_t val);
-void* atomic_fetch_xor_ptr(void *ptr, void *val);
+void *atomic_fetch_xor_ptr(void *ptr, void *val);
#ifdef __cplusplus
}
diff --git a/include/util/taoserror.h b/include/util/taoserror.h
index b73d39090b..cc10348d24 100644
--- a/include/util/taoserror.h
+++ b/include/util/taoserror.h
@@ -323,6 +323,9 @@ int32_t* taosGetErrno();
#define TSDB_CODE_VND_SMA_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0516)
#define TSDB_CODE_VND_HASH_MISMATCH TAOS_DEF_ERROR_CODE(0, 0x0517)
#define TSDB_CODE_VND_TABLE_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0518)
+#define TSDB_CODE_VND_INVALID_TABLE_ACTION TAOS_DEF_ERROR_CODE(0, 0x0519)
+#define TSDB_CODE_VND_COL_ALREADY_EXISTS TAOS_DEF_ERROR_CODE(0, 0x051a)
+#define TSDB_CODE_VND_TABLE_COL_NOT_EXISTS TAOS_DEF_ERROR_CODE(0, 0x051b)
// tsdb
#define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600)
@@ -354,7 +357,8 @@ int32_t* taosGetErrno();
#define TSDB_CODE_TDB_TABLE_RECREATED TAOS_DEF_ERROR_CODE(0, 0x061A)
#define TSDB_CODE_TDB_TDB_ENV_OPEN_ERROR TAOS_DEF_ERROR_CODE(0, 0x061B)
#define TSDB_CODE_TDB_NO_SMA_INDEX_IN_META TAOS_DEF_ERROR_CODE(0, 0x061C)
-#define TSDB_CODE_TDB_INVALID_SMA_STAT TAOS_DEF_ERROR_CODE(0, 0x062D)
+#define TSDB_CODE_TDB_INVALID_SMA_STAT TAOS_DEF_ERROR_CODE(0, 0x061D)
+#define TSDB_CODE_TDB_TSMA_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x061E)
// query
#define TSDB_CODE_QRY_INVALID_QHANDLE TAOS_DEF_ERROR_CODE(0, 0x0700)
@@ -636,6 +640,9 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_PERMISSION_DENIED TAOS_DEF_ERROR_CODE(0, 0x2644)
#define TSDB_CODE_PAR_INVALID_STREAM_QUERY TAOS_DEF_ERROR_CODE(0, 0x2645)
#define TSDB_CODE_PAR_INVALID_INTERNAL_PK TAOS_DEF_ERROR_CODE(0, 0x2646)
+#define TSDB_CODE_PAR_INVALID_TIMELINE_FUNC TAOS_DEF_ERROR_CODE(0, 0x2647)
+#define TSDB_CODE_PAR_INVALID_PASSWD TAOS_DEF_ERROR_CODE(0, 0x2648)
+#define TSDB_CODE_PAR_INVALID_ALTER_TABLE TAOS_DEF_ERROR_CODE(0, 0x2649)
//planner
#define TSDB_CODE_PLAN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x2700)
@@ -657,6 +664,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_UDF_LOAD_UDF_FAILURE TAOS_DEF_ERROR_CODE(0, 0x2905)
#define TSDB_CODE_UDF_INVALID_STATE TAOS_DEF_ERROR_CODE(0, 0x2906)
#define TSDB_CODE_UDF_INVALID_INPUT TAOS_DEF_ERROR_CODE(0, 0x2907)
+#define TSDB_CODE_UDF_NO_FUNC_HANDLE TAOS_DEF_ERROR_CODE(0, 0x2908)
#define TSDB_CODE_SML_INVALID_PROTOCOL_TYPE TAOS_DEF_ERROR_CODE(0, 0x3000)
#define TSDB_CODE_SML_INVALID_PRECISION_TYPE TAOS_DEF_ERROR_CODE(0, 0x3001)
diff --git a/include/util/tencode.h b/include/util/tencode.h
index 2a43d7934f..e49429f865 100644
--- a/include/util/tencode.h
+++ b/include/util/tencode.h
@@ -18,7 +18,6 @@
#include "tcoding.h"
#include "tlist.h"
-// #include "tfreelist.h"
#ifdef __cplusplus
extern "C" {
diff --git a/include/util/tfreelist.h b/include/util/tfreelist.h
deleted file mode 100644
index e9b5ca5fca..0000000000
--- a/include/util/tfreelist.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-#ifndef _TD_UTIL_FREELIST_H_
-#define _TD_UTIL_FREELIST_H_
-
-#include "tlist.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct SFreeListNode {
- TD_SLIST_NODE(SFreeListNode);
- char payload[];
-};
-
-typedef TD_SLIST(SFreeListNode) SFreeList;
-
-#define TFL_MALLOC(PTR, TYPE, SIZE, LIST) \
- do { \
- void *ptr = taosMemoryMalloc((SIZE) + sizeof(struct SFreeListNode)); \
- if (ptr) { \
- TD_SLIST_PUSH((LIST), (struct SFreeListNode *)ptr); \
- ptr = ((struct SFreeListNode *)ptr)->payload; \
- (PTR) = (TYPE)(ptr); \
- }else{ \
- (PTR) = NULL; \
- } \
- }while(0);
-
-#define tFreeListInit(pFL) TD_SLIST_INIT(pFL)
-
-static FORCE_INLINE void tFreeListClear(SFreeList *pFL) {
- struct SFreeListNode *pNode;
- for (;;) {
- pNode = TD_SLIST_HEAD(pFL);
- if (pNode == NULL) break;
- TD_SLIST_POP(pFL);
- taosMemoryFree(pNode);
- }
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /*_TD_UTIL_FREELIST_H_*/
\ No newline at end of file
diff --git a/include/util/tlog.h b/include/util/tlog.h
index dead25a4a8..be31aa8115 100644
--- a/include/util/tlog.h
+++ b/include/util/tlog.h
@@ -61,6 +61,7 @@ extern int32_t tqDebugFlag;
extern int32_t fsDebugFlag;
extern int32_t metaDebugFlag;
extern int32_t fnDebugFlag;
+extern int32_t smaDebugFlag;
int32_t taosInitLog(const char *logName, int32_t maxFiles);
void taosCloseLog();
diff --git a/packaging/release.sh b/packaging/release.sh
index ef3018a913..9230cafa85 100755
--- a/packaging/release.sh
+++ b/packaging/release.sh
@@ -67,7 +67,6 @@ bin_files="${compile_dir}/build/bin/taosd ${compile_dir}/build/bin/taos ${compi
cp -rf ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
cp ${compile_dir}/build/lib/libtaos.so ${install_dir}/lib/
-cp ${compile_dir}/build/lib/libtdb.so ${install_dir}/lib/
cp ${compile_dir}/build/lib/libavro* ${install_dir}/lib/ > /dev/null || echo -e "failed to copy avro libraries"
cp -rf ${compile_dir}/build/lib/pkgconfig ${install_dir}/lib/ > /dev/null || echo -e "failed to copy pkgconfig directory"
diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh
index 740d356f80..d2d52af955 100755
--- a/packaging/tools/install.sh
+++ b/packaging/tools/install.sh
@@ -215,15 +215,9 @@ function install_lib() {
${csudo} ln -s ${install_main_dir}/lib/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
- ${csudo} ln -s ${install_main_dir}/lib/libtdb.* ${lib_link_dir}/libtdb.so.1
- ${csudo} ln -s ${lib_link_dir}/libtdb.so.1 ${lib_link_dir}/libtdb.so
-
if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
${csudo} ln -s ${install_main_dir}/lib/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
-
- ${csudo} ln -s ${install_main_dir}/lib/libtdb.* ${lib64_link_dir}/libtdb.so.1 || :
- ${csudo} ln -s ${lib64_link_dir}/libtdb.so.1 ${lib64_link_dir}/libtdb.so || :
fi
${csudo} ldconfig
diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c
index d1ec07e285..58e0506475 100644
--- a/source/client/src/clientImpl.c
+++ b/source/client/src/clientImpl.c
@@ -172,7 +172,8 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC
.msgLen = ERROR_MSG_BUF_DEFAULT_SIZE,
.pTransporter = pTscObj->pAppInfo->pTransporter,
.pStmtCb = pStmtCb,
- .pUser = pTscObj->user};
+ .pUser = pTscObj->user,
+ .isSuperUser = (0 == strcmp(pTscObj->user, TSDB_DEFAULT_USER))};
cxt.mgmtEpSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
int32_t code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &cxt.pCatalog);
@@ -517,8 +518,9 @@ STscObj* taosConnectImpl(const char* user, const char* auth, const char* db, __t
if (pRequest->code != TSDB_CODE_SUCCESS) {
const char* errorMsg =
(pRequest->code == TSDB_CODE_RPC_FQDN_ERROR) ? taos_errstr(pRequest) : tstrerror(pRequest->code);
- printf("failed to connect to server, reason: %s\n\n", errorMsg);
+ fprintf(stderr,"failed to connect to server, reason: %s\n\n", errorMsg);
+ terrno = pRequest->code;
destroyRequest(pRequest);
taos_close(pTscObj);
pTscObj = NULL;
@@ -947,8 +949,7 @@ int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableR
// TODO handle the compressed case
pResultInfo->totalRows += pResultInfo->numOfRows;
- return setResultDataPtr(pResultInfo, pResultInfo->fields, pResultInfo->numOfCols, pResultInfo->numOfRows,
- convertUcs4);
+ return setResultDataPtr(pResultInfo, pResultInfo->fields, pResultInfo->numOfCols, pResultInfo->numOfRows, convertUcs4);
}
TSDB_SERVER_STATUS taos_check_server_status(const char* fqdn, int port, char* details, int maxlen) {
diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c
index fc909cb0a3..8fce5bfb00 100644
--- a/source/client/src/clientSml.c
+++ b/source/client/src/clientSml.c
@@ -9,7 +9,6 @@
#include "tdef.h"
#include "tlog.h"
#include "tmsg.h"
-#include "tstrbuild.h"
#include "ttime.h"
#include "ttypes.h"
#include "tcommon.h"
@@ -26,6 +25,35 @@
#define SLASH '\\'
#define tsMaxSQLStringLen (1024*1024)
+#define JUMP_SPACE(sql) while (*sql != '\0'){if(*sql == SPACE) sql++;else break;}
+// comma ,
+#define IS_SLASH_COMMA(sql) (*(sql) == COMMA && *((sql) - 1) == SLASH)
+#define IS_COMMA(sql) (*(sql) == COMMA && *((sql) - 1) != SLASH)
+// space
+#define IS_SLASH_SPACE(sql) (*(sql) == SPACE && *((sql) - 1) == SLASH)
+#define IS_SPACE(sql) (*(sql) == SPACE && *((sql) - 1) != SLASH)
+// equal =
+#define IS_SLASH_EQUAL(sql) (*(sql) == EQUAL && *((sql) - 1) == SLASH)
+#define IS_EQUAL(sql) (*(sql) == EQUAL && *((sql) - 1) != SLASH)
+// quote "
+#define IS_SLASH_QUOTE(sql) (*(sql) == QUOTE && *((sql) - 1) == SLASH)
+#define IS_QUOTE(sql) (*(sql) == QUOTE && *((sql) - 1) != SLASH)
+// SLASH
+#define IS_SLASH_SLASH(sql) (*(sql) == SLASH && *((sql) - 1) == SLASH)
+
+#define IS_SLASH_LETTER(sql) (IS_SLASH_COMMA(sql) || IS_SLASH_SPACE(sql) || IS_SLASH_EQUAL(sql) || IS_SLASH_QUOTE(sql) || IS_SLASH_SLASH(sql))
+
+#define MOVE_FORWARD_ONE(sql,len) (memmove((void*)((sql) - 1), (sql), len))
+
+#define PROCESS_SLASH(key,keyLen) \
+for (int i = 1; i < keyLen; ++i) { \
+ if(IS_SLASH_LETTER(key+i)){ \
+ MOVE_FORWARD_ONE(key+i, keyLen-i); \
+ i--; \
+ keyLen--; \
+ } \
+}
+
#define OTD_MAX_FIELDS_NUM 2
#define OTD_JSON_SUB_FIELDS_NUM 2
#define OTD_JSON_FIELDS_NUM 4
@@ -42,6 +70,7 @@
#define BINARY_ADD_LEN 2 // "binary" 2 means " "
#define NCHAR_ADD_LEN 3 // L"nchar" 3 means L" "
+#define CHAR_SAVE_LENGTH 8
//=================================================================================================
typedef TSDB_SML_PROTOCOL_TYPE SMLProtocolType;
@@ -231,7 +260,7 @@ static int32_t smlBuildColumnDescription(SSmlKv* field, char* buf, int32_t bufSi
char tname[TSDB_TABLE_NAME_LEN] = {0};
memcpy(tname, field->key, field->keyLen);
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
- int32_t bytes = field->length; // todo
+ int32_t bytes = field->length > CHAR_SAVE_LENGTH ? (2*field->length) : CHAR_SAVE_LENGTH;
int out = snprintf(buf, bufSize,"`%s` %s(%d)",
tname, tDataTypes[field->type].name, bytes);
*outBytes = out;
@@ -431,7 +460,7 @@ static int32_t smlModifyDBSchemas(SSmlHandle* info) {
SEpSet ep = getEpSet_s(&info->taos->pAppInfo->mgmtEp);
size_t superTableLen = 0;
- void *superTable = taosHashGetKey(tableMetaSml, &superTableLen); // todo escape
+ void *superTable = taosHashGetKey(tableMetaSml, &superTableLen);
SName pName = {TSDB_TABLE_NAME_T, info->taos->acctId, {0}, {0}};
strcpy(pName.dbname, info->pRequest->pDb);
memcpy(pName.tname, superTable, superTableLen);
@@ -760,7 +789,7 @@ static int64_t smlParseInfluxTime(SSmlHandle* info, const char* data, int32_t le
smlBuildInvalidDataMsg(&info->msgBuf, "invalid timestamp precision", NULL);
return -1;
}
- if(!data){
+ if(len == 0){
return smlGetTimeNow(tsType);
}
@@ -850,66 +879,56 @@ static bool smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) {
static int32_t smlParseInfluxString(const char* sql, SSmlLineInfo *elements, SSmlMsgBuf *msg){
if(!sql) return TSDB_CODE_SML_INVALID_DATA;
- while (*sql != '\0') { // jump the space at the begining
- if(*sql != SPACE) {
- elements->measure = sql;
- break;
- }
- sql++;
- }
- if (!elements->measure || *sql == COMMA) {
- smlBuildInvalidDataMsg(msg, "invalid data", sql);
- return TSDB_CODE_SML_INVALID_DATA;
- }
+ JUMP_SPACE(sql)
+ if(*sql == COMMA) return TSDB_CODE_SML_INVALID_DATA;
+ elements->measure = sql;
- // parse measure and tag
+ // parse measure
while (*sql != '\0') {
- if (elements->measureLen == 0 && *sql == COMMA && *(sql - 1) != SLASH) { // find the first comma
- elements->measureLen = sql - elements->measure;
- sql++;
- elements->tags = sql;
+ if((sql != elements->measure) && IS_SLASH_LETTER(sql)){
+ MOVE_FORWARD_ONE(sql,strlen(sql) + 1);
continue;
}
-
- if (*sql == SPACE && *(sql - 1) != SLASH) { // find the first space
- if (elements->measureLen == 0) {
- elements->measureLen = sql - elements->measure;
- elements->tags = sql;
- }
- elements->tagsLen = sql - elements->tags;
- elements->measureTagsLen = sql - elements->measure;
+ if(IS_COMMA(sql)){
break;
}
+ if(IS_SPACE(sql)){
+ break;
+ }
sql++;
}
- if(elements->tagsLen == 0){ // measure, cols1=a measure cols1=a
- elements->measureTagsLen = elements->measureLen;
- }
+ elements->measureLen = sql - elements->measure;
if(elements->measureLen == 0) {
- smlBuildInvalidDataMsg(msg, "invalid measure", elements->measure);
+ smlBuildInvalidDataMsg(msg, "measure is empty", NULL);
return TSDB_CODE_SML_INVALID_DATA;
}
+ // parse tag
+ if(*sql == SPACE){
+ elements->tagsLen = 0;
+ }else{
+ if(*sql == COMMA) sql++;
+ elements->tags = sql;
+ while (*sql != '\0') {
+ if(IS_SPACE(sql)){
+ break;
+ }
+ sql++;
+ }
+ elements->tagsLen = sql - elements->tags;
+ }
+ elements->measureTagsLen = sql - elements->measure;
+
// parse cols
- while (*sql != '\0') {
- if(*sql != SPACE) {
- elements->cols = sql;
- break;
- }
- sql++;
- }
- if(!elements->cols) {
- smlBuildInvalidDataMsg(msg, "invalid columns", elements->cols);
- return TSDB_CODE_SML_INVALID_DATA;
- }
-
+ JUMP_SPACE(sql)
+ elements->cols = sql;
bool isInQuote = false;
while (*sql != '\0') {
- if(*sql == QUOTE && *(sql - 1) != SLASH){
+ if(IS_QUOTE(sql)){
isInQuote = !isInQuote;
}
- if(!isInQuote && *sql == SPACE && *(sql - 1) != SLASH) {
+ if(!isInQuote && IS_SPACE(sql)){
break;
}
sql++;
@@ -919,20 +938,21 @@ static int32_t smlParseInfluxString(const char* sql, SSmlLineInfo *elements, SSm
return TSDB_CODE_SML_INVALID_DATA;
}
elements->colsLen = sql - elements->cols;
+ if(elements->colsLen == 0) {
+ smlBuildInvalidDataMsg(msg, "cols is empty", NULL);
+ return TSDB_CODE_SML_INVALID_DATA;
+ }
- // parse ts,ts can be empty
+ // parse timestamp
+ JUMP_SPACE(sql)
+ elements->timestamp = sql;
while (*sql != '\0') {
- if(*sql != SPACE && elements->timestamp == NULL) {
- elements->timestamp = sql;
- }
- if(*sql == SPACE && elements->timestamp != NULL){
+ if(*sql == SPACE){
break;
}
sql++;
}
- if(elements->timestamp){
- elements->timestampLen = sql - elements->timestamp;
- }
+ elements->timestampLen = sql - elements->timestamp;
return TSDB_CODE_SUCCESS;
}
@@ -949,38 +969,54 @@ static void smlParseTelnetElement(const char **sql, const char **data, int32_t *
}
}
-static int32_t smlParseTelnetTags(const char* data, int32_t len, SArray *cols, SHashObj *dumplicateKey, SSmlMsgBuf *msg){
- for(int i = 0; i < len; i++){
- // parse key
- const char *key = data + i;
+static int32_t smlParseTelnetTags(const char* data, SArray *cols, SHashObj *dumplicateKey, SSmlMsgBuf *msg){
+ const char *sql = data;
+ while(*sql != '\0'){
+ JUMP_SPACE(sql)
+ if(*sql == '\0') break;
+
+ const char *key = sql;
int32_t keyLen = 0;
- while(i < len){
- if(data[i] == EQUAL){
- keyLen = data + i - key;
+
+ // parse key
+ while(*sql != '\0'){
+ if(*sql == SPACE) {
+ smlBuildInvalidDataMsg(msg, "invalid data", sql);
+ return TSDB_CODE_SML_INVALID_DATA;
+ }
+ if(*sql == EQUAL) {
+ keyLen = sql - key;
+ sql++;
break;
}
- i++;
+ sql++;
}
+
if(keyLen == 0 || keyLen >= TSDB_COL_NAME_LEN){
smlBuildInvalidDataMsg(msg, "invalid key or key is too long than 64", key);
return TSDB_CODE_SML_INVALID_DATA;
}
-
if(smlCheckDuplicateKey(key, keyLen, dumplicateKey)){
smlBuildInvalidDataMsg(msg, "dumplicate key", key);
return TSDB_CODE_TSC_DUP_TAG_NAMES;
}
// parse value
- i++;
- const char *value = data + i;
- while(i < len){
- if(data[i] == SPACE){
+ const char *value = sql;
+ int32_t valueLen = 0;
+ while(*sql != '\0') {
+ // parse value
+ if (*sql == SPACE) {
break;
}
- i++;
+ if (*sql == EQUAL) {
+ smlBuildInvalidDataMsg(msg, "invalid data", sql);
+ return TSDB_CODE_SML_INVALID_DATA;
+ }
+ sql++;
}
- int32_t valueLen = data + i - value;
+ valueLen = sql - value;
+
if(valueLen == 0){
smlBuildInvalidDataMsg(msg, "invalid value", value);
return TSDB_CODE_SML_INVALID_DATA;
@@ -993,13 +1029,14 @@ static int32_t smlParseTelnetTags(const char* data, int32_t len, SArray *cols, S
kv->keyLen = keyLen;
kv->value = value;
kv->length = valueLen;
- kv->type = TSDB_DATA_TYPE_NCHAR; //todo
+ kv->type = TSDB_DATA_TYPE_NCHAR;
if(cols) taosArrayPush(cols, &kv);
}
return TSDB_CODE_SUCCESS;
}
+
// format: =[ =]
static int32_t smlParseTelnetString(SSmlHandle *info, const char* sql, SSmlTableInfo *tinfo, SArray *cols){
if(!sql) return TSDB_CODE_SML_INVALID_DATA;
@@ -1048,10 +1085,7 @@ static int32_t smlParseTelnetString(SSmlHandle *info, const char* sql, SSmlTable
}
// parse tags
- while(*sql == SPACE){
- sql++;
- }
- ret = smlParseTelnetTags(sql, strlen(sql), tinfo->tags, info->dumplicateKey, &info->msgBuf);
+ ret = smlParseTelnetTags(sql, tinfo->tags, info->dumplicateKey, &info->msgBuf);
if (ret != TSDB_CODE_SUCCESS) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", sql);
return TSDB_CODE_SML_INVALID_DATA;
@@ -1073,49 +1107,67 @@ static int32_t smlParseCols(const char* data, int32_t len, SArray *cols, bool is
return TSDB_CODE_SUCCESS;
}
- for(int i = 0; i < len; i++){
- // parse key
- const char *key = data + i;
+ const char *sql = data;
+ while(sql < data + len){
+ const char *key = sql;
int32_t keyLen = 0;
- while(i < len){
- if(data[i] == EQUAL && i > 0 && data[i-1] != SLASH){
- keyLen = data + i - key;
+
+ while(sql < data + len){
+ // parse key
+ if(IS_COMMA(sql)) {
+ smlBuildInvalidDataMsg(msg, "invalid data", sql);
+ return TSDB_CODE_SML_INVALID_DATA;
+ }
+ if(IS_EQUAL(sql)) {
+ keyLen = sql - key;
+ sql++;
break;
}
- i++;
+ sql++;
}
+
if(keyLen == 0 || keyLen >= TSDB_COL_NAME_LEN){
smlBuildInvalidDataMsg(msg, "invalid key or key is too long than 64", key);
return TSDB_CODE_SML_INVALID_DATA;
}
-
if(smlCheckDuplicateKey(key, keyLen, dumplicateKey)){
smlBuildInvalidDataMsg(msg, "dumplicate key", key);
return TSDB_CODE_TSC_DUP_TAG_NAMES;
}
// parse value
- i++;
- const char *value = data + i;
+ const char *value = sql;
+ int32_t valueLen = 0;
bool isInQuote = false;
- while(i < len){
- if(!isTag && data[i] == QUOTE && data[i-1] != SLASH){
+ while(sql < data + len) {
+ // parse value
+ if(!isTag && IS_QUOTE(sql)){
isInQuote = !isInQuote;
+ sql++;
+ continue;
}
- if(!isInQuote && data[i] == COMMA && i > 0 && data[i-1] != SLASH){
+ if (!isInQuote && IS_COMMA(sql)) {
break;
}
- i++;
+ if (!isInQuote && IS_EQUAL(sql)) {
+ smlBuildInvalidDataMsg(msg, "invalid data", sql);
+ return TSDB_CODE_SML_INVALID_DATA;
+ }
+ sql++;
}
- if(!isTag && isInQuote){
+ valueLen = sql - value;
+ sql++;
+
+ if(isInQuote){
smlBuildInvalidDataMsg(msg, "only one quote", value);
return TSDB_CODE_SML_INVALID_DATA;
}
- int32_t valueLen = data + i - value;
if(valueLen == 0){
smlBuildInvalidDataMsg(msg, "invalid value", value);
return TSDB_CODE_SML_INVALID_DATA;
}
+ PROCESS_SLASH(key, keyLen)
+ PROCESS_SLASH(value, valueLen)
// add kv to SSmlKv
SSmlKv *kv = (SSmlKv *)taosMemoryCalloc(sizeof(SSmlKv), 1);
@@ -1138,49 +1190,6 @@ static int32_t smlParseCols(const char* data, int32_t len, SArray *cols, bool is
return TSDB_CODE_SUCCESS;
}
-//static int32_t parseSmlCols(const char* data, SArray *cols){
-// while(*data != '\0'){
-// if(*data == EQUAL) return TSDB_CODE_SML_INVALID_DATA;
-// const char *key = data;
-// int32_t keyLen = 0;
-// while(*data != '\0'){
-// if(*data == EQUAL && *(data-1) != SLASH){
-// keyLen = data - key;
-// data ++;
-// break;
-// }
-// data++;
-// }
-// if(keyLen == 0){
-// return TSDB_CODE_SML_INVALID_DATA;
-// }
-//
-// if(*data == COMMA) return TSDB_CODE_SML_INVALID_DATA;
-// const char *value = data;
-// int32_t valueLen = 0;
-// while(*data != '\0'){
-// if(*data == COMMA && *(data-1) != SLASH){
-// valueLen = data - value;
-// data ++;
-// break;
-// }
-// data++;
-// }
-// if(valueLen == 0){
-// return TSDB_CODE_SML_INVALID_DATA;
-// }
-//
-// TAOS_SML_KV *kv = taosMemoryCalloc(sizeof(TAOS_SML_KV), 1);
-// kv->key = key;
-// kv->keyLen = keyLen;
-// kv->value = value;
-// kv->valueLen = valueLen;
-// kv->type = TSDB_DATA_TYPE_NCHAR;
-// if(cols) taosArrayPush(cols, &kv);
-// }
-// return TSDB_CODE_SUCCESS;
-//}
-
static bool smlUpdateMeta(SHashObj *metaHash, SArray *metaArray, SArray *cols, SSmlMsgBuf *msg){
for (int i = 0; i < taosArrayGetSize(cols); ++i) { //jump timestamp
SSmlKv *kv = (SSmlKv *)taosArrayGetP(cols, i);
@@ -1298,7 +1307,7 @@ static int32_t smlDealCols(SSmlTableInfo* oneTable, bool dataFormat, SArray *col
}
for(size_t i = 0; i < taosArrayGetSize(cols); i++){
SSmlKv *kv = (SSmlKv *)taosArrayGetP(cols, i);
- taosHashPut(kvHash, kv->key, kv->keyLen, &kv, POINTER_BYTES); // todo key need escape, like \=, because find by schema name later
+ taosHashPut(kvHash, kv->key, kv->keyLen, &kv, POINTER_BYTES);
}
taosArrayPush(oneTable->cols, &kvHash);
@@ -1346,12 +1355,13 @@ static void smlDestroySTableMeta(SSmlSTableMeta *meta){
taosArrayDestroy(meta->tags);
taosArrayDestroy(meta->cols);
taosMemoryFree(meta->tableMeta);
+ taosMemoryFree(meta);
}
static void smlDestroyCols(SArray *cols) {
if (!cols) return;
for (int i = 0; i < taosArrayGetSize(cols); ++i) {
- void *kv = taosArrayGet(cols, i);
+ void *kv = taosArrayGetP(cols, i);
taosMemoryFree(kv);
}
}
@@ -2063,12 +2073,16 @@ static int32_t smlParseTelnetLine(SSmlHandle* info, void *data) {
if(ret != TSDB_CODE_SUCCESS){
uError("SML:0x%"PRIx64" smlParseTelnetLine failed", info->id);
smlDestroyTableInfo(info, tinfo);
+ smlDestroyCols(cols);
taosArrayDestroy(cols);
return ret;
}
if(taosArrayGetSize(tinfo->tags) <= 0 || taosArrayGetSize(tinfo->tags) > TSDB_MAX_TAGS){
smlBuildInvalidDataMsg(&info->msgBuf, "invalidate tags length:[1,128]", NULL);
+ smlDestroyTableInfo(info, tinfo);
+ smlDestroyCols(cols);
+ taosArrayDestroy(cols);
return TSDB_CODE_SML_INVALID_DATA;
}
taosHashClear(info->dumplicateKey);
diff --git a/source/client/test/smlTest.cpp b/source/client/test/smlTest.cpp
index b9870633db..e567a0c3e8 100644
--- a/source/client/test/smlTest.cpp
+++ b/source/client/test/smlTest.cpp
@@ -41,12 +41,14 @@ TEST(testCase, smlParseInfluxString_Test) {
SSmlLineInfo elements = {0};
// case 1
- char *sql = "st,t1=3,t2=4,t3=t3 c1=3i64,c3=\"passit hello,c1=2\",c2=false,c4=4f64 1626006833639000000 ,32,c=3";
+ char *tmp = "\\,st,t1=3,t2=4,t3=t3 c1=3i64,c3=\"passit hello,c1=2\",c2=false,c4=4f64 1626006833639000000 ,32,c=3";
+ char *sql = (char*)taosMemoryCalloc(256, 1);
+ memcpy(sql, tmp, strlen(tmp) + 1);
int ret = smlParseInfluxString(sql, &elements, &msgBuf);
ASSERT_EQ(ret, 0);
ASSERT_EQ(elements.measure, sql);
- ASSERT_EQ(elements.measureLen, strlen("st"));
- ASSERT_EQ(elements.measureTagsLen, strlen("st,t1=3,t2=4,t3=t3"));
+ ASSERT_EQ(elements.measureLen, strlen(",st"));
+ ASSERT_EQ(elements.measureTagsLen, strlen(",st,t1=3,t2=4,t3=t3"));
ASSERT_EQ(elements.tags, sql + elements.measureLen + 1);
ASSERT_EQ(elements.tagsLen, strlen("t1=3,t2=4,t3=t3"));
@@ -58,76 +60,79 @@ TEST(testCase, smlParseInfluxString_Test) {
ASSERT_EQ(elements.timestampLen, strlen("1626006833639000000"));
// case 2 false
- sql = "st,t1=3,t2=4,t3=t3 c1=3i64,c3=\"passit hello,c1=2,c2=false,c4=4f64 1626006833639000000";
+ tmp = "st,t1=3,t2=4,t3=t3 c1=3i64,c3=\"passit hello,c1=2,c2=false,c4=4f64 1626006833639000000";
+ memcpy(sql, tmp, strlen(tmp) + 1);
memset(&elements, 0, sizeof(SSmlLineInfo));
ret = smlParseInfluxString(sql, &elements, &msgBuf);
ASSERT_NE(ret, 0);
// case 3 false
- sql = "st, t1=3,t2=4,t3=t3 c1=3i64,c3=\"passit hello,c1=2,c2=false,c4=4f64 1626006833639000000";
+ tmp = "st, t1=3,t2=4,t3=t3 c1=3i64,c3=\"passit hello,c1=2,c2=false,c4=4f64 1626006833639000000";
+ memcpy(sql, tmp, strlen(tmp) + 1);
memset(&elements, 0, sizeof(SSmlLineInfo));
ret = smlParseInfluxString(sql, &elements, &msgBuf);
ASSERT_EQ(ret, 0);
- ASSERT_EQ(elements.cols, sql + elements.measureTagsLen + 2);
+ ASSERT_EQ(elements.cols, sql + elements.measureTagsLen + 1);
ASSERT_EQ(elements.colsLen, strlen("t1=3,t2=4,t3=t3"));
// case 4 tag is null
- sql = "st, c1=3i64,c3=\"passit hello,c1=2\",c2=false,c4=4f64 1626006833639000000";
+ tmp = "st, c1=3i64,c3=\"passit hello,c1=2\",c2=false,c4=4f64 1626006833639000000";
+ memcpy(sql, tmp, strlen(tmp) + 1);
memset(&elements, 0, sizeof(SSmlLineInfo));
ret = smlParseInfluxString(sql, &elements, &msgBuf);
ASSERT_EQ(ret, 0);
ASSERT_EQ(elements.measure, sql);
ASSERT_EQ(elements.measureLen, strlen("st"));
- ASSERT_EQ(elements.measureTagsLen, strlen("st"));
+ ASSERT_EQ(elements.measureTagsLen, strlen("st,"));
- ASSERT_EQ(elements.tags, sql + elements.measureLen + 1);
+ ASSERT_EQ(elements.tags, sql + elements.measureTagsLen);
ASSERT_EQ(elements.tagsLen, 0);
- ASSERT_EQ(elements.cols, sql + elements.measureTagsLen + 2);
+ ASSERT_EQ(elements.cols, sql + elements.measureTagsLen + 1);
ASSERT_EQ(elements.colsLen, strlen("c1=3i64,c3=\"passit hello,c1=2\",c2=false,c4=4f64"));
- ASSERT_EQ(elements.timestamp, sql + elements.measureTagsLen + 2 + elements.colsLen + 1);
+ ASSERT_EQ(elements.timestamp, sql + elements.measureTagsLen + 1 + elements.colsLen + 1);
ASSERT_EQ(elements.timestampLen, strlen("1626006833639000000"));
// case 5 tag is null
- sql = " st c1=3i64,c3=\"passit hello,c1=2\",c2=false,c4=4f64 1626006833639000000 ";
+ tmp = " st c1=3i64,c3=\"passit hello,c1=2\",c2=false,c4=4f64 1626006833639000000 ";
+ memcpy(sql, tmp, strlen(tmp) + 1);
memset(&elements, 0, sizeof(SSmlLineInfo));
ret = smlParseInfluxString(sql, &elements, &msgBuf);
- sql++;
ASSERT_EQ(ret, 0);
- ASSERT_EQ(elements.measure, sql);
+ ASSERT_EQ(elements.measure, sql + 1);
ASSERT_EQ(elements.measureLen, strlen("st"));
ASSERT_EQ(elements.measureTagsLen, strlen("st"));
- ASSERT_EQ(elements.tags, sql + elements.measureLen);
ASSERT_EQ(elements.tagsLen, 0);
- ASSERT_EQ(elements.cols, sql + elements.measureTagsLen + 3);
+ ASSERT_EQ(elements.cols, sql + 1 + elements.measureTagsLen + 3);
ASSERT_EQ(elements.colsLen, strlen("c1=3i64,c3=\"passit hello,c1=2\",c2=false,c4=4f64"));
- ASSERT_EQ(elements.timestamp, sql + elements.measureTagsLen + 3 + elements.colsLen + 2);
+ ASSERT_EQ(elements.timestamp, sql + 1 + elements.measureTagsLen + 3 + elements.colsLen + 2);
ASSERT_EQ(elements.timestampLen, strlen("1626006833639000000"));
// case 6
- sql = " st c1=3i64,c3=\"passit hello,c1=2\",c2=false,c4=4f64 ";
+ tmp = " st c1=3i64,c3=\"passit hello,c1=2\",c2=false,c4=4f64 ";
+ memcpy(sql, tmp, strlen(tmp) + 1);
memset(&elements, 0, sizeof(SSmlLineInfo));
ret = smlParseInfluxString(sql, &elements, &msgBuf);
ASSERT_EQ(ret, 0);
// case 7
- sql = " st , ";
+ tmp = " st , ";
+ memcpy(sql, tmp, strlen(tmp) + 1);
memset(&elements, 0, sizeof(SSmlLineInfo));
ret = smlParseInfluxString(sql, &elements, &msgBuf);
- sql++;
ASSERT_EQ(ret, 0);
- ASSERT_EQ(elements.cols, sql + elements.measureTagsLen + 3);
- ASSERT_EQ(elements.colsLen, strlen(","));
// case 8 false
- sql = ", st , ";
+ tmp = ", st , ";
+ memcpy(sql, tmp, strlen(tmp) + 1);
memset(&elements, 0, sizeof(SSmlLineInfo));
ret = smlParseInfluxString(sql, &elements, &msgBuf);
ASSERT_NE(ret, 0);
+ taosMemoryFree(sql);
}
TEST(testCase, smlParseCols_Error_Test) {
@@ -188,7 +193,8 @@ TEST(testCase, smlParseCols_Error_Test) {
"c=-3.402823466e+39u64",
"c=-339u64",
"c=18446744073709551616u64",
- "c=1,c=2"
+ "c=1,c=2",
+ "c=1=2"
};
SHashObj *dumplicateKey = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
@@ -198,9 +204,18 @@ TEST(testCase, smlParseCols_Error_Test) {
msgBuf.buf = msg;
msgBuf.len = 256;
int32_t len = strlen(data[i]);
- int32_t ret = smlParseCols(data[i], len, NULL, false, dumplicateKey, &msgBuf);
+ char *sql = (char*)taosMemoryCalloc(256, 1);
+ memcpy(sql, data[i], len + 1);
+ SArray *cols = taosArrayInit(8, POINTER_BYTES);
+ int32_t ret = smlParseCols(sql, len, cols, false, dumplicateKey, &msgBuf);
ASSERT_NE(ret, TSDB_CODE_SUCCESS);
taosHashClear(dumplicateKey);
+ taosMemoryFree(sql);
+ for(int j = 0; j < taosArrayGetSize(cols); j++){
+ void *kv = taosArrayGetP(cols, j);
+ taosMemoryFree(kv);
+ }
+ taosArrayDestroy(cols);
}
taosHashCleanup(dumplicateKey);
}
@@ -216,7 +231,7 @@ TEST(testCase, smlParseCols_tag_Test) {
SHashObj *dumplicateKey = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
const char *data =
- "cbin=\"passit helloc=2\",cnch=L\"iisdfsf\",cbool=false,cf64=4.31f64,cf64_=8.32,cf32=8.23f32,ci8=-34i8,cu8=89u8,ci16=233i16,cu16=898u16,ci32=98289i32,cu32=12323u32,ci64=-89238i64,ci=989i,cu64=8989323u64,cbooltrue=true,cboolt=t,cboolf=f,cnch_=l\"iuwq\"";
+ "cbin=\"passit helloc\",cnch=L\"iisdfsf\",cbool=false,cf64=4.31f64,cf64_=8.32,cf32=8.23f32,ci8=-34i8,cu8=89u8,ci16=233i16,cu16=898u16,ci32=98289i32,cu32=12323u32,ci64=-89238i64,ci=989i,cu64=8989323u64,cbooltrue=true,cboolt=t,cboolf=f,cnch_=l\"iuwq\"";
int32_t len = strlen(data);
int32_t ret = smlParseCols(data, len, cols, true, dumplicateKey, &msgBuf);
ASSERT_EQ(ret, TSDB_CODE_SUCCESS);
@@ -228,9 +243,8 @@ TEST(testCase, smlParseCols_tag_Test) {
ASSERT_EQ(strncasecmp(kv->key, "cbin", 4), 0);
ASSERT_EQ(kv->keyLen, 4);
ASSERT_EQ(kv->type, TSDB_DATA_TYPE_NCHAR);
- ASSERT_EQ(kv->length, 17);
+ ASSERT_EQ(kv->length, 15);
ASSERT_EQ(strncasecmp(kv->value, "\"passit", 7), 0);
- taosMemoryFree(kv);
// nchar
kv = (SSmlKv *)taosArrayGetP(cols, 3);
@@ -239,11 +253,13 @@ TEST(testCase, smlParseCols_tag_Test) {
ASSERT_EQ(kv->type, TSDB_DATA_TYPE_NCHAR);
ASSERT_EQ(kv->length, 7);
ASSERT_EQ(strncasecmp(kv->value, "4.31f64", 7), 0);
- taosMemoryFree(kv);
+ for(int i = 0; i < size; i++){
+ void *tmp = taosArrayGetP(cols, i);
+ taosMemoryFree(tmp);
+ }
taosArrayClear(cols);
-
// test tag is null
data = "t=3e";
len = 0;
@@ -278,20 +294,22 @@ TEST(testCase, smlParseCols_Test) {
SHashObj *dumplicateKey = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
- const char *data = "cbin=\"passit hello,c=2\",cnch=L\"iisdfsf\",cbool=false,cf64=4.31f64,cf64_=8.32,cf32=8.23f32,ci8=-34i8,cu8=89u8,ci16=233i16,cu16=898u16,ci32=98289i32,cu32=12323u32,ci64=-89238i64,ci=989i,cu64=8989323u64,cbooltrue=true,cboolt=t,cboolf=f,cnch_=l\"iuwq\"";
+ const char *data = "cb\\=in=\"pass\\,it hello,c=2\",cnch=L\"ii\\=sdfsf\",cbool=false,cf64=4.31f64,cf64_=8.32,cf32=8.23f32,ci8=-34i8,cu8=89u8,ci16=233i16,cu16=898u16,ci32=98289i32,cu32=12323u32,ci64=-89238i64,ci=989i,cu64=8989323u64,cbooltrue=true,cboolt=t,cboolf=f,cnch_=l\"iuwq\"";
int32_t len = strlen(data);
- int32_t ret = smlParseCols(data, len, cols, false, dumplicateKey, &msgBuf);
+ char *sql = (char*)taosMemoryCalloc(1024, 1);
+ memcpy(sql, data, len + 1);
+ int32_t ret = smlParseCols(sql, len, cols, false, dumplicateKey, &msgBuf);
ASSERT_EQ(ret, TSDB_CODE_SUCCESS);
int32_t size = taosArrayGetSize(cols);
ASSERT_EQ(size, 19);
// binary
SSmlKv *kv = (SSmlKv *)taosArrayGetP(cols, 0);
- ASSERT_EQ(strncasecmp(kv->key, "cbin", 4), 0);
- ASSERT_EQ(kv->keyLen, 4);
+ ASSERT_EQ(strncasecmp(kv->key, "cb=in", 5), 0);
+ ASSERT_EQ(kv->keyLen, 5);
ASSERT_EQ(kv->type, TSDB_DATA_TYPE_BINARY);
- ASSERT_EQ(kv->length, 16);
- ASSERT_EQ(strncasecmp(kv->value, "passit", 6), 0);
+ ASSERT_EQ(kv->length, 17);
+ ASSERT_EQ(strncasecmp(kv->value, "pass,it ", 8), 0);
taosMemoryFree(kv);
// nchar
@@ -299,8 +317,8 @@ TEST(testCase, smlParseCols_Test) {
ASSERT_EQ(strncasecmp(kv->key, "cnch", 4), 0);
ASSERT_EQ(kv->keyLen, 4);
ASSERT_EQ(kv->type, TSDB_DATA_TYPE_NCHAR);
- ASSERT_EQ(kv->length, 7);
- ASSERT_EQ(strncasecmp(kv->value, "iisd", 4), 0);
+ ASSERT_EQ(kv->length, 8);
+ ASSERT_EQ(strncasecmp(kv->value, "ii=sd", 5), 0);
taosMemoryFree(kv);
// bool
@@ -463,6 +481,7 @@ TEST(testCase, smlParseCols_Test) {
taosArrayDestroy(cols);
taosHashCleanup(dumplicateKey);
+ taosMemoryFree(sql);
}
TEST(testCase, smlProcess_influx_Test) {
@@ -481,7 +500,7 @@ TEST(testCase, smlProcess_influx_Test) {
SSmlHandle *info = smlBuildSmlInfo(taos, request, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS, true);
ASSERT_NE(info, nullptr);
- const char *sql[11] = {
+ const char *sql[] = {
"readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 load_capacity=1500,fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation=124,velocity=0,heading=221,grade=0 1451606400000000000",
"readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 load_capacity=1500,fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation=124,velocity=0,heading=221,grade=0,fuel_consumption=25 1451607400000000000",
"readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 load_capacity=1500,fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation=124,heading=221,grade=0,fuel_consumption=25 1451608400000000000",
@@ -492,20 +511,22 @@ TEST(testCase, smlProcess_influx_Test) {
"readings,name=truck_2,fleet=North,driver=Derek,model=F-150 load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=24.5208,longitude=28.09377,elevation=428,velocity=0,heading=304,grade=0,fuel_consumption=25 1451609400000000000",
"readings,fleet=South,name=truck_0,driver=Trish,model=H-2,device_version=v2.3 fuel_consumption=25,grade=0 1451629400000000000",
"stable,t1=t1,t2=t2,t3=t3 c1=1,c2=2,c3=3,c4=4 1451629500000000000",
- "stable,t2=t2,t1=t1,t3=t3 c1=1,c3=3,c4=4 1451629600000000000"
+ "stable,t2=t2,t1=t1,t3=t3 c1=1,c3=3,c4=4 1451629600000000000",
};
- smlProcess(info, (char**)sql, sizeof(sql)/sizeof(sql[0]));
+ int ret = smlProcess(info, (char**)sql, sizeof(sql)/sizeof(sql[0]));
+ ASSERT_EQ(ret, 0);
- TAOS_RES *res = taos_query(taos, "select * from t_6885c584b98481584ee13dac399e173d");
- ASSERT_NE(res, nullptr);
- int fieldNum = taos_field_count(res);
- ASSERT_EQ(fieldNum, 5);
- int rowNum = taos_affected_rows(res);
- ASSERT_EQ(rowNum, 2);
- for (int i = 0; i < rowNum; ++i) {
- TAOS_ROW rows = taos_fetch_row(res);
- }
- taos_free_result(res);
+// TAOS_RES *res = taos_query(taos, "select * from t_6885c584b98481584ee13dac399e173d");
+// ASSERT_NE(res, nullptr);
+// int fieldNum = taos_field_count(res);
+// ASSERT_EQ(fieldNum, 5);
+// int rowNum = taos_affected_rows(res);
+// ASSERT_EQ(rowNum, 2);
+// for (int i = 0; i < rowNum; ++i) {
+// TAOS_ROW rows = taos_fetch_row(res);
+// }
+// taos_free_result(res);
+ destroyRequest(request);
smlDestroyInfo(info);
}
@@ -526,7 +547,7 @@ TEST(testCase, smlParseLine_error_Test) {
SSmlHandle *info = smlBuildSmlInfo(taos, request, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS, true);
ASSERT_NE(info, nullptr);
- const char *sql[2] = {
+ const char *sql[] = {
"measure,t1=3 c1=8",
"measure,t2=3 c1=8u8"
};
@@ -575,37 +596,37 @@ TEST(testCase, smlProcess_telnet_Test) {
SSmlHandle *info = smlBuildSmlInfo(taos, request, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS, true);
ASSERT_NE(info, nullptr);
- const char *sql[5] = {
- "sys.if.bytes.out 1479496100 1.3E0 host=web01 interface=eth0",
- "sys.if.bytes.out 1479496101 1.3E1 interface=eth0 host=web01",
- "sys.if.bytes.out 1479496102 1.3E3 network=tcp",
- "sys.procs.running 1479496100 42 host=web01 ",
- " sys.procs.running 1479496200 42 host=web01=4"
+ const char *sql[] = {
+ "sys.if.bytes.out 1479496100 1.3E0 host=web01 interface=eth0",
+ "sys.if.bytes.out 1479496101 1.3E1 interface=eth0 host=web01 ",
+ "sys.if.bytes.out 1479496102 1.3E3 network=tcp",
+ " sys.procs.running 1479496100 42 host=web01 "
};
int ret = smlProcess(info, (char**)sql, sizeof(sql)/sizeof(sql[0]));
ASSERT_EQ(ret, 0);
- TAOS_RES *res = taos_query(taos, "select * from t_8c30283b3c4131a071d1e16cf6d7094a");
- ASSERT_NE(res, nullptr);
- int fieldNum = taos_field_count(res);
- ASSERT_EQ(fieldNum, 2);
- int rowNum = taos_affected_rows(res);
- ASSERT_EQ(rowNum, 1);
- for (int i = 0; i < rowNum; ++i) {
- TAOS_ROW rows = taos_fetch_row(res);
- }
- taos_free_result(pRes);
+// TAOS_RES *res = taos_query(taos, "select * from t_8c30283b3c4131a071d1e16cf6d7094a");
+// ASSERT_NE(res, nullptr);
+// int fieldNum = taos_field_count(res);
+// ASSERT_EQ(fieldNum, 2);
+// int rowNum = taos_affected_rows(res);
+// ASSERT_EQ(rowNum, 1);
+// for (int i = 0; i < rowNum; ++i) {
+// TAOS_ROW rows = taos_fetch_row(res);
+// }
+// taos_free_result(res);
- res = taos_query(taos, "select * from t_6931529054e5637ca92c78a1ad441961");
- ASSERT_NE(res, nullptr);
- fieldNum = taos_field_count(res);
- ASSERT_EQ(fieldNum, 2);
- rowNum = taos_affected_rows(res);
- ASSERT_EQ(rowNum, 2);
- for (int i = 0; i < rowNum; ++i) {
- TAOS_ROW rows = taos_fetch_row(res);
- }
- taos_free_result(pRes);
+// res = taos_query(taos, "select * from t_6931529054e5637ca92c78a1ad441961");
+// ASSERT_NE(res, nullptr);
+// fieldNum = taos_field_count(res);
+// ASSERT_EQ(fieldNum, 2);
+// rowNum = taos_affected_rows(res);
+// ASSERT_EQ(rowNum, 2);
+// for (int i = 0; i < rowNum; ++i) {
+// TAOS_ROW rows = taos_fetch_row(res);
+// }
+// taos_free_result(res);
+ destroyRequest(request);
smlDestroyInfo(info);
}
@@ -649,16 +670,17 @@ TEST(testCase, smlProcess_json1_Test) {
int ret = smlProcess(info, (char **)(&sql), -1);
ASSERT_EQ(ret, 0);
- TAOS_RES *res = taos_query(taos, "select * from t_cb27a7198d637b4f1c6464bd73f756a7");
- ASSERT_NE(res, nullptr);
- int fieldNum = taos_field_count(res);
- ASSERT_EQ(fieldNum, 2);
+// TAOS_RES *res = taos_query(taos, "select * from t_cb27a7198d637b4f1c6464bd73f756a7");
+// ASSERT_NE(res, nullptr);
+// int fieldNum = taos_field_count(res);
+// ASSERT_EQ(fieldNum, 2);
// int rowNum = taos_affected_rows(res);
// ASSERT_EQ(rowNum, 1);
// for (int i = 0; i < rowNum; ++i) {
// TAOS_ROW rows = taos_fetch_row(res);
// }
- taos_free_result(pRes);
+// taos_free_result(res);
+ destroyRequest(request);
smlDestroyInfo(info);
}
@@ -702,7 +724,7 @@ TEST(testCase, smlProcess_json2_Test) {
"}";
int32_t ret = smlProcess(info, (char **)(&sql), -1);
ASSERT_EQ(ret, 0);
- taos_free_result(pRes);
+ destroyRequest(request);
smlDestroyInfo(info);
}
@@ -774,7 +796,7 @@ TEST(testCase, smlProcess_json3_Test) {
"}";
int32_t ret = smlProcess(info, (char **)(&sql), -1);
ASSERT_EQ(ret, 0);
- taos_free_result(pRes);
+ destroyRequest(request);
smlDestroyInfo(info);
}
@@ -836,7 +858,7 @@ TEST(testCase, smlProcess_json4_Test) {
"}";
int32_t ret = smlProcess(info, (char**)(&sql), -1);
ASSERT_EQ(ret, 0);
- taos_free_result(pRes);
+ destroyRequest(request);
smlDestroyInfo(info);
}
@@ -857,7 +879,7 @@ TEST(testCase, smlParseTelnetLine_error_Test) {
ASSERT_NE(info, nullptr);
int32_t ret = 0;
- const char *sql[19] = {
+ const char *sql[] = {
"sys.procs.running 14794961040 42 host=web01",
"sys.procs.running 14791040 42 host=web01",
"sys.procs.running erere 42 host=web01",
@@ -877,6 +899,8 @@ TEST(testCase, smlParseTelnetLine_error_Test) {
"sys.procs.running 1479496100 42 host=web01 cpu= ",
"sys.procs.running 1479496100 42 host=web01 host=w2",
"sys.procs.running 1479496100 42 host=web01 host",
+ "sys.procs.running 1479496100 42 host=web01=er",
+ "sys.procs.running 1479496100 42 host= web01",
};
for(int i = 0; i < sizeof(sql)/sizeof(sql[0]); i++){
ret = smlParseTelnetLine(info, (void*)sql[i]);
diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c
index 56dea72e90..2e2bf5193d 100644
--- a/source/common/src/tglobal.c
+++ b/source/common/src/tglobal.c
@@ -299,6 +299,7 @@ static int32_t taosAddServerLogCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "tqDebugFlag", tqDebugFlag, 0, 255, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "fsDebugFlag", fsDebugFlag, 0, 255, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "fnDebugFlag", fnDebugFlag, 0, 255, 0) != 0) return -1;
+ if (cfgAddInt32(pCfg, "smaDebugFlag", smaDebugFlag, 0, 255, 0) != 0) return -1;
return 0;
}
@@ -480,6 +481,7 @@ static void taosSetServerLogCfg(SConfig *pCfg) {
tqDebugFlag = cfgGetItem(pCfg, "tqDebugFlag")->i32;
fsDebugFlag = cfgGetItem(pCfg, "fsDebugFlag")->i32;
fnDebugFlag = cfgGetItem(pCfg, "fnDebugFlag")->i32;
+ smaDebugFlag = cfgGetItem(pCfg, "smaDebugFlag")->i32;
}
static int32_t taosSetClientCfg(SConfig *pCfg) {
diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c
index 021ee8455e..d180c2ff6e 100644
--- a/source/common/src/tmsg.c
+++ b/source/common/src/tmsg.c
@@ -3562,39 +3562,92 @@ int tDecodeSVCreateTbBatchRsp(SDecoder *pCoder, SVCreateTbBatchRsp *pRsp) {
return 0;
}
-int32_t tSerializeSVCreateTSmaReq(void **buf, SVCreateTSmaReq *pReq) {
- int32_t tlen = 0;
-
- tlen += taosEncodeFixedI64(buf, pReq->ver);
- tlen += tEncodeTSma(buf, &pReq->tSma);
-
- return tlen;
-}
-
-void *tDeserializeSVCreateTSmaReq(void *buf, SVCreateTSmaReq *pReq) {
- buf = taosDecodeFixedI64(buf, &(pReq->ver));
-
- if ((buf = tDecodeTSma(buf, &pReq->tSma)) == NULL) {
- tdDestroyTSma(&pReq->tSma);
+int32_t tEncodeTSma(SEncoder *pCoder, const STSma *pSma) {
+ if (tEncodeI8(pCoder, pSma->version) < 0) return -1;
+ if (tEncodeI8(pCoder, pSma->intervalUnit) < 0) return -1;
+ if (tEncodeI8(pCoder, pSma->slidingUnit) < 0) return -1;
+ if (tEncodeI8(pCoder, pSma->timezoneInt) < 0) return -1;
+ if (tEncodeCStr(pCoder, pSma->indexName) < 0) return -1;
+ if (tEncodeI32(pCoder, pSma->exprLen) < 0) return -1;
+ if (tEncodeI32(pCoder, pSma->tagsFilterLen) < 0) return -1;
+ if (tEncodeI64(pCoder, pSma->indexUid) < 0) return -1;
+ if (tEncodeI64(pCoder, pSma->tableUid) < 0) return -1;
+ if (tEncodeI64(pCoder, pSma->interval) < 0) return -1;
+ if (tEncodeI64(pCoder, pSma->offset) < 0) return -1;
+ if (tEncodeI64(pCoder, pSma->sliding) < 0) return -1;
+ if (pSma->exprLen > 0) {
+ if (tEncodeCStr(pCoder, pSma->expr) < 0) return -1;
}
- return buf;
+ if (pSma->tagsFilterLen > 0) {
+ if (tEncodeCStr(pCoder, pSma->tagsFilter) < 0) return -1;
+ }
+
+ return 0;
}
-int32_t tSerializeSVDropTSmaReq(void **buf, SVDropTSmaReq *pReq) {
- int32_t tlen = 0;
+int32_t tDecodeTSma(SDecoder *pCoder, STSma *pSma) {
+ if (tDecodeI8(pCoder, &pSma->version) < 0) return -1;
+ if (tDecodeI8(pCoder, &pSma->intervalUnit) < 0) return -1;
+ if (tDecodeI8(pCoder, &pSma->slidingUnit) < 0) return -1;
+ if (tDecodeI8(pCoder, &pSma->timezoneInt) < 0) return -1;
+ if (tDecodeCStrTo(pCoder, pSma->indexName) < 0) return -1;
+ if (tDecodeI32(pCoder, &pSma->exprLen) < 0) return -1;
+ if (tDecodeI32(pCoder, &pSma->tagsFilterLen) < 0) return -1;
+ if (tDecodeI64(pCoder, &pSma->indexUid) < 0) return -1;
+ if (tDecodeI64(pCoder, &pSma->tableUid) < 0) return -1;
+ if (tDecodeI64(pCoder, &pSma->interval) < 0) return -1;
+ if (tDecodeI64(pCoder, &pSma->offset) < 0) return -1;
+ if (tDecodeI64(pCoder, &pSma->sliding) < 0) return -1;
+ if (pSma->exprLen > 0) {
+ if (tDecodeCStr(pCoder, &pSma->expr) < 0) return -1;
+ } else {
+ pSma->expr = NULL;
+ }
+ if (pSma->tagsFilterLen > 0) {
+ if (tDecodeCStr(pCoder, &pSma->tagsFilter) < 0) return -1;
+ } else {
+ pSma->tagsFilter = NULL;
+ }
- tlen += taosEncodeFixedI64(buf, pReq->ver);
- tlen += taosEncodeFixedI64(buf, pReq->indexUid);
- tlen += taosEncodeString(buf, pReq->indexName);
-
- return tlen;
+ return 0;
}
-void *tDeserializeSVDropTSmaReq(void *buf, SVDropTSmaReq *pReq) {
- buf = taosDecodeFixedI64(buf, &(pReq->ver));
- buf = taosDecodeFixedI64(buf, &(pReq->indexUid));
- buf = taosDecodeStringTo(buf, pReq->indexName);
- return buf;
+int32_t tEncodeSVCreateTSmaReq(SEncoder *pCoder, const SVCreateTSmaReq *pReq) {
+ if (tStartEncode(pCoder) < 0) return -1;
+
+ tEncodeTSma(pCoder, pReq);
+
+ tEndEncode(pCoder);
+ return 0;
+}
+
+int32_t tDecodeSVCreateTSmaReq(SDecoder *pCoder, SVCreateTSmaReq *pReq) {
+ if (tStartDecode(pCoder) < 0) return -1;
+
+ tDecodeTSma(pCoder, pReq);
+
+ tEndDecode(pCoder);
+ return 0;
+}
+
+int32_t tEncodeSVDropTSmaReq(SEncoder *pCoder, const SVDropTSmaReq *pReq) {
+ if (tStartEncode(pCoder) < 0) return -1;
+
+ if (tEncodeI64(pCoder, pReq->indexUid) < 0) return -1;
+ if (tEncodeCStr(pCoder, pReq->indexName) < 0) return -1;
+
+ tEndEncode(pCoder);
+ return 0;
+}
+
+int32_t tDecodeSVDropTSmaReq(SDecoder *pCoder, SVDropTSmaReq *pReq) {
+ if (tStartDecode(pCoder) < 0) return -1;
+
+ if (tDecodeI64(pCoder, &pReq->indexUid) < 0) return -1;
+ if (tDecodeCStrTo(pCoder, pReq->indexName) < 0) return -1;
+
+ tEndDecode(pCoder);
+ return 0;
}
int32_t tSerializeSCMCreateStreamReq(void *buf, int32_t bufLen, const SCMCreateStreamReq *pReq) {
@@ -4110,3 +4163,113 @@ void tFreeSSubmitRsp(SSubmitRsp *pRsp) {
taosMemoryFree(pRsp);
}
+
+int32_t tEncodeSVAlterTbReq(SEncoder *pEncoder, const SVAlterTbReq *pReq) {
+ if (tStartEncode(pEncoder) < 0) return -1;
+
+ if (tEncodeCStr(pEncoder, pReq->tbName) < 0) return -1;
+ if (tEncodeI8(pEncoder, pReq->action) < 0) return -1;
+ switch (pReq->action) {
+ case TSDB_ALTER_TABLE_ADD_COLUMN:
+ if (tEncodeCStr(pEncoder, pReq->colName) < 0) return -1;
+ if (tEncodeI8(pEncoder, pReq->type) < 0) return -1;
+ if (tEncodeI8(pEncoder, pReq->flags) < 0) return -1;
+ if (tEncodeI32v(pEncoder, pReq->bytes) < 0) return -1;
+ break;
+ case TSDB_ALTER_TABLE_DROP_COLUMN:
+ if (tEncodeCStr(pEncoder, pReq->colName) < 0) return -1;
+ break;
+ case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES:
+ if (tEncodeCStr(pEncoder, pReq->colName) < 0) return -1;
+ if (tEncodeI32v(pEncoder, pReq->colModBytes) < 0) return -1;
+ break;
+ case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME:
+ if (tEncodeCStr(pEncoder, pReq->colName) < 0) return -1;
+ if (tEncodeCStr(pEncoder, pReq->colNewName) < 0) return -1;
+ break;
+ case TSDB_ALTER_TABLE_UPDATE_TAG_VAL:
+ if (tEncodeCStr(pEncoder, pReq->tagName) < 0) return -1;
+ if (tEncodeI8(pEncoder, pReq->isNull) < 0) return -1;
+ if (!pReq->isNull) {
+ if (tEncodeBinary(pEncoder, pReq->pTagVal, pReq->nTagVal) < 0) return -1;
+ }
+ break;
+ case TSDB_ALTER_TABLE_UPDATE_OPTIONS:
+ if (tEncodeI8(pEncoder, pReq->updateTTL) < 0) return -1;
+ if (pReq->updateTTL) {
+ if (tEncodeI32v(pEncoder, pReq->newTTL) < 0) return -1;
+ }
+ if (tEncodeI8(pEncoder, pReq->updateComment) < 0) return -1;
+ if (pReq->updateComment) {
+ if (tEncodeCStr(pEncoder, pReq->newComment) < 0) return -1;
+ }
+ break;
+ default:
+ break;
+ }
+
+ tEndEncode(pEncoder);
+ return 0;
+}
+
+int32_t tDecodeSVAlterTbReq(SDecoder *pDecoder, SVAlterTbReq *pReq) {
+ if (tStartDecode(pDecoder) < 0) return -1;
+
+ if (tDecodeCStr(pDecoder, &pReq->tbName) < 0) return -1;
+ if (tDecodeI8(pDecoder, &pReq->action) < 0) return -1;
+ switch (pReq->action) {
+ case TSDB_ALTER_TABLE_ADD_COLUMN:
+ if (tDecodeCStr(pDecoder, &pReq->colName) < 0) return -1;
+ if (tDecodeI8(pDecoder, &pReq->type) < 0) return -1;
+ if (tDecodeI8(pDecoder, &pReq->flags) < 0) return -1;
+ if (tDecodeI32v(pDecoder, &pReq->bytes) < 0) return -1;
+ break;
+ case TSDB_ALTER_TABLE_DROP_COLUMN:
+ if (tDecodeCStr(pDecoder, &pReq->colName) < 0) return -1;
+ break;
+ case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES:
+ if (tDecodeCStr(pDecoder, &pReq->colName) < 0) return -1;
+ if (tDecodeI32v(pDecoder, &pReq->colModBytes) < 0) return -1;
+ break;
+ case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME:
+ if (tDecodeCStr(pDecoder, &pReq->colName) < 0) return -1;
+ if (tDecodeCStr(pDecoder, &pReq->colNewName) < 0) return -1;
+ break;
+ case TSDB_ALTER_TABLE_UPDATE_TAG_VAL:
+ if (tDecodeCStr(pDecoder, &pReq->tagName) < 0) return -1;
+ if (tDecodeI8(pDecoder, &pReq->isNull) < 0) return -1;
+ if (!pReq->isNull) {
+ if (tDecodeBinary(pDecoder, &pReq->pTagVal, &pReq->nTagVal) < 0) return -1;
+ }
+ break;
+ case TSDB_ALTER_TABLE_UPDATE_OPTIONS:
+ if (tDecodeI8(pDecoder, &pReq->updateTTL) < 0) return -1;
+ if (pReq->updateTTL) {
+ if (tDecodeI32v(pDecoder, &pReq->newTTL) < 0) return -1;
+ }
+ if (tDecodeI8(pDecoder, &pReq->updateComment) < 0) return -1;
+ if (pReq->updateComment) {
+ if (tDecodeCStr(pDecoder, &pReq->newComment) < 0) return -1;
+ }
+ break;
+ default:
+ break;
+ }
+
+ tEndDecode(pDecoder);
+ return 0;
+}
+
+int32_t tEncodeSVAlterTbRsp(SEncoder *pEncoder, const SVAlterTbRsp *pRsp) {
+ if (tStartEncode(pEncoder) < 0) return -1;
+ if (tEncodeI32(pEncoder, pRsp->code) < 0) return -1;
+ tEndEncode(pEncoder);
+ return 0;
+}
+
+int32_t tDecodeSVAlterTbRsp(SDecoder *pDecoder, SVAlterTbRsp *pRsp) {
+ if (tStartDecode(pDecoder) < 0) return -1;
+ if (tDecodeI32(pDecoder, &pRsp->code) < 0) return -1;
+ tEndDecode(pDecoder);
+ return 0;
+}
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
index 077b691462..d7298804fe 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
@@ -188,7 +188,8 @@ static void vmProcessApplyQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
rpcFreeCont(originalRpcMsg.pCont);
// if leader, send response
- if (pMsg->info.handle != NULL && pMsg->info.ahandle != NULL) {
+ // if (pMsg->rpcMsg.handle != NULL && pMsg->rpcMsg.ahandle != NULL) {
+ if (pMsg->info.handle != NULL) {
rsp.info = pMsg->info;
tmsgSendRsp(&rsp);
}
@@ -200,14 +201,24 @@ static void vmProcessApplyQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
static void vmProcessSyncQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
SVnodeObj *pVnode = pInfo->ahandle;
- SRpcMsg *pMsg = NULL;
+ SRpcMsg *pMsg = NULL;
for (int32_t i = 0; i < numOfMsgs; ++i) {
taosGetQitem(qall, (void **)&pMsg);
// todo
SRpcMsg *pRsp = NULL;
- (void)vnodeProcessSyncReq(pVnode->pImpl, pMsg, &pRsp);
+ int32_t ret = vnodeProcessSyncReq(pVnode->pImpl, pMsg, &pRsp);
+ if (ret != 0) {
+ // if leader, send response
+ if (pMsg->info.handle != NULL) {
+ SRpcMsg rsp = {0};
+ rsp.code = terrno;
+ rsp.info = pMsg->info;
+ dTrace("vmProcessSyncQueue error, code:%d", terrno);
+ tmsgSendRsp(&rsp);
+ }
+ }
rpcFreeCont(pMsg->pCont);
taosFreeQitem(pMsg);
diff --git a/source/dnode/mnode/impl/src/mndDef.c b/source/dnode/mnode/impl/src/mndDef.c
index 8225eca659..35ba25acd5 100644
--- a/source/dnode/mnode/impl/src/mndDef.c
+++ b/source/dnode/mnode/impl/src/mndDef.c
@@ -17,6 +17,147 @@
#include "mndDef.h"
#include "mndConsumer.h"
+int32_t tEncodeSStreamObj(SEncoder *pEncoder, const SStreamObj *pObj) {
+ int32_t sz = 0;
+ /*int32_t outputNameSz = 0;*/
+ if (tEncodeCStr(pEncoder, pObj->name) < 0) return -1;
+ if (tEncodeCStr(pEncoder, pObj->sourceDb) < 0) return -1;
+ if (tEncodeCStr(pEncoder, pObj->targetDb) < 0) return -1;
+ if (tEncodeCStr(pEncoder, pObj->targetSTbName) < 0) return -1;
+ if (tEncodeI64(pEncoder, pObj->targetStbUid) < 0) return -1;
+ if (tEncodeI64(pEncoder, pObj->createTime) < 0) return -1;
+ if (tEncodeI64(pEncoder, pObj->updateTime) < 0) return -1;
+ if (tEncodeI64(pEncoder, pObj->uid) < 0) return -1;
+ if (tEncodeI64(pEncoder, pObj->dbUid) < 0) return -1;
+ if (tEncodeI32(pEncoder, pObj->version) < 0) return -1;
+ if (tEncodeI8(pEncoder, pObj->status) < 0) return -1;
+ if (tEncodeI8(pEncoder, pObj->createdBy) < 0) return -1;
+ if (tEncodeI8(pEncoder, pObj->trigger) < 0) return -1;
+ if (tEncodeI32(pEncoder, pObj->triggerParam) < 0) return -1;
+ if (tEncodeI64(pEncoder, pObj->waterMark) < 0) return -1;
+ if (tEncodeI32(pEncoder, pObj->fixedSinkVgId) < 0) return -1;
+ if (tEncodeI64(pEncoder, pObj->smaId) < 0) return -1;
+ if (tEncodeCStr(pEncoder, pObj->sql) < 0) return -1;
+ /*if (tEncodeCStr(pEncoder, pObj->logicalPlan) < 0) return -1;*/
+ if (tEncodeCStr(pEncoder, pObj->physicalPlan) < 0) return -1;
+ // TODO encode tasks
+ if (pObj->tasks) {
+ sz = taosArrayGetSize(pObj->tasks);
+ }
+ if (tEncodeI32(pEncoder, sz) < 0) return -1;
+
+ for (int32_t i = 0; i < sz; i++) {
+ SArray *pArray = taosArrayGetP(pObj->tasks, i);
+ int32_t innerSz = taosArrayGetSize(pArray);
+ if (tEncodeI32(pEncoder, innerSz) < 0) return -1;
+ for (int32_t j = 0; j < innerSz; j++) {
+ SStreamTask *pTask = taosArrayGetP(pArray, j);
+ if (tEncodeSStreamTask(pEncoder, pTask) < 0) return -1;
+ }
+ }
+
+ if (tEncodeSSchemaWrapper(pEncoder, &pObj->outputSchema) < 0) return -1;
+
+#if 0
+ if (pObj->ColAlias != NULL) {
+ outputNameSz = taosArrayGetSize(pObj->ColAlias);
+ }
+ if (tEncodeI32(pEncoder, outputNameSz) < 0) return -1;
+ for (int32_t i = 0; i < outputNameSz; i++) {
+ char *name = taosArrayGetP(pObj->ColAlias, i);
+ if (tEncodeCStr(pEncoder, name) < 0) return -1;
+ }
+#endif
+ return pEncoder->pos;
+}
+
+int32_t tDecodeSStreamObj(SDecoder *pDecoder, SStreamObj *pObj) {
+ if (tDecodeCStrTo(pDecoder, pObj->name) < 0) return -1;
+ if (tDecodeCStrTo(pDecoder, pObj->sourceDb) < 0) return -1;
+ if (tDecodeCStrTo(pDecoder, pObj->targetDb) < 0) return -1;
+ if (tDecodeCStrTo(pDecoder, pObj->targetSTbName) < 0) return -1;
+ if (tDecodeI64(pDecoder, &pObj->targetStbUid) < 0) return -1;
+ if (tDecodeI64(pDecoder, &pObj->createTime) < 0) return -1;
+ if (tDecodeI64(pDecoder, &pObj->updateTime) < 0) return -1;
+ if (tDecodeI64(pDecoder, &pObj->uid) < 0) return -1;
+ if (tDecodeI64(pDecoder, &pObj->dbUid) < 0) return -1;
+ if (tDecodeI32(pDecoder, &pObj->version) < 0) return -1;
+ if (tDecodeI8(pDecoder, &pObj->status) < 0) return -1;
+ if (tDecodeI8(pDecoder, &pObj->createdBy) < 0) return -1;
+ if (tDecodeI8(pDecoder, &pObj->trigger) < 0) return -1;
+ if (tDecodeI32(pDecoder, &pObj->triggerParam) < 0) return -1;
+ if (tDecodeI64(pDecoder, &pObj->waterMark) < 0) return -1;
+ if (tDecodeI32(pDecoder, &pObj->fixedSinkVgId) < 0) return -1;
+ if (tDecodeI64(pDecoder, &pObj->smaId) < 0) return -1;
+ if (tDecodeCStrAlloc(pDecoder, &pObj->sql) < 0) return -1;
+ /*if (tDecodeCStrAlloc(pDecoder, &pObj->logicalPlan) < 0) return -1;*/
+ if (tDecodeCStrAlloc(pDecoder, &pObj->physicalPlan) < 0) return -1;
+ pObj->tasks = NULL;
+ int32_t sz;
+ if (tDecodeI32(pDecoder, &sz) < 0) return -1;
+ if (sz != 0) {
+ pObj->tasks = taosArrayInit(sz, sizeof(void *));
+ for (int32_t i = 0; i < sz; i++) {
+ int32_t innerSz;
+ if (tDecodeI32(pDecoder, &innerSz) < 0) return -1;
+ SArray *pArray = taosArrayInit(innerSz, sizeof(void *));
+ for (int32_t j = 0; j < innerSz; j++) {
+ SStreamTask *pTask = taosMemoryCalloc(1, sizeof(SStreamTask));
+ if (pTask == NULL) return -1;
+ if (tDecodeSStreamTask(pDecoder, pTask) < 0) return -1;
+ taosArrayPush(pArray, &pTask);
+ }
+ taosArrayPush(pObj->tasks, &pArray);
+ }
+ }
+
+ if (tDecodeSSchemaWrapper(pDecoder, &pObj->outputSchema) < 0) return -1;
+#if 0
+ int32_t outputNameSz;
+ if (tDecodeI32(pDecoder, &outputNameSz) < 0) return -1;
+ if (outputNameSz != 0) {
+ pObj->ColAlias = taosArrayInit(outputNameSz, sizeof(void *));
+ if (pObj->ColAlias == NULL) {
+ return -1;
+ }
+ }
+ for (int32_t i = 0; i < outputNameSz; i++) {
+ char *name;
+ if (tDecodeCStrAlloc(pDecoder, &name) < 0) return -1;
+ taosArrayPush(pObj->ColAlias, &name);
+ }
+#endif
+ return 0;
+}
+
+SMqVgEp *tCloneSMqVgEp(const SMqVgEp *pVgEp) {
+ SMqVgEp *pVgEpNew = taosMemoryMalloc(sizeof(SMqVgEp));
+ if (pVgEpNew == NULL) return NULL;
+ pVgEpNew->vgId = pVgEp->vgId;
+ pVgEpNew->qmsg = strdup(pVgEp->qmsg);
+ pVgEpNew->epSet = pVgEp->epSet;
+ return pVgEpNew;
+}
+
+void tDeleteSMqVgEp(SMqVgEp *pVgEp) {
+ if (pVgEp->qmsg) taosMemoryFree(pVgEp->qmsg);
+}
+
+int32_t tEncodeSMqVgEp(void **buf, const SMqVgEp *pVgEp) {
+ int32_t tlen = 0;
+ tlen += taosEncodeFixedI32(buf, pVgEp->vgId);
+ tlen += taosEncodeString(buf, pVgEp->qmsg);
+ tlen += taosEncodeSEpSet(buf, &pVgEp->epSet);
+ return tlen;
+}
+
+void *tDecodeSMqVgEp(const void *buf, SMqVgEp *pVgEp) {
+ buf = taosDecodeFixedI32(buf, &pVgEp->vgId);
+ buf = taosDecodeString(buf, &pVgEp->qmsg);
+ buf = taosDecodeSEpSet(buf, &pVgEp->epSet);
+ return (void *)buf;
+}
+
SMqConsumerObj *tNewSMqConsumerObj(int64_t consumerId, char cgroup[TSDB_CGROUP_LEN]) {
SMqConsumerObj *pConsumer = taosMemoryCalloc(1, sizeof(SMqConsumerObj));
if (pConsumer == NULL) {
@@ -187,34 +328,6 @@ void *tDecodeSMqConsumerObj(const void *buf, SMqConsumerObj *pConsumer) {
return (void *)buf;
}
-SMqVgEp *tCloneSMqVgEp(const SMqVgEp *pVgEp) {
- SMqVgEp *pVgEpNew = taosMemoryMalloc(sizeof(SMqVgEp));
- if (pVgEpNew == NULL) return NULL;
- pVgEpNew->vgId = pVgEp->vgId;
- pVgEpNew->qmsg = strdup(pVgEp->qmsg);
- pVgEpNew->epSet = pVgEp->epSet;
- return pVgEpNew;
-}
-
-void tDeleteSMqVgEp(SMqVgEp *pVgEp) {
- if (pVgEp->qmsg) taosMemoryFree(pVgEp->qmsg);
-}
-
-int32_t tEncodeSMqVgEp(void **buf, const SMqVgEp *pVgEp) {
- int32_t tlen = 0;
- tlen += taosEncodeFixedI32(buf, pVgEp->vgId);
- tlen += taosEncodeString(buf, pVgEp->qmsg);
- tlen += taosEncodeSEpSet(buf, &pVgEp->epSet);
- return tlen;
-}
-
-void *tDecodeSMqVgEp(const void *buf, SMqVgEp *pVgEp) {
- buf = taosDecodeFixedI32(buf, &pVgEp->vgId);
- buf = taosDecodeString(buf, &pVgEp->qmsg);
- buf = taosDecodeSEpSet(buf, &pVgEp->epSet);
- return (void *)buf;
-}
-
SMqConsumerEp *tCloneSMqConsumerEp(const SMqConsumerEp *pConsumerEpOld) {
SMqConsumerEp *pConsumerEpNew = taosMemoryMalloc(sizeof(SMqConsumerEp));
if (pConsumerEpNew == NULL) return NULL;
@@ -413,119 +526,6 @@ void *tDecodeSMqSubActionLogObj(const void *buf, SMqSubActionLogObj *pLog) {
return (void *)buf;
}
-int32_t tEncodeSStreamObj(SEncoder *pEncoder, const SStreamObj *pObj) {
- int32_t sz = 0;
- /*int32_t outputNameSz = 0;*/
- if (tEncodeCStr(pEncoder, pObj->name) < 0) return -1;
- if (tEncodeCStr(pEncoder, pObj->sourceDb) < 0) return -1;
- if (tEncodeCStr(pEncoder, pObj->targetDb) < 0) return -1;
- if (tEncodeCStr(pEncoder, pObj->targetSTbName) < 0) return -1;
- if (tEncodeI64(pEncoder, pObj->targetStbUid) < 0) return -1;
- if (tEncodeI64(pEncoder, pObj->createTime) < 0) return -1;
- if (tEncodeI64(pEncoder, pObj->updateTime) < 0) return -1;
- if (tEncodeI64(pEncoder, pObj->uid) < 0) return -1;
- if (tEncodeI64(pEncoder, pObj->dbUid) < 0) return -1;
- if (tEncodeI32(pEncoder, pObj->version) < 0) return -1;
- if (tEncodeI8(pEncoder, pObj->status) < 0) return -1;
- if (tEncodeI8(pEncoder, pObj->createdBy) < 0) return -1;
- if (tEncodeI8(pEncoder, pObj->trigger) < 0) return -1;
- if (tEncodeI32(pEncoder, pObj->triggerParam) < 0) return -1;
- if (tEncodeI64(pEncoder, pObj->waterMark) < 0) return -1;
- if (tEncodeI32(pEncoder, pObj->fixedSinkVgId) < 0) return -1;
- if (tEncodeI64(pEncoder, pObj->smaId) < 0) return -1;
- if (tEncodeCStr(pEncoder, pObj->sql) < 0) return -1;
- /*if (tEncodeCStr(pEncoder, pObj->logicalPlan) < 0) return -1;*/
- if (tEncodeCStr(pEncoder, pObj->physicalPlan) < 0) return -1;
- // TODO encode tasks
- if (pObj->tasks) {
- sz = taosArrayGetSize(pObj->tasks);
- }
- if (tEncodeI32(pEncoder, sz) < 0) return -1;
-
- for (int32_t i = 0; i < sz; i++) {
- SArray *pArray = taosArrayGetP(pObj->tasks, i);
- int32_t innerSz = taosArrayGetSize(pArray);
- if (tEncodeI32(pEncoder, innerSz) < 0) return -1;
- for (int32_t j = 0; j < innerSz; j++) {
- SStreamTask *pTask = taosArrayGetP(pArray, j);
- if (tEncodeSStreamTask(pEncoder, pTask) < 0) return -1;
- }
- }
-
- if (tEncodeSSchemaWrapper(pEncoder, &pObj->outputSchema) < 0) return -1;
-
-#if 0
- if (pObj->ColAlias != NULL) {
- outputNameSz = taosArrayGetSize(pObj->ColAlias);
- }
- if (tEncodeI32(pEncoder, outputNameSz) < 0) return -1;
- for (int32_t i = 0; i < outputNameSz; i++) {
- char *name = taosArrayGetP(pObj->ColAlias, i);
- if (tEncodeCStr(pEncoder, name) < 0) return -1;
- }
-#endif
- return pEncoder->pos;
-}
-
-int32_t tDecodeSStreamObj(SDecoder *pDecoder, SStreamObj *pObj) {
- if (tDecodeCStrTo(pDecoder, pObj->name) < 0) return -1;
- if (tDecodeCStrTo(pDecoder, pObj->sourceDb) < 0) return -1;
- if (tDecodeCStrTo(pDecoder, pObj->targetDb) < 0) return -1;
- if (tDecodeCStrTo(pDecoder, pObj->targetSTbName) < 0) return -1;
- if (tDecodeI64(pDecoder, &pObj->targetStbUid) < 0) return -1;
- if (tDecodeI64(pDecoder, &pObj->createTime) < 0) return -1;
- if (tDecodeI64(pDecoder, &pObj->updateTime) < 0) return -1;
- if (tDecodeI64(pDecoder, &pObj->uid) < 0) return -1;
- if (tDecodeI64(pDecoder, &pObj->dbUid) < 0) return -1;
- if (tDecodeI32(pDecoder, &pObj->version) < 0) return -1;
- if (tDecodeI8(pDecoder, &pObj->status) < 0) return -1;
- if (tDecodeI8(pDecoder, &pObj->createdBy) < 0) return -1;
- if (tDecodeI8(pDecoder, &pObj->trigger) < 0) return -1;
- if (tDecodeI32(pDecoder, &pObj->triggerParam) < 0) return -1;
- if (tDecodeI64(pDecoder, &pObj->waterMark) < 0) return -1;
- if (tDecodeI32(pDecoder, &pObj->fixedSinkVgId) < 0) return -1;
- if (tDecodeI64(pDecoder, &pObj->smaId) < 0) return -1;
- if (tDecodeCStrAlloc(pDecoder, &pObj->sql) < 0) return -1;
- /*if (tDecodeCStrAlloc(pDecoder, &pObj->logicalPlan) < 0) return -1;*/
- if (tDecodeCStrAlloc(pDecoder, &pObj->physicalPlan) < 0) return -1;
- pObj->tasks = NULL;
- int32_t sz;
- if (tDecodeI32(pDecoder, &sz) < 0) return -1;
- if (sz != 0) {
- pObj->tasks = taosArrayInit(sz, sizeof(void *));
- for (int32_t i = 0; i < sz; i++) {
- int32_t innerSz;
- if (tDecodeI32(pDecoder, &innerSz) < 0) return -1;
- SArray *pArray = taosArrayInit(innerSz, sizeof(void *));
- for (int32_t j = 0; j < innerSz; j++) {
- SStreamTask *pTask = taosMemoryCalloc(1, sizeof(SStreamTask));
- if (pTask == NULL) return -1;
- if (tDecodeSStreamTask(pDecoder, pTask) < 0) return -1;
- taosArrayPush(pArray, &pTask);
- }
- taosArrayPush(pObj->tasks, &pArray);
- }
- }
-
- if (tDecodeSSchemaWrapper(pDecoder, &pObj->outputSchema) < 0) return -1;
-#if 0
- int32_t outputNameSz;
- if (tDecodeI32(pDecoder, &outputNameSz) < 0) return -1;
- if (outputNameSz != 0) {
- pObj->ColAlias = taosArrayInit(outputNameSz, sizeof(void *));
- if (pObj->ColAlias == NULL) {
- return -1;
- }
- }
- for (int32_t i = 0; i < outputNameSz; i++) {
- char *name;
- if (tDecodeCStrAlloc(pDecoder, &name) < 0) return -1;
- taosArrayPush(pObj->ColAlias, &name);
- }
-#endif
- return 0;
-}
-
int32_t tEncodeSMqOffsetObj(void **buf, const SMqOffsetObj *pOffset) {
int32_t tlen = 0;
tlen += taosEncodeString(buf, pOffset->key);
diff --git a/source/dnode/mnode/impl/src/mndScheduler.c b/source/dnode/mnode/impl/src/mndScheduler.c
index 824f031004..22a5f37334 100644
--- a/source/dnode/mnode/impl/src/mndScheduler.c
+++ b/source/dnode/mnode/impl/src/mndScheduler.c
@@ -194,6 +194,7 @@ int32_t mndAddShuffledSinkToStream(SMnode* pMnode, STrans* pTrans, SStreamObj* p
// source
pTask->sourceType = TASK_SOURCE__MERGE;
+ pTask->inputType = TASK_INPUT_TYPE__DATA_BLOCK;
// exec
pTask->execType = TASK_EXEC__NONE;
@@ -235,6 +236,7 @@ int32_t mndAddFixedSinkToStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStr
pTask->epSet = mndGetVgroupEpset(pMnode, pVgroup);
// source
pTask->sourceType = TASK_SOURCE__MERGE;
+ pTask->inputType = TASK_INPUT_TYPE__DATA_BLOCK;
// exec
pTask->execType = TASK_EXEC__NONE;
@@ -309,6 +311,7 @@ int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream) {
SStreamTask* pTask = tNewSStreamTask(pStream->uid);
// source part
pTask->sourceType = TASK_SOURCE__SCAN;
+ pTask->inputType = TASK_INPUT_TYPE__SUMBIT_BLOCK;
// sink part
if (level == 0) {
@@ -372,6 +375,7 @@ int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream) {
// source part, currently only support multi source
pTask->sourceType = TASK_SOURCE__PIPE;
+ pTask->inputType = TASK_INPUT_TYPE__DATA_BLOCK;
// sink part
pTask->sinkType = TASK_SINK__NONE;
@@ -459,6 +463,7 @@ int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream) {
// source part
pTask->sourceType = TASK_SOURCE__MERGE;
+ pTask->inputType = TASK_INPUT_TYPE__DATA_BLOCK;
// sink part
pTask->sinkType = TASK_SINK__NONE;
diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c
index 15243df506..a03a1e68fb 100644
--- a/source/dnode/mnode/impl/src/mndSma.c
+++ b/source/dnode/mnode/impl/src/mndSma.c
@@ -242,26 +242,35 @@ SDbObj *mndAcquireDbBySma(SMnode *pMnode, const char *smaName) {
}
static void *mndBuildVCreateSmaReq(SMnode *pMnode, SVgObj *pVgroup, SSmaObj *pSma, int32_t *pContLen) {
- SName name = {0};
+ SEncoder encoder = {0};
+ int32_t contLen = 0;
+ SName name = {0};
tNameFromString(&name, pSma->name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
SVCreateTSmaReq req = {0};
- req.tSma.version = 0;
- req.tSma.intervalUnit = pSma->intervalUnit;
- req.tSma.slidingUnit = pSma->slidingUnit;
- req.tSma.timezoneInt = pSma->timezone;
- tstrncpy(req.tSma.indexName, (char *)tNameGetTableName(&name), TSDB_INDEX_NAME_LEN);
- req.tSma.exprLen = pSma->exprLen;
- req.tSma.tagsFilterLen = pSma->tagsFilterLen;
- req.tSma.indexUid = pSma->uid;
- req.tSma.tableUid = pSma->stbUid;
- req.tSma.interval = pSma->interval;
- req.tSma.offset = pSma->offset;
- req.tSma.sliding = pSma->sliding;
- req.tSma.expr = pSma->expr;
- req.tSma.tagsFilter = pSma->tagsFilter;
+ req.version = 0;
+ req.intervalUnit = pSma->intervalUnit;
+ req.slidingUnit = pSma->slidingUnit;
+ req.timezoneInt = pSma->timezone;
+ tstrncpy(req.indexName, (char *)tNameGetTableName(&name), TSDB_INDEX_NAME_LEN);
+ req.exprLen = pSma->exprLen;
+ req.tagsFilterLen = pSma->tagsFilterLen;
+ req.indexUid = pSma->uid;
+ req.tableUid = pSma->stbUid;
+ req.interval = pSma->interval;
+ req.offset = pSma->offset;
+ req.sliding = pSma->sliding;
+ req.expr = pSma->expr;
+ req.tagsFilter = pSma->tagsFilter;
+
+ // get length
+ int32_t ret = 0;
+ tEncodeSize(tEncodeSVCreateTSmaReq, &req, contLen, ret);
+ if (ret < 0) {
+ return NULL;
+ }
+ contLen += sizeof(SMsgHead);
- int32_t contLen = tSerializeSVCreateTSmaReq(NULL, &req) + sizeof(SMsgHead);
SMsgHead *pHead = taosMemoryMalloc(contLen);
if (pHead == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -272,22 +281,38 @@ static void *mndBuildVCreateSmaReq(SMnode *pMnode, SVgObj *pVgroup, SSmaObj *pSm
pHead->vgId = htonl(pVgroup->vgId);
void *pBuf = POINTER_SHIFT(pHead, sizeof(SMsgHead));
- tSerializeSVCreateTSmaReq(&pBuf, &req);
+ tEncoderInit(&encoder, pBuf, contLen - sizeof(SMsgHead));
+ if (tEncodeSVCreateTSmaReq(&encoder, &req) < 0) {
+ taosMemoryFreeClear(pHead);
+ tEncoderClear(&encoder);
+ return NULL;
+ }
+
+ tEncoderClear(&encoder);
*pContLen = contLen;
return pHead;
}
static void *mndBuildVDropSmaReq(SMnode *pMnode, SVgObj *pVgroup, SSmaObj *pSma, int32_t *pContLen) {
+ SEncoder encoder = {0};
+ int32_t contLen;
SName name = {0};
tNameFromString(&name, pSma->name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
SVDropTSmaReq req = {0};
- req.ver = 0;
req.indexUid = pSma->uid;
tstrncpy(req.indexName, (char *)tNameGetTableName(&name), TSDB_INDEX_NAME_LEN);
- int32_t contLen = tSerializeSVDropTSmaReq(NULL, &req) + sizeof(SMsgHead);
+ // get length
+ int32_t ret = 0;
+ tEncodeSize(tEncodeSVDropTSmaReq, &req, contLen, ret);
+ if (ret < 0) {
+ return NULL;
+ }
+
+ contLen += sizeof(SMsgHead);
+
SMsgHead *pHead = taosMemoryMalloc(contLen);
if (pHead == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -298,7 +323,14 @@ static void *mndBuildVDropSmaReq(SMnode *pMnode, SVgObj *pVgroup, SSmaObj *pSma,
pHead->vgId = htonl(pVgroup->vgId);
void *pBuf = POINTER_SHIFT(pHead, sizeof(SMsgHead));
- tDeserializeSVDropTSmaReq(&pBuf, &req);
+ tEncoderInit(&encoder, pBuf, contLen - sizeof(SMsgHead));
+
+ if (tEncodeSVDropTSmaReq(&encoder, &req) < 0) {
+ taosMemoryFreeClear(pHead);
+ tEncoderClear(&encoder);
+ return NULL;
+ }
+ tEncoderClear(&encoder);
*pContLen = contLen;
return pHead;
diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c
index 4fc3b970de..626f0fcab5 100644
--- a/source/dnode/mnode/impl/src/mndStb.c
+++ b/source/dnode/mnode/impl/src/mndStb.c
@@ -383,9 +383,10 @@ static void *mndBuildVCreateStbReq(SMnode *pMnode, SVgObj *pVgroup, SStbObj *pSt
req.suid = pStb->uid;
req.rollup = pStb->ast1Len > 0 ? 1 : 0;
req.schema.nCols = pStb->numOfColumns;
- req.schema.sver = 0;
+ req.schema.sver = pStb->version;
req.schema.pSchema = pStb->pColumns;
req.schemaTag.nCols = pStb->numOfTags;
+ req.schemaTag.nCols = 0;
req.schemaTag.pSchema = pStb->pTags;
if (req.rollup) {
@@ -425,6 +426,10 @@ static void *mndBuildVCreateStbReq(SMnode *pMnode, SVgObj *pVgroup, SStbObj *pSt
void *pBuf = POINTER_SHIFT(pHead, sizeof(SMsgHead));
tEncoderInit(&encoder, pBuf, contLen - sizeof(SMsgHead));
if (tEncodeSVCreateStbReq(&encoder, &req) < 0) {
+ taosMemoryFreeClear(pHead);
+ taosMemoryFreeClear(req.pRSmaParam.qmsg1);
+ taosMemoryFreeClear(req.pRSmaParam.qmsg2);
+ tEncoderClear(&encoder);
return NULL;
}
tEncoderClear(&encoder);
diff --git a/source/dnode/mnode/impl/test/trans/trans2.cpp b/source/dnode/mnode/impl/test/trans/trans2.cpp
index e796ff9763..622ee25c5c 100644
--- a/source/dnode/mnode/impl/test/trans/trans2.cpp
+++ b/source/dnode/mnode/impl/test/trans/trans2.cpp
@@ -510,4 +510,4 @@ TEST_F(MndTestTrans2, 04_Conflict) {
ASSERT_EQ(pUser, nullptr);
mndReleaseUser(pMnode, pUser);
}
-}
\ No newline at end of file
+}
diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt
index 58e00ee34a..a8e3860ed1 100644
--- a/source/dnode/vnode/CMakeLists.txt
+++ b/source/dnode/vnode/CMakeLists.txt
@@ -18,12 +18,21 @@ target_sources(
"src/meta/metaOpen.c"
"src/meta/metaIdx.c"
"src/meta/metaTable.c"
+ "src/meta/metaSma.c"
"src/meta/metaQuery.c"
"src/meta/metaCommit.c"
"src/meta/metaEntry.c"
+ # sma
+ "src/sma/sma.c"
+ "src/sma/smaTDBImpl.c"
+ "src/sma/smaEnv.c"
+ "src/sma/smaOpen.c"
+ "src/sma/smaRollup.c"
+ "src/sma/smaTimeRange.c"
+
# tsdb
- "src/tsdb/tsdbTDBImpl.c"
+ # "src/tsdb/tsdbTDBImpl.c"
"src/tsdb/tsdbCommit.c"
"src/tsdb/tsdbCommit2.c"
"src/tsdb/tsdbFile.c"
@@ -33,7 +42,7 @@ target_sources(
"src/tsdb/tsdbMemTable2.c"
"src/tsdb/tsdbRead.c"
"src/tsdb/tsdbReadImpl.c"
- "src/tsdb/tsdbSma.c"
+ # "src/tsdb/tsdbSma.c"
"src/tsdb/tsdbWrite.c"
# tq
diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h
index 130cebf0b1..4431a2c48b 100644
--- a/source/dnode/vnode/inc/vnode.h
+++ b/source/dnode/vnode/inc/vnode.h
@@ -189,8 +189,12 @@ struct SMetaEntry {
struct {
int64_t ctime;
int32_t ttlDays;
+ int32_t ncid; // next column id
SSchemaWrapper schema;
} ntbEntry;
+ struct {
+ STSma *tsma;
+ } smaEntry;
};
};
diff --git a/source/dnode/vnode/src/inc/meta.h b/source/dnode/vnode/src/inc/meta.h
index 60b3a889ef..c5ca806829 100644
--- a/source/dnode/vnode/src/inc/meta.h
+++ b/source/dnode/vnode/src/inc/meta.h
@@ -73,6 +73,7 @@ struct SMeta {
TDB* pCtbIdx;
TDB* pTagIdx;
TDB* pTtlIdx;
+ TDB* pSmaIdx;
SMetaIdx* pIdx;
};
@@ -108,6 +109,11 @@ typedef struct {
tb_uid_t uid;
} STtlIdxKey;
+typedef struct {
+ tb_uid_t uid;
+ int64_t smaUid;
+} SSmaIdxKey;
+
#if 1
SMSmaCursor* metaOpenSmaCursor(SMeta* pMeta, tb_uid_t uid);
@@ -118,7 +124,7 @@ int64_t metaSmaCursorNext(SMSmaCursor* pSmaCur);
// SMetaDB
int metaOpenDB(SMeta* pMeta);
void metaCloseDB(SMeta* pMeta);
-// int metaSaveTableToDB(SMeta* pMeta, STbCfg* pTbCfg, STbDdlH* pHandle);
+int metaSaveTableToDB(SMeta* pMeta, STbCfg* pTbCfg, STbDdlH* pHandle);
int metaRemoveTableFromDb(SMeta* pMeta, tb_uid_t uid);
int metaSaveSmaToDB(SMeta* pMeta, STSma* pTbCfg);
int metaRemoveSmaFromDb(SMeta* pMeta, int64_t indexUid);
diff --git a/source/dnode/vnode/src/inc/sma.h b/source/dnode/vnode/src/inc/sma.h
new file mode 100644
index 0000000000..76a30f58dd
--- /dev/null
+++ b/source/dnode/vnode/src/inc/sma.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#ifndef _TD_VNODE_SMA_H_
+#define _TD_VNODE_SMA_H_
+
+#include "vnodeInt.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// smaDebug ================
+// clang-format off
+#define smaFatal(...) do { if (smaDebugFlag & DEBUG_FATAL) { taosPrintLog("SMA FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} while(0)
+#define smaError(...) do { if (smaDebugFlag & DEBUG_ERROR) { taosPrintLog("SMA ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while(0)
+#define smaWarn(...) do { if (smaDebugFlag & DEBUG_WARN) { taosPrintLog("SMA WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} while(0)
+#define smaInfo(...) do { if (smaDebugFlag & DEBUG_INFO) { taosPrintLog("SMA ", DEBUG_INFO, 255, __VA_ARGS__); }} while(0)
+#define smaDebug(...) do { if (smaDebugFlag & DEBUG_DEBUG) { taosPrintLog("SMA ", DEBUG_DEBUG, tsdbDebugFlag, __VA_ARGS__); }} while(0)
+#define smaTrace(...) do { if (smaDebugFlag & DEBUG_TRACE) { taosPrintLog("SMA ", DEBUG_TRACE, tsdbDebugFlag, __VA_ARGS__); }} while(0)
+// clang-format on
+
+typedef struct SSmaEnv SSmaEnv;
+typedef struct SSmaStat SSmaStat;
+typedef struct SSmaStatItem SSmaStatItem;
+typedef struct SSmaKey SSmaKey;
+typedef struct SRSmaInfo SRSmaInfo;
+
+#define SMA_IVLD_FID INT_MIN
+
+struct SSmaEnv {
+ TdThreadRwlock lock;
+ int8_t type;
+ TXN txn;
+ void *pPool; // SPoolMem
+ SDiskID did;
+ TENV *dbEnv; // TODO: If it's better to put it in smaIndex level?
+ char *path; // relative path
+ SSmaStat *pStat;
+};
+
+#define SMA_ENV_LOCK(env) ((env)->lock)
+#define SMA_ENV_TYPE(env) ((env)->type)
+#define SMA_ENV_DID(env) ((env)->did)
+#define SMA_ENV_ENV(env) ((env)->dbEnv)
+#define SMA_ENV_PATH(env) ((env)->path)
+#define SMA_ENV_STAT(env) ((env)->pStat)
+#define SMA_ENV_STAT_ITEMS(env) ((env)->pStat->smaStatItems)
+
+struct SSmaStatItem {
+ /**
+ * @brief The field 'state' is here to demonstrate if one smaIndex is ready to provide service.
+ * - TSDB_SMA_STAT_OK: 1) The sma calculation of history data is finished; 2) Or recevied information from
+ * Streaming Module or TSDB local persistence.
+ * - TSDB_SMA_STAT_EXPIRED: 1) If sma calculation of history TS data is not finished; 2) Or if the TSDB is open,
+ * without information about its previous state.
+ * - TSDB_SMA_STAT_DROPPED: 1)sma dropped
+ * N.B. only applicable to tsma
+ */
+ int8_t state; // ETsdbSmaStat
+ SHashObj *expiredWindows; // key: skey of time window, value: N/A
+ STSma *pTSma; // cache schema
+};
+
+struct SSmaStat {
+ union {
+ SHashObj *smaStatItems; // key: indexUid, value: SSmaStatItem for tsma
+ SHashObj *rsmaInfoHash; // key: stbUid, value: SRSmaInfo;
+ };
+ T_REF_DECLARE()
+};
+#define SMA_STAT_ITEMS(s) ((s)->smaStatItems)
+#define SMA_STAT_INFO_HASH(s) ((s)->rsmaInfoHash)
+
+struct SSmaKey {
+ TSKEY skey;
+ int64_t groupId;
+};
+
+typedef struct SDBFile SDBFile;
+
+struct SDBFile {
+ int32_t fid;
+ TDB *pDB;
+ char *path;
+};
+
+int32_t tdSmaBeginCommit(SSmaEnv *pEnv);
+int32_t tdSmaEndCommit(SSmaEnv *pEnv);
+
+int32_t smaOpenDBEnv(TENV **ppEnv, const char *path);
+int32_t smaCloseDBEnv(TENV *pEnv);
+int32_t smaOpenDBF(TENV *pEnv, SDBFile *pDBF);
+int32_t smaCloseDBF(SDBFile *pDBF);
+int32_t smaSaveSmaToDB(SDBFile *pDBF, void *pKey, int32_t keyLen, void *pVal, int32_t valLen, TXN *txn);
+void *smaGetSmaDataByKey(SDBFile *pDBF, const void *pKey, int32_t keyLen, int32_t *valLen);
+
+void tdDestroySmaEnv(SSmaEnv *pSmaEnv);
+void *tdFreeSmaEnv(SSmaEnv *pSmaEnv);
+#if 0
+int32_t tbGetTSmaStatus(SSma *pSma, STSma *param, void *result);
+int32_t tbRemoveTSmaData(SSma *pSma, STSma *param, STimeWindow *pWin);
+#endif
+
+static FORCE_INLINE int32_t tdEncodeTSmaKey(int64_t groupId, TSKEY tsKey, void **pData) {
+ int32_t len = 0;
+ len += taosEncodeFixedI64(pData, tsKey);
+ len += taosEncodeFixedI64(pData, groupId);
+ return len;
+}
+
+int32_t tdInitSma(SSma *pSma);
+int32_t tdDropTSma(SSma *pSma, char *pMsg);
+int32_t tdDropTSmaData(SSma *pSma, int64_t indexUid);
+int32_t tdInsertRSmaData(SSma *pSma, char *msg);
+
+int32_t tdRefSmaStat(SSma *pSma, SSmaStat *pStat);
+int32_t tdUnRefSmaStat(SSma *pSma, SSmaStat *pStat);
+int32_t tdCheckAndInitSmaEnv(SSma *pSma, int8_t smaType);
+
+int32_t tdLockSma(SSma *pSma);
+int32_t tdUnLockSma(SSma *pSma);
+
+int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg);
+
+static FORCE_INLINE int16_t tdTSmaAdd(SSma *pSma, int16_t n) { return atomic_add_fetch_16(&SMA_TSMA_NUM(pSma), n); }
+static FORCE_INLINE int16_t tdTSmaSub(SSma *pSma, int16_t n) { return atomic_sub_fetch_16(&SMA_TSMA_NUM(pSma), n); }
+
+static FORCE_INLINE int32_t tdRLockSmaEnv(SSmaEnv *pEnv) {
+ int code = taosThreadRwlockRdlock(&(pEnv->lock));
+ if (code != 0) {
+ terrno = TAOS_SYSTEM_ERROR(code);
+ return -1;
+ }
+ return 0;
+}
+
+static FORCE_INLINE int32_t tdWLockSmaEnv(SSmaEnv *pEnv) {
+ int code = taosThreadRwlockWrlock(&(pEnv->lock));
+ if (code != 0) {
+ terrno = TAOS_SYSTEM_ERROR(code);
+ return -1;
+ }
+ return 0;
+}
+
+static FORCE_INLINE int32_t tdUnLockSmaEnv(SSmaEnv *pEnv) {
+ int code = taosThreadRwlockUnlock(&(pEnv->lock));
+ if (code != 0) {
+ terrno = TAOS_SYSTEM_ERROR(code);
+ return -1;
+ }
+ return 0;
+}
+
+static FORCE_INLINE int8_t tdSmaStat(SSmaStatItem *pStatItem) {
+ if (pStatItem) {
+ return atomic_load_8(&pStatItem->state);
+ }
+ return TSDB_SMA_STAT_UNKNOWN;
+}
+
+static FORCE_INLINE bool tdSmaStatIsOK(SSmaStatItem *pStatItem, int8_t *state) {
+ if (!pStatItem) {
+ return false;
+ }
+
+ if (state) {
+ *state = atomic_load_8(&pStatItem->state);
+ return *state == TSDB_SMA_STAT_OK;
+ }
+ return atomic_load_8(&pStatItem->state) == TSDB_SMA_STAT_OK;
+}
+
+static FORCE_INLINE bool tdSmaStatIsExpired(SSmaStatItem *pStatItem) {
+ return pStatItem ? (atomic_load_8(&pStatItem->state) & TSDB_SMA_STAT_EXPIRED) : true;
+}
+
+static FORCE_INLINE bool tdSmaStatIsDropped(SSmaStatItem *pStatItem) {
+ return pStatItem ? (atomic_load_8(&pStatItem->state) & TSDB_SMA_STAT_DROPPED) : true;
+}
+
+static FORCE_INLINE void tdSmaStatSetOK(SSmaStatItem *pStatItem) {
+ if (pStatItem) {
+ atomic_store_8(&pStatItem->state, TSDB_SMA_STAT_OK);
+ }
+}
+
+static FORCE_INLINE void tdSmaStatSetExpired(SSmaStatItem *pStatItem) {
+ if (pStatItem) {
+ atomic_or_fetch_8(&pStatItem->state, TSDB_SMA_STAT_EXPIRED);
+ }
+}
+
+static FORCE_INLINE void tdSmaStatSetDropped(SSmaStatItem *pStatItem) {
+ if (pStatItem) {
+ atomic_or_fetch_8(&pStatItem->state, TSDB_SMA_STAT_DROPPED);
+ }
+}
+
+static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType);
+void *tdFreeSmaStatItem(SSmaStatItem *pSmaStatItem);
+static int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType);
+static SSmaEnv *tdNewSmaEnv(const SSma *pSma, int8_t smaType, const char *path, SDiskID did);
+static int32_t tdInitSmaEnv(SSma *pSma, int8_t smaType, const char *path, SDiskID did, SSmaEnv **pEnv);
+
+void *tdFreeRSmaInfo(SRSmaInfo *pInfo);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*_TD_VNODE_SMA_H_*/
\ No newline at end of file
diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h
index 102c40337d..93a25da0a8 100644
--- a/source/dnode/vnode/src/inc/tsdb.h
+++ b/source/dnode/vnode/src/inc/tsdb.h
@@ -60,31 +60,22 @@ typedef struct {
TSKEY minKey;
} SRtn;
-struct SSmaEnvs {
- int16_t nTSma;
- int16_t nRSma;
- SSmaEnv *pTSmaEnv;
- SSmaEnv *pRSmaEnv;
-};
-
+#define TSDB_DATA_DIR_LEN 6
struct STsdb {
char *path;
SVnode *pVnode;
TdThreadMutex mutex;
+ char dir[TSDB_DATA_DIR_LEN];
bool repoLocked;
- int8_t level; // retention level
STsdbKeepCfg keepCfg;
STsdbMemTable *mem;
STsdbMemTable *imem;
SRtn rtn;
STsdbFS *fs;
- SSmaEnvs smaEnvs;
};
#if 1 // ======================================
-typedef struct SSmaStat SSmaStat;
-
struct STable {
uint64_t tid;
uint64_t uid;
@@ -95,10 +86,6 @@ struct STable {
#define TABLE_UID(t) (t)->uid
int tsdbPrepareCommit(STsdb *pTsdb);
-int32_t tsdbInitSma(STsdb *pTsdb);
-int32_t tsdbDropTSma(STsdb *pTsdb, char *pMsg);
-int32_t tsdbDropTSmaData(STsdb *pTsdb, int64_t indexUid);
-int32_t tsdbInsertRSmaData(STsdb *pTsdb, char *msg);
typedef enum {
TSDB_FILE_HEAD = 0, // .head
TSDB_FILE_DATA, // .data
@@ -107,8 +94,6 @@ typedef enum {
TSDB_FILE_SMAL, // .smal(Block-wise SMA)
TSDB_FILE_MAX, //
TSDB_FILE_META, // meta
- TSDB_FILE_TSMA, // v2t100.${sma_index_name}, Time-range-wise SMA
- TSDB_FILE_RSMA, // v2r100.${sma_index_name}, Time-range-wise Rollup SMA
} E_TSDB_FILE_T;
typedef struct {
@@ -186,15 +171,10 @@ struct STsdbFS {
#define REPO_ID(r) TD_VID((r)->pVnode)
#define REPO_CFG(r) (&(r)->pVnode->config.tsdbCfg)
#define REPO_KEEP_CFG(r) (&(r)->keepCfg)
-#define REPO_LEVEL(r) ((r)->level)
#define REPO_FS(r) ((r)->fs)
#define REPO_META(r) ((r)->pVnode->pMeta)
#define REPO_TFS(r) ((r)->pVnode->pTfs)
#define IS_REPO_LOCKED(r) ((r)->repoLocked)
-#define REPO_TSMA_NUM(r) ((r)->smaEnvs.nTSma)
-#define REPO_RSMA_NUM(r) ((r)->smaEnvs.nRSma)
-#define REPO_TSMA_ENV(r) ((r)->smaEnvs.pTSmaEnv)
-#define REPO_RSMA_ENV(r) ((r)->smaEnvs.pRSmaEnv)
int tsdbLockRepo(STsdb *pTsdb);
int tsdbUnlockRepo(STsdb *pTsdb);
@@ -794,25 +774,6 @@ typedef struct {
} SFSHeader;
// ================== TSDB File System Meta
-
-/**
- * @brief Directory structure of .tsma data files.
- *
- * /vnode2/tsdb $ tree tsma/
- * tsma/
- * ├── v2f100.index_name_1
- * ├── v2f101.index_name_1
- * ├── v2f102.index_name_1
- * ├── v2f1900.index_name_3
- * ├── v2f1901.index_name_3
- * ├── v2f1902.index_name_3
- * ├── v2f200.index_name_2
- * ├── v2f201.index_name_2
- * └── v2f202.index_name_2
- *
- * 0 directories, 9 files
- */
-
#define FS_CURRENT_STATUS(pfs) ((pfs)->cstatus)
#define FS_NEW_STATUS(pfs) ((pfs)->nstatus)
#define FS_IN_TXN(pfs) (pfs)->intxn
@@ -874,43 +835,6 @@ static FORCE_INLINE int tsdbUnLockFS(STsdbFS *pFs) {
return 0;
}
-typedef struct SSmaKey SSmaKey;
-
-struct SSmaKey {
- TSKEY skey;
- int64_t groupId;
-};
-
-typedef struct SDBFile SDBFile;
-
-struct SDBFile {
- int32_t fid;
- TDB *pDB;
- char *path;
-};
-
-int32_t tsdbOpenDBEnv(TENV **ppEnv, const char *path);
-int32_t tsdbCloseDBEnv(TENV *pEnv);
-int32_t tsdbOpenDBF(TENV *pEnv, SDBFile *pDBF);
-int32_t tsdbCloseDBF(SDBFile *pDBF);
-int32_t tsdbSaveSmaToDB(SDBFile *pDBF, void *pKey, int32_t keyLen, void *pVal, int32_t valLen, TXN *txn);
-void *tsdbGetSmaDataByKey(SDBFile *pDBF, const void *pKey, int32_t keyLen, int32_t *valLen);
-
-void tsdbDestroySmaEnv(SSmaEnv *pSmaEnv);
-void *tsdbFreeSmaEnv(SSmaEnv *pSmaEnv);
-#if 0
-int32_t tsdbGetTSmaStatus(STsdb *pTsdb, STSma *param, void *result);
-int32_t tsdbRemoveTSmaData(STsdb *pTsdb, STSma *param, STimeWindow *pWin);
-#endif
-
-// internal func
-static FORCE_INLINE int32_t tsdbEncodeTSmaKey(int64_t groupId, TSKEY tsKey, void **pData) {
- int32_t len = 0;
- len += taosEncodeFixedI64(pData, tsKey);
- len += taosEncodeFixedI64(pData, groupId);
- return len;
-}
-
#endif
#ifdef __cplusplus
diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h
index 9a36fc6eae..2018f0a68c 100644
--- a/source/dnode/vnode/src/inc/vnodeInt.h
+++ b/source/dnode/vnode/src/inc/vnodeInt.h
@@ -47,13 +47,15 @@
extern "C" {
#endif
-typedef struct SVnodeInfo SVnodeInfo;
-typedef struct SMeta SMeta;
-typedef struct STsdb STsdb;
-typedef struct STQ STQ;
-typedef struct SVState SVState;
-typedef struct SVBufPool SVBufPool;
-typedef struct SQWorker SQHandle;
+typedef struct SVnodeInfo SVnodeInfo;
+typedef struct SMeta SMeta;
+typedef struct SSma SSma;
+typedef struct STsdb STsdb;
+typedef struct STQ STQ;
+typedef struct SVState SVState;
+typedef struct SVBufPool SVBufPool;
+typedef struct SQWorker SQHandle;
+typedef struct STsdbKeepCfg STsdbKeepCfg;
#define VNODE_META_DIR "meta"
#define VNODE_TSDB_DIR "tsdb"
@@ -77,9 +79,11 @@ int metaClose(SMeta* pMeta);
int metaBegin(SMeta* pMeta);
int metaCommit(SMeta* pMeta);
int metaCreateSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq);
+int metaAlterSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq);
int metaDropSTable(SMeta* pMeta, int64_t verison, SVDropStbReq* pReq);
int metaCreateTable(SMeta* pMeta, int64_t version, SVCreateTbReq* pReq);
int metaDropTable(SMeta* pMeta, int64_t version, SVDropTbReq* pReq);
+int metaAlterTable(SMeta* pMeta, int64_t version, SVAlterTbReq* pReq);
SSchemaWrapper* metaGetTableSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, bool isinline);
STSchema* metaGetTbTSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver);
int metaGetTableEntryByName(SMetaReader* pReader, const char* name);
@@ -90,17 +94,14 @@ tb_uid_t metaCtbCursorNext(SMCtbCursor* pCtbCur);
SArray* metaGetSmaTbUids(SMeta* pMeta, bool isDup);
void* metaGetSmaInfoByIndex(SMeta* pMeta, int64_t indexUid, bool isDecode);
STSmaWrapper* metaGetSmaInfoByTable(SMeta* pMeta, tb_uid_t uid);
-int32_t metaCreateTSma(SMeta* pMeta, SSmaCfg* pCfg);
+int32_t metaCreateTSma(SMeta* pMeta, int64_t version, SSmaCfg* pCfg);
int32_t metaDropTSma(SMeta* pMeta, int64_t indexUid);
// tsdb
-int tsdbOpen(SVnode* pVnode, int8_t type);
-int tsdbClose(STsdb* pTsdb);
+int tsdbOpen(SVnode* pVnode, STsdb** ppTsdb, const char* dir, STsdbKeepCfg* pKeepCfg);
+int tsdbClose(STsdb** pTsdb);
int tsdbBegin(STsdb* pTsdb);
int tsdbCommit(STsdb* pTsdb);
-int32_t tsdbUpdateSmaWindow(STsdb* pTsdb, SSubmitReq* pMsg, int64_t version);
-int32_t tsdbCreateTSma(STsdb* pTsdb, char* pMsg);
-int32_t tsdbInsertTSmaData(STsdb* pTsdb, int64_t indexUid, const char* msg);
int tsdbInsertData(STsdb* pTsdb, int64_t version, SSubmitReq* pMsg, SSubmitRsp* pRsp);
int tsdbInsertTableData(STsdb* pTsdb, SSubmitMsgIter* pMsgIter, SSubmitBlk* pBlock, SSubmitBlkRsp* pRsp);
tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableGroupInfo* groupList, uint64_t qId,
@@ -121,13 +122,31 @@ int32_t tqProcessStreamTrigger(STQ* pTq, void* data, int32_t dataLen, int32_t wo
int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId);
// sma
+int32_t smaOpen(SVnode* pVnode);
+int32_t smaClose(SSma* pSma);
+int32_t tdUpdateExpireWindow(SSma* pSma, SSubmitReq* pMsg, int64_t version);
+int32_t tdProcessTSmaCreate(SSma* pSma, char* pMsg);
+int32_t tdProcessTSmaInsert(SSma* pSma, int64_t indexUid, const char* msg);
+
+int32_t tdProcessRSmaCreate(SSma* pSma, SMeta* pMeta, SVCreateStbReq* pReq, SMsgCb* pMsgCb);
+int32_t tdProcessRSmaSubmit(SSma* pSma, void* pMsg, int32_t inputType);
+int32_t tdFetchTbUidList(SSma* pSma, STbUidStore** ppStore, tb_uid_t suid, tb_uid_t uid);
+int32_t tdUpdateTbUidList(SSma* pSma, STbUidStore* pUidStore);
+void tdUidStoreDestory(STbUidStore* pStore);
+void* tdUidStoreFree(STbUidStore* pStore);
+
+#if 0
+int32_t tsdbUpdateSmaWindow(STsdb* pTsdb, SSubmitReq* pMsg, int64_t version);
+int32_t tsdbCreateTSma(STsdb* pTsdb, char* pMsg);
+int32_t tsdbInsertTSmaData(STsdb* pTsdb, int64_t indexUid, const char* msg);
int32_t tsdbRegisterRSma(STsdb* pTsdb, SMeta* pMeta, SVCreateStbReq* pReq, SMsgCb* pMsgCb);
int32_t tsdbFetchTbUidList(STsdb* pTsdb, STbUidStore** ppStore, tb_uid_t suid, tb_uid_t uid);
int32_t tsdbUpdateTbUidList(STsdb* pTsdb, STbUidStore* pUidStore);
void tsdbUidStoreDestory(STbUidStore* pStore);
void* tsdbUidStoreFree(STbUidStore* pStore);
int32_t tsdbTriggerRSma(STsdb* pTsdb, void* pMsg, int32_t inputType);
+#endif
typedef struct {
int8_t streamType; // sma or other
@@ -164,13 +183,13 @@ typedef enum {
TSDB_TYPE_RSMA_L2 = 4, // RSMA Level 2
} ETsdbType;
-typedef struct {
+struct STsdbKeepCfg {
int8_t precision; // precision always be used with below keep cfgs
int32_t days;
int32_t keep0;
int32_t keep1;
int32_t keep2;
-} STsdbKeepCfg;
+};
struct SVnode {
char* path;
@@ -183,9 +202,8 @@ struct SVnode {
SVBufPool* onCommit;
SVBufPool* onRecycle;
SMeta* pMeta;
+ SSma* pSma;
STsdb* pTsdb;
- STsdb* pRSma1;
- STsdb* pRSma2;
SWal* pWal;
STQ* pTq;
SSink* pSink;
@@ -194,10 +212,12 @@ struct SVnode {
SQHandle* pQuery;
};
+#define TD_VID(PVNODE) (PVNODE)->config.vgId
+
#define VND_TSDB(vnd) ((vnd)->pTsdb)
#define VND_RSMA0(vnd) ((vnd)->pTsdb)
-#define VND_RSMA1(vnd) ((vnd)->pRSma1)
-#define VND_RSMA2(vnd) ((vnd)->pRSma2)
+#define VND_RSMA1(vnd) ((vnd)->pSma->pRSmaTsdb1)
+#define VND_RSMA2(vnd) ((vnd)->pSma->pRSmaTsdb2)
#define VND_RETENTIONS(vnd) (&(vnd)->config.tsdbCfg.retentions)
struct STbUidStore {
@@ -207,7 +227,29 @@ struct STbUidStore {
SHashObj* uidHash;
};
-#define TD_VID(PVNODE) (PVNODE)->config.vgId
+struct SSma {
+ int16_t nTSma;
+ bool locked;
+ TdThreadMutex mutex;
+ SVnode* pVnode;
+ STsdb* pRSmaTsdb1;
+ STsdb* pRSmaTsdb2;
+ void* pTSmaEnv;
+ void* pRSmaEnv;
+};
+
+#define SMA_CFG(s) (&(s)->pVnode->config)
+#define SMA_TSDB_CFG(s) (&(s)->pVnode->config.tsdbCfg)
+#define SMA_LOCKED(s) ((s)->locked)
+#define SMA_META(s) ((s)->pVnode->pMeta)
+#define SMA_VID(s) TD_VID((s)->pVnode)
+#define SMA_TFS(s) ((s)->pVnode->pTfs)
+#define SMA_TSMA_NUM(s) ((s)->nTSma)
+#define SMA_TSMA_ENV(s) ((s)->pTSmaEnv)
+#define SMA_RSMA_ENV(s) ((s)->pRSmaEnv)
+#define SMA_RSMA_TSDB0(s) ((s)->pVnode->pTsdb)
+#define SMA_RSMA_TSDB1(s) ((s)->pRSmaTsdb1)
+#define SMA_RSMA_TSDB2(s) ((s)->pRSmaTsdb2)
static FORCE_INLINE bool vnodeIsRollup(SVnode* pVnode) {
SRetention* pRetention = &(pVnode->config.tsdbCfg.retentions[0]);
diff --git a/source/dnode/vnode/src/meta/metaEntry.c b/source/dnode/vnode/src/meta/metaEntry.c
index 581b876e84..2bc0d7517d 100644
--- a/source/dnode/vnode/src/meta/metaEntry.c
+++ b/source/dnode/vnode/src/meta/metaEntry.c
@@ -34,7 +34,10 @@ int metaEncodeEntry(SEncoder *pCoder, const SMetaEntry *pME) {
} else if (pME->type == TSDB_NORMAL_TABLE) {
if (tEncodeI64(pCoder, pME->ntbEntry.ctime) < 0) return -1;
if (tEncodeI32(pCoder, pME->ntbEntry.ttlDays) < 0) return -1;
+ if (tEncodeI32v(pCoder, pME->ntbEntry.ncid) < 0) return -1;
if (tEncodeSSchemaWrapper(pCoder, &pME->ntbEntry.schema) < 0) return -1;
+ } else if (pME->type == TSDB_TSMA_TABLE) {
+ if (tEncodeTSma(pCoder, pME->smaEntry.tsma) < 0) return -1;
} else {
ASSERT(0);
}
@@ -63,7 +66,10 @@ int metaDecodeEntry(SDecoder *pCoder, SMetaEntry *pME) {
} else if (pME->type == TSDB_NORMAL_TABLE) {
if (tDecodeI64(pCoder, &pME->ntbEntry.ctime) < 0) return -1;
if (tDecodeI32(pCoder, &pME->ntbEntry.ttlDays) < 0) return -1;
+ if (tDecodeI32v(pCoder, &pME->ntbEntry.ncid) < 0) return -1;
if (tDecodeSSchemaWrapper(pCoder, &pME->ntbEntry.schema) < 0) return -1;
+ } else if (pME->type == TSDB_TSMA_TABLE) {
+ if (tDecodeTSma(pCoder, pME->smaEntry.tsma) < 0) return -1;
} else {
ASSERT(0);
}
diff --git a/source/dnode/vnode/src/meta/metaIdx.c b/source/dnode/vnode/src/meta/metaIdx.c
index 853b2ecefb..3f52071315 100644
--- a/source/dnode/vnode/src/meta/metaIdx.c
+++ b/source/dnode/vnode/src/meta/metaIdx.c
@@ -112,35 +112,4 @@ int metaRemoveTableFromIdx(SMeta *pMeta, tb_uid_t uid) {
#endif
// TODO
return 0;
-}
-
-int32_t metaCreateTSma(SMeta *pMeta, SSmaCfg *pCfg) {
- // TODO: Validate the cfg
- // The table uid should exists and be super table or common table.
- // Check other cfg value
-
- // TODO: add atomicity
-
-#ifdef META_REFACT
-#else
- if (metaSaveSmaToDB(pMeta, &pCfg->tSma) < 0) {
- // TODO: handle error
- return -1;
- }
-#endif
- return TSDB_CODE_SUCCESS;
-}
-
-int32_t metaDropTSma(SMeta *pMeta, int64_t indexUid) {
- // TODO: Validate the cfg
- // TODO: add atomicity
-
-#ifdef META_REFACT
-#else
- if (metaRemoveSmaFromDb(pMeta, indexUid) < 0) {
- // TODO: handle error
- return -1;
- }
-#endif
- return TSDB_CODE_SUCCESS;
}
\ No newline at end of file
diff --git a/source/dnode/vnode/src/meta/metaOpen.c b/source/dnode/vnode/src/meta/metaOpen.c
index 07422e3193..3ce146904c 100644
--- a/source/dnode/vnode/src/meta/metaOpen.c
+++ b/source/dnode/vnode/src/meta/metaOpen.c
@@ -21,6 +21,7 @@ static int ctbIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kL
static int tagIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2);
static int ttlIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2);
static int uidIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2);
+static int smaIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2);
static int32_t metaInitLock(SMeta *pMeta) { return taosThreadRwlockInit(&pMeta->lock, NULL); }
static int32_t metaDestroyLock(SMeta *pMeta) { return taosThreadRwlockDestroy(&pMeta->lock); }
@@ -104,6 +105,13 @@ int metaOpen(SVnode *pVnode, SMeta **ppMeta) {
goto _err;
}
+ // open pSmaIdx
+ ret = tdbDbOpen("sma.idx", sizeof(SSmaIdxKey), 0, smaIdxKeyCmpr, pMeta->pEnv, &pMeta->pSmaIdx);
+ if (ret < 0) {
+ metaError("vgId:%d failed to open meta sma index since %s", TD_VID(pVnode), tstrerror(terrno));
+ goto _err;
+ }
+
// open index
if (metaOpenIdx(pMeta) < 0) {
metaError("vgId:%d failed to open meta index since %s", TD_VID(pVnode), tstrerror(terrno));
@@ -117,11 +125,12 @@ int metaOpen(SVnode *pVnode, SMeta **ppMeta) {
_err:
if (pMeta->pIdx) metaCloseIdx(pMeta);
+ if (pMeta->pSmaIdx) tdbDbClose(pMeta->pSmaIdx);
if (pMeta->pTtlIdx) tdbDbClose(pMeta->pTtlIdx);
if (pMeta->pTagIdx) tdbDbClose(pMeta->pTagIdx);
if (pMeta->pCtbIdx) tdbDbClose(pMeta->pCtbIdx);
if (pMeta->pNameIdx) tdbDbClose(pMeta->pNameIdx);
- if (pMeta->pNameIdx) tdbDbClose(pMeta->pUidIdx);
+ if (pMeta->pUidIdx) tdbDbClose(pMeta->pUidIdx);
if (pMeta->pSkmDb) tdbDbClose(pMeta->pSkmDb);
if (pMeta->pTbDb) tdbDbClose(pMeta->pTbDb);
if (pMeta->pEnv) tdbEnvClose(pMeta->pEnv);
@@ -133,11 +142,12 @@ _err:
int metaClose(SMeta *pMeta) {
if (pMeta) {
if (pMeta->pIdx) metaCloseIdx(pMeta);
+ if (pMeta->pSmaIdx) tdbDbClose(pMeta->pSmaIdx);
if (pMeta->pTtlIdx) tdbDbClose(pMeta->pTtlIdx);
if (pMeta->pTagIdx) tdbDbClose(pMeta->pTagIdx);
if (pMeta->pCtbIdx) tdbDbClose(pMeta->pCtbIdx);
if (pMeta->pNameIdx) tdbDbClose(pMeta->pNameIdx);
- if (pMeta->pNameIdx) tdbDbClose(pMeta->pUidIdx);
+ if (pMeta->pUidIdx) tdbDbClose(pMeta->pUidIdx);
if (pMeta->pSkmDb) tdbDbClose(pMeta->pSkmDb);
if (pMeta->pTbDb) tdbDbClose(pMeta->pTbDb);
if (pMeta->pEnv) tdbEnvClose(pMeta->pEnv);
@@ -295,3 +305,22 @@ static int ttlIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kL
return 0;
}
+
+static int smaIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2) {
+ SSmaIdxKey *pSmaIdxKey1 = (SSmaIdxKey *)pKey1;
+ SSmaIdxKey *pSmaIdxKey2 = (SSmaIdxKey *)pKey2;
+
+ if (pSmaIdxKey1->uid > pSmaIdxKey2->uid) {
+ return 1;
+ } else if (pSmaIdxKey1->uid < pSmaIdxKey2->uid) {
+ return -1;
+ }
+
+ if (pSmaIdxKey1->smaUid > pSmaIdxKey2->smaUid) {
+ return 1;
+ } else if (pSmaIdxKey1->smaUid < pSmaIdxKey2->smaUid) {
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/source/dnode/vnode/src/meta/metaSma.c b/source/dnode/vnode/src/meta/metaSma.c
new file mode 100644
index 0000000000..8ce7ea5895
--- /dev/null
+++ b/source/dnode/vnode/src/meta/metaSma.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "meta.h"
+
+static int metaHandleSmaEntry(SMeta *pMeta, const SMetaEntry *pME);
+static int metaSaveSmaToDB(SMeta *pMeta, const SMetaEntry *pME);
+
+int32_t metaCreateTSma(SMeta *pMeta, int64_t version, SSmaCfg *pCfg) {
+ // TODO: Validate the cfg
+ // The table uid should exists and be super table or normal table.
+ // Check other cfg value
+
+ SMetaEntry me = {0};
+ int kLen = 0;
+ int vLen = 0;
+ const void *pKey = NULL;
+ const void *pVal = NULL;
+ void *pBuf = NULL;
+ int32_t szBuf = 0;
+ void *p = NULL;
+ SMetaReader mr = {0};
+
+ // validate req
+ metaReaderInit(&mr, pMeta, 0);
+ if (metaGetTableEntryByUid(&mr, pCfg->indexUid) == 0) {
+// TODO: just for pass case
+#if 1
+ terrno = TSDB_CODE_TDB_TSMA_ALREADY_EXIST;
+ metaReaderClear(&mr);
+ return -1;
+#else
+ metaReaderClear(&mr);
+ return 0;
+#endif
+ }
+ metaReaderClear(&mr);
+
+ // set structs
+ me.version = version;
+ me.type = TSDB_TSMA_TABLE;
+ me.uid = pCfg->indexUid;
+ me.name = pCfg->indexName;
+ me.smaEntry.tsma = pCfg;
+
+ if (metaHandleSmaEntry(pMeta, &me) < 0) goto _err;
+
+ metaDebug("vgId:%d tsma is created, name:%s uid: %" PRId64, TD_VID(pMeta->pVnode), pCfg->indexName, pCfg->indexUid);
+
+ return 0;
+
+_err:
+ metaError("vgId:%d failed to create tsma: %s uid: %" PRId64 " since %s", TD_VID(pMeta->pVnode), pCfg->indexName,
+ pCfg->indexUid, tstrerror(terrno));
+ return -1;
+}
+
+int32_t metaDropTSma(SMeta *pMeta, int64_t indexUid) {
+ // TODO: Validate the cfg
+ // TODO: add atomicity
+
+#ifdef META_REFACT
+#else
+ if (metaRemoveSmaFromDb(pMeta, indexUid) < 0) {
+ // TODO: handle error
+ return -1;
+ }
+#endif
+ return TSDB_CODE_SUCCESS;
+}
+
+// static int metaSaveSmaToDB(SMeta *pMeta, STSma *pSmaCfg) {
+// int32_t ret = 0;
+// void *pBuf = NULL, *qBuf = NULL;
+// void *key = {0}, *val = {0};
+
+// // save sma info
+// int32_t len = tEncodeTSma(NULL, pSmaCfg);
+// pBuf = taosMemoryCalloc(1, len);
+// if (pBuf == NULL) {
+// terrno = TSDB_CODE_OUT_OF_MEMORY;
+// return -1;
+// }
+
+// key = (void *)&pSmaCfg->indexUid;
+// qBuf = pBuf;
+// tEncodeTSma(&qBuf, pSmaCfg);
+// val = pBuf;
+
+// int32_t kLen = sizeof(pSmaCfg->indexUid);
+// int32_t vLen = POINTER_DISTANCE(qBuf, pBuf);
+
+// ret = tdbDbInsert(pMeta->pTbDb, key, kLen, val, vLen, &pMeta->txn);
+// if (ret < 0) {
+// taosMemoryFreeClear(pBuf);
+// return -1;
+// }
+
+// // add sma idx
+// SSmaIdxKey smaIdxKey;
+// smaIdxKey.uid = pSmaCfg->tableUid;
+// smaIdxKey.smaUid = pSmaCfg->indexUid;
+// key = &smaIdxKey;
+// kLen = sizeof(smaIdxKey);
+// val = NULL;
+// vLen = 0;
+
+// ret = tdbDbInsert(pMeta->pSmaIdx, key, kLen, val, vLen, &pMeta->txn);
+// if (ret < 0) {
+// taosMemoryFreeClear(pBuf);
+// return -1;
+// }
+
+// // release
+// taosMemoryFreeClear(pBuf);
+
+// return 0;
+// }
+
+
+static int metaSaveSmaToDB(SMeta *pMeta, const SMetaEntry *pME) {
+ STbDbKey tbDbKey;
+ void *pKey = NULL;
+ void *pVal = NULL;
+ int kLen = 0;
+ int vLen = 0;
+ SEncoder coder = {0};
+
+ // set key and value
+ tbDbKey.version = pME->version;
+ tbDbKey.uid = pME->uid;
+
+ pKey = &tbDbKey;
+ kLen = sizeof(tbDbKey);
+
+ int32_t ret = 0;
+ tEncodeSize(metaEncodeEntry, pME, vLen, ret);
+ if (ret < 0) {
+ goto _err;
+ }
+
+ pVal = taosMemoryMalloc(vLen);
+ if (pVal == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+
+ tEncoderInit(&coder, pVal, vLen);
+
+ if (metaEncodeEntry(&coder, pME) < 0) {
+ goto _err;
+ }
+
+ tEncoderClear(&coder);
+
+ // write to table.db
+ if (tdbDbInsert(pMeta->pTbDb, pKey, kLen, pVal, vLen, &pMeta->txn) < 0) {
+ goto _err;
+ }
+
+ taosMemoryFree(pVal);
+ return 0;
+
+_err:
+ taosMemoryFree(pVal);
+ return -1;
+}
+
+static int metaUpdateUidIdx(SMeta *pMeta, const SMetaEntry *pME) {
+ return tdbDbInsert(pMeta->pUidIdx, &pME->uid, sizeof(tb_uid_t), &pME->version, sizeof(int64_t), &pMeta->txn);
+}
+
+static int metaUpdateSmaIdx(SMeta *pMeta, const SMetaEntry *pME) {
+ SSmaIdxKey smaIdxKey = {.uid = pME->smaEntry.tsma->tableUid, .smaUid = pME->smaEntry.tsma->indexUid};
+
+ return tdbDbInsert(pMeta->pSmaIdx, &smaIdxKey, sizeof(smaIdxKey), NULL, 0, &pMeta->txn);
+}
+
+static int metaHandleSmaEntry(SMeta *pMeta, const SMetaEntry *pME) {
+ metaWLock(pMeta);
+
+ // save to table.db
+ if (metaSaveSmaToDB(pMeta, pME) < 0) goto _err;
+
+ // // update uid.idx
+ if (metaUpdateUidIdx(pMeta, pME) < 0) goto _err;
+
+ if (metaUpdateSmaIdx(pMeta, pME) < 0) goto _err;
+
+ metaULock(pMeta);
+ return 0;
+
+_err:
+ metaULock(pMeta);
+ return -1;
+}
diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c
index d666bd22c1..e61064fe67 100644
--- a/source/dnode/vnode/src/meta/metaTable.c
+++ b/source/dnode/vnode/src/meta/metaTable.c
@@ -131,6 +131,75 @@ _err:
return -1;
}
+int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
+ SMetaEntry oStbEntry = {0};
+ SMetaEntry nStbEntry = {0};
+ TDBC *pUidIdxc = NULL;
+ TDBC *pTbDbc = NULL;
+ const void *pData;
+ int nData;
+ int64_t oversion;
+ SDecoder dc = {0};
+ int32_t ret;
+ int32_t c;
+
+ tdbDbcOpen(pMeta->pUidIdx, &pUidIdxc, &pMeta->txn);
+ ret = tdbDbcMoveTo(pUidIdxc, &pReq->suid, sizeof(tb_uid_t), &c);
+ if (ret < 0 || c) {
+ ASSERT(0);
+ return -1;
+ }
+
+ ret = tdbDbcGet(pUidIdxc, NULL, NULL, &pData, &nData);
+ if (ret < 0) {
+ ASSERT(0);
+ return -1;
+ }
+
+ oversion = *(int64_t *)pData;
+
+ tdbDbcOpen(pMeta->pTbDb, &pTbDbc, &pMeta->txn);
+ ret = tdbDbcMoveTo(pTbDbc, &((STbDbKey){.uid = pReq->suid, .version = oversion}), sizeof(STbDbKey), &c);
+ ASSERT(ret == 0 && c == 0);
+
+ ret = tdbDbcGet(pTbDbc, NULL, NULL, &pData, &nData);
+ ASSERT(ret == 0);
+
+ tDecoderInit(&dc, pData, nData);
+ metaDecodeEntry(&dc, &oStbEntry);
+
+ nStbEntry.version = version;
+ nStbEntry.type = TSDB_SUPER_TABLE;
+ nStbEntry.uid = pReq->suid;
+ nStbEntry.name = pReq->name;
+ nStbEntry.stbEntry.schema = pReq->schema;
+ nStbEntry.stbEntry.schemaTag = pReq->schemaTag;
+
+ metaWLock(pMeta);
+ // compare two entry
+ if (oStbEntry.stbEntry.schema.sver != pReq->schema.sver) {
+ if (oStbEntry.stbEntry.schema.nCols != pReq->schema.nCols) {
+ metaSaveToSkmDb(pMeta, &nStbEntry);
+ }
+ }
+
+ // if (oStbEntry.stbEntry.schemaTag.sver != pReq->schemaTag.sver) {
+ // // change tag schema
+ // }
+
+ // update table.db
+ metaSaveToTbDb(pMeta, &nStbEntry);
+
+ // update uid index
+ tdbDbcUpsert(pUidIdxc, &pReq->suid, sizeof(tb_uid_t), &version, sizeof(version), 0);
+
+ metaULock(pMeta);
+ tDecoderClear(&dc);
+ tdbDbcClose(pTbDbc);
+ tdbDbcClose(pUidIdxc);
+ return 0;
+}
+
int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq) {
SMetaEntry me = {0};
SMetaReader mr = {0};
@@ -171,6 +240,7 @@ int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq) {
me.ntbEntry.ctime = pReq->ctime;
me.ntbEntry.ttlDays = pReq->ttl;
me.ntbEntry.schema = pReq->ntb.schema;
+ me.ntbEntry.ncid = me.ntbEntry.schema.pSchema[me.ntbEntry.schema.nCols - 1].colId + 1;
}
if (metaHandleEntry(pMeta, &me) < 0) goto _err;
@@ -305,6 +375,170 @@ int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq) {
return 0;
}
+static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterTbReq) {
+ void *pVal = NULL;
+ int nVal = 0;
+ const void *pData = NULL;
+ int nData = 0;
+ int ret = 0;
+ tb_uid_t uid;
+ int64_t oversion;
+ SSchema *pColumn = NULL;
+ SMetaEntry entry = {0};
+ SSchemaWrapper *pSchema;
+ int c;
+
+ // search name index
+ ret = tdbDbGet(pMeta->pNameIdx, pAlterTbReq->tbName, strlen(pAlterTbReq->tbName) + 1, &pVal, &nVal);
+ if (ret < 0) {
+ terrno = TSDB_CODE_VND_TABLE_NOT_EXIST;
+ return -1;
+ }
+
+ uid = *(tb_uid_t *)pVal;
+ tdbFree(pVal);
+ pVal = NULL;
+
+ // search uid index
+ TDBC *pUidIdxc = NULL;
+
+ tdbDbcOpen(pMeta->pUidIdx, &pUidIdxc, &pMeta->txn);
+ tdbDbcMoveTo(pUidIdxc, &uid, sizeof(uid), &c);
+ ASSERT(c == 0);
+
+ tdbDbcGet(pUidIdxc, NULL, NULL, &pData, &nData);
+ oversion = *(int64_t *)pData;
+
+ // search table.db
+ TDBC *pTbDbc = NULL;
+
+ tdbDbcOpen(pMeta->pTbDb, &pTbDbc, &pMeta->txn);
+ tdbDbcMoveTo(pTbDbc, &((STbDbKey){.uid = uid, .version = oversion}), sizeof(STbDbKey), &c);
+ ASSERT(c == 0);
+ tdbDbcGet(pTbDbc, NULL, NULL, &pData, &nData);
+
+ // get table entry
+ SDecoder dc = {0};
+ tDecoderInit(&dc, pData, nData);
+ metaDecodeEntry(&dc, &entry);
+
+ if (entry.type != TSDB_NORMAL_TABLE) {
+ terrno = TSDB_CODE_VND_INVALID_TABLE_ACTION;
+ goto _err;
+ }
+
+ // search the column to add/drop/update
+ pSchema = &entry.ntbEntry.schema;
+ int32_t iCol = 0;
+ for (;;) {
+ pColumn = NULL;
+
+ if (iCol >= pSchema->nCols) break;
+ pColumn = &pSchema->pSchema[iCol];
+
+ if (strcmp(pColumn->name, pAlterTbReq->colName) == 0) break;
+ iCol++;
+ }
+
+ entry.version = version;
+ int tlen;
+ switch (pAlterTbReq->action) {
+ case TSDB_ALTER_TABLE_ADD_COLUMN:
+ if (pColumn) {
+ terrno = TSDB_CODE_VND_COL_ALREADY_EXISTS;
+ goto _err;
+ }
+ pSchema->sver++;
+ pSchema->nCols++;
+ pSchema->pSchema =
+ taosMemoryRealloc(entry.ntbEntry.schema.pSchema, sizeof(SSchema) * entry.ntbEntry.schema.nCols);
+ pSchema->pSchema[entry.ntbEntry.schema.nCols - 1].bytes = pAlterTbReq->bytes;
+ pSchema->pSchema[entry.ntbEntry.schema.nCols - 1].type = pAlterTbReq->type;
+ pSchema->pSchema[entry.ntbEntry.schema.nCols - 1].flags = pAlterTbReq->flags;
+ pSchema->pSchema[entry.ntbEntry.schema.nCols - 1].colId = entry.ntbEntry.ncid++;
+ strcpy(pSchema->pSchema[entry.ntbEntry.schema.nCols - 1].name, pAlterTbReq->colName);
+ break;
+ case TSDB_ALTER_TABLE_DROP_COLUMN:
+ if (pColumn == NULL) {
+ terrno = TSDB_CODE_VND_TABLE_COL_NOT_EXISTS;
+ goto _err;
+ }
+ if (pColumn->colId == 0) {
+ terrno = TSDB_CODE_VND_INVALID_TABLE_ACTION;
+ goto _err;
+ }
+ pSchema->sver++;
+ pSchema->nCols--;
+ tlen = (pSchema->nCols - iCol - 1) * sizeof(SSchema);
+ if (tlen) {
+ memmove(pColumn, pColumn + 1, tlen);
+ }
+ break;
+ case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES:
+ if (pColumn == NULL) {
+ terrno = TSDB_CODE_VND_TABLE_COL_NOT_EXISTS;
+ goto _err;
+ }
+ if (!IS_VAR_DATA_TYPE(pColumn->type) || pColumn->bytes <= pAlterTbReq->bytes) {
+ terrno = TSDB_CODE_VND_INVALID_TABLE_ACTION;
+ goto _err;
+ }
+ pSchema->sver++;
+ pColumn->bytes = pAlterTbReq->bytes;
+ break;
+ case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME:
+ if (pColumn == NULL) {
+ terrno = TSDB_CODE_VND_TABLE_COL_NOT_EXISTS;
+ goto _err;
+ }
+ pSchema->sver++;
+ strcpy(pColumn->name, pAlterTbReq->colNewName);
+ break;
+ }
+
+ entry.version = version;
+
+ tDecoderClear(&dc);
+ tdbDbcClose(pTbDbc);
+ tdbDbcClose(pUidIdxc);
+ return 0;
+
+_err:
+ tDecoderClear(&dc);
+ tdbDbcClose(pTbDbc);
+ tdbDbcClose(pUidIdxc);
+ return -1;
+}
+
+static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterTbReq) {
+ // TODO
+ return 0;
+}
+
+static int metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterTbReq) {
+ // TODO
+ ASSERT(0);
+ return 0;
+}
+
+int metaAlterTable(SMeta *pMeta, int64_t version, SVAlterTbReq *pReq) {
+ switch (pReq->action) {
+ case TSDB_ALTER_TABLE_ADD_COLUMN:
+ case TSDB_ALTER_TABLE_DROP_COLUMN:
+ case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES:
+ case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME:
+ return metaAlterTableColumn(pMeta, version, pReq);
+ case TSDB_ALTER_TABLE_UPDATE_TAG_VAL:
+ return metaUpdateTableTagVal(pMeta, version, pReq);
+ case TSDB_ALTER_TABLE_UPDATE_OPTIONS:
+ return metaUpdateTableOptions(pMeta, version, pReq);
+ default:
+ terrno = TSDB_CODE_VND_INVALID_TABLE_ACTION;
+ return -1;
+ break;
+ }
+}
+
static int metaSaveToTbDb(SMeta *pMeta, const SMetaEntry *pME) {
STbDbKey tbDbKey;
void *pKey = NULL;
diff --git a/source/dnode/vnode/src/inc/tsdbSma.h b/source/dnode/vnode/src/sma/sma.c
similarity index 50%
rename from source/dnode/vnode/src/inc/tsdbSma.h
rename to source/dnode/vnode/src/sma/sma.c
index 5215812ac5..2c54e10087 100644
--- a/source/dnode/vnode/src/inc/tsdbSma.h
+++ b/source/dnode/vnode/src/sma/sma.c
@@ -13,35 +13,18 @@
* along with this program. If not, see .
*/
-#ifndef _TD_VNODE_TSDB_SMA_H_
-#define _TD_VNODE_TSDB_SMA_H_
+#include "sma.h"
-#include "tsdb.h"
-#ifdef __cplusplus
-extern "C" {
-#endif
+// TODO: Who is responsible for resource allocate and release?
+int32_t tdProcessTSmaInsert(SSma* pSma, int64_t indexUid, const char* msg) {
+ int32_t code = TSDB_CODE_SUCCESS;
-// typedef int32_t (*__tb_ddl_fn_t)(void *ahandle, void **result, void *p1, void *p2);
-
-// struct STbDdlH {
-// void *ahandle;
-// void *result;
-// __tb_ddl_fn_t fp;
-// };
-
-static FORCE_INLINE int32_t tsdbUidStoreInit(STbUidStore **pStore) {
- ASSERT(*pStore == NULL);
- *pStore = taosMemoryCalloc(1, sizeof(STbUidStore));
- if (*pStore == NULL) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- return TSDB_CODE_FAILED;
+ if ((code = tdProcessTSmaInsertImpl(pSma, indexUid, msg)) < 0) {
+ smaWarn("vgId:%d insert tsma data failed since %s", SMA_VID(pSma), tstrerror(terrno));
}
- return TSDB_CODE_SUCCESS;
+ // TODO: destroy SSDataBlocks(msg)
+ return code;
}
-#ifdef __cplusplus
-}
-#endif
-#endif /*_TD_VNODE_TSDB_SMA_H_*/
\ No newline at end of file
diff --git a/source/dnode/vnode/src/sma/smaEnv.c b/source/dnode/vnode/src/sma/smaEnv.c
new file mode 100644
index 0000000000..c02276f5fe
--- /dev/null
+++ b/source/dnode/vnode/src/sma/smaEnv.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "sma.h"
+
+typedef struct SSmaStat SSmaStat;
+
+static const char *TSDB_SMA_DNAME[] = {
+ "", // TSDB_SMA_TYPE_BLOCK
+ "tsma", // TSDB_SMA_TYPE_TIME_RANGE
+ "rsma", // TSDB_SMA_TYPE_ROLLUP
+};
+
+#define SMA_TEST_INDEX_NAME "smaTestIndexName" // TODO: just for test
+#define SMA_TEST_INDEX_UID 2000000001 // TODO: just for test
+#define SMA_STATE_HASH_SLOT 4
+
+#define RSMA_TASK_INFO_HASH_SLOT 8
+
+typedef struct SPoolMem {
+ int64_t size;
+ struct SPoolMem *prev;
+ struct SPoolMem *next;
+} SPoolMem;
+
+// declaration of static functions
+
+// insert data
+
+static void tdGetSmaDir(int32_t vgId, ETsdbSmaType smaType, char dirName[]);
+
+// Pool Memory
+static SPoolMem *openPool();
+static void clearPool(SPoolMem *pPool);
+static void closePool(SPoolMem *pPool);
+static void *poolMalloc(void *arg, size_t size);
+static void poolFree(void *arg, void *ptr);
+
+// implementation
+
+static SPoolMem *openPool() {
+ SPoolMem *pPool = (SPoolMem *)taosMemoryMalloc(sizeof(*pPool));
+
+ pPool->prev = pPool->next = pPool;
+ pPool->size = 0;
+
+ return pPool;
+}
+
+static void clearPool(SPoolMem *pPool) {
+ if (!pPool) return;
+
+ SPoolMem *pMem;
+
+ do {
+ pMem = pPool->next;
+
+ if (pMem == pPool) break;
+
+ pMem->next->prev = pMem->prev;
+ pMem->prev->next = pMem->next;
+ pPool->size -= pMem->size;
+
+ taosMemoryFree(pMem);
+ } while (1);
+
+ assert(pPool->size == 0);
+}
+
+static void closePool(SPoolMem *pPool) {
+ if (pPool) {
+ clearPool(pPool);
+ taosMemoryFree(pPool);
+ }
+}
+
+static void *poolMalloc(void *arg, size_t size) {
+ void *ptr = NULL;
+ SPoolMem *pPool = (SPoolMem *)arg;
+ SPoolMem *pMem;
+
+ pMem = (SPoolMem *)taosMemoryMalloc(sizeof(*pMem) + size);
+ if (!pMem) {
+ assert(0);
+ }
+
+ pMem->size = sizeof(*pMem) + size;
+ pMem->next = pPool->next;
+ pMem->prev = pPool;
+
+ pPool->next->prev = pMem;
+ pPool->next = pMem;
+ pPool->size += pMem->size;
+
+ ptr = (void *)(&pMem[1]);
+ return ptr;
+}
+
+static void poolFree(void *arg, void *ptr) {
+ SPoolMem *pPool = (SPoolMem *)arg;
+ SPoolMem *pMem;
+
+ pMem = &(((SPoolMem *)ptr)[-1]);
+
+ pMem->next->prev = pMem->prev;
+ pMem->prev->next = pMem->next;
+ pPool->size -= pMem->size;
+
+ taosMemoryFree(pMem);
+}
+
+int32_t tdInitSma(SSma *pSma) {
+ // tSma
+ int32_t numOfTSma = taosArrayGetSize(metaGetSmaTbUids(SMA_META(pSma), false));
+ if (numOfTSma > 0) {
+ atomic_store_16(&SMA_TSMA_NUM(pSma), (int16_t)numOfTSma);
+ }
+ // TODO: rSma
+ return TSDB_CODE_SUCCESS;
+}
+
+static void tdGetSmaDir(int32_t vgId, ETsdbSmaType smaType, char dirName[]) {
+ snprintf(dirName, TSDB_FILENAME_LEN, "vnode%svnode%d%s%s", TD_DIRSEP, vgId, TD_DIRSEP, TSDB_SMA_DNAME[smaType]);
+}
+
+static SSmaEnv *tdNewSmaEnv(const SSma *pSma, int8_t smaType, const char *path, SDiskID did) {
+ SSmaEnv *pEnv = NULL;
+
+ pEnv = (SSmaEnv *)taosMemoryCalloc(1, sizeof(SSmaEnv));
+ if (!pEnv) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return NULL;
+ }
+
+ SMA_ENV_TYPE(pEnv) = smaType;
+
+ int code = taosThreadRwlockInit(&(pEnv->lock), NULL);
+ if (code) {
+ terrno = TAOS_SYSTEM_ERROR(code);
+ taosMemoryFree(pEnv);
+ return NULL;
+ }
+
+ ASSERT(path && (strlen(path) > 0));
+ SMA_ENV_PATH(pEnv) = strdup(path);
+ if (!SMA_ENV_PATH(pEnv)) {
+ tdFreeSmaEnv(pEnv);
+ return NULL;
+ }
+
+ SMA_ENV_DID(pEnv) = did;
+
+ if (tdInitSmaStat(&SMA_ENV_STAT(pEnv), smaType) != TSDB_CODE_SUCCESS) {
+ tdFreeSmaEnv(pEnv);
+ return NULL;
+ }
+
+ char aname[TSDB_FILENAME_LEN] = {0};
+ tfsAbsoluteName(SMA_TFS(pSma), did, path, aname);
+ if (smaOpenDBEnv(&pEnv->dbEnv, aname) != TSDB_CODE_SUCCESS) {
+ tdFreeSmaEnv(pEnv);
+ return NULL;
+ }
+
+ if (!(pEnv->pPool = openPool())) {
+ tdFreeSmaEnv(pEnv);
+ return NULL;
+ }
+
+ return pEnv;
+}
+
+static int32_t tdInitSmaEnv(SSma *pSma, int8_t smaType, const char *path, SDiskID did, SSmaEnv **pEnv) {
+ if (!pEnv) {
+ terrno = TSDB_CODE_INVALID_PTR;
+ return TSDB_CODE_FAILED;
+ }
+
+ if (!(*pEnv)) {
+ if (!(*pEnv = tdNewSmaEnv(pSma, smaType, path, did))) {
+ return TSDB_CODE_FAILED;
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+/**
+ * @brief Release resources allocated for its member fields, not including itself.
+ *
+ * @param pSmaEnv
+ * @return int32_t
+ */
+void tdDestroySmaEnv(SSmaEnv *pSmaEnv) {
+ if (pSmaEnv) {
+ tdDestroySmaState(pSmaEnv->pStat, SMA_ENV_TYPE(pSmaEnv));
+ taosMemoryFreeClear(pSmaEnv->pStat);
+ taosMemoryFreeClear(pSmaEnv->path);
+ taosThreadRwlockDestroy(&(pSmaEnv->lock));
+ smaCloseDBEnv(pSmaEnv->dbEnv);
+ closePool(pSmaEnv->pPool);
+ }
+}
+
+void *tdFreeSmaEnv(SSmaEnv *pSmaEnv) {
+ tdDestroySmaEnv(pSmaEnv);
+ taosMemoryFreeClear(pSmaEnv);
+ return NULL;
+}
+
+int32_t tdRefSmaStat(SSma *pSma, SSmaStat *pStat) {
+ if (!pStat) return 0;
+
+ int ref = T_REF_INC(pStat);
+ smaDebug("vgId:%d ref sma stat:%p, val:%d", SMA_VID(pSma), pStat, ref);
+ return 0;
+}
+
+int32_t tdUnRefSmaStat(SSma *pSma, SSmaStat *pStat) {
+ if (!pStat) return 0;
+
+ int ref = T_REF_DEC(pStat);
+ smaDebug("vgId:%d unref sma stat:%p, val:%d", SMA_VID(pSma), pStat, ref);
+ return 0;
+}
+
+static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType) {
+ ASSERT(pSmaStat != NULL);
+
+ if (*pSmaStat) { // no lock
+ return TSDB_CODE_SUCCESS;
+ }
+
+ /**
+ * 1. Lazy mode utilized when init SSmaStat to update expired window(or hungry mode when tdNew).
+ * 2. Currently, there is mutex lock when init SSmaEnv, thus no need add lock on SSmaStat, and please add lock if
+ * tdInitSmaStat invoked in other multithread environment later.
+ */
+ if (!(*pSmaStat)) {
+ *pSmaStat = (SSmaStat *)taosMemoryCalloc(1, sizeof(SSmaStat));
+ if (!(*pSmaStat)) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return TSDB_CODE_FAILED;
+ }
+
+ if (smaType == TSDB_SMA_TYPE_ROLLUP) {
+ SMA_STAT_INFO_HASH(*pSmaStat) = taosHashInit(
+ RSMA_TASK_INFO_HASH_SLOT, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK);
+
+ if (!SMA_STAT_INFO_HASH(*pSmaStat)) {
+ taosMemoryFreeClear(*pSmaStat);
+ return TSDB_CODE_FAILED;
+ }
+ } else if (smaType == TSDB_SMA_TYPE_TIME_RANGE) {
+ SMA_STAT_ITEMS(*pSmaStat) =
+ taosHashInit(SMA_STATE_HASH_SLOT, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
+
+ if (!SMA_STAT_ITEMS(*pSmaStat)) {
+ taosMemoryFreeClear(*pSmaStat);
+ return TSDB_CODE_FAILED;
+ }
+ } else {
+ ASSERT(0);
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+void *tdFreeSmaStatItem(SSmaStatItem *pSmaStatItem) {
+ if (pSmaStatItem) {
+ tdDestroyTSma(pSmaStatItem->pTSma);
+ taosMemoryFreeClear(pSmaStatItem->pTSma);
+ taosHashCleanup(pSmaStatItem->expiredWindows);
+ taosMemoryFreeClear(pSmaStatItem);
+ }
+ return NULL;
+}
+
+/**
+ * @brief Release resources allocated for its member fields, not including itself.
+ *
+ * @param pSmaStat
+ * @return int32_t
+ */
+int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType) {
+ if (pSmaStat) {
+ // TODO: use taosHashSetFreeFp when taosHashSetFreeFp is ready.
+ if (smaType == TSDB_SMA_TYPE_TIME_RANGE) {
+ void *item = taosHashIterate(SMA_STAT_ITEMS(pSmaStat), NULL);
+ while (item) {
+ SSmaStatItem *pItem = *(SSmaStatItem **)item;
+ tdFreeSmaStatItem(pItem);
+ item = taosHashIterate(SMA_STAT_ITEMS(pSmaStat), item);
+ }
+ taosHashCleanup(SMA_STAT_ITEMS(pSmaStat));
+ } else if (smaType == TSDB_SMA_TYPE_ROLLUP) {
+ void *infoHash = taosHashIterate(SMA_STAT_INFO_HASH(pSmaStat), NULL);
+ while (infoHash) {
+ SRSmaInfo *pInfoHash = *(SRSmaInfo **)infoHash;
+ tdFreeRSmaInfo(pInfoHash);
+ infoHash = taosHashIterate(SMA_STAT_INFO_HASH(pSmaStat), infoHash);
+ }
+ taosHashCleanup(SMA_STAT_INFO_HASH(pSmaStat));
+ } else {
+ ASSERT(0);
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t tdLockSma(SSma *pSma) {
+ int code = taosThreadMutexLock(&pSma->mutex);
+ if (code != 0) {
+ smaError("vgId:%d failed to lock td since %s", SMA_VID(pSma), strerror(errno));
+ terrno = TAOS_SYSTEM_ERROR(code);
+ return -1;
+ }
+ pSma->locked = true;
+ return 0;
+}
+
+int32_t tdUnLockSma(SSma *pSma) {
+ ASSERT(SMA_LOCKED(pSma));
+ pSma->locked = false;
+ int code = taosThreadMutexUnlock(&pSma->mutex);
+ if (code != 0) {
+ smaError("vgId:%d failed to unlock td since %s", SMA_VID(pSma), strerror(errno));
+ terrno = TAOS_SYSTEM_ERROR(code);
+ return -1;
+ }
+ return 0;
+}
+
+int32_t tdCheckAndInitSmaEnv(SSma *pSma, int8_t smaType) {
+ SSmaEnv *pEnv = NULL;
+
+ // return if already init
+ switch (smaType) {
+ case TSDB_SMA_TYPE_TIME_RANGE:
+ if ((pEnv = (SSmaEnv *)atomic_load_ptr(&SMA_TSMA_ENV(pSma)))) {
+ return TSDB_CODE_SUCCESS;
+ }
+ break;
+ case TSDB_SMA_TYPE_ROLLUP:
+ if ((pEnv = (SSmaEnv *)atomic_load_ptr(&SMA_RSMA_ENV(pSma)))) {
+ return TSDB_CODE_SUCCESS;
+ }
+ break;
+ default:
+ TASSERT(0);
+ return TSDB_CODE_FAILED;
+ }
+
+ // init sma env
+ tdLockSma(pSma);
+ pEnv = (smaType == TSDB_SMA_TYPE_TIME_RANGE) ? atomic_load_ptr(&SMA_TSMA_ENV(pSma))
+ : atomic_load_ptr(&SMA_RSMA_ENV(pSma));
+ if (!pEnv) {
+ char rname[TSDB_FILENAME_LEN] = {0};
+
+ SDiskID did = {0};
+ if (tfsAllocDisk(SMA_TFS(pSma), TFS_PRIMARY_LEVEL, &did) < 0) {
+ tdUnLockSma(pSma);
+ return TSDB_CODE_FAILED;
+ }
+
+ if (did.level < 0 || did.id < 0) {
+ tdUnLockSma(pSma);
+ smaError("vgId:%d init sma env failed since invalid did(%d,%d)", SMA_VID(pSma), did.level, did.id);
+ return TSDB_CODE_FAILED;
+ }
+
+ tdGetSmaDir(SMA_VID(pSma), smaType, rname);
+
+ if (tfsMkdirRecurAt(SMA_TFS(pSma), rname, did) < 0) {
+ tdUnLockSma(pSma);
+ return TSDB_CODE_FAILED;
+ }
+
+ if (tdInitSmaEnv(pSma, smaType, rname, did, &pEnv) < 0) {
+ tdUnLockSma(pSma);
+ return TSDB_CODE_FAILED;
+ }
+
+ (smaType == TSDB_SMA_TYPE_TIME_RANGE) ? atomic_store_ptr(&SMA_TSMA_ENV(pSma), pEnv)
+ : atomic_store_ptr(&SMA_RSMA_ENV(pSma), pEnv);
+ }
+ tdUnLockSma(pSma);
+
+ return TSDB_CODE_SUCCESS;
+};
+
+int32_t tdSmaBeginCommit(SSmaEnv *pEnv) {
+ TXN *pTxn = &pEnv->txn;
+ // start a new txn
+ tdbTxnOpen(pTxn, 0, poolMalloc, poolFree, pEnv->pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
+ if (tdbBegin(pEnv->dbEnv, pTxn) != 0) {
+ smaWarn("tdSma tdb begin commit fail");
+ return -1;
+ }
+ return 0;
+}
+
+int32_t tdSmaEndCommit(SSmaEnv *pEnv) {
+ TXN *pTxn = &pEnv->txn;
+
+ // Commit current txn
+ if (tdbCommit(pEnv->dbEnv, pTxn) != 0) {
+ smaWarn("tdSma tdb end commit fail");
+ return -1;
+ }
+ tdbTxnClose(pTxn);
+ clearPool(pEnv->pPool);
+ return 0;
+}
+
+#if 0
+/**
+ * @brief Get the start TS key of the last data block of one interval/sliding.
+ *
+ * @param pSma
+ * @param param
+ * @param result
+ * @return int32_t
+ * 1) Return 0 and fill the result if the check procedure is normal;
+ * 2) Return -1 if error occurs during the check procedure.
+ */
+int32_t tdGetTSmaStatus(SSma *pSma, void *smaIndex, void *result) {
+ const char *procedure = "";
+ if (strncmp(procedure, "get the start TS key of the last data block", 100) != 0) {
+ return -1;
+ }
+ // fill the result
+ return TSDB_CODE_SUCCESS;
+}
+
+/**
+ * @brief Remove the tSma data files related to param between pWin.
+ *
+ * @param pSma
+ * @param param
+ * @param pWin
+ * @return int32_t
+ */
+int32_t tdRemoveTSmaData(SSma *pSma, void *smaIndex, STimeWindow *pWin) {
+ // for ("tSmaFiles of param-interval-sliding between pWin") {
+ // // remove the tSmaFile
+ // }
+ return TSDB_CODE_SUCCESS;
+}
+#endif
diff --git a/source/dnode/vnode/src/sma/smaOpen.c b/source/dnode/vnode/src/sma/smaOpen.c
new file mode 100644
index 0000000000..1c7db28e18
--- /dev/null
+++ b/source/dnode/vnode/src/sma/smaOpen.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "sma.h"
+#include "tsdb.h"
+
+static int32_t smaEvalDays(SRetention *r, int8_t precision);
+static int32_t smaSetKeepCfg(STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int type);
+
+#define SMA_SET_KEEP_CFG(l) \
+ do { \
+ SRetention *r = &pCfg->retentions[l]; \
+ pKeepCfg->keep2 = convertTimeFromPrecisionToUnit(r->keep, pCfg->precision, TIME_UNIT_MINUTE); \
+ pKeepCfg->keep0 = pKeepCfg->keep2; \
+ pKeepCfg->keep1 = pKeepCfg->keep2; \
+ pKeepCfg->days = smaEvalDays(r, pCfg->precision); \
+ } while (0)
+
+#define SMA_OPEN_RSMA_IMPL(v, l) \
+ do { \
+ SRetention *r = (SRetention *)VND_RETENTIONS(v) + l; \
+ if (!RETENTION_VALID(r)) { \
+ if (l == 0) { \
+ goto _err; \
+ } \
+ break; \
+ } \
+ smaSetKeepCfg(&keepCfg, pCfg, TSDB_TYPE_RSMA_L##l); \
+ if (tsdbOpen(v, &SMA_RSMA_TSDB##l(pSma), VNODE_RSMA##l##_DIR, &keepCfg) < 0) { \
+ goto _err; \
+ } \
+ } while (0)
+
+#define RETENTION_DAYS_SPLIT_RATIO 10
+#define RETENTION_DAYS_SPLIT_MIN 1
+#define RETENTION_DAYS_SPLIT_MAX 30
+
+static int32_t smaEvalDays(SRetention *r, int8_t precision) {
+ int32_t keepDays = convertTimeFromPrecisionToUnit(r->keep, precision, TIME_UNIT_DAY);
+ int32_t freqDays = convertTimeFromPrecisionToUnit(r->freq, precision, TIME_UNIT_DAY);
+
+ int32_t days = keepDays / RETENTION_DAYS_SPLIT_RATIO;
+ if (days <= RETENTION_DAYS_SPLIT_MIN) {
+ days = RETENTION_DAYS_SPLIT_MIN;
+ if (days < freqDays) {
+ days = freqDays + 1;
+ }
+ } else {
+ if (days > RETENTION_DAYS_SPLIT_MAX) {
+ days = RETENTION_DAYS_SPLIT_MAX;
+ }
+ if (days < freqDays) {
+ days = freqDays + 1;
+ }
+ }
+ return days * 1440;
+}
+
+int smaSetKeepCfg(STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int type) {
+ pKeepCfg->precision = pCfg->precision;
+ switch (type) {
+ case TSDB_TYPE_TSMA:
+ ASSERT(0);
+ break;
+ case TSDB_TYPE_RSMA_L0:
+ SMA_SET_KEEP_CFG(0);
+ break;
+ case TSDB_TYPE_RSMA_L1:
+ SMA_SET_KEEP_CFG(1);
+ break;
+ case TSDB_TYPE_RSMA_L2:
+ SMA_SET_KEEP_CFG(2);
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ return 0;
+}
+
+int32_t smaOpen(SVnode *pVnode) {
+ STsdbCfg *pCfg = &pVnode->config.tsdbCfg;
+
+ ASSERT(!pVnode->pSma);
+
+ SSma *pSma = taosMemoryCalloc(1, sizeof(SSma));
+ if (!pSma) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return -1;
+ }
+ pSma->pVnode = pVnode;
+ taosThreadMutexInit(&pSma->mutex, NULL);
+ pSma->locked = false;
+
+ if (vnodeIsRollup(pVnode)) {
+ STsdbKeepCfg keepCfg = {0};
+ for (int i = 0; i < TSDB_RETENTION_MAX; ++i) {
+ if (i == TSDB_RETENTION_L0) {
+ SMA_OPEN_RSMA_IMPL(pVnode, 0);
+ } else if (i == TSDB_RETENTION_L1) {
+ SMA_OPEN_RSMA_IMPL(pVnode, 1);
+ } else if (i == TSDB_RETENTION_L2) {
+ SMA_OPEN_RSMA_IMPL(pVnode, 2);
+ } else {
+ ASSERT(0);
+ }
+ }
+ }
+
+ pVnode->pSma = pSma;
+ return 0;
+_err:
+ taosMemoryFreeClear(pSma);
+ return -1;
+}
+
+int32_t smaClose(SSma *pSma) {
+ if (pSma) {
+ taosThreadMutexDestroy(&pSma->mutex);
+ if SMA_RSMA_TSDB0 (pSma) tsdbClose(&SMA_RSMA_TSDB0(pSma));
+ if SMA_RSMA_TSDB1 (pSma) tsdbClose(&SMA_RSMA_TSDB1(pSma));
+ if SMA_RSMA_TSDB2 (pSma) tsdbClose(&SMA_RSMA_TSDB2(pSma));
+ }
+ return 0;
+}
\ No newline at end of file
diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c
new file mode 100644
index 0000000000..f9cb5a1a09
--- /dev/null
+++ b/source/dnode/vnode/src/sma/smaRollup.c
@@ -0,0 +1,484 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "sma.h"
+
+static FORCE_INLINE int32_t tdUidStorePut(STbUidStore *pStore, tb_uid_t suid, tb_uid_t *uid);
+static FORCE_INLINE int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids);
+static FORCE_INLINE int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType, qTaskInfo_t *taskInfo,
+ STSchema *pTSchema, tb_uid_t suid, tb_uid_t uid, int8_t level);
+
+struct SRSmaInfo {
+ void *taskInfo[TSDB_RETENTION_L2]; // qTaskInfo_t
+};
+
+static FORCE_INLINE void tdFreeTaskHandle(qTaskInfo_t *taskHandle) {
+ // Note: free/kill may in RC
+ qTaskInfo_t otaskHandle = atomic_load_ptr(taskHandle);
+ if (otaskHandle && atomic_val_compare_exchange_ptr(taskHandle, otaskHandle, NULL)) {
+ qDestroyTask(otaskHandle);
+ }
+}
+
+void *tdFreeRSmaInfo(SRSmaInfo *pInfo) {
+ for (int32_t i = 0; i < TSDB_RETENTION_MAX; ++i) {
+ if (pInfo->taskInfo[i]) {
+ tdFreeTaskHandle(pInfo->taskInfo[i]);
+ }
+ }
+ return NULL;
+}
+
+static FORCE_INLINE int32_t tdUidStoreInit(STbUidStore **pStore) {
+ ASSERT(*pStore == NULL);
+ *pStore = taosMemoryCalloc(1, sizeof(STbUidStore));
+ if (*pStore == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return TSDB_CODE_FAILED;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static FORCE_INLINE int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids) {
+ SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
+ SSmaStat *pStat = SMA_ENV_STAT(pEnv);
+ SRSmaInfo *pRSmaInfo = NULL;
+
+ if (!suid || !tbUids) {
+ terrno = TSDB_CODE_INVALID_PTR;
+ smaError("vgId:%d failed to get rsma info for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr(terrno));
+ return TSDB_CODE_FAILED;
+ }
+
+ pRSmaInfo = taosHashGet(SMA_STAT_INFO_HASH(pStat), suid, sizeof(tb_uid_t));
+ if (!pRSmaInfo || !(pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) {
+ smaError("vgId:%d failed to get rsma info for uid:%" PRIi64, SMA_VID(pSma), *suid);
+ terrno = TSDB_CODE_TDB_INVALID_SMA_STAT;
+ return TSDB_CODE_FAILED;
+ }
+
+ if (pRSmaInfo->taskInfo[0] && (qUpdateQualifiedTableId(pRSmaInfo->taskInfo[0], tbUids, true) != 0)) {
+ smaError("vgId:%d update tbUidList failed for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr(terrno));
+ return TSDB_CODE_FAILED;
+ } else {
+ smaDebug("vgId:%d update tbUidList succeed for qTaskInfo:%p with suid:%" PRIi64 ", uid:%" PRIi64, SMA_VID(pSma),
+ pRSmaInfo->taskInfo[0], *suid, *(int64_t *)taosArrayGet(tbUids, 0));
+ }
+
+ if (pRSmaInfo->taskInfo[1] && (qUpdateQualifiedTableId(pRSmaInfo->taskInfo[1], tbUids, true) != 0)) {
+ smaError("vgId:%d update tbUidList failed for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr(terrno));
+ return TSDB_CODE_FAILED;
+ } else {
+ smaDebug("vgId:%d update tbUidList succeed for qTaskInfo:%p with suid:%" PRIi64 ", uid:%" PRIi64, SMA_VID(pSma),
+ pRSmaInfo->taskInfo[1], *suid, *(int64_t *)taosArrayGet(tbUids, 0));
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t tdUpdateTbUidList(SSma *pSma, STbUidStore *pStore) {
+ if (!pStore || (taosArrayGetSize(pStore->tbUids) == 0)) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ if (tdUpdateTbUidListImpl(pSma, &pStore->suid, pStore->tbUids) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_FAILED;
+ }
+
+ void *pIter = taosHashIterate(pStore->uidHash, NULL);
+ while (pIter) {
+ tb_uid_t *pTbSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL);
+ SArray *pTbUids = *(SArray **)pIter;
+
+ if (tdUpdateTbUidListImpl(pSma, pTbSuid, pTbUids) != TSDB_CODE_SUCCESS) {
+ taosHashCancelIterate(pStore->uidHash, pIter);
+ return TSDB_CODE_FAILED;
+ }
+
+ pIter = taosHashIterate(pStore->uidHash, pIter);
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+/**
+ * @brief fetch suid/uids when create child tables of rollup SMA
+ *
+ * @param pTsdb
+ * @param ppStore
+ * @param suid
+ * @param uid
+ * @return int32_t
+ */
+int32_t tdFetchTbUidList(SSma *pSma, STbUidStore **ppStore, tb_uid_t suid, tb_uid_t uid) {
+ SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
+
+ // only applicable to rollup SMA ctables
+ if (!pEnv) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ SSmaStat *pStat = SMA_ENV_STAT(pEnv);
+ SHashObj *infoHash = NULL;
+ if (!pStat || !(infoHash = SMA_STAT_INFO_HASH(pStat))) {
+ terrno = TSDB_CODE_TDB_INVALID_SMA_STAT;
+ return TSDB_CODE_FAILED;
+ }
+
+ // info cached when create rsma stable and return directly for non-rsma ctables
+ if (!taosHashGet(infoHash, &suid, sizeof(tb_uid_t))) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ ASSERT(ppStore != NULL);
+
+ if (!(*ppStore)) {
+ if (tdUidStoreInit(ppStore) != 0) {
+ return TSDB_CODE_FAILED;
+ }
+ }
+
+ if (tdUidStorePut(*ppStore, suid, &uid) != 0) {
+ *ppStore = tdUidStoreFree(*ppStore);
+ return TSDB_CODE_FAILED;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+/**
+ * @brief Check and init qTaskInfo_t, only applicable to stable with SRSmaParam.
+ *
+ * @param pTsdb
+ * @param pMeta
+ * @param pReq
+ * @return int32_t
+ */
+int32_t tdProcessRSmaCreate(SSma *pSma, SMeta *pMeta, SVCreateStbReq *pReq, SMsgCb *pMsgCb) {
+ if (!pReq->rollup) {
+ smaTrace("vgId:%d return directly since no rollup for stable %s %" PRIi64, SMA_VID(pSma), pReq->name, pReq->suid);
+ return TSDB_CODE_SUCCESS;
+ }
+
+ SRSmaParam *param = &pReq->pRSmaParam;
+
+ if ((param->qmsg1Len == 0) && (param->qmsg2Len == 0)) {
+ smaWarn("vgId:%d no qmsg1/qmsg2 for rollup stable %s %" PRIi64, SMA_VID(pSma), pReq->name, pReq->suid);
+ return TSDB_CODE_SUCCESS;
+ }
+
+ if (tdCheckAndInitSmaEnv(pSma, TSDB_SMA_TYPE_ROLLUP) != TSDB_CODE_SUCCESS) {
+ terrno = TSDB_CODE_TDB_INIT_FAILED;
+ return TSDB_CODE_FAILED;
+ }
+
+ SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
+ SSmaStat *pStat = SMA_ENV_STAT(pEnv);
+ SRSmaInfo *pRSmaInfo = NULL;
+
+ pRSmaInfo = taosHashGet(SMA_STAT_INFO_HASH(pStat), &pReq->suid, sizeof(tb_uid_t));
+ if (pRSmaInfo) {
+ smaWarn("vgId:%d rsma info already exists for stb: %s, %" PRIi64, SMA_VID(pSma), pReq->name, pReq->suid);
+ return TSDB_CODE_SUCCESS;
+ }
+
+ pRSmaInfo = (SRSmaInfo *)taosMemoryCalloc(1, sizeof(SRSmaInfo));
+ if (!pRSmaInfo) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return TSDB_CODE_FAILED;
+ }
+
+ STqReadHandle *pReadHandle = tqInitSubmitMsgScanner(pMeta);
+ if (!pReadHandle) {
+ taosMemoryFree(pRSmaInfo);
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return TSDB_CODE_FAILED;
+ }
+
+ SReadHandle handle = {
+ .reader = pReadHandle,
+ .meta = pMeta,
+ .pMsgCb = pMsgCb,
+ };
+
+ if (param->qmsg1) {
+ pRSmaInfo->taskInfo[0] = qCreateStreamExecTaskInfo(param->qmsg1, &handle);
+ if (!pRSmaInfo->taskInfo[0]) {
+ taosMemoryFree(pRSmaInfo);
+ taosMemoryFree(pReadHandle);
+ return TSDB_CODE_FAILED;
+ }
+ }
+
+ if (param->qmsg2) {
+ pRSmaInfo->taskInfo[1] = qCreateStreamExecTaskInfo(param->qmsg2, &handle);
+ if (!pRSmaInfo->taskInfo[1]) {
+ taosMemoryFree(pRSmaInfo);
+ taosMemoryFree(pReadHandle);
+ return TSDB_CODE_FAILED;
+ }
+ }
+
+ if (taosHashPut(SMA_STAT_INFO_HASH(pStat), &pReq->suid, sizeof(tb_uid_t), &pRSmaInfo, sizeof(pRSmaInfo)) !=
+ TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_FAILED;
+ } else {
+ smaDebug("vgId:%d register rsma info succeed for suid:%" PRIi64, SMA_VID(pSma), pReq->suid);
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+/**
+ * @brief store suid/[uids], prefer to use array and then hash
+ *
+ * @param pStore
+ * @param suid
+ * @param uid
+ * @return int32_t
+ */
+static int32_t tdUidStorePut(STbUidStore *pStore, tb_uid_t suid, tb_uid_t *uid) {
+ // prefer to store suid/uids in array
+ if ((suid == pStore->suid) || (pStore->suid == 0)) {
+ if (pStore->suid == 0) {
+ pStore->suid = suid;
+ }
+ if (uid) {
+ if (!pStore->tbUids) {
+ if (!(pStore->tbUids = taosArrayInit(1, sizeof(tb_uid_t)))) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return TSDB_CODE_FAILED;
+ }
+ }
+ if (!taosArrayPush(pStore->tbUids, uid)) {
+ return TSDB_CODE_FAILED;
+ }
+ }
+ } else {
+ // store other suid/uids in hash when multiple stable/table included in 1 batch of request
+ if (!pStore->uidHash) {
+ pStore->uidHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_ENTRY_LOCK);
+ if (!pStore->uidHash) {
+ return TSDB_CODE_FAILED;
+ }
+ }
+ if (uid) {
+ SArray *uidArray = taosHashGet(pStore->uidHash, &suid, sizeof(tb_uid_t));
+ if (uidArray && ((uidArray = *(SArray **)uidArray))) {
+ taosArrayPush(uidArray, uid);
+ } else {
+ SArray *pUidArray = taosArrayInit(1, sizeof(tb_uid_t));
+ if (!pUidArray) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return TSDB_CODE_FAILED;
+ }
+ if (!taosArrayPush(pUidArray, uid)) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return TSDB_CODE_FAILED;
+ }
+ if (taosHashPut(pStore->uidHash, &suid, sizeof(suid), &pUidArray, sizeof(pUidArray)) != 0) {
+ return TSDB_CODE_FAILED;
+ }
+ }
+ } else {
+ if (taosHashPut(pStore->uidHash, &suid, sizeof(suid), NULL, 0) != 0) {
+ return TSDB_CODE_FAILED;
+ }
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+void tdUidStoreDestory(STbUidStore *pStore) {
+ if (pStore) {
+ if (pStore->uidHash) {
+ if (pStore->tbUids) {
+ // When pStore->tbUids not NULL, the pStore->uidHash has k/v; otherwise pStore->uidHash only has keys.
+ void *pIter = taosHashIterate(pStore->uidHash, NULL);
+ while (pIter) {
+ SArray *arr = *(SArray **)pIter;
+ taosArrayDestroy(arr);
+ pIter = taosHashIterate(pStore->uidHash, pIter);
+ }
+ }
+ taosHashCleanup(pStore->uidHash);
+ }
+ taosArrayDestroy(pStore->tbUids);
+ }
+}
+
+void *tdUidStoreFree(STbUidStore *pStore) {
+ if (pStore) {
+ tdUidStoreDestory(pStore);
+ taosMemoryFree(pStore);
+ }
+ return NULL;
+}
+
+static int32_t tdProcessSubmitReq(STsdb *pTsdb, int64_t version, void *pReq) {
+ if (!pReq) {
+ terrno = TSDB_CODE_INVALID_PTR;
+ return TSDB_CODE_FAILED;
+ }
+
+ SSubmitReq *pSubmitReq = (SSubmitReq *)pReq;
+
+ if (tsdbInsertData(pTsdb, version, pSubmitReq, NULL) < 0) {
+ return TSDB_CODE_FAILED;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t tdFetchSubmitReqSuids(SSubmitReq *pMsg, STbUidStore *pStore) {
+ ASSERT(pMsg != NULL);
+ SSubmitMsgIter msgIter = {0};
+ SSubmitBlk *pBlock = NULL;
+ SSubmitBlkIter blkIter = {0};
+ STSRow *row = NULL;
+
+ terrno = TSDB_CODE_SUCCESS;
+
+ if (tInitSubmitMsgIter(pMsg, &msgIter) < 0) return -1;
+ while (true) {
+ if (tGetSubmitMsgNext(&msgIter, &pBlock) < 0) return -1;
+
+ if (!pBlock) break;
+ tdUidStorePut(pStore, msgIter.suid, NULL);
+ pStore->uid = msgIter.uid; // TODO: remove, just for debugging
+ }
+
+ if (terrno != TSDB_CODE_SUCCESS) return -1;
+ return 0;
+}
+
+static FORCE_INLINE int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType, qTaskInfo_t *taskInfo,
+ STSchema *pTSchema, tb_uid_t suid, tb_uid_t uid, int8_t level) {
+ SArray *pResult = NULL;
+
+ if (!taskInfo) {
+ smaDebug("vgId:%d no qTaskInfo to execute rsma %" PRIi8 " task for suid:%" PRIu64, SMA_VID(pSma), level, suid);
+ return TSDB_CODE_SUCCESS;
+ }
+
+ smaDebug("vgId:%d execute rsma %" PRIi8 " task for qTaskInfo:%p suid:%" PRIu64, SMA_VID(pSma), level, taskInfo, suid);
+
+ qSetStreamInput(taskInfo, pMsg, inputType);
+ while (1) {
+ SSDataBlock *output = NULL;
+ uint64_t ts;
+ if (qExecTask(taskInfo, &output, &ts) < 0) {
+ ASSERT(false);
+ }
+ if (!output) {
+ break;
+ }
+ if (!pResult) {
+ pResult = taosArrayInit(0, sizeof(SSDataBlock));
+ if (!pResult) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return TSDB_CODE_FAILED;
+ }
+ }
+
+ taosArrayPush(pResult, output);
+ }
+
+ if (taosArrayGetSize(pResult) > 0) {
+ blockDebugShowData(pResult);
+ STsdb *sinkTsdb = (level == TSDB_RETENTION_L1 ? pSma->pRSmaTsdb1 : pSma->pRSmaTsdb2);
+ SSubmitReq *pReq = NULL;
+ if (buildSubmitReqFromDataBlock(&pReq, pResult, pTSchema, SMA_VID(pSma), uid, suid) != 0) {
+ taosArrayDestroy(pResult);
+ return TSDB_CODE_FAILED;
+ }
+ if (tdProcessSubmitReq(sinkTsdb, INT64_MAX, pReq) != 0) {
+ taosArrayDestroy(pResult);
+ taosMemoryFreeClear(pReq);
+ return TSDB_CODE_FAILED;
+ }
+ taosMemoryFreeClear(pReq);
+ } else {
+ smaWarn("vgId:%d no rsma % " PRIi8 " data generated since %s", SMA_VID(pSma), level, tstrerror(terrno));
+ }
+
+ taosArrayDestroy(pResult);
+
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t tdExecuteRSma(SSma *pSma, const void *pMsg, int32_t inputType, tb_uid_t suid, tb_uid_t uid) {
+ SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
+ if (!pEnv) {
+ // only applicable when rsma env exists
+ return TSDB_CODE_SUCCESS;
+ }
+
+ ASSERT(uid != 0); // TODO: remove later
+
+ SSmaStat *pStat = SMA_ENV_STAT(pEnv);
+ SRSmaInfo *pRSmaInfo = NULL;
+
+ pRSmaInfo = taosHashGet(SMA_STAT_INFO_HASH(pStat), &suid, sizeof(tb_uid_t));
+
+ if (!pRSmaInfo || !(pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) {
+ smaDebug("vgId:%d no rsma info for suid:%" PRIu64, SMA_VID(pSma), suid);
+ return TSDB_CODE_SUCCESS;
+ }
+ if (!pRSmaInfo->taskInfo[0]) {
+ smaDebug("vgId:%d no rsma qTaskInfo for suid:%" PRIu64, SMA_VID(pSma), suid);
+ return TSDB_CODE_SUCCESS;
+ }
+
+ if (inputType == STREAM_DATA_TYPE_SUBMIT_BLOCK) {
+ // TODO: use the proper schema instead of 0, and cache STSchema in cache
+ STSchema *pTSchema = metaGetTbTSchema(SMA_META(pSma), suid, 0);
+ if (!pTSchema) {
+ terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
+ return TSDB_CODE_FAILED;
+ }
+ tdExecuteRSmaImpl(pSma, pMsg, inputType, pRSmaInfo->taskInfo[0], pTSchema, suid, uid, TSDB_RETENTION_L1);
+ tdExecuteRSmaImpl(pSma, pMsg, inputType, pRSmaInfo->taskInfo[1], pTSchema, suid, uid, TSDB_RETENTION_L2);
+ taosMemoryFree(pTSchema);
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t tdProcessRSmaSubmit(SSma *pSma, void *pMsg, int32_t inputType) {
+ SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
+ if (!pEnv) {
+ // only applicable when rsma env exists
+ return TSDB_CODE_SUCCESS;
+ }
+
+ if (inputType == STREAM_DATA_TYPE_SUBMIT_BLOCK) {
+ STbUidStore uidStore = {0};
+ tdFetchSubmitReqSuids(pMsg, &uidStore);
+
+ if (uidStore.suid != 0) {
+ tdExecuteRSma(pSma, pMsg, inputType, uidStore.suid, uidStore.uid);
+
+ void *pIter = taosHashIterate(uidStore.uidHash, NULL);
+ while (pIter) {
+ tb_uid_t *pTbSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL);
+ tdExecuteRSma(pSma, pMsg, inputType, *pTbSuid, 0);
+ pIter = taosHashIterate(uidStore.uidHash, pIter);
+ }
+
+ tdUidStoreDestory(&uidStore);
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
diff --git a/source/dnode/vnode/src/sma/smaTDBImpl.c b/source/dnode/vnode/src/sma/smaTDBImpl.c
new file mode 100644
index 0000000000..821ec44aa5
--- /dev/null
+++ b/source/dnode/vnode/src/sma/smaTDBImpl.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#define ALLOW_FORBID_FUNC
+
+#include "sma.h"
+
+int32_t smaOpenDBEnv(TENV **ppEnv, const char *path) {
+ int ret = 0;
+
+ if (path == NULL) return -1;
+
+ ret = tdbEnvOpen(path, 4096, 256, ppEnv); // use as param
+
+ if (ret != 0) {
+ smaError("failed to create tsdb db env, ret = %d", ret);
+ return -1;
+ }
+
+ return 0;
+}
+
+int32_t smaCloseDBEnv(TENV *pEnv) { return tdbEnvClose(pEnv); }
+
+static inline int tdSmaKeyCmpr(const void *arg1, int len1, const void *arg2, int len2) {
+ const SSmaKey *pKey1 = (const SSmaKey *)arg1;
+ const SSmaKey *pKey2 = (const SSmaKey *)arg2;
+
+ ASSERT(len1 == len2 && len1 == sizeof(SSmaKey));
+
+ if (pKey1->skey < pKey2->skey) {
+ return -1;
+ } else if (pKey1->skey > pKey2->skey) {
+ return 1;
+ }
+ if (pKey1->groupId < pKey2->groupId) {
+ return -1;
+ } else if (pKey1->groupId > pKey2->groupId) {
+ return 1;
+ }
+
+ return 0;
+}
+
+static int32_t smaOpenDBDb(TDB **ppDB, TENV *pEnv, const char *pFName) {
+ int ret;
+ tdb_cmpr_fn_t compFunc;
+
+ // Create a database
+ compFunc = tdSmaKeyCmpr;
+ ret = tdbDbOpen(pFName, -1, -1, compFunc, pEnv, ppDB);
+
+ return 0;
+}
+
+static int32_t smaCloseDBDb(TDB *pDB) { return tdbDbClose(pDB); }
+
+int32_t smaOpenDBF(TENV *pEnv, SDBFile *pDBF) {
+ // TEnv is shared by a group of SDBFile
+ if (!pEnv || !pDBF) {
+ terrno = TSDB_CODE_INVALID_PTR;
+ return -1;
+ }
+
+ // Open DBF
+ if (smaOpenDBDb(&(pDBF->pDB), pEnv, pDBF->path) < 0) {
+ terrno = TSDB_CODE_TDB_INIT_FAILED;
+ smaCloseDBDb(pDBF->pDB);
+ return -1;
+ }
+
+ return 0;
+}
+
+int32_t smaCloseDBF(SDBFile *pDBF) {
+ int32_t ret = 0;
+ if (pDBF->pDB) {
+ ret = smaCloseDBDb(pDBF->pDB);
+ pDBF->pDB = NULL;
+ }
+ taosMemoryFreeClear(pDBF->path);
+ return ret;
+}
+
+int32_t smaSaveSmaToDB(SDBFile *pDBF, void *pKey, int32_t keyLen, void *pVal, int32_t valLen, TXN *txn) {
+ int32_t ret;
+
+ ret = tdbDbInsert(pDBF->pDB, pKey, keyLen, pVal, valLen, txn);
+ if (ret < 0) {
+ smaError("failed to create insert sma data into db, ret = %d", ret);
+ return -1;
+ }
+
+ return 0;
+}
+
+void *smaGetSmaDataByKey(SDBFile *pDBF, const void *pKey, int32_t keyLen, int32_t *valLen) {
+ void *pVal = NULL;
+ int ret;
+
+ ret = tdbDbGet(pDBF->pDB, pKey, keyLen, &pVal, valLen);
+
+ if (ret < 0) {
+ smaError("failed to get sma data from db, ret = %d", ret);
+ return NULL;
+ }
+
+ ASSERT(*valLen >= 0);
+
+ // TODO: lock?
+ // TODO: Would the key/value be destoryed during return the data?
+ // TODO: How about the key is updated while value length is changed? The original value buffer would be freed
+ // automatically?
+
+ return pVal;
+}
\ No newline at end of file
diff --git a/source/dnode/vnode/src/sma/smaTimeRange.c b/source/dnode/vnode/src/sma/smaTimeRange.c
new file mode 100644
index 0000000000..b04885c5f0
--- /dev/null
+++ b/source/dnode/vnode/src/sma/smaTimeRange.c
@@ -0,0 +1,1103 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "sma.h"
+#include "tsdb.h"
+
+#undef _TEST_SMA_PRINT_DEBUG_LOG_
+#define SMA_STORAGE_TSDB_DAYS 30
+#define SMA_STORAGE_TSDB_TIMES 10
+#define SMA_STORAGE_SPLIT_HOURS 24
+#define SMA_KEY_LEN 16 // TSKEY+groupId 8+8
+#define SMA_DROP_EXPIRED_TIME 10 // default is 10 seconds
+
+#define SMA_STATE_ITEM_HASH_SLOT 32
+
+
+typedef struct {
+ SSma *pSma;
+ SDBFile dFile;
+ const SArray *pDataBlocks; // sma data
+ int32_t interval; // interval with the precision of DB
+} STSmaWriteH;
+
+typedef struct {
+ int32_t iter;
+ int32_t fid;
+} SmaFsIter;
+
+typedef struct {
+ STsdb *pTsdb;
+ SSma *pSma;
+ SDBFile dFile;
+ int32_t interval; // interval with the precision of DB
+ int32_t blockSize; // size of SMA block item
+ int8_t storageLevel;
+ int8_t days;
+ SmaFsIter smaFsIter;
+} STSmaReadH;
+
+typedef enum {
+ SMA_STORAGE_LEVEL_TSDB = 0, // use days of self-defined e.g. vnode${N}/tsdb/tsma/sma_index_uid/v2f200.tsma
+ SMA_STORAGE_LEVEL_DFILESET = 1 // use days of TS data e.g. vnode${N}/tsdb/tsma/sma_index_uid/v2f1906.tsma
+} ESmaStorageLevel;
+
+
+// static func
+
+static int64_t tdGetIntervalByPrecision(int64_t interval, uint8_t intervalUnit, int8_t precision, bool adjusted);
+static int32_t tdGetSmaStorageLevel(int64_t interval, int8_t intervalUnit);
+static int32_t tdInitTSmaWriteH(STSmaWriteH *pSmaH, SSma *pSma, const SArray *pDataBlocks, int64_t interval,
+ int8_t intervalUnit);
+static int32_t tdInitTSmaReadH(STSmaReadH *pSmaH, SSma *pSma, int64_t interval, int8_t intervalUnit);
+static void tdDestroyTSmaWriteH(STSmaWriteH *pSmaH);
+static int32_t tdGetTSmaDays(SSma *pSma, int64_t interval, int32_t storageLevel);
+static int32_t tdSetTSmaDataFile(STSmaWriteH *pSmaH, int64_t indexUid, int32_t fid);
+static int32_t tdInitTSmaFile(STSmaReadH *pSmaH, int64_t indexUid, TSKEY skey);
+static bool tdSetAndOpenTSmaFile(STSmaReadH *pReadH, TSKEY *queryKey);
+static int32_t tdInsertTSmaBlocks(STSmaWriteH *pSmaH, void *smaKey, int32_t keyLen, void *pData, int32_t dataLen,
+ TXN *txn);
+// expired window
+static int32_t tdUpdateExpiredWindowImpl(SSma *pSma, SSubmitReq *pMsg, int64_t version);
+static int32_t tdSetExpiredWindow(SSma *pSma, SHashObj *pItemsHash, int64_t indexUid, int64_t winSKey,
+ int64_t version);
+static int32_t tdResetExpiredWindow(SSma *pSma, SSmaStat *pStat, int64_t indexUid, TSKEY skey);
+static int32_t tdDropTSmaDataImpl(SSma *pSma, int64_t indexUid);
+
+// read data
+// TODO: This is the basic params, and should wrap the params to a queryHandle.
+static int32_t tdGetTSmaDataImpl(SSma *pSma, char *pData, int64_t indexUid, TSKEY querySKey, int32_t nMaxResult);
+
+// implementation
+
+/**
+ * @brief
+ *
+ * @param pSmaH
+ * @param pSma
+ * @param interval
+ * @param intervalUnit
+ * @return int32_t
+ */
+static int32_t tdInitTSmaReadH(STSmaReadH *pSmaH, SSma *pSma, int64_t interval, int8_t intervalUnit) {
+ pSmaH->pSma = pSma;
+ pSmaH->interval = tdGetIntervalByPrecision(interval, intervalUnit, SMA_TSDB_CFG(pSma)->precision, true);
+ pSmaH->storageLevel = tdGetSmaStorageLevel(interval, intervalUnit);
+ pSmaH->days = tdGetTSmaDays(pSma, pSmaH->interval, pSmaH->storageLevel);
+ return TSDB_CODE_SUCCESS;
+}
+
+/**
+ * @brief Init of tSma FS
+ *
+ * @param pReadH
+ * @param indexUid
+ * @param skey
+ * @return int32_t
+ */
+static int32_t tdInitTSmaFile(STSmaReadH *pSmaH, int64_t indexUid, TSKEY skey) {
+ SSma *pSma = pSmaH->pSma;
+
+ int32_t fid = (int32_t)(TSDB_KEY_FID(skey, pSmaH->days, SMA_TSDB_CFG(pSma)->precision));
+ char tSmaFile[TSDB_FILENAME_LEN] = {0};
+ snprintf(tSmaFile, TSDB_FILENAME_LEN, "%" PRIi64 "%sv%df%d.tsma", indexUid, TD_DIRSEP, SMA_VID(pSma), fid);
+ pSmaH->dFile.path = strdup(tSmaFile);
+ pSmaH->smaFsIter.iter = 0;
+ pSmaH->smaFsIter.fid = fid;
+ return TSDB_CODE_SUCCESS;
+}
+
+/**
+ * @brief Set and open tSma file if it has key locates in queryWin.
+ *
+ * @param pReadH
+ * @param param
+ * @param queryWin
+ * @return true
+ * @return false
+ */
+static bool tdSetAndOpenTSmaFile(STSmaReadH *pReadH, TSKEY *queryKey) {
+ // SArray *smaFs = pReadH->pTsdb->fs->cstatus->sf;
+ // int32_t nSmaFs = taosArrayGetSize(smaFs);
+
+ smaCloseDBF(&pReadH->dFile);
+
+#if 0
+ while (pReadH->smaFsIter.iter < nSmaFs) {
+ void *pSmaFile = taosArrayGet(smaFs, pReadH->smaFsIter.iter);
+ if (pSmaFile) { // match(indexName, queryWindow)
+ // TODO: select the file by index_name ...
+ pReadH->dFile = pSmaFile;
+ ++pReadH->smaFsIter.iter;
+ break;
+ }
+ ++pReadH->smaFsIter.iter;
+ }
+
+ if (pReadH->pDFile) {
+ tdDebug("vg%d: smaFile %s matched", REPO_ID(pReadH->pTsdb), "[pSmaFile dir]");
+ return true;
+ }
+#endif
+
+ return false;
+}
+
+
+/**
+ * @brief Approximate value for week/month/year.
+ *
+ * @param interval
+ * @param intervalUnit
+ * @param precision
+ * @param adjusted Interval already adjusted according to DB precision
+ * @return int64_t
+ */
+static int64_t tdGetIntervalByPrecision(int64_t interval, uint8_t intervalUnit, int8_t precision, bool adjusted) {
+ if (adjusted) {
+ return interval;
+ }
+
+ switch (intervalUnit) {
+ case TIME_UNIT_YEAR: // approximate value
+ interval *= 365 * 86400 * 1e3;
+ break;
+ case TIME_UNIT_MONTH: // approximate value
+ interval *= 30 * 86400 * 1e3;
+ break;
+ case TIME_UNIT_WEEK: // approximate value
+ interval *= 7 * 86400 * 1e3;
+ break;
+ case TIME_UNIT_DAY: // the interval for tSma calculation must <= day
+ interval *= 86400 * 1e3;
+ break;
+ case TIME_UNIT_HOUR:
+ interval *= 3600 * 1e3;
+ break;
+ case TIME_UNIT_MINUTE:
+ interval *= 60 * 1e3;
+ break;
+ case TIME_UNIT_SECOND:
+ interval *= 1e3;
+ break;
+ default:
+ break;
+ }
+
+ switch (precision) {
+ case TSDB_TIME_PRECISION_MILLI:
+ if (TIME_UNIT_MICROSECOND == intervalUnit) { // us
+ return interval / 1e3;
+ } else if (TIME_UNIT_NANOSECOND == intervalUnit) { // nano second
+ return interval / 1e6;
+ } else { // ms
+ return interval;
+ }
+ break;
+ case TSDB_TIME_PRECISION_MICRO:
+ if (TIME_UNIT_MICROSECOND == intervalUnit) { // us
+ return interval;
+ } else if (TIME_UNIT_NANOSECOND == intervalUnit) { // ns
+ return interval / 1e3;
+ } else { // ms
+ return interval * 1e3;
+ }
+ break;
+ case TSDB_TIME_PRECISION_NANO:
+ if (TIME_UNIT_MICROSECOND == intervalUnit) { // us
+ return interval * 1e3;
+ } else if (TIME_UNIT_NANOSECOND == intervalUnit) { // ns
+ return interval;
+ } else { // ms
+ return interval * 1e6;
+ }
+ break;
+ default: // ms
+ if (TIME_UNIT_MICROSECOND == intervalUnit) { // us
+ return interval / 1e3;
+ } else if (TIME_UNIT_NANOSECOND == intervalUnit) { // ns
+ return interval / 1e6;
+ } else { // ms
+ return interval;
+ }
+ break;
+ }
+ return interval;
+}
+
+
+static int32_t tdInitTSmaWriteH(STSmaWriteH *pSmaH, SSma *pSma, const SArray *pDataBlocks, int64_t interval,
+ int8_t intervalUnit) {
+ pSmaH->pSma = pSma;
+ pSmaH->interval = tdGetIntervalByPrecision(interval, intervalUnit, SMA_TSDB_CFG(pSma)->precision, true);
+ pSmaH->pDataBlocks = pDataBlocks;
+ pSmaH->dFile.fid = SMA_IVLD_FID;
+ return TSDB_CODE_SUCCESS;
+}
+
+static void tdDestroyTSmaWriteH(STSmaWriteH *pSmaH) {
+ if (pSmaH) {
+ smaCloseDBF(&pSmaH->dFile);
+ }
+}
+
+static int32_t tdSetTSmaDataFile(STSmaWriteH *pSmaH, int64_t indexUid, int32_t fid) {
+ SSma *pSma = pSmaH->pSma;
+ ASSERT(!pSmaH->dFile.path && !pSmaH->dFile.pDB);
+
+ pSmaH->dFile.fid = fid;
+ char tSmaFile[TSDB_FILENAME_LEN] = {0};
+ snprintf(tSmaFile, TSDB_FILENAME_LEN, "%" PRIi64 "%sv%df%d.tsma", indexUid, TD_DIRSEP, SMA_VID(pSma), fid);
+ pSmaH->dFile.path = strdup(tSmaFile);
+
+ return TSDB_CODE_SUCCESS;
+}
+
+/**
+ * @brief
+ *
+ * @param pSma
+ * @param interval Interval calculated by DB's precision
+ * @param storageLevel
+ * @return int32_t
+ */
+static int32_t tdGetTSmaDays(SSma *pSma, int64_t interval, int32_t storageLevel) {
+ STsdbCfg *pCfg = SMA_TSDB_CFG(pSma);
+ int32_t daysPerFile = pCfg->days;
+
+ if (storageLevel == SMA_STORAGE_LEVEL_TSDB) {
+ int32_t days = SMA_STORAGE_TSDB_TIMES * (interval / tsTickPerMin[pCfg->precision]);
+ daysPerFile = days > SMA_STORAGE_TSDB_DAYS ? days : SMA_STORAGE_TSDB_DAYS;
+ }
+
+ return daysPerFile;
+}
+
+/**
+ * @brief Judge the tSma storage level
+ *
+ * @param interval
+ * @param intervalUnit
+ * @return int32_t
+ */
+static int32_t tdGetSmaStorageLevel(int64_t interval, int8_t intervalUnit) {
+ // TODO: configurable for SMA_STORAGE_SPLIT_HOURS?
+ switch (intervalUnit) {
+ case TIME_UNIT_HOUR:
+ if (interval < SMA_STORAGE_SPLIT_HOURS) {
+ return SMA_STORAGE_LEVEL_DFILESET;
+ }
+ break;
+ case TIME_UNIT_MINUTE:
+ if (interval < 60 * SMA_STORAGE_SPLIT_HOURS) {
+ return SMA_STORAGE_LEVEL_DFILESET;
+ }
+ break;
+ case TIME_UNIT_SECOND:
+ if (interval < 3600 * SMA_STORAGE_SPLIT_HOURS) {
+ return SMA_STORAGE_LEVEL_DFILESET;
+ }
+ break;
+ case TIME_UNIT_MILLISECOND:
+ if (interval < 3600 * 1e3 * SMA_STORAGE_SPLIT_HOURS) {
+ return SMA_STORAGE_LEVEL_DFILESET;
+ }
+ break;
+ case TIME_UNIT_MICROSECOND:
+ if (interval < 3600 * 1e6 * SMA_STORAGE_SPLIT_HOURS) {
+ return SMA_STORAGE_LEVEL_DFILESET;
+ }
+ break;
+ case TIME_UNIT_NANOSECOND:
+ if (interval < 3600 * 1e9 * SMA_STORAGE_SPLIT_HOURS) {
+ return SMA_STORAGE_LEVEL_DFILESET;
+ }
+ break;
+ default:
+ break;
+ }
+ return SMA_STORAGE_LEVEL_TSDB;
+}
+
+/**
+ * @brief Insert/Update Time-range-wise SMA data.
+ * - If interval < SMA_STORAGE_SPLIT_HOURS(e.g. 24), save the SMA data as a part of DFileSet to e.g.
+ * v3f1900.tsma.${sma_index_name}. The days is the same with that for TS data files.
+ * - If interval >= SMA_STORAGE_SPLIT_HOURS, save the SMA data to e.g. vnode3/tsma/v3f632.tsma.${sma_index_name}. The
+ * days is 30 times of the interval, and the minimum days is SMA_STORAGE_TSDB_DAYS(30d).
+ * - The destination file of one data block for some interval is determined by its start TS key.
+ *
+ * @param pSma
+ * @param msg
+ * @return int32_t
+ */
+int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
+ STsdbCfg *pCfg = SMA_TSDB_CFG(pSma);
+ const SArray *pDataBlocks = (const SArray *)msg;
+
+ // TODO: destroy SSDataBlocks(msg)
+
+ // For super table aggregation, the sma data is stored in vgroup calculated from the hash value of stable name. Thus
+ // the sma data would arrive ahead of the update-expired-window msg.
+ if (tdCheckAndInitSmaEnv(pSma, TSDB_SMA_TYPE_TIME_RANGE) != TSDB_CODE_SUCCESS) {
+ terrno = TSDB_CODE_TDB_INIT_FAILED;
+ return TSDB_CODE_FAILED;
+ }
+
+ if (!pDataBlocks) {
+ terrno = TSDB_CODE_INVALID_PTR;
+ smaWarn("vgId:%d insert tSma data failed since pDataBlocks is NULL", SMA_VID(pSma));
+ return terrno;
+ }
+
+ if (taosArrayGetSize(pDataBlocks) <= 0) {
+ terrno = TSDB_CODE_INVALID_PARA;
+ smaWarn("vgId:%d insert tSma data failed since pDataBlocks is empty", SMA_VID(pSma));
+ return TSDB_CODE_FAILED;
+ }
+
+ SSmaEnv *pEnv = SMA_TSMA_ENV(pSma);
+ SSmaStat *pStat = SMA_ENV_STAT(pEnv);
+ SSmaStatItem *pItem = NULL;
+
+ tdRefSmaStat(pSma, pStat);
+
+ if (pStat && SMA_STAT_ITEMS(pStat)) {
+ pItem = taosHashGet(SMA_STAT_ITEMS(pStat), &indexUid, sizeof(indexUid));
+ }
+
+ if (!pItem || !(pItem = *(SSmaStatItem **)pItem) || tdSmaStatIsDropped(pItem)) {
+ terrno = TSDB_CODE_TDB_INVALID_SMA_STAT;
+ tdUnRefSmaStat(pSma, pStat);
+ return TSDB_CODE_FAILED;
+ }
+
+ STSma *pTSma = pItem->pTSma;
+ STSmaWriteH tSmaH = {0};
+
+ if (tdInitTSmaWriteH(&tSmaH, pSma, pDataBlocks, pTSma->interval, pTSma->intervalUnit) != 0) {
+ return TSDB_CODE_FAILED;
+ }
+
+ char rPath[TSDB_FILENAME_LEN] = {0};
+ char aPath[TSDB_FILENAME_LEN] = {0};
+ snprintf(rPath, TSDB_FILENAME_LEN, "%s%s%" PRIi64, SMA_ENV_PATH(pEnv), TD_DIRSEP, indexUid);
+ tfsAbsoluteName(SMA_TFS(pSma), SMA_ENV_DID(pEnv), rPath, aPath);
+ if (!taosCheckExistFile(aPath)) {
+ if (tfsMkdirRecurAt(SMA_TFS(pSma), rPath, SMA_ENV_DID(pEnv)) != TSDB_CODE_SUCCESS) {
+ tdUnRefSmaStat(pSma, pStat);
+ return TSDB_CODE_FAILED;
+ }
+ }
+
+ // Step 1: Judge the storage level and days
+ int32_t storageLevel = tdGetSmaStorageLevel(pTSma->interval, pTSma->intervalUnit);
+ int32_t daysPerFile = tdGetTSmaDays(pSma, tSmaH.interval, storageLevel);
+
+ char smaKey[SMA_KEY_LEN] = {0}; // key: skey + groupId
+ char dataBuf[512] = {0}; // val: aggr data // TODO: handle 512 buffer?
+ void *pDataBuf = NULL;
+ int32_t sz = taosArrayGetSize(pDataBlocks);
+ for (int32_t i = 0; i < sz; ++i) {
+ SSDataBlock *pDataBlock = taosArrayGet(pDataBlocks, i);
+ int32_t colNum = pDataBlock->info.numOfCols;
+ int32_t rows = pDataBlock->info.rows;
+ int32_t rowSize = pDataBlock->info.rowSize;
+ int64_t groupId = pDataBlock->info.groupId;
+ for (int32_t j = 0; j < rows; ++j) {
+ printf("|");
+ TSKEY skey = TSKEY_INITIAL_VAL; // the start key of TS window by interval
+ void *pSmaKey = &smaKey;
+ bool isStartKey = false;
+
+ int32_t tlen = 0; // reset the len
+ pDataBuf = &dataBuf; // reset the buf
+ for (int32_t k = 0; k < colNum; ++k) {
+ SColumnInfoData *pColInfoData = taosArrayGet(pDataBlock->pDataBlock, k);
+ void *var = POINTER_SHIFT(pColInfoData->pData, j * pColInfoData->info.bytes);
+ switch (pColInfoData->info.type) {
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ if (!isStartKey) {
+ isStartKey = true;
+ skey = *(TSKEY *)var;
+ printf("= skey %" PRIi64 " groupId = %" PRIi64 "|", skey, groupId);
+ tdEncodeTSmaKey(groupId, skey, &pSmaKey);
+ } else {
+ printf(" %" PRIi64 " |", *(int64_t *)var);
+ tlen += taosEncodeFixedI64(&pDataBuf, *(int64_t *)var);
+ break;
+ }
+ break;
+ case TSDB_DATA_TYPE_BOOL:
+ case TSDB_DATA_TYPE_UTINYINT:
+ printf(" %15d |", *(uint8_t *)var);
+ tlen += taosEncodeFixedU8(&pDataBuf, *(uint8_t *)var);
+ break;
+ case TSDB_DATA_TYPE_TINYINT:
+ printf(" %15d |", *(int8_t *)var);
+ tlen += taosEncodeFixedI8(&pDataBuf, *(int8_t *)var);
+ break;
+ case TSDB_DATA_TYPE_SMALLINT:
+ printf(" %15d |", *(int16_t *)var);
+ tlen += taosEncodeFixedI16(&pDataBuf, *(int16_t *)var);
+ break;
+ case TSDB_DATA_TYPE_USMALLINT:
+ printf(" %15d |", *(uint16_t *)var);
+ tlen += taosEncodeFixedU16(&pDataBuf, *(uint16_t *)var);
+ break;
+ case TSDB_DATA_TYPE_INT:
+ printf(" %15d |", *(int32_t *)var);
+ tlen += taosEncodeFixedI32(&pDataBuf, *(int32_t *)var);
+ break;
+ case TSDB_DATA_TYPE_FLOAT:
+ printf(" %15f |", *(float *)var);
+ tlen += taosEncodeBinary(&pDataBuf, var, sizeof(float));
+ break;
+ case TSDB_DATA_TYPE_UINT:
+ printf(" %15u |", *(uint32_t *)var);
+ tlen += taosEncodeFixedU32(&pDataBuf, *(uint32_t *)var);
+ break;
+ case TSDB_DATA_TYPE_BIGINT:
+ printf(" %15ld |", *(int64_t *)var);
+ tlen += taosEncodeFixedI64(&pDataBuf, *(int64_t *)var);
+ break;
+ case TSDB_DATA_TYPE_DOUBLE:
+ printf(" %15lf |", *(double *)var);
+ tlen += taosEncodeBinary(&pDataBuf, var, sizeof(double));
+ case TSDB_DATA_TYPE_UBIGINT:
+ printf(" %15lu |", *(uint64_t *)var);
+ tlen += taosEncodeFixedU64(&pDataBuf, *(uint64_t *)var);
+ break;
+ case TSDB_DATA_TYPE_NCHAR: {
+ char tmpChar[100] = {0};
+ strncpy(tmpChar, varDataVal(var), varDataLen(var));
+ printf(" %s |", tmpChar);
+ tlen += taosEncodeBinary(&pDataBuf, varDataVal(var), varDataLen(var));
+ break;
+ }
+ case TSDB_DATA_TYPE_VARCHAR: { // TSDB_DATA_TYPE_BINARY
+ char tmpChar[100] = {0};
+ strncpy(tmpChar, varDataVal(var), varDataLen(var));
+ printf(" %s |", tmpChar);
+ tlen += taosEncodeBinary(&pDataBuf, varDataVal(var), varDataLen(var));
+ break;
+ }
+ case TSDB_DATA_TYPE_VARBINARY:
+ // TODO: add binary/varbinary
+ TASSERT(0);
+ default:
+ printf("the column type %" PRIi16 " is undefined\n", pColInfoData->info.type);
+ TASSERT(0);
+ break;
+ }
+ }
+ // if ((tlen > 0) && (skey != TSKEY_INITIAL_VAL)) {
+ if (tlen > 0) {
+ int32_t fid = (int32_t)(TSDB_KEY_FID(skey, daysPerFile, pCfg->precision));
+
+ // Step 2: Set the DFile for storage of SMA index, and iterate/split the TSma data and store to B+Tree index
+ // file
+ // - Set and open the DFile or the B+Tree file
+ // TODO: tsdbStartTSmaCommit();
+ if (fid != tSmaH.dFile.fid) {
+ if (tSmaH.dFile.fid != SMA_IVLD_FID) {
+ tdSmaEndCommit(pEnv);
+ smaCloseDBF(&tSmaH.dFile);
+ }
+ tdSetTSmaDataFile(&tSmaH, indexUid, fid);
+ if (smaOpenDBF(pEnv->dbEnv, &tSmaH.dFile) != 0) {
+ smaWarn("vgId:%d open DB file %s failed since %s", SMA_VID(pSma),
+ tSmaH.dFile.path ? tSmaH.dFile.path : "path is NULL", tstrerror(terrno));
+ tdDestroyTSmaWriteH(&tSmaH);
+ tdUnRefSmaStat(pSma, pStat);
+ return TSDB_CODE_FAILED;
+ }
+ tdSmaBeginCommit(pEnv);
+ }
+
+ if (tdInsertTSmaBlocks(&tSmaH, &smaKey, SMA_KEY_LEN, dataBuf, tlen, &pEnv->txn) != 0) {
+ smaWarn("vgId:%d insert tSma data blocks fail for index %" PRIi64 ", skey %" PRIi64 ", groupId %" PRIi64
+ " since %s",
+ SMA_VID(pSma), indexUid, skey, groupId, tstrerror(terrno));
+ tdSmaEndCommit(pEnv);
+ tdDestroyTSmaWriteH(&tSmaH);
+ tdUnRefSmaStat(pSma, pStat);
+ return TSDB_CODE_FAILED;
+ }
+ smaDebug("vgId:%d insert tSma data blocks success for index %" PRIi64 ", skey %" PRIi64 ", groupId %" PRIi64,
+ SMA_VID(pSma), indexUid, skey, groupId);
+ // TODO:tsdbEndTSmaCommit();
+
+ // Step 3: reset the SSmaStat
+ tdResetExpiredWindow(pSma, pStat, indexUid, skey);
+ } else {
+ smaWarn("vgId:%d invalid data skey:%" PRIi64 ", tlen %" PRIi32 " during insert tSma data for %" PRIi64,
+ SMA_VID(pSma), skey, tlen, indexUid);
+ }
+
+ printf("\n");
+ }
+ }
+ tdSmaEndCommit(pEnv); // TODO: not commit for every insert
+ tdDestroyTSmaWriteH(&tSmaH);
+ tdUnRefSmaStat(pSma, pStat);
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t tdDropTSmaData(SSma *pSma, int64_t indexUid) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ if ((code = tdDropTSmaDataImpl(pSma, indexUid)) < 0) {
+ smaWarn("vgId:%d drop tSma data failed since %s", SMA_VID(pSma), tstrerror(terrno));
+ }
+ return code;
+}
+
+/**
+ * @brief Insert TSma data blocks to DB File build by B+Tree
+ *
+ * @param pSmaH
+ * @param smaKey tableUid-colId-skeyOfWindow(8-2-8)
+ * @param keyLen
+ * @param pData
+ * @param dataLen
+ * @return int32_t
+ */
+static int32_t tdInsertTSmaBlocks(STSmaWriteH *pSmaH, void *smaKey, int32_t keyLen, void *pData, int32_t dataLen,
+ TXN *txn) {
+ SDBFile *pDBFile = &pSmaH->dFile;
+
+ // TODO: insert sma data blocks into B+Tree(TDB)
+ if (smaSaveSmaToDB(pDBFile, smaKey, keyLen, pData, dataLen, txn) != 0) {
+ smaWarn("vgId:%d insert sma data blocks into %s: smaKey %" PRIx64 "-%" PRIx64 ", dataLen %" PRIu32 " fail",
+ SMA_VID(pSmaH->pSma), pDBFile->path, *(int64_t *)smaKey, *(int64_t *)POINTER_SHIFT(smaKey, 8), dataLen);
+ return TSDB_CODE_FAILED;
+ }
+ smaDebug("vgId:%d insert sma data blocks into %s: smaKey %" PRIx64 "-%" PRIx64 ", dataLen %" PRIu32 " succeed",
+ SMA_VID(pSmaH->pSma), pDBFile->path, *(int64_t *)smaKey, *(int64_t *)POINTER_SHIFT(smaKey, 8), dataLen);
+
+#ifdef _TEST_SMA_PRINT_DEBUG_LOG_
+ uint32_t valueSize = 0;
+ void *data = tdGetSmaDataByKey(pDBFile, smaKey, keyLen, &valueSize);
+ ASSERT(data != NULL);
+ for (uint32_t v = 0; v < valueSize; v += 8) {
+ smaWarn("vgId:%d insert sma data val[%d] %" PRIi64, REPO_ID(pSmaH->pTsdb), v, *(int64_t *)POINTER_SHIFT(data, v));
+ }
+#endif
+ return TSDB_CODE_SUCCESS;
+}
+
+/**
+ * @brief When sma data received from stream computing, make the relative expired window valid.
+ *
+ * @param pSma
+ * @param pStat
+ * @param indexUid
+ * @param skey
+ * @return int32_t
+ */
+static int32_t tdResetExpiredWindow(SSma *pSma, SSmaStat *pStat, int64_t indexUid, TSKEY skey) {
+ SSmaStatItem *pItem = NULL;
+
+ tdRefSmaStat(pSma, pStat);
+
+ if (pStat && SMA_STAT_ITEMS(pStat)) {
+ pItem = taosHashGet(SMA_STAT_ITEMS(pStat), &indexUid, sizeof(indexUid));
+ }
+ if ((pItem) && ((pItem = *(SSmaStatItem **)pItem))) {
+ // pItem resides in hash buffer all the time unless drop sma index
+ // TODO: multithread protect
+ if (taosHashRemove(pItem->expiredWindows, &skey, sizeof(TSKEY)) != 0) {
+ // error handling
+ tdUnRefSmaStat(pSma, pStat);
+ smaWarn("vgId:%d remove skey %" PRIi64 " from expired window for sma index %" PRIi64 " fail", SMA_VID(pSma),
+ skey, indexUid);
+ return TSDB_CODE_FAILED;
+ }
+ smaDebug("vgId:%d remove skey %" PRIi64 " from expired window for sma index %" PRIi64 " succeed", SMA_VID(pSma),
+ skey, indexUid);
+ // TODO: use a standalone interface to received state upate notification from stream computing module.
+ /**
+ * @brief state
+ * - When SMA env init in TSDB, its status is TSDB_SMA_STAT_OK.
+ * - In startup phase of stream computing module, it should notify the SMA env in TSDB to expired if needed(e.g.
+ * when batch data caculation not finised)
+ * - When TSDB_SMA_STAT_OK, the stream computing module should also notify that to the SMA env in TSDB.
+ */
+ pItem->state = TSDB_SMA_STAT_OK;
+ } else {
+ // error handling
+ tdUnRefSmaStat(pSma, pStat);
+ smaWarn("vgId:%d expired window %" PRIi64 " not exists for sma index %" PRIi64, SMA_VID(pSma), skey, indexUid);
+ return TSDB_CODE_FAILED;
+ }
+
+ tdUnRefSmaStat(pSma, pStat);
+ return TSDB_CODE_SUCCESS;
+}
+
+/**
+ * @brief Drop tSma data and local cache
+ * - insert/query reference
+ * @param pSma
+ * @param msg
+ * @return int32_t
+ */
+static int32_t tdDropTSmaDataImpl(SSma *pSma, int64_t indexUid) {
+ SSmaEnv *pEnv = atomic_load_ptr(&SMA_TSMA_ENV(pSma));
+
+ // clear local cache
+ if (pEnv) {
+ smaDebug("vgId:%d drop tSma local cache for %" PRIi64, SMA_VID(pSma), indexUid);
+
+ SSmaStatItem *pItem = taosHashGet(SMA_ENV_STAT_ITEMS(pEnv), &indexUid, sizeof(indexUid));
+ if ((pItem) || ((pItem = *(SSmaStatItem **)pItem))) {
+ if (tdSmaStatIsDropped(pItem)) {
+ smaDebug("vgId:%d tSma stat is already dropped for %" PRIi64, SMA_VID(pSma), indexUid);
+ return TSDB_CODE_TDB_INVALID_ACTION; // TODO: duplicate drop msg would be intercepted by mnode
+ }
+
+ tdWLockSmaEnv(pEnv);
+ if (tdSmaStatIsDropped(pItem)) {
+ tdUnLockSmaEnv(pEnv);
+ smaDebug("vgId:%d tSma stat is already dropped for %" PRIi64, SMA_VID(pSma), indexUid);
+ return TSDB_CODE_TDB_INVALID_ACTION; // TODO: duplicate drop msg would be intercepted by mnode
+ }
+ tdSmaStatSetDropped(pItem);
+ tdUnLockSmaEnv(pEnv);
+
+ int32_t nSleep = 0;
+ int32_t refVal = INT32_MAX;
+ while (true) {
+ if ((refVal = T_REF_VAL_GET(SMA_ENV_STAT(pEnv))) <= 0) {
+ smaDebug("vgId:%d drop index %" PRIi64 " since refVal=%d", SMA_VID(pSma), indexUid, refVal);
+ break;
+ }
+ smaDebug("vgId:%d wait 1s to drop index %" PRIi64 " since refVal=%d", SMA_VID(pSma), indexUid, refVal);
+ taosSsleep(1);
+ if (++nSleep > SMA_DROP_EXPIRED_TIME) {
+ smaDebug("vgId:%d drop index %" PRIi64 " after wait %d (refVal=%d)", SMA_VID(pSma), indexUid, nSleep,
+ refVal);
+ break;
+ };
+ }
+
+ tdFreeSmaStatItem(pItem);
+ smaDebug("vgId:%d getTSmaDataImpl failed since no index %" PRIi64 " in local cache", SMA_VID(pSma), indexUid);
+ }
+ }
+ // clear sma data files
+ // TODO:
+ return TSDB_CODE_SUCCESS;
+}
+
+/**
+ * @brief
+ *
+ * @param pSma Return the data between queryWin and fill the pData.
+ * @param pData
+ * @param indexUid
+ * @param pQuerySKey
+ * @param nMaxResult The query invoker should control the nMaxResult need to return to avoid OOM.
+ * @return int32_t
+ */
+static int32_t tdGetTSmaDataImpl(SSma *pSma, char *pData, int64_t indexUid, TSKEY querySKey, int32_t nMaxResult) {
+ SSmaEnv *pEnv = atomic_load_ptr(&SMA_TSMA_ENV(pSma));
+ SSmaStat *pStat = NULL;
+
+ if (!pEnv) {
+ terrno = TSDB_CODE_INVALID_PTR;
+ smaWarn("vgId:%d getTSmaDataImpl failed since pTSmaEnv is NULL", SMA_VID(pSma));
+ return TSDB_CODE_FAILED;
+ }
+
+ pStat = SMA_ENV_STAT(pEnv);
+
+ tdRefSmaStat(pSma, pStat);
+ SSmaStatItem *pItem = taosHashGet(SMA_ENV_STAT_ITEMS(pEnv), &indexUid, sizeof(indexUid));
+ if (!pItem || !(pItem = *(SSmaStatItem **)pItem)) {
+ // Normally pItem should not be NULL, mark all windows as expired and notify query module to fetch raw TS data if
+ // it's NULL.
+ tdUnRefSmaStat(pSma, pStat);
+ terrno = TSDB_CODE_TDB_INVALID_ACTION;
+ smaDebug("vgId:%d getTSmaDataImpl failed since no index %" PRIi64, SMA_VID(pSma), indexUid);
+ return TSDB_CODE_FAILED;
+ }
+
+#if 0
+ int32_t nQueryWin = taosArrayGetSize(pQuerySKey);
+ for (int32_t n = 0; n < nQueryWin; ++n) {
+ TSKEY skey = taosArrayGet(pQuerySKey, n);
+ if (taosHashGet(pItem->expiredWindows, &skey, sizeof(TSKEY))) {
+ // TODO: mark this window as expired.
+ }
+ }
+#endif
+
+#if 1
+ int8_t smaStat = 0;
+ if (!tdSmaStatIsOK(pItem, &smaStat)) { // TODO: multiple check for large scale sma query
+ tdUnRefSmaStat(pSma, pStat);
+ terrno = TSDB_CODE_TDB_INVALID_SMA_STAT;
+ smaWarn("vgId:%d getTSmaDataImpl failed from index %" PRIi64 " since %s %" PRIi8, SMA_VID(pSma), indexUid,
+ tstrerror(terrno), smaStat);
+ return TSDB_CODE_FAILED;
+ }
+
+ if (taosHashGet(pItem->expiredWindows, &querySKey, sizeof(TSKEY))) {
+ // TODO: mark this window as expired.
+ smaDebug("vgId:%d skey %" PRIi64 " of window exists in expired window for index %" PRIi64, SMA_VID(pSma),
+ querySKey, indexUid);
+ } else {
+ smaDebug("vgId:%d skey %" PRIi64 " of window not in expired window for index %" PRIi64, SMA_VID(pSma), querySKey,
+ indexUid);
+ }
+
+ STSma *pTSma = pItem->pTSma;
+#endif
+
+#if 1
+ STSmaReadH tReadH = {0};
+ tdInitTSmaReadH(&tReadH, pSma, pTSma->interval, pTSma->intervalUnit);
+ smaCloseDBF(&tReadH.dFile);
+
+ tdUnRefSmaStat(pSma, pStat);
+
+ tdInitTSmaFile(&tReadH, indexUid, querySKey);
+ if (smaOpenDBF(pEnv->dbEnv, &tReadH.dFile) != 0) {
+ smaWarn("vgId:%d open DBF %s failed since %s", SMA_VID(pSma), tReadH.dFile.path, tstrerror(terrno));
+ return TSDB_CODE_FAILED;
+ }
+
+ char smaKey[SMA_KEY_LEN] = {0};
+ void *pSmaKey = &smaKey;
+ int64_t queryGroupId = 1;
+ tdEncodeTSmaKey(queryGroupId, querySKey, (void **)&pSmaKey);
+
+ smaDebug("vgId:%d get sma data from %s: smaKey %" PRIx64 "-%" PRIx64 ", keyLen %d", SMA_VID(pSma),
+ tReadH.dFile.path, *(int64_t *)smaKey, *(int64_t *)POINTER_SHIFT(smaKey, 8), SMA_KEY_LEN);
+
+ void *result = NULL;
+ int32_t valueSize = 0;
+ if (!(result = smaGetSmaDataByKey(&tReadH.dFile, smaKey, SMA_KEY_LEN, &valueSize))) {
+ smaWarn("vgId:%d get sma data failed from smaIndex %" PRIi64 ", smaKey %" PRIx64 "-%" PRIx64 " since %s",
+ SMA_VID(pSma), indexUid, *(int64_t *)smaKey, *(int64_t *)POINTER_SHIFT(smaKey, 8), tstrerror(terrno));
+ smaCloseDBF(&tReadH.dFile);
+ return TSDB_CODE_FAILED;
+ }
+ #endif
+
+#ifdef _TEST_SMA_PRINT_DEBUG_LOG_
+ for (uint32_t v = 0; v < valueSize; v += 8) {
+ smaWarn("vgId:%d get sma data v[%d]=%" PRIi64, SMA_VID(pSma), v, *(int64_t *)POINTER_SHIFT(result, v));
+ }
+#endif
+ taosMemoryFreeClear(result); // TODO: fill the result to output
+
+#if 0
+ int32_t nResult = 0;
+ int64_t lastKey = 0;
+
+ while (true) {
+ if (nResult >= nMaxResult) {
+ break;
+ }
+
+ // set and open the file according to the STSma param
+ if (tdSetAndOpenTSmaFile(&tReadH, queryWin)) {
+ char bTree[100] = "\0";
+ while (strncmp(bTree, "has more nodes", 100) == 0) {
+ if (nResult >= nMaxResult) {
+ break;
+ }
+ // tdGetDataFromBTree(bTree, queryWin, lastKey)
+ // fill the pData
+ ++nResult;
+ }
+ }
+ }
+#endif
+ // read data from file and fill the result
+ smaCloseDBF(&tReadH.dFile);
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t tdProcessTSmaCreate(SSma *pSma, char *pMsg) {
+ #if 0
+ SSmaCfg vCreateSmaReq = {0};
+ if (!tDeserializeSVCreateTSmaReq(pMsg, &vCreateSmaReq)) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ smaWarn("vgId:%d tsma create msg received but deserialize failed since %s", SMA_VID(pSma), terrstr(terrno));
+ return -1;
+ }
+
+ smaDebug("vgId:%d tsma create msg %s:%" PRIi64 " for table %" PRIi64 " received", SMA_VID(pSma),
+ vCreateSmaReq.tSma.indexName, vCreateSmaReq.tSma.indexUid, vCreateSmaReq.tSma.tableUid);
+
+ // record current timezone of server side
+ vCreateSmaReq.tSma.timezoneInt = tsTimezone;
+
+ if (metaCreateTSma(SMA_META(pSma), &vCreateSmaReq) < 0) {
+ // TODO: handle error
+ smaWarn("vgId:%d tsma %s:%" PRIi64 " create failed for table %" PRIi64 " since %s", SMA_VID(pSma),
+ vCreateSmaReq.tSma.indexName, vCreateSmaReq.tSma.indexUid, vCreateSmaReq.tSma.tableUid, terrstr(terrno));
+ tdDestroyTSma(&vCreateSmaReq.tSma);
+ return -1;
+ }
+
+ tdTSmaAdd(pSma, 1);
+
+ tdDestroyTSma(&vCreateSmaReq.tSma);
+ // TODO: return directly or go on follow steps?
+#endif
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t tdDropTSma(SSma *pSma, char *pMsg) {
+#if 0
+ SVDropTSmaReq vDropSmaReq = {0};
+ if (!tDeserializeSVDropTSmaReq(pMsg, &vDropSmaReq)) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return -1;
+ }
+
+ // TODO: send msg to stream computing to drop tSma
+ // if ((send msg to stream computing) < 0) {
+ // tdDestroyTSma(&vCreateSmaReq);
+ // return -1;
+ // }
+ //
+
+ if (metaDropTSma(SMA_META(pSma), vDropSmaReq.indexUid) < 0) {
+ // TODO: handle error
+ return -1;
+ }
+
+ if (tdDropTSmaData(pSma, vDropSmaReq.indexUid) < 0) {
+ // TODO: handle error
+ return -1;
+ }
+
+ tdTSmaSub(pSma, 1);
+#endif
+
+ // TODO: return directly or go on follow steps?
+ return TSDB_CODE_SUCCESS;
+}
+
+static SSmaStatItem *tdNewSmaStatItem(int8_t state) {
+ SSmaStatItem *pItem = NULL;
+
+ pItem = (SSmaStatItem *)taosMemoryCalloc(1, sizeof(SSmaStatItem));
+ if (!pItem) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return NULL;
+ }
+
+ pItem->state = state;
+ pItem->expiredWindows = taosHashInit(SMA_STATE_ITEM_HASH_SLOT, taosGetDefaultHashFunction(TSDB_DATA_TYPE_TIMESTAMP),
+ true, HASH_ENTRY_LOCK);
+ if (!pItem->expiredWindows) {
+ taosMemoryFreeClear(pItem);
+ return NULL;
+ }
+
+ return pItem;
+}
+
+static int32_t tdSetExpiredWindow(SSma *pSma, SHashObj *pItemsHash, int64_t indexUid, int64_t winSKey,
+ int64_t version) {
+ SSmaStatItem *pItem = taosHashGet(pItemsHash, &indexUid, sizeof(indexUid));
+ if (!pItem) {
+ // TODO: use TSDB_SMA_STAT_EXPIRED and update by stream computing later
+ pItem = tdNewSmaStatItem(TSDB_SMA_STAT_OK); // TODO use the real state
+ if (!pItem) {
+ // Response to stream computing: OOM
+ // For query, if the indexUid not found, the TSDB should tell query module to query raw TS data.
+ return TSDB_CODE_FAILED;
+ }
+
+ // cache smaMeta
+ STSma *pTSma = metaGetSmaInfoByIndex(SMA_META(pSma), indexUid, true);
+ if (!pTSma) {
+ terrno = TSDB_CODE_TDB_NO_SMA_INDEX_IN_META;
+ taosHashCleanup(pItem->expiredWindows);
+ taosMemoryFree(pItem);
+ smaWarn("vgId:%d update expired window failed for smaIndex %" PRIi64 " since %s", SMA_VID(pSma), indexUid,
+ tstrerror(terrno));
+ return TSDB_CODE_FAILED;
+ }
+ pItem->pTSma = pTSma;
+
+ if (taosHashPut(pItemsHash, &indexUid, sizeof(indexUid), &pItem, sizeof(pItem)) != 0) {
+ // If error occurs during put smaStatItem, free the resources of pItem
+ taosHashCleanup(pItem->expiredWindows);
+ taosMemoryFree(pItem);
+ return TSDB_CODE_FAILED;
+ }
+ } else if (!(pItem = *(SSmaStatItem **)pItem)) {
+ terrno = TSDB_CODE_INVALID_PTR;
+ return TSDB_CODE_FAILED;
+ }
+
+ if (taosHashPut(pItem->expiredWindows, &winSKey, sizeof(TSKEY), &version, sizeof(version)) != 0) {
+ // If error occurs during taosHashPut expired windows, remove the smaIndex from pSma->pSmaStat, thus TSDB would
+ // tell query module to query raw TS data.
+ // N.B.
+ // 1) It is assumed to be extemely little probability event of fail to taosHashPut.
+ // 2) This would solve the inconsistency to some extent, but not completely, unless we record all expired
+ // windows failed to put into hash table.
+ taosHashCleanup(pItem->expiredWindows);
+ taosMemoryFreeClear(pItem->pTSma);
+ taosHashRemove(pItemsHash, &indexUid, sizeof(indexUid));
+ smaWarn("vgId:%d smaIndex %" PRIi64 ", put skey %" PRIi64 " to expire window fail", SMA_VID(pSma), indexUid,
+ winSKey);
+ return TSDB_CODE_FAILED;
+ }
+
+ smaDebug("vgId:%d smaIndex %" PRIi64 ", put skey %" PRIi64 " to expire window succeed", SMA_VID(pSma), indexUid,
+ winSKey);
+ return TSDB_CODE_SUCCESS;
+}
+
+
+
+/**
+ * @brief Update expired window according to msg from stream computing module.
+ *
+ * @param pSma
+ * @param msg SSubmitReq
+ * @return int32_t
+ */
+int32_t tdUpdateExpiredWindowImpl(SSma *pSma, SSubmitReq *pMsg, int64_t version) {
+ // no time-range-sma, just return success
+ if (atomic_load_16(&SMA_TSMA_NUM(pSma)) <= 0) {
+ smaTrace("vgId:%d not update expire window since no tSma", SMA_VID(pSma));
+ return TSDB_CODE_SUCCESS;
+ }
+
+ if (!SMA_META(pSma)) {
+ terrno = TSDB_CODE_INVALID_PTR;
+ smaError("vgId:%d update expire window failed since no meta ptr", SMA_VID(pSma));
+ return TSDB_CODE_FAILED;
+ }
+
+ if (tdCheckAndInitSmaEnv(pSma, TSDB_SMA_TYPE_TIME_RANGE) < 0) {
+ smaError("vgId:%d init sma env failed since %s", SMA_VID(pSma), terrstr(terrno));
+ terrno = TSDB_CODE_TDB_INIT_FAILED;
+ return TSDB_CODE_FAILED;
+ }
+
+ // Firstly, assume that tSma can only be created on super table/normal table.
+ // getActiveTimeWindow
+
+ SSmaEnv *pEnv = SMA_TSMA_ENV(pSma);
+ SSmaStat *pStat = SMA_ENV_STAT(pEnv);
+ SHashObj *pItemsHash = SMA_ENV_STAT_ITEMS(pEnv);
+
+ TASSERT(pEnv && pStat && pItemsHash);
+
+ // basic procedure
+ // TODO: optimization
+ tdRefSmaStat(pSma, pStat);
+
+ SSubmitMsgIter msgIter = {0};
+ SSubmitBlk *pBlock = NULL;
+ SInterval interval = {0};
+ TSKEY lastWinSKey = INT64_MIN;
+
+ if (tInitSubmitMsgIter(pMsg, &msgIter) < 0) {
+ return TSDB_CODE_FAILED;
+ }
+
+ while (true) {
+ tGetSubmitMsgNext(&msgIter, &pBlock);
+ if (!pBlock) break;
+
+ STSmaWrapper *pSW = NULL;
+ STSma *pTSma = NULL;
+
+ SSubmitBlkIter blkIter = {0};
+ if (tInitSubmitBlkIter(&msgIter, pBlock, &blkIter) < 0) {
+ pSW = tdFreeTSmaWrapper(pSW);
+ break;
+ }
+
+ while (true) {
+ STSRow *row = tGetSubmitBlkNext(&blkIter);
+ if (!row) {
+ tdFreeTSmaWrapper(pSW);
+ break;
+ }
+ if (!pSW || (pTSma->tableUid != pBlock->suid)) {
+ if (pSW) {
+ pSW = tdFreeTSmaWrapper(pSW);
+ }
+ if (!(pSW = metaGetSmaInfoByTable(SMA_META(pSma), pBlock->suid))) {
+ break;
+ }
+ if ((pSW->number) <= 0 || !pSW->tSma) {
+ pSW = tdFreeTSmaWrapper(pSW);
+ break;
+ }
+
+ pTSma = pSW->tSma;
+
+ interval.interval = pTSma->interval;
+ interval.intervalUnit = pTSma->intervalUnit;
+ interval.offset = pTSma->offset;
+ interval.precision = SMA_TSDB_CFG(pSma)->precision;
+ interval.sliding = pTSma->sliding;
+ interval.slidingUnit = pTSma->slidingUnit;
+ }
+
+ TSKEY winSKey = taosTimeTruncate(TD_ROW_KEY(row), &interval, interval.precision);
+
+ if (lastWinSKey != winSKey) {
+ lastWinSKey = winSKey;
+ if (tdSetExpiredWindow(pSma, pItemsHash, pTSma->indexUid, winSKey, version) < 0) {
+ tdUnRefSmaStat(pSma, pStat);
+ return TSDB_CODE_FAILED;
+ }
+ } else {
+ smaDebug("vgId:%d smaIndex %" PRIi64 ", put skey %" PRIi64 " to expire window ignore as duplicated",
+ SMA_VID(pSma), pTSma->indexUid, winSKey);
+ }
+ }
+ }
+
+ tdUnRefSmaStat(pSma, pStat);
+
+ return TSDB_CODE_SUCCESS;
+}
+
+
+int32_t tdUpdateExpireWindow(SSma *pSma, SSubmitReq *pMsg, int64_t version) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ if ((code = tdUpdateExpiredWindowImpl(pSma, pMsg, version)) < 0) {
+ smaWarn("vgId:%d update expired sma window failed since %s", SMA_VID(pSma), tstrerror(terrno));
+ }
+ return code;
+}
+
+int32_t tdGetTSmaData(SSma *pSma, char *pData, int64_t indexUid, TSKEY querySKey, int32_t nMaxResult) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ if ((code = tdGetTSmaDataImpl(pSma, pData, indexUid, querySKey, nMaxResult)) < 0) {
+ smaWarn("vgId:%d get tSma data failed since %s", SMA_VID(pSma), tstrerror(terrno));
+ }
+ return code;
+}
+
+
diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c
index 9526451907..873db62dd8 100644
--- a/source/dnode/vnode/src/tq/tq.c
+++ b/source/dnode/vnode/src/tq/tq.c
@@ -14,6 +14,7 @@
*/
#include "tq.h"
+#include "tqueue.h"
int32_t tqInit() {
//
@@ -234,7 +235,7 @@ int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver)
if (msgType != TDMT_VND_SUBMIT) return 0;
// make sure msgType == TDMT_VND_SUBMIT
- if (tsdbUpdateSmaWindow(pTq->pVnode->pTsdb, msg, ver) != 0) {
+ if (tdUpdateExpireWindow(pTq->pVnode->pSma, msg, ver) != 0) {
return -1;
}
@@ -1031,3 +1032,90 @@ int32_t tqProcessTaskExec(STQ* pTq, char* msg, int32_t msgLen, int32_t workerId)
}
return 0;
}
+
+int32_t tqProcessStreamTrigger2(STQ* pTq, SSubmitReq* pReq, int64_t ver) {
+ void* pIter = NULL;
+ bool failed = false;
+
+ SStreamDataSubmit* pSubmit = taosAllocateQitem(sizeof(SStreamDataSubmit), DEF_QITEM);
+ if (pSubmit == NULL) {
+ failed = true;
+ }
+ pSubmit->dataRef = taosMemoryMalloc(sizeof(int32_t));
+ if (pSubmit->dataRef == NULL) {
+ failed = true;
+ }
+
+ pSubmit->type = STREAM_DATA_TYPE_SUBMIT_BLOCK;
+ pSubmit->sourceVer = ver;
+ pSubmit->sourceVg = pTq->pVnode->config.vgId;
+ pSubmit->data = pReq;
+ *pSubmit->dataRef = 1;
+
+ while (1) {
+ pIter = taosHashIterate(pTq->pStreamTasks, pIter);
+ if (pIter == NULL) break;
+ SStreamTask* pTask = (SStreamTask*)pIter;
+ if (pTask->inputType != STREAM_INPUT__DATA_SUBMIT) continue;
+
+ int8_t inputStatus = atomic_load_8(&pTask->inputStatus);
+ if (inputStatus == TASK_INPUT_STATUS__NORMAL) {
+ if (failed) {
+ atomic_store_8(&pTask->inputStatus, TASK_INPUT_STATUS__FAILED);
+ continue;
+ }
+
+ streamDataSubmitRefInc(pSubmit);
+ taosWriteQitem(pTask->inputQ, pSubmit);
+
+ int8_t execStatus = atomic_load_8(&pTask->status);
+ if (execStatus == TASK_STATUS__IDLE || execStatus == TASK_STATUS__CLOSING) {
+ // TODO dispatch task launch msg to fetch queue
+ }
+
+ } else {
+ // blocked or stopped, do nothing
+ }
+ }
+
+ if (!failed) {
+ streamDataSubmitRefDec(pSubmit);
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+int32_t tqProcessTaskExec2(STQ* pTq, char* msg, int32_t msgLen) {
+ SStreamTaskExecReq req = {0};
+ tDecodeSStreamTaskExecReq(msg, &req);
+ int32_t taskId = req.taskId;
+
+ SStreamTask* pTask = taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t));
+ ASSERT(pTask);
+ ASSERT(pTask->inputType == TASK_INPUT_TYPE__DATA_BLOCK);
+
+ // enqueue
+ int32_t inputStatus = streamEnqueueDataBlk(pTask, (SStreamDataBlock*)req.data);
+ if (inputStatus == TASK_INPUT_STATUS__BLOCKED) {
+ // TODO rsp blocked
+ return 0;
+ }
+
+ // try exec
+ int8_t execStatus = atomic_val_compare_exchange_8(&pTask->status, TASK_STATUS__IDLE, TASK_STATUS__EXECUTING);
+ if (execStatus == TASK_STATUS__IDLE) {
+ if (streamTaskRun(pTask) < 0) {
+ atomic_store_8(&pTask->status, TASK_STATUS__CLOSING);
+
+ goto FAIL;
+ }
+ } else if (execStatus == TASK_STATUS__EXECUTING) {
+ return 0;
+ }
+
+ // TODO rsp success
+ return 0;
+FAIL:
+ return -1;
+}
diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c
index 996d789e24..8fbd1e24e1 100644
--- a/source/dnode/vnode/src/tq/tqRead.c
+++ b/source/dnode/vnode/src/tq/tqRead.c
@@ -91,7 +91,7 @@ int32_t tqRetrieveDataBlock(SArray** ppCols, STqReadHandle* pHandle, uint64_t* p
// TODO set to real sversion
*pUid = 0;
- int32_t sversion = 0;
+ int32_t sversion = 1;
if (pHandle->sver != sversion || pHandle->cachedSchemaUid != pHandle->msgIter.suid) {
pHandle->pSchema = metaGetTbTSchema(pHandle->pVnodeMeta, pHandle->msgIter.uid, sversion);
diff --git a/source/dnode/vnode/src/tsdb/tsdbCommit.c b/source/dnode/vnode/src/tsdb/tsdbCommit.c
index d180799e58..76d5c3cb3a 100644
--- a/source/dnode/vnode/src/tsdb/tsdbCommit.c
+++ b/source/dnode/vnode/src/tsdb/tsdbCommit.c
@@ -465,7 +465,7 @@ static int tsdbCreateCommitIters(SCommitH *pCommith) {
pTbData = (STbData *)pNode->pData;
pCommitIter = pCommith->iters + i;
- pTSchema = metaGetTbTSchema(REPO_META(pRepo), pTbData->uid, 0); // TODO: schema version
+ pTSchema = metaGetTbTSchema(REPO_META(pRepo), pTbData->uid, 1); // TODO: schema version
if (pTSchema) {
pCommitIter->pIter = tSkipListCreateIter(pTbData->pData);
@@ -912,7 +912,7 @@ static int tsdbMoveBlkIdx(SCommitH *pCommith, SBlockIdx *pIdx) {
while (bidx < nBlocks) {
if (!pTSchema && !tsdbCommitIsSameFile(pCommith, bidx)) {
// Set commit table
- pTSchema = metaGetTbTSchema(REPO_META(pTsdb), pIdx->uid, 0); // TODO: schema version
+ pTSchema = metaGetTbTSchema(REPO_META(pTsdb), pIdx->uid, 1); // TODO: schema version
if (!pTSchema) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
diff --git a/source/dnode/vnode/src/tsdb/tsdbCommit2.c b/source/dnode/vnode/src/tsdb/tsdbCommit2.c
index 585ef63531..844cfc094b 100644
--- a/source/dnode/vnode/src/tsdb/tsdbCommit2.c
+++ b/source/dnode/vnode/src/tsdb/tsdbCommit2.c
@@ -16,6 +16,8 @@
#include "tsdb.h"
int tsdbBegin(STsdb *pTsdb) {
+ if (!pTsdb) return 0;
+
STsdbMemTable *pMem;
if (tsdbMemTableCreate(pTsdb, &pTsdb->mem) < 0) {
diff --git a/source/dnode/vnode/src/tsdb/tsdbFS.c b/source/dnode/vnode/src/tsdb/tsdbFS.c
index 52b466d0f6..6dfd73158e 100644
--- a/source/dnode/vnode/src/tsdb/tsdbFS.c
+++ b/source/dnode/vnode/src/tsdb/tsdbFS.c
@@ -37,12 +37,12 @@ static void tsdbScanAndTryFixDFilesHeader(STsdb *pRepo, int32_t *nExpired);
// static int tsdbProcessExpiredFS(STsdb *pRepo);
// static int tsdbCreateMeta(STsdb *pRepo);
-static void tsdbGetRootDir(int repoid, int8_t level, char dirName[]) {
- snprintf(dirName, TSDB_FILENAME_LEN, "vnode/vnode%d/%s", repoid, TSDB_LEVEL_DNAME[level]);
+static void tsdbGetRootDir(int repoid, const char* dir, char dirName[]) {
+ snprintf(dirName, TSDB_FILENAME_LEN, "vnode/vnode%d/%s", repoid, dir);
}
-static void tsdbGetDataDir(int repoid, int8_t level, char dirName[]) {
- snprintf(dirName, TSDB_FILENAME_LEN, "vnode/vnode%d/%s/data", repoid, TSDB_LEVEL_DNAME[level]);
+static void tsdbGetDataDir(int repoid, const char* dir, char dirName[]) {
+ snprintf(dirName, TSDB_FILENAME_LEN, "vnode/vnode%d/%s/data", repoid, dir);
}
// For backward compatibility
@@ -591,7 +591,7 @@ static int tsdbComparFidFSet(const void *arg1, const void *arg2) {
static void tsdbGetTxnFname(STsdb *pRepo, TSDB_TXN_FILE_T ftype, char fname[]) {
snprintf(fname, TSDB_FILENAME_LEN, "%s/vnode/vnode%d/%s/%s", tfsGetPrimaryPath(REPO_TFS(pRepo)), REPO_ID(pRepo),
- TSDB_LEVEL_DNAME[REPO_LEVEL(pRepo)], tsdbTxnFname[ftype]);
+ pRepo->dir, tsdbTxnFname[ftype]);
}
static int tsdbOpenFSFromCurrent(STsdb *pRepo) {
@@ -721,7 +721,7 @@ static int tsdbScanRootDir(STsdb *pRepo) {
STsdbFS *pfs = REPO_FS(pRepo);
const STfsFile *pf;
- tsdbGetRootDir(REPO_ID(pRepo), REPO_LEVEL(pRepo), rootDir);
+ tsdbGetRootDir(REPO_ID(pRepo), pRepo->dir, rootDir);
STfsDir *tdir = tfsOpendir(REPO_TFS(pRepo), rootDir);
if (tdir == NULL) {
tsdbError("vgId:%d failed to open directory %s since %s", REPO_ID(pRepo), rootDir, tstrerror(terrno));
@@ -755,7 +755,7 @@ static int tsdbScanDataDir(STsdb *pRepo) {
STsdbFS *pfs = REPO_FS(pRepo);
const STfsFile *pf;
- tsdbGetDataDir(REPO_ID(pRepo), REPO_LEVEL(pRepo), dataDir);
+ tsdbGetDataDir(REPO_ID(pRepo), pRepo->dir, dataDir);
STfsDir *tdir = tfsOpendir(REPO_TFS(pRepo), dataDir);
if (tdir == NULL) {
tsdbError("vgId:%d failed to open directory %s since %s", REPO_ID(pRepo), dataDir, tstrerror(terrno));
@@ -803,7 +803,7 @@ static int tsdbRestoreDFileSet(STsdb *pRepo) {
regex_t regex;
STsdbFS *pfs = REPO_FS(pRepo);
- tsdbGetDataDir(REPO_ID(pRepo), REPO_LEVEL(pRepo), dataDir);
+ tsdbGetDataDir(REPO_ID(pRepo), pRepo->dir, dataDir);
// Resource allocation and init
regcomp(®ex, pattern, REG_EXTENDED);
diff --git a/source/dnode/vnode/src/tsdb/tsdbFile.c b/source/dnode/vnode/src/tsdb/tsdbFile.c
index 7f024786de..04be2a48de 100644
--- a/source/dnode/vnode/src/tsdb/tsdbFile.c
+++ b/source/dnode/vnode/src/tsdb/tsdbFile.c
@@ -23,14 +23,6 @@ static const char *TSDB_FNAME_SUFFIX[] = {
"smal", // TSDB_FILE_SMAL
"", // TSDB_FILE_MAX
"meta", // TSDB_FILE_META
- "tsma", // TSDB_FILE_TSMA
- "rsma", // TSDB_FILE_RSMA
-};
-
-const char *TSDB_LEVEL_DNAME[] = {
- "tsdb",
- "rsma1",
- "rsma2",
};
static void tsdbGetFilename(int vid, int fid, uint32_t ver, TSDB_FILE_T ftype, const char* dname, char *fname);
@@ -51,7 +43,7 @@ void tsdbInitDFile(STsdb *pRepo, SDFile *pDFile, SDiskID did, int fid, uint32_t
pDFile->info.magic = TSDB_FILE_INIT_MAGIC;
pDFile->info.fver = tsdbGetDFSVersion(ftype);
- tsdbGetFilename(REPO_ID(pRepo), fid, ver, ftype, TSDB_LEVEL_DNAME[pRepo->level], fname);
+ tsdbGetFilename(REPO_ID(pRepo), fid, ver, ftype, pRepo->dir, fname);
tfsInitFile(REPO_TFS(pRepo), &(pDFile->f), did, fname);
}
diff --git a/source/dnode/vnode/src/tsdb/tsdbOpen.c b/source/dnode/vnode/src/tsdb/tsdbOpen.c
index 807ee95b03..8e689fc185 100644
--- a/source/dnode/vnode/src/tsdb/tsdbOpen.c
+++ b/source/dnode/vnode/src/tsdb/tsdbOpen.c
@@ -15,100 +15,17 @@
#include "tsdb.h"
-#define TSDB_OPEN_RSMA_IMPL(v, l) \
- do { \
- SRetention *r = VND_RETENTIONS(v)[0]; \
- if (RETENTION_VALID(r)) { \
- return tsdbOpenImpl((v), type, &VND_RSMA##l(v), VNODE_RSMA##l##_DIR, TSDB_RETENTION_L##l); \
- } \
- } while (0)
+static int tsdbSetKeepCfg(STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg);
-#define TSDB_SET_KEEP_CFG(l) \
- do { \
- SRetention *r = &pCfg->retentions[l]; \
- pKeepCfg->keep2 = convertTimeFromPrecisionToUnit(r->keep, pCfg->precision, TIME_UNIT_MINUTE); \
- pKeepCfg->keep0 = pKeepCfg->keep2; \
- pKeepCfg->keep1 = pKeepCfg->keep2; \
- pKeepCfg->days = tsdbEvalDays(r, pCfg->precision); \
- } while (0)
-#define RETENTION_DAYS_SPLIT_RATIO 10
-#define RETENTION_DAYS_SPLIT_MIN 1
-#define RETENTION_DAYS_SPLIT_MAX 30
+// implementation
-static int32_t tsdbSetKeepCfg(STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int8_t type);
-static int32_t tsdbEvalDays(SRetention *r, int8_t precision);
-static int32_t tsdbOpenImpl(SVnode *pVnode, int8_t type, STsdb **ppTsdb, const char *dir, int8_t level);
-
-int tsdbOpen(SVnode *pVnode, int8_t type) {
- switch (type) {
- case TSDB_TYPE_TSDB:
- return tsdbOpenImpl(pVnode, type, &VND_TSDB(pVnode), VNODE_TSDB_DIR, TSDB_RETENTION_L0);
- case TSDB_TYPE_TSMA:
- ASSERT(0);
- break;
- case TSDB_TYPE_RSMA_L0:
- TSDB_OPEN_RSMA_IMPL(pVnode, 0);
- break;
- case TSDB_TYPE_RSMA_L1:
- TSDB_OPEN_RSMA_IMPL(pVnode, 1);
- break;
- case TSDB_TYPE_RSMA_L2:
- TSDB_OPEN_RSMA_IMPL(pVnode, 2);
- break;
- default:
- ASSERT(0);
- break;
- }
- return 0;
-}
-
-static int32_t tsdbEvalDays(SRetention *r, int8_t precision) {
- int32_t keepDays = convertTimeFromPrecisionToUnit(r->keep, precision, TIME_UNIT_DAY);
- int32_t freqDays = convertTimeFromPrecisionToUnit(r->freq, precision, TIME_UNIT_DAY);
-
- int32_t days = keepDays / RETENTION_DAYS_SPLIT_RATIO;
- if (days <= RETENTION_DAYS_SPLIT_MIN) {
- days = RETENTION_DAYS_SPLIT_MIN;
- if (days < freqDays) {
- days = freqDays + 1;
- }
- } else {
- if (days > RETENTION_DAYS_SPLIT_MAX) {
- days = RETENTION_DAYS_SPLIT_MAX;
- }
- if (days < freqDays) {
- days = freqDays + 1;
- }
- }
- return days * 1440;
-}
-
-static int32_t tsdbSetKeepCfg(STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int8_t type) {
+static int tsdbSetKeepCfg(STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg) {
pKeepCfg->precision = pCfg->precision;
- switch (type) {
- case TSDB_TYPE_TSDB:
- pKeepCfg->days = pCfg->days;
- pKeepCfg->keep0 = pCfg->keep0;
- pKeepCfg->keep1 = pCfg->keep1;
- pKeepCfg->keep2 = pCfg->keep2;
- break;
- case TSDB_TYPE_TSMA:
- ASSERT(0);
- break;
- case TSDB_TYPE_RSMA_L0:
- TSDB_SET_KEEP_CFG(0);
- break;
- case TSDB_TYPE_RSMA_L1:
- TSDB_SET_KEEP_CFG(1);
- break;
- case TSDB_TYPE_RSMA_L2:
- TSDB_SET_KEEP_CFG(2);
- break;
- default:
- ASSERT(0);
- break;
- }
+ pKeepCfg->days = pCfg->days;
+ pKeepCfg->keep0 = pCfg->keep0;
+ pKeepCfg->keep1 = pCfg->keep1;
+ pKeepCfg->keep2 = pCfg->keep2;
return 0;
}
@@ -116,18 +33,16 @@ static int32_t tsdbSetKeepCfg(STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int8_t typ
* @brief
*
* @param pVnode
- * @param type
* @param ppTsdb
* @param dir
- * @param level retention level
* @return int
*/
-int32_t tsdbOpenImpl(SVnode *pVnode, int8_t type, STsdb **ppTsdb, const char *dir, int8_t level) {
+int tsdbOpen(SVnode *pVnode, STsdb **ppTsdb, const char *dir, STsdbKeepCfg *pKeepCfg) {
STsdb *pTsdb = NULL;
int slen = 0;
*ppTsdb = NULL;
- slen = strlen(tfsGetPrimaryPath(pVnode->pTfs)) + strlen(pVnode->path) + strlen(dir) + 3;
+ slen = strlen(tfsGetPrimaryPath(pVnode->pTfs)) + strlen(pVnode->path) + strlen(dir) + TSDB_DATA_DIR_LEN + 3;
// create handle
pTsdb = (STsdb *)taosMemoryCalloc(1, sizeof(*pTsdb) + slen);
@@ -136,13 +51,18 @@ int32_t tsdbOpenImpl(SVnode *pVnode, int8_t type, STsdb **ppTsdb, const char *di
return -1;
}
+ ASSERT(strlen(dir) < TSDB_DATA_DIR_LEN);
+ memcpy(pTsdb->dir, dir, strlen(dir));
pTsdb->path = (char *)&pTsdb[1];
sprintf(pTsdb->path, "%s%s%s%s%s", tfsGetPrimaryPath(pVnode->pTfs), TD_DIRSEP, pVnode->path, TD_DIRSEP, dir);
pTsdb->pVnode = pVnode;
- pTsdb->level = level;
pTsdb->repoLocked = false;
taosThreadMutexInit(&pTsdb->mutex, NULL);
- tsdbSetKeepCfg(REPO_KEEP_CFG(pTsdb), REPO_CFG(pTsdb), type);
+ if (!pKeepCfg) {
+ tsdbSetKeepCfg(&pTsdb->keepCfg, &pVnode->config.tsdbCfg);
+ } else {
+ memcpy(&pTsdb->keepCfg, pKeepCfg, sizeof(STsdbKeepCfg));
+ }
pTsdb->fs = tsdbNewFS(REPO_KEEP_CFG(pTsdb));
// create dir (TODO: use tfsMkdir)
@@ -163,12 +83,13 @@ _err:
return -1;
}
-int tsdbClose(STsdb *pTsdb) {
- if (pTsdb) {
+int tsdbClose(STsdb **pTsdb) {
+ if (*pTsdb) {
// TODO: destroy mem/imem
- tsdbCloseFS(pTsdb);
- tsdbFreeFS(pTsdb->fs);
- taosMemoryFree(pTsdb);
+ taosThreadMutexDestroy(&(*pTsdb)->mutex);
+ tsdbCloseFS(*pTsdb);
+ tsdbFreeFS((*pTsdb)->fs);
+ taosMemoryFreeClear(*pTsdb);
}
return 0;
}
diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c
index b293f1399d..55fe8a3945 100644
--- a/source/dnode/vnode/src/tsdb/tsdbRead.c
+++ b/source/dnode/vnode/src/tsdb/tsdbRead.c
@@ -490,7 +490,7 @@ tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableG
STableCheckInfo* pCheckInfo = taosArrayGet(pTsdbReadHandle->pTableCheckInfo, 0);
- pTsdbReadHandle->pSchema = metaGetTbTSchema(pVnode->pMeta, pCheckInfo->tableId, 0);
+ pTsdbReadHandle->pSchema = metaGetTbTSchema(pVnode->pMeta, pCheckInfo->tableId, 1);
int32_t numOfCols = taosArrayGetSize(pTsdbReadHandle->suppInfo.defaultLoadColumn);
int16_t* ids = pTsdbReadHandle->suppInfo.defaultLoadColumn->pData;
@@ -1271,7 +1271,6 @@ _error:
static int32_t getEndPosInDataBlock(STsdbReadHandle* pTsdbReadHandle, SDataBlockInfo* pBlockInfo);
static int32_t doCopyRowsFromFileBlock(STsdbReadHandle* pTsdbReadHandle, int32_t capacity, int32_t numOfRows,
int32_t start, int32_t end);
-static void moveDataToFront(STsdbReadHandle* pTsdbReadHandle, int32_t numOfRows, int32_t numOfCols);
static void doCheckGeneratedBlockRange(STsdbReadHandle* pTsdbReadHandle);
static void copyAllRemainRowsFromFileBlock(STsdbReadHandle* pTsdbReadHandle, STableCheckInfo* pCheckInfo,
SDataBlockInfo* pBlockInfo, int32_t endPos);
@@ -1301,7 +1300,7 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock*
if ((ascScan && (key != TSKEY_INITIAL_VAL && key < binfo.window.skey)) ||
(!ascScan && (key != TSKEY_INITIAL_VAL && key > binfo.window.ekey))) {
// do not load file block into buffer
- int32_t step = ASCENDING_TRAVERSE(pTsdbReadHandle->order) ? 1 : -1;
+ int32_t step = ascScan ? 1 : -1;
TSKEY maxKey =
ASCENDING_TRAVERSE(pTsdbReadHandle->order) ? (binfo.window.skey - step) : (binfo.window.ekey - step);
@@ -1618,7 +1617,7 @@ static int32_t mergeTwoRowFromMem(STsdbReadHandle* pTsdbReadHandle, int32_t capa
if (pSchema1 == NULL) {
// pSchema1 = metaGetTbTSchema(REPO_META(pTsdbReadHandle->pTsdb), uid, TD_ROW_SVER(row1));
// TODO: use the real schemaVersion
- pSchema1 = metaGetTbTSchema(REPO_META(pTsdbReadHandle->pTsdb), uid, 0);
+ pSchema1 = metaGetTbTSchema(REPO_META(pTsdbReadHandle->pTsdb), uid, 1);
}
#ifdef TD_DEBUG_PRINT_ROW
@@ -1637,7 +1636,7 @@ static int32_t mergeTwoRowFromMem(STsdbReadHandle* pTsdbReadHandle, int32_t capa
if (pSchema2 == NULL) {
// pSchema2 = metaGetTbTSchema(REPO_META(pTsdbReadHandle->pTsdb), uid, TD_ROW_SVER(row2));
// TODO: use the real schemaVersion
- pSchema2 = metaGetTbTSchema(REPO_META(pTsdbReadHandle->pTsdb), uid, 0);
+ pSchema2 = metaGetTbTSchema(REPO_META(pTsdbReadHandle->pTsdb), uid, 1);
}
if (isRow2DataRow) {
numOfColsOfRow2 = schemaNCols(pSchema2);
@@ -1790,22 +1789,6 @@ static int32_t mergeTwoRowFromMem(STsdbReadHandle* pTsdbReadHandle, int32_t capa
#endif
}
-static void moveDataToFront(STsdbReadHandle* pTsdbReadHandle, int32_t numOfRows, int32_t numOfCols) {
- if (numOfRows == 0 || ASCENDING_TRAVERSE(pTsdbReadHandle->order)) {
- return;
- }
-
- // if the buffer is not full in case of descending order query, move the data in the front of the buffer
- if (numOfRows < pTsdbReadHandle->outputCapacity) {
- int32_t emptySize = pTsdbReadHandle->outputCapacity - numOfRows;
- for (int32_t i = 0; i < numOfCols; ++i) {
- SColumnInfoData* pColInfo = taosArrayGet(pTsdbReadHandle->pColumns, i);
- memmove((char*)pColInfo->pData, (char*)pColInfo->pData + emptySize * pColInfo->info.bytes,
- numOfRows * pColInfo->info.bytes);
- }
- }
-}
-
static void getQualifiedRowsPos(STsdbReadHandle* pTsdbReadHandle, int32_t startPos, int32_t endPos,
int32_t numOfExisted, int32_t* start, int32_t* end) {
*start = -1;
@@ -1891,9 +1874,6 @@ static void copyAllRemainRowsFromFileBlock(STsdbReadHandle* pTsdbReadHandle, STa
cur->lastKey = tsArray[endPos] + step;
cur->blockCompleted = true;
- // if the buffer is not full in case of descending order query, move the data in the front of the buffer
- moveDataToFront(pTsdbReadHandle, numOfRows, numOfCols);
-
// The value of pos may be -1 or pBlockInfo->rows, and it is invalid in both cases.
pos = endPos + step;
updateInfoAfterMerge(pTsdbReadHandle, pCheckInfo, numOfRows, pos);
@@ -1944,18 +1924,18 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
assert(pCols->numOfRows == pBlock->numOfRows && tsArray[0] == pBlock->keyFirst &&
tsArray[pBlock->numOfRows - 1] == pBlock->keyLast);
+ bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order);
+ int32_t step = ascScan ? 1 : -1;
+
// for search the endPos, so the order needs to reverse
- int32_t order = (pTsdbReadHandle->order == TSDB_ORDER_ASC) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC;
+ int32_t order = ascScan ? TSDB_ORDER_DESC : TSDB_ORDER_ASC;
- int32_t step = ASCENDING_TRAVERSE(pTsdbReadHandle->order) ? 1 : -1;
int32_t numOfCols = (int32_t)(QH_GET_NUM_OF_COLS(pTsdbReadHandle));
-
- STable* pTable = NULL;
int32_t endPos = getEndPosInDataBlock(pTsdbReadHandle, &blockInfo);
+ STimeWindow* pWin = &blockInfo.window;
tsdbDebug("%p uid:%" PRIu64 " start merge data block, file block range:%" PRIu64 "-%" PRIu64
- " rows:%d, start:%d, end:%d, %s",
- pTsdbReadHandle, pCheckInfo->tableId, blockInfo.window.skey, blockInfo.window.ekey, blockInfo.rows,
+ " rows:%d, start:%d, end:%d, %s", pTsdbReadHandle, pCheckInfo->tableId, pWin->skey, pWin->ekey, blockInfo.rows,
cur->pos, endPos, pTsdbReadHandle->idStr);
// compared with the data from in-memory buffer, to generate the correct timestamp array list
@@ -1986,20 +1966,16 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
}
TSKEY key = TD_ROW_KEY(row1);
- if ((key > pTsdbReadHandle->window.ekey && ASCENDING_TRAVERSE(pTsdbReadHandle->order)) ||
- (key < pTsdbReadHandle->window.ekey && !ASCENDING_TRAVERSE(pTsdbReadHandle->order))) {
+ if ((key > pTsdbReadHandle->window.ekey && ascScan) || (key < pTsdbReadHandle->window.ekey && !ascScan)) {
break;
}
- if (((pos > endPos || tsArray[pos] > pTsdbReadHandle->window.ekey) &&
- ASCENDING_TRAVERSE(pTsdbReadHandle->order)) ||
- ((pos < endPos || tsArray[pos] < pTsdbReadHandle->window.ekey) &&
- !ASCENDING_TRAVERSE(pTsdbReadHandle->order))) {
+ if (((pos > endPos || tsArray[pos] > pTsdbReadHandle->window.ekey) && ascScan) ||
+ ((pos < endPos || tsArray[pos] < pTsdbReadHandle->window.ekey) && !ascScan)) {
break;
}
- if ((key < tsArray[pos] && ASCENDING_TRAVERSE(pTsdbReadHandle->order)) ||
- (key > tsArray[pos] && !ASCENDING_TRAVERSE(pTsdbReadHandle->order))) {
+ if ((key < tsArray[pos] && ascScan) || (key > tsArray[pos] && !ascScan)) {
if (rv1 != TD_ROW_SVER(row1)) {
// pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1));
rv1 = TD_ROW_SVER(row1);
@@ -2054,23 +2030,19 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
}
#endif
if (TD_SUPPORT_UPDATE(pCfg->update)) {
- if (lastKeyAppend != key) {
- lastKeyAppend = key;
- ++curRow;
- }
numOfRows = doCopyRowsFromFileBlock(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, curRow, pos, pos);
+ lastKeyAppend = key;
if (rv1 != TD_ROW_SVER(row1)) {
- // pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1));
rv1 = TD_ROW_SVER(row1);
}
if (row2 && rv2 != TD_ROW_SVER(row2)) {
- // pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2));
rv2 = TD_ROW_SVER(row2);
}
- numOfRows +=
- mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols,
- pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend);
+
+ // still assign data into current row
+ mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols,
+ pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend);
if (cur->win.skey == TSKEY_INITIAL_VAL) {
cur->win.skey = key;
@@ -2081,12 +2053,13 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
cur->mixBlock = true;
moveToNextRowInMem(pCheckInfo);
+ ++curRow;
+
pos += step;
} else {
moveToNextRowInMem(pCheckInfo);
}
- } else if ((key > tsArray[pos] && ASCENDING_TRAVERSE(pTsdbReadHandle->order)) ||
- (key < tsArray[pos] && !ASCENDING_TRAVERSE(pTsdbReadHandle->order))) {
+ } else if ((key > tsArray[pos] && ascScan) || (key < tsArray[pos] && !ascScan)) {
if (cur->win.skey == TSKEY_INITIAL_VAL) {
cur->win.skey = tsArray[pos];
}
@@ -2112,17 +2085,17 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
int32_t qstart = 0, qend = 0;
getQualifiedRowsPos(pTsdbReadHandle, pos, end, numOfRows, &qstart, &qend);
- if ((lastKeyAppend != TSKEY_INITIAL_VAL) &&
- (lastKeyAppend != (ASCENDING_TRAVERSE(pTsdbReadHandle->order) ? tsArray[qstart] : tsArray[qend]))) {
+ if ((lastKeyAppend != TSKEY_INITIAL_VAL) && (lastKeyAppend != (ascScan ? tsArray[qstart] : tsArray[qend]))) {
++curRow;
}
+
numOfRows = doCopyRowsFromFileBlock(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, curRow, qstart, qend);
pos += (qend - qstart + 1) * step;
if (numOfRows > 0) {
curRow = numOfRows - 1;
}
- cur->win.ekey = ASCENDING_TRAVERSE(pTsdbReadHandle->order) ? tsArray[qend] : tsArray[qstart];
+ cur->win.ekey = ascScan ? tsArray[qend] : tsArray[qstart];
cur->lastKey = cur->win.ekey + step;
lastKeyAppend = cur->win.ekey;
}
@@ -2134,10 +2107,8 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
* copy them all to result buffer, since it may be overlapped with file data block.
*/
if (node == NULL ||
- ((TD_ROW_KEY((STSRow*)SL_GET_NODE_DATA(node)) > pTsdbReadHandle->window.ekey) &&
- ASCENDING_TRAVERSE(pTsdbReadHandle->order)) ||
- ((TD_ROW_KEY((STSRow*)SL_GET_NODE_DATA(node)) < pTsdbReadHandle->window.ekey) &&
- !ASCENDING_TRAVERSE(pTsdbReadHandle->order))) {
+ ((TD_ROW_KEY((STSRow*)SL_GET_NODE_DATA(node)) > pTsdbReadHandle->window.ekey) && ascScan) ||
+ ((TD_ROW_KEY((STSRow*)SL_GET_NODE_DATA(node)) < pTsdbReadHandle->window.ekey) && !ascScan)) {
// no data in cache or data in cache is greater than the ekey of time window, load data from file block
if (cur->win.skey == TSKEY_INITIAL_VAL) {
cur->win.skey = tsArray[pos];
@@ -2149,22 +2120,20 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
numOfRows = doCopyRowsFromFileBlock(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, numOfRows, start, end);
pos += (end - start + 1) * step;
- cur->win.ekey = ASCENDING_TRAVERSE(pTsdbReadHandle->order) ? tsArray[end] : tsArray[start];
+ cur->win.ekey = ascScan ? tsArray[end] : tsArray[start];
cur->lastKey = cur->win.ekey + step;
cur->mixBlock = true;
}
}
}
- cur->blockCompleted =
- (((pos > endPos || cur->lastKey > pTsdbReadHandle->window.ekey) && ASCENDING_TRAVERSE(pTsdbReadHandle->order)) ||
- ((pos < endPos || cur->lastKey < pTsdbReadHandle->window.ekey) && !ASCENDING_TRAVERSE(pTsdbReadHandle->order)));
+ cur->blockCompleted = (((pos > endPos || cur->lastKey > pTsdbReadHandle->window.ekey) && ascScan) ||
+ ((pos < endPos || cur->lastKey < pTsdbReadHandle->window.ekey) && !ascScan));
- if (!ASCENDING_TRAVERSE(pTsdbReadHandle->order)) {
+ if (!ascScan) {
TSWAP(cur->win.skey, cur->win.ekey);
}
- moveDataToFront(pTsdbReadHandle, numOfRows, numOfCols);
updateInfoAfterMerge(pTsdbReadHandle, pCheckInfo, numOfRows, pos);
doCheckGeneratedBlockRange(pTsdbReadHandle);
@@ -2755,7 +2724,7 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int
win->ekey = key;
if (rv != TD_ROW_SVER(row)) {
- pSchema = metaGetTbTSchema(REPO_META(pTsdbReadHandle->pTsdb), pCheckInfo->tableId, 0);
+ pSchema = metaGetTbTSchema(REPO_META(pTsdbReadHandle->pTsdb), pCheckInfo->tableId, 1);
rv = TD_ROW_SVER(row);
}
numOfRows += mergeTwoRowFromMem(pTsdbReadHandle, maxRowsToRead, &curRows, row, NULL, numOfCols, pCheckInfo->tableId,
@@ -2769,20 +2738,8 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int
} while (moveToNextRowInMem(pCheckInfo));
taosMemoryFreeClear(pSchema); // free the STSChema
-
assert(numOfRows <= maxRowsToRead);
- // if the buffer is not full in case of descending order query, move the data in the front of the buffer
- if (!ASCENDING_TRAVERSE(pTsdbReadHandle->order) && numOfRows < maxRowsToRead) {
- int32_t emptySize = maxRowsToRead - numOfRows;
-
- for (int32_t i = 0; i < numOfCols; ++i) {
- SColumnInfoData* pColInfo = taosArrayGet(pTsdbReadHandle->pColumns, i);
- memmove((char*)pColInfo->pData, (char*)pColInfo->pData + emptySize * pColInfo->info.bytes,
- numOfRows * pColInfo->info.bytes);
- }
- }
-
int64_t elapsedTime = taosGetTimestampUs() - st;
tsdbDebug("%p build data block from cache completed, elapsed time:%" PRId64 " us, numOfRows:%d, numOfCols:%d, %s",
pTsdbReadHandle, elapsedTime, numOfRows, numOfCols, pTsdbReadHandle->idStr);
@@ -3889,7 +3846,7 @@ int32_t tsdbQuerySTableByTagCond(void* pMeta, uint64_t uid, TSKEY skey, const ch
// NOTE: not add ref count for super table
SArray* res = taosArrayInit(8, sizeof(STableKeyInfo));
- SSchemaWrapper* pTagSchema = metaGetTableSchema(pMeta, uid, 0, true);
+ SSchemaWrapper* pTagSchema = metaGetTableSchema(pMeta, uid, 1, true);
// no tags and tbname condition, all child tables of this stable are involved
if (tbnameCond == NULL && (pTagCond == NULL || len == 0)) {
diff --git a/source/dnode/vnode/src/tsdb/tsdbSma.c b/source/dnode/vnode/src/tsdb/tsdbSma.c
index e878668654..1589513110 100644
--- a/source/dnode/vnode/src/tsdb/tsdbSma.c
+++ b/source/dnode/vnode/src/tsdb/tsdbSma.c
@@ -2084,7 +2084,7 @@ static int32_t tsdbExecuteRSma(STsdb *pTsdb, const void *pMsg, int32_t inputType
if (inputType == STREAM_DATA_TYPE_SUBMIT_BLOCK) {
// TODO: use the proper schema instead of 0, and cache STSchema in cache
- STSchema *pTSchema = metaGetTbTSchema(pTsdb->pVnode->pMeta, suid, 0);
+ STSchema *pTSchema = metaGetTbTSchema(pTsdb->pVnode->pMeta, suid, 1);
if (!pTSchema) {
terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
return TSDB_CODE_FAILED;
diff --git a/source/dnode/vnode/src/vnd/vnodeCommit.c b/source/dnode/vnode/src/vnd/vnodeCommit.c
index e7bee3342a..b4fbd01c63 100644
--- a/source/dnode/vnode/src/vnd/vnodeCommit.c
+++ b/source/dnode/vnode/src/vnd/vnodeCommit.c
@@ -47,7 +47,7 @@ int vnodeBegin(SVnode *pVnode) {
}
// begin tsdb
- if (vnodeIsRollup(pVnode)) {
+ if (pVnode->pSma) {
if (tsdbBegin(VND_RSMA0(pVnode)) < 0) {
vError("vgId:%d failed to begin rsma0 since %s", TD_VID(pVnode), tstrerror(terrno));
return -1;
diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c
index 7476da2a0f..d44e30988d 100644
--- a/source/dnode/vnode/src/vnd/vnodeOpen.c
+++ b/source/dnode/vnode/src/vnd/vnodeOpen.c
@@ -96,26 +96,15 @@ SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb) {
}
// open tsdb
- if (vnodeIsRollup(pVnode)) {
- if (tsdbOpen(pVnode, TSDB_TYPE_RSMA_L0) < 0) {
- vError("vgId:%d failed to open vnode rsma0 since %s", TD_VID(pVnode), tstrerror(terrno));
- goto _err;
- }
+ if (!vnodeIsRollup(pVnode) && tsdbOpen(pVnode, &VND_TSDB(pVnode), VNODE_TSDB_DIR, TSDB_TYPE_TSDB) < 0) {
+ vError("vgId:%d failed to open vnode tsdb since %s", TD_VID(pVnode), tstrerror(terrno));
+ goto _err;
+ }
- if (tsdbOpen(pVnode, TSDB_TYPE_RSMA_L1) < 0) {
- vError("vgId:%d failed to open vnode rsma1 since %s", TD_VID(pVnode), tstrerror(terrno));
- goto _err;
- }
-
- if (tsdbOpen(pVnode, TSDB_TYPE_RSMA_L2) < 0) {
- vError("vgId:%d failed to open vnode rsma2 since %s", TD_VID(pVnode), tstrerror(terrno));
- goto _err;
- }
- } else {
- if (tsdbOpen(pVnode, TSDB_TYPE_TSDB) < 0) {
- vError("vgId:%d failed to open vnode tsdb since %s", TD_VID(pVnode), tstrerror(terrno));
- goto _err;
- }
+ // open sma
+ if (smaOpen(pVnode)) {
+ vError("vgId:%d failed to open vnode tsdb since %s", TD_VID(pVnode), tstrerror(terrno));
+ goto _err;
}
// open wal
@@ -161,10 +150,10 @@ _err:
if (pVnode->pQuery) vnodeQueryClose(pVnode);
if (pVnode->pTq) tqClose(pVnode->pTq);
if (pVnode->pWal) walClose(pVnode->pWal);
- if (pVnode->pTsdb) tsdbClose(pVnode->pTsdb);
+ if (pVnode->pTsdb) tsdbClose(&pVnode->pTsdb);
if (pVnode->pMeta) metaClose(pVnode->pMeta);
- tsdbClose(VND_RSMA1(pVnode));
- tsdbClose(VND_RSMA2(pVnode));
+ if (pVnode->pSma) smaClose(pVnode->pSma);
+
tsem_destroy(&(pVnode->canCommit));
taosMemoryFree(pVnode);
return NULL;
@@ -177,9 +166,8 @@ void vnodeClose(SVnode *pVnode) {
vnodeQueryClose(pVnode);
walClose(pVnode->pWal);
tqClose(pVnode->pTq);
- tsdbClose(VND_TSDB(pVnode));
- tsdbClose(VND_RSMA1(pVnode));
- tsdbClose(VND_RSMA2(pVnode));
+ if (pVnode->pTsdb) tsdbClose(&pVnode->pTsdb);
+ smaClose(pVnode->pSma);
metaClose(pVnode->pMeta);
vnodeCloseBufPool(pVnode);
// destroy handle
diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c
index 7414da6bbc..68386ee40a 100644
--- a/source/dnode/vnode/src/vnd/vnodeSvr.c
+++ b/source/dnode/vnode/src/vnd/vnodeSvr.c
@@ -16,12 +16,13 @@
#include "vnd.h"
static int vnodeProcessCreateStbReq(SVnode *pVnode, int64_t version, void *pReq, int len, SRpcMsg *pRsp);
-static int vnodeProcessAlterStbReq(SVnode *pVnode, void *pReq, int32_t len, SRpcMsg *pRsp);
+static int vnodeProcessAlterStbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
static int vnodeProcessDropStbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
static int vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pReq, int len, SRpcMsg *pRsp);
-static int vnodeProcessAlterTbReq(SVnode *pVnode, void *pReq, int32_t len, SRpcMsg *pRsp);
+static int vnodeProcessAlterTbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
static int vnodeProcessDropTbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
+static int vnodeProcessCreateTSmaReq(SVnode *pVnode, int64_t version, void *pReq, int len, SRpcMsg *pRsp);
int vnodePreprocessWriteReqs(SVnode *pVnode, SArray *pMsgs, int64_t *version) {
#if 0
@@ -72,7 +73,7 @@ int vnodeProcessWriteReq(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRpcMsg
if (vnodeProcessCreateStbReq(pVnode, version, pReq, len, pRsp) < 0) goto _err;
break;
case TDMT_VND_ALTER_STB:
- if (vnodeProcessAlterStbReq(pVnode, pReq, len, pRsp) < 0) goto _err;
+ if (vnodeProcessAlterStbReq(pVnode, version, pReq, len, pRsp) < 0) goto _err;
break;
case TDMT_VND_DROP_STB:
if (vnodeProcessDropStbReq(pVnode, version, pReq, len, pRsp) < 0) goto _err;
@@ -81,15 +82,13 @@ int vnodeProcessWriteReq(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRpcMsg
if (vnodeProcessCreateTbReq(pVnode, version, pReq, len, pRsp) < 0) goto _err;
break;
case TDMT_VND_ALTER_TABLE:
- if (vnodeProcessAlterTbReq(pVnode, pReq, len, pRsp) < 0) goto _err;
+ if (vnodeProcessAlterTbReq(pVnode, version, pReq, len, pRsp) < 0) goto _err;
break;
case TDMT_VND_DROP_TABLE:
if (vnodeProcessDropTbReq(pVnode, version, pReq, len, pRsp) < 0) goto _err;
break;
- case TDMT_VND_CREATE_SMA: { // timeRangeSMA
- if (tsdbCreateTSma(pVnode->pTsdb, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead))) < 0) {
- // TODO
- }
+ case TDMT_VND_CREATE_SMA: {
+ if (vnodeProcessCreateTSmaReq(pVnode, version, pReq, len, pRsp) < 0) goto _err;
} break;
/* TSDB */
case TDMT_VND_SUBMIT:
@@ -195,10 +194,12 @@ void smaHandleRes(void *pVnode, int64_t smaId, const SArray *data) {
// TODO
// blockDebugShowData(data);
- tsdbInsertTSmaData(((SVnode *)pVnode)->pTsdb, smaId, (const char *)data);
+ tdProcessTSmaInsert(((SVnode *)pVnode)->pSma, smaId, (const char *)data);
}
int vnodeProcessSyncReq(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
+ int32_t ret = TAOS_SYNC_PROPOSE_OTHER_ERROR;
+
if (syncEnvIsStart()) {
SSyncNode *pSyncNode = syncNodeAcquire(pVnode->sync);
assert(pSyncNode != NULL);
@@ -220,67 +221,70 @@ int vnodeProcessSyncReq(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
SyncTimeout *pSyncMsg = syncTimeoutFromRpcMsg2(pRpcMsg);
assert(pSyncMsg != NULL);
- syncNodeOnTimeoutCb(pSyncNode, pSyncMsg);
+ ret = syncNodeOnTimeoutCb(pSyncNode, pSyncMsg);
syncTimeoutDestroy(pSyncMsg);
} else if (pRpcMsg->msgType == TDMT_VND_SYNC_PING) {
SyncPing *pSyncMsg = syncPingFromRpcMsg2(pRpcMsg);
assert(pSyncMsg != NULL);
- syncNodeOnPingCb(pSyncNode, pSyncMsg);
+ ret = syncNodeOnPingCb(pSyncNode, pSyncMsg);
syncPingDestroy(pSyncMsg);
} else if (pRpcMsg->msgType == TDMT_VND_SYNC_PING_REPLY) {
SyncPingReply *pSyncMsg = syncPingReplyFromRpcMsg2(pRpcMsg);
assert(pSyncMsg != NULL);
- syncNodeOnPingReplyCb(pSyncNode, pSyncMsg);
+ ret = syncNodeOnPingReplyCb(pSyncNode, pSyncMsg);
syncPingReplyDestroy(pSyncMsg);
} else if (pRpcMsg->msgType == TDMT_VND_SYNC_CLIENT_REQUEST) {
SyncClientRequest *pSyncMsg = syncClientRequestFromRpcMsg2(pRpcMsg);
assert(pSyncMsg != NULL);
- syncNodeOnClientRequestCb(pSyncNode, pSyncMsg);
+ ret = syncNodeOnClientRequestCb(pSyncNode, pSyncMsg);
syncClientRequestDestroy(pSyncMsg);
} else if (pRpcMsg->msgType == TDMT_VND_SYNC_REQUEST_VOTE) {
SyncRequestVote *pSyncMsg = syncRequestVoteFromRpcMsg2(pRpcMsg);
assert(pSyncMsg != NULL);
- syncNodeOnRequestVoteCb(pSyncNode, pSyncMsg);
+ ret = syncNodeOnRequestVoteCb(pSyncNode, pSyncMsg);
syncRequestVoteDestroy(pSyncMsg);
} else if (pRpcMsg->msgType == TDMT_VND_SYNC_REQUEST_VOTE_REPLY) {
SyncRequestVoteReply *pSyncMsg = syncRequestVoteReplyFromRpcMsg2(pRpcMsg);
assert(pSyncMsg != NULL);
- syncNodeOnRequestVoteReplyCb(pSyncNode, pSyncMsg);
+ ret = syncNodeOnRequestVoteReplyCb(pSyncNode, pSyncMsg);
syncRequestVoteReplyDestroy(pSyncMsg);
} else if (pRpcMsg->msgType == TDMT_VND_SYNC_APPEND_ENTRIES) {
SyncAppendEntries *pSyncMsg = syncAppendEntriesFromRpcMsg2(pRpcMsg);
assert(pSyncMsg != NULL);
- syncNodeOnAppendEntriesCb(pSyncNode, pSyncMsg);
+ ret = syncNodeOnAppendEntriesCb(pSyncNode, pSyncMsg);
syncAppendEntriesDestroy(pSyncMsg);
} else if (pRpcMsg->msgType == TDMT_VND_SYNC_APPEND_ENTRIES_REPLY) {
SyncAppendEntriesReply *pSyncMsg = syncAppendEntriesReplyFromRpcMsg2(pRpcMsg);
assert(pSyncMsg != NULL);
- syncNodeOnAppendEntriesReplyCb(pSyncNode, pSyncMsg);
+ ret = syncNodeOnAppendEntriesReplyCb(pSyncNode, pSyncMsg);
syncAppendEntriesReplyDestroy(pSyncMsg);
} else {
vError("==vnodeProcessSyncReq== error msg type:%d", pRpcMsg->msgType);
+ ret = TAOS_SYNC_PROPOSE_OTHER_ERROR;
}
syncNodeRelease(pSyncNode);
} else {
vError("==vnodeProcessSyncReq== error syncEnv stop");
+ ret = TAOS_SYNC_PROPOSE_OTHER_ERROR;
}
- return 0;
+
+ return ret;
}
static int vnodeProcessCreateStbReq(SVnode *pVnode, int64_t version, void *pReq, int len, SRpcMsg *pRsp) {
@@ -305,7 +309,7 @@ static int vnodeProcessCreateStbReq(SVnode *pVnode, int64_t version, void *pReq,
goto _err;
}
- tsdbRegisterRSma(pVnode->pTsdb, pVnode->pMeta, &req, &pVnode->msgCb);
+ tdProcessRSmaCreate(pVnode->pSma, pVnode->pMeta, &req, &pVnode->msgCb);
tDecoderClear(&coder);
return 0;
@@ -366,7 +370,7 @@ static int vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pReq,
}
} else {
cRsp.code = TSDB_CODE_SUCCESS;
- tsdbFetchTbUidList(pVnode->pTsdb, &pStore, pCreateReq->ctb.suid, pCreateReq->uid);
+ tdFetchTbUidList(pVnode->pSma, &pStore, pCreateReq->ctb.suid, pCreateReq->uid);
}
taosArrayPush(rsp.pArray, &cRsp);
@@ -374,8 +378,8 @@ static int vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pReq,
tDecoderClear(&decoder);
- tsdbUpdateTbUidList(pVnode->pTsdb, pStore);
- tsdbUidStoreFree(pStore);
+ tdUpdateTbUidList(pVnode->pSma, pStore);
+ tdUidStoreFree(pStore);
// prepare rsp
SEncoder encoder = {0};
@@ -398,20 +402,32 @@ _exit:
return rcode;
}
-static int vnodeProcessAlterStbReq(SVnode *pVnode, void *pReq, int32_t len, SRpcMsg *pRsp) {
- // ASSERT(0);
-#if 0
- SVCreateTbReq vAlterTbReq = {0};
- vTrace("vgId:%d, process alter stb req", TD_VID(pVnode));
- tDeserializeSVCreateTbReq(pReq, &vAlterTbReq);
- // TODO: to encapsule a free API
- taosMemoryFree(vAlterTbReq.stbCfg.pSchema);
- taosMemoryFree(vAlterTbReq.stbCfg.pTagSchema);
- if (vAlterTbReq.stbCfg.pRSmaParam) {
- taosMemoryFree(vAlterTbReq.stbCfg.pRSmaParam);
+static int vnodeProcessAlterStbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
+ SVCreateStbReq req = {0};
+ SDecoder dc = {0};
+
+ pRsp->msgType = TDMT_VND_ALTER_STB_RSP;
+ pRsp->code = TSDB_CODE_SUCCESS;
+ pRsp->pCont = NULL;
+ pRsp->contLen = 0;
+
+ tDecoderInit(&dc, pReq, len);
+
+ // decode req
+ if (tDecodeSVCreateStbReq(&dc, &req) < 0) {
+ terrno = TSDB_CODE_INVALID_MSG;
+ tDecoderClear(&dc);
+ return -1;
}
- taosMemoryFree(vAlterTbReq.name);
-#endif
+
+ if (metaAlterSTable(pVnode->pMeta, version, &req) < 0) {
+ pRsp->code = terrno;
+ tDecoderClear(&dc);
+ return -1;
+ }
+
+ tDecoderClear(&dc);
+
return 0;
}
@@ -444,9 +460,32 @@ _exit:
return 0;
}
-static int vnodeProcessAlterTbReq(SVnode *pVnode, void *pReq, int32_t len, SRpcMsg *pRsp) {
- // TODO
- ASSERT(0);
+static int vnodeProcessAlterTbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
+ SVAlterTbReq vAlterTbReq = {0};
+ SDecoder dc = {0};
+
+ pRsp->msgType = TDMT_VND_ALTER_TABLE_RSP;
+ pRsp->pCont = NULL;
+ pRsp->contLen = 0;
+ pRsp->code = TSDB_CODE_SUCCESS;
+
+ tDecoderInit(&dc, pReq, len);
+
+ // decode
+ if (tDecodeSVAlterTbReq(&dc, &vAlterTbReq) < 0) {
+ pRsp->code = TSDB_CODE_INVALID_MSG;
+ tDecoderClear(&dc);
+ return -1;
+ }
+
+ // process
+ if (metaAlterTable(pVnode->pMeta, version, &vAlterTbReq) < 0) {
+ pRsp->code = terrno;
+ tDecoderClear(&dc);
+ return -1;
+ }
+
+ tDecoderClear(&dc);
return 0;
}
@@ -514,7 +553,7 @@ static int vnodeDebugPrintSingleSubmitMsg(SMeta *pMeta, SSubmitBlk *pBlock, SSub
if (pSchema) {
taosMemoryFreeClear(pSchema);
}
- pSchema = metaGetTbTSchema(pMeta, msgIter->suid, 0); // TODO: use the real schema
+ pSchema = metaGetTbTSchema(pMeta, msgIter->suid, 1); // TODO: use the real schema
if (pSchema) {
suid = msgIter->suid;
}
@@ -649,8 +688,38 @@ _exit:
// TODO: refactor
if ((terrno == TSDB_CODE_SUCCESS || terrno == TSDB_CODE_TDB_TABLE_ALREADY_EXIST) &&
(pRsp->code == TSDB_CODE_SUCCESS)) {
- tsdbTriggerRSma(pVnode->pTsdb, pReq, STREAM_DATA_TYPE_SUBMIT_BLOCK);
+ tdProcessRSmaSubmit(pVnode->pSma, pReq, STREAM_DATA_TYPE_SUBMIT_BLOCK);
}
return 0;
}
+
+static int vnodeProcessCreateTSmaReq(SVnode *pVnode, int64_t version, void *pReq, int len, SRpcMsg *pRsp) {
+ SVCreateTSmaReq req = {0};
+ SDecoder coder;
+
+ pRsp->msgType = TDMT_VND_CREATE_SMA_RSP;
+ pRsp->code = TSDB_CODE_SUCCESS;
+ pRsp->pCont = NULL;
+ pRsp->contLen = 0;
+
+ // decode and process req
+ tDecoderInit(&coder, pReq, len);
+
+ if (tDecodeSVCreateTSmaReq(&coder, &req) < 0) {
+ pRsp->code = terrno;
+ goto _err;
+ }
+
+ if (metaCreateTSma(pVnode->pMeta, version, &req) < 0) {
+ pRsp->code = terrno;
+ goto _err;
+ }
+
+ tDecoderClear(&coder);
+ return 0;
+
+_err:
+ tDecoderClear(&coder);
+ return -1;
+}
diff --git a/source/libs/catalog/src/catalog.c b/source/libs/catalog/src/catalog.c
index 64090d0283..c96ad140a1 100644
--- a/source/libs/catalog/src/catalog.c
+++ b/source/libs/catalog/src/catalog.c
@@ -2576,12 +2576,6 @@ int32_t catalogGetHandle(uint64_t clusterId, SCatalog** catalogHandle) {
CTG_ERR_JRET(TSDB_CODE_CTG_MEM_ERROR);
}
- SHashObj *metaCache = taosHashInit(gCtgMgmt.cfg.maxTblCacheNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
- if (NULL == metaCache) {
- qError("taosHashInit failed, num:%d", gCtgMgmt.cfg.maxTblCacheNum);
- CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR);
- }
-
code = taosHashPut(gCtgMgmt.pCluster, &clusterId, sizeof(clusterId), &clusterCtg, POINTER_BYTES);
if (code) {
if (HASH_NODE_EXIST(code)) {
diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h
index dd0bcbff0e..6100d416b1 100644
--- a/source/libs/executor/inc/executorimpl.h
+++ b/source/libs/executor/inc/executorimpl.h
@@ -651,13 +651,14 @@ void getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int64_t
int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t *order, int32_t* scanFlag);
void doSetOperatorCompleted(SOperatorInfo* pOperator);
-void doFilter(const SNode* pFilterNode, SSDataBlock* pBlock);
+void doFilter(const SNode* pFilterNode, SSDataBlock* pBlock, SArray* pColMatchInfo);
SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, int32_t** rowCellInfoOffset);
void relocateColumnData(SSDataBlock* pBlock, const SArray* pColMatchInfo, SArray* pCols);
void initExecTimeWindowInfo(SColumnInfoData* pColData, STimeWindow* pQueryWindow);
void cleanupAggSup(SAggSupporter* pAggSup);
void destroyBasicOperatorInfo(void* param, int32_t numOfOutput);
void appendOneRowToDataBlock(SSDataBlock* pBlock, STupleHandle* pTupleHandle);
+void setTbNameColData(void* pMeta, const SSDataBlock* pBlock, SColumnInfoData* pColInfoData, int32_t functionId);
SInterval extractIntervalInfo(const STableScanPhysiNode* pTableScanNode);
SColumn extractColumnFromColumnNode(SColumnNode* pColNode);
diff --git a/source/libs/executor/src/executorMain.c b/source/libs/executor/src/executorMain.c
index 3cc75a815d..354f4d8752 100644
--- a/source/libs/executor/src/executorMain.c
+++ b/source/libs/executor/src/executorMain.c
@@ -21,13 +21,14 @@
#include "tcache.h"
#include "tglobal.h"
#include "tmsg.h"
+#include "tudf.h"
-#include "thash.h"
-#include "executorimpl.h"
#include "executor.h"
+#include "executorimpl.h"
+#include "query.h"
+#include "thash.h"
#include "tlosertree.h"
#include "ttypes.h"
-#include "query.h"
typedef struct STaskMgmt {
TdThreadMutex lock;
@@ -156,6 +157,7 @@ int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t *useconds) {
int32_t current = (*pRes != NULL)? (*pRes)->info.rows:0;
pTaskInfo->totalRows += current;
+ cleanUpUdfs();
qDebug("%s task suspended, %d rows returned, total:%" PRId64 " rows, in sinkNode:%d, elapsed:%.2f ms",
GET_TASKID(pTaskInfo), current, pTaskInfo->totalRows, 0, el/1000.0);
diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c
index e12040749d..ece0013e52 100644
--- a/source/libs/executor/src/executorimpl.c
+++ b/source/libs/executor/src/executorimpl.c
@@ -13,7 +13,6 @@
* along with this program. If not, see .
*/
-#include
#include "filter.h"
#include "function.h"
#include "functionMgt.h"
@@ -1601,9 +1600,6 @@ void doCompactSDataBlock(SSDataBlock* pBlock, int32_t numOfRows, int8_t* p) {
}
}
-static SColumnInfo* doGetTagColumnInfoById(SColumnInfo* pTagColList, int32_t numOfTags, int16_t colId);
-static void doSetTagValueInParam(void* pTable, int32_t tagColId, SVariant* tag, int16_t type, int16_t bytes);
-
static uint32_t doFilterByBlockTimeWindow(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock) {
SqlFunctionCtx* pCtx = pTableScanInfo->pCtx;
uint32_t status = BLK_DATA_NOT_LOAD;
@@ -1771,100 +1767,6 @@ int32_t loadDataBlockOnDemand(SExecTaskInfo* pTaskInfo, STableScanInfo* pTableSc
return TSDB_CODE_SUCCESS;
}
-/*
- * set tag value in SqlFunctionCtx
- * e.g.,tag information into input buffer
- */
-static void doSetTagValueInParam(void* pTable, int32_t tagColId, SVariant* tag, int16_t type, int16_t bytes) {
- taosVariantDestroy(tag);
-
- char* val = NULL;
- // if (tagColId == TSDB_TBNAME_COLUMN_INDEX) {
- // val = tsdbGetTableName(pTable);
- // assert(val != NULL);
- // } else {
- // val = tsdbGetTableTagVal(pTable, tagColId, type, bytes);
- // }
-
- if (val == NULL || isNull(val, type)) {
- tag->nType = TSDB_DATA_TYPE_NULL;
- return;
- }
-
- if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
- int32_t maxLen = bytes - VARSTR_HEADER_SIZE;
- int32_t len = (varDataLen(val) > maxLen) ? maxLen : varDataLen(val);
- taosVariantCreateFromBinary(tag, varDataVal(val), len, type);
- // taosVariantCreateFromBinary(tag, varDataVal(val), varDataLen(val), type);
- } else {
- taosVariantCreateFromBinary(tag, val, bytes, type);
- }
-}
-
-static SColumnInfo* doGetTagColumnInfoById(SColumnInfo* pTagColList, int32_t numOfTags, int16_t colId) {
- assert(pTagColList != NULL && numOfTags > 0);
-
- for (int32_t i = 0; i < numOfTags; ++i) {
- if (pTagColList[i].colId == colId) {
- return &pTagColList[i];
- }
- }
-
- return NULL;
-}
-
-void setTagValue(SOperatorInfo* pOperatorInfo, void* pTable, SqlFunctionCtx* pCtx, int32_t numOfOutput) {
- SExprInfo* pExpr = pOperatorInfo->pExpr;
- SExprInfo* pExprInfo = &pExpr[0];
- int32_t functionId = getExprFunctionId(pExprInfo);
-#if 0
- if (pQueryAttr->numOfOutput == 1 && functionId == FUNCTION_TS_COMP && pQueryAttr->stableQuery) {
- assert(pExprInfo->base.numOfParams == 1);
-
- // int16_t tagColId = (int16_t)pExprInfo->base.param[0].i;
- int16_t tagColId = -1;
- SColumnInfo* pColInfo = doGetTagColumnInfoById(pQueryAttr->tagColList, pQueryAttr->numOfTags, tagColId);
-
- doSetTagValueInParam(pTable, tagColId, &pCtx[0].tag, pColInfo->type, pColInfo->bytes);
-
- } else {
- // set tag value, by which the results are aggregated.
- int32_t offset = 0;
- memset(pRuntimeEnv->tagVal, 0, pQueryAttr->tagLen);
-
- for (int32_t idx = 0; idx < numOfOutput; ++idx) {
- SExprInfo* pLocalExprInfo = &pExpr[idx];
-
- // ts_comp column required the tag value for join filter
- if (!TSDB_COL_IS_TAG(pLocalExprInfo->base.pParam[0].pCol->flag)) {
- continue;
- }
-
- // todo use tag column index to optimize performance
- doSetTagValueInParam(pTable, pLocalExprInfo->base.pParam[0].pCol->colId, &pCtx[idx].tag,
- pLocalExprInfo->base.resSchema.type, pLocalExprInfo->base.resSchema.bytes);
-
- if (IS_NUMERIC_TYPE(pLocalExprInfo->base.resSchema.type) ||
- pLocalExprInfo->base.resSchema.type == TSDB_DATA_TYPE_BOOL ||
- pLocalExprInfo->base.resSchema.type == TSDB_DATA_TYPE_TIMESTAMP) {
- memcpy(pRuntimeEnv->tagVal + offset, &pCtx[idx].tag.i, pLocalExprInfo->base.resSchema.bytes);
- } else {
- if (pCtx[idx].tag.pz != NULL) {
- memcpy(pRuntimeEnv->tagVal + offset, pCtx[idx].tag.pz, pCtx[idx].tag.nLen);
- }
- }
-
- offset += pLocalExprInfo->base.resSchema.bytes;
- }
- }
-
- // set the tsBuf start position before check each data block
- if (pRuntimeEnv->pTsBuf != NULL) {
- setCtxTagForJoin(pRuntimeEnv, &pCtx[0], pExprInfo, pTable);
- }
-#endif
-}
-
void copyToSDataBlock(SSDataBlock* pBlock, int32_t* offset, SGroupResInfo* pGroupResInfo, SDiskbasedBuf* pResBuf) {
pBlock->info.rows = 0;
@@ -2115,7 +2017,7 @@ void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numO
}
static void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const int8_t* rowRes, bool keep);
-void doFilter(const SNode* pFilterNode, SSDataBlock* pBlock) {
+void doFilter(const SNode* pFilterNode, SSDataBlock* pBlock, SArray* pColMatchInfo) {
if (pFilterNode == NULL) {
return;
}
@@ -2129,8 +2031,9 @@ void doFilter(const SNode* pFilterNode, SSDataBlock* pBlock) {
code = filterSetDataFromSlotId(filter, ¶m1);
int8_t* rowRes = NULL;
+
// todo the keep seems never to be True??
- bool keep = filterExecute(filter, pBlock, &rowRes, NULL, param1.numOfCols);
+ bool keep = filterExecute(filter, pBlock, &rowRes, NULL, param1.numOfCols);
filterFreeInfo(filter);
extractQualifiedTupleByFilterResult(pBlock, rowRes, keep);
@@ -2152,7 +2055,7 @@ void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const int8_t* rowR
SColumnInfoData* pDst = taosArrayGet(px->pDataBlock, i);
SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, i);
- // For the reserved column, the value is not filled yet, so the whole column data may be NULL.
+ // it is a reserved column for scalar function, and no data in this column yet.
if (pSrc->pData == NULL) {
continue;
}
@@ -4042,12 +3945,6 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
if (pProjectInfo->existDataBlock) { // TODO refactor
SSDataBlock* pBlock = pProjectInfo->existDataBlock;
pProjectInfo->existDataBlock = NULL;
- *newgroup = true;
-
- // todo dynamic set tags
- // if (pTableQueryInfo != NULL) {
- // setTagValue(pOperator, pTableQueryInfo->pTable, pInfo->pCtx, pOperator->numOfExprs);
- // }
// the pDataBlock are always the same one, no need to call this again
setInputDataBlock(pOperator, pInfo->pCtx, pBlock, TSDB_ORDER_ASC);
@@ -4088,13 +3985,6 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
}
}
- // todo set tags
-
- // STableQueryInfo* pTableQueryInfo = pRuntimeEnv->current;
- // if (pTableQueryInfo != NULL) {
- // setTagValue(pOperator, pTableQueryInfo->pTable, pInfo->pCtx, pOperator->numOfExprs);
- // }
-
// the pDataBlock are always the same one, no need to call this again
int32_t code = getTableScanInfo(pOperator->pDownstream[0], &order, &scanFlag);
@@ -4434,10 +4324,6 @@ void destroyBasicOperatorInfo(void* param, int32_t numOfOutput) {
doDestroyBasicInfo(pInfo, numOfOutput);
}
-void destroyMergeJoinOperator(void* param, int32_t numOfOutput) {
- SJoinOperatorInfo* pJoinOperator = (SJoinOperatorInfo*)param;
-}
-
void destroyAggOperatorInfo(void* param, int32_t numOfOutput) {
SAggOperatorInfo* pInfo = (SAggOperatorInfo*)param;
doDestroyBasicInfo(&pInfo->binfo, numOfOutput);
@@ -4491,9 +4377,9 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SExprInfo* p
goto _error;
}
- pInfo->limit = *pLimit;
- pInfo->slimit = *pSlimit;
- pInfo->curOffset = pLimit->offset;
+ pInfo->limit = *pLimit;
+ pInfo->slimit = *pSlimit;
+ pInfo->curOffset = pLimit->offset;
pInfo->curSOffset = pSlimit->offset;
pInfo->binfo.pRes = pResBlock;
@@ -4782,7 +4668,6 @@ static SArray* extractColumnInfo(SNodeList* pNodeList);
static SArray* createSortInfo(SNodeList* pNodeList);
static SArray* extractPartitionColInfo(SNodeList* pNodeList);
-static void setJoinColumnInfo(SColumnInfo* pColumn, const SColumnNode* pColumnNode);
SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHandle* pHandle,
uint64_t queryId, uint64_t taskId, STableGroupInfo* pTableGroupInfo) {
@@ -5451,147 +5336,3 @@ int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SExplainExecInfo
return TSDB_CODE_SUCCESS;
}
-
-static SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator) {
- SJoinOperatorInfo* pJoinInfo = pOperator->info;
-
- SSDataBlock* pRes = pJoinInfo->pRes;
- blockDataCleanup(pRes);
- blockDataEnsureCapacity(pRes, 4096);
-
- int32_t nrows = 0;
-
- while (1) {
- if (pJoinInfo->pLeft == NULL || pJoinInfo->leftPos >= pJoinInfo->pLeft->info.rows) {
- SOperatorInfo* ds1 = pOperator->pDownstream[0];
- publishOperatorProfEvent(ds1, QUERY_PROF_BEFORE_OPERATOR_EXEC);
- pJoinInfo->pLeft = ds1->fpSet.getNextFn(ds1);
- publishOperatorProfEvent(ds1, QUERY_PROF_AFTER_OPERATOR_EXEC);
-
- pJoinInfo->leftPos = 0;
- if (pJoinInfo->pLeft == NULL) {
- setTaskStatus(pOperator->pTaskInfo, TASK_COMPLETED);
- break;
- }
- }
-
- if (pJoinInfo->pRight == NULL || pJoinInfo->rightPos >= pJoinInfo->pRight->info.rows) {
- SOperatorInfo* ds2 = pOperator->pDownstream[1];
- publishOperatorProfEvent(ds2, QUERY_PROF_BEFORE_OPERATOR_EXEC);
- pJoinInfo->pRight = ds2->fpSet.getNextFn(ds2);
- publishOperatorProfEvent(ds2, QUERY_PROF_AFTER_OPERATOR_EXEC);
-
- pJoinInfo->rightPos = 0;
- if (pJoinInfo->pRight == NULL) {
- setTaskStatus(pOperator->pTaskInfo, TASK_COMPLETED);
- break;
- }
- }
-
- SColumnInfoData* pLeftCol = taosArrayGet(pJoinInfo->pLeft->pDataBlock, pJoinInfo->leftCol.slotId);
- char* pLeftVal = colDataGetData(pLeftCol, pJoinInfo->leftPos);
-
- SColumnInfoData* pRightCol = taosArrayGet(pJoinInfo->pRight->pDataBlock, pJoinInfo->rightCol.slotId);
- char* pRightVal = colDataGetData(pRightCol, pJoinInfo->rightPos);
-
- // only the timestamp match support for ordinary table
- ASSERT(pLeftCol->info.type == TSDB_DATA_TYPE_TIMESTAMP);
- if (*(int64_t*)pLeftVal == *(int64_t*)pRightVal) {
- for (int32_t i = 0; i < pOperator->numOfExprs; ++i) {
- SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, i);
-
- SExprInfo* pExprInfo = &pOperator->pExpr[i];
-
- int32_t blockId = pExprInfo->base.pParam[0].pCol->dataBlockId;
- int32_t slotId = pExprInfo->base.pParam[0].pCol->slotId;
-
- SColumnInfoData* pSrc = NULL;
- if (pJoinInfo->pLeft->info.blockId == blockId) {
- pSrc = taosArrayGet(pJoinInfo->pLeft->pDataBlock, slotId);
- } else {
- pSrc = taosArrayGet(pJoinInfo->pRight->pDataBlock, slotId);
- }
-
- if (colDataIsNull_s(pSrc, pJoinInfo->leftPos)) {
- colDataAppendNULL(pDst, nrows);
- } else {
- char* p = colDataGetData(pSrc, pJoinInfo->leftPos);
- colDataAppend(pDst, nrows, p, false);
- }
- }
-
- pJoinInfo->leftPos += 1;
- pJoinInfo->rightPos += 1;
-
- nrows += 1;
- } else if (*(int64_t*)pLeftVal < *(int64_t*)pRightVal) {
- pJoinInfo->leftPos += 1;
-
- if (pJoinInfo->leftPos >= pJoinInfo->pLeft->info.rows) {
- continue;
- }
- } else if (*(int64_t*)pLeftVal > *(int64_t*)pRightVal) {
- pJoinInfo->rightPos += 1;
- if (pJoinInfo->rightPos >= pJoinInfo->pRight->info.rows) {
- continue;
- }
- }
-
- // the pDataBlock are always the same one, no need to call this again
- pRes->info.rows = nrows;
- if (pRes->info.rows >= pOperator->resultInfo.threshold) {
- break;
- }
- }
-
- return (pRes->info.rows > 0) ? pRes : NULL;
-}
-
-SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SExprInfo* pExprInfo,
- int32_t numOfCols, SSDataBlock* pResBlock, SNode* pOnCondition,
- SExecTaskInfo* pTaskInfo) {
- SJoinOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SJoinOperatorInfo));
- SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
- if (pOperator == NULL || pInfo == NULL) {
- goto _error;
- }
-
- initResultSizeInfo(pOperator, 4096);
-
- pInfo->pRes = pResBlock;
- pOperator->name = "MergeJoinOperator";
- pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_JOIN;
- pOperator->blocking = false;
- pOperator->status = OP_NOT_OPENED;
- pOperator->pExpr = pExprInfo;
- pOperator->numOfExprs = numOfCols;
- pOperator->info = pInfo;
- pOperator->pTaskInfo = pTaskInfo;
-
- SOperatorNode* pNode = (SOperatorNode*)pOnCondition;
- setJoinColumnInfo(&pInfo->leftCol, (SColumnNode*)pNode->pLeft);
- setJoinColumnInfo(&pInfo->rightCol, (SColumnNode*)pNode->pRight);
-
- pOperator->fpSet =
- createOperatorFpSet(operatorDummyOpenFn, doMergeJoin, NULL, NULL, destroyMergeJoinOperator, NULL, NULL, NULL);
- int32_t code = appendDownstream(pOperator, pDownstream, numOfDownstream);
- if (code != TSDB_CODE_SUCCESS) {
- goto _error;
- }
-
- return pOperator;
-
-_error:
- taosMemoryFree(pInfo);
- taosMemoryFree(pOperator);
- pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY;
- return NULL;
-}
-
-void setJoinColumnInfo(SColumnInfo* pColumn, const SColumnNode* pColumnNode) {
- pColumn->slotId = pColumnNode->slotId;
- pColumn->type = pColumnNode->node.resType.type;
- pColumn->bytes = pColumnNode->node.resType.bytes;
- pColumn->precision = pColumnNode->node.resType.precision;
- pColumn->scale = pColumnNode->node.resType.scale;
-}
diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c
index d8ccac8cea..483ac67e5e 100644
--- a/source/libs/executor/src/groupoperator.c
+++ b/source/libs/executor/src/groupoperator.c
@@ -318,7 +318,7 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
while(1) {
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
- doFilter(pInfo->pCondition, pRes);
+ doFilter(pInfo->pCondition, pRes, NULL);
bool hasRemain = hasRemainDataInCurrentGroup(&pInfo->groupResInfo);
if (!hasRemain) {
diff --git a/source/libs/executor/src/indexoperator.c b/source/libs/executor/src/indexoperator.c
index c17fcacf1f..2c204e9356 100644
--- a/source/libs/executor/src/indexoperator.c
+++ b/source/libs/executor/src/indexoperator.c
@@ -398,6 +398,10 @@ static int32_t sifExecOper(SOperatorNode *node, SIFCtx *ctx, SIFParam *output) {
output->status = SFLT_ACCURATE_INDEX;
}
+ if (ctx->noExec) {
+ SIF_RET(code);
+ }
+
return operFn(¶ms[0], nParam > 1 ? ¶ms[1] : NULL, output);
_return:
taosMemoryFree(params);
diff --git a/source/libs/executor/src/joinoperator.c b/source/libs/executor/src/joinoperator.c
new file mode 100644
index 0000000000..d7d6d96346
--- /dev/null
+++ b/source/libs/executor/src/joinoperator.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "function.h"
+#include "os.h"
+#include "querynodes.h"
+#include "tdatablock.h"
+#include "tmsg.h"
+#include "executorimpl.h"
+#include "tcompare.h"
+#include "thash.h"
+#include "ttypes.h"
+
+static void setJoinColumnInfo(SColumnInfo* pColumn, const SColumnNode* pColumnNode);
+static SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator);
+static void destroyMergeJoinOperator(void* param, int32_t numOfOutput);
+static void extractTimeCondition(SJoinOperatorInfo *Info, SLogicConditionNode* pLogicConditionNode);
+
+SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SExprInfo* pExprInfo,
+ int32_t numOfCols, SSDataBlock* pResBlock, SNode* pOnCondition,
+ SExecTaskInfo* pTaskInfo) {
+ SJoinOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SJoinOperatorInfo));
+ SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
+ if (pOperator == NULL || pInfo == NULL) {
+ goto _error;
+ }
+
+ initResultSizeInfo(pOperator, 4096);
+
+ pInfo->pRes = pResBlock;
+ pOperator->name = "MergeJoinOperator";
+ pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_JOIN;
+ pOperator->blocking = false;
+ pOperator->status = OP_NOT_OPENED;
+ pOperator->pExpr = pExprInfo;
+ pOperator->numOfExprs = numOfCols;
+ pOperator->info = pInfo;
+ pOperator->pTaskInfo = pTaskInfo;
+
+ if (nodeType(pOnCondition) == QUERY_NODE_OPERATOR) {
+ SOperatorNode* pNode = (SOperatorNode*)pOnCondition;
+ setJoinColumnInfo(&pInfo->leftCol, (SColumnNode*)pNode->pLeft);
+ setJoinColumnInfo(&pInfo->rightCol, (SColumnNode*)pNode->pRight);
+ } else if (nodeType(pOnCondition) == QUERY_NODE_LOGIC_CONDITION) {
+ extractTimeCondition(pInfo, (SLogicConditionNode*) pOnCondition);
+ }
+
+ pOperator->fpSet =
+ createOperatorFpSet(operatorDummyOpenFn, doMergeJoin, NULL, NULL, destroyMergeJoinOperator, NULL, NULL, NULL);
+ int32_t code = appendDownstream(pOperator, pDownstream, numOfDownstream);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+
+ return pOperator;
+
+ _error:
+ taosMemoryFree(pInfo);
+ taosMemoryFree(pOperator);
+ pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY;
+ return NULL;
+}
+
+void setJoinColumnInfo(SColumnInfo* pColumn, const SColumnNode* pColumnNode) {
+ pColumn->slotId = pColumnNode->slotId;
+ pColumn->type = pColumnNode->node.resType.type;
+ pColumn->bytes = pColumnNode->node.resType.bytes;
+ pColumn->precision = pColumnNode->node.resType.precision;
+ pColumn->scale = pColumnNode->node.resType.scale;
+}
+
+void destroyMergeJoinOperator(void* param, int32_t numOfOutput) {
+ SJoinOperatorInfo* pJoinOperator = (SJoinOperatorInfo*)param;
+}
+
+SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator) {
+ SJoinOperatorInfo* pJoinInfo = pOperator->info;
+
+ SSDataBlock* pRes = pJoinInfo->pRes;
+ blockDataCleanup(pRes);
+ blockDataEnsureCapacity(pRes, 4096);
+
+ int32_t nrows = 0;
+
+ while (1) {
+ // todo extract method
+ if (pJoinInfo->pLeft == NULL || pJoinInfo->leftPos >= pJoinInfo->pLeft->info.rows) {
+ SOperatorInfo* ds1 = pOperator->pDownstream[0];
+ publishOperatorProfEvent(ds1, QUERY_PROF_BEFORE_OPERATOR_EXEC);
+ pJoinInfo->pLeft = ds1->fpSet.getNextFn(ds1);
+ publishOperatorProfEvent(ds1, QUERY_PROF_AFTER_OPERATOR_EXEC);
+
+ pJoinInfo->leftPos = 0;
+ if (pJoinInfo->pLeft == NULL) {
+ setTaskStatus(pOperator->pTaskInfo, TASK_COMPLETED);
+ break;
+ }
+ }
+
+ if (pJoinInfo->pRight == NULL || pJoinInfo->rightPos >= pJoinInfo->pRight->info.rows) {
+ SOperatorInfo* ds2 = pOperator->pDownstream[1];
+ publishOperatorProfEvent(ds2, QUERY_PROF_BEFORE_OPERATOR_EXEC);
+ pJoinInfo->pRight = ds2->fpSet.getNextFn(ds2);
+ publishOperatorProfEvent(ds2, QUERY_PROF_AFTER_OPERATOR_EXEC);
+
+ pJoinInfo->rightPos = 0;
+ if (pJoinInfo->pRight == NULL) {
+ setTaskStatus(pOperator->pTaskInfo, TASK_COMPLETED);
+ break;
+ }
+ }
+
+ SColumnInfoData* pLeftCol = taosArrayGet(pJoinInfo->pLeft->pDataBlock, pJoinInfo->leftCol.slotId);
+ char* pLeftVal = colDataGetData(pLeftCol, pJoinInfo->leftPos);
+
+ SColumnInfoData* pRightCol = taosArrayGet(pJoinInfo->pRight->pDataBlock, pJoinInfo->rightCol.slotId);
+ char* pRightVal = colDataGetData(pRightCol, pJoinInfo->rightPos);
+
+ // only the timestamp match support for ordinary table
+ ASSERT(pLeftCol->info.type == TSDB_DATA_TYPE_TIMESTAMP);
+ if (*(int64_t*)pLeftVal == *(int64_t*)pRightVal) {
+ for (int32_t i = 0; i < pOperator->numOfExprs; ++i) {
+ SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, i);
+
+ SExprInfo* pExprInfo = &pOperator->pExpr[i];
+
+ int32_t blockId = pExprInfo->base.pParam[0].pCol->dataBlockId;
+ int32_t slotId = pExprInfo->base.pParam[0].pCol->slotId;
+ int32_t rowIndex = -1;
+
+ SColumnInfoData* pSrc = NULL;
+ if (pJoinInfo->pLeft->info.blockId == blockId) {
+ pSrc = taosArrayGet(pJoinInfo->pLeft->pDataBlock, slotId);
+ rowIndex = pJoinInfo->leftPos;
+ } else {
+ pSrc = taosArrayGet(pJoinInfo->pRight->pDataBlock, slotId);
+ rowIndex = pJoinInfo->rightPos;
+ }
+
+ if (colDataIsNull_s(pSrc, rowIndex)) {
+ colDataAppendNULL(pDst, nrows);
+ } else {
+ char* p = colDataGetData(pSrc, rowIndex);
+ colDataAppend(pDst, nrows, p, false);
+ }
+ }
+
+ pJoinInfo->leftPos += 1;
+ pJoinInfo->rightPos += 1;
+
+ nrows += 1;
+ } else if (*(int64_t*)pLeftVal < *(int64_t*)pRightVal) {
+ pJoinInfo->leftPos += 1;
+
+ if (pJoinInfo->leftPos >= pJoinInfo->pLeft->info.rows) {
+ continue;
+ }
+ } else if (*(int64_t*)pLeftVal > *(int64_t*)pRightVal) {
+ pJoinInfo->rightPos += 1;
+ if (pJoinInfo->rightPos >= pJoinInfo->pRight->info.rows) {
+ continue;
+ }
+ }
+
+ // the pDataBlock are always the same one, no need to call this again
+ pRes->info.rows = nrows;
+ if (pRes->info.rows >= pOperator->resultInfo.threshold) {
+ break;
+ }
+ }
+
+ return (pRes->info.rows > 0) ? pRes : NULL;
+}
+
+static void extractTimeCondition(SJoinOperatorInfo *pInfo, SLogicConditionNode* pLogicConditionNode) {
+ int32_t len = LIST_LENGTH(pLogicConditionNode->pParameterList);
+
+ for(int32_t i = 0; i < len; ++i) {
+ SNode* pNode = nodesListGetNode(pLogicConditionNode->pParameterList, i);
+ if (nodeType(pNode) == QUERY_NODE_OPERATOR) {
+ SOperatorNode* pn1 = (SOperatorNode*)pNode;
+ setJoinColumnInfo(&pInfo->leftCol, (SColumnNode*)pn1->pLeft);
+ setJoinColumnInfo(&pInfo->rightCol, (SColumnNode*)pn1->pRight);
+ break;
+ }
+ }
+}
diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c
index c6cb01e8fb..08539206a6 100644
--- a/source/libs/executor/src/scanoperator.c
+++ b/source/libs/executor/src/scanoperator.c
@@ -159,6 +159,8 @@ static bool overlapWithTimeWindow(SInterval* pInterval, SDataBlockInfo* pBlockIn
return false;
}
+static void addTagPseudoColumnData(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock);
+
static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableScanInfo, SSDataBlock* pBlock,
uint32_t* status) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
@@ -238,8 +240,15 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca
}
relocateColumnData(pBlock, pTableScanInfo->pColMatchInfo, pCols);
+
+ // currently only the tbname pseudo column
+ if (pTableScanInfo->numOfPseudoExpr > 0) {
+ addTagPseudoColumnData(pTableScanInfo, pBlock);
+ }
+
// todo record the filter time cost
- doFilter(pTableScanInfo->pFilterNode, pBlock);
+ doFilter(pTableScanInfo->pFilterNode, pBlock, pTableScanInfo->pColMatchInfo);
+
if (pBlock->info.rows == 0) {
pCost->filterOutBlocks += 1;
qDebug("%s data block filter out, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo),
@@ -260,7 +269,7 @@ static void prepareForDescendingScan(STableScanInfo* pTableScanInfo, SqlFunction
pTableScanInfo->cond.order = TSDB_ORDER_DESC;
}
-static void addTagPseudoColumnData(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock) {
+void addTagPseudoColumnData(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock) {
// currently only the tbname pseudo column
if (pTableScanInfo->numOfPseudoExpr == 0) {
return;
@@ -282,20 +291,7 @@ static void addTagPseudoColumnData(STableScanInfo* pTableScanInfo, SSDataBlock*
// this is to handle the tbname
if (fmIsScanPseudoColumnFunc(functionId)) {
- struct SScalarFuncExecFuncs fpSet = {0};
- fmGetScalarFuncExecFuncs(functionId, &fpSet);
-
- SColumnInfoData infoData = {0};
- infoData.info.type = TSDB_DATA_TYPE_BIGINT;
- infoData.info.bytes = sizeof(uint64_t);
- colInfoDataEnsureCapacity(&infoData, 0, 1);
-
- colDataAppendInt64(&infoData, 0, &pBlock->info.uid);
- SScalarParam srcParam = {
- .numOfRows = pBlock->info.rows, .param = pTableScanInfo->readHandle.meta, .columnData = &infoData};
-
- SScalarParam param = {.columnData = pColInfoData};
- fpSet.process(&srcParam, 1, ¶m);
+ setTbNameColData(pTableScanInfo->readHandle.meta, pBlock, pColInfoData, functionId);
} else { // these are tags
const char* p = metaGetTableTagVal(&mr.me, pExpr->base.pParam[0].pCol->colId);
for (int32_t i = 0; i < pBlock->info.rows; ++i) {
@@ -307,6 +303,23 @@ static void addTagPseudoColumnData(STableScanInfo* pTableScanInfo, SSDataBlock*
metaReaderClear(&mr);
}
+void setTbNameColData(void* pMeta, const SSDataBlock* pBlock, SColumnInfoData* pColInfoData, int32_t functionId) {
+ struct SScalarFuncExecFuncs fpSet = {0};
+ fmGetScalarFuncExecFuncs(functionId, &fpSet);
+
+ SColumnInfoData infoData = {0};
+ infoData.info.type = TSDB_DATA_TYPE_BIGINT;
+ infoData.info.bytes = sizeof(uint64_t);
+ colInfoDataEnsureCapacity(&infoData, 0, 1);
+
+ colDataAppendInt64(&infoData, 0, (int64_t*) &pBlock->info.uid);
+ SScalarParam srcParam = {
+ .numOfRows = pBlock->info.rows, .param = pMeta, .columnData = &infoData};
+
+ SScalarParam param = {.columnData = pColInfoData};
+ fpSet.process(&srcParam, 1, ¶m);
+}
+
static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
STableScanInfo* pTableScanInfo = pOperator->info;
SSDataBlock* pBlock = pTableScanInfo->pResBlock;
@@ -330,11 +343,6 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
continue;
}
- // currently only the tbname pseudo column
- if (pTableScanInfo->numOfPseudoExpr > 0) {
- addTagPseudoColumnData(pTableScanInfo, pBlock);
- }
-
return pBlock;
}
@@ -750,7 +758,7 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
return NULL;
}
rows = pBlockInfo->rows;
- doFilter(pInfo->pCondition, pInfo->pRes);
+ doFilter(pInfo->pCondition, pInfo->pRes, NULL);
break;
}
diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c
index 10dc482462..79e675e2df 100644
--- a/source/libs/executor/src/timewindowoperator.c
+++ b/source/libs/executor/src/timewindowoperator.c
@@ -773,7 +773,6 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) {
break;
}
- // setTagValue(pOperator, pRuntimeEnv->current->pTable, pInfo->pCtx, pOperator->numOfExprs);
// the pDataBlock are always the same one, no need to call this again
setInputDataBlock(pOperator, pInfo->binfo.pCtx, pBlock, order, MAIN_SCAN, true);
STableQueryInfo* pTableQueryInfo = pInfo->pCurrent;
@@ -1062,8 +1061,6 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
// The timewindows that overlaps the timestamps of the input pBlock need to be recalculated and return to the
// caller. Note that all the time window are not close till now.
-
- // setTagValue(pOperator, pRuntimeEnv->current->pTable, pInfo->pCtx, pOperator->numOfExprs);
// the pDataBlock are always the same one, no need to call this again
setInputDataBlock(pOperator, pInfo->binfo.pCtx, pBlock, order, MAIN_SCAN, true);
if (pInfo->invertible) {
@@ -1377,7 +1374,6 @@ static SSDataBlock* doAllIntervalAgg(SOperatorInfo* pOperator) {
break;
}
- // setTagValue(pOperator, pRuntimeEnv->current->pTable, pIntervalInfo->pCtx, pOperator->numOfExprs);
// the pDataBlock are always the same one, no need to call this again
setInputDataBlock(pOperator, pSliceInfo->binfo.pCtx, pBlock, order, MAIN_SCAN, true);
// hashAllIntervalAgg(pOperator, &pSliceInfo->binfo.resultRowInfo, pBlock, 0);
diff --git a/source/libs/executor/test/index_executor_tests.cpp b/source/libs/executor/test/index_executor_tests.cpp
index 5b03da034e..2449bd1da1 100644
--- a/source/libs/executor/test/index_executor_tests.cpp
+++ b/source/libs/executor/test/index_executor_tests.cpp
@@ -249,7 +249,7 @@ TEST(testCase, index_filter_varify) {
sifMakeOpNode(&opNode, OP_TYPE_LOWER_THAN, TSDB_DATA_TYPE_DOUBLE, pLeft, pRight);
SIdxFltStatus st = idxGetFltStatus(opNode);
- EXPECT_EQ(st, SFLT_COARSE_INDEX);
+ EXPECT_EQ(st, SFLT_ACCURATE_INDEX);
nodesDestroyNode(res);
}
{
@@ -269,7 +269,7 @@ TEST(testCase, index_filter_varify) {
sifMakeOpNode(&opNode, OP_TYPE_GREATER_THAN, TSDB_DATA_TYPE_DOUBLE, pLeft, pRight);
SIdxFltStatus st = idxGetFltStatus(opNode);
- EXPECT_EQ(st, SFLT_COARSE_INDEX);
+ EXPECT_EQ(st, SFLT_ACCURATE_INDEX);
nodesDestroyNode(res);
}
}
diff --git a/source/libs/function/inc/udfc.h b/source/libs/function/inc/udfc.h
index a693e476e8..f414c2b29e 100644
--- a/source/libs/function/inc/udfc.h
+++ b/source/libs/function/inc/udfc.h
@@ -43,7 +43,7 @@ int32_t setupUdf(SUdfInfo* udf, UdfcFuncHandle* handle);
int32_t callUdf(UdfcFuncHandle handle, int8_t step, char *state, int32_t stateSize, SSDataBlock input, char **newstate,
int32_t *newStateSize, SSDataBlock *output);
-int32_t teardownUdf(UdfcFuncHandle handle);
+int32_t doTeardownUdf(UdfcFuncHandle handle);
typedef struct SUdfSetupRequest {
char udfName[16]; //
diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c
index 3a388d1c07..4841e05267 100644
--- a/source/libs/function/src/tudf.c
+++ b/source/libs/function/src/tudf.c
@@ -25,8 +25,6 @@
#include "functionMgt.h"
//TODO: add unit test
-//TODO: include all global variable under context struct
-
typedef struct SUdfdData {
bool startCalled;
bool needCleanUp;
@@ -310,6 +308,12 @@ enum {
};
int64_t gUdfTaskSeqNum = 0;
+typedef struct SUdfcFuncStub {
+ char udfName[TSDB_FUNC_NAME_LEN];
+ UdfcFuncHandle handle;
+ int32_t refCount;
+} SUdfcFuncStub;
+
typedef struct SUdfcProxy {
char udfdPipeName[PATH_MAX + UDF_LISTEN_PIPE_NAME_LEN + 2];
uv_barrier_t initBarrier;
@@ -325,12 +329,15 @@ typedef struct SUdfcProxy {
QUEUE taskQueue;
QUEUE uvProcTaskQueue;
+ uv_mutex_t udfStubsMutex;
+ SArray* udfStubs; // SUdfcFuncStub
+
int8_t initialized;
} SUdfcProxy;
SUdfcProxy gUdfdProxy = {0};
-typedef struct SClientUdfUvSession {
+typedef struct SUdfcUvSession {
SUdfcProxy *udfc;
int64_t severHandle;
uv_pipe_t *udfUvPipe;
@@ -338,7 +345,9 @@ typedef struct SClientUdfUvSession {
int8_t outputType;
int32_t outputLen;
int32_t bufSize;
-} SClientUdfUvSession;
+
+ char udfName[TSDB_FUNC_NAME_LEN];
+} SUdfcUvSession;
typedef struct SClientUvTaskNode {
SUdfcProxy *udfc;
@@ -361,7 +370,7 @@ typedef struct SClientUvTaskNode {
typedef struct SClientUdfTask {
int8_t type;
- SClientUdfUvSession *session;
+ SUdfcUvSession *session;
int32_t errCode;
@@ -393,7 +402,7 @@ typedef struct SClientUvConn {
uv_pipe_t *pipe;
QUEUE taskQueue;
SClientConnBuf readBuf;
- SClientUdfUvSession *session;
+ SUdfcUvSession *session;
} SClientUvConn;
enum {
@@ -1072,6 +1081,8 @@ int32_t udfcQueueUvTask(SClientUvTaskNode *uvTask) {
int32_t udfcStartUvTask(SClientUvTaskNode *uvTask) {
fnTrace("event loop start uv task. task: %d, %p", uvTask->type, uvTask);
+ int32_t code = 0;
+
switch (uvTask->type) {
case UV_TASK_CONNECT: {
uv_pipe_t *pipe = taosMemoryMalloc(sizeof(uv_pipe_t));
@@ -1091,22 +1102,34 @@ int32_t udfcStartUvTask(SClientUvTaskNode *uvTask) {
uv_connect_t *connReq = taosMemoryMalloc(sizeof(uv_connect_t));
connReq->data = uvTask;
uv_pipe_connect(connReq, pipe, uvTask->udfc->udfdPipeName, onUdfcPipeConnect);
+ code = 0;
break;
}
case UV_TASK_REQ_RSP: {
uv_pipe_t *pipe = uvTask->pipe;
- uv_write_t *write = taosMemoryMalloc(sizeof(uv_write_t));
- write->data = uvTask;
- int err = uv_write(write, (uv_stream_t *)pipe, &uvTask->reqBuf, 1, onUdfcPipetWrite);
- if (err != 0) {
- fnError("udfc event loop start req/rsp task uv_write failed. code: %s", uv_strerror(err));
+ if (pipe == NULL) {
+ code = TSDB_CODE_UDF_PIPE_NO_PIPE;
+ } else {
+ uv_write_t *write = taosMemoryMalloc(sizeof(uv_write_t));
+ write->data = uvTask;
+ int err = uv_write(write, (uv_stream_t *)pipe, &uvTask->reqBuf, 1, onUdfcPipetWrite);
+ if (err != 0) {
+ fnError("udfc event loop start req/rsp task uv_write failed. code: %s", uv_strerror(err));
+ }
+ code = err;
}
break;
}
case UV_TASK_DISCONNECT: {
- SClientUvConn *conn = uvTask->pipe->data;
- QUEUE_INSERT_TAIL(&conn->taskQueue, &uvTask->connTaskQueue);
- uv_close((uv_handle_t *) uvTask->pipe, onUdfcPipeClose);
+ uv_pipe_t *pipe = uvTask->pipe;
+ if (pipe == NULL) {
+ code = TSDB_CODE_UDF_PIPE_NO_PIPE;
+ } else {
+ SClientUvConn *conn = pipe->data;
+ QUEUE_INSERT_TAIL(&conn->taskQueue, &uvTask->connTaskQueue);
+ uv_close((uv_handle_t *)uvTask->pipe, onUdfcPipeClose);
+ code = 0;
+ }
break;
}
default: {
@@ -1115,10 +1138,10 @@ int32_t udfcStartUvTask(SClientUvTaskNode *uvTask) {
}
}
- return 0;
+ return code;
}
-void udfClientAsyncCb(uv_async_t *async) {
+void udfcAsyncTaskCb(uv_async_t *async) {
SUdfcProxy *udfc = async->data;
QUEUE wq;
@@ -1133,6 +1156,9 @@ void udfClientAsyncCb(uv_async_t *async) {
int32_t code = udfcStartUvTask(task);
if (code == 0) {
QUEUE_INSERT_TAIL(&udfc->uvProcTaskQueue, &task->procTaskQueue);
+ } else {
+ task->errCode = code;
+ uv_sem_post(&task->taskSem);
}
}
@@ -1179,7 +1205,7 @@ void constructUdfService(void *argsThread) {
SUdfcProxy *udfc = (SUdfcProxy *)argsThread;
uv_loop_init(&udfc->uvLoop);
- uv_async_init(&udfc->uvLoop, &udfc->loopTaskAync, udfClientAsyncCb);
+ uv_async_init(&udfc->uvLoop, &udfc->loopTaskAync, udfcAsyncTaskCb);
udfc->loopTaskAync.data = udfc;
uv_async_init(&udfc->uvLoop, &udfc->loopStopAsync, udfStopAsyncCb);
udfc->loopStopAsync.data = udfc;
@@ -1205,6 +1231,8 @@ int32_t udfcOpen() {
atomic_store_8(&proxy->udfcState, UDFC_STATE_READY);
proxy->udfcState = UDFC_STATE_READY;
uv_barrier_wait(&proxy->initBarrier);
+ uv_mutex_init(&proxy->udfStubsMutex);
+ proxy->udfStubs = taosArrayInit(8, sizeof(SUdfcFuncStub));
fnInfo("udfc initialized")
return 0;
}
@@ -1221,6 +1249,8 @@ int32_t udfcClose() {
uv_thread_join(&udfc->loopThread);
uv_mutex_destroy(&udfc->taskQueueMutex);
uv_barrier_destroy(&udfc->initBarrier);
+ taosArrayDestroy(udfc->udfStubs);
+ uv_mutex_destroy(&udfc->udfStubsMutex);
udfc->udfcState = UDFC_STATE_INITAL;
fnInfo("udfc cleaned up");
return 0;
@@ -1242,14 +1272,13 @@ int32_t udfcRunUdfUvTask(SClientUdfTask *task, int8_t uvTaskType) {
return task->errCode;
}
-int32_t setupUdf(char udfName[], UdfcFuncHandle *funcHandle) {
- fnInfo("udfc setup udf. udfName: %s", udfName);
+int32_t doSetupUdf(char udfName[], UdfcFuncHandle *funcHandle) {
if (gUdfdProxy.udfcState != UDFC_STATE_READY) {
return TSDB_CODE_UDF_INVALID_STATE;
}
SClientUdfTask *task = taosMemoryCalloc(1,sizeof(SClientUdfTask));
task->errCode = 0;
- task->session = taosMemoryCalloc(1, sizeof(SClientUdfUvSession));
+ task->session = taosMemoryCalloc(1, sizeof(SUdfcUvSession));
task->session->udfc = &gUdfdProxy;
task->type = UDF_TASK_SETUP;
@@ -1269,10 +1298,11 @@ int32_t setupUdf(char udfName[], UdfcFuncHandle *funcHandle) {
task->session->outputType = rsp->outputType;
task->session->outputLen = rsp->outputLen;
task->session->bufSize = rsp->bufSize;
+ strcpy(task->session->udfName, udfName);
if (task->errCode != 0) {
- fnError("failed to setup udf. err: %d", task->errCode)
+ fnError("failed to setup udf. udfname: %s, err: %d", udfName, task->errCode)
} else {
- fnInfo("sucessfully setup udf func handle. handle: %p", task->session);
+ fnInfo("sucessfully setup udf func handle. udfName: %s, handle: %p", udfName, task->session);
*funcHandle = task->session;
}
int32_t err = task->errCode;
@@ -1283,14 +1313,14 @@ int32_t setupUdf(char udfName[], UdfcFuncHandle *funcHandle) {
int32_t callUdf(UdfcFuncHandle handle, int8_t callType, SSDataBlock *input, SUdfInterBuf *state, SUdfInterBuf *state2,
SSDataBlock* output, SUdfInterBuf *newState) {
fnTrace("udfc call udf. callType: %d, funcHandle: %p", callType, handle);
- SClientUdfUvSession *session = (SClientUdfUvSession *) handle;
+ SUdfcUvSession *session = (SUdfcUvSession *) handle;
if (session->udfUvPipe == NULL) {
fnError("No pipe to udfd");
return TSDB_CODE_UDF_PIPE_NO_PIPE;
}
SClientUdfTask *task = taosMemoryCalloc(1, sizeof(SClientUdfTask));
task->errCode = 0;
- task->session = (SClientUdfUvSession *) handle;
+ task->session = (SUdfcUvSession *) handle;
task->type = UDF_TASK_CALL;
SUdfCallRequest *req = &task->_call.req;
@@ -1356,7 +1386,7 @@ int32_t callUdf(UdfcFuncHandle handle, int8_t callType, SSDataBlock *input, SUdf
return err;
}
-int32_t callUdfAggInit(UdfcFuncHandle handle, SUdfInterBuf *interBuf) {
+int32_t doCallUdfAggInit(UdfcFuncHandle handle, SUdfInterBuf *interBuf) {
int8_t callType = TSDB_UDF_CALL_AGG_INIT;
int32_t err = callUdf(handle, callType, NULL, NULL, NULL, NULL, interBuf);
@@ -1366,7 +1396,7 @@ int32_t callUdfAggInit(UdfcFuncHandle handle, SUdfInterBuf *interBuf) {
// input: block, state
// output: interbuf,
-int32_t callUdfAggProcess(UdfcFuncHandle handle, SSDataBlock *block, SUdfInterBuf *state, SUdfInterBuf *newState) {
+int32_t doCallUdfAggProcess(UdfcFuncHandle handle, SSDataBlock *block, SUdfInterBuf *state, SUdfInterBuf *newState) {
int8_t callType = TSDB_UDF_CALL_AGG_PROC;
int32_t err = callUdf(handle, callType, block, state, NULL, NULL, newState);
return err;
@@ -1374,7 +1404,7 @@ int32_t callUdfAggProcess(UdfcFuncHandle handle, SSDataBlock *block, SUdfInterBu
// input: interbuf1, interbuf2
// output: resultBuf
-int32_t callUdfAggMerge(UdfcFuncHandle handle, SUdfInterBuf *interBuf1, SUdfInterBuf *interBuf2, SUdfInterBuf *resultBuf) {
+int32_t doCallUdfAggMerge(UdfcFuncHandle handle, SUdfInterBuf *interBuf1, SUdfInterBuf *interBuf2, SUdfInterBuf *resultBuf) {
int8_t callType = TSDB_UDF_CALL_AGG_MERGE;
int32_t err = callUdf(handle, callType, NULL, interBuf1, interBuf2, NULL, resultBuf);
return err;
@@ -1382,13 +1412,13 @@ int32_t callUdfAggMerge(UdfcFuncHandle handle, SUdfInterBuf *interBuf1, SUdfInte
// input: interBuf
// output: resultData
-int32_t callUdfAggFinalize(UdfcFuncHandle handle, SUdfInterBuf *interBuf, SUdfInterBuf *resultData) {
+int32_t doCallUdfAggFinalize(UdfcFuncHandle handle, SUdfInterBuf *interBuf, SUdfInterBuf *resultData) {
int8_t callType = TSDB_UDF_CALL_AGG_FIN;
int32_t err = callUdf(handle, callType, NULL, interBuf, NULL, NULL, resultData);
return err;
}
-int32_t callUdfScalarFunc(UdfcFuncHandle handle, SScalarParam *input, int32_t numOfCols, SScalarParam* output) {
+int32_t doCallUdfScalarFunc(UdfcFuncHandle handle, SScalarParam *input, int32_t numOfCols, SScalarParam* output) {
int8_t callType = TSDB_UDF_CALL_SCALA_PROC;
SSDataBlock inputBlock = {0};
convertScalarParamToDataBlock(input, numOfCols, &inputBlock);
@@ -1400,12 +1430,68 @@ int32_t callUdfScalarFunc(UdfcFuncHandle handle, SScalarParam *input, int32_t nu
return err;
}
-int32_t teardownUdf(UdfcFuncHandle handle) {
- fnInfo("tear down udf. udf func handle: %p", handle);
+int compareUdfcFuncSub(const void* elem1, const void* elem2) {
+ SUdfcFuncStub *stub1 = (SUdfcFuncStub *)elem1;
+ SUdfcFuncStub *stub2 = (SUdfcFuncStub *)elem2;
+ return strcmp(stub1->udfName, stub2->udfName);
+}
+
+int32_t accquireUdfFuncHandle(char* udfName, UdfcFuncHandle* pHandle) {
+ int32_t code = 0;
+ uv_mutex_lock(&gUdfdProxy.udfStubsMutex);
+ SUdfcFuncStub key = {0};
+ strcpy(key.udfName, udfName);
+ SUdfcFuncStub *foundStub = taosArraySearch(gUdfdProxy.udfStubs, &key, compareUdfcFuncSub, TD_EQ);
+ if (foundStub != NULL) {
+ uv_mutex_unlock(&gUdfdProxy.udfStubsMutex);
+ *pHandle = foundStub->handle;
+ ++foundStub->refCount;
+ return 0;
+ }
+ *pHandle = NULL;
+ code = doSetupUdf(udfName, pHandle);
+ if (code == TSDB_CODE_SUCCESS) {
+ SUdfcFuncStub stub = {0};
+ strcpy(stub.udfName, udfName);
+ stub.handle = *pHandle;
+ ++stub.refCount;
+ taosArrayPush(gUdfdProxy.udfStubs, &stub);
+ taosArraySort(gUdfdProxy.udfStubs, compareUdfcFuncSub);
+ } else {
+ *pHandle = NULL;
+ }
+
+ uv_mutex_unlock(&gUdfdProxy.udfStubsMutex);
+ return code;
+}
+
+void releaseUdfFuncHandle(char* udfName) {
+ uv_mutex_lock(&gUdfdProxy.udfStubsMutex);
+ SUdfcFuncStub key = {0};
+ strcpy(key.udfName, udfName);
+ SUdfcFuncStub *foundStub = taosArraySearch(gUdfdProxy.udfStubs, &key, compareUdfcFuncSub, TD_EQ);
+ ASSERT(foundStub);
+ --foundStub->refCount;
+ ASSERT(foundStub->refCount>=0);
+ uv_mutex_unlock(&gUdfdProxy.udfStubsMutex);
+}
+
+int32_t callUdfScalarFunc(char *udfName, SScalarParam *input, int32_t numOfCols, SScalarParam *output) {
+ UdfcFuncHandle handle = NULL;
+ int32_t code = accquireUdfFuncHandle(udfName, &handle);
+ if (code != 0) {
+ return code;
+ }
+ code = doCallUdfScalarFunc(handle, input, numOfCols, output);
+ releaseUdfFuncHandle(udfName);
+ return code;
+}
+
+int32_t doTeardownUdf(UdfcFuncHandle handle) {
+ SUdfcUvSession *session = (SUdfcUvSession *) handle;
- SClientUdfUvSession *session = (SClientUdfUvSession *) handle;
if (session->udfUvPipe == NULL) {
- fnError("pipe to udfd does not exist");
+ fnError("tear down udf. pipe to udfd does not exist. udf name: %s", session->udfName);
return TSDB_CODE_UDF_PIPE_NO_PIPE;
}
@@ -1420,7 +1506,6 @@ int32_t teardownUdf(UdfcFuncHandle handle) {
udfcRunUdfUvTask(task, UV_TASK_REQ_RSP);
SUdfTeardownResponse *rsp = &task->_teardown.rsp;
-
int32_t err = task->errCode;
udfcRunUdfUvTask(task, UV_TASK_DISCONNECT);
@@ -1428,12 +1513,14 @@ int32_t teardownUdf(UdfcFuncHandle handle) {
taosMemoryFree(task->session);
taosMemoryFree(task);
+ fnInfo("tear down udf. udf name: %s, udf func handle: %p", session->udfName, handle);
+
return err;
}
//memory layout |---SUdfAggRes----|-----final result-----|---inter result----|
typedef struct SUdfAggRes {
- SClientUdfUvSession *session;
+ SUdfcUvSession *session;
int8_t finalResNum;
int8_t interResNum;
char* finalResBuf;
@@ -1454,11 +1541,11 @@ bool udfAggInit(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo* pResult
}
UdfcFuncHandle handle;
int32_t udfCode = 0;
- if ((udfCode = setupUdf((char*)pCtx->udfName, &handle)) != 0) {
- fnError("udfAggInit error. step setupUdf. udf code: %d", udfCode);
+ if ((udfCode = accquireUdfFuncHandle((char *)pCtx->udfName, &handle)) != 0) {
+ fnError("udfAggInit error. step doSetupUdf. udf code: %d", udfCode);
return false;
}
- SClientUdfUvSession *session = (SClientUdfUvSession *)handle;
+ SUdfcUvSession *session = (SUdfcUvSession *)handle;
SUdfAggRes *udfRes = (SUdfAggRes*)GET_ROWCELL_INTERBUF(pResultCellInfo);
int32_t envSize = sizeof(SUdfAggRes) + session->outputLen + session->bufSize;
memset(udfRes, 0, envSize);
@@ -1466,10 +1553,10 @@ bool udfAggInit(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo* pResult
udfRes->finalResBuf = (char*)udfRes + sizeof(SUdfAggRes);
udfRes->interResBuf = (char*)udfRes + sizeof(SUdfAggRes) + session->outputLen;
- udfRes->session = (SClientUdfUvSession *)handle;
+ udfRes->session = (SUdfcUvSession *)handle;
SUdfInterBuf buf = {0};
- if ((udfCode = callUdfAggInit(handle, &buf)) != 0) {
- fnError("udfAggInit error. step callUdfAggInit. udf code: %d", udfCode);
+ if ((udfCode = doCallUdfAggInit(handle, &buf)) != 0) {
+ fnError("udfAggInit error. step doCallUdfAggInit. udf code: %d", udfCode);
return false;
}
udfRes->interResNum = buf.numOfResult;
@@ -1482,7 +1569,10 @@ int32_t udfAggProcess(struct SqlFunctionCtx *pCtx) {
int32_t numOfCols = pInput->numOfInputCols;
SUdfAggRes* udfRes = (SUdfAggRes *)GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
- SClientUdfUvSession *session = udfRes->session;
+ SUdfcUvSession *session = udfRes->session;
+ if (session == NULL) {
+ return TSDB_CODE_UDF_NO_FUNC_HANDLE;
+ }
udfRes->finalResBuf = (char*)udfRes + sizeof(SUdfAggRes);
udfRes->interResBuf = (char*)udfRes + sizeof(SUdfAggRes) + session->outputLen;
@@ -1513,7 +1603,7 @@ int32_t udfAggProcess(struct SqlFunctionCtx *pCtx) {
.numOfResult = udfRes->interResNum};
SUdfInterBuf newState = {0};
- int32_t udfCode = callUdfAggProcess(session, inputBlock, &state, &newState);
+ int32_t udfCode = doCallUdfAggProcess(session, inputBlock, &state, &newState);
if (udfCode != 0) {
fnError("udfAggProcess error. code: %d", udfCode);
newState.numOfResult = 0;
@@ -1534,7 +1624,10 @@ int32_t udfAggProcess(struct SqlFunctionCtx *pCtx) {
int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock) {
SUdfAggRes* udfRes = (SUdfAggRes *)GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
- SClientUdfUvSession *session = udfRes->session;
+ SUdfcUvSession *session = udfRes->session;
+ if (session == NULL) {
+ return TSDB_CODE_UDF_NO_FUNC_HANDLE;
+ }
udfRes->finalResBuf = (char*)udfRes + sizeof(SUdfAggRes);
udfRes->interResBuf = (char*)udfRes + sizeof(SUdfAggRes) + session->outputLen;
@@ -1544,9 +1637,9 @@ int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock) {
.bufLen = session->bufSize,
.numOfResult = udfRes->interResNum};
int32_t udfCallCode= 0;
- udfCallCode= callUdfAggFinalize(session, &state, &resultBuf);
- if (udfCallCode!= 0) {
- fnError("udfAggFinalize error. callUdfAggFinalize step. udf code:%d", udfCallCode);
+ udfCallCode= doCallUdfAggFinalize(session, &state, &resultBuf);
+ if (udfCallCode != 0) {
+ fnError("udfAggFinalize error. doCallUdfAggFinalize step. udf code:%d", udfCallCode);
GET_RES_INFO(pCtx)->numOfRes = 0;
} else {
memcpy(udfRes->finalResBuf, resultBuf.buf, session->outputLen);
@@ -1554,11 +1647,28 @@ int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock) {
GET_RES_INFO(pCtx)->numOfRes = udfRes->finalResNum;
}
- int32_t code = teardownUdf(session);
- if (code != 0) {
- fnError("udfAggFinalize error. teardownUdf step. udf code: %d", code);
+ int32_t numOfResults = functionFinalizeWithResultBuf(pCtx, pBlock, udfRes->finalResBuf);
+ releaseUdfFuncHandle(pCtx->udfName);
+ return udfCallCode == 0 ? numOfResults : udfCallCode;
+}
+
+int32_t cleanUpUdfs() {
+ uv_mutex_lock(&gUdfdProxy.udfStubsMutex);
+ int32_t i = 0;
+ SArray* udfStubs = taosArrayInit(16, sizeof(SUdfcFuncStub));
+ while (i < taosArrayGetSize(gUdfdProxy.udfStubs)) {
+ SUdfcFuncStub *stub = taosArrayGet(gUdfdProxy.udfStubs, i);
+ if (stub->refCount == 0) {
+ fnInfo("tear down udf. udf name: %s, handle: %p", stub->udfName, stub->handle);
+ doTeardownUdf(stub->handle);
+ } else {
+ fnInfo("udf still in use. udf name: %s, ref count: %d, handle: %p", stub->udfName, stub->refCount, stub->handle);
+ taosArrayPush(udfStubs, stub);
+ }
+ ++i;
}
-
- return functionFinalizeWithResultBuf(pCtx, pBlock, udfRes->finalResBuf);
-
+ taosArrayDestroy(gUdfdProxy.udfStubs);
+ gUdfdProxy.udfStubs = udfStubs;
+ uv_mutex_unlock(&gUdfdProxy.udfStubsMutex);
+ return 0;
}
\ No newline at end of file
diff --git a/source/libs/function/test/runUdf.c b/source/libs/function/test/runUdf.c
index a8d6fbd715..d7c539e5c2 100644
--- a/source/libs/function/test/runUdf.c
+++ b/source/libs/function/test/runUdf.c
@@ -47,7 +47,7 @@ int main(int argc, char *argv[]) {
UdfcFuncHandle handle;
- setupUdf("udf1", &handle);
+ doSetupUdf("udf1", &handle);
SSDataBlock block = {0};
SSDataBlock *pBlock = █
@@ -73,12 +73,12 @@ int main(int argc, char *argv[]) {
input.numOfRows = pBlock->info.rows;
input.columnData = taosArrayGet(pBlock->pDataBlock, 0);
SScalarParam output = {0};
- callUdfScalarFunc(handle, &input, 1, &output);
+ doCallUdfScalarFunc(handle, &input, 1, &output);
SColumnInfoData *col = output.columnData;
for (int32_t i = 0; i < output.numOfRows; ++i) {
fprintf(stderr, "%d\t%d\n", i, *(int32_t *)(col->pData + i * sizeof(int32_t)));
}
- teardownUdf(handle);
+ doTeardownUdf(handle);
udfcClose();
}
diff --git a/source/libs/index/inc/indexComm.h b/source/libs/index/inc/indexComm.h
index 4cab71f92c..043404f48f 100644
--- a/source/libs/index/inc/indexComm.h
+++ b/source/libs/index/inc/indexComm.h
@@ -37,6 +37,8 @@ TExeCond tDoCommpare(__compar_fn_t func, int8_t comType, void* a, void* b);
_cache_range_compare indexGetCompare(RangeType ty);
+int32_t indexConvertData(void* src, int8_t type, void** dst);
+
#ifdef __cplusplus
}
#endif
diff --git a/source/libs/index/inc/indexInt.h b/source/libs/index/inc/indexInt.h
index 7b7050d80e..27c380beaf 100644
--- a/source/libs/index/inc/indexInt.h
+++ b/source/libs/index/inc/indexInt.h
@@ -46,9 +46,7 @@ typedef struct SIndexStat {
} SIndexStat;
struct SIndex {
-#ifdef USE_LUCENE
- index_t* index;
-#endif
+ int64_t refId;
void* cache;
void* tindex;
SHashObj* colObj; // < field name, field id>
@@ -124,6 +122,11 @@ typedef struct TFileCacheKey {
int indexFlushCacheToTFile(SIndex* sIdx, void*);
+int64_t indexAddRef(void* p);
+int32_t indexRemoveRef(int64_t ref);
+void indexAcquireRef(int64_t ref);
+void indexReleaseRef(int64_t ref);
+
int32_t indexSerialCacheKey(ICacheKey* key, char* buf);
// int32_t indexSerialKey(ICacheKey* key, char* buf);
// int32_t indexSerialTermKey(SIndexTerm* itm, char* buf);
diff --git a/source/libs/index/src/index.c b/source/libs/index/src/index.c
index d56413f840..46f2f7a93b 100644
--- a/source/libs/index/src/index.c
+++ b/source/libs/index/src/index.c
@@ -19,7 +19,10 @@
#include "indexInt.h"
#include "indexTfile.h"
#include "indexUtil.h"
+#include "tcoding.h"
+#include "tdataformat.h"
#include "tdef.h"
+#include "tref.h"
#include "tsched.h"
#ifdef USE_LUCENE
@@ -27,36 +30,40 @@
#endif
#define INDEX_NUM_OF_THREADS 4
-#define INDEX_QUEUE_SIZE 200
+#define INDEX_QUEUE_SIZE 200
-void* indexQhandle = NULL;
-
-#define INDEX_DATA_BOOL_NULL 0x02
-#define INDEX_DATA_TINYINT_NULL 0x80
-#define INDEX_DATA_SMALLINT_NULL 0x8000
-#define INDEX_DATA_INT_NULL 0x80000000L
-#define INDEX_DATA_BIGINT_NULL 0x8000000000000000L
+#define INDEX_DATA_BOOL_NULL 0x02
+#define INDEX_DATA_TINYINT_NULL 0x80
+#define INDEX_DATA_SMALLINT_NULL 0x8000
+#define INDEX_DATA_INT_NULL 0x80000000L
+#define INDEX_DATA_BIGINT_NULL 0x8000000000000000L
#define INDEX_DATA_TIMESTAMP_NULL TSDB_DATA_BIGINT_NULL
-#define INDEX_DATA_FLOAT_NULL 0x7FF00000 // it is an NAN
-#define INDEX_DATA_DOUBLE_NULL 0x7FFFFF0000000000L // an NAN
-#define INDEX_DATA_NCHAR_NULL 0xFFFFFFFF
-#define INDEX_DATA_BINARY_NULL 0xFF
-#define INDEX_DATA_JSON_NULL 0xFFFFFFFF
-#define INDEX_DATA_JSON_null 0xFFFFFFFE
+#define INDEX_DATA_FLOAT_NULL 0x7FF00000 // it is an NAN
+#define INDEX_DATA_DOUBLE_NULL 0x7FFFFF0000000000L // an NAN
+#define INDEX_DATA_NCHAR_NULL 0xFFFFFFFF
+#define INDEX_DATA_BINARY_NULL 0xFF
+#define INDEX_DATA_JSON_NULL 0xFFFFFFFF
+#define INDEX_DATA_JSON_null 0xFFFFFFFE
#define INDEX_DATA_JSON_NOT_NULL 0x01
-#define INDEX_DATA_UTINYINT_NULL 0xFF
+#define INDEX_DATA_UTINYINT_NULL 0xFF
#define INDEX_DATA_USMALLINT_NULL 0xFFFF
-#define INDEX_DATA_UINT_NULL 0xFFFFFFFF
-#define INDEX_DATA_UBIGINT_NULL 0xFFFFFFFFFFFFFFFFL
+#define INDEX_DATA_UINT_NULL 0xFFFFFFFF
+#define INDEX_DATA_UBIGINT_NULL 0xFFFFFFFFFFFFFFFFL
-#define INDEX_DATA_NULL_STR "NULL"
+#define INDEX_DATA_NULL_STR "NULL"
#define INDEX_DATA_NULL_STR_L "null"
+void* indexQhandle = NULL;
+int32_t indexRefMgt;
+
+static void indexDestroy(void* sIdx);
+
void indexInit() {
// refactor later
indexQhandle = taosInitScheduler(INDEX_QUEUE_SIZE, INDEX_NUM_OF_THREADS, "index");
+ indexRefMgt = taosOpenRef(10, indexDestroy);
}
void indexCleanUp() {
// refacto later
@@ -100,7 +107,12 @@ int indexOpen(SIndexOpts* opts, const char* path, SIndex** index) {
sIdx->cVersion = 1;
sIdx->path = tstrdup(path);
taosThreadMutexInit(&sIdx->mtx, NULL);
+
+ sIdx->refId = indexAddRef(sIdx);
+ taosAcquireRef(indexRefMgt, sIdx->refId);
+
*index = sIdx;
+
return 0;
END:
@@ -112,8 +124,9 @@ END:
return -1;
}
-void indexClose(SIndex* sIdx) {
- void* iter = taosHashIterate(sIdx->colObj, NULL);
+void indexDestroy(void* handle) {
+ SIndex* sIdx = handle;
+ void* iter = taosHashIterate(sIdx->colObj, NULL);
while (iter) {
IndexCache** pCache = iter;
if (*pCache) {
@@ -128,6 +141,27 @@ void indexClose(SIndex* sIdx) {
taosMemoryFree(sIdx);
return;
}
+void indexClose(SIndex* sIdx) {
+ indexReleaseRef(sIdx->refId);
+ indexRemoveRef(sIdx->refId);
+}
+int64_t indexAddRef(void* p) {
+ // impl
+ return taosAddRef(indexRefMgt, p);
+}
+int32_t indexRemoveRef(int64_t ref) {
+ // impl later
+ return taosRemoveRef(indexRefMgt, ref);
+}
+
+void indexAcquireRef(int64_t ref) {
+ // impl
+ taosAcquireRef(indexRefMgt, ref);
+}
+void indexReleaseRef(int64_t ref) {
+ // impl
+ taosReleaseRef(indexRefMgt, ref);
+}
int indexPut(SIndex* index, SIndexMultiTerm* fVals, uint64_t uid) {
// TODO(yihao): reduce the lock range
@@ -222,6 +256,7 @@ SIndexTerm* indexTermCreate(int64_t suid, SIndexOperOnColumn oper, uint8_t colTy
tm->operType = oper;
tm->colType = colType;
+#if 0
tm->colName = (char*)taosMemoryCalloc(1, nColName + 1);
memcpy(tm->colName, colName, nColName);
tm->nColName = nColName;
@@ -229,6 +264,22 @@ SIndexTerm* indexTermCreate(int64_t suid, SIndexOperOnColumn oper, uint8_t colTy
tm->colVal = (char*)taosMemoryCalloc(1, nColVal + 1);
memcpy(tm->colVal, colVal, nColVal);
tm->nColVal = nColVal;
+#endif
+
+#if 1
+
+ tm->colName = (char*)taosMemoryCalloc(1, nColName + 1);
+ memcpy(tm->colName, colName, nColName);
+ tm->nColName = nColName;
+
+ char* buf = NULL;
+ int32_t len = indexConvertData((void*)colVal, INDEX_TYPE_GET_TYPE(colType), (void**)&buf);
+ assert(len != -1);
+
+ tm->colVal = buf;
+ tm->nColVal = len;
+
+#endif
return tm;
}
@@ -457,6 +508,7 @@ int indexFlushCacheToTFile(SIndex* sIdx, void* cache) {
} else {
indexInfo("success to merge , time cost: %" PRId64 "ms", cost / 1000);
}
+ indexReleaseRef(sIdx->refId);
return ret;
}
void iterateValueDestroy(IterateValue* value, bool destroy) {
diff --git a/source/libs/index/src/indexCache.c b/source/libs/index/src/indexCache.c
index 5294ac8c19..d4231619ec 100644
--- a/source/libs/index/src/indexCache.c
+++ b/source/libs/index/src/indexCache.c
@@ -460,8 +460,11 @@ int indexCacheSchedToMerge(IndexCache* pCache) {
schedMsg.fp = doMergeWork;
schedMsg.ahandle = pCache;
schedMsg.thandle = NULL;
+ // schedMsg.thandle = taosMemoryCalloc(1, sizeof(int64_t));
+ // memcpy((char*)(schedMsg.thandle), (char*)&(pCache->index->refId), sizeof(int64_t));
schedMsg.msg = NULL;
+ indexAcquireRef(pCache->index->refId);
taosScheduleTask(indexQhandle, &schedMsg);
return 0;
diff --git a/source/libs/index/src/indexComm.c b/source/libs/index/src/indexComm.c
index 9e85a6680a..ac26ed1fab 100644
--- a/source/libs/index/src/indexComm.c
+++ b/source/libs/index/src/indexComm.c
@@ -16,25 +16,33 @@
#include "indexComm.h"
#include "index.h"
#include "indexInt.h"
+#include "tcoding.h"
#include "tcompare.h"
+#include "tdataformat.h"
char JSON_COLUMN[] = "JSON";
char JSON_VALUE_DELIM = '&';
+static __compar_fn_t indexGetCompar(int8_t type) {
+ if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
+ return (__compar_fn_t)strcmp;
+ }
+ return getComparFunc(type, 0);
+}
static TExeCond tCompareLessThan(void* a, void* b, int8_t type) {
- __compar_fn_t func = getComparFunc(type, 0);
+ __compar_fn_t func = indexGetCompar(type);
return tDoCommpare(func, QUERY_LESS_THAN, a, b);
}
static TExeCond tCompareLessEqual(void* a, void* b, int8_t type) {
- __compar_fn_t func = getComparFunc(type, 0);
+ __compar_fn_t func = indexGetCompar(type);
return tDoCommpare(func, QUERY_LESS_EQUAL, a, b);
}
static TExeCond tCompareGreaterThan(void* a, void* b, int8_t type) {
- __compar_fn_t func = getComparFunc(type, 0);
+ __compar_fn_t func = indexGetCompar(type);
return tDoCommpare(func, QUERY_GREATER_THAN, a, b);
}
static TExeCond tCompareGreaterEqual(void* a, void* b, int8_t type) {
- __compar_fn_t func = getComparFunc(type, 0);
+ __compar_fn_t func = indexGetCompar(type);
return tDoCommpare(func, QUERY_GREATER_EQUAL, a, b);
}
@@ -120,3 +128,101 @@ char* indexPackJsonDataPrefix(SIndexTerm* itm, int32_t* skip) {
return buf;
}
+
+int32_t indexConvertData(void* src, int8_t type, void** dst) {
+ int tlen = -1;
+ switch (type) {
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ tlen = taosEncodeFixedI64(NULL, *(int64_t*)src);
+ *dst = taosMemoryCalloc(1, tlen + 1);
+ tlen = taosEncodeFixedI64(dst, *(int64_t*)src);
+ break;
+ case TSDB_DATA_TYPE_BOOL:
+ case TSDB_DATA_TYPE_UTINYINT:
+ tlen = taosEncodeFixedU8(NULL, *(uint8_t*)src);
+ *dst = taosMemoryCalloc(1, tlen + 1);
+ tlen = taosEncodeFixedU8(dst, *(uint8_t*)src);
+ break;
+ case TSDB_DATA_TYPE_TINYINT:
+ tlen = taosEncodeFixedI8(NULL, *(uint8_t*)src);
+ *dst = taosMemoryCalloc(1, tlen + 1);
+ tlen = taosEncodeFixedI8(dst, *(uint8_t*)src);
+ break;
+ case TSDB_DATA_TYPE_SMALLINT:
+ tlen = taosEncodeFixedI16(NULL, *(int16_t*)src);
+ *dst = taosMemoryCalloc(1, tlen + 1);
+ tlen = taosEncodeFixedI16(dst, *(int16_t*)src);
+ break;
+ case TSDB_DATA_TYPE_USMALLINT:
+ tlen = taosEncodeFixedU16(NULL, *(uint16_t*)src);
+ *dst = taosMemoryCalloc(1, tlen + 1);
+ tlen = taosEncodeFixedU16(dst, *(uint16_t*)src);
+ break;
+ case TSDB_DATA_TYPE_INT:
+ tlen = taosEncodeFixedI32(NULL, *(int32_t*)src);
+ *dst = taosMemoryCalloc(1, tlen + 1);
+ tlen = taosEncodeFixedI32(dst, *(int32_t*)src);
+ break;
+ case TSDB_DATA_TYPE_FLOAT:
+ tlen = taosEncodeBinary(NULL, src, sizeof(float));
+ *dst = taosMemoryCalloc(1, tlen + 1);
+ tlen = taosEncodeBinary(dst, src, sizeof(float));
+ break;
+ case TSDB_DATA_TYPE_UINT:
+ tlen = taosEncodeFixedU32(NULL, *(uint32_t*)src);
+ *dst = taosMemoryCalloc(1, tlen + 1);
+ tlen = taosEncodeFixedU32(dst, *(uint32_t*)src);
+ break;
+ case TSDB_DATA_TYPE_BIGINT:
+ tlen = taosEncodeFixedI64(NULL, *(uint32_t*)src);
+ *dst = taosMemoryCalloc(1, tlen + 1);
+ tlen = taosEncodeFixedI64(dst, *(uint32_t*)src);
+ break;
+ case TSDB_DATA_TYPE_DOUBLE:
+ tlen = taosEncodeBinary(NULL, src, sizeof(double));
+ *dst = taosMemoryCalloc(1, tlen + 1);
+ tlen = taosEncodeBinary(dst, src, sizeof(double));
+ break;
+ case TSDB_DATA_TYPE_UBIGINT:
+ tlen = taosEncodeFixedU64(NULL, *(uint32_t*)src);
+ *dst = taosMemoryCalloc(1, tlen + 1);
+ tlen = taosEncodeFixedU64(dst, *(uint32_t*)src);
+ break;
+ case TSDB_DATA_TYPE_NCHAR: {
+ tlen = taosEncodeBinary(NULL, varDataVal(src), varDataLen(src));
+ *dst = taosMemoryCalloc(1, tlen + 1);
+ tlen = taosEncodeBinary(dst, varDataVal(src), varDataLen(src));
+
+ break;
+ }
+ case TSDB_DATA_TYPE_VARCHAR: { // TSDB_DATA_TYPE_BINARY
+#if 1
+ tlen = taosEncodeBinary(NULL, src, strlen(src));
+ *dst = taosMemoryCalloc(1, tlen + 1);
+ tlen = taosEncodeBinary(dst, src, strlen(src));
+ break;
+#endif
+ }
+ case TSDB_DATA_TYPE_VARBINARY:
+#if 1
+ tlen = taosEncodeBinary(NULL, src, strlen(src));
+ *dst = taosMemoryCalloc(1, tlen + 1);
+ tlen = taosEncodeBinary(dst, src, strlen(src));
+ break;
+#endif
+ default:
+ TASSERT(0);
+ break;
+ }
+ *dst = *dst - tlen;
+ if (type != TSDB_DATA_TYPE_BINARY && type != TSDB_DATA_TYPE_NCHAR && type != TSDB_DATA_TYPE_VARBINARY &&
+ type == TSDB_DATA_TYPE_VARCHAR) {
+ uint8_t* p = *dst;
+ for (int i = 0; i < tlen; i++) {
+ if (p[i] == 0) {
+ p[i] = (uint8_t)'0';
+ }
+ }
+ }
+ return tlen;
+}
diff --git a/source/libs/index/src/indexFstUtil.c b/source/libs/index/src/indexFstUtil.c
index ec9a6943dc..a980c6b740 100644
--- a/source/libs/index/src/indexFstUtil.c
+++ b/source/libs/index/src/indexFstUtil.c
@@ -82,7 +82,10 @@ FstSlice fstSliceCreate(uint8_t* data, uint64_t len) {
str->ref = 1;
str->len = len;
str->data = taosMemoryMalloc(len * sizeof(uint8_t));
- memcpy(str->data, data, len);
+
+ if (data != NULL) {
+ memcpy(str->data, data, len);
+ }
FstSlice s = {.str = str, .start = 0, .end = len - 1};
return s;
diff --git a/source/libs/index/src/indexTfile.c b/source/libs/index/src/indexTfile.c
index 4cc2a4975f..b787da117d 100644
--- a/source/libs/index/src/indexTfile.c
+++ b/source/libs/index/src/indexTfile.c
@@ -469,13 +469,19 @@ static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTempR
while ((rt = streamWithStateNextWith(st, NULL)) != NULL) {
FstSlice* s = &rt->data;
- char* ch = (char*)fstSliceData(s, NULL);
- if (0 != strncmp(ch, p, skip)) {
+ int32_t sz = 0;
+ char* ch = (char*)fstSliceData(s, &sz);
+ char* tmp = taosMemoryCalloc(1, sz + 1);
+ memcpy(tmp, ch, sz);
+
+ if (0 != strncmp(tmp, p, skip)) {
swsResultDestroy(rt);
+ taosMemoryFree(tmp);
break;
}
- TExeCond cond = cmpFn(ch + skip, tem->colVal, tem->colType);
+ TExeCond cond = cmpFn(tmp + skip, tem->colVal, INDEX_TYPE_GET_TYPE(tem->colType));
+
if (MATCH == cond) {
tfileReaderLoadTableIds((TFileReader*)reader, rt->out.out, tr->total);
} else if (CONTINUE == cond) {
@@ -483,6 +489,7 @@ static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTempR
swsResultDestroy(rt);
break;
}
+ taosMemoryFree(tmp);
swsResultDestroy(rt);
}
streamWithStateDestroy(st);
diff --git a/source/libs/index/test/jsonUT.cc b/source/libs/index/test/jsonUT.cc
index 08d58da07f..3de7cb66f2 100644
--- a/source/libs/index/test/jsonUT.cc
+++ b/source/libs/index/test/jsonUT.cc
@@ -17,12 +17,32 @@
#include "tutil.h"
static std::string dir = "/tmp/json";
+static std::string logDir = "/tmp/log";
+
+static void initLog() {
+ const char* defaultLogFileNamePrefix = "taoslog";
+ const int32_t maxLogFileNum = 10;
+
+ tsAsyncLog = 0;
+ sDebugFlag = 143;
+ strcpy(tsLogDir, logDir.c_str());
+ taosRemoveDir(tsLogDir);
+ taosMkDir(tsLogDir);
+
+ if (taosInitLog(defaultLogFileNamePrefix, maxLogFileNum) < 0) {
+ printf("failed to open log file in directory:%s\n", tsLogDir);
+ }
+}
class JsonEnv : public ::testing::Test {
protected:
virtual void SetUp() {
+ taosRemoveDir(logDir.c_str());
+ taosMkDir(logDir.c_str());
taosRemoveDir(dir.c_str());
taosMkDir(dir.c_str());
printf("set up\n");
+
+ initLog();
opts = indexOptsCreate();
int ret = tIndexJsonOpen(opts, dir.c_str(), &index);
assert(ret == 0);
diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c
index 6e0775ff17..763ccbf7a0 100644
--- a/source/libs/nodes/src/nodesCodeFuncs.c
+++ b/source/libs/nodes/src/nodesCodeFuncs.c
@@ -318,15 +318,19 @@ static int32_t jsonToTableComInfo(const SJson* pJson, void* pObj) {
STableComInfo* pNode = (STableComInfo*)pObj;
int32_t code;
- tjsonGetNumberValue(pJson, jkTableComInfoNumOfTags, pNode->numOfTags, code);;
+ tjsonGetNumberValue(pJson, jkTableComInfoNumOfTags, pNode->numOfTags, code);
+ ;
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkTableComInfoPrecision, pNode->precision, code);;
+ tjsonGetNumberValue(pJson, jkTableComInfoPrecision, pNode->precision, code);
+ ;
}
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkTableComInfoNumOfColumns, pNode->numOfColumns, code);;
+ tjsonGetNumberValue(pJson, jkTableComInfoNumOfColumns, pNode->numOfColumns, code);
+ ;
}
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkTableComInfoRowSize, pNode->rowSize, code);;
+ tjsonGetNumberValue(pJson, jkTableComInfoRowSize, pNode->rowSize, code);
+ ;
}
return code;
@@ -358,12 +362,15 @@ static int32_t jsonToSchema(const SJson* pJson, void* pObj) {
SSchema* pNode = (SSchema*)pObj;
int32_t code;
- tjsonGetNumberValue(pJson, jkSchemaType, pNode->type, code);;
+ tjsonGetNumberValue(pJson, jkSchemaType, pNode->type, code);
+ ;
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkSchemaColId, pNode->colId, code);;
+ tjsonGetNumberValue(pJson, jkSchemaColId, pNode->colId, code);
+ ;
}
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkSchemaBytes, pNode->bytes, code);;
+ tjsonGetNumberValue(pJson, jkSchemaBytes, pNode->bytes, code);
+ ;
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetStringValue(pJson, jkSchemaName, pNode->name);
@@ -415,21 +422,27 @@ static int32_t jsonToTableMeta(const SJson* pJson, void* pObj) {
STableMeta* pNode = (STableMeta*)pObj;
int32_t code;
- tjsonGetNumberValue(pJson, jkTableMetaVgId, pNode->vgId, code);;
+ tjsonGetNumberValue(pJson, jkTableMetaVgId, pNode->vgId, code);
+ ;
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkTableMetaTableType, pNode->tableType, code);;
+ tjsonGetNumberValue(pJson, jkTableMetaTableType, pNode->tableType, code);
+ ;
}
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkTableMetaUid, pNode->uid, code);;
+ tjsonGetNumberValue(pJson, jkTableMetaUid, pNode->uid, code);
+ ;
}
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkTableMetaSuid, pNode->suid, code);;
+ tjsonGetNumberValue(pJson, jkTableMetaSuid, pNode->suid, code);
+ ;
}
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkTableMetaSversion, pNode->sversion, code);;
+ tjsonGetNumberValue(pJson, jkTableMetaSversion, pNode->sversion, code);
+ ;
}
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkTableMetaTversion, pNode->tversion, code);;
+ tjsonGetNumberValue(pJson, jkTableMetaTversion, pNode->tversion, code);
+ ;
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonToObject(pJson, jkTableMetaComInfo, jsonToTableComInfo, &pNode->tableInfo);
@@ -571,6 +584,37 @@ static int32_t jsonToLogicProjectNode(const SJson* pJson, void* pObj) {
return code;
}
+static const char* jkExchangeLogicPlanSrcGroupId = "SrcGroupId";
+static const char* jkExchangeLogicPlanSrcPrecision = "Precision";
+
+static int32_t logicExchangeNodeToJson(const void* pObj, SJson* pJson) {
+ const SExchangeLogicNode* pNode = (const SExchangeLogicNode*)pObj;
+
+ int32_t code = logicPlanNodeToJson(pObj, pJson);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkExchangeLogicPlanSrcGroupId, pNode->srcGroupId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkExchangeLogicPlanSrcPrecision, pNode->precision);
+ }
+
+ return code;
+}
+
+static int32_t jsonToLogicExchangeNode(const SJson* pJson, void* pObj) {
+ SExchangeLogicNode* pNode = (SExchangeLogicNode*)pObj;
+
+ int32_t code = jsonToLogicPlanNode(pJson, pObj);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetIntValue(pJson, jkExchangeLogicPlanSrcGroupId, &pNode->srcGroupId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetUTinyIntValue(pJson, jkExchangeLogicPlanSrcPrecision, &pNode->precision);
+ }
+
+ return code;
+}
+
static const char* jkFillLogicPlanMode = "Mode";
static const char* jkFillLogicPlanWStartTs = "WStartTs";
static const char* jkFillLogicPlanValues = "Values";
@@ -605,7 +649,8 @@ static int32_t jsonToLogicFillNode(const SJson* pJson, void* pObj) {
int32_t code = jsonToLogicPlanNode(pJson, pObj);
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkFillLogicPlanMode, pNode->mode, code);;
+ tjsonGetNumberValue(pJson, jkFillLogicPlanMode, pNode->mode, code);
+ ;
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkFillLogicPlanWStartTs, &pNode->pWStartTs);
@@ -881,7 +926,8 @@ static int32_t jsonToLogicSubplan(const SJson* pJson, void* pObj) {
code = jsonToNodeObject(pJson, jkLogicSubplanRootNode, (SNode**)&pNode->pNode);
}
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkLogicSubplanType, pNode->subplanType, code);;
+ tjsonGetNumberValue(pJson, jkLogicSubplanType, pNode->subplanType, code);
+ ;
}
int32_t objSize = 0;
if (TSDB_CODE_SUCCESS == code) {
@@ -1121,33 +1167,43 @@ static int32_t jsonToPhysiTableScanNode(const SJson* pJson, void* pObj) {
code = tjsonGetDoubleValue(pJson, jkTableScanPhysiPlanRatio, &pNode->ratio);
}
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkTableScanPhysiPlanDataRequired, pNode->dataRequired, code);;
+ tjsonGetNumberValue(pJson, jkTableScanPhysiPlanDataRequired, pNode->dataRequired, code);
+ ;
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeList(pJson, jkTableScanPhysiPlanDynamicScanFuncs, &pNode->pDynamicScanFuncs);
}
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkTableScanPhysiPlanInterval, pNode->interval, code);;
+ tjsonGetNumberValue(pJson, jkTableScanPhysiPlanInterval, pNode->interval, code);
+ ;
}
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkTableScanPhysiPlanOffset, pNode->offset, code);;
+ tjsonGetNumberValue(pJson, jkTableScanPhysiPlanOffset, pNode->offset, code);
+ ;
}
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkTableScanPhysiPlanSliding, pNode->sliding, code);;
+ tjsonGetNumberValue(pJson, jkTableScanPhysiPlanSliding, pNode->sliding, code);
+ ;
}
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkTableScanPhysiPlanIntervalUnit, pNode->intervalUnit, code);;
+ tjsonGetNumberValue(pJson, jkTableScanPhysiPlanIntervalUnit, pNode->intervalUnit, code);
+ ;
}
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkTableScanPhysiPlanSlidingUnit, pNode->slidingUnit, code);;
+ tjsonGetNumberValue(pJson, jkTableScanPhysiPlanSlidingUnit, pNode->slidingUnit, code);
+ ;
}
return code;
}
-static int32_t physiStreamScanNodeToJson(const void* pObj, SJson* pJson) { return physiTableScanNodeToJson(pObj, pJson); }
+static int32_t physiStreamScanNodeToJson(const void* pObj, SJson* pJson) {
+ return physiTableScanNodeToJson(pObj, pJson);
+}
-static int32_t jsonToPhysiStreamScanNode(const SJson* pJson, void* pObj) { return jsonToPhysiTableScanNode(pJson, pObj); }
+static int32_t jsonToPhysiStreamScanNode(const SJson* pJson, void* pObj) {
+ return jsonToPhysiTableScanNode(pJson, pObj);
+}
static const char* jkSysTableScanPhysiPlanMnodeEpSet = "MnodeEpSet";
static const char* jkSysTableScanPhysiPlanShowRewrite = "ShowRewrite";
@@ -1181,7 +1237,8 @@ static int32_t jsonToPhysiSysTableScanNode(const SJson* pJson, void* pObj) {
code = tjsonGetBoolValue(pJson, jkSysTableScanPhysiPlanShowRewrite, &pNode->showRewrite);
}
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkSysTableScanPhysiPlanAccountId, pNode->accountId, code);;
+ tjsonGetNumberValue(pJson, jkSysTableScanPhysiPlanAccountId, pNode->accountId, code);
+ ;
}
return code;
@@ -1265,7 +1322,8 @@ static int32_t jsonToPhysiJoinNode(const SJson* pJson, void* pObj) {
int32_t code = jsonToPhysicPlanNode(pJson, pObj);
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkJoinPhysiPlanJoinType, pNode->joinType, code);;
+ tjsonGetNumberValue(pJson, jkJoinPhysiPlanJoinType, pNode->joinType, code);
+ ;
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkJoinPhysiPlanOnConditions, &pNode->pOnConditions);
@@ -1427,10 +1485,12 @@ static int32_t jsonToPhysiWindowNode(const SJson* pJson, void* pObj) {
code = jsonToNodeObject(pJson, jkWindowPhysiPlanTsPk, (SNode**)&pNode->pTspk);
}
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkWindowPhysiPlanTriggerType, pNode->triggerType, code);;
+ tjsonGetNumberValue(pJson, jkWindowPhysiPlanTriggerType, pNode->triggerType, code);
+ ;
}
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkWindowPhysiPlanWatermark, pNode->watermark, code);;
+ tjsonGetNumberValue(pJson, jkWindowPhysiPlanWatermark, pNode->watermark, code);
+ ;
}
return code;
@@ -1526,7 +1586,8 @@ static int32_t jsonToPhysiFillNode(const SJson* pJson, void* pObj) {
int32_t code = jsonToPhysicPlanNode(pJson, pObj);
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkFillPhysiPlanMode, pNode->mode, code);;
+ tjsonGetNumberValue(pJson, jkFillPhysiPlanMode, pNode->mode, code);
+ ;
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkFillPhysiPlanWStartTs, &pNode->pWStartTs);
@@ -1565,7 +1626,8 @@ static int32_t jsonToPhysiSessionWindowNode(const SJson* pJson, void* pObj) {
int32_t code = jsonToPhysiWindowNode(pJson, pObj);
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkSessionWindowPhysiPlanGap, pNode->gap, code);;
+ tjsonGetNumberValue(pJson, jkSessionWindowPhysiPlanGap, pNode->gap, code);
+ ;
}
return code;
@@ -1727,7 +1789,8 @@ static int32_t jsonToSubplan(const SJson* pJson, void* pObj) {
int32_t code = tjsonToObject(pJson, jkSubplanId, jsonToSubplanId, &pNode->id);
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkSubplanType, pNode->subplanType, code);;
+ tjsonGetNumberValue(pJson, jkSubplanType, pNode->subplanType, code);
+ ;
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetIntValue(pJson, jkSubplanMsgType, &pNode->msgType);
@@ -1917,7 +1980,8 @@ static int32_t jsonToColumnNode(const SJson* pJson, void* pObj) {
code = tjsonGetSmallIntValue(pJson, jkColumnColId, &pNode->colId);
}
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkColumnColType, pNode->colType, code);;
+ tjsonGetNumberValue(pJson, jkColumnColType, pNode->colType, code);
+ ;
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetStringValue(pJson, jkColumnDbName, pNode->dbName);
@@ -2171,7 +2235,8 @@ static int32_t jsonToOperatorNode(const SJson* pJson, void* pObj) {
int32_t code = jsonToExprNode(pJson, pObj);
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkOperatorType, pNode->opType, code);;
+ tjsonGetNumberValue(pJson, jkOperatorType, pNode->opType, code);
+ ;
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkOperatorLeft, &pNode->pLeft);
@@ -2205,7 +2270,8 @@ static int32_t jsonToLogicConditionNode(const SJson* pJson, void* pObj) {
int32_t code = jsonToExprNode(pJson, pObj);
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkLogicCondType, pNode->condType, code);;
+ tjsonGetNumberValue(pJson, jkLogicCondType, pNode->condType, code);
+ ;
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeList(pJson, jkLogicCondParameters, &pNode->pParameterList);
@@ -2350,6 +2416,30 @@ static int32_t jsonToRealTableNode(const SJson* pJson, void* pObj) {
return code;
}
+static const char* jkTempTableSubquery = "Subquery";
+
+static int32_t tempTableNodeToJson(const void* pObj, SJson* pJson) {
+ const STempTableNode* pNode = (const STempTableNode*)pObj;
+
+ int32_t code = tableNodeToJson(pObj, pJson);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddObject(pJson, jkTempTableSubquery, nodeToJson, pNode->pSubquery);
+ }
+
+ return code;
+}
+
+static int32_t jsonToTempTableNode(const SJson* pJson, void* pObj) {
+ STempTableNode* pNode = (STempTableNode*)pObj;
+
+ int32_t code = jsonToTableNode(pJson, pObj);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeObject(pJson, jkTempTableSubquery, &pNode->pSubquery);
+ }
+
+ return code;
+}
+
static const char* jkGroupingSetType = "GroupingSetType";
static const char* jkGroupingSetParameter = "Parameters";
@@ -2387,10 +2477,12 @@ static int32_t jsonToOrderByExprNode(const SJson* pJson, void* pObj) {
int32_t code = jsonToNodeObject(pJson, jkOrderByExprExpr, &pNode->pExpr);
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkOrderByExprOrder, pNode->order, code);;
+ tjsonGetNumberValue(pJson, jkOrderByExprOrder, pNode->order, code);
+ ;
}
if (TSDB_CODE_SUCCESS == code) {
- tjsonGetNumberValue(pJson, jkOrderByExprNullOrder, pNode->nullOrder, code);;
+ tjsonGetNumberValue(pJson, jkOrderByExprNullOrder, pNode->nullOrder, code);
+ ;
}
return code;
@@ -2497,7 +2589,8 @@ static int32_t jsonToFillNode(const SJson* pJson, void* pObj) {
SFillNode* pNode = (SFillNode*)pObj;
int32_t code;
- tjsonGetNumberValue(pJson, jkFillMode, pNode->mode, code);;
+ tjsonGetNumberValue(pJson, jkFillMode, pNode->mode, code);
+ ;
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkFillValues, &pNode->pValues);
}
@@ -2663,6 +2756,60 @@ static int32_t jsonToDataBlockDescNode(const SJson* pJson, void* pObj) {
return code;
}
+static const char* jkSetOperatorOpType = "OpType";
+static const char* jkSetOperatorProjections = "Projections";
+static const char* jkSetOperatorLeft = "Left";
+static const char* jkSetOperatorRight = "Right";
+static const char* jkSetOperatorOrderByList = "OrderByList";
+static const char* jkSetOperatorLimit = "Limit";
+
+static int32_t setOperatorToJson(const void* pObj, SJson* pJson) {
+ const SSetOperator* pNode = (const SSetOperator*)pObj;
+
+ int32_t code = tjsonAddIntegerToObject(pJson, jkSetOperatorOpType, pNode->opType);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = nodeListToJson(pJson, jkSetOperatorProjections, pNode->pProjectionList);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddObject(pJson, jkSetOperatorLeft, nodeToJson, pNode->pLeft);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddObject(pJson, jkSetOperatorRight, nodeToJson, pNode->pRight);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = nodeListToJson(pJson, jkSetOperatorOrderByList, pNode->pOrderByList);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddObject(pJson, jkSetOperatorLimit, nodeToJson, pNode->pLimit);
+ }
+
+ return code;
+}
+
+static int32_t jsonToSetOperator(const SJson* pJson, void* pObj) {
+ SSetOperator* pNode = (SSetOperator*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ tjsonGetNumberValue(pJson, jkSetOperatorOpType, pNode->opType, code);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeList(pJson, jkSetOperatorProjections, &pNode->pProjectionList);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeObject(pJson, jkSetOperatorLeft, &pNode->pLeft);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeObject(pJson, jkSetOperatorRight, &pNode->pRight);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeList(pJson, jkSetOperatorOrderByList, &pNode->pOrderByList);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeObject(pJson, jkSetOperatorLimit, &pNode->pLimit);
+ }
+
+ return code;
+}
+
static const char* jkSelectStmtDistinct = "Distinct";
static const char* jkSelectStmtProjections = "Projections";
static const char* jkSelectStmtFrom = "From";
@@ -2677,7 +2824,7 @@ static const char* jkSelectStmtSlimit = "Slimit";
static const char* jkSelectStmtStmtName = "StmtName";
static const char* jkSelectStmtHasAggFuncs = "HasAggFuncs";
-static int32_t selectStmtTojson(const void* pObj, SJson* pJson) {
+static int32_t selectStmtToJson(const void* pObj, SJson* pJson) {
const SSelectStmt* pNode = (const SSelectStmt*)pObj;
int32_t code = tjsonAddBoolToObject(pJson, jkSelectStmtDistinct, pNode->isDistinct);
@@ -2819,6 +2966,7 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
case QUERY_NODE_REAL_TABLE:
return realTableNodeToJson(pObj, pJson);
case QUERY_NODE_TEMP_TABLE:
+ return tempTableNodeToJson(pObj, pJson);
case QUERY_NODE_JOIN_TABLE:
break;
case QUERY_NODE_GROUPING_SET:
@@ -2848,9 +2996,9 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
case QUERY_NODE_DOWNSTREAM_SOURCE:
return downstreamSourceNodeToJson(pObj, pJson);
case QUERY_NODE_SET_OPERATOR:
- break;
+ return setOperatorToJson(pObj, pJson);
case QUERY_NODE_SELECT_STMT:
- return selectStmtTojson(pObj, pJson);
+ return selectStmtToJson(pObj, pJson);
case QUERY_NODE_VNODE_MODIF_STMT:
case QUERY_NODE_CREATE_DATABASE_STMT:
case QUERY_NODE_CREATE_TABLE_STMT:
@@ -2870,6 +3018,8 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
return logicProjectNodeToJson(pObj, pJson);
case QUERY_NODE_LOGIC_PLAN_VNODE_MODIF:
break;
+ case QUERY_NODE_LOGIC_PLAN_EXCHANGE:
+ return logicExchangeNodeToJson(pObj, pJson);
case QUERY_NODE_LOGIC_PLAN_FILL:
return logicFillNodeToJson(pObj, pJson);
case QUERY_NODE_LOGIC_PLAN_SORT:
@@ -2918,7 +3068,6 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
case QUERY_NODE_PHYSICAL_PLAN:
return planToJson(pObj, pJson);
default:
- // assert(0);
break;
}
nodesWarn("specificNodeToJson unknown node = %s", nodesNodeName(nodeType(pObj)));
@@ -2939,6 +3088,8 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
return jsonToFunctionNode(pJson, pObj);
case QUERY_NODE_REAL_TABLE:
return jsonToRealTableNode(pJson, pObj);
+ case QUERY_NODE_TEMP_TABLE:
+ return jsonToTempTableNode(pJson, pObj);
case QUERY_NODE_ORDER_BY_EXPR:
return jsonToOrderByExprNode(pJson, pObj);
case QUERY_NODE_INTERVAL_WINDOW:
@@ -2955,6 +3106,8 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
return jsonToSlotDescNode(pJson, pObj);
case QUERY_NODE_DOWNSTREAM_SOURCE:
return jsonToDownstreamSourceNode(pJson, pObj);
+ case QUERY_NODE_SET_OPERATOR:
+ return jsonToSetOperator(pJson, pObj);
case QUERY_NODE_SELECT_STMT:
return jsonToSelectStmt(pJson, pObj);
case QUERY_NODE_CREATE_TOPIC_STMT:
@@ -2963,6 +3116,8 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
return jsonToLogicScanNode(pJson, pObj);
case QUERY_NODE_LOGIC_PLAN_PROJECT:
return jsonToLogicProjectNode(pJson, pObj);
+ case QUERY_NODE_LOGIC_PLAN_EXCHANGE:
+ return jsonToLogicExchangeNode(pJson, pObj);
case QUERY_NODE_LOGIC_PLAN_FILL:
return jsonToLogicFillNode(pJson, pObj);
case QUERY_NODE_LOGIC_PLAN_SORT:
@@ -3007,7 +3162,6 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
case QUERY_NODE_PHYSICAL_PLAN:
return jsonToPlan(pJson, pObj);
default:
- assert(0);
break;
}
nodesWarn("jsonToSpecificNode unknown node = %s", nodesNodeName(nodeType(pObj)));
@@ -3038,7 +3192,8 @@ static int32_t jsonToNode(const SJson* pJson, void* pObj) {
SNode* pNode = (SNode*)pObj;
int32_t code;
- tjsonGetNumberValue(pJson, jkNodeType, pNode->type, code);;
+ tjsonGetNumberValue(pJson, jkNodeType, pNode->type, code);
+ ;
if (TSDB_CODE_SUCCESS == code) {
code = tjsonToObject(pJson, nodesNodeName(pNode->type), jsonToSpecificNode, pNode);
if (TSDB_CODE_SUCCESS != code) {
diff --git a/source/libs/parser/inc/sql.y b/source/libs/parser/inc/sql.y
index 9c83b600d2..836e97c4db 100644
--- a/source/libs/parser/inc/sql.y
+++ b/source/libs/parser/inc/sql.y
@@ -241,7 +241,7 @@ alter_table_clause(A) ::=
alter_table_clause(A) ::=
full_table_name(B) RENAME TAG column_name(C) column_name(D). { A = createAlterTableRenameCol(pCxt, B, TSDB_ALTER_TABLE_UPDATE_TAG_NAME, &C, &D); }
alter_table_clause(A) ::=
- full_table_name(B) SET TAG column_name(C) NK_EQ literal(D). { A = createAlterTableSetTag(pCxt, B, &C, D); }
+ full_table_name(B) SET TAG column_name(C) NK_EQ literal(D). { A = createAlterTableSetTag(pCxt, B, &C, releaseRawExprNode(pCxt, D)); }
%type multi_create_clause { SNodeList* }
%destructor multi_create_clause { nodesDestroyList($$); }
diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c
index 639da98f48..7dc2978ec7 100644
--- a/source/libs/parser/src/parAstCreater.c
+++ b/source/libs/parser/src/parAstCreater.c
@@ -14,6 +14,8 @@
* along with this program. If not, see .
*/
+#include
+
#include "parAst.h"
#include "parUtil.h"
#include "ttime.h"
@@ -76,6 +78,19 @@ static bool checkUserName(SAstCreateContext* pCxt, SToken* pUserName) {
return TSDB_CODE_SUCCESS == pCxt->errCode;
}
+static bool invalidPassword(const char* pPassword) {
+ regex_t regex;
+
+ if (regcomp(®ex, "[ '\"`\\]", REG_EXTENDED | REG_ICASE) != 0) {
+ return false;
+ }
+
+ /* Execute regular expression */
+ int32_t res = regexec(®ex, pPassword, 0, NULL, 0);
+ regfree(®ex);
+ return 0 == res;
+}
+
static bool checkPassword(SAstCreateContext* pCxt, const SToken* pPasswordToken, char* pPassword) {
if (NULL == pPasswordToken) {
pCxt->errCode = TSDB_CODE_PAR_SYNTAX_ERROR;
@@ -86,6 +101,8 @@ static bool checkPassword(SAstCreateContext* pCxt, const SToken* pPasswordToken,
strdequote(pPassword);
if (strtrim(pPassword) <= 0) {
pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_PASSWD_EMPTY);
+ } else if (invalidPassword(pPassword)) {
+ pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_PASSWD);
}
}
return TSDB_CODE_SUCCESS == pCxt->errCode;
diff --git a/source/libs/parser/src/parAuthenticator.c b/source/libs/parser/src/parAuthenticator.c
index 8f686cefce..250e7910d6 100644
--- a/source/libs/parser/src/parAuthenticator.c
+++ b/source/libs/parser/src/parAuthenticator.c
@@ -14,6 +14,7 @@
*/
#include "catalog.h"
+#include "cmdnodes.h"
#include "parInt.h"
typedef struct SAuthCxt {
@@ -65,13 +66,19 @@ static int32_t authSetOperator(SAuthCxt* pCxt, SSetOperator* pSetOper) {
return code;
}
+static int32_t authDropUser(SAuthCxt* pCxt, SDropUserStmt* pStmt) {
+ if (!pCxt->pParseCxt->isSuperUser || 0 == strcmp(pStmt->useName, TSDB_DEFAULT_USER)) {
+ return TSDB_CODE_PAR_PERMISSION_DENIED;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) {
switch (nodeType(pStmt)) {
case QUERY_NODE_SET_OPERATOR:
return authSetOperator(pCxt, (SSetOperator*)pStmt);
case QUERY_NODE_SELECT_STMT:
return authSelect(pCxt, (SSelectStmt*)pStmt);
- case QUERY_NODE_VNODE_MODIF_STMT:
case QUERY_NODE_CREATE_DATABASE_STMT:
case QUERY_NODE_DROP_DATABASE_STMT:
case QUERY_NODE_ALTER_DATABASE_STMT:
@@ -84,7 +91,10 @@ static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) {
case QUERY_NODE_ALTER_TABLE_STMT:
case QUERY_NODE_CREATE_USER_STMT:
case QUERY_NODE_ALTER_USER_STMT:
- case QUERY_NODE_DROP_USER_STMT:
+ break;
+ case QUERY_NODE_DROP_USER_STMT: {
+ return authDropUser(pCxt, (SDropUserStmt*)pStmt);
+ }
case QUERY_NODE_USE_DATABASE_STMT:
case QUERY_NODE_CREATE_DNODE_STMT:
case QUERY_NODE_DROP_DNODE_STMT:
diff --git a/source/libs/parser/src/parCalcConst.c b/source/libs/parser/src/parCalcConst.c
index 9c2bd10686..646ef4cf62 100644
--- a/source/libs/parser/src/parCalcConst.c
+++ b/source/libs/parser/src/parCalcConst.c
@@ -262,9 +262,9 @@ static int32_t calcConstQuery(SCalcConstContext* pCxt, SNode* pStmt, bool subque
break;
case QUERY_NODE_SET_OPERATOR: {
SSetOperator* pSetOp = (SSetOperator*)pStmt;
- code = calcConstQuery(pCxt, pSetOp->pLeft, subquery);
+ code = calcConstQuery(pCxt, pSetOp->pLeft, false);
if (TSDB_CODE_SUCCESS == code) {
- code = calcConstQuery(pCxt, pSetOp->pRight, subquery);
+ code = calcConstQuery(pCxt, pSetOp->pRight, false);
}
break;
}
diff --git a/source/libs/parser/src/parInsert.c b/source/libs/parser/src/parInsert.c
index 27383c0a51..01833e1776 100644
--- a/source/libs/parser/src/parInsert.c
+++ b/source/libs/parser/src/parInsert.c
@@ -1041,18 +1041,6 @@ static void destroyInsertParseContextForTable(SInsertParseContext* pCxt) {
destroyCreateSubTbReq(&pCxt->createTblReq);
}
-static void destroyDataBlock(STableDataBlocks* pDataBlock) {
- if (pDataBlock == NULL) {
- return;
- }
-
- taosMemoryFreeClear(pDataBlock->pData);
- if (!pDataBlock->cloned) {
- destroyBoundColumnInfo(&pDataBlock->boundColumnInfo);
- }
- taosMemoryFreeClear(pDataBlock);
-}
-
static void destroyInsertParseContext(SInsertParseContext* pCxt) {
destroyInsertParseContextForTable(pCxt);
taosHashCleanup(pCxt->pVgroupsHashObj);
@@ -1301,6 +1289,7 @@ int32_t qBuildStmtOutput(SQuery* pQuery, SHashObj* pVgHash, SHashObj* pBlockHash
CHECK_CODE(buildOutput(&insertCtx));
+ destroyBlockArrayList(insertCtx.pVgDataBlocks);
return TSDB_CODE_SUCCESS;
}
@@ -1580,16 +1569,25 @@ int32_t qBuildStmtColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD** fields
// schemaless logic start
-typedef struct SmlExecHandle {
- SHashObj* pBlockHash;
-
+typedef struct SmlExecTableHandle {
SParsedDataColInfo tags; // each table
SKVRowBuilder tagsBuilder; // each table
SVCreateTbReq createTblReq; // each table
+} SmlExecTableHandle;
- SQuery* pQuery;
+typedef struct SmlExecHandle {
+ SHashObj* pBlockHash;
+ SmlExecTableHandle tableExecHandle;
+ SQuery *pQuery;
} SSmlExecHandle;
+static void smlDestroyTableHandle(void* pHandle) {
+ SmlExecTableHandle* handle = (SmlExecTableHandle*)pHandle;
+ tdDestroyKVRowBuilder(&handle->tagsBuilder);
+ destroyBoundColumnInfo(&handle->tags);
+ destroyCreateSubTbReq(&handle->createTblReq);
+}
+
static int32_t smlBoundColumnData(SArray* cols, SParsedDataColInfo* pColList, SSchema* pSchema) {
col_id_t nCols = pColList->numOfCols;
@@ -1692,25 +1690,26 @@ int32_t smlBindData(void *handle, SArray *tags, SArray *colsSchema, SArray *cols
SMsgBuf pBuf = {.buf = msgBuf, .len = msgBufLen};
SSmlExecHandle* smlHandle = (SSmlExecHandle*)handle;
+ smlDestroyTableHandle(&smlHandle->tableExecHandle); // free for each table
SSchema* pTagsSchema = getTableTagSchema(pTableMeta);
- setBoundColumnInfo(&smlHandle->tags, pTagsSchema, getNumOfTags(pTableMeta));
- int ret = smlBoundColumnData(tags, &smlHandle->tags, pTagsSchema);
+ setBoundColumnInfo(&smlHandle->tableExecHandle.tags, pTagsSchema, getNumOfTags(pTableMeta));
+ int ret = smlBoundColumnData(tags, &smlHandle->tableExecHandle.tags, pTagsSchema);
if (ret != TSDB_CODE_SUCCESS) {
buildInvalidOperationMsg(&pBuf, "bound tags error");
return ret;
}
SKVRow row = NULL;
- ret = smlBuildTagRow(tags, &smlHandle->tagsBuilder, &smlHandle->tags, pTagsSchema, &row, &pBuf);
+ ret = smlBuildTagRow(tags, &smlHandle->tableExecHandle.tagsBuilder, &smlHandle->tableExecHandle.tags, pTagsSchema, &row, &pBuf);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
- buildCreateTbReq(&smlHandle->createTblReq, tableName, row, pTableMeta->suid);
+ buildCreateTbReq(&smlHandle->tableExecHandle.createTblReq, tableName, row, pTableMeta->suid);
STableDataBlocks* pDataBlock = NULL;
ret = getDataBlockFromList(smlHandle->pBlockHash, &pTableMeta->uid, sizeof(pTableMeta->uid),
TSDB_DEFAULT_PAYLOAD_SIZE, sizeof(SSubmitBlk), getTableInfo(pTableMeta).rowSize,
- pTableMeta, &pDataBlock, NULL, &smlHandle->createTblReq);
+ pTableMeta, &pDataBlock, NULL, &smlHandle->tableExecHandle.createTblReq);
if (ret != TSDB_CODE_SUCCESS) {
buildInvalidOperationMsg(&pBuf, "create data block error");
return ret;
@@ -1826,6 +1825,7 @@ void smlDestroyHandle(void* pHandle) {
if (!pHandle) return;
SSmlExecHandle* handle = (SSmlExecHandle*)pHandle;
destroyBlockHashmap(handle->pBlockHash);
+ smlDestroyTableHandle(&handle->tableExecHandle);
taosMemoryFree(handle);
}
diff --git a/source/libs/parser/src/parInsertData.c b/source/libs/parser/src/parInsertData.c
index 677dbca0e9..deb899309e 100644
--- a/source/libs/parser/src/parInsertData.c
+++ b/source/libs/parser/src/parInsertData.c
@@ -237,9 +237,7 @@ static void destroyDataBlock(STableDataBlocks* pDataBlock) {
taosMemoryFreeClear(pDataBlock->pData);
if (!pDataBlock->cloned) {
// free the refcount for metermeta
-// if (pDataBlock->pTableMeta != NULL) {
-// taosMemoryFreeClear(pDataBlock->pTableMeta);
-// }
+ taosMemoryFreeClear(pDataBlock->pTableMeta);
destroyBoundColumnInfo(&pDataBlock->boundColumnInfo);
}
diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c
index 95ab1500cc..3983b53b6e 100644
--- a/source/libs/parser/src/parTranslater.c
+++ b/source/libs/parser/src/parTranslater.c
@@ -480,6 +480,31 @@ static EDealRes translateColumn(STranslateContext* pCxt, SColumnNode* pCol) {
return res;
}
+static int32_t parseTimeFromValueNode(SValueNode* pVal) {
+ if (IS_SIGNED_NUMERIC_TYPE(pVal->node.resType.type)) {
+ return TSDB_CODE_SUCCESS;
+ } else if (IS_UNSIGNED_NUMERIC_TYPE(pVal->node.resType.type)) {
+ pVal->datum.i = pVal->datum.u;
+ return TSDB_CODE_SUCCESS;
+ } else if (IS_FLOAT_TYPE(pVal->node.resType.type)) {
+ pVal->datum.i = pVal->datum.d;
+ return TSDB_CODE_SUCCESS;
+ } else if (TSDB_DATA_TYPE_BOOL == pVal->node.resType.type) {
+ pVal->datum.i = pVal->datum.b;
+ return TSDB_CODE_SUCCESS;
+ } else if (IS_VAR_DATA_TYPE(pVal->node.resType.type) || TSDB_DATA_TYPE_TIMESTAMP == pVal->node.resType.type) {
+ if (TSDB_CODE_SUCCESS == taosParseTime(pVal->literal, &pVal->datum.i, pVal->node.resType.bytes,
+ pVal->node.resType.precision, tsDaylight)) {
+ return TSDB_CODE_SUCCESS;
+ }
+ char* pEnd = NULL;
+ pVal->datum.i = strtoll(pVal->literal, &pEnd, 10);
+ return (NULL != pEnd && '\0' == *pEnd) ? TSDB_CODE_SUCCESS : TSDB_CODE_FAILED;
+ } else {
+ return TSDB_CODE_FAILED;
+ }
+}
+
static EDealRes translateValueImpl(STranslateContext* pCxt, SValueNode* pVal, SDataType targetDt) {
uint8_t precision = (NULL != pCxt->pCurrStmt ? pCxt->pCurrStmt->precision : targetDt.precision);
pVal->node.resType.precision = precision;
@@ -571,7 +596,7 @@ static EDealRes translateValueImpl(STranslateContext* pCxt, SValueNode* pVal, SD
break;
}
case TSDB_DATA_TYPE_TIMESTAMP: {
- if (taosParseTime(pVal->literal, &pVal->datum.i, targetDt.bytes, precision, tsDaylight) != TSDB_CODE_SUCCESS) {
+ if (TSDB_CODE_SUCCESS != parseTimeFromValueNode(pVal)) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal);
}
*(int64_t*)&pVal->typeData = pVal->datum.i;
@@ -1660,10 +1685,10 @@ static int32_t createPrimaryKeyColByTable(STranslateContext* pCxt, STableNode* p
if (NULL == pCol) {
return TSDB_CODE_OUT_OF_MEMORY;
}
- if (QUERY_NODE_REAL_TABLE == nodeType(pTable)) {
- setColumnInfoBySchema((SRealTableNode*)pTable, ((SRealTableNode*)pTable)->pMeta->schema, false, pCol);
- } else {
- // todo
+ pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID;
+ strcpy(pCol->colName, PK_TS_COL_INTERNAL_NAME);
+ if (!findAndSetColumn(pCol, pTable)) {
+ return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TIMELINE_FUNC);
}
*pPrimaryKey = (SNode*)pCol;
return TSDB_CODE_SUCCESS;
@@ -2924,6 +2949,8 @@ static int32_t translateCreateIndex(STranslateContext* pCxt, SCreateIndexStmt* p
}
static int32_t translateDropIndex(STranslateContext* pCxt, SDropIndexStmt* pStmt) {
+ SEncoder encoder = {0};
+ int32_t contLen = 0;
SVDropTSmaReq dropSmaReq = {0};
strcpy(dropSmaReq.indexName, pStmt->indexName);
@@ -2931,16 +2958,26 @@ static int32_t translateDropIndex(STranslateContext* pCxt, SDropIndexStmt* pStmt
if (NULL == pCxt->pCmdMsg) {
return TSDB_CODE_OUT_OF_MEMORY;
}
+
+ int32_t ret = 0;
+ tEncodeSize(tEncodeSVDropTSmaReq, &dropSmaReq, contLen, ret);
+ if (ret < 0) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
pCxt->pCmdMsg->epSet = pCxt->pParseCxt->mgmtEpSet;
pCxt->pCmdMsg->msgType = TDMT_VND_DROP_SMA;
- pCxt->pCmdMsg->msgLen = tSerializeSVDropTSmaReq(NULL, &dropSmaReq);
+ pCxt->pCmdMsg->msgLen = contLen;
pCxt->pCmdMsg->pMsg = taosMemoryMalloc(pCxt->pCmdMsg->msgLen);
if (NULL == pCxt->pCmdMsg->pMsg) {
return TSDB_CODE_OUT_OF_MEMORY;
}
void* pBuf = pCxt->pCmdMsg->pMsg;
- tSerializeSVDropTSmaReq(&pBuf, &dropSmaReq);
-
+ if (tEncodeSVDropTSmaReq(&encoder, &dropSmaReq) < 0) {
+ tEncoderClear(&encoder);
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ tEncoderClear(&encoder);
return TSDB_CODE_SUCCESS;
}
@@ -3688,7 +3725,7 @@ static int32_t buildNormalTableBatchReq(int32_t acctId, const SCreateTableStmt*
req.type = TD_NORMAL_TABLE;
req.name = strdup(pStmt->tableName);
req.ntb.schema.nCols = LIST_LENGTH(pStmt->pCols);
- req.ntb.schema.sver = 0;
+ req.ntb.schema.sver = 1;
req.ntb.schema.pSchema = taosMemoryCalloc(req.ntb.schema.nCols, sizeof(SSchema));
if (NULL == req.name || NULL == req.ntb.schema.pSchema) {
destroyCreateTbReq(&req);
@@ -3763,7 +3800,7 @@ static void destroyCreateTbReqBatch(SVgroupCreateTableBatch* pTbBatch) {
taosArrayDestroy(pTbBatch->req.pArray);
}
-static int32_t rewriteToVnodeModifOpStmt(SQuery* pQuery, SArray* pBufArray) {
+static int32_t rewriteToVnodeModifyOpStmt(SQuery* pQuery, SArray* pBufArray) {
SVnodeModifOpStmt* pNewStmt = nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT);
if (pNewStmt == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
@@ -3818,7 +3855,7 @@ static int32_t rewriteCreateTable(STranslateContext* pCxt, SQuery* pQuery) {
code = buildCreateTableDataBlock(pCxt->pParseCxt->acctId, pStmt, &info, &pBufArray);
}
if (TSDB_CODE_SUCCESS == code) {
- code = rewriteToVnodeModifOpStmt(pQuery, pBufArray);
+ code = rewriteToVnodeModifyOpStmt(pQuery, pBufArray);
if (TSDB_CODE_SUCCESS != code) {
destroyCreateTbReqArray(pBufArray);
}
@@ -4074,7 +4111,7 @@ static int32_t rewriteCreateMultiTable(STranslateContext* pCxt, SQuery* pQuery)
return TSDB_CODE_OUT_OF_MEMORY;
}
- return rewriteToVnodeModifOpStmt(pQuery, pBufArray);
+ return rewriteToVnodeModifyOpStmt(pQuery, pBufArray);
}
typedef struct SVgroupDropTableBatch {
@@ -4214,12 +4251,160 @@ static int32_t rewriteDropTable(STranslateContext* pCxt, SQuery* pQuery) {
return TSDB_CODE_OUT_OF_MEMORY;
}
- return rewriteToVnodeModifOpStmt(pQuery, pBufArray);
+ return rewriteToVnodeModifyOpStmt(pQuery, pBufArray);
+}
+
+static int32_t buildAlterTbReq(STranslateContext* pCxt, SAlterTableStmt* pStmt, SVAlterTbReq* pReq) {
+ pReq->tbName = strdup(pStmt->tableName);
+ if (NULL == pReq->tbName) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ pReq->action = pStmt->alterType;
+
+ switch (pStmt->alterType) {
+ case TSDB_ALTER_TABLE_ADD_TAG:
+ case TSDB_ALTER_TABLE_DROP_TAG:
+ case TSDB_ALTER_TABLE_UPDATE_TAG_NAME:
+ case TSDB_ALTER_TABLE_UPDATE_TAG_BYTES:
+ return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE);
+ case TSDB_ALTER_TABLE_UPDATE_TAG_VAL:
+ pReq->tagName = strdup(pStmt->colName);
+ if (NULL == pReq->tagName) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ if (DEAL_RES_ERROR == translateValue(pCxt, pStmt->pVal)) {
+ return pCxt->errCode;
+ }
+ pReq->isNull = (TSDB_DATA_TYPE_NULL == pStmt->pVal->node.resType.type);
+ pReq->nTagVal = pStmt->pVal->node.resType.bytes;
+ char* pVal = nodesGetValueFromNode(pStmt->pVal);
+ pReq->pTagVal = IS_VAR_DATA_TYPE(pStmt->pVal->node.resType.type) ? pVal + VARSTR_HEADER_SIZE : pVal;
+ break;
+ case TSDB_ALTER_TABLE_ADD_COLUMN:
+ case TSDB_ALTER_TABLE_DROP_COLUMN:
+ pReq->colName = strdup(pStmt->colName);
+ if (NULL == pReq->colName) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ pReq->type = pStmt->dataType.type;
+ pReq->flags = COL_SMA_ON;
+ pReq->bytes = pStmt->dataType.bytes;
+ break;
+ case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES:
+ pReq->colName = strdup(pStmt->colName);
+ if (NULL == pReq->colName) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ pReq->colModBytes = calcTypeBytes(pStmt->dataType);
+ break;
+ case TSDB_ALTER_TABLE_UPDATE_OPTIONS:
+ if (-1 != pStmt->pOptions->ttl) {
+ pReq->updateTTL = true;
+ pReq->newTTL = pStmt->pOptions->ttl;
+ }
+ if ('\0' != pStmt->pOptions->comment[0]) {
+ pReq->updateComment = true;
+ pReq->newComment = strdup(pStmt->pOptions->comment);
+ if (NULL == pReq->newComment) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+ break;
+ case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME:
+ pReq->colName = strdup(pStmt->colName);
+ pReq->colNewName = strdup(pStmt->newColName);
+ if (NULL == pReq->colName || NULL == pReq->colNewName) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t serializeAlterTbReq(STranslateContext* pCxt, SAlterTableStmt* pStmt, SVAlterTbReq* pReq,
+ SArray* pArray) {
+ SVgroupInfo vg = {0};
+ int32_t code = getTableHashVgroup(pCxt, pStmt->dbName, pStmt->tableName, &vg);
+ int tlen = 0;
+ if (TSDB_CODE_SUCCESS == code) {
+ tEncodeSize(tEncodeSVAlterTbReq, pReq, tlen, code);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ tlen += sizeof(SMsgHead);
+ void* pMsg = taosMemoryMalloc(tlen);
+ if (NULL == pMsg) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ ((SMsgHead*)pMsg)->vgId = htonl(vg.vgId);
+ ((SMsgHead*)pMsg)->contLen = htonl(tlen);
+ void* pBuf = POINTER_SHIFT(pMsg, sizeof(SMsgHead));
+ SEncoder coder = {0};
+ tEncoderInit(&coder, pBuf, tlen - sizeof(SMsgHead));
+ tEncodeSVAlterTbReq(&coder, pReq);
+ tEncoderClear(&coder);
+
+ SVgDataBlocks* pVgData = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
+ if (NULL == pVgData) {
+ taosMemoryFree(pMsg);
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ pVgData->vg = vg;
+ pVgData->pData = pMsg;
+ pVgData->size = tlen;
+ pVgData->numOfTables = 1;
+ taosArrayPush(pArray, &pVgData);
+ }
+
+ return code;
+}
+
+static int32_t buildModifyVnodeArray(STranslateContext* pCxt, SAlterTableStmt* pStmt, SVAlterTbReq* pReq,
+ SArray** pArray) {
+ SArray* pTmpArray = taosArrayInit(1, sizeof(void*));
+ if (NULL == pTmpArray) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ int32_t code = serializeAlterTbReq(pCxt, pStmt, pReq, pTmpArray);
+ if (TSDB_CODE_SUCCESS == code) {
+ *pArray = pTmpArray;
+ } else {
+ taosArrayDestroy(pTmpArray);
+ }
+
+ return code;
}
static int32_t rewriteAlterTable(STranslateContext* pCxt, SQuery* pQuery) {
- // todo
- return TSDB_CODE_SUCCESS;
+ SAlterTableStmt* pStmt = (SAlterTableStmt*)pQuery->pRoot;
+
+ STableMeta* pTableMeta = NULL;
+ int32_t code = getTableMeta(pCxt, pStmt->dbName, pStmt->tableName, &pTableMeta);
+ if (TSDB_CODE_SUCCESS != code) {
+ return code;
+ }
+
+ if (TSDB_SUPER_TABLE == pTableMeta->tableType) {
+ return TSDB_CODE_SUCCESS;
+ } else if (TSDB_CHILD_TABLE != pTableMeta->tableType && TSDB_NORMAL_TABLE != pTableMeta->tableType) {
+ return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE);
+ }
+
+ SVAlterTbReq req = {0};
+ code = buildAlterTbReq(pCxt, pStmt, &req);
+
+ SArray* pArray = NULL;
+ if (TSDB_CODE_SUCCESS == code) {
+ code = buildModifyVnodeArray(pCxt, pStmt, &req, &pArray);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = rewriteToVnodeModifyOpStmt(pQuery, pArray);
+ }
+
+ return code;
}
static int32_t rewriteQuery(STranslateContext* pCxt, SQuery* pQuery) {
@@ -4259,9 +4444,7 @@ static int32_t rewriteQuery(STranslateContext* pCxt, SQuery* pQuery) {
code = rewriteDropTable(pCxt, pQuery);
break;
case QUERY_NODE_ALTER_TABLE_STMT:
- if (TSDB_ALTER_TABLE_UPDATE_TAG_VAL == ((SAlterTableStmt*)pQuery->pRoot)->alterType) {
- code = rewriteAlterTable(pCxt, pQuery);
- }
+ code = rewriteAlterTable(pCxt, pQuery);
break;
default:
break;
diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c
index 7183d956a9..e7716741ed 100644
--- a/source/libs/parser/src/parUtil.c
+++ b/source/libs/parser/src/parUtil.c
@@ -148,6 +148,12 @@ static char* getSyntaxErrFormat(int32_t errCode) {
return "Invalid number of tag columns";
case TSDB_CODE_PAR_INVALID_INTERNAL_PK:
return "Invalid _c0 or _rowts expression";
+ case TSDB_CODE_PAR_INVALID_TIMELINE_FUNC:
+ return "Invalid timeline function";
+ case TSDB_CODE_PAR_INVALID_PASSWD:
+ return "Invalid password";
+ case TSDB_CODE_PAR_INVALID_ALTER_TABLE:
+ return "Invalid alter table statement";
case TSDB_CODE_OUT_OF_MEMORY:
return "Out of memory";
default:
diff --git a/source/libs/parser/src/sql.c b/source/libs/parser/src/sql.c
index 4293131338..ed812405e0 100644
--- a/source/libs/parser/src/sql.c
+++ b/source/libs/parser/src/sql.c
@@ -3500,7 +3500,7 @@ static YYACTIONTYPE yy_reduce(
yymsp[-4].minor.yy172 = yylhsminor.yy172;
break;
case 121: /* alter_table_clause ::= full_table_name SET TAG column_name NK_EQ literal */
-{ yylhsminor.yy172 = createAlterTableSetTag(pCxt, yymsp[-5].minor.yy172, &yymsp[-2].minor.yy105, yymsp[0].minor.yy172); }
+{ yylhsminor.yy172 = createAlterTableSetTag(pCxt, yymsp[-5].minor.yy172, &yymsp[-2].minor.yy105, releaseRawExprNode(pCxt, yymsp[0].minor.yy172)); }
yymsp[-5].minor.yy172 = yylhsminor.yy172;
break;
case 123: /* multi_create_clause ::= multi_create_clause create_subtable_clause */
diff --git a/source/libs/parser/test/parInitialATest.cpp b/source/libs/parser/test/parInitialATest.cpp
index 4ddb7736b2..e1fa8ffe8d 100644
--- a/source/libs/parser/test/parInitialATest.cpp
+++ b/source/libs/parser/test/parInitialATest.cpp
@@ -69,7 +69,7 @@ TEST_F(ParserInitialATest, alterDatabase) {
* | COMMENT 'string_value'
* }
*/
-TEST_F(ParserInitialATest, alterTable) {
+TEST_F(ParserInitialATest, alterSTable) {
useDb("root", "test");
SMAlterStbReq expect = {0};
@@ -119,7 +119,7 @@ TEST_F(ParserInitialATest, alterTable) {
setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_ALTER_TABLE_STMT);
SMAlterStbReq req = {0};
- ASSERT_TRUE(TSDB_CODE_SUCCESS == tDeserializeSMAlterStbReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req));
+ ASSERT_EQ(tDeserializeSMAlterStbReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req), TSDB_CODE_SUCCESS);
ASSERT_EQ(std::string(req.name), std::string(expect.name));
ASSERT_EQ(req.alterType, expect.alterType);
ASSERT_EQ(req.numOfFields, expect.numOfFields);
@@ -139,24 +139,24 @@ TEST_F(ParserInitialATest, alterTable) {
}
});
- setAlterStbReqFunc("t1", TSDB_ALTER_TABLE_UPDATE_OPTIONS, 0, nullptr, 0, 0, nullptr, nullptr, 10);
- run("ALTER TABLE t1 TTL 10");
+ setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_UPDATE_OPTIONS, 0, nullptr, 0, 0, nullptr, nullptr, 10);
+ run("ALTER TABLE st1 TTL 10");
- setAlterStbReqFunc("t1", TSDB_ALTER_TABLE_UPDATE_OPTIONS, 0, nullptr, 0, 0, nullptr, "test");
- run("ALTER TABLE t1 COMMENT 'test'");
+ setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_UPDATE_OPTIONS, 0, nullptr, 0, 0, nullptr, "test");
+ run("ALTER TABLE st1 COMMENT 'test'");
- setAlterStbReqFunc("t1", TSDB_ALTER_TABLE_ADD_COLUMN, 1, "cc1", TSDB_DATA_TYPE_BIGINT);
- run("ALTER TABLE t1 ADD COLUMN cc1 BIGINT");
+ setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_ADD_COLUMN, 1, "cc1", TSDB_DATA_TYPE_BIGINT);
+ run("ALTER TABLE st1 ADD COLUMN cc1 BIGINT");
- setAlterStbReqFunc("t1", TSDB_ALTER_TABLE_DROP_COLUMN, 1, "c1");
- run("ALTER TABLE t1 DROP COLUMN c1");
+ setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_DROP_COLUMN, 1, "c1");
+ run("ALTER TABLE st1 DROP COLUMN c1");
- setAlterStbReqFunc("t1", TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, 1, "c1", TSDB_DATA_TYPE_VARCHAR,
+ setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, 1, "c1", TSDB_DATA_TYPE_VARCHAR,
20 + VARSTR_HEADER_SIZE);
- run("ALTER TABLE t1 MODIFY COLUMN c1 VARCHAR(20)");
+ run("ALTER TABLE st1 MODIFY COLUMN c1 VARCHAR(20)");
- setAlterStbReqFunc("t1", TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, 2, "c1", 0, 0, "cc1");
- run("ALTER TABLE t1 RENAME COLUMN c1 cc1");
+ setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, 2, "c1", 0, 0, "cc1");
+ run("ALTER TABLE st1 RENAME COLUMN c1 cc1");
setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_ADD_TAG, 1, "tag11", TSDB_DATA_TYPE_BIGINT);
run("ALTER TABLE st1 ADD TAG tag11 BIGINT");
@@ -171,7 +171,127 @@ TEST_F(ParserInitialATest, alterTable) {
setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_UPDATE_TAG_NAME, 2, "tag1", 0, 0, "tag11");
run("ALTER TABLE st1 RENAME TAG tag1 tag11");
- // run("ALTER TABLE st1s1 SET TAG tag1=10");
+ // todo
+ // ADD {FULLTEXT | SMA} INDEX index_name (col_name [, col_name] ...) [index_option]
+}
+
+TEST_F(ParserInitialATest, alterTable) {
+ useDb("root", "test");
+
+ SVAlterTbReq expect = {0};
+
+ auto setAlterColFunc = [&](const char* pTbname, int8_t alterType, const char* pColName, int8_t dataType = 0,
+ int32_t dataBytes = 0, const char* pNewColName = nullptr) {
+ memset(&expect, 0, sizeof(SVAlterTbReq));
+ expect.tbName = strdup(pTbname);
+ expect.action = alterType;
+ expect.colName = strdup(pColName);
+
+ switch (alterType) {
+ case TSDB_ALTER_TABLE_ADD_COLUMN:
+ expect.type = dataType;
+ expect.flags = COL_SMA_ON;
+ expect.bytes = dataBytes > 0 ? dataBytes : (dataType > 0 ? tDataTypes[dataType].bytes : 0);
+ break;
+ case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES:
+ expect.colModBytes = dataBytes;
+ break;
+ case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME:
+ expect.colNewName = strdup(pNewColName);
+ break;
+ default:
+ break;
+ }
+ };
+
+ auto setAlterTagFunc = [&](const char* pTbname, const char* pTagName, const uint8_t* pNewVal, uint32_t bytes) {
+ memset(&expect, 0, sizeof(SVAlterTbReq));
+ expect.tbName = strdup(pTbname);
+ expect.action = TSDB_ALTER_TABLE_UPDATE_TAG_VAL;
+ expect.tagName = strdup(pTagName);
+
+ expect.isNull = (nullptr == pNewVal);
+ expect.nTagVal = bytes;
+ expect.pTagVal = pNewVal;
+ };
+
+ auto setAlterOptionsFunc = [&](const char* pTbname, int32_t ttl, const char* pComment = nullptr) {
+ memset(&expect, 0, sizeof(SVAlterTbReq));
+ expect.tbName = strdup(pTbname);
+ expect.action = TSDB_ALTER_TABLE_UPDATE_OPTIONS;
+ if (-1 != ttl) {
+ expect.updateTTL = true;
+ expect.newTTL = ttl;
+ }
+ if (nullptr != pComment) {
+ expect.updateComment = true;
+ expect.newComment = pComment;
+ }
+ };
+
+ setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
+ ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_VNODE_MODIF_STMT);
+ SVnodeModifOpStmt* pStmt = (SVnodeModifOpStmt*)pQuery->pRoot;
+
+ ASSERT_EQ(pStmt->sqlNodeType, QUERY_NODE_ALTER_TABLE_STMT);
+ ASSERT_NE(pStmt->pDataBlocks, nullptr);
+ ASSERT_EQ(taosArrayGetSize(pStmt->pDataBlocks), 1);
+ SVgDataBlocks* pVgData = (SVgDataBlocks*)taosArrayGetP(pStmt->pDataBlocks, 0);
+ void* pBuf = POINTER_SHIFT(pVgData->pData, sizeof(SMsgHead));
+ SVAlterTbReq req = {0};
+ SDecoder coder = {0};
+ tDecoderInit(&coder, (const uint8_t*)pBuf, pVgData->size);
+ ASSERT_EQ(tDecodeSVAlterTbReq(&coder, &req), TSDB_CODE_SUCCESS);
+
+ ASSERT_EQ(std::string(req.tbName), std::string(expect.tbName));
+ ASSERT_EQ(req.action, expect.action);
+ if (nullptr != expect.colName) {
+ ASSERT_EQ(std::string(req.colName), std::string(expect.colName));
+ }
+ ASSERT_EQ(req.type, expect.type);
+ ASSERT_EQ(req.flags, expect.flags);
+ ASSERT_EQ(req.bytes, expect.bytes);
+ ASSERT_EQ(req.colModBytes, expect.colModBytes);
+ if (nullptr != expect.colNewName) {
+ ASSERT_EQ(std::string(req.colNewName), std::string(expect.colNewName));
+ }
+ if (nullptr != expect.tagName) {
+ ASSERT_EQ(std::string(req.tagName), std::string(expect.tagName));
+ }
+ ASSERT_EQ(req.isNull, expect.isNull);
+ ASSERT_EQ(req.nTagVal, expect.nTagVal);
+ ASSERT_EQ(memcmp(req.pTagVal, expect.pTagVal, expect.nTagVal), 0);
+ ASSERT_EQ(req.updateTTL, expect.updateTTL);
+ ASSERT_EQ(req.newTTL, expect.newTTL);
+ ASSERT_EQ(req.updateComment, expect.updateComment);
+ if (nullptr != expect.newComment) {
+ ASSERT_EQ(std::string(req.newComment), std::string(expect.newComment));
+ }
+
+ tDecoderClear(&coder);
+ });
+
+ setAlterOptionsFunc("t1", 10, nullptr);
+ run("ALTER TABLE t1 TTL 10");
+
+ setAlterOptionsFunc("t1", -1, "test");
+ run("ALTER TABLE t1 COMMENT 'test'");
+
+ setAlterColFunc("t1", TSDB_ALTER_TABLE_ADD_COLUMN, "cc1", TSDB_DATA_TYPE_BIGINT);
+ run("ALTER TABLE t1 ADD COLUMN cc1 BIGINT");
+
+ setAlterColFunc("t1", TSDB_ALTER_TABLE_DROP_COLUMN, "c1");
+ run("ALTER TABLE t1 DROP COLUMN c1");
+
+ setAlterColFunc("t1", TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, "c1", TSDB_DATA_TYPE_VARCHAR, 20 + VARSTR_HEADER_SIZE);
+ run("ALTER TABLE t1 MODIFY COLUMN c1 VARCHAR(20)");
+
+ setAlterColFunc("t1", TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, "c1", 0, 0, "cc1");
+ run("ALTER TABLE t1 RENAME COLUMN c1 cc1");
+
+ int64_t val = 10;
+ setAlterTagFunc("st1s1", "tag1", (const uint8_t*)&val, sizeof(val));
+ run("ALTER TABLE st1s1 SET TAG tag1=10");
// todo
// ADD {FULLTEXT | SMA} INDEX index_name (col_name [, col_name] ...) [index_option]
diff --git a/source/libs/parser/test/parSelectTest.cpp b/source/libs/parser/test/parSelectTest.cpp
index 0ba062ebe4..47424d3138 100644
--- a/source/libs/parser/test/parSelectTest.cpp
+++ b/source/libs/parser/test/parSelectTest.cpp
@@ -187,7 +187,7 @@ TEST_F(ParserSelectTest, semanticError) {
run("SELECT c2 FROM t1 tt1, t1 tt2 WHERE tt1.c1 = tt2.c1", TSDB_CODE_PAR_AMBIGUOUS_COLUMN, PARSER_STAGE_TRANSLATE);
// TSDB_CODE_PAR_WRONG_VALUE_TYPE
- run("SELECT timestamp '2010' FROM t1", TSDB_CODE_PAR_WRONG_VALUE_TYPE, PARSER_STAGE_TRANSLATE);
+ run("SELECT timestamp '2010a' FROM t1", TSDB_CODE_PAR_WRONG_VALUE_TYPE, PARSER_STAGE_TRANSLATE);
// TSDB_CODE_PAR_ILLEGAL_USE_AGG_FUNCTION
run("SELECT c2 FROM t1 tt1 join t1 tt2 on COUNT(*) > 0", TSDB_CODE_PAR_ILLEGAL_USE_AGG_FUNCTION,
@@ -238,6 +238,14 @@ TEST_F(ParserSelectTest, setOperator) {
run("SELECT * FROM t1 UNION ALL SELECT * FROM t1");
run("(SELECT * FROM t1) UNION ALL (SELECT * FROM t1)");
+
+ run("SELECT c1 FROM (SELECT c1 FROM t1 UNION ALL SELECT c1 FROM t1)");
+}
+
+TEST_F(ParserSelectTest, informationSchema) {
+ useDb("root", "test");
+
+ run("SELECT * FROM information_schema.user_databases WHERE name = 'information_schema'");
}
} // namespace ParserTest
diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c
index d4b9f5b292..c2434e60f3 100644
--- a/source/libs/planner/src/planLogicCreater.c
+++ b/source/libs/planner/src/planLogicCreater.c
@@ -985,6 +985,8 @@ static int32_t getMsgType(ENodeType sqlType) {
return TDMT_VND_CREATE_TABLE;
case QUERY_NODE_DROP_TABLE_STMT:
return TDMT_VND_DROP_TABLE;
+ case QUERY_NODE_ALTER_TABLE_STMT:
+ return TDMT_VND_ALTER_TABLE;
default:
break;
}
diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c
index 9968f63c5d..e38c180ac6 100644
--- a/source/libs/planner/src/planOptimizer.c
+++ b/source/libs/planner/src/planOptimizer.c
@@ -392,7 +392,8 @@ static int32_t cpdCalcTimeRange(SScanLogicNode* pScan, SNode** pPrimaryKeyCond,
}
static int32_t cpdOptimizeScanCondition(SOptimizeContext* pCxt, SScanLogicNode* pScan) {
- if (NULL == pScan->node.pConditions || OPTIMIZE_FLAG_TEST_MASK(pScan->node.optimizedFlag, OPTIMIZE_FLAG_CPD)) {
+ if (NULL == pScan->node.pConditions || OPTIMIZE_FLAG_TEST_MASK(pScan->node.optimizedFlag, OPTIMIZE_FLAG_CPD) ||
+ TSDB_SYSTEM_TABLE == pScan->pMeta->tableType) {
return TSDB_CODE_SUCCESS;
}
@@ -582,7 +583,7 @@ static bool cpdIsPrimaryKeyEqualCond(SJoinLogicNode* pJoin, SNode* pCond) {
return false;
}
- SOperatorNode* pOper = (SOperatorNode*)pJoin->pOnConditions;
+ SOperatorNode* pOper = (SOperatorNode*)pCond;
if (OP_TYPE_EQUAL != pOper->opType) {
return false;
}
@@ -608,12 +609,16 @@ static int32_t cpdCheckLogicCond(SOptimizeContext* pCxt, SJoinLogicNode* pJoin,
if (LOGIC_COND_TYPE_AND != pOnCond->condType) {
return generateUsageErrMsg(pCxt->pPlanCxt->pMsg, pCxt->pPlanCxt->msgLen, TSDB_CODE_PLAN_EXPECTED_TS_EQUAL);
}
+ bool hasPrimaryKeyEqualCond = false;
SNode* pCond = NULL;
FOREACH(pCond, pOnCond->pParameterList) {
- if (!cpdIsPrimaryKeyEqualCond(pJoin, pCond)) {
- return generateUsageErrMsg(pCxt->pPlanCxt->pMsg, pCxt->pPlanCxt->msgLen, TSDB_CODE_PLAN_EXPECTED_TS_EQUAL);
+ if (cpdIsPrimaryKeyEqualCond(pJoin, pCond)) {
+ hasPrimaryKeyEqualCond = true;
}
}
+ if (!hasPrimaryKeyEqualCond) {
+ return generateUsageErrMsg(pCxt->pPlanCxt->pMsg, pCxt->pPlanCxt->msgLen, TSDB_CODE_PLAN_EXPECTED_TS_EQUAL);
+ }
return TSDB_CODE_SUCCESS;
}
diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c
index edf44424e3..affe9ef2f6 100644
--- a/source/libs/planner/src/planPhysiCreater.c
+++ b/source/libs/planner/src/planPhysiCreater.c
@@ -261,6 +261,22 @@ typedef struct SSetSlotIdCxt {
SHashObj* pRightHash;
} SSetSlotIdCxt;
+static void dumpSlots(const char* pName, SHashObj* pHash) {
+ if (NULL == pHash) {
+ return;
+ }
+ planDebug("%s", pName);
+ void* pIt = taosHashIterate(pHash, NULL);
+ while (NULL != pIt) {
+ size_t len = 0;
+ char* pKey = taosHashGetKey(pIt, &len);
+ char name[TSDB_TABLE_NAME_LEN + TSDB_COL_NAME_LEN] = {0};
+ strncpy(name, pKey, len);
+ planDebug("\tslot name = %s", name);
+ pIt = taosHashIterate(pHash, pIt);
+ }
+}
+
static EDealRes doSetSlotId(SNode* pNode, void* pContext) {
if (QUERY_NODE_COLUMN == nodeType(pNode) && 0 != strcmp(((SColumnNode*)pNode)->colName, "*")) {
SSetSlotIdCxt* pCxt = (SSetSlotIdCxt*)pContext;
@@ -273,6 +289,8 @@ static EDealRes doSetSlotId(SNode* pNode, void* pContext) {
// pIndex is definitely not NULL, otherwise it is a bug
if (NULL == pIndex) {
planError("doSetSlotId failed, invalid slot name %s", name);
+ dumpSlots("left datablock desc", pCxt->pLeftHash);
+ dumpSlots("right datablock desc", pCxt->pRightHash);
pCxt->errCode = TSDB_CODE_PLAN_INTERNAL_ERROR;
return DEAL_RES_ERROR;
}
diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c
index 1266e8ae4b..54bc24e8bb 100644
--- a/source/libs/planner/src/planSpliter.c
+++ b/source/libs/planner/src/planSpliter.c
@@ -303,7 +303,7 @@ static SLogicNode* unMatchByNode(SLogicNode* pNode) {
}
SNode* pChild;
FOREACH(pChild, pNode->pChildren) {
- SLogicNode* pSplitNode = uaMatchByNode((SLogicNode*)pChild);
+ SLogicNode* pSplitNode = unMatchByNode((SLogicNode*)pChild);
if (NULL != pSplitNode) {
return pSplitNode;
}
@@ -318,7 +318,7 @@ static int32_t unCreateExchangeNode(SSplitContext* pCxt, SLogicSubplan* pSubplan
}
pExchange->srcGroupId = pCxt->groupId;
// pExchange->precision = pScan->pMeta->tableInfo.precision;
- pExchange->node.pTargets = nodesCloneList(pAgg->node.pTargets);
+ pExchange->node.pTargets = nodesCloneList(pAgg->pGroupKeys);
if (NULL == pExchange->node.pTargets) {
return TSDB_CODE_OUT_OF_MEMORY;
}
diff --git a/source/libs/planner/test/planJoinTest.cpp b/source/libs/planner/test/planJoinTest.cpp
index 4098d383f8..714900c4e5 100644
--- a/source/libs/planner/test/planJoinTest.cpp
+++ b/source/libs/planner/test/planJoinTest.cpp
@@ -23,10 +23,16 @@ class PlanJoinTest : public PlannerTestBase {};
TEST_F(PlanJoinTest, basic) {
useDb("root", "test");
- run("select t1.c1, t2.c2 from st1s1 t1, st1s2 t2 where t1.ts = t2.ts");
+ run("SELECT t1.c1, t2.c2 FROM st1s1 t1, st1s2 t2 WHERE t1.ts = t2.ts");
- run("select t1.*, t2.* from st1s1 t1, st1s2 t2 where t1.ts = t2.ts");
+ run("SELECT t1.*, t2.* FROM st1s1 t1, st1s2 t2 WHERE t1.ts = t2.ts");
- // run("select t1.c1, t2.c1 from st1s1 t1 join st1s2 t2 on t1.ts = t2.ts where t1.c1 > t2.c1 and t1.c2 = 'abc' and "
- // "t2.c2 = 'qwe'");
+ run("SELECT t1.c1, t2.c1 FROM st1s1 t1 JOIN st1s2 t2 ON t1.ts = t2.ts");
+}
+
+TEST_F(PlanJoinTest, withWhere) {
+ useDb("root", "test");
+
+ run("SELECT t1.c1, t2.c1 FROM st1s1 t1 JOIN st1s2 t2 ON t1.ts = t2.ts "
+ "WHERE t1.c1 > t2.c1 AND t1.c2 = 'abc' AND t2.c2 = 'qwe'");
}
diff --git a/source/libs/planner/test/planOtherTest.cpp b/source/libs/planner/test/planOtherTest.cpp
index b70cb4d19a..67c09d706e 100644
--- a/source/libs/planner/test/planOtherTest.cpp
+++ b/source/libs/planner/test/planOtherTest.cpp
@@ -47,4 +47,10 @@ TEST_F(PlanOtherTest, explain) {
run("explain analyze SELECT * FROM t1");
run("explain analyze verbose true ratio 0.01 SELECT * FROM t1");
-}
\ No newline at end of file
+}
+
+TEST_F(PlanOtherTest, show) {
+ useDb("root", "test");
+
+ run("SHOW DATABASES");
+}
diff --git a/source/libs/planner/test/planStateTest.cpp b/source/libs/planner/test/planStateTest.cpp
index 83c9621916..9ff035e148 100644
--- a/source/libs/planner/test/planStateTest.cpp
+++ b/source/libs/planner/test/planStateTest.cpp
@@ -23,13 +23,13 @@ class PlanStateTest : public PlannerTestBase {};
TEST_F(PlanStateTest, basic) {
useDb("root", "test");
- run("select count(*) from t1 state_window(c1)");
+ run("SELECT COUNT(*) FROM t1 STATE_WINDOW(c1)");
}
TEST_F(PlanStateTest, stateExpr) {
useDb("root", "test");
- run("select count(*) from t1 state_window(c1 + 10)");
+ run("SELECT COUNT(*) FROM t1 STATE_WINDOW(c1 + 10)");
}
TEST_F(PlanStateTest, selectFunc) {
diff --git a/source/libs/planner/test/planSubqueryTest.cpp b/source/libs/planner/test/planSubqueryTest.cpp
index f45cbc6f8f..6a7cb91bb9 100644
--- a/source/libs/planner/test/planSubqueryTest.cpp
+++ b/source/libs/planner/test/planSubqueryTest.cpp
@@ -23,9 +23,11 @@ class PlanSubqeuryTest : public PlannerTestBase {};
TEST_F(PlanSubqeuryTest, basic) {
useDb("root", "test");
- run("SELECT * FROM (SELECT * FROM t1)");
+ if (0 == g_skipSql) {
+ run("SELECT * FROM (SELECT * FROM t1)");
+ }
- // run("SELECT LAST(c1) FROM ( SELECT * FROM t1)");
+ run("SELECT LAST(c1) FROM (SELECT * FROM t1)");
}
TEST_F(PlanSubqeuryTest, doubleGroupBy) {
@@ -35,3 +37,11 @@ TEST_F(PlanSubqeuryTest, doubleGroupBy) {
"SELECT c1 + c3 a, c1 + COUNT(*) b FROM t1 WHERE c2 = 'abc' GROUP BY c1, c3) "
"WHERE a > 100 GROUP BY b");
}
+
+TEST_F(PlanSubqeuryTest, withSetOperator) {
+ useDb("root", "test");
+
+ run("SELECT c1 FROM (SELECT c1 FROM t1 UNION ALL SELECT c1 FROM t1)");
+
+ run("SELECT c1 FROM (SELECT c1 FROM t1 UNION SELECT c1 FROM t1)");
+}
diff --git a/source/libs/planner/test/planSysTbTest.cpp b/source/libs/planner/test/planSysTbTest.cpp
index fff6bfcca4..e5c30030b3 100644
--- a/source/libs/planner/test/planSysTbTest.cpp
+++ b/source/libs/planner/test/planSysTbTest.cpp
@@ -27,8 +27,8 @@ TEST_F(PlanSysTableTest, show) {
run("show stables");
}
-TEST_F(PlanSysTableTest, information) {
+TEST_F(PlanSysTableTest, informationSchema) {
useDb("root", "information_schema");
- run("show tables");
+ run("SELECT * FROM information_schema.user_databases WHERE name = 'information_schema'");
}
diff --git a/source/libs/planner/test/planTestMain.cpp b/source/libs/planner/test/planTestMain.cpp
index 464c636b66..36f66ddff6 100644
--- a/source/libs/planner/test/planTestMain.cpp
+++ b/source/libs/planner/test/planTestMain.cpp
@@ -25,23 +25,53 @@ class PlannerEnv : public testing::Environment {
virtual void SetUp() {
initMetaDataEnv();
generateMetaData();
+ initLog("/tmp/td");
}
virtual void TearDown() { destroyMetaDataEnv(); }
PlannerEnv() {}
virtual ~PlannerEnv() {}
+
+ private:
+ void initLog(const char* path) {
+ dDebugFlag = 143;
+ vDebugFlag = 0;
+ mDebugFlag = 143;
+ cDebugFlag = 0;
+ jniDebugFlag = 0;
+ tmrDebugFlag = 135;
+ uDebugFlag = 135;
+ rpcDebugFlag = 143;
+ qDebugFlag = 143;
+ wDebugFlag = 0;
+ sDebugFlag = 0;
+ tsdbDebugFlag = 0;
+ tsLogEmbedded = 1;
+ tsAsyncLog = 0;
+
+ taosRemoveDir(path);
+ taosMkDir(path);
+ tstrncpy(tsLogDir, path, PATH_MAX);
+ if (taosInitLog("taoslog", 1) != 0) {
+ std::cout << "failed to init log file" << std::endl;
+ }
+ }
};
static void parseArg(int argc, char* argv[]) {
int opt = 0;
const char* optstring = "";
- static struct option long_options[] = {{"dump", optional_argument, NULL, 'd'}, {0, 0, 0, 0}};
+ static struct option long_options[] = {
+ {"dump", optional_argument, NULL, 'd'}, {"skipSql", optional_argument, NULL, 's'}, {0, 0, 0, 0}};
while ((opt = getopt_long(argc, argv, optstring, long_options, NULL)) != -1) {
switch (opt) {
case 'd':
setDumpModule(optarg);
break;
+ case 's':
+ g_skipSql = 1;
+ break;
default:
break;
}
diff --git a/source/libs/planner/test/planTestUtil.cpp b/source/libs/planner/test/planTestUtil.cpp
index b2c590667e..6b038ae8ea 100644
--- a/source/libs/planner/test/planTestUtil.cpp
+++ b/source/libs/planner/test/planTestUtil.cpp
@@ -47,6 +47,7 @@ enum DumpModule {
};
DumpModule g_dumpModule = DUMP_MODULE_NOTHING;
+int32_t g_skipSql = 0;
void setDumpModule(const char* pModule) {
if (NULL == pModule) {
diff --git a/source/libs/planner/test/planTestUtil.h b/source/libs/planner/test/planTestUtil.h
index 7913ef531f..a63bba1a97 100644
--- a/source/libs/planner/test/planTestUtil.h
+++ b/source/libs/planner/test/planTestUtil.h
@@ -32,6 +32,8 @@ class PlannerTestBase : public testing::Test {
std::unique_ptr impl_;
};
+extern int32_t g_skipSql;
+
extern void setDumpModule(const char* pModule);
#endif // PLAN_TEST_UTIL_H
diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c
index 0591b9ec79..403a6a734f 100644
--- a/source/libs/qworker/src/qworker.c
+++ b/source/libs/qworker/src/qworker.c
@@ -807,13 +807,11 @@ int32_t qwHandlePrePhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inpu
}
if (ctx->rspCode) {
- QW_TASK_ELOG("task already failed at phase %s, error:%x - %s", qwPhaseStr(phase), ctx->rspCode,
- tstrerror(ctx->rspCode));
+ QW_TASK_ELOG("task already failed at phase %s, code:%s", qwPhaseStr(phase), tstrerror(ctx->rspCode));
QW_ERR_JRET(ctx->rspCode);
}
_return:
-
if (ctx) {
QW_UPDATE_RSP_CODE(ctx, code);
@@ -831,7 +829,11 @@ _return:
QW_TASK_DLOG("cancel rsp send, handle:%p, code:%x - %s", cancelConnection->handle, code, tstrerror(code));
}
- QW_TASK_DLOG("end to handle event at phase %s, code:%x - %s", qwPhaseStr(phase), code, tstrerror(code));
+ if (code != TSDB_CODE_SUCCESS) {
+ QW_TASK_ELOG("end to handle event at phase %s, code:%s", qwPhaseStr(phase), tstrerror(code));
+ } else {
+ QW_TASK_DLOG("end to handle event at phase %s, code:%s", qwPhaseStr(phase), tstrerror(code));
+ }
QW_RET(code);
}
diff --git a/source/libs/scalar/inc/sclInt.h b/source/libs/scalar/inc/sclInt.h
index 99e61ad1db..9dbfeceb59 100644
--- a/source/libs/scalar/inc/sclInt.h
+++ b/source/libs/scalar/inc/sclInt.h
@@ -27,13 +27,11 @@ typedef struct SScalarCtx {
SArray *pBlockList; /* element is SSDataBlock* */
SHashObj *pRes; /* element is SScalarParam */
void *param; // additional parameter (meta actually) for acquire value such as tbname/tags values
- SHashObj *udf2Handle;
} SScalarCtx;
#define SCL_DATA_TYPE_DUMMY_HASH 9000
#define SCL_DEFAULT_OP_NUM 10
-#define SCL_DEFAULT_UDF_NUM 8
#define SCL_IS_CONST_NODE(_node) ((NULL == (_node)) || (QUERY_NODE_VALUE == (_node)->type) || (QUERY_NODE_NODE_LIST == (_node)->type))
#define SCL_IS_CONST_CALC(_ctx) (NULL == (_ctx)->pBlockList)
diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c
index 8f4a9b9698..7e3dbaf7d0 100644
--- a/source/libs/scalar/src/scalar.c
+++ b/source/libs/scalar/src/scalar.c
@@ -154,18 +154,6 @@ void sclFreeRes(SHashObj *res) {
taosHashCleanup(res);
}
-void sclFreeUdfHandles(SHashObj *udf2handle) {
- void *pIter = taosHashIterate(udf2handle, NULL);
- while (pIter) {
- UdfcFuncHandle *handle = (UdfcFuncHandle *)pIter;
- if (handle) {
- teardownUdf(*handle);
- }
- pIter = taosHashIterate(udf2handle, pIter);
- }
- taosHashCleanup(udf2handle);
-}
-
void sclFreeParam(SScalarParam *param) {
if (param->columnData != NULL) {
colDataDestroy(param->columnData);
@@ -374,25 +362,7 @@ int32_t sclExecFunction(SFunctionNode *node, SScalarCtx *ctx, SScalarParam *outp
SCL_ERR_RET(sclInitParamList(¶ms, node->pParameterList, ctx, ¶mNum, &rowNum));
if (fmIsUserDefinedFunc(node->funcId)) {
- UdfcFuncHandle udfHandle = NULL;
- char* udfName = node->functionName;
- if (ctx->udf2Handle) {
- UdfcFuncHandle *pHandle = taosHashGet(ctx->udf2Handle, udfName, strlen(udfName));
- if (pHandle) {
- udfHandle = *pHandle;
- }
- }
- if (udfHandle == NULL) {
- code = setupUdf(udfName, &udfHandle);
- if (code != 0) {
- sclError("fmExecFunction error. setupUdf. function name: %s, code:%d", udfName, code);
- goto _return;
- }
- if (ctx->udf2Handle) {
- taosHashPut(ctx->udf2Handle, udfName, strlen(udfName), &udfHandle, sizeof(UdfcFuncHandle));
- }
- }
- code = callUdfScalarFunc(udfHandle, params, paramNum, output);
+ code = callUdfScalarFunc(node->functionName, params, paramNum, output);
if (code != 0) {
sclError("fmExecFunction error. callUdfScalarFunc. function name: %s, udf code:%d", node->functionName, code);
goto _return;
@@ -910,20 +880,15 @@ int32_t scalarCalculateConstants(SNode *pNode, SNode **pRes) {
SScalarCtx ctx = {0};
ctx.pRes = taosHashInit(SCL_DEFAULT_OP_NUM, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
if (NULL == ctx.pRes) {
- sclError("taosHashInit result map failed, num:%d", SCL_DEFAULT_OP_NUM);
- SCL_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
- }
- ctx.udf2Handle = taosHashInit(SCL_DEFAULT_UDF_NUM, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
- if (NULL == ctx.udf2Handle) {
- sclError("taosHashInit udf to handle map failed, num:%d", SCL_DEFAULT_OP_NUM);
+ sclError("taosHashInit failed, num:%d", SCL_DEFAULT_OP_NUM);
SCL_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
+
nodesRewriteExprPostOrder(&pNode, sclConstantsRewriter, (void *)&ctx);
SCL_ERR_JRET(ctx.code);
*pRes = pNode;
_return:
- sclFreeUdfHandles(ctx.udf2Handle);
sclFreeRes(ctx.pRes);
return code;
}
@@ -939,14 +904,10 @@ int32_t scalarCalculate(SNode *pNode, SArray *pBlockList, SScalarParam *pDst) {
// TODO: OPT performance
ctx.pRes = taosHashInit(SCL_DEFAULT_OP_NUM, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
if (NULL == ctx.pRes) {
- sclError("taosHashInit result map failed, num:%d", SCL_DEFAULT_OP_NUM);
- SCL_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
- }
- ctx.udf2Handle = taosHashInit(SCL_DEFAULT_UDF_NUM, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
- if (NULL == ctx.udf2Handle) {
- sclError("taosHashInit udf to handle map failed, num:%d", SCL_DEFAULT_OP_NUM);
+ sclError("taosHashInit failed, num:%d", SCL_DEFAULT_OP_NUM);
SCL_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
+
nodesWalkExprPostOrder(pNode, sclCalcWalker, (void *)&ctx);
SCL_ERR_JRET(ctx.code);
@@ -964,7 +925,6 @@ int32_t scalarCalculate(SNode *pNode, SArray *pBlockList, SScalarParam *pDst) {
_return:
//nodesDestroyNode(pNode);
- sclFreeUdfHandles(ctx.udf2Handle);
sclFreeRes(ctx.pRes);
return code;
}
diff --git a/source/libs/scheduler/src/scheduler.c b/source/libs/scheduler/src/scheduler.c
index 2710e54f95..5637539fea 100644
--- a/source/libs/scheduler/src/scheduler.c
+++ b/source/libs/scheduler/src/scheduler.c
@@ -256,6 +256,7 @@ int32_t schValidateTaskReceivedMsgType(SSchJob *pJob, SSchTask *pTask, int32_t m
return TSDB_CODE_SUCCESS;
case TDMT_VND_CREATE_TABLE_RSP:
case TDMT_VND_DROP_TABLE_RSP:
+ case TDMT_VND_ALTER_TABLE_RSP:
case TDMT_VND_SUBMIT_RSP:
break;
default:
@@ -1131,6 +1132,24 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask));
break;
}
+ case TDMT_VND_ALTER_TABLE_RSP: {
+ SVAlterTbRsp rsp = {0};
+ if (msg) {
+ SDecoder coder = {0};
+ tDecoderInit(&coder, msg, msgSize);
+ code = tDecodeSVAlterTbRsp(&coder, &rsp);
+ tDecoderClear(&coder);
+ SCH_ERR_JRET(code);
+ SCH_ERR_JRET(rsp.code);
+ }
+
+ SCH_ERR_JRET(rspCode);
+
+ if (NULL == msg) {
+ SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT);
+ }
+ break;
+ }
case TDMT_VND_SUBMIT_RSP: {
SCH_ERR_JRET(rspCode);
@@ -1391,6 +1410,10 @@ int32_t schHandleDropTableCallback(void *param, const SDataBuf *pMsg, int32_t co
return schHandleCallback(param, pMsg, TDMT_VND_DROP_TABLE_RSP, code);
}
+int32_t schHandleAlterTableCallback(void *param, const SDataBuf *pMsg, int32_t code) {
+ return schHandleCallback(param, pMsg, TDMT_VND_ALTER_TABLE_RSP, code);
+}
+
int32_t schHandleQueryCallback(void *param, const SDataBuf *pMsg, int32_t code) {
return schHandleCallback(param, pMsg, TDMT_VND_QUERY_RSP, code);
}
@@ -1490,6 +1513,9 @@ int32_t schGetCallbackFp(int32_t msgType, __async_send_cb_fn_t *fp) {
case TDMT_VND_DROP_TABLE:
*fp = schHandleDropTableCallback;
break;
+ case TDMT_VND_ALTER_TABLE:
+ *fp = schHandleAlterTableCallback;
+ break;
case TDMT_VND_SUBMIT:
*fp = schHandleSubmitCallback;
break;
@@ -2010,6 +2036,7 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr,
switch (msgType) {
case TDMT_VND_CREATE_TABLE:
case TDMT_VND_DROP_TABLE:
+ case TDMT_VND_ALTER_TABLE:
case TDMT_VND_SUBMIT: {
msgSize = pTask->msgLen;
msg = taosMemoryCalloc(1, msgSize);
diff --git a/source/libs/stream/src/tstream.c b/source/libs/stream/src/tstream.c
index 82b4a6a466..743fbd0e9f 100644
--- a/source/libs/stream/src/tstream.c
+++ b/source/libs/stream/src/tstream.c
@@ -16,6 +16,25 @@
#include "tstream.h"
#include "executor.h"
+int32_t streamDataBlockEncode(void** buf, const SStreamDataBlock* pOutput) {
+ int32_t tlen = 0;
+ tlen += taosEncodeFixedI8(buf, pOutput->type);
+ tlen += taosEncodeFixedI32(buf, pOutput->sourceVg);
+ tlen += taosEncodeFixedI64(buf, pOutput->sourceVer);
+ ASSERT(pOutput->type == STREAM_INPUT__DATA_BLOCK);
+ tlen += tEncodeDataBlocks(buf, pOutput->blocks);
+ return tlen;
+}
+
+void* streamDataBlockDecode(const void* buf, SStreamDataBlock* pInput) {
+ buf = taosDecodeFixedI8(buf, &pInput->type);
+ buf = taosDecodeFixedI32(buf, &pInput->sourceVg);
+ buf = taosDecodeFixedI64(buf, &pInput->sourceVer);
+ ASSERT(pInput->type == STREAM_INPUT__DATA_BLOCK);
+ buf = tDecodeDataBlocks(buf, &pInput->blocks);
+ return (void*)buf;
+}
+
static int32_t streamBuildDispatchMsg(SStreamTask* pTask, SArray* data, SRpcMsg* pMsg, SEpSet** ppEpSet) {
SStreamTaskExecReq req = {
.streamId = pTask->streamId,
@@ -97,6 +116,363 @@ static int32_t streamShuffleDispatch(SStreamTask* pTask, SMsgCb* pMsgCb, SHashOb
return 0;
}
+int32_t streamEnqueueDataSubmit(SStreamTask* pTask, SStreamDataSubmit* input) {
+ ASSERT(pTask->inputType == TASK_INPUT_TYPE__SUMBIT_BLOCK);
+ int8_t inputStatus = atomic_load_8(&pTask->inputStatus);
+ if (inputStatus == TASK_INPUT_STATUS__NORMAL) {
+ streamDataSubmitRefInc(input);
+ taosWriteQitem(pTask->inputQ, input);
+ }
+ return inputStatus;
+}
+
+int32_t streamEnqueueDataBlk(SStreamTask* pTask, SStreamDataBlock* input) {
+ ASSERT(pTask->inputType == TASK_INPUT_TYPE__DATA_BLOCK);
+ taosWriteQitem(pTask->inputQ, input);
+ int8_t inputStatus = atomic_load_8(&pTask->inputStatus);
+ return inputStatus;
+}
+
+int32_t streamTaskExecImpl(SStreamTask* pTask, void* data, SArray* pRes) {
+ void* exec = pTask->exec.runners[0].executor;
+
+ // set input
+ if (pTask->inputType == STREAM_INPUT__DATA_SUBMIT) {
+ SStreamDataSubmit* pSubmit = (SStreamDataSubmit*)data;
+ ASSERT(pSubmit->type == STREAM_INPUT__DATA_SUBMIT);
+
+ qSetStreamInput(exec, pSubmit->data, STREAM_DATA_TYPE_SUBMIT_BLOCK);
+ } else if (pTask->inputType == STREAM_INPUT__DATA_BLOCK) {
+ SStreamDataBlock* pBlock = (SStreamDataBlock*)data;
+ ASSERT(pBlock->type == STREAM_INPUT__DATA_BLOCK);
+
+ SArray* blocks = pBlock->blocks;
+ qSetMultiStreamInput(exec, blocks->pData, blocks->size, STREAM_DATA_TYPE_SSDATA_BLOCK);
+ }
+
+ // exec
+ while (1) {
+ SSDataBlock* output;
+ uint64_t ts = 0;
+ if (qExecTask(exec, &output, &ts) < 0) {
+ ASSERT(false);
+ }
+ if (output == NULL) break;
+ taosArrayPush(pRes, &output);
+ }
+
+ // destroy
+ if (pTask->inputType == STREAM_INPUT__DATA_SUBMIT) {
+ streamDataSubmitRefDec((SStreamDataSubmit*)data);
+ } else {
+ taosArrayDestroyEx(((SStreamDataBlock*)data)->blocks, (FDelete)tDeleteSSDataBlock);
+ }
+ return 0;
+}
+
+// TODO: handle version
+int32_t streamTaskExec2(SStreamTask* pTask, SMsgCb* pMsgCb) {
+ SArray* pRes = taosArrayInit(0, sizeof(SSDataBlock));
+ if (pRes == NULL) return -1;
+ while (1) {
+ int8_t execStatus = atomic_val_compare_exchange_8(&pTask->status, TASK_STATUS__IDLE, TASK_STATUS__EXECUTING);
+ void* exec = pTask->exec.runners[0].executor;
+ if (execStatus == TASK_STATUS__IDLE) {
+ // first run, from qall, handle failure from last exec
+ while (1) {
+ void* data = NULL;
+ taosGetQitem(pTask->inputQAll, &data);
+ if (data == NULL) break;
+
+ streamTaskExecImpl(pTask, data, pRes);
+
+ taosFreeQitem(data);
+
+ if (taosArrayGetSize(pRes) != 0) {
+ SStreamDataBlock* resQ = taosAllocateQitem(sizeof(void**), DEF_QITEM);
+ resQ->type = STREAM_INPUT__DATA_BLOCK;
+ resQ->blocks = pRes;
+ taosWriteQitem(pTask->outputQ, resQ);
+ pRes = taosArrayInit(0, sizeof(SSDataBlock));
+ if (pRes == NULL) goto FAIL;
+ }
+ }
+ // second run, from inputQ
+ taosReadAllQitems(pTask->inputQ, pTask->inputQAll);
+ while (1) {
+ void* data = NULL;
+ taosGetQitem(pTask->inputQAll, &data);
+ if (data == NULL) break;
+
+ streamTaskExecImpl(pTask, data, pRes);
+
+ taosFreeQitem(data);
+
+ if (taosArrayGetSize(pRes) != 0) {
+ SStreamDataBlock* resQ = taosAllocateQitem(sizeof(void**), DEF_QITEM);
+ resQ->type = STREAM_INPUT__DATA_BLOCK;
+ resQ->blocks = pRes;
+ taosWriteQitem(pTask->outputQ, resQ);
+ pRes = taosArrayInit(0, sizeof(SSDataBlock));
+ if (pRes == NULL) goto FAIL;
+ }
+ }
+ // set status closing
+ atomic_store_8(&pTask->status, TASK_STATUS__CLOSING);
+ // third run, make sure all inputQ is cleared
+ taosReadAllQitems(pTask->inputQ, pTask->inputQAll);
+ while (1) {
+ void* data = NULL;
+ taosGetQitem(pTask->inputQAll, &data);
+ if (data == NULL) break;
+
+ streamTaskExecImpl(pTask, data, pRes);
+
+ taosFreeQitem(data);
+
+ if (taosArrayGetSize(pRes) != 0) {
+ SStreamDataBlock* resQ = taosAllocateQitem(sizeof(void**), DEF_QITEM);
+ resQ->type = STREAM_INPUT__DATA_BLOCK;
+ resQ->blocks = pRes;
+ taosWriteQitem(pTask->outputQ, resQ);
+ pRes = taosArrayInit(0, sizeof(SSDataBlock));
+ if (pRes == NULL) goto FAIL;
+ }
+ }
+ // set status closing
+ atomic_store_8(&pTask->status, TASK_STATUS__CLOSING);
+ // third run, make sure all inputQ is cleared
+ taosReadAllQitems(pTask->inputQ, pTask->inputQAll);
+ while (1) {
+ void* data = NULL;
+ taosGetQitem(pTask->inputQAll, &data);
+ if (data == NULL) break;
+ }
+
+ atomic_store_8(&pTask->status, TASK_STATUS__IDLE);
+ break;
+ } else if (execStatus == TASK_STATUS__CLOSING) {
+ continue;
+ } else if (execStatus == TASK_STATUS__EXECUTING) {
+ break;
+ } else {
+ ASSERT(0);
+ }
+ }
+ return 0;
+FAIL:
+ atomic_store_8(&pTask->status, TASK_STATUS__IDLE);
+ return -1;
+}
+
+int32_t streamTaskDispatchDown(SStreamTask* pTask, SMsgCb* pMsgCb) {
+ //
+ return 0;
+}
+
+int32_t streamTaskSink(SStreamTask* pTask) {
+ //
+ return 0;
+}
+
+int32_t streamTaskProcessInputReq(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDataBlock* pBlock, SRpcMsg* pRsp) {
+ // 1. handle input
+ // 1.1 enqueue
+ taosWriteQitem(pTask->inputQ, pBlock);
+ // 1.2 calc back pressure
+ // 1.3 rsp by input status
+ int8_t inputStatus = atomic_load_8(&pTask->inputStatus);
+ SStreamDispatchRsp* pCont = rpcMallocCont(sizeof(SStreamDispatchRsp));
+ pCont->status = inputStatus;
+ pRsp->pCont = pCont;
+ pRsp->contLen = sizeof(SStreamDispatchRsp);
+ tmsgSendRsp(pRsp);
+ // 2. try exec
+ // 2.1. idle: exec
+ // 2.2. executing: return
+ // 2.3. closing: keep trying
+ while (1) {
+ int8_t execStatus = atomic_val_compare_exchange_8(&pTask->status, TASK_STATUS__IDLE, TASK_STATUS__EXECUTING);
+ if (execStatus == TASK_STATUS__IDLE) {
+ void* exec = pTask->exec.runners[0].executor;
+ SArray* pRes = taosArrayInit(0, sizeof(void*));
+ const SArray* blocks = pBlock->blocks;
+ qSetMultiStreamInput(exec, blocks->pData, blocks->size, STREAM_DATA_TYPE_SSDATA_BLOCK);
+ while (1) {
+ SSDataBlock* output;
+ uint64_t ts = 0;
+ if (qExecTask(exec, &output, &ts) < 0) {
+ ASSERT(false);
+ }
+ if (output == NULL) break;
+ taosArrayPush(pRes, &output);
+ }
+ // TODO: wrap destroy block
+ taosArrayDestroyP(pBlock->blocks, (FDelete)blockDataDestroy);
+
+ if (taosArrayGetSize(pRes) != 0) {
+ SArray** resQ = taosAllocateQitem(sizeof(void**), DEF_QITEM);
+ *resQ = pRes;
+ taosWriteQitem(pTask->outputQ, resQ);
+ }
+
+ } else if (execStatus == TASK_STATUS__CLOSING) {
+ continue;
+ } else if (execStatus == TASK_STATUS__EXECUTING)
+ break;
+ else {
+ ASSERT(0);
+ }
+ }
+ // 3. handle output
+ // 3.1 check and set status
+ // 3.2 dispatch / sink
+ STaosQall* qall = taosAllocateQall();
+ taosReadAllQitems(pTask->outputQ, qall);
+ SArray** ppRes = NULL;
+ while (1) {
+ taosGetQitem(qall, (void**)&ppRes);
+ if (ppRes == NULL) break;
+
+ SArray* pRes = *ppRes;
+ if (pTask->sinkType == TASK_SINK__TABLE) {
+ pTask->tbSink.tbSinkFunc(pTask, pTask->tbSink.vnode, pBlock->sourceVer, pRes);
+ } else if (pTask->sinkType == TASK_SINK__SMA) {
+ pTask->smaSink.smaSink(pTask->ahandle, pTask->smaSink.smaId, pRes);
+ } else {
+ }
+
+ // dispatch
+ if (pTask->dispatchType == TASK_DISPATCH__INPLACE) {
+ SRpcMsg dispatchMsg = {0};
+ if (streamBuildDispatchMsg(pTask, pRes, &dispatchMsg, NULL) < 0) {
+ ASSERT(0);
+ return -1;
+ }
+
+ int32_t qType;
+ if (pTask->dispatchMsgType == TDMT_VND_TASK_PIPE_EXEC || pTask->dispatchMsgType == TDMT_SND_TASK_PIPE_EXEC) {
+ qType = FETCH_QUEUE;
+ } else if (pTask->dispatchMsgType == TDMT_VND_TASK_MERGE_EXEC ||
+ pTask->dispatchMsgType == TDMT_SND_TASK_MERGE_EXEC) {
+ qType = MERGE_QUEUE;
+ } else if (pTask->dispatchMsgType == TDMT_VND_TASK_WRITE_EXEC) {
+ qType = WRITE_QUEUE;
+ } else {
+ ASSERT(0);
+ }
+ tmsgPutToQueue(pMsgCb, qType, &dispatchMsg);
+
+ } else if (pTask->dispatchType == TASK_DISPATCH__FIXED) {
+ SRpcMsg dispatchMsg = {0};
+ SEpSet* pEpSet = NULL;
+ if (streamBuildDispatchMsg(pTask, pRes, &dispatchMsg, &pEpSet) < 0) {
+ ASSERT(0);
+ return -1;
+ }
+
+ tmsgSendReq(pMsgCb, pEpSet, &dispatchMsg);
+
+ } else if (pTask->dispatchType == TASK_DISPATCH__SHUFFLE) {
+ SHashObj* pShuffleRes = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
+ if (pShuffleRes == NULL) {
+ return -1;
+ }
+
+ int32_t sz = taosArrayGetSize(pRes);
+ for (int32_t i = 0; i < sz; i++) {
+ SSDataBlock* pDataBlock = taosArrayGet(pRes, i);
+ SArray* pArray = taosHashGet(pShuffleRes, &pDataBlock->info.groupId, sizeof(int64_t));
+ if (pArray == NULL) {
+ pArray = taosArrayInit(0, sizeof(SSDataBlock));
+ if (pArray == NULL) {
+ return -1;
+ }
+ taosHashPut(pShuffleRes, &pDataBlock->info.groupId, sizeof(int64_t), &pArray, sizeof(void*));
+ }
+ taosArrayPush(pArray, pDataBlock);
+ }
+
+ if (streamShuffleDispatch(pTask, pMsgCb, pShuffleRes) < 0) {
+ return -1;
+ }
+
+ } else {
+ ASSERT(pTask->dispatchType == TASK_DISPATCH__NONE);
+ }
+ }
+ //
+ return 0;
+}
+
+int32_t streamTaskProcessDispatchRsp(SStreamTask* pTask, char* msg, int32_t msgLen) {
+ //
+ return 0;
+}
+
+int32_t streamTaskProcessRecoverReq(SStreamTask* pTask, char* msg) {
+ //
+ return 0;
+}
+
+int32_t streamTaskRun(SStreamTask* pTask) {
+ SArray* pRes = NULL;
+ if (pTask->execType == TASK_EXEC__PIPE || pTask->execType == TASK_EXEC__MERGE) {
+ // TODO remove multi runner
+ void* exec = pTask->exec.runners[0].executor;
+
+ int8_t status = atomic_val_compare_exchange_8(&pTask->status, TASK_STATUS__IDLE, TASK_STATUS__EXECUTING);
+ if (status == TASK_STATUS__IDLE) {
+ pRes = taosArrayInit(0, sizeof(void*));
+ if (pRes == NULL) {
+ return -1;
+ }
+
+ void* input = NULL;
+ taosWriteQitem(pTask->inputQ, &input);
+ if (input == NULL) return 0;
+
+ // TODO: fix type
+ if (pTask->sourceType == TASK_SOURCE__SCAN) {
+ SStreamDataSubmit* pSubmit = (SStreamDataSubmit*)input;
+ qSetStreamInput(exec, pSubmit->data, STREAM_DATA_TYPE_SUBMIT_BLOCK);
+ while (1) {
+ SSDataBlock* output;
+ uint64_t ts = 0;
+ if (qExecTask(exec, &output, &ts) < 0) {
+ ASSERT(false);
+ }
+ if (output == NULL) break;
+ taosArrayPush(pRes, &output);
+ }
+ streamDataSubmitRefDec(pSubmit);
+ } else {
+ SStreamDataBlock* pStreamBlock = (SStreamDataBlock*)input;
+ const SArray* blocks = pStreamBlock->blocks;
+ qSetMultiStreamInput(exec, blocks->pData, blocks->size, STREAM_DATA_TYPE_SSDATA_BLOCK);
+ while (1) {
+ SSDataBlock* output;
+ uint64_t ts = 0;
+ if (qExecTask(exec, &output, &ts) < 0) {
+ ASSERT(false);
+ }
+ if (output == NULL) break;
+ taosArrayPush(pRes, &output);
+ }
+ // TODO: wrap destroy block
+ taosArrayDestroyP(pStreamBlock->blocks, (FDelete)blockDataDestroy);
+ }
+
+ if (taosArrayGetSize(pRes) != 0) {
+ SArray** resQ = taosAllocateQitem(sizeof(void**), DEF_QITEM);
+ *resQ = pRes;
+ taosWriteQitem(pTask->outputQ, resQ);
+ }
+ }
+ }
+ return 0;
+}
+
int32_t streamExecTask(SStreamTask* pTask, SMsgCb* pMsgCb, const void* input, int32_t inputType, int32_t workId) {
SArray* pRes = NULL;
// source
@@ -251,15 +627,29 @@ SStreamTask* tNewSStreamTask(int64_t streamId) {
}
pTask->taskId = tGenIdPI32();
pTask->streamId = streamId;
- pTask->status = STREAM_TASK_STATUS__RUNNING;
- /*pTask->qmsg = NULL;*/
+ pTask->status = TASK_STATUS__IDLE;
+
+ pTask->inputQ = taosOpenQueue();
+ pTask->outputQ = taosOpenQueue();
+ pTask->inputQAll = taosAllocateQall();
+ pTask->outputQAll = taosAllocateQall();
+ if (pTask->inputQ == NULL || pTask->outputQ == NULL || pTask->inputQAll == NULL || pTask->outputQAll == NULL)
+ goto FAIL;
return pTask;
+FAIL:
+ if (pTask->inputQ) taosCloseQueue(pTask->inputQ);
+ if (pTask->outputQ) taosCloseQueue(pTask->outputQ);
+ if (pTask->inputQAll) taosFreeQall(pTask->inputQAll);
+ if (pTask->outputQAll) taosFreeQall(pTask->outputQAll);
+ if (pTask) taosMemoryFree(pTask);
+ return NULL;
}
int32_t tEncodeSStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) {
/*if (tStartEncode(pEncoder) < 0) return -1;*/
if (tEncodeI64(pEncoder, pTask->streamId) < 0) return -1;
if (tEncodeI32(pEncoder, pTask->taskId) < 0) return -1;
+ if (tEncodeI8(pEncoder, pTask->inputType) < 0) return -1;
if (tEncodeI8(pEncoder, pTask->status) < 0) return -1;
if (tEncodeI8(pEncoder, pTask->sourceType) < 0) return -1;
if (tEncodeI8(pEncoder, pTask->execType) < 0) return -1;
@@ -305,6 +695,7 @@ int32_t tDecodeSStreamTask(SDecoder* pDecoder, SStreamTask* pTask) {
/*if (tStartDecode(pDecoder) < 0) return -1;*/
if (tDecodeI64(pDecoder, &pTask->streamId) < 0) return -1;
if (tDecodeI32(pDecoder, &pTask->taskId) < 0) return -1;
+ if (tDecodeI8(pDecoder, &pTask->inputType) < 0) return -1;
if (tDecodeI8(pDecoder, &pTask->status) < 0) return -1;
if (tDecodeI8(pDecoder, &pTask->sourceType) < 0) return -1;
if (tDecodeI8(pDecoder, &pTask->execType) < 0) return -1;
@@ -349,10 +740,16 @@ int32_t tDecodeSStreamTask(SDecoder* pDecoder, SStreamTask* pTask) {
}
void tFreeSStreamTask(SStreamTask* pTask) {
+ taosCloseQueue(pTask->inputQ);
+ taosCloseQueue(pTask->outputQ);
// TODO
- /*taosMemoryFree(pTask->qmsg);*/
+ if (pTask->exec.qmsg) taosMemoryFree(pTask->exec.qmsg);
+ for (int32_t i = 0; i < pTask->exec.numOfRunners; i++) {
+ qDestroyTask(pTask->exec.runners[i].executor);
+ }
+ taosMemoryFree(pTask->exec.runners);
/*taosMemoryFree(pTask->executor);*/
- /*taosMemoryFree(pTask);*/
+ taosMemoryFree(pTask);
}
#if 0
diff --git a/source/libs/sync/inc/syncIndexMgr.h b/source/libs/sync/inc/syncIndexMgr.h
index 63f24b104f..0a6e2428fe 100644
--- a/source/libs/sync/inc/syncIndexMgr.h
+++ b/source/libs/sync/inc/syncIndexMgr.h
@@ -35,6 +35,7 @@ typedef struct SSyncIndexMgr {
} SSyncIndexMgr;
SSyncIndexMgr *syncIndexMgrCreate(SSyncNode *pSyncNode);
+void syncIndexMgrUpdate(SSyncIndexMgr *pSyncIndexMgr, SSyncNode *pSyncNode);
void syncIndexMgrDestroy(SSyncIndexMgr *pSyncIndexMgr);
void syncIndexMgrClear(SSyncIndexMgr *pSyncIndexMgr);
void syncIndexMgrSetIndex(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId, SyncIndex index);
diff --git a/source/libs/sync/inc/syncInt.h b/source/libs/sync/inc/syncInt.h
index 8a21eea7b7..9b655fb0fa 100644
--- a/source/libs/sync/inc/syncInt.h
+++ b/source/libs/sync/inc/syncInt.h
@@ -247,6 +247,7 @@ typedef struct SSyncNode {
// open/close --------------
SSyncNode* syncNodeOpen(const SSyncInfo* pSyncInfo);
void syncNodeStart(SSyncNode* pSyncNode);
+void syncNodeStartStandBy(SSyncNode* pSyncNode);
void syncNodeClose(SSyncNode* pSyncNode);
// ping --------------
@@ -271,7 +272,7 @@ int32_t syncNodeSendMsgByInfo(const SNodeInfo* nodeInfo, SSyncNode* pSyncNode, S
cJSON* syncNode2Json(const SSyncNode* pSyncNode);
char* syncNode2Str(const SSyncNode* pSyncNode);
char* syncNode2SimpleStr(const SSyncNode* pSyncNode);
-void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg *newConfig);
+void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg* newConfig);
SSyncNode* syncNodeAcquire(int64_t rid);
void syncNodeRelease(SSyncNode* pNode);
diff --git a/source/libs/sync/inc/syncUtil.h b/source/libs/sync/inc/syncUtil.h
index 159af1610e..1b08d3f7a1 100644
--- a/source/libs/sync/inc/syncUtil.h
+++ b/source/libs/sync/inc/syncUtil.h
@@ -62,7 +62,6 @@ bool syncUtilUserPreCommit(tmsg_t msgType);
bool syncUtilUserCommit(tmsg_t msgType);
bool syncUtilUserRollback(tmsg_t msgType);
-
#ifdef __cplusplus
}
#endif
diff --git a/source/libs/sync/src/syncAppendEntries.c b/source/libs/sync/src/syncAppendEntries.c
index aed19d042e..1a5d418e75 100644
--- a/source/libs/sync/src/syncAppendEntries.c
+++ b/source/libs/sync/src/syncAppendEntries.c
@@ -15,11 +15,11 @@
#include "syncAppendEntries.h"
#include "syncInt.h"
+#include "syncRaftCfg.h"
#include "syncRaftLog.h"
#include "syncRaftStore.h"
#include "syncUtil.h"
#include "syncVoteMgr.h"
-#include "syncRaftCfg.h"
// TLA+ Spec
// HandleAppendEntriesRequest(i, j, m) ==
@@ -200,7 +200,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
SSyncRaftEntry* pRollBackEntry = logStoreGetEntry(ths->pLogStore, index);
assert(pRollBackEntry != NULL);
- //if (pRollBackEntry->msgType != TDMT_VND_SYNC_NOOP) {
+ // if (pRollBackEntry->msgType != TDMT_VND_SYNC_NOOP) {
if (syncUtilUserRollback(pRollBackEntry->msgType)) {
SRpcMsg rpcMsg;
syncEntry2OriginalRpc(pRollBackEntry, &rpcMsg);
@@ -229,7 +229,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
SRpcMsg rpcMsg;
syncEntry2OriginalRpc(pAppendEntry, &rpcMsg);
if (ths->pFsm != NULL) {
- //if (ths->pFsm->FpPreCommitCb != NULL && pAppendEntry->originalRpcType != TDMT_VND_SYNC_NOOP) {
+ // if (ths->pFsm->FpPreCommitCb != NULL && pAppendEntry->originalRpcType != TDMT_VND_SYNC_NOOP) {
if (ths->pFsm->FpPreCommitCb != NULL && syncUtilUserPreCommit(pAppendEntry->originalRpcType)) {
SFsmCbMeta cbMeta;
cbMeta.index = pAppendEntry->index;
@@ -261,7 +261,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
SRpcMsg rpcMsg;
syncEntry2OriginalRpc(pAppendEntry, &rpcMsg);
if (ths->pFsm != NULL) {
- //if (ths->pFsm->FpPreCommitCb != NULL && pAppendEntry->originalRpcType != TDMT_VND_SYNC_NOOP) {
+ // if (ths->pFsm->FpPreCommitCb != NULL && pAppendEntry->originalRpcType != TDMT_VND_SYNC_NOOP) {
if (ths->pFsm->FpPreCommitCb != NULL && syncUtilUserPreCommit(pAppendEntry->originalRpcType)) {
SFsmCbMeta cbMeta;
cbMeta.index = pAppendEntry->index;
@@ -324,7 +324,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
SRpcMsg rpcMsg;
syncEntry2OriginalRpc(pEntry, &rpcMsg);
- //if (ths->pFsm->FpCommitCb != NULL && pEntry->originalRpcType != TDMT_VND_SYNC_NOOP) {
+ // if (ths->pFsm->FpCommitCb != NULL && pEntry->originalRpcType != TDMT_VND_SYNC_NOOP) {
if (ths->pFsm->FpCommitCb != NULL && syncUtilUserCommit(pEntry->originalRpcType)) {
SFsmCbMeta cbMeta;
cbMeta.index = pEntry->index;
@@ -338,10 +338,15 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
// config change
if (pEntry->originalRpcType == TDMT_VND_SYNC_CONFIG_CHANGE) {
SSyncCfg newSyncCfg;
- int32_t ret = syncCfgFromStr(rpcMsg.pCont, &newSyncCfg);
+ int32_t ret = syncCfgFromStr(rpcMsg.pCont, &newSyncCfg);
ASSERT(ret == 0);
syncNodeUpdateConfig(ths, &newSyncCfg);
+ if (ths->state == TAOS_SYNC_STATE_LEADER) {
+ syncNodeBecomeLeader(ths);
+ } else {
+ syncNodeBecomeFollower(ths);
+ }
}
rpcFreeCont(rpcMsg.pCont);
diff --git a/source/libs/sync/src/syncCommit.c b/source/libs/sync/src/syncCommit.c
index 620f0e9cd2..0f17cf267e 100644
--- a/source/libs/sync/src/syncCommit.c
+++ b/source/libs/sync/src/syncCommit.c
@@ -16,10 +16,10 @@
#include "syncCommit.h"
#include "syncIndexMgr.h"
#include "syncInt.h"
+#include "syncRaftCfg.h"
#include "syncRaftLog.h"
#include "syncRaftStore.h"
#include "syncUtil.h"
-#include "syncRaftCfg.h"
// \* Leader i advances its commitIndex.
// \* This is done as a separate step from handling AppendEntries responses,
@@ -102,7 +102,7 @@ void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) {
SRpcMsg rpcMsg;
syncEntry2OriginalRpc(pEntry, &rpcMsg);
- //if (pSyncNode->pFsm->FpCommitCb != NULL && pEntry->originalRpcType != TDMT_VND_SYNC_NOOP) {
+ // if (pSyncNode->pFsm->FpCommitCb != NULL && pEntry->originalRpcType != TDMT_VND_SYNC_NOOP) {
if (pSyncNode->pFsm->FpCommitCb != NULL && syncUtilUserCommit(pEntry->originalRpcType)) {
SFsmCbMeta cbMeta;
cbMeta.index = pEntry->index;
@@ -114,12 +114,17 @@ void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) {
}
// config change
- if (pEntry->originalRpcType == TDMT_VND_SYNC_CONFIG_CHANGE) {
- SSyncCfg newSyncCfg;
- int32_t ret = syncCfgFromStr(rpcMsg.pCont, &newSyncCfg);
- ASSERT(ret == 0);
+ if (pEntry->originalRpcType == TDMT_VND_SYNC_CONFIG_CHANGE) {
+ SSyncCfg newSyncCfg;
+ int32_t ret = syncCfgFromStr(rpcMsg.pCont, &newSyncCfg);
+ ASSERT(ret == 0);
- syncNodeUpdateConfig(pSyncNode, &newSyncCfg);
+ syncNodeUpdateConfig(pSyncNode, &newSyncCfg);
+ if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) {
+ syncNodeBecomeLeader(pSyncNode);
+ } else {
+ syncNodeBecomeFollower(pSyncNode);
+ }
}
rpcFreeCont(rpcMsg.pCont);
diff --git a/source/libs/sync/src/syncIO.c b/source/libs/sync/src/syncIO.c
index e055113277..1117528b53 100644
--- a/source/libs/sync/src/syncIO.c
+++ b/source/libs/sync/src/syncIO.c
@@ -15,6 +15,7 @@
#include "syncIO.h"
#include
+#include "os.h"
#include "syncMessage.h"
#include "syncUtil.h"
#include "tglobal.h"
@@ -198,6 +199,7 @@ static int32_t syncIOStartInternal(SSyncIO *io) {
{
SRpcInit rpcInit;
memset(&rpcInit, 0, sizeof(rpcInit));
+ snprintf(rpcInit.localFqdn, sizeof(rpcInit.localFqdn), "%s", "127.0.0.1");
rpcInit.localPort = io->myAddr.eps[0].port;
rpcInit.label = "SYNC-IO-SERVER";
rpcInit.numOfThreads = 1;
diff --git a/source/libs/sync/src/syncIndexMgr.c b/source/libs/sync/src/syncIndexMgr.c
index d33075054a..5809cedb90 100644
--- a/source/libs/sync/src/syncIndexMgr.c
+++ b/source/libs/sync/src/syncIndexMgr.c
@@ -31,6 +31,13 @@ SSyncIndexMgr *syncIndexMgrCreate(SSyncNode *pSyncNode) {
return pSyncIndexMgr;
}
+void syncIndexMgrUpdate(SSyncIndexMgr *pSyncIndexMgr, SSyncNode *pSyncNode) {
+ pSyncIndexMgr->replicas = &(pSyncNode->replicasId);
+ pSyncIndexMgr->replicaNum = pSyncNode->replicaNum;
+ pSyncIndexMgr->pSyncNode = pSyncNode;
+ syncIndexMgrClear(pSyncIndexMgr);
+}
+
void syncIndexMgrDestroy(SSyncIndexMgr *pSyncIndexMgr) {
if (pSyncIndexMgr != NULL) {
taosMemoryFree(pSyncIndexMgr);
diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c
index 5bc7d04dc9..562694bbbc 100644
--- a/source/libs/sync/src/syncMain.c
+++ b/source/libs/sync/src/syncMain.c
@@ -103,6 +103,16 @@ void syncStart(int64_t rid) {
taosReleaseRef(tsNodeRefId, pSyncNode->rid);
}
+void syncStartStandBy(int64_t rid) {
+ SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
+ if (pSyncNode == NULL) {
+ return;
+ }
+ syncNodeStartStandBy(pSyncNode);
+
+ taosReleaseRef(tsNodeRefId, pSyncNode->rid);
+}
+
void syncStop(int64_t rid) {
SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
if (pSyncNode == NULL) {
@@ -116,7 +126,7 @@ void syncStop(int64_t rid) {
int32_t syncReconfig(int64_t rid, const SSyncCfg* pSyncCfg) {
int32_t ret = 0;
- char *configChange = syncCfg2Str((SSyncCfg*)pSyncCfg);
+ char* configChange = syncCfg2Str((SSyncCfg*)pSyncCfg);
SRpcMsg rpcMsg = {0};
rpcMsg.msgType = TDMT_VND_SYNC_CONFIG_CHANGE;
rpcMsg.info.noResp = 1;
@@ -188,10 +198,9 @@ void syncGetEpSet(int64_t rid, SEpSet* pEpSet) {
(pEpSet->numOfEps)++;
sInfo("syncGetEpSet index:%d %s:%d", i, pEpSet->eps[i].fqdn, pEpSet->eps[i].port);
-
}
pEpSet->inUse = pSyncNode->pRaftCfg->cfg.myIndex;
-
+
sInfo("syncGetEpSet pEpSet->inUse:%d ", pEpSet->inUse);
taosReleaseRef(tsNodeRefId, pSyncNode->rid);
@@ -524,6 +533,17 @@ void syncNodeStart(SSyncNode* pSyncNode) {
assert(ret == 0);
}
+void syncNodeStartStandBy(SSyncNode* pSyncNode) {
+ // state change
+ pSyncNode->state = TAOS_SYNC_STATE_FOLLOWER;
+ syncNodeStopHeartbeatTimer(pSyncNode);
+
+ // reset elect timer, long enough
+ int32_t electMS = TIMER_MAX_MS;
+ int32_t ret = syncNodeRestartElectTimer(pSyncNode, electMS);
+ ASSERT(ret == 0);
+}
+
void syncNodeClose(SSyncNode* pSyncNode) {
int32_t ret;
assert(pSyncNode != NULL);
@@ -858,7 +878,7 @@ char* syncNode2SimpleStr(const SSyncNode* pSyncNode) {
return s;
}
-void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg *newConfig) {
+void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg* newConfig) {
pSyncNode->pRaftCfg->cfg = *newConfig;
int32_t ret = raftCfgPersist(pSyncNode->pRaftCfg);
ASSERT(ret == 0);
@@ -885,6 +905,11 @@ void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg *newConfig) {
for (int i = 0; i < pSyncNode->pRaftCfg->cfg.replicaNum; ++i) {
syncUtilnodeInfo2raftId(&pSyncNode->pRaftCfg->cfg.nodeInfo[i], pSyncNode->vgId, &pSyncNode->replicasId[i]);
}
+
+ syncIndexMgrUpdate(pSyncNode->pNextIndex, pSyncNode);
+ syncIndexMgrUpdate(pSyncNode->pMatchIndex, pSyncNode);
+
+ syncNodeLog2("==syncNodeUpdateConfig==", pSyncNode);
}
SSyncNode* syncNodeAcquire(int64_t rid) {
@@ -1245,7 +1270,7 @@ int32_t syncNodeOnClientRequestCb(SSyncNode* ths, SyncClientRequest* pMsg) {
syncEntry2OriginalRpc(pEntry, &rpcMsg);
if (ths->pFsm != NULL) {
- //if (ths->pFsm->FpPreCommitCb != NULL && pEntry->originalRpcType != TDMT_VND_SYNC_NOOP) {
+ // if (ths->pFsm->FpPreCommitCb != NULL && pEntry->originalRpcType != TDMT_VND_SYNC_NOOP) {
if (ths->pFsm->FpPreCommitCb != NULL && syncUtilUserPreCommit(pEntry->originalRpcType)) {
SFsmCbMeta cbMeta;
cbMeta.index = pEntry->index;
@@ -1267,7 +1292,7 @@ int32_t syncNodeOnClientRequestCb(SSyncNode* ths, SyncClientRequest* pMsg) {
syncEntry2OriginalRpc(pEntry, &rpcMsg);
if (ths->pFsm != NULL) {
- //if (ths->pFsm->FpPreCommitCb != NULL && pEntry->originalRpcType != TDMT_VND_SYNC_NOOP) {
+ // if (ths->pFsm->FpPreCommitCb != NULL && pEntry->originalRpcType != TDMT_VND_SYNC_NOOP) {
if (ths->pFsm->FpPreCommitCb != NULL && syncUtilUserPreCommit(pEntry->originalRpcType)) {
SFsmCbMeta cbMeta;
cbMeta.index = pEntry->index;
diff --git a/source/libs/sync/src/syncRaftLog.c b/source/libs/sync/src/syncRaftLog.c
index 8aeb9c4856..07a9397a58 100644
--- a/source/libs/sync/src/syncRaftLog.c
+++ b/source/libs/sync/src/syncRaftLog.c
@@ -58,14 +58,15 @@ int32_t logStoreAppendEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry) {
syncMeta.term = pEntry->term;
code = walWriteWithSyncInfo(pWal, pEntry->index, pEntry->originalRpcType, syncMeta, pEntry->data, pEntry->dataLen);
if (code != 0) {
- int32_t err = terrno;
- const char *errStr = tstrerror(err);
- int32_t linuxErr = errno;
- const char *linuxErrMsg = strerror(errno);
- sError("walWriteWithSyncInfo error, err:%d %X, msg:%s, linuxErr:%d, linuxErrMsg:%s", err, err, errStr, linuxErr, linuxErrMsg);
+ int32_t err = terrno;
+ const char* errStr = tstrerror(err);
+ int32_t linuxErr = errno;
+ const char* linuxErrMsg = strerror(errno);
+ sError("walWriteWithSyncInfo error, err:%d %X, msg:%s, linuxErr:%d, linuxErrMsg:%s", err, err, errStr, linuxErr,
+ linuxErrMsg);
ASSERT(0);
- }
- //assert(code == 0);
+ }
+ // assert(code == 0);
walFsync(pWal, true);
return code;
@@ -77,16 +78,17 @@ SSyncRaftEntry* logStoreGetEntry(SSyncLogStore* pLogStore, SyncIndex index) {
if (index >= SYNC_INDEX_BEGIN && index <= logStoreLastIndex(pLogStore)) {
SWalReadHandle* pWalHandle = walOpenReadHandle(pWal);
- int32_t code = walReadWithHandle(pWalHandle, index);
+ int32_t code = walReadWithHandle(pWalHandle, index);
if (code != 0) {
- int32_t err = terrno;
- const char *errStr = tstrerror(err);
- int32_t linuxErr = errno;
- const char *linuxErrMsg = strerror(errno);
- sError("walReadWithHandle error, err:%d %X, msg:%s, linuxErr:%d, linuxErrMsg:%s", err, err, errStr, linuxErr, linuxErrMsg);
+ int32_t err = terrno;
+ const char* errStr = tstrerror(err);
+ int32_t linuxErr = errno;
+ const char* linuxErrMsg = strerror(errno);
+ sError("walReadWithHandle error, err:%d %X, msg:%s, linuxErr:%d, linuxErrMsg:%s", err, err, errStr, linuxErr,
+ linuxErrMsg);
ASSERT(0);
- }
- //assert(walReadWithHandle(pWalHandle, index) == 0);
+ }
+ // assert(walReadWithHandle(pWalHandle, index) == 0);
SSyncRaftEntry* pEntry = syncEntryBuild(pWalHandle->pHead->head.bodyLen);
assert(pEntry != NULL);
@@ -112,16 +114,17 @@ SSyncRaftEntry* logStoreGetEntry(SSyncLogStore* pLogStore, SyncIndex index) {
int32_t logStoreTruncate(SSyncLogStore* pLogStore, SyncIndex fromIndex) {
SSyncLogStoreData* pData = pLogStore->data;
SWal* pWal = pData->pWal;
- //assert(walRollback(pWal, fromIndex) == 0);
+ // assert(walRollback(pWal, fromIndex) == 0);
int32_t code = walRollback(pWal, fromIndex);
if (code != 0) {
- int32_t err = terrno;
- const char *errStr = tstrerror(err);
- int32_t linuxErr = errno;
- const char *linuxErrMsg = strerror(errno);
- sError("walRollback error, err:%d %X, msg:%s, linuxErr:%d, linuxErrMsg:%s", err, err, errStr, linuxErr, linuxErrMsg);
+ int32_t err = terrno;
+ const char* errStr = tstrerror(err);
+ int32_t linuxErr = errno;
+ const char* linuxErrMsg = strerror(errno);
+ sError("walRollback error, err:%d %X, msg:%s, linuxErr:%d, linuxErrMsg:%s", err, err, errStr, linuxErr,
+ linuxErrMsg);
ASSERT(0);
- }
+ }
return 0; // to avoid compiler error
}
@@ -145,16 +148,16 @@ SyncTerm logStoreLastTerm(SSyncLogStore* pLogStore) {
int32_t logStoreUpdateCommitIndex(SSyncLogStore* pLogStore, SyncIndex index) {
SSyncLogStoreData* pData = pLogStore->data;
SWal* pWal = pData->pWal;
- //assert(walCommit(pWal, index) == 0);
+ // assert(walCommit(pWal, index) == 0);
int32_t code = walCommit(pWal, index);
if (code != 0) {
- int32_t err = terrno;
- const char *errStr = tstrerror(err);
- int32_t linuxErr = errno;
- const char *linuxErrMsg = strerror(errno);
+ int32_t err = terrno;
+ const char* errStr = tstrerror(err);
+ int32_t linuxErr = errno;
+ const char* linuxErrMsg = strerror(errno);
sError("walCommit error, err:%d %X, msg:%s, linuxErr:%d, linuxErrMsg:%s", err, err, errStr, linuxErr, linuxErrMsg);
ASSERT(0);
- }
+ }
return 0; // to avoid compiler error
}
diff --git a/source/libs/sync/test/CMakeLists.txt b/source/libs/sync/test/CMakeLists.txt
index 8afe9ff2a7..cfbdf0e961 100644
--- a/source/libs/sync/test/CMakeLists.txt
+++ b/source/libs/sync/test/CMakeLists.txt
@@ -37,6 +37,7 @@ add_executable(syncRaftCfgTest "")
add_executable(syncRespMgrTest "")
add_executable(syncSnapshotTest "")
add_executable(syncApplyMsgTest "")
+add_executable(syncConfigChangeTest "")
target_sources(syncTest
@@ -195,6 +196,10 @@ target_sources(syncApplyMsgTest
PRIVATE
"syncApplyMsgTest.cpp"
)
+target_sources(syncConfigChangeTest
+ PRIVATE
+ "syncConfigChangeTest.cpp"
+)
target_include_directories(syncTest
@@ -392,6 +397,11 @@ target_include_directories(syncApplyMsgTest
"${TD_SOURCE_DIR}/include/libs/sync"
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
)
+target_include_directories(syncConfigChangeTest
+ PUBLIC
+ "${TD_SOURCE_DIR}/include/libs/sync"
+ "${CMAKE_CURRENT_SOURCE_DIR}/../inc"
+)
target_link_libraries(syncTest
@@ -550,6 +560,10 @@ target_link_libraries(syncApplyMsgTest
sync
gtest_main
)
+target_link_libraries(syncConfigChangeTest
+ sync
+ gtest_main
+)
enable_testing()
diff --git a/source/libs/sync/test/syncConfigChangeTest.cpp b/source/libs/sync/test/syncConfigChangeTest.cpp
new file mode 100644
index 0000000000..9a2d9a6b34
--- /dev/null
+++ b/source/libs/sync/test/syncConfigChangeTest.cpp
@@ -0,0 +1,259 @@
+#include
+#include
+#include "os.h"
+#include "syncEnv.h"
+#include "syncIO.h"
+#include "syncInt.h"
+#include "syncUtil.h"
+#include "wal.h"
+
+void logTest() {
+ sTrace("--- sync log test: trace");
+ sDebug("--- sync log test: debug");
+ sInfo("--- sync log test: info");
+ sWarn("--- sync log test: warn");
+ sError("--- sync log test: error");
+ sFatal("--- sync log test: fatal");
+}
+
+uint16_t gPorts[] = {7010, 7110, 7210, 7310, 7410};
+const char* gDir = "./syncReplicateTest";
+int32_t gVgId = 1234;
+SyncIndex gSnapshotLastApplyIndex;
+
+void init() {
+ int code = walInit();
+ assert(code == 0);
+
+ code = syncInit();
+ assert(code == 0);
+
+ sprintf(tsTempDir, "%s", ".");
+}
+
+void cleanup() { walCleanUp(); }
+
+void CommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) {
+ SyncIndex beginIndex = SYNC_INDEX_INVALID;
+ if (pFsm->FpGetSnapshot != NULL) {
+ SSnapshot snapshot;
+ pFsm->FpGetSnapshot(pFsm, &snapshot);
+ beginIndex = snapshot.lastApplyIndex;
+ }
+
+ if (cbMeta.index > beginIndex) {
+ char logBuf[256];
+ snprintf(logBuf, sizeof(logBuf), "==callback== ==CommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n",
+ pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state));
+ syncRpcMsgLog2(logBuf, (SRpcMsg*)pMsg);
+ } else {
+ sTrace("==callback== ==CommitCb== do not apply again %ld", cbMeta.index);
+ }
+}
+
+void PreCommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) {
+ char logBuf[256];
+ snprintf(logBuf, sizeof(logBuf),
+ "==callback== ==PreCommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n", pFsm, cbMeta.index,
+ cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state));
+ syncRpcMsgLog2(logBuf, (SRpcMsg*)pMsg);
+}
+
+void RollBackCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) {
+ char logBuf[256];
+ snprintf(logBuf, sizeof(logBuf), "==callback== ==RollBackCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n",
+ pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state));
+ syncRpcMsgLog2(logBuf, (SRpcMsg*)pMsg);
+}
+
+int32_t GetSnapshotCb(struct SSyncFSM* pFsm, SSnapshot* pSnapshot) {
+ pSnapshot->data = NULL;
+ pSnapshot->lastApplyIndex = gSnapshotLastApplyIndex;
+ pSnapshot->lastApplyTerm = 100;
+ return 0;
+}
+
+SSyncFSM* createFsm() {
+ SSyncFSM* pFsm = (SSyncFSM*)taosMemoryMalloc(sizeof(SSyncFSM));
+ pFsm->FpCommitCb = CommitCb;
+ pFsm->FpPreCommitCb = PreCommitCb;
+ pFsm->FpRollBackCb = RollBackCb;
+ pFsm->FpGetSnapshot = GetSnapshotCb;
+ return pFsm;
+}
+
+SWal* createWal(char* path, int32_t vgId) {
+ SWalCfg walCfg;
+ memset(&walCfg, 0, sizeof(SWalCfg));
+ walCfg.vgId = vgId;
+ walCfg.fsyncPeriod = 1000;
+ walCfg.retentionPeriod = 1000;
+ walCfg.rollPeriod = 1000;
+ walCfg.retentionSize = 1000;
+ walCfg.segSize = 1000;
+ walCfg.level = TAOS_WAL_FSYNC;
+ SWal* pWal = walOpen(path, &walCfg);
+ assert(pWal != NULL);
+ return pWal;
+}
+
+int64_t createSyncNode(int32_t replicaNum, int32_t myIndex, int32_t vgId, SWal* pWal, char* path, bool isStandBy) {
+ SSyncInfo syncInfo;
+ syncInfo.vgId = vgId;
+ syncInfo.rpcClient = gSyncIO->clientRpc;
+ syncInfo.FpSendMsg = syncIOSendMsg;
+ syncInfo.queue = gSyncIO->pMsgQ;
+ syncInfo.FpEqMsg = syncIOEqMsg;
+ syncInfo.pFsm = createFsm();
+ snprintf(syncInfo.path, sizeof(syncInfo.path), "%s_sync_replica%d_index%d", path, replicaNum, myIndex);
+ syncInfo.pWal = pWal;
+
+ SSyncCfg* pCfg = &syncInfo.syncCfg;
+
+ if (isStandBy) {
+ pCfg->myIndex = 0;
+ pCfg->replicaNum = 1;
+ pCfg->nodeInfo[0].nodePort = gPorts[myIndex];
+ taosGetFqdn(pCfg->nodeInfo[0].nodeFqdn);
+
+ } else {
+ pCfg->myIndex = myIndex;
+ pCfg->replicaNum = replicaNum;
+
+ for (int i = 0; i < replicaNum; ++i) {
+ pCfg->nodeInfo[i].nodePort = gPorts[i];
+ taosGetFqdn(pCfg->nodeInfo[i].nodeFqdn);
+ // snprintf(pCfg->nodeInfo[i].nodeFqdn, sizeof(pCfg->nodeInfo[i].nodeFqdn), "%s", "127.0.0.1");
+ }
+ }
+
+ int64_t rid = syncOpen(&syncInfo);
+ assert(rid > 0);
+
+ SSyncNode* pSyncNode = (SSyncNode*)syncNodeAcquire(rid);
+ assert(pSyncNode != NULL);
+ gSyncIO->FpOnSyncPing = pSyncNode->FpOnPing;
+ gSyncIO->FpOnSyncPingReply = pSyncNode->FpOnPingReply;
+ gSyncIO->FpOnSyncRequestVote = pSyncNode->FpOnRequestVote;
+ gSyncIO->FpOnSyncRequestVoteReply = pSyncNode->FpOnRequestVoteReply;
+ gSyncIO->FpOnSyncAppendEntries = pSyncNode->FpOnAppendEntries;
+ gSyncIO->FpOnSyncAppendEntriesReply = pSyncNode->FpOnAppendEntriesReply;
+ gSyncIO->FpOnSyncPing = pSyncNode->FpOnPing;
+ gSyncIO->FpOnSyncPingReply = pSyncNode->FpOnPingReply;
+ gSyncIO->FpOnSyncTimeout = pSyncNode->FpOnTimeout;
+ gSyncIO->FpOnSyncClientRequest = pSyncNode->FpOnClientRequest;
+ gSyncIO->pSyncNode = pSyncNode;
+ syncNodeRelease(pSyncNode);
+
+ return rid;
+}
+
+void configChange(int64_t rid, int32_t replicaNum, int32_t myIndex) {
+ SSyncCfg syncCfg;
+
+ syncCfg.myIndex = myIndex;
+ syncCfg.replicaNum = replicaNum;
+
+ for (int i = 0; i < replicaNum; ++i) {
+ syncCfg.nodeInfo[i].nodePort = gPorts[i];
+ taosGetFqdn(syncCfg.nodeInfo[i].nodeFqdn);
+ }
+
+ syncReconfig(rid, &syncCfg);
+}
+
+void usage(char* exe) {
+ printf("usage: %s replicaNum myIndex lastApplyIndex writeRecordNum isStandBy isConfigChange \n", exe);
+}
+
+SRpcMsg* createRpcMsg(int i, int count, int myIndex) {
+ SRpcMsg* pMsg = (SRpcMsg*)taosMemoryMalloc(sizeof(SRpcMsg));
+ memset(pMsg, 0, sizeof(SRpcMsg));
+ pMsg->msgType = 9999;
+ pMsg->contLen = 256;
+ pMsg->pCont = rpcMallocCont(pMsg->contLen);
+ snprintf((char*)(pMsg->pCont), pMsg->contLen, "value-myIndex:%u-%d-%d-%ld", myIndex, i, count, taosGetTimestampMs());
+ return pMsg;
+}
+
+int main(int argc, char** argv) {
+ tsAsyncLog = 0;
+ sDebugFlag = DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE;
+ if (argc != 7) {
+ usage(argv[0]);
+ exit(-1);
+ }
+
+ int32_t replicaNum = atoi(argv[1]);
+ int32_t myIndex = atoi(argv[2]);
+ int32_t lastApplyIndex = atoi(argv[3]);
+ int32_t writeRecordNum = atoi(argv[4]);
+ bool isStandBy = atoi(argv[5]);
+ bool isConfigChange = atoi(argv[6]);
+ gSnapshotLastApplyIndex = lastApplyIndex;
+
+ if (!isStandBy) {
+ assert(replicaNum >= 1 && replicaNum <= 5);
+ assert(myIndex >= 0 && myIndex < replicaNum);
+ assert(lastApplyIndex >= -1);
+ assert(writeRecordNum >= 0);
+ }
+
+ init();
+ int32_t ret = syncIOStart((char*)"127.0.0.1", gPorts[myIndex]);
+ assert(ret == 0);
+
+ char walPath[128];
+ snprintf(walPath, sizeof(walPath), "%s_wal_replica%d_index%d", gDir, replicaNum, myIndex);
+ SWal* pWal = createWal(walPath, gVgId);
+
+ int64_t rid = createSyncNode(replicaNum, myIndex, gVgId, pWal, (char*)gDir, isStandBy);
+ assert(rid > 0);
+
+ if (isStandBy) {
+ syncStartStandBy(rid);
+ } else {
+ syncStart(rid);
+ }
+
+ SSyncNode* pSyncNode = (SSyncNode*)syncNodeAcquire(rid);
+ assert(pSyncNode != NULL);
+
+ if (isConfigChange) {
+ configChange(rid, 3, myIndex);
+ }
+
+ //---------------------------
+ int32_t alreadySend = 0;
+ while (1) {
+ char* s = syncNode2SimpleStr(pSyncNode);
+
+ if (alreadySend < writeRecordNum) {
+ SRpcMsg* pRpcMsg = createRpcMsg(alreadySend, writeRecordNum, myIndex);
+ int32_t ret = syncPropose(rid, pRpcMsg, false);
+ if (ret == TAOS_SYNC_PROPOSE_NOT_LEADER) {
+ sTrace("%s value%d write not leader", s, alreadySend);
+ } else {
+ assert(ret == 0);
+ sTrace("%s value%d write ok", s, alreadySend);
+ }
+ alreadySend++;
+
+ rpcFreeCont(pRpcMsg->pCont);
+ taosMemoryFree(pRpcMsg);
+ } else {
+ sTrace("%s", s);
+ }
+
+ taosMsleep(1000);
+ taosMemoryFree(s);
+ taosMsleep(1000);
+ }
+
+ syncNodeRelease(pSyncNode);
+ syncStop(rid);
+ walClose(pWal);
+ syncIOStop();
+ cleanup();
+ return 0;
+}
diff --git a/source/libs/tdb/CMakeLists.txt b/source/libs/tdb/CMakeLists.txt
index 722f6bddef..01490030f2 100644
--- a/source/libs/tdb/CMakeLists.txt
+++ b/source/libs/tdb/CMakeLists.txt
@@ -1,5 +1,5 @@
# tdb
-add_library(tdb SHARED "")
+add_library(tdb STATIC "")
target_sources(tdb
PRIVATE
"src/db/tdbPCache.c"
diff --git a/source/libs/transport/src/transSrv.c b/source/libs/transport/src/transSrv.c
index a9e9d5dc6c..d1bd78f809 100644
--- a/source/libs/transport/src/transSrv.c
+++ b/source/libs/transport/src/transSrv.c
@@ -137,7 +137,9 @@ static void destroySmsg(SSrvMsg* smsg);
// check whether already read complete packet
static SSrvConn* createConn(void* hThrd);
static void destroyConn(SSrvConn* conn, bool clear /*clear handle or not*/);
-static int reallocConnRefHandle(SSrvConn* conn);
+static void destroyConnRegArg(SSrvConn* conn);
+
+static int reallocConnRefHandle(SSrvConn* conn);
static void uvHandleQuit(SSrvMsg* msg, SWorkThrdObj* thrd);
static void uvHandleRelease(SSrvMsg* msg, SWorkThrdObj* thrd);
@@ -430,6 +432,8 @@ static void uvPrepareSendData(SSrvMsg* smsg, uv_buf_t* wb) {
if (smsg->type == Release) {
pHead->msgType = 0;
pConn->status = ConnNormal;
+
+ destroyConnRegArg(pConn);
transUnrefSrvHandle(pConn);
} else {
pHead->msgType = pMsg->msgType;
@@ -801,6 +805,12 @@ static void destroyConn(SSrvConn* conn, bool clear) {
// uv_shutdown(req, (uv_stream_t*)conn->pTcp, uvShutDownCb);
}
}
+static void destroyConnRegArg(SSrvConn* conn) {
+ if (conn->regArg.init == 1) {
+ transFreeMsg(conn->regArg.msg.pCont);
+ conn->regArg.init = 0;
+ }
+}
static int reallocConnRefHandle(SSrvConn* conn) {
uvReleaseExHandle(conn->refId);
uvRemoveExHandle(conn->refId);
@@ -828,16 +838,9 @@ static void uvDestroyConn(uv_handle_t* handle) {
// uv_timer_stop(&conn->pTimer);
transQueueDestroy(&conn->srvMsgs);
- if (conn->regArg.init == 1) {
- transFreeMsg(conn->regArg.msg.pCont);
- conn->regArg.init = 0;
- }
QUEUE_REMOVE(&conn->queue);
taosMemoryFree(conn->pTcp);
- if (conn->regArg.init == 1) {
- transFreeMsg(conn->regArg.msg.pCont);
- conn->regArg.init = 0;
- }
+ destroyConnRegArg(conn);
taosMemoryFree(conn);
if (thrd->quit && QUEUE_IS_EMPTY(&thrd->conn)) {
diff --git a/source/util/src/terror.c b/source/util/src/terror.c
index 6c0a9b1324..b5e64242e4 100644
--- a/source/util/src/terror.c
+++ b/source/util/src/terror.c
@@ -322,6 +322,10 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_TB_NOT_EXIST, "Table not exists")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_SMA_NOT_EXIST, "SMA not exists")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_HASH_MISMATCH, "Hash value mismatch")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_TABLE_NOT_EXIST, "Table does not exists")
+TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_TABLE_ACTION, "Invalid table action")
+TAOS_DEFINE_ERROR(TSDB_CODE_VND_COL_ALREADY_EXISTS, "Table column already exists")
+TAOS_DEFINE_ERROR(TSDB_CODE_VND_TABLE_COL_NOT_EXISTS, "Table column not exists")
+
// tsdb
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_TABLE_ID, "Invalid table ID")
@@ -354,6 +358,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_RECREATED, "Table re-created")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TDB_ENV_OPEN_ERROR, "TDB env open error")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_SMA_INDEX_IN_META, "No sma index in meta")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_SMA_STAT, "Invalid sma state")
+TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TSMA_ALREADY_EXIST, "Tsma already exists")
// query
@@ -463,7 +468,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_UDF_PIPE_NO_PIPE, "udf no pipe")
TAOS_DEFINE_ERROR(TSDB_CODE_UDF_LOAD_UDF_FAILURE, "udf load failure")
TAOS_DEFINE_ERROR(TSDB_CODE_UDF_INVALID_STATE, "udf invalid state")
TAOS_DEFINE_ERROR(TSDB_CODE_UDF_INVALID_INPUT, "udf invalid function input")
-
+TAOS_DEFINE_ERROR(TSDB_CODE_UDF_NO_FUNC_HANDLE, "udf no function handle")
//schemaless
TAOS_DEFINE_ERROR(TSDB_CODE_SML_INVALID_PROTOCOL_TYPE, "Invalid line protocol type")
TAOS_DEFINE_ERROR(TSDB_CODE_SML_INVALID_PRECISION_TYPE, "Invalid timestamp precision type")
diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c
index 8531f2c8ea..9970ac24d7 100644
--- a/source/util/src/tlog.c
+++ b/source/util/src/tlog.c
@@ -94,6 +94,7 @@ int32_t tqDebugFlag = 135;
int32_t fsDebugFlag = 135;
int32_t metaDebugFlag = 135;
int32_t fnDebugFlag = 135;
+int32_t smaDebugFlag = 135;
int64_t dbgEmptyW = 0;
int64_t dbgWN = 0;
@@ -755,6 +756,7 @@ void taosSetAllDebugFlag(int32_t flag) {
tqDebugFlag = flag;
fsDebugFlag = flag;
fnDebugFlag = flag;
+ smaDebugFlag = flag;
uInfo("all debug flag are set to %d", flag);
}
diff --git a/source/util/test/CMakeLists.txt b/source/util/test/CMakeLists.txt
index b90b3ee3c9..8c0f0c76ef 100644
--- a/source/util/test/CMakeLists.txt
+++ b/source/util/test/CMakeLists.txt
@@ -33,13 +33,13 @@ ENDIF()
INCLUDE_DIRECTORIES(${TD_SOURCE_DIR}/src/util/inc)
-# freelistTest
-add_executable(freelistTest "")
-target_sources(freelistTest
- PRIVATE
- "freelistTest.cpp"
-)
-target_link_libraries(freelistTest os util gtest gtest_main)
+# # freelistTest
+# add_executable(freelistTest "")
+# target_sources(freelistTest
+# PRIVATE
+# "freelistTest.cpp"
+# )
+# target_link_libraries(freelistTest os util gtest gtest_main)
# # encodeTest
# add_executable(encodeTest "encodeTest.cpp")
diff --git a/source/util/test/freelistTest.cpp b/source/util/test/freelistTest.cpp
deleted file mode 100644
index a445a16ad3..0000000000
--- a/source/util/test/freelistTest.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-#include
-
-#include "tfreelist.h"
-
-TEST(TD_UTIL_FREELIST_TEST, simple_test) {
- SFreeList fl;
-
- tFreeListInit(&fl);
-
- for (size_t i = 0; i < 1000; i++) {
- void *ptr = NULL;
- TFL_MALLOC(ptr, void*, 1024, &fl);
- GTEST_ASSERT_NE(ptr, nullptr);
- }
-
- tFreeListClear(&fl);
-}
diff --git a/tests/script/tsim/tmq/basic1.sim b/tests/script/tsim/tmq/basic1.sim
index 0c96635a78..ee9e87cf04 100644
--- a/tests/script/tsim/tmq/basic1.sim
+++ b/tests/script/tsim/tmq/basic1.sim
@@ -1,278 +1,288 @@
-#### test scenario, please refer to https://jira.taosdata.com:18090/pages/viewpage.action?pageId=135120406
-#basic1.sim: vgroups=1, one topic for one consumer, firstly insert data, then start consume. Include six topics
-#basic2.sim: vgroups=1, multi topics for one consumer, firstly insert data, then start consume. Include six topics
-#basic3.sim: vgroups=4, one topic for one consumer, firstly insert data, then start consume. Include six topics
-#basic4.sim: vgroups=4, multi topics for one consumer, firstly insert data, then start consume. Include six topics
-
-# notes1: Scalar function: ABS/ACOS/ASIN/ATAN/CEIL/COS/FLOOR/LOG/POW/ROUND/SIN/SQRT/TAN
-# The above use cases are combined with where filter conditions, such as: where ts > "2017-08-12 18:25:58.128Z" and sin(a) > 0.5;
-#
-# notes2: not support aggregate functions(such as sum/count/min/max) and time-windows(interval).
-#
-
-run tsim/tmq/prepareBasicEnv-1vgrp.sim
-
-#---- global parameters start ----#
-$dbName = db
-$vgroups = 1
-$stbPrefix = stb
-$ctbPrefix = ctb
-$ntbPrefix = ntb
-$stbNum = 1
-$ctbNum = 10
-$ntbNum = 10
-$rowsPerCtb = 10
-$tstart = 1640966400000 # 2022-01-01 00:00:00.000
-#---- global parameters end ----#
-
-$pullDelay = 3
-$ifcheckdata = 1
-$showMsg = 1
-$showRow = 0
-
-sql connect
-sql use $dbName
-
-print == create topics from super table
-sql create topic topic_stb_column as select ts, c3 from stb
-sql create topic topic_stb_all as select ts, c1, c2, c3 from stb
-sql create topic topic_stb_function as select ts, abs(c1), sin(c2) from stb
-
-print == create topics from child table
-sql create topic topic_ctb_column as select ts, c3 from ctb0
-sql create topic topic_ctb_all as select * from ctb0
-sql create topic topic_ctb_function as select ts, abs(c1), sin(c2) from ctb0
-
-print == create topics from normal table
-sql create topic topic_ntb_column as select ts, c3 from ntb0
-sql create topic topic_ntb_all as select * from ntb0
-sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb0
-
-#sql show topics
-#if $rows != 9 then
-# return -1
-#endi
-
-$keyList = ' . group.id:cgrp1
-$keyList = $keyList . '
-
-$cdb_index = 0
-#=============================== start consume =============================#
-
-print ================ test consume from stb
-$loop_cnt = 0
-loop_consume_diff_topic_from_stb:
-
-#######################################################################################
-# clear consume info and consume result
-#run tsim/tmq/clearConsume.sim
-# because drop table function no stable, so by create new db for consume info and result. Modify it later
-$cdb_index = $cdb_index + 1
-$cdbName = cdb . $cdb_index
-sql create database $cdbName vgroups 1
-sleep 500
-sql use $cdbName
-
-print == create consume info table and consume result table
-sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
-sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
-
-sql show tables
-if $rows != 2 then
- return -1
-endi
-#######################################################################################
-
-if $loop_cnt == 0 then
- print == scenario 1: topic_stb_column
- $topicList = ' . topic_stb_column
- $topicList = $topicList . '
-elif $loop_cnt == 1 then
- print == scenario 2: topic_stb_all
- $topicList = ' . topic_stb_all
- $topicList = $topicList . '
-elif $loop_cnt == 2 then
- print == scenario 3: topic_stb_function
- $topicList = ' . topic_stb_function
- $topicList = $topicList . '
-else
- goto loop_consume_diff_topic_from_stb_end
-endi
-
-$consumerId = 0
-$totalMsgOfStb = $ctbNum * $rowsPerCtb
-$expectmsgcnt = $totalMsgOfStb
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-
-print == start consumer to pull msgs from stb
-print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
-system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
-
-print == check consume result
-wait_consumer_end_from_stb:
-sql select * from consumeresult
-print ==> rows: $rows
-print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
-if $rows != 1 then
- sleep 1000
- goto wait_consumer_end_from_stb
-endi
-if $data[0][1] != $consumerId then
- return -1
-endi
-if $data[0][2] != $expectmsgcnt then
- return -1
-endi
-if $data[0][3] != $expectmsgcnt then
- return -1
-endi
-$loop_cnt = $loop_cnt + 1
-goto loop_consume_diff_topic_from_stb
-loop_consume_diff_topic_from_stb_end:
-
-print ================ test consume from ctb
-$loop_cnt = 0
-loop_consume_diff_topic_from_ctb:
-
-#######################################################################################
-# clear consume info and consume result
-#run tsim/tmq/clearConsume.sim
-# because drop table function no stable, so by create new db for consume info and result. Modify it later
-$cdb_index = $cdb_index + 1
-$cdbName = cdb . $cdb_index
-sql create database $cdbName vgroups 1
-sleep 500
-sql use $cdbName
-
-print == create consume info table and consume result table
-sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
-sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
-
-sql show tables
-if $rows != 2 then
- return -1
-endi
-#######################################################################################
-
-if $loop_cnt == 0 then
- print == scenario 1: topic_ctb_column
- $topicList = ' . topic_ctb_column
- $topicList = $topicList . '
-elif $loop_cnt == 1 then
- print == scenario 2: topic_ctb_all
- $topicList = ' . topic_ctb_all
- $topicList = $topicList . '
-elif $loop_cnt == 2 then
- print == scenario 3: topic_ctb_function
- $topicList = ' . topic_ctb_function
- $topicList = $topicList . '
-else
- goto loop_consume_diff_topic_from_ctb_end
-endi
-
-$consumerId = 0
-$totalMsgOfCtb = $rowsPerCtb
-$expectmsgcnt = $totalMsgOfCtb
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-
-print == start consumer to pull msgs from ctb
-print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
-system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
-
-print == check consume result
-wait_consumer_end_from_ctb:
-sql select * from consumeresult
-print ==> rows: $rows
-print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
-if $rows != 1 then
- sleep 1000
- goto wait_consumer_end_from_ctb
-endi
-if $data[0][1] != $consumerId then
- return -1
-endi
-if $data[0][2] != $totalMsgOfCtb then
- return -1
-endi
-if $data[0][3] != $totalMsgOfCtb then
- return -1
-endi
-$loop_cnt = $loop_cnt + 1
-goto loop_consume_diff_topic_from_ctb
-loop_consume_diff_topic_from_ctb_end:
-
-print ================ test consume from ntb
-$loop_cnt = 0
-loop_consume_diff_topic_from_ntb:
-
-#######################################################################################
-# clear consume info and consume result
-#run tsim/tmq/clearConsume.sim
-# because drop table function no stable, so by create new db for consume info and result. Modify it later
-$cdb_index = $cdb_index + 1
-$cdbName = cdb . $cdb_index
-sql create database $cdbName vgroups 1
-sleep 500
-sql use $cdbName
-
-print == create consume info table and consume result table
-sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
-sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
-
-sql show tables
-if $rows != 2 then
- return -1
-endi
-#######################################################################################
-
-if $loop_cnt == 0 then
- print == scenario 1: topic_ntb_column
- $topicList = ' . topic_ntb_column
- $topicList = $topicList . '
-elif $loop_cnt == 1 then
- print == scenario 2: topic_ntb_all
- $topicList = ' . topic_ntb_all
- $topicList = $topicList . '
-elif $loop_cnt == 2 then
- print == scenario 3: topic_ntb_function
- $topicList = ' . topic_ntb_function
- $topicList = $topicList . '
-else
- goto loop_consume_diff_topic_from_ntb_end
-endi
-
-$consumerId = 0
-$totalMsgOfNtb = $rowsPerCtb
-$expectmsgcnt = $totalMsgOfNtb
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-
-print == start consumer to pull msgs from ntb
-print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
-system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
-
-print == check consume result from ntb
-wait_consumer_end_from_ntb:
-sql select * from consumeresult
-print ==> rows: $rows
-print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
-if $rows != 1 then
- sleep 1000
- goto wait_consumer_end_from_ntb
-endi
-if $data[0][1] != $consumerId then
- return -1
-endi
-if $data[0][2] != $totalMsgOfNtb then
- return -1
-endi
-if $data[0][3] != $totalMsgOfNtb then
- return -1
-endi
-$loop_cnt = $loop_cnt + 1
-goto loop_consume_diff_topic_from_ntb
-loop_consume_diff_topic_from_ntb_end:
-
-#------ not need stop consumer, because it exit after pull msg overthan expect msg
-#system tsim/tmq/consume.sh -s stop -x SIGINT
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
+#### test scenario, please refer to https://jira.taosdata.com:18090/pages/viewpage.action?pageId=135120406
+#basic1.sim: vgroups=1, one topic for one consumer, firstly insert data, then start consume. Include six topics
+#basic2.sim: vgroups=1, multi topics for one consumer, firstly insert data, then start consume. Include six topics
+#basic3.sim: vgroups=4, one topic for one consumer, firstly insert data, then start consume. Include six topics
+#basic4.sim: vgroups=4, multi topics for one consumer, firstly insert data, then start consume. Include six topics
+
+# notes1: Scalar function: ABS/ACOS/ASIN/ATAN/CEIL/COS/FLOOR/LOG/POW/ROUND/SIN/SQRT/TAN
+# The above use cases are combined with where filter conditions, such as: where ts > "2017-08-12 18:25:58.128Z" and sin(a) > 0.5;
+#
+# notes2: not support aggregate functions(such as sum/count/min/max) and time-windows(interval).
+#
+
+run tsim/tmq/prepareBasicEnv-1vgrp.sim
+
+#---- global parameters start ----#
+$dbName = db
+$vgroups = 1
+$stbPrefix = stb
+$ctbPrefix = ctb
+$ntbPrefix = ntb
+$stbNum = 1
+$ctbNum = 10
+$ntbNum = 10
+$rowsPerCtb = 10
+$tstart = 1640966400000 # 2022-01-01 00:00:00.000
+#---- global parameters end ----#
+
+$pullDelay = 3
+$ifcheckdata = 1
+$ifmanualcommit = 1
+$showMsg = 1
+$showRow = 0
+
+sql connect
+sql use $dbName
+
+print == create topics from super table
+sql create topic topic_stb_column as select ts, c3 from stb
+sql create topic topic_stb_all as select ts, c1, c2, c3 from stb
+sql create topic topic_stb_function as select ts, abs(c1), sin(c2) from stb
+
+print == create topics from child table
+sql create topic topic_ctb_column as select ts, c3 from ctb0
+sql create topic topic_ctb_all as select * from ctb0
+sql create topic topic_ctb_function as select ts, abs(c1), sin(c2) from ctb0
+
+print == create topics from normal table
+sql create topic topic_ntb_column as select ts, c3 from ntb0
+sql create topic topic_ntb_all as select * from ntb0
+sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb0
+
+#sql show topics
+#if $rows != 9 then
+# return -1
+#endi
+
+#'group.id:cgrp1,enable.auto.commit:false,auto.commit.interval.ms:6000,auto.offset.reset:earliest'
+$keyList = ' . group.id:cgrp1
+$keyList = $keyList . ,
+$keyList = $keyList . enable.auto.commit:false
+#$keyList = $keyList . ,
+#$keyList = $keyList . auto.commit.interval.ms:6000
+#$keyList = $keyList . ,
+#$keyList = $keyList . auto.offset.reset:earliest
+$keyList = $keyList . '
+print ========== key list: $keyList
+
+
+$cdb_index = 0
+#=============================== start consume =============================#
+
+print ================ test consume from stb
+$loop_cnt = 0
+loop_consume_diff_topic_from_stb:
+
+#######################################################################################
+# clear consume info and consume result
+#run tsim/tmq/clearConsume.sim
+# because drop table function no stable, so by create new db for consume info and result. Modify it later
+$cdb_index = $cdb_index + 1
+$cdbName = cdb . $cdb_index
+sql create database $cdbName vgroups 1
+sleep 500
+sql use $cdbName
+
+print == create consume info table and consume result table
+sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
+sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
+
+sql show tables
+if $rows != 2 then
+ return -1
+endi
+#######################################################################################
+
+if $loop_cnt == 0 then
+ print == scenario 1: topic_stb_column
+ $topicList = ' . topic_stb_column
+ $topicList = $topicList . '
+elif $loop_cnt == 1 then
+ print == scenario 2: topic_stb_all
+ $topicList = ' . topic_stb_all
+ $topicList = $topicList . '
+elif $loop_cnt == 2 then
+ print == scenario 3: topic_stb_function
+ $topicList = ' . topic_stb_function
+ $topicList = $topicList . '
+else
+ goto loop_consume_diff_topic_from_stb_end
+endi
+
+$consumerId = 0
+$totalMsgOfStb = $ctbNum * $rowsPerCtb
+$expectmsgcnt = $totalMsgOfStb
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+
+print == start consumer to pull msgs from stb
+print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
+system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
+
+print == check consume result
+wait_consumer_end_from_stb:
+sql select * from consumeresult
+print ==> rows: $rows
+print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+if $rows != 1 then
+ sleep 1000
+ goto wait_consumer_end_from_stb
+endi
+if $data[0][1] != $consumerId then
+ return -1
+endi
+if $data[0][2] != $expectmsgcnt then
+ return -1
+endi
+if $data[0][3] != $expectmsgcnt then
+ return -1
+endi
+$loop_cnt = $loop_cnt + 1
+goto loop_consume_diff_topic_from_stb
+loop_consume_diff_topic_from_stb_end:
+
+print ================ test consume from ctb
+$loop_cnt = 0
+loop_consume_diff_topic_from_ctb:
+
+#######################################################################################
+# clear consume info and consume result
+#run tsim/tmq/clearConsume.sim
+# because drop table function no stable, so by create new db for consume info and result. Modify it later
+$cdb_index = $cdb_index + 1
+$cdbName = cdb . $cdb_index
+sql create database $cdbName vgroups 1
+sleep 500
+sql use $cdbName
+
+print == create consume info table and consume result table
+sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
+sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
+
+sql show tables
+if $rows != 2 then
+ return -1
+endi
+#######################################################################################
+
+if $loop_cnt == 0 then
+ print == scenario 1: topic_ctb_column
+ $topicList = ' . topic_ctb_column
+ $topicList = $topicList . '
+elif $loop_cnt == 1 then
+ print == scenario 2: topic_ctb_all
+ $topicList = ' . topic_ctb_all
+ $topicList = $topicList . '
+elif $loop_cnt == 2 then
+ print == scenario 3: topic_ctb_function
+ $topicList = ' . topic_ctb_function
+ $topicList = $topicList . '
+else
+ goto loop_consume_diff_topic_from_ctb_end
+endi
+
+$consumerId = 0
+$totalMsgOfCtb = $rowsPerCtb
+$expectmsgcnt = $totalMsgOfCtb
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+
+print == start consumer to pull msgs from ctb
+print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
+system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
+
+print == check consume result
+wait_consumer_end_from_ctb:
+sql select * from consumeresult
+print ==> rows: $rows
+print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+if $rows != 1 then
+ sleep 1000
+ goto wait_consumer_end_from_ctb
+endi
+if $data[0][1] != $consumerId then
+ return -1
+endi
+if $data[0][2] != $totalMsgOfCtb then
+ return -1
+endi
+if $data[0][3] != $totalMsgOfCtb then
+ return -1
+endi
+$loop_cnt = $loop_cnt + 1
+goto loop_consume_diff_topic_from_ctb
+loop_consume_diff_topic_from_ctb_end:
+
+print ================ test consume from ntb
+$loop_cnt = 0
+loop_consume_diff_topic_from_ntb:
+
+#######################################################################################
+# clear consume info and consume result
+#run tsim/tmq/clearConsume.sim
+# because drop table function no stable, so by create new db for consume info and result. Modify it later
+$cdb_index = $cdb_index + 1
+$cdbName = cdb . $cdb_index
+sql create database $cdbName vgroups 1
+sleep 500
+sql use $cdbName
+
+print == create consume info table and consume result table
+sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
+sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
+
+sql show tables
+if $rows != 2 then
+ return -1
+endi
+#######################################################################################
+
+if $loop_cnt == 0 then
+ print == scenario 1: topic_ntb_column
+ $topicList = ' . topic_ntb_column
+ $topicList = $topicList . '
+elif $loop_cnt == 1 then
+ print == scenario 2: topic_ntb_all
+ $topicList = ' . topic_ntb_all
+ $topicList = $topicList . '
+elif $loop_cnt == 2 then
+ print == scenario 3: topic_ntb_function
+ $topicList = ' . topic_ntb_function
+ $topicList = $topicList . '
+else
+ goto loop_consume_diff_topic_from_ntb_end
+endi
+
+$consumerId = 0
+$totalMsgOfNtb = $rowsPerCtb
+$expectmsgcnt = $totalMsgOfNtb
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+
+print == start consumer to pull msgs from ntb
+print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
+system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
+
+print == check consume result from ntb
+wait_consumer_end_from_ntb:
+sql select * from consumeresult
+print ==> rows: $rows
+print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+if $rows != 1 then
+ sleep 1000
+ goto wait_consumer_end_from_ntb
+endi
+if $data[0][1] != $consumerId then
+ return -1
+endi
+if $data[0][2] != $totalMsgOfNtb then
+ return -1
+endi
+if $data[0][3] != $totalMsgOfNtb then
+ return -1
+endi
+$loop_cnt = $loop_cnt + 1
+goto loop_consume_diff_topic_from_ntb
+loop_consume_diff_topic_from_ntb_end:
+
+#------ not need stop consumer, because it exit after pull msg overthan expect msg
+#system tsim/tmq/consume.sh -s stop -x SIGINT
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/tmq/basic1Of2Cons.sim b/tests/script/tsim/tmq/basic1Of2Cons.sim
index 957f1774f9..9d4b0e75da 100644
--- a/tests/script/tsim/tmq/basic1Of2Cons.sim
+++ b/tests/script/tsim/tmq/basic1Of2Cons.sim
@@ -1,372 +1,382 @@
-#### test scenario, please refer to https://jira.taosdata.com:18090/pages/viewpage.action?pageId=135120406
-#basic1Of2Cons.sim: vgroups=1, one topic for 2 consumers, firstly insert data, then start consume. Include six topics
-#basic2Of2Cons.sim: vgroups=1, multi topics for 2 consumers, firstly insert data, then start consume. Include six topics
-#basic3Of2Cons.sim: vgroups=4, one topic for 2 consumers, firstly insert data, then start consume. Include six topics
-#basic4Of2Cons.sim: vgroups=4, multi topics for 2 consumers, firstly insert data, then start consume. Include six topics
-
-# notes1: Scalar function: ABS/ACOS/ASIN/ATAN/CEIL/COS/FLOOR/LOG/POW/ROUND/SIN/SQRT/TAN
-# The above use cases are combined with where filter conditions, such as: where ts > "2017-08-12 18:25:58.128Z" and sin(a) > 0.5;
-#
-# notes2: not support aggregate functions(such as sum/count/min/max) and time-windows(interval).
-#
-
-run tsim/tmq/prepareBasicEnv-1vgrp.sim
-
-#---- global parameters start ----#
-$dbName = db
-$vgroups = 1
-$stbPrefix = stb
-$ctbPrefix = ctb
-$ntbPrefix = ntb
-$stbNum = 1
-$ctbNum = 10
-$ntbNum = 10
-$rowsPerCtb = 10
-$tstart = 1640966400000 # 2022-01-01 00:00:00.000
-#---- global parameters end ----#
-
-$pullDelay = 5
-$ifcheckdata = 1
-$showMsg = 1
-$showRow = 0
-
-sql connect
-sql use $dbName
-
-print == create topics from super table
-sql create topic topic_stb_column as select ts, c3 from stb
-sql create topic topic_stb_all as select ts, c1, c2, c3 from stb
-sql create topic topic_stb_function as select ts, abs(c1), sin(c2) from stb
-
-print == create topics from child table
-sql create topic topic_ctb_column as select ts, c3 from ctb0
-sql create topic topic_ctb_all as select * from ctb0
-sql create topic topic_ctb_function as select ts, abs(c1), sin(c2) from ctb0
-
-print == create topics from normal table
-sql create topic topic_ntb_column as select ts, c3 from ntb0
-sql create topic topic_ntb_all as select * from ntb0
-sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb0
-
-#sql show topics
-#if $rows != 9 then
-# return -1
-#endi
-
-$keyList = ' . group.id:cgrp1
-$keyList = $keyList . '
-
-$cdb_index = 0
-#=============================== start consume =============================#
-
-print ================ test consume from stb
-$loop_cnt = 0
-loop_consume_diff_topic_from_stb:
-
-#######################################################################################
-# clear consume info and consume result
-#run tsim/tmq/clearConsume.sim
-# because drop table function no stable, so by create new db for consume info and result. Modify it later
-$cdb_index = $cdb_index + 1
-$cdbName = cdb . $cdb_index
-sql create database $cdbName vgroups 1
-sleep 500
-sql use $cdbName
-
-print == create consume info table and consume result table for stb
-sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
-sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
-
-sql show tables
-if $rows != 2 then
- return -1
-endi
-#######################################################################################
-
-if $loop_cnt == 0 then
- print == scenario 1: topic_stb_column
- $topicList = ' . topic_stb_column
- $topicList = $topicList . '
-elif $loop_cnt == 1 then
- print == scenario 2: topic_stb_all
- $topicList = ' . topic_stb_all
- $topicList = $topicList . '
-elif $loop_cnt == 2 then
- print == scenario 3: topic_stb_function
- $topicList = ' . topic_stb_function
- $topicList = $topicList . '
-else
- goto loop_consume_diff_topic_from_stb_end
-endi
-
-$consumerId = 0
-$totalMsgOfStb = $ctbNum * $rowsPerCtb
-$expectmsgcnt = $totalMsgOfStb
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-
-$consumerId = 1
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-
-print == start consumer to pull msgs from stb
-print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
-system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
-
-print == check consume result
-wait_consumer_end_from_stb:
-sql select * from consumeresult
-print ==> rows: $rows
-print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
-print ==> rows[1]: $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
-if $rows != 2 then
- sleep 1000
- goto wait_consumer_end_from_stb
-endi
-if $data[0][1] == 0 then
- if $data[1][1] != 1 then
- return -1
- endi
-endi
-if $data[0][1] == 1 then
- if $data[1][1] != 0 then
- return -1
- endi
-endi
-
-# either $data[0][2] == $totalMsgOfStb and $data[1][2] == 0
-# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfStb
-if $data[0][2] == $totalMsgOfStb then
- if $data[1][2] == 0 then
- goto check_ok_0
- endi
-elif $data[0][2] == 0 then
- if $data[1][2] == $totalMsgOfStb then
- goto check_ok_0
- endi
-endi
-return -1
-check_ok_0:
-
-if $data[0][3] == $totalMsgOfStb then
- if $data[1][3] == 0 then
- goto check_ok_1
- endi
-elif $data[0][3] == 0 then
- if $data[1][3] == $totalMsgOfStb then
- goto check_ok_1
- endi
-endi
-return -1
-check_ok_1:
-
-$loop_cnt = $loop_cnt + 1
-goto loop_consume_diff_topic_from_stb
-loop_consume_diff_topic_from_stb_end:
-
-print ================ test consume from ctb
-$loop_cnt = 0
-loop_consume_diff_topic_from_ctb:
-
-#######################################################################################
-# clear consume info and consume result
-#run tsim/tmq/clearConsume.sim
-# because drop table function no stable, so by create new db for consume info and result. Modify it later
-$cdb_index = $cdb_index + 1
-$cdbName = cdb . $cdb_index
-sql create database $cdbName vgroups 1
-sleep 500
-sql use $cdbName
-
-print == create consume info table and consume result table for ctb
-sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
-sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
-
-sql show tables
-if $rows != 2 then
- return -1
-endi
-#######################################################################################
-
-if $loop_cnt == 0 then
- print == scenario 1: topic_ctb_column
- $topicList = ' . topic_ctb_column
- $topicList = $topicList . '
-elif $loop_cnt == 1 then
- print == scenario 2: topic_ctb_all
- $topicList = ' . topic_ctb_all
- $topicList = $topicList . '
-elif $loop_cnt == 2 then
- print == scenario 3: topic_ctb_function
- $topicList = ' . topic_ctb_function
- $topicList = $topicList . '
-else
- goto loop_consume_diff_topic_from_ctb_end
-endi
-
-$consumerId = 0
-$totalMsgOfCtb = $rowsPerCtb
-$expectmsgcnt = $totalMsgOfCtb
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-$consumerId = 1
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-
-print == start consumer to pull msgs from ctb
-print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
-system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
-
-print == check consume result
-wait_consumer_end_from_ctb:
-sql select * from consumeresult
-print ==> rows: $rows
-print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
-print ==> rows[1]: $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
-if $rows != 2 then
- sleep 1000
- goto wait_consumer_end_from_ctb
-endi
-if $data[0][1] == 0 then
- if $data[1][1] != 1 then
- return -1
- endi
-endi
-if $data[0][1] == 1 then
- if $data[1][1] != 0 then
- return -1
- endi
-endi
-
-# either $data[0][2] == $totalMsgOfCtb and $data[1][2] == 0
-# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfCtb
-if $data[0][2] == $totalMsgOfCtb then
- if $data[1][2] == 0 then
- goto check_ok_2
- endi
-elif $data[0][2] == 0 then
- if $data[1][2] == $totalMsgOfCtb then
- goto check_ok_2
- endi
-endi
-return -1
-check_ok_2:
-
-if $data[0][3] == $totalMsgOfCtb then
- if $data[1][3] == 0 then
- goto check_ok_3
- endi
-elif $data[0][3] == 0 then
- if $data[1][3] == $totalMsgOfCtb then
- goto check_ok_3
- endi
-endi
-return -1
-check_ok_3:
-
-$loop_cnt = $loop_cnt + 1
-goto loop_consume_diff_topic_from_ctb
-loop_consume_diff_topic_from_ctb_end:
-
-print ================ test consume from ntb
-$loop_cnt = 0
-loop_consume_diff_topic_from_ntb:
-
-#######################################################################################
-# clear consume info and consume result
-#run tsim/tmq/clearConsume.sim
-# because drop table function no stable, so by create new db for consume info and result. Modify it later
-$cdb_index = $cdb_index + 1
-$cdbName = cdb . $cdb_index
-sql create database $cdbName vgroups 1
-sleep 500
-sql use $cdbName
-
-print == create consume info table and consume result table for ntb
-sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
-sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
-
-sql show tables
-if $rows != 2 then
- return -1
-endi
-#######################################################################################
-
-if $loop_cnt == 0 then
- print == scenario 1: topic_ntb_column
- $topicList = ' . topic_ntb_column
- $topicList = $topicList . '
-elif $loop_cnt == 1 then
- print == scenario 2: topic_ntb_all
- $topicList = ' . topic_ntb_all
- $topicList = $topicList . '
-elif $loop_cnt == 2 then
- print == scenario 3: topic_ntb_function
- $topicList = ' . topic_ntb_function
- $topicList = $topicList . '
-else
- goto loop_consume_diff_topic_from_ntb_end
-endi
-
-$consumerId = 0
-$totalMsgOfNtb = $rowsPerCtb
-$expectmsgcnt = $totalMsgOfNtb
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-$consumerId = 1
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-
-print == start consumer to pull msgs from ntb
-print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
-system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
-
-print == check consume result from ntb
-wait_consumer_end_from_ntb:
-sql select * from consumeresult
-print ==> rows: $rows
-print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
-print ==> rows[1]: $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
-if $rows != 2 then
- sleep 1000
- goto wait_consumer_end_from_ntb
-endi
-if $data[0][1] == 0 then
- if $data[1][1] != 1 then
- return -1
- endi
-endi
-if $data[1][1] == 0 then
- if $data[0][1] != 1 then
- return -1
- endi
-endi
-
-# either $data[0][2] == $totalMsgOfNtb and $data[1][2] == 0
-# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfNtb
-if $data[0][2] == $totalMsgOfNtb then
- if $data[1][2] == 0 then
- goto check_ok_4
- endi
-elif $data[0][2] == 0 then
- if $data[1][2] == $totalMsgOfNtb then
- goto check_ok_4
- endi
-endi
-return -1
-check_ok_4:
-
-if $data[0][3] == $totalMsgOfNtb then
- if $data[1][3] == 0 then
- goto check_ok_5
- endi
-elif $data[0][3] == 0 then
- if $data[1][3] == $totalMsgOfNtb then
- goto check_ok_5
- endi
-endi
-return -1
-check_ok_5:
-
-$loop_cnt = $loop_cnt + 1
-goto loop_consume_diff_topic_from_ntb
-loop_consume_diff_topic_from_ntb_end:
-
-#------ not need stop consumer, because it exit after pull msg overthan expect msg
-#system tsim/tmq/consume.sh -s stop -x SIGINT
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
+#### test scenario, please refer to https://jira.taosdata.com:18090/pages/viewpage.action?pageId=135120406
+#basic1Of2Cons.sim: vgroups=1, one topic for 2 consumers, firstly insert data, then start consume. Include six topics
+#basic2Of2Cons.sim: vgroups=1, multi topics for 2 consumers, firstly insert data, then start consume. Include six topics
+#basic3Of2Cons.sim: vgroups=4, one topic for 2 consumers, firstly insert data, then start consume. Include six topics
+#basic4Of2Cons.sim: vgroups=4, multi topics for 2 consumers, firstly insert data, then start consume. Include six topics
+
+# notes1: Scalar function: ABS/ACOS/ASIN/ATAN/CEIL/COS/FLOOR/LOG/POW/ROUND/SIN/SQRT/TAN
+# The above use cases are combined with where filter conditions, such as: where ts > "2017-08-12 18:25:58.128Z" and sin(a) > 0.5;
+#
+# notes2: not support aggregate functions(such as sum/count/min/max) and time-windows(interval).
+#
+
+run tsim/tmq/prepareBasicEnv-1vgrp.sim
+
+#---- global parameters start ----#
+$dbName = db
+$vgroups = 1
+$stbPrefix = stb
+$ctbPrefix = ctb
+$ntbPrefix = ntb
+$stbNum = 1
+$ctbNum = 10
+$ntbNum = 10
+$rowsPerCtb = 10
+$tstart = 1640966400000 # 2022-01-01 00:00:00.000
+#---- global parameters end ----#
+
+$pullDelay = 5
+$ifcheckdata = 1
+$ifmanualcommit = 1
+$showMsg = 1
+$showRow = 0
+
+sql connect
+sql use $dbName
+
+print == create topics from super table
+sql create topic topic_stb_column as select ts, c3 from stb
+sql create topic topic_stb_all as select ts, c1, c2, c3 from stb
+sql create topic topic_stb_function as select ts, abs(c1), sin(c2) from stb
+
+print == create topics from child table
+sql create topic topic_ctb_column as select ts, c3 from ctb0
+sql create topic topic_ctb_all as select * from ctb0
+sql create topic topic_ctb_function as select ts, abs(c1), sin(c2) from ctb0
+
+print == create topics from normal table
+sql create topic topic_ntb_column as select ts, c3 from ntb0
+sql create topic topic_ntb_all as select * from ntb0
+sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb0
+
+#sql show topics
+#if $rows != 9 then
+# return -1
+#endi
+
+#'group.id:cgrp1,enable.auto.commit:false,auto.commit.interval.ms:6000,auto.offset.reset:earliest'
+$keyList = ' . group.id:cgrp1
+$keyList = $keyList . ,
+$keyList = $keyList . enable.auto.commit:false
+#$keyList = $keyList . ,
+#$keyList = $keyList . auto.commit.interval.ms:6000
+#$keyList = $keyList . ,
+#$keyList = $keyList . auto.offset.reset:earliest
+$keyList = $keyList . '
+print ========== key list: $keyList
+
+$cdb_index = 0
+
+#=============================== start consume =============================#
+
+print ================ test consume from stb
+$loop_cnt = 0
+loop_consume_diff_topic_from_stb:
+
+#######################################################################################
+# clear consume info and consume result
+#run tsim/tmq/clearConsume.sim
+# because drop table function no stable, so by create new db for consume info and result. Modify it later
+$cdb_index = $cdb_index + 1
+$cdbName = cdb . $cdb_index
+sql create database $cdbName vgroups 1
+sleep 500
+sql use $cdbName
+
+print == create consume info table and consume result table for stb
+sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
+sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
+
+sql show tables
+if $rows != 2 then
+ return -1
+endi
+#######################################################################################
+
+if $loop_cnt == 0 then
+ print == scenario 1: topic_stb_column
+ $topicList = ' . topic_stb_column
+ $topicList = $topicList . '
+elif $loop_cnt == 1 then
+ print == scenario 2: topic_stb_all
+ $topicList = ' . topic_stb_all
+ $topicList = $topicList . '
+elif $loop_cnt == 2 then
+ print == scenario 3: topic_stb_function
+ $topicList = ' . topic_stb_function
+ $topicList = $topicList . '
+else
+ goto loop_consume_diff_topic_from_stb_end
+endi
+
+$consumerId = 0
+$totalMsgOfStb = $ctbNum * $rowsPerCtb
+$expectmsgcnt = $totalMsgOfStb
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+
+$consumerId = 1
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+
+print == start consumer to pull msgs from stb
+print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
+system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
+
+print == check consume result
+wait_consumer_end_from_stb:
+sql select * from consumeresult
+print ==> rows: $rows
+print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print ==> rows[1]: $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+if $rows != 2 then
+ sleep 1000
+ goto wait_consumer_end_from_stb
+endi
+if $data[0][1] == 0 then
+ if $data[1][1] != 1 then
+ return -1
+ endi
+endi
+if $data[0][1] == 1 then
+ if $data[1][1] != 0 then
+ return -1
+ endi
+endi
+
+# either $data[0][2] == $totalMsgOfStb and $data[1][2] == 0
+# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfStb
+if $data[0][2] == $totalMsgOfStb then
+ if $data[1][2] == 0 then
+ goto check_ok_0
+ endi
+elif $data[0][2] == 0 then
+ if $data[1][2] == $totalMsgOfStb then
+ goto check_ok_0
+ endi
+endi
+return -1
+check_ok_0:
+
+if $data[0][3] == $totalMsgOfStb then
+ if $data[1][3] == 0 then
+ goto check_ok_1
+ endi
+elif $data[0][3] == 0 then
+ if $data[1][3] == $totalMsgOfStb then
+ goto check_ok_1
+ endi
+endi
+return -1
+check_ok_1:
+
+$loop_cnt = $loop_cnt + 1
+goto loop_consume_diff_topic_from_stb
+loop_consume_diff_topic_from_stb_end:
+
+print ================ test consume from ctb
+$loop_cnt = 0
+loop_consume_diff_topic_from_ctb:
+
+#######################################################################################
+# clear consume info and consume result
+#run tsim/tmq/clearConsume.sim
+# because drop table function no stable, so by create new db for consume info and result. Modify it later
+$cdb_index = $cdb_index + 1
+$cdbName = cdb . $cdb_index
+sql create database $cdbName vgroups 1
+sleep 500
+sql use $cdbName
+
+print == create consume info table and consume result table for ctb
+sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
+sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
+
+sql show tables
+if $rows != 2 then
+ return -1
+endi
+#######################################################################################
+
+if $loop_cnt == 0 then
+ print == scenario 1: topic_ctb_column
+ $topicList = ' . topic_ctb_column
+ $topicList = $topicList . '
+elif $loop_cnt == 1 then
+ print == scenario 2: topic_ctb_all
+ $topicList = ' . topic_ctb_all
+ $topicList = $topicList . '
+elif $loop_cnt == 2 then
+ print == scenario 3: topic_ctb_function
+ $topicList = ' . topic_ctb_function
+ $topicList = $topicList . '
+else
+ goto loop_consume_diff_topic_from_ctb_end
+endi
+
+$consumerId = 0
+$totalMsgOfCtb = $rowsPerCtb
+$expectmsgcnt = $totalMsgOfCtb
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+$consumerId = 1
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+
+print == start consumer to pull msgs from ctb
+print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
+system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
+
+print == check consume result
+wait_consumer_end_from_ctb:
+sql select * from consumeresult
+print ==> rows: $rows
+print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print ==> rows[1]: $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+if $rows != 2 then
+ sleep 1000
+ goto wait_consumer_end_from_ctb
+endi
+if $data[0][1] == 0 then
+ if $data[1][1] != 1 then
+ return -1
+ endi
+endi
+if $data[0][1] == 1 then
+ if $data[1][1] != 0 then
+ return -1
+ endi
+endi
+
+# either $data[0][2] == $totalMsgOfCtb and $data[1][2] == 0
+# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfCtb
+if $data[0][2] == $totalMsgOfCtb then
+ if $data[1][2] == 0 then
+ goto check_ok_2
+ endi
+elif $data[0][2] == 0 then
+ if $data[1][2] == $totalMsgOfCtb then
+ goto check_ok_2
+ endi
+endi
+return -1
+check_ok_2:
+
+if $data[0][3] == $totalMsgOfCtb then
+ if $data[1][3] == 0 then
+ goto check_ok_3
+ endi
+elif $data[0][3] == 0 then
+ if $data[1][3] == $totalMsgOfCtb then
+ goto check_ok_3
+ endi
+endi
+return -1
+check_ok_3:
+
+$loop_cnt = $loop_cnt + 1
+goto loop_consume_diff_topic_from_ctb
+loop_consume_diff_topic_from_ctb_end:
+
+print ================ test consume from ntb
+$loop_cnt = 0
+loop_consume_diff_topic_from_ntb:
+
+#######################################################################################
+# clear consume info and consume result
+#run tsim/tmq/clearConsume.sim
+# because drop table function no stable, so by create new db for consume info and result. Modify it later
+$cdb_index = $cdb_index + 1
+$cdbName = cdb . $cdb_index
+sql create database $cdbName vgroups 1
+sleep 500
+sql use $cdbName
+
+print == create consume info table and consume result table for ntb
+sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
+sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
+
+sql show tables
+if $rows != 2 then
+ return -1
+endi
+#######################################################################################
+
+if $loop_cnt == 0 then
+ print == scenario 1: topic_ntb_column
+ $topicList = ' . topic_ntb_column
+ $topicList = $topicList . '
+elif $loop_cnt == 1 then
+ print == scenario 2: topic_ntb_all
+ $topicList = ' . topic_ntb_all
+ $topicList = $topicList . '
+elif $loop_cnt == 2 then
+ print == scenario 3: topic_ntb_function
+ $topicList = ' . topic_ntb_function
+ $topicList = $topicList . '
+else
+ goto loop_consume_diff_topic_from_ntb_end
+endi
+
+$consumerId = 0
+$totalMsgOfNtb = $rowsPerCtb
+$expectmsgcnt = $totalMsgOfNtb
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+$consumerId = 1
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+
+print == start consumer to pull msgs from ntb
+print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
+system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
+
+print == check consume result from ntb
+wait_consumer_end_from_ntb:
+sql select * from consumeresult
+print ==> rows: $rows
+print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print ==> rows[1]: $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+if $rows != 2 then
+ sleep 1000
+ goto wait_consumer_end_from_ntb
+endi
+if $data[0][1] == 0 then
+ if $data[1][1] != 1 then
+ return -1
+ endi
+endi
+if $data[1][1] == 0 then
+ if $data[0][1] != 1 then
+ return -1
+ endi
+endi
+
+# either $data[0][2] == $totalMsgOfNtb and $data[1][2] == 0
+# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfNtb
+if $data[0][2] == $totalMsgOfNtb then
+ if $data[1][2] == 0 then
+ goto check_ok_4
+ endi
+elif $data[0][2] == 0 then
+ if $data[1][2] == $totalMsgOfNtb then
+ goto check_ok_4
+ endi
+endi
+return -1
+check_ok_4:
+
+if $data[0][3] == $totalMsgOfNtb then
+ if $data[1][3] == 0 then
+ goto check_ok_5
+ endi
+elif $data[0][3] == 0 then
+ if $data[1][3] == $totalMsgOfNtb then
+ goto check_ok_5
+ endi
+endi
+return -1
+check_ok_5:
+
+$loop_cnt = $loop_cnt + 1
+goto loop_consume_diff_topic_from_ntb
+loop_consume_diff_topic_from_ntb_end:
+
+#------ not need stop consumer, because it exit after pull msg overthan expect msg
+#system tsim/tmq/consume.sh -s stop -x SIGINT
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/tmq/basic2.sim b/tests/script/tsim/tmq/basic2.sim
index 53f10e2247..dce73be592 100644
--- a/tests/script/tsim/tmq/basic2.sim
+++ b/tests/script/tsim/tmq/basic2.sim
@@ -1,219 +1,229 @@
-#### test scenario, please refer to https://jira.taosdata.com:18090/pages/viewpage.action?pageId=135120406
-#basic1.sim: vgroups=1, one topic for one consumer, firstly insert data, then start consume. Include six topics
-#basic2.sim: vgroups=1, multi topics for one consumer, firstly insert data, then start consume. Include six topics
-#basic3.sim: vgroups=4, one topic for one consumer, firstly insert data, then start consume. Include six topics
-#basic4.sim: vgroups=4, multi topics for one consumer, firstly insert data, then start consume. Include six topics
-
-# notes1: Scalar function: ABS/ACOS/ASIN/ATAN/CEIL/COS/FLOOR/LOG/POW/ROUND/SIN/SQRT/TAN
-# The above use cases are combined with where filter conditions, such as: where ts > "2017-08-12 18:25:58.128Z" and sin(a) > 0.5;
-#
-# notes2: not support aggregate functions(such as sum/count/min/max) and time-windows(interval).
-#
-
-run tsim/tmq/prepareBasicEnv-1vgrp.sim
-
-#---- global parameters start ----#
-$dbName = db
-$vgroups = 1
-$stbPrefix = stb
-$ctbPrefix = ctb
-$ntbPrefix = ntb
-$stbNum = 1
-$ctbNum = 10
-$ntbNum = 10
-$rowsPerCtb = 10
-$tstart = 1640966400000 # 2022-01-01 00:00:00.000
-#---- global parameters end ----#
-
-$pullDelay = 3
-$ifcheckdata = 1
-$showMsg = 1
-$showRow = 0
-
-sql connect
-sql use $dbName
-
-print == create topics from super table
-sql create topic topic_stb_column as select ts, c3 from stb
-sql create topic topic_stb_all as select ts, c1, c2, c3 from stb
-sql create topic topic_stb_function as select ts, abs(c1), sin(c2) from stb
-
-print == create topics from child table
-sql create topic topic_ctb_column as select ts, c3 from ctb0
-sql create topic topic_ctb_all as select * from ctb0
-sql create topic topic_ctb_function as select ts, abs(c1), sin(c2) from ctb0
-
-print == create topics from normal table
-sql create topic topic_ntb_column as select ts, c3 from ntb0
-sql create topic topic_ntb_all as select * from ntb0
-sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb0
-
-#sql show topics
-#if $rows != 9 then
-# return -1
-#endi
-
-$keyList = ' . group.id:cgrp1
-$keyList = $keyList . '
-
-$topicNum = 3
-
-#=============================== start consume =============================#
-
-
-print ================ test consume from stb
-print == multi toipcs: topic_stb_column + topic_stb_all + topic_stb_function
-$topicList = ' . topic_stb_column
-$topicList = $topicList . ,
-$topicList = $topicList . topic_stb_all
-$topicList = $topicList . ,
-$topicList = $topicList . topic_stb_function
-$topicList = $topicList . '
-
-$consumerId = 0
-$totalMsgOfStb = $ctbNum * $rowsPerCtb
-$totalMsgOfStb = $totalMsgOfStb * $topicNum
-$expectmsgcnt = $totalMsgOfStb
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-
-print == start consumer to pull msgs from stb
-print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $dbName -s start
-system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $dbName -s start
-
-print == check consume result
-wait_consumer_end_from_stb:
-sql select * from consumeresult
-print ==> rows: $rows
-print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
-if $rows != 1 then
- sleep 1000
- goto wait_consumer_end_from_stb
-endi
-if $data[0][1] != $consumerId then
- return -1
-endi
-if $data[0][2] != $expectmsgcnt then
- return -1
-endi
-if $data[0][3] != $expectmsgcnt then
- return -1
-endi
-
-#######################################################################################
-# clear consume info and consume result
-#run tsim/tmq/clearConsume.sim
-# because drop table function no stable, so by create new db for consume info and result. Modify it later
-$cdbName = cdb1
-sql create database $cdbName vgroups 1
-sleep 500
-sql use $cdbName
-
-print == create consume info table and consume result table
-sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
-sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
-
-sql show tables
-if $rows != 2 then
- return -1
-endi
-#######################################################################################
-
-
-print ================ test consume from ctb
-print == multi toipcs: topic_ctb_column + topic_ctb_all + topic_ctb_function
-$topicList = ' . topic_ctb_column
-$topicList = $topicList . ,
-$topicList = $topicList . topic_ctb_all
-$topicList = $topicList . ,
-$topicList = $topicList . topic_ctb_function
-$topicList = $topicList . '
-
-$consumerId = 0
-$totalMsgOfCtb = $rowsPerCtb * $topicNum
-$expectmsgcnt = $totalMsgOfCtb
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-
-print == start consumer to pull msgs from ctb
-print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
-system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
-
-print == check consume result
-wait_consumer_end_from_ctb:
-sql select * from consumeresult
-print ==> rows: $rows
-print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
-if $rows != 1 then
- sleep 1000
- goto wait_consumer_end_from_ctb
-endi
-if $data[0][1] != $consumerId then
- return -1
-endi
-if $data[0][2] != $totalMsgOfCtb then
- return -1
-endi
-if $data[0][3] != $totalMsgOfCtb then
- return -1
-endi
-
-#######################################################################################
-# clear consume info and consume result
-#run tsim/tmq/clearConsume.sim
-# because drop table function no stable, so by create new db for consume info and result. Modify it later
-$cdbName = cdb2
-sql create database $cdbName vgroups 1
-sleep 500
-sql use $cdbName
-
-print == create consume info table and consume result table
-sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
-sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
-
-sql show tables
-if $rows != 2 then
- return -1
-endi
-#######################################################################################
-
-
-print ================ test consume from ntb
-print == multi toipcs: topic_ntb_column + topic_ntb_all + topic_ntb_function
-$topicList = ' . topic_ntb_column
-$topicList = $topicList . ,
-$topicList = $topicList . topic_ntb_all
-$topicList = $topicList . ,
-$topicList = $topicList . topic_ntb_function
-$topicList = $topicList . '
-
-$consumerId = 0
-$totalMsgOfNtb = $rowsPerCtb * $topicNum
-$expectmsgcnt = $totalMsgOfNtb
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-
-print == start consumer to pull msgs from ntb
-print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
-system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
-
-print == check consume result from ntb
-wait_consumer_end_from_ntb:
-sql select * from consumeresult
-print ==> rows: $rows
-print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
-if $rows != 1 then
- sleep 1000
- goto wait_consumer_end_from_ntb
-endi
-if $data[0][1] != $consumerId then
- return -1
-endi
-if $data[0][2] != $totalMsgOfNtb then
- return -1
-endi
-if $data[0][3] != $totalMsgOfNtb then
- return -1
-endi
-
-#------ not need stop consumer, because it exit after pull msg overthan expect msg
-#system tsim/tmq/consume.sh -s stop -x SIGINT
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
+#### test scenario, please refer to https://jira.taosdata.com:18090/pages/viewpage.action?pageId=135120406
+#basic1.sim: vgroups=1, one topic for one consumer, firstly insert data, then start consume. Include six topics
+#basic2.sim: vgroups=1, multi topics for one consumer, firstly insert data, then start consume. Include six topics
+#basic3.sim: vgroups=4, one topic for one consumer, firstly insert data, then start consume. Include six topics
+#basic4.sim: vgroups=4, multi topics for one consumer, firstly insert data, then start consume. Include six topics
+
+# notes1: Scalar function: ABS/ACOS/ASIN/ATAN/CEIL/COS/FLOOR/LOG/POW/ROUND/SIN/SQRT/TAN
+# The above use cases are combined with where filter conditions, such as: where ts > "2017-08-12 18:25:58.128Z" and sin(a) > 0.5;
+#
+# notes2: not support aggregate functions(such as sum/count/min/max) and time-windows(interval).
+#
+
+run tsim/tmq/prepareBasicEnv-1vgrp.sim
+
+#---- global parameters start ----#
+$dbName = db
+$vgroups = 1
+$stbPrefix = stb
+$ctbPrefix = ctb
+$ntbPrefix = ntb
+$stbNum = 1
+$ctbNum = 10
+$ntbNum = 10
+$rowsPerCtb = 10
+$tstart = 1640966400000 # 2022-01-01 00:00:00.000
+#---- global parameters end ----#
+
+$pullDelay = 3
+$ifcheckdata = 1
+$ifmanualcommit = 1
+$showMsg = 1
+$showRow = 0
+
+sql connect
+sql use $dbName
+
+print == create topics from super table
+sql create topic topic_stb_column as select ts, c3 from stb
+sql create topic topic_stb_all as select ts, c1, c2, c3 from stb
+sql create topic topic_stb_function as select ts, abs(c1), sin(c2) from stb
+
+print == create topics from child table
+sql create topic topic_ctb_column as select ts, c3 from ctb0
+sql create topic topic_ctb_all as select * from ctb0
+sql create topic topic_ctb_function as select ts, abs(c1), sin(c2) from ctb0
+
+print == create topics from normal table
+sql create topic topic_ntb_column as select ts, c3 from ntb0
+sql create topic topic_ntb_all as select * from ntb0
+sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb0
+
+#sql show topics
+#if $rows != 9 then
+# return -1
+#endi
+
+#'group.id:cgrp1,enable.auto.commit:false,auto.commit.interval.ms:6000,auto.offset.reset:earliest'
+$keyList = ' . group.id:cgrp1
+$keyList = $keyList . ,
+$keyList = $keyList . enable.auto.commit:false
+#$keyList = $keyList . ,
+#$keyList = $keyList . auto.commit.interval.ms:6000
+#$keyList = $keyList . ,
+#$keyList = $keyList . auto.offset.reset:earliest
+$keyList = $keyList . '
+print ========== key list: $keyList
+
+
+$topicNum = 3
+
+#=============================== start consume =============================#
+
+
+print ================ test consume from stb
+print == multi toipcs: topic_stb_column + topic_stb_all + topic_stb_function
+$topicList = ' . topic_stb_column
+$topicList = $topicList . ,
+$topicList = $topicList . topic_stb_all
+$topicList = $topicList . ,
+$topicList = $topicList . topic_stb_function
+$topicList = $topicList . '
+
+$consumerId = 0
+$totalMsgOfStb = $ctbNum * $rowsPerCtb
+$totalMsgOfStb = $totalMsgOfStb * $topicNum
+$expectmsgcnt = $totalMsgOfStb
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+
+print == start consumer to pull msgs from stb
+print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $dbName -s start
+system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $dbName -s start
+
+print == check consume result
+wait_consumer_end_from_stb:
+sql select * from consumeresult
+print ==> rows: $rows
+print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+if $rows != 1 then
+ sleep 1000
+ goto wait_consumer_end_from_stb
+endi
+if $data[0][1] != $consumerId then
+ return -1
+endi
+if $data[0][2] != $expectmsgcnt then
+ return -1
+endi
+if $data[0][3] != $expectmsgcnt then
+ return -1
+endi
+
+#######################################################################################
+# clear consume info and consume result
+#run tsim/tmq/clearConsume.sim
+# because drop table function no stable, so by create new db for consume info and result. Modify it later
+$cdbName = cdb1
+sql create database $cdbName vgroups 1
+sleep 500
+sql use $cdbName
+
+print == create consume info table and consume result table
+sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
+sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
+
+sql show tables
+if $rows != 2 then
+ return -1
+endi
+#######################################################################################
+
+
+print ================ test consume from ctb
+print == multi toipcs: topic_ctb_column + topic_ctb_all + topic_ctb_function
+$topicList = ' . topic_ctb_column
+$topicList = $topicList . ,
+$topicList = $topicList . topic_ctb_all
+$topicList = $topicList . ,
+$topicList = $topicList . topic_ctb_function
+$topicList = $topicList . '
+
+$consumerId = 0
+$totalMsgOfCtb = $rowsPerCtb * $topicNum
+$expectmsgcnt = $totalMsgOfCtb
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+
+print == start consumer to pull msgs from ctb
+print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
+system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
+
+print == check consume result
+wait_consumer_end_from_ctb:
+sql select * from consumeresult
+print ==> rows: $rows
+print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+if $rows != 1 then
+ sleep 1000
+ goto wait_consumer_end_from_ctb
+endi
+if $data[0][1] != $consumerId then
+ return -1
+endi
+if $data[0][2] != $totalMsgOfCtb then
+ return -1
+endi
+if $data[0][3] != $totalMsgOfCtb then
+ return -1
+endi
+
+#######################################################################################
+# clear consume info and consume result
+#run tsim/tmq/clearConsume.sim
+# because drop table function no stable, so by create new db for consume info and result. Modify it later
+$cdbName = cdb2
+sql create database $cdbName vgroups 1
+sleep 500
+sql use $cdbName
+
+print == create consume info table and consume result table
+sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
+sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
+
+sql show tables
+if $rows != 2 then
+ return -1
+endi
+#######################################################################################
+
+
+print ================ test consume from ntb
+print == multi toipcs: topic_ntb_column + topic_ntb_all + topic_ntb_function
+$topicList = ' . topic_ntb_column
+$topicList = $topicList . ,
+$topicList = $topicList . topic_ntb_all
+$topicList = $topicList . ,
+$topicList = $topicList . topic_ntb_function
+$topicList = $topicList . '
+
+$consumerId = 0
+$totalMsgOfNtb = $rowsPerCtb * $topicNum
+$expectmsgcnt = $totalMsgOfNtb
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+
+print == start consumer to pull msgs from ntb
+print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
+system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
+
+print == check consume result from ntb
+wait_consumer_end_from_ntb:
+sql select * from consumeresult
+print ==> rows: $rows
+print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+if $rows != 1 then
+ sleep 1000
+ goto wait_consumer_end_from_ntb
+endi
+if $data[0][1] != $consumerId then
+ return -1
+endi
+if $data[0][2] != $totalMsgOfNtb then
+ return -1
+endi
+if $data[0][3] != $totalMsgOfNtb then
+ return -1
+endi
+
+#------ not need stop consumer, because it exit after pull msg overthan expect msg
+#system tsim/tmq/consume.sh -s stop -x SIGINT
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/tmq/basic2Of2Cons.sim b/tests/script/tsim/tmq/basic2Of2Cons.sim
index 01ccb2b515..0494ddb5b8 100644
--- a/tests/script/tsim/tmq/basic2Of2Cons.sim
+++ b/tests/script/tsim/tmq/basic2Of2Cons.sim
@@ -1,309 +1,318 @@
-#### test scenario, please refer to https://jira.taosdata.com:18090/pages/viewpage.action?pageId=135120406
-#basic1Of2Cons.sim: vgroups=1, one topic for 2 consumers, firstly insert data, then start consume. Include six topics
-#basic2Of2Cons.sim: vgroups=1, multi topics for 2 consumers, firstly insert data, then start consume. Include six topics
-#basic3Of2Cons.sim: vgroups=4, one topic for 2 consumers, firstly insert data, then start consume. Include six topics
-#basic4Of2Cons.sim: vgroups=4, multi topics for 2 consumers, firstly insert data, then start consume. Include six topics
-
-# notes1: Scalar function: ABS/ACOS/ASIN/ATAN/CEIL/COS/FLOOR/LOG/POW/ROUND/SIN/SQRT/TAN
-# The above use cases are combined with where filter conditions, such as: where ts > "2017-08-12 18:25:58.128Z" and sin(a) > 0.5;
-#
-# notes2: not support aggregate functions(such as sum/count/min/max) and time-windows(interval).
-#
-
-run tsim/tmq/prepareBasicEnv-1vgrp.sim
-
-#---- global parameters start ----#
-$dbName = db
-$vgroups = 1
-$stbPrefix = stb
-$ctbPrefix = ctb
-$ntbPrefix = ntb
-$stbNum = 1
-$ctbNum = 10
-$ntbNum = 10
-$rowsPerCtb = 10
-$tstart = 1640966400000 # 2022-01-01 00:00:00.000
-#---- global parameters end ----#
-
-$pullDelay = 5
-$ifcheckdata = 1
-$showMsg = 1
-$showRow = 0
-
-sql connect
-sql use $dbName
-
-print == create topics from super table
-sql create topic topic_stb_column as select ts, c3 from stb
-sql create topic topic_stb_all as select ts, c1, c2, c3 from stb
-sql create topic topic_stb_function as select ts, abs(c1), sin(c2) from stb
-
-print == create topics from child table
-sql create topic topic_ctb_column as select ts, c3 from ctb0
-sql create topic topic_ctb_all as select * from ctb0
-sql create topic topic_ctb_function as select ts, abs(c1), sin(c2) from ctb0
-
-print == create topics from normal table
-sql create topic topic_ntb_column as select ts, c3 from ntb0
-sql create topic topic_ntb_all as select * from ntb0
-sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb0
-
-#sql show topics
-#if $rows != 9 then
-# return -1
-#endi
-
-$keyList = ' . group.id:cgrp1
-$keyList = $keyList . '
-
-$topicNum = 3
-
-#=============================== start consume =============================#
-
-
-print ================ test consume from stb
-print == multi toipcs: topic_stb_column + topic_stb_all + topic_stb_function
-$topicList = ' . topic_stb_column
-$topicList = $topicList . ,
-$topicList = $topicList . topic_stb_all
-$topicList = $topicList . ,
-$topicList = $topicList . topic_stb_function
-$topicList = $topicList . '
-
-$consumerId = 0
-$totalMsgOfStb = $ctbNum * $rowsPerCtb
-$totalMsgOfStb = $totalMsgOfStb * $topicNum
-$expectmsgcnt = $totalMsgOfStb
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-$consumerId = 1
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-
-print == start consumer to pull msgs from stb
-print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $dbName -s start
-system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $dbName -s start
-
-print == check consume result
-wait_consumer_end_from_stb:
-sql select * from consumeresult
-print ==> rows: $rows
-print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
-print ==> rows[1]: $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
-if $rows != 2 then
- sleep 1000
- goto wait_consumer_end_from_stb
-endi
-if $data[0][1] == 0 then
- if $data[1][1] != 1 then
- return -1
- endi
-endi
-if $data[0][1] == 1 then
- if $data[1][1] != 0 then
- return -1
- endi
-endi
-
-# either $data[0][2] == $totalMsgOfStb and $data[1][2] == 0
-# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfStb
-if $data[0][2] == $totalMsgOfStb then
- if $data[1][2] == 0 then
- goto check_ok_0
- endi
-elif $data[0][2] == 0 then
- if $data[1][2] == $totalMsgOfStb then
- goto check_ok_0
- endi
-endi
-return -1
-check_ok_0:
-
-if $data[0][3] == $totalMsgOfStb then
- if $data[1][3] == 0 then
- goto check_ok_1
- endi
-elif $data[0][3] == 0 then
- if $data[1][3] == $totalMsgOfStb then
- goto check_ok_1
- endi
-endi
-return -1
-check_ok_1:
-
-#######################################################################################
-# clear consume info and consume result
-#run tsim/tmq/clearConsume.sim
-# because drop table function no stable, so by create new db for consume info and result. Modify it later
-$cdbName = cdb1
-sql create database $cdbName vgroups 1
-sleep 500
-sql use $cdbName
-
-print == create consume info table and consume result table for ctb
-sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
-sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
-
-sql show tables
-if $rows != 2 then
- return -1
-endi
-#######################################################################################
-
-
-print ================ test consume from ctb
-print == multi toipcs: topic_ctb_column + topic_ctb_all + topic_ctb_function
-$topicList = ' . topic_ctb_column
-$topicList = $topicList . ,
-$topicList = $topicList . topic_ctb_all
-$topicList = $topicList . ,
-$topicList = $topicList . topic_ctb_function
-$topicList = $topicList . '
-
-$consumerId = 0
-$totalMsgOfCtb = $rowsPerCtb * $topicNum
-$expectmsgcnt = $totalMsgOfCtb
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-$consumerId = 1
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-
-print == start consumer to pull msgs from ctb
-print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
-system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
-
-print == check consume result
-wait_consumer_end_from_ctb:
-sql select * from consumeresult
-print ==> rows: $rows
-print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
-print ==> rows[1]: $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
-if $rows != 2 then
- sleep 1000
- goto wait_consumer_end_from_ctb
-endi
-if $data[0][1] == 0 then
- if $data[1][1] != 1 then
- return -1
- endi
-endi
-if $data[0][1] == 1 then
- if $data[1][1] != 0 then
- return -1
- endi
-endi
-
-# either $data[0][2] == $totalMsgOfCtb and $data[1][2] == 0
-# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfCtb
-if $data[0][2] == $totalMsgOfCtb then
- if $data[1][2] == 0 then
- goto check_ok_2
- endi
-elif $data[0][2] == 0 then
- if $data[1][2] == $totalMsgOfCtb then
- goto check_ok_2
- endi
-endi
-return -1
-check_ok_2:
-
-if $data[0][3] == $totalMsgOfCtb then
- if $data[1][3] == 0 then
- goto check_ok_3
- endi
-elif $data[0][3] == 0 then
- if $data[1][3] == $totalMsgOfCtb then
- goto check_ok_3
- endi
-endi
-return -1
-check_ok_3:
-
-#######################################################################################
-# clear consume info and consume result
-#run tsim/tmq/clearConsume.sim
-# because drop table function no stable, so by create new db for consume info and result. Modify it later
-$cdbName = cdb2
-sql create database $cdbName vgroups 1
-sleep 500
-sql use $cdbName
-
-print == create consume info table and consume result table for ntb
-sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
-sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
-
-sql show tables
-if $rows != 2 then
- return -1
-endi
-#######################################################################################
-
-
-print ================ test consume from ntb
-print == multi toipcs: topic_ntb_column + topic_ntb_all + topic_ntb_function
-$topicList = ' . topic_ntb_column
-$topicList = $topicList . ,
-$topicList = $topicList . topic_ntb_all
-$topicList = $topicList . ,
-$topicList = $topicList . topic_ntb_function
-$topicList = $topicList . '
-
-$consumerId = 0
-$totalMsgOfNtb = $rowsPerCtb * $topicNum
-$expectmsgcnt = $totalMsgOfNtb
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-$consumerId = 1
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-
-print == start consumer to pull msgs from ntb
-print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
-system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
-
-print == check consume result from ntb
-wait_consumer_end_from_ntb:
-sql select * from consumeresult
-print ==> rows: $rows
-print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
-print ==> rows[1]: $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
-if $rows != 2 then
- sleep 1000
- goto wait_consumer_end_from_ntb
-endi
-if $data[0][1] == 0 then
- if $data[1][1] != 1 then
- return -1
- endi
-endi
-if $data[1][1] == 0 then
- if $data[0][1] != 1 then
- return -1
- endi
-endi
-
-# either $data[0][2] == $totalMsgOfNtb and $data[1][2] == 0
-# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfNtb
-if $data[0][2] == $totalMsgOfNtb then
- if $data[1][2] == 0 then
- goto check_ok_4
- endi
-elif $data[0][2] == 0 then
- if $data[1][2] == $totalMsgOfNtb then
- goto check_ok_4
- endi
-endi
-return -1
-check_ok_4:
-
-if $data[0][3] == $totalMsgOfNtb then
- if $data[1][3] == 0 then
- goto check_ok_5
- endi
-elif $data[0][3] == 0 then
- if $data[1][3] == $totalMsgOfNtb then
- goto check_ok_5
- endi
-endi
-return -1
-check_ok_5:
-
-#------ not need stop consumer, because it exit after pull msg overthan expect msg
-#system tsim/tmq/consume.sh -s stop -x SIGINT
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
+#### test scenario, please refer to https://jira.taosdata.com:18090/pages/viewpage.action?pageId=135120406
+#basic1Of2Cons.sim: vgroups=1, one topic for 2 consumers, firstly insert data, then start consume. Include six topics
+#basic2Of2Cons.sim: vgroups=1, multi topics for 2 consumers, firstly insert data, then start consume. Include six topics
+#basic3Of2Cons.sim: vgroups=4, one topic for 2 consumers, firstly insert data, then start consume. Include six topics
+#basic4Of2Cons.sim: vgroups=4, multi topics for 2 consumers, firstly insert data, then start consume. Include six topics
+
+# notes1: Scalar function: ABS/ACOS/ASIN/ATAN/CEIL/COS/FLOOR/LOG/POW/ROUND/SIN/SQRT/TAN
+# The above use cases are combined with where filter conditions, such as: where ts > "2017-08-12 18:25:58.128Z" and sin(a) > 0.5;
+#
+# notes2: not support aggregate functions(such as sum/count/min/max) and time-windows(interval).
+#
+
+run tsim/tmq/prepareBasicEnv-1vgrp.sim
+
+#---- global parameters start ----#
+$dbName = db
+$vgroups = 1
+$stbPrefix = stb
+$ctbPrefix = ctb
+$ntbPrefix = ntb
+$stbNum = 1
+$ctbNum = 10
+$ntbNum = 10
+$rowsPerCtb = 10
+$tstart = 1640966400000 # 2022-01-01 00:00:00.000
+#---- global parameters end ----#
+
+$pullDelay = 5
+$ifcheckdata = 1
+$ifmanualcommit = 1
+$showMsg = 1
+$showRow = 0
+
+sql connect
+sql use $dbName
+
+print == create topics from super table
+sql create topic topic_stb_column as select ts, c3 from stb
+sql create topic topic_stb_all as select ts, c1, c2, c3 from stb
+sql create topic topic_stb_function as select ts, abs(c1), sin(c2) from stb
+
+print == create topics from child table
+sql create topic topic_ctb_column as select ts, c3 from ctb0
+sql create topic topic_ctb_all as select * from ctb0
+sql create topic topic_ctb_function as select ts, abs(c1), sin(c2) from ctb0
+
+print == create topics from normal table
+sql create topic topic_ntb_column as select ts, c3 from ntb0
+sql create topic topic_ntb_all as select * from ntb0
+sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb0
+
+#sql show topics
+#if $rows != 9 then
+# return -1
+#endi
+
+#'group.id:cgrp1,enable.auto.commit:false,auto.commit.interval.ms:6000,auto.offset.reset:earliest'
+$keyList = ' . group.id:cgrp1
+$keyList = $keyList . ,
+$keyList = $keyList . enable.auto.commit:false
+#$keyList = $keyList . ,
+#$keyList = $keyList . auto.commit.interval.ms:6000
+#$keyList = $keyList . ,
+#$keyList = $keyList . auto.offset.reset:earliest
+$keyList = $keyList . '
+print ========== key list: $keyList
+
+$topicNum = 3
+
+#=============================== start consume =============================#
+
+
+print ================ test consume from stb
+print == multi toipcs: topic_stb_column + topic_stb_all + topic_stb_function
+$topicList = ' . topic_stb_column
+$topicList = $topicList . ,
+$topicList = $topicList . topic_stb_all
+$topicList = $topicList . ,
+$topicList = $topicList . topic_stb_function
+$topicList = $topicList . '
+
+$consumerId = 0
+$totalMsgOfStb = $ctbNum * $rowsPerCtb
+$totalMsgOfStb = $totalMsgOfStb * $topicNum
+$expectmsgcnt = $totalMsgOfStb
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+$consumerId = 1
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+
+print == start consumer to pull msgs from stb
+print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $dbName -s start
+system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $dbName -s start
+
+print == check consume result
+wait_consumer_end_from_stb:
+sql select * from consumeresult
+print ==> rows: $rows
+print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print ==> rows[1]: $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+if $rows != 2 then
+ sleep 1000
+ goto wait_consumer_end_from_stb
+endi
+if $data[0][1] == 0 then
+ if $data[1][1] != 1 then
+ return -1
+ endi
+endi
+if $data[0][1] == 1 then
+ if $data[1][1] != 0 then
+ return -1
+ endi
+endi
+
+# either $data[0][2] == $totalMsgOfStb and $data[1][2] == 0
+# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfStb
+if $data[0][2] == $totalMsgOfStb then
+ if $data[1][2] == 0 then
+ goto check_ok_0
+ endi
+elif $data[0][2] == 0 then
+ if $data[1][2] == $totalMsgOfStb then
+ goto check_ok_0
+ endi
+endi
+return -1
+check_ok_0:
+
+if $data[0][3] == $totalMsgOfStb then
+ if $data[1][3] == 0 then
+ goto check_ok_1
+ endi
+elif $data[0][3] == 0 then
+ if $data[1][3] == $totalMsgOfStb then
+ goto check_ok_1
+ endi
+endi
+return -1
+check_ok_1:
+
+#######################################################################################
+# clear consume info and consume result
+#run tsim/tmq/clearConsume.sim
+# because drop table function no stable, so by create new db for consume info and result. Modify it later
+$cdbName = cdb1
+sql create database $cdbName vgroups 1
+sleep 500
+sql use $cdbName
+
+print == create consume info table and consume result table for ctb
+sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
+sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
+
+sql show tables
+if $rows != 2 then
+ return -1
+endi
+#######################################################################################
+
+
+print ================ test consume from ctb
+print == multi toipcs: topic_ctb_column + topic_ctb_all + topic_ctb_function
+$topicList = ' . topic_ctb_column
+$topicList = $topicList . ,
+$topicList = $topicList . topic_ctb_all
+$topicList = $topicList . ,
+$topicList = $topicList . topic_ctb_function
+$topicList = $topicList . '
+
+$consumerId = 0
+$totalMsgOfCtb = $rowsPerCtb * $topicNum
+$expectmsgcnt = $totalMsgOfCtb
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+$consumerId = 1
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+
+print == start consumer to pull msgs from ctb
+print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
+system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
+
+print == check consume result
+wait_consumer_end_from_ctb:
+sql select * from consumeresult
+print ==> rows: $rows
+print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print ==> rows[1]: $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+if $rows != 2 then
+ sleep 1000
+ goto wait_consumer_end_from_ctb
+endi
+if $data[0][1] == 0 then
+ if $data[1][1] != 1 then
+ return -1
+ endi
+endi
+if $data[0][1] == 1 then
+ if $data[1][1] != 0 then
+ return -1
+ endi
+endi
+
+# either $data[0][2] == $totalMsgOfCtb and $data[1][2] == 0
+# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfCtb
+if $data[0][2] == $totalMsgOfCtb then
+ if $data[1][2] == 0 then
+ goto check_ok_2
+ endi
+elif $data[0][2] == 0 then
+ if $data[1][2] == $totalMsgOfCtb then
+ goto check_ok_2
+ endi
+endi
+return -1
+check_ok_2:
+
+if $data[0][3] == $totalMsgOfCtb then
+ if $data[1][3] == 0 then
+ goto check_ok_3
+ endi
+elif $data[0][3] == 0 then
+ if $data[1][3] == $totalMsgOfCtb then
+ goto check_ok_3
+ endi
+endi
+return -1
+check_ok_3:
+
+#######################################################################################
+# clear consume info and consume result
+#run tsim/tmq/clearConsume.sim
+# because drop table function no stable, so by create new db for consume info and result. Modify it later
+$cdbName = cdb2
+sql create database $cdbName vgroups 1
+sleep 500
+sql use $cdbName
+
+print == create consume info table and consume result table for ntb
+sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
+sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
+
+sql show tables
+if $rows != 2 then
+ return -1
+endi
+#######################################################################################
+
+
+print ================ test consume from ntb
+print == multi toipcs: topic_ntb_column + topic_ntb_all + topic_ntb_function
+$topicList = ' . topic_ntb_column
+$topicList = $topicList . ,
+$topicList = $topicList . topic_ntb_all
+$topicList = $topicList . ,
+$topicList = $topicList . topic_ntb_function
+$topicList = $topicList . '
+
+$consumerId = 0
+$totalMsgOfNtb = $rowsPerCtb * $topicNum
+$expectmsgcnt = $totalMsgOfNtb
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+$consumerId = 1
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+
+print == start consumer to pull msgs from ntb
+print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
+system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
+
+print == check consume result from ntb
+wait_consumer_end_from_ntb:
+sql select * from consumeresult
+print ==> rows: $rows
+print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print ==> rows[1]: $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+if $rows != 2 then
+ sleep 1000
+ goto wait_consumer_end_from_ntb
+endi
+if $data[0][1] == 0 then
+ if $data[1][1] != 1 then
+ return -1
+ endi
+endi
+if $data[1][1] == 0 then
+ if $data[0][1] != 1 then
+ return -1
+ endi
+endi
+
+# either $data[0][2] == $totalMsgOfNtb and $data[1][2] == 0
+# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfNtb
+if $data[0][2] == $totalMsgOfNtb then
+ if $data[1][2] == 0 then
+ goto check_ok_4
+ endi
+elif $data[0][2] == 0 then
+ if $data[1][2] == $totalMsgOfNtb then
+ goto check_ok_4
+ endi
+endi
+return -1
+check_ok_4:
+
+if $data[0][3] == $totalMsgOfNtb then
+ if $data[1][3] == 0 then
+ goto check_ok_5
+ endi
+elif $data[0][3] == 0 then
+ if $data[1][3] == $totalMsgOfNtb then
+ goto check_ok_5
+ endi
+endi
+return -1
+check_ok_5:
+
+#------ not need stop consumer, because it exit after pull msg overthan expect msg
+#system tsim/tmq/consume.sh -s stop -x SIGINT
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/tmq/basic2Of2ConsOverlap.sim b/tests/script/tsim/tmq/basic2Of2ConsOverlap.sim
index d5c800b0e9..480cf520d9 100644
--- a/tests/script/tsim/tmq/basic2Of2ConsOverlap.sim
+++ b/tests/script/tsim/tmq/basic2Of2ConsOverlap.sim
@@ -27,6 +27,7 @@ $tstart = 1640966400000 # 2022-01-01 00:00:00.000
$pullDelay = 5
$ifcheckdata = 1
+$ifmanualcommit = 1
$showMsg = 1
$showRow = 0
@@ -53,8 +54,16 @@ sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb0
# return -1
#endi
+#'group.id:cgrp1,enable.auto.commit:false,auto.commit.interval.ms:6000,auto.offset.reset:earliest'
$keyList = ' . group.id:cgrp1
+$keyList = $keyList . ,
+$keyList = $keyList . enable.auto.commit:false
+#$keyList = $keyList . ,
+#$keyList = $keyList . auto.commit.interval.ms:6000
+#$keyList = $keyList . ,
+#$keyList = $keyList . auto.offset.reset:earliest
$keyList = $keyList . '
+print ========== key list: $keyList
$topicNum = 2
@@ -72,7 +81,7 @@ $consumerId = 0
$totalMsgOfOneTopic = $ctbNum * $rowsPerCtb
$totalMsgOfStb = $totalMsgOfOneTopic * $topicNum
$expectmsgcnt = $totalMsgOfStb
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$topicList = ' . topic_stb_all
@@ -80,7 +89,7 @@ $topicList = $topicList . ,
$topicList = $topicList . topic_stb_function
$topicList = $topicList . '
$consumerId = 1
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from stb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $dbName -s start
@@ -158,7 +167,7 @@ sleep 500
sql use $cdbName
print == create consume info table and consume result table for ctb
-sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
+sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
sql show tables
@@ -179,14 +188,14 @@ $consumerId = 0
$totalMsgOfOneTopic = $rowsPerCtb
$totalMsgOfCtb = $totalMsgOfOneTopic * $topicNum
$expectmsgcnt = $totalMsgOfCtb
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$topicList = ' . topic_ctb_function
$topicList = $topicList . ,
$topicList = $topicList . topic_ctb_all
$topicList = $topicList . '
$consumerId = 1
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from ctb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
@@ -249,7 +258,7 @@ sleep 500
sql use $cdbName
print == create consume info table and consume result table for ntb
-sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
+sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
sql show tables
@@ -270,7 +279,7 @@ $consumerId = 0
$totalMsgOfOneTopic = $rowsPerCtb
$totalMsgOfNtb = $totalMsgOfOneTopic * $topicNum
$expectmsgcnt = $totalMsgOfNtb
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$topicList = ' . topic_ntb_function
@@ -278,7 +287,7 @@ $topicList = $topicList . ,
$topicList = $topicList . topic_ntb_all
$topicList = $topicList . '
$consumerId = 1
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from ntb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
diff --git a/tests/script/tsim/tmq/basic3.sim b/tests/script/tsim/tmq/basic3.sim
index de771ba892..8d677766d7 100644
--- a/tests/script/tsim/tmq/basic3.sim
+++ b/tests/script/tsim/tmq/basic3.sim
@@ -1,278 +1,288 @@
-#### test scenario, please refer to https://jira.taosdata.com:18090/pages/viewpage.action?pageId=135120406
-#basic1.sim: vgroups=1, one topic for one consumer, firstly insert data, then start consume. Include six topics
-#basic2.sim: vgroups=1, multi topics for one consumer, firstly insert data, then start consume. Include six topics
-#basic3.sim: vgroups=4, one topic for one consumer, firstly insert data, then start consume. Include six topics
-#basic4.sim: vgroups=4, multi topics for one consumer, firstly insert data, then start consume. Include six topics
-
-# notes1: Scalar function: ABS/ACOS/ASIN/ATAN/CEIL/COS/FLOOR/LOG/POW/ROUND/SIN/SQRT/TAN
-# The above use cases are combined with where filter conditions, such as: where ts > "2017-08-12 18:25:58.128Z" and sin(a) > 0.5;
-#
-# notes2: not support aggregate functions(such as sum/count/min/max) and time-windows(interval).
-#
-
-run tsim/tmq/prepareBasicEnv-4vgrp.sim
-
-#---- global parameters start ----#
-$dbName = db
-$vgroups = 4
-$stbPrefix = stb
-$ctbPrefix = ctb
-$ntbPrefix = ntb
-$stbNum = 1
-$ctbNum = 10
-$ntbNum = 10
-$rowsPerCtb = 10
-$tstart = 1640966400000 # 2022-01-01 00:00:00.000
-#---- global parameters end ----#
-
-$pullDelay = 3
-$ifcheckdata = 1
-$showMsg = 1
-$showRow = 0
-
-sql connect
-sql use $dbName
-
-print == create topics from super table
-sql create topic topic_stb_column as select ts, c3 from stb
-sql create topic topic_stb_all as select ts, c1, c2, c3 from stb
-sql create topic topic_stb_function as select ts, abs(c1), sin(c2) from stb
-
-print == create topics from child table
-sql create topic topic_ctb_column as select ts, c3 from ctb0
-sql create topic topic_ctb_all as select * from ctb0
-sql create topic topic_ctb_function as select ts, abs(c1), sin(c2) from ctb0
-
-print == create topics from normal table
-sql create topic topic_ntb_column as select ts, c3 from ntb0
-sql create topic topic_ntb_all as select * from ntb0
-sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb0
-
-#sql show topics
-#if $rows != 9 then
-# return -1
-#endi
-
-$keyList = ' . group.id:cgrp1
-$keyList = $keyList . '
-
-$cdb_index = 0
-#=============================== start consume =============================#
-
-print ================ test consume from stb
-$loop_cnt = 0
-loop_consume_diff_topic_from_stb:
-
-#######################################################################################
-# clear consume info and consume result
-#run tsim/tmq/clearConsume.sim
-# because drop table function no stable, so by create new db for consume info and result. Modify it later
-$cdb_index = $cdb_index + 1
-$cdbName = cdb . $cdb_index
-sql create database $cdbName vgroups 1
-sleep 500
-sql use $cdbName
-
-print == create consume info table and consume result table
-sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
-sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
-
-sql show tables
-if $rows != 2 then
- return -1
-endi
-#######################################################################################
-
-if $loop_cnt == 0 then
- print == scenario 1: topic_stb_column
- $topicList = ' . topic_stb_column
- $topicList = $topicList . '
-elif $loop_cnt == 1 then
- print == scenario 2: topic_stb_all
- $topicList = ' . topic_stb_all
- $topicList = $topicList . '
-elif $loop_cnt == 2 then
- print == scenario 3: topic_stb_function
- $topicList = ' . topic_stb_function
- $topicList = $topicList . '
-else
- goto loop_consume_diff_topic_from_stb_end
-endi
-
-$consumerId = 0
-$totalMsgOfStb = $ctbNum * $rowsPerCtb
-$expectmsgcnt = $totalMsgOfStb
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-
-print == start consumer to pull msgs from stb
-print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
-system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
-
-print == check consume result
-wait_consumer_end_from_stb:
-sql select * from consumeresult
-print ==> rows: $rows
-print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
-if $rows != 1 then
- sleep 1000
- goto wait_consumer_end_from_stb
-endi
-if $data[0][1] != $consumerId then
- return -1
-endi
-if $data[0][2] != $expectmsgcnt then
- return -1
-endi
-if $data[0][3] != $expectmsgcnt then
- return -1
-endi
-$loop_cnt = $loop_cnt + 1
-goto loop_consume_diff_topic_from_stb
-loop_consume_diff_topic_from_stb_end:
-
-print ================ test consume from ctb
-$loop_cnt = 0
-loop_consume_diff_topic_from_ctb:
-
-#######################################################################################
-# clear consume info and consume result
-#run tsim/tmq/clearConsume.sim
-# because drop table function no stable, so by create new db for consume info and result. Modify it later
-$cdb_index = $cdb_index + 1
-$cdbName = cdb . $cdb_index
-sql create database $cdbName vgroups 1
-sleep 500
-sql use $cdbName
-
-print == create consume info table and consume result table
-sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
-sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
-
-sql show tables
-if $rows != 2 then
- return -1
-endi
-#######################################################################################
-
-if $loop_cnt == 0 then
- print == scenario 1: topic_ctb_column
- $topicList = ' . topic_ctb_column
- $topicList = $topicList . '
-elif $loop_cnt == 1 then
- print == scenario 2: topic_ctb_all
- $topicList = ' . topic_ctb_all
- $topicList = $topicList . '
-elif $loop_cnt == 2 then
- print == scenario 3: topic_ctb_function
- $topicList = ' . topic_ctb_function
- $topicList = $topicList . '
-else
- goto loop_consume_diff_topic_from_ctb_end
-endi
-
-$consumerId = 0
-$totalMsgOfCtb = $rowsPerCtb
-$expectmsgcnt = $totalMsgOfCtb
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-
-print == start consumer to pull msgs from ctb
-print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
-system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
-
-print == check consume result
-wait_consumer_end_from_ctb:
-sql select * from consumeresult
-print ==> rows: $rows
-print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
-if $rows != 1 then
- sleep 1000
- goto wait_consumer_end_from_ctb
-endi
-if $data[0][1] != $consumerId then
- return -1
-endi
-if $data[0][2] != $totalMsgOfCtb then
- return -1
-endi
-if $data[0][3] != $totalMsgOfCtb then
- return -1
-endi
-$loop_cnt = $loop_cnt + 1
-goto loop_consume_diff_topic_from_ctb
-loop_consume_diff_topic_from_ctb_end:
-
-print ================ test consume from ntb
-$loop_cnt = 0
-loop_consume_diff_topic_from_ntb:
-
-#######################################################################################
-# clear consume info and consume result
-#run tsim/tmq/clearConsume.sim
-# because drop table function no stable, so by create new db for consume info and result. Modify it later
-$cdb_index = $cdb_index + 1
-$cdbName = cdb . $cdb_index
-sql create database $cdbName vgroups 1
-sleep 500
-sql use $cdbName
-
-print == create consume info table and consume result table
-sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
-sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
-
-sql show tables
-if $rows != 2 then
- return -1
-endi
-#######################################################################################
-
-if $loop_cnt == 0 then
- print == scenario 1: topic_ntb_column
- $topicList = ' . topic_ntb_column
- $topicList = $topicList . '
-elif $loop_cnt == 1 then
- print == scenario 2: topic_ntb_all
- $topicList = ' . topic_ntb_all
- $topicList = $topicList . '
-elif $loop_cnt == 2 then
- print == scenario 3: topic_ntb_function
- $topicList = ' . topic_ntb_function
- $topicList = $topicList . '
-else
- goto loop_consume_diff_topic_from_ntb_end
-endi
-
-$consumerId = 0
-$totalMsgOfNtb = $rowsPerCtb
-$expectmsgcnt = $totalMsgOfNtb
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-
-print == start consumer to pull msgs from ntb
-print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
-system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
-
-print == check consume result from ntb
-wait_consumer_end_from_ntb:
-sql select * from consumeresult
-print ==> rows: $rows
-print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
-if $rows != 1 then
- sleep 1000
- goto wait_consumer_end_from_ntb
-endi
-if $data[0][1] != $consumerId then
- return -1
-endi
-if $data[0][2] != $totalMsgOfNtb then
- return -1
-endi
-if $data[0][3] != $totalMsgOfNtb then
- return -1
-endi
-$loop_cnt = $loop_cnt + 1
-goto loop_consume_diff_topic_from_ntb
-loop_consume_diff_topic_from_ntb_end:
-
-#------ not need stop consumer, because it exit after pull msg overthan expect msg
-#system tsim/tmq/consume.sh -s stop -x SIGINT
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
+#### test scenario, please refer to https://jira.taosdata.com:18090/pages/viewpage.action?pageId=135120406
+#basic1.sim: vgroups=1, one topic for one consumer, firstly insert data, then start consume. Include six topics
+#basic2.sim: vgroups=1, multi topics for one consumer, firstly insert data, then start consume. Include six topics
+#basic3.sim: vgroups=4, one topic for one consumer, firstly insert data, then start consume. Include six topics
+#basic4.sim: vgroups=4, multi topics for one consumer, firstly insert data, then start consume. Include six topics
+
+# notes1: Scalar function: ABS/ACOS/ASIN/ATAN/CEIL/COS/FLOOR/LOG/POW/ROUND/SIN/SQRT/TAN
+# The above use cases are combined with where filter conditions, such as: where ts > "2017-08-12 18:25:58.128Z" and sin(a) > 0.5;
+#
+# notes2: not support aggregate functions(such as sum/count/min/max) and time-windows(interval).
+#
+
+run tsim/tmq/prepareBasicEnv-4vgrp.sim
+
+#---- global parameters start ----#
+$dbName = db
+$vgroups = 4
+$stbPrefix = stb
+$ctbPrefix = ctb
+$ntbPrefix = ntb
+$stbNum = 1
+$ctbNum = 10
+$ntbNum = 10
+$rowsPerCtb = 10
+$tstart = 1640966400000 # 2022-01-01 00:00:00.000
+#---- global parameters end ----#
+
+$pullDelay = 3
+$ifcheckdata = 1
+$ifmanualcommit = 1
+$showMsg = 1
+$showRow = 0
+
+sql connect
+sql use $dbName
+
+print == create topics from super table
+sql create topic topic_stb_column as select ts, c3 from stb
+sql create topic topic_stb_all as select ts, c1, c2, c3 from stb
+sql create topic topic_stb_function as select ts, abs(c1), sin(c2) from stb
+
+print == create topics from child table
+sql create topic topic_ctb_column as select ts, c3 from ctb0
+sql create topic topic_ctb_all as select * from ctb0
+sql create topic topic_ctb_function as select ts, abs(c1), sin(c2) from ctb0
+
+print == create topics from normal table
+sql create topic topic_ntb_column as select ts, c3 from ntb0
+sql create topic topic_ntb_all as select * from ntb0
+sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb0
+
+#sql show topics
+#if $rows != 9 then
+# return -1
+#endi
+
+#'group.id:cgrp1,enable.auto.commit:false,auto.commit.interval.ms:6000,auto.offset.reset:earliest'
+$keyList = ' . group.id:cgrp1
+$keyList = $keyList . ,
+$keyList = $keyList . enable.auto.commit:false
+#$keyList = $keyList . ,
+#$keyList = $keyList . auto.commit.interval.ms:6000
+#$keyList = $keyList . ,
+#$keyList = $keyList . auto.offset.reset:earliest
+$keyList = $keyList . '
+print ========== key list: $keyList
+
+
+$cdb_index = 0
+#=============================== start consume =============================#
+
+print ================ test consume from stb
+$loop_cnt = 0
+loop_consume_diff_topic_from_stb:
+
+#######################################################################################
+# clear consume info and consume result
+#run tsim/tmq/clearConsume.sim
+# because drop table function no stable, so by create new db for consume info and result. Modify it later
+$cdb_index = $cdb_index + 1
+$cdbName = cdb . $cdb_index
+sql create database $cdbName vgroups 1
+sleep 500
+sql use $cdbName
+
+print == create consume info table and consume result table
+sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
+sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
+
+sql show tables
+if $rows != 2 then
+ return -1
+endi
+#######################################################################################
+
+if $loop_cnt == 0 then
+ print == scenario 1: topic_stb_column
+ $topicList = ' . topic_stb_column
+ $topicList = $topicList . '
+elif $loop_cnt == 1 then
+ print == scenario 2: topic_stb_all
+ $topicList = ' . topic_stb_all
+ $topicList = $topicList . '
+elif $loop_cnt == 2 then
+ print == scenario 3: topic_stb_function
+ $topicList = ' . topic_stb_function
+ $topicList = $topicList . '
+else
+ goto loop_consume_diff_topic_from_stb_end
+endi
+
+$consumerId = 0
+$totalMsgOfStb = $ctbNum * $rowsPerCtb
+$expectmsgcnt = $totalMsgOfStb
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+
+print == start consumer to pull msgs from stb
+print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
+system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
+
+print == check consume result
+wait_consumer_end_from_stb:
+sql select * from consumeresult
+print ==> rows: $rows
+print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+if $rows != 1 then
+ sleep 1000
+ goto wait_consumer_end_from_stb
+endi
+if $data[0][1] != $consumerId then
+ return -1
+endi
+if $data[0][2] != $expectmsgcnt then
+ return -1
+endi
+if $data[0][3] != $expectmsgcnt then
+ return -1
+endi
+$loop_cnt = $loop_cnt + 1
+goto loop_consume_diff_topic_from_stb
+loop_consume_diff_topic_from_stb_end:
+
+print ================ test consume from ctb
+$loop_cnt = 0
+loop_consume_diff_topic_from_ctb:
+
+#######################################################################################
+# clear consume info and consume result
+#run tsim/tmq/clearConsume.sim
+# because drop table function no stable, so by create new db for consume info and result. Modify it later
+$cdb_index = $cdb_index + 1
+$cdbName = cdb . $cdb_index
+sql create database $cdbName vgroups 1
+sleep 500
+sql use $cdbName
+
+print == create consume info table and consume result table
+sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
+sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
+
+sql show tables
+if $rows != 2 then
+ return -1
+endi
+#######################################################################################
+
+if $loop_cnt == 0 then
+ print == scenario 1: topic_ctb_column
+ $topicList = ' . topic_ctb_column
+ $topicList = $topicList . '
+elif $loop_cnt == 1 then
+ print == scenario 2: topic_ctb_all
+ $topicList = ' . topic_ctb_all
+ $topicList = $topicList . '
+elif $loop_cnt == 2 then
+ print == scenario 3: topic_ctb_function
+ $topicList = ' . topic_ctb_function
+ $topicList = $topicList . '
+else
+ goto loop_consume_diff_topic_from_ctb_end
+endi
+
+$consumerId = 0
+$totalMsgOfCtb = $rowsPerCtb
+$expectmsgcnt = $totalMsgOfCtb
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+
+print == start consumer to pull msgs from ctb
+print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
+system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
+
+print == check consume result
+wait_consumer_end_from_ctb:
+sql select * from consumeresult
+print ==> rows: $rows
+print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+if $rows != 1 then
+ sleep 1000
+ goto wait_consumer_end_from_ctb
+endi
+if $data[0][1] != $consumerId then
+ return -1
+endi
+if $data[0][2] != $totalMsgOfCtb then
+ return -1
+endi
+if $data[0][3] != $totalMsgOfCtb then
+ return -1
+endi
+$loop_cnt = $loop_cnt + 1
+goto loop_consume_diff_topic_from_ctb
+loop_consume_diff_topic_from_ctb_end:
+
+print ================ test consume from ntb
+$loop_cnt = 0
+loop_consume_diff_topic_from_ntb:
+
+#######################################################################################
+# clear consume info and consume result
+#run tsim/tmq/clearConsume.sim
+# because drop table function no stable, so by create new db for consume info and result. Modify it later
+$cdb_index = $cdb_index + 1
+$cdbName = cdb . $cdb_index
+sql create database $cdbName vgroups 1
+sleep 500
+sql use $cdbName
+
+print == create consume info table and consume result table
+sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
+sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
+
+sql show tables
+if $rows != 2 then
+ return -1
+endi
+#######################################################################################
+
+if $loop_cnt == 0 then
+ print == scenario 1: topic_ntb_column
+ $topicList = ' . topic_ntb_column
+ $topicList = $topicList . '
+elif $loop_cnt == 1 then
+ print == scenario 2: topic_ntb_all
+ $topicList = ' . topic_ntb_all
+ $topicList = $topicList . '
+elif $loop_cnt == 2 then
+ print == scenario 3: topic_ntb_function
+ $topicList = ' . topic_ntb_function
+ $topicList = $topicList . '
+else
+ goto loop_consume_diff_topic_from_ntb_end
+endi
+
+$consumerId = 0
+$totalMsgOfNtb = $rowsPerCtb
+$expectmsgcnt = $totalMsgOfNtb
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+
+print == start consumer to pull msgs from ntb
+print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
+system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
+
+print == check consume result from ntb
+wait_consumer_end_from_ntb:
+sql select * from consumeresult
+print ==> rows: $rows
+print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+if $rows != 1 then
+ sleep 1000
+ goto wait_consumer_end_from_ntb
+endi
+if $data[0][1] != $consumerId then
+ return -1
+endi
+if $data[0][2] != $totalMsgOfNtb then
+ return -1
+endi
+if $data[0][3] != $totalMsgOfNtb then
+ return -1
+endi
+$loop_cnt = $loop_cnt + 1
+goto loop_consume_diff_topic_from_ntb
+loop_consume_diff_topic_from_ntb_end:
+
+#------ not need stop consumer, because it exit after pull msg overthan expect msg
+#system tsim/tmq/consume.sh -s stop -x SIGINT
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/tmq/basic3Of2Cons.sim b/tests/script/tsim/tmq/basic3Of2Cons.sim
index bf640ae1a1..afaf824acb 100644
--- a/tests/script/tsim/tmq/basic3Of2Cons.sim
+++ b/tests/script/tsim/tmq/basic3Of2Cons.sim
@@ -1,384 +1,393 @@
-#### test scenario, please refer to https://jira.taosdata.com:18090/pages/viewpage.action?pageId=135120406
-#basic1Of2Cons.sim: vgroups=1, one topic for 2 consumers, firstly insert data, then start consume. Include six topics
-#basic2Of2Cons.sim: vgroups=1, multi topics for 2 consumers, firstly insert data, then start consume. Include six topics
-#basic3Of2Cons.sim: vgroups=4, one topic for 2 consumers, firstly insert data, then start consume. Include six topics
-#basic4Of2Cons.sim: vgroups=4, multi topics for 2 consumers, firstly insert data, then start consume. Include six topics
-
-# notes1: Scalar function: ABS/ACOS/ASIN/ATAN/CEIL/COS/FLOOR/LOG/POW/ROUND/SIN/SQRT/TAN
-# The above use cases are combined with where filter conditions, such as: where ts > "2017-08-12 18:25:58.128Z" and sin(a) > 0.5;
-#
-# notes2: not support aggregate functions(such as sum/count/min/max) and time-windows(interval).
-#
-
-run tsim/tmq/prepareBasicEnv-4vgrp.sim
-
-#---- global parameters start ----#
-$dbName = db
-$vgroups = 4
-$stbPrefix = stb
-$ctbPrefix = ctb
-$ntbPrefix = ntb
-$stbNum = 1
-$ctbNum = 10
-$ntbNum = 10
-$rowsPerCtb = 10
-$tstart = 1640966400000 # 2022-01-01 00:00:00.000
-#---- global parameters end ----#
-
-$pullDelay = 5
-$ifcheckdata = 1
-$showMsg = 1
-$showRow = 0
-
-sql connect
-sql use $dbName
-
-print == create topics from super table
-sql create topic topic_stb_column as select ts, c3 from stb
-sql create topic topic_stb_all as select ts, c1, c2, c3 from stb
-sql create topic topic_stb_function as select ts, abs(c1), sin(c2) from stb
-
-print == create topics from child table
-sql create topic topic_ctb_column as select ts, c3 from ctb0
-sql create topic topic_ctb_all as select * from ctb0
-sql create topic topic_ctb_function as select ts, abs(c1), sin(c2) from ctb0
-
-print == create topics from normal table
-sql create topic topic_ntb_column as select ts, c3 from ntb0
-sql create topic topic_ntb_all as select * from ntb0
-sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb0
-
-#sql show topics
-#if $rows != 9 then
-# return -1
-#endi
-
-$keyList = ' . group.id:cgrp1
-$keyList = $keyList . '
-
-$cdb_index = 0
-#=============================== start consume =============================#
-
-print ================ test consume from stb
-$loop_cnt = 0
-loop_consume_diff_topic_from_stb:
-
-#######################################################################################
-# clear consume info and consume result
-#run tsim/tmq/clearConsume.sim
-# because drop table function no stable, so by create new db for consume info and result. Modify it later
-$cdb_index = $cdb_index + 1
-$cdbName = cdb . $cdb_index
-sql create database $cdbName vgroups 1
-sleep 500
-sql use $cdbName
-
-print == create consume info table and consume result table
-sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
-sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
-
-sql show tables
-if $rows != 2 then
- return -1
-endi
-#######################################################################################
-
-if $loop_cnt == 0 then
- print == scenario 1: topic_stb_column
- $topicList = ' . topic_stb_column
- $topicList = $topicList . '
-elif $loop_cnt == 1 then
- print == scenario 2: topic_stb_all
- $topicList = ' . topic_stb_all
- $topicList = $topicList . '
-elif $loop_cnt == 2 then
- print == scenario 3: topic_stb_function
- $topicList = ' . topic_stb_function
- $topicList = $topicList . '
-else
- goto loop_consume_diff_topic_from_stb_end
-endi
-
-$consumerId = 0
-$totalMsgOfStb = $ctbNum * $rowsPerCtb
-$expectmsgcnt = $totalMsgOfStb
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-$consumerId = 1
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-
-print == start consumer to pull msgs from stb
-print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
-system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
-
-print == check consume result
-wait_consumer_end_from_stb:
-sql select * from consumeresult
-print ==> rows: $rows
-print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
-print ==> rows[1]: $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
-if $rows != 2 then
- sleep 1000
- goto wait_consumer_end_from_stb
-endi
-if $data[0][1] == 0 then
- if $data[1][1] != 1 then
- return -1
- endi
-endi
-if $data[0][1] == 1 then
- if $data[1][1] != 0 then
- return -1
- endi
-endi
-
-if $data[0][2] <= 0 then
- return -1
-endi
-if $data[0][2] >= $expectmsgcnt then
- return -1
-endi
-
-if $data[1][2] <= 0 then
- return -1
-endi
-if $data[1][2] >= $expectmsgcnt then
- return -1
-endi
-
-$sumOfMsgCnt = $data[0][2] + $data[1][2]
-if $sumOfMsgCnt != $expectmsgcnt then
- return -1
-endi
-
-
-if $data[0][3] <= 0 then
- return -1
-endi
-if $data[0][3] >= $expectmsgcnt then
- return -1
-endi
-
-if $data[1][3] <= 0 then
- return -1
-endi
-if $data[1][3] >= $expectmsgcnt then
- return -1
-endi
-
-$sumOfMsgRows = $data[0][3] + $data[1][3]
-if $sumOfMsgRows != $expectmsgcnt then
- return -1
-endi
-
-$loop_cnt = $loop_cnt + 1
-goto loop_consume_diff_topic_from_stb
-loop_consume_diff_topic_from_stb_end:
-
-print ================ test consume from ctb
-$loop_cnt = 0
-loop_consume_diff_topic_from_ctb:
-
-#######################################################################################
-# clear consume info and consume result
-#run tsim/tmq/clearConsume.sim
-# because drop table function no stable, so by create new db for consume info and result. Modify it later
-$cdb_index = $cdb_index + 1
-$cdbName = cdb . $cdb_index
-sql create database $cdbName vgroups 1
-sleep 500
-sql use $cdbName
-
-print == create consume info table and consume result table
-sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
-sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
-
-sql show tables
-if $rows != 2 then
- return -1
-endi
-#######################################################################################
-
-if $loop_cnt == 0 then
- print == scenario 1: topic_ctb_column
- $topicList = ' . topic_ctb_column
- $topicList = $topicList . '
-elif $loop_cnt == 1 then
- print == scenario 2: topic_ctb_all
- $topicList = ' . topic_ctb_all
- $topicList = $topicList . '
-elif $loop_cnt == 2 then
- print == scenario 3: topic_ctb_function
- $topicList = ' . topic_ctb_function
- $topicList = $topicList . '
-else
- goto loop_consume_diff_topic_from_ctb_end
-endi
-
-$consumerId = 0
-$totalMsgOfCtb = $rowsPerCtb
-$expectmsgcnt = $totalMsgOfCtb
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-$consumerId = 1
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-
-print == start consumer to pull msgs from ctb
-print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
-system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
-
-print == check consume result
-wait_consumer_end_from_ctb:
-sql select * from consumeresult
-print ==> rows: $rows
-print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
-print ==> rows[1]: $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
-if $rows != 2 then
- sleep 1000
- goto wait_consumer_end_from_ctb
-endi
-if $data[0][1] == 0 then
- if $data[1][1] != 1 then
- return -1
- endi
-endi
-if $data[0][1] == 1 then
- if $data[1][1] != 0 then
- return -1
- endi
-endi
-
-# either $data[0][2] == $totalMsgOfCtb and $data[1][2] == 0
-# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfCtb
-if $data[0][2] == $totalMsgOfCtb then
- if $data[1][2] == 0 then
- goto check_ok_0
- endi
-elif $data[1][2] == $totalMsgOfCtb then
- if $data[0][2] == 0 then
- goto check_ok_0
- endi
-endi
-return -1
-check_ok_0:
-
-if $data[0][3] == $totalMsgOfCtb then
- if $data[1][3] == 0 then
- goto check_ok_1
- endi
-elif $data[1][3] == $totalMsgOfCtb then
- if $data[0][3] == 0 then
- goto check_ok_1
- endi
-endi
-return -1
-check_ok_1:
-
-$loop_cnt = $loop_cnt + 1
-goto loop_consume_diff_topic_from_ctb
-loop_consume_diff_topic_from_ctb_end:
-
-print ================ test consume from ntb
-$loop_cnt = 0
-loop_consume_diff_topic_from_ntb:
-
-#######################################################################################
-# clear consume info and consume result
-#run tsim/tmq/clearConsume.sim
-# because drop table function no stable, so by create new db for consume info and result. Modify it later
-$cdb_index = $cdb_index + 1
-$cdbName = cdb . $cdb_index
-sql create database $cdbName vgroups 1
-sleep 500
-sql use $cdbName
-
-print == create consume info table and consume result table
-sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
-sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
-
-sql show tables
-if $rows != 2 then
- return -1
-endi
-#######################################################################################
-
-if $loop_cnt == 0 then
- print == scenario 1: topic_ntb_column
- $topicList = ' . topic_ntb_column
- $topicList = $topicList . '
-elif $loop_cnt == 1 then
- print == scenario 2: topic_ntb_all
- $topicList = ' . topic_ntb_all
- $topicList = $topicList . '
-elif $loop_cnt == 2 then
- print == scenario 3: topic_ntb_function
- $topicList = ' . topic_ntb_function
- $topicList = $topicList . '
-else
- goto loop_consume_diff_topic_from_ntb_end
-endi
-
-$consumerId = 0
-$totalMsgOfNtb = $rowsPerCtb
-$expectmsgcnt = $totalMsgOfNtb
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-$consumerId = 1
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-
-print == start consumer to pull msgs from ntb
-print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
-system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
-
-print == check consume result from ntb
-wait_consumer_end_from_ntb:
-sql select * from consumeresult
-print ==> rows: $rows
-print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
-print ==> rows[1]: $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
-if $rows != 2 then
- sleep 1000
- goto wait_consumer_end_from_ntb
-endi
-if $data[0][1] == 0 then
- if $data[1][1] != 1 then
- return -1
- endi
-endi
-if $data[1][1] == 0 then
- if $data[0][1] != 1 then
- return -1
- endi
-endi
-
-# either $data[0][2] == $totalMsgOfNtb and $data[1][2] == 0
-# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfNtb
-if $data[0][2] == $totalMsgOfNtb then
- if $data[1][2] == 0 then
- goto check_ok_2
- endi
-elif $data[1][2] == $totalMsgOfNtb then
- if $data[0][2] == 0 then
- goto check_ok_2
- endi
-endi
-return -1
-check_ok_2:
-
-if $data[0][3] == $totalMsgOfNtb then
- if $data[1][3] == 0 then
- goto check_ok_3
- endi
-elif $data[1][3] == $totalMsgOfNtb then
- if $data[0][3] == 0 then
- goto check_ok_3
- endi
-endi
-return -1
-check_ok_3:
-
-$loop_cnt = $loop_cnt + 1
-goto loop_consume_diff_topic_from_ntb
-loop_consume_diff_topic_from_ntb_end:
-
-#------ not need stop consumer, because it exit after pull msg overthan expect msg
-#system tsim/tmq/consume.sh -s stop -x SIGINT
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
+#### test scenario, please refer to https://jira.taosdata.com:18090/pages/viewpage.action?pageId=135120406
+#basic1Of2Cons.sim: vgroups=1, one topic for 2 consumers, firstly insert data, then start consume. Include six topics
+#basic2Of2Cons.sim: vgroups=1, multi topics for 2 consumers, firstly insert data, then start consume. Include six topics
+#basic3Of2Cons.sim: vgroups=4, one topic for 2 consumers, firstly insert data, then start consume. Include six topics
+#basic4Of2Cons.sim: vgroups=4, multi topics for 2 consumers, firstly insert data, then start consume. Include six topics
+
+# notes1: Scalar function: ABS/ACOS/ASIN/ATAN/CEIL/COS/FLOOR/LOG/POW/ROUND/SIN/SQRT/TAN
+# The above use cases are combined with where filter conditions, such as: where ts > "2017-08-12 18:25:58.128Z" and sin(a) > 0.5;
+#
+# notes2: not support aggregate functions(such as sum/count/min/max) and time-windows(interval).
+#
+
+run tsim/tmq/prepareBasicEnv-4vgrp.sim
+
+#---- global parameters start ----#
+$dbName = db
+$vgroups = 4
+$stbPrefix = stb
+$ctbPrefix = ctb
+$ntbPrefix = ntb
+$stbNum = 1
+$ctbNum = 10
+$ntbNum = 10
+$rowsPerCtb = 10
+$tstart = 1640966400000 # 2022-01-01 00:00:00.000
+#---- global parameters end ----#
+
+$pullDelay = 5
+$ifcheckdata = 1
+$ifmanualcommit = 1
+$showMsg = 1
+$showRow = 0
+
+sql connect
+sql use $dbName
+
+print == create topics from super table
+sql create topic topic_stb_column as select ts, c3 from stb
+sql create topic topic_stb_all as select ts, c1, c2, c3 from stb
+sql create topic topic_stb_function as select ts, abs(c1), sin(c2) from stb
+
+print == create topics from child table
+sql create topic topic_ctb_column as select ts, c3 from ctb0
+sql create topic topic_ctb_all as select * from ctb0
+sql create topic topic_ctb_function as select ts, abs(c1), sin(c2) from ctb0
+
+print == create topics from normal table
+sql create topic topic_ntb_column as select ts, c3 from ntb0
+sql create topic topic_ntb_all as select * from ntb0
+sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb0
+
+#sql show topics
+#if $rows != 9 then
+# return -1
+#endi
+
+#'group.id:cgrp1,enable.auto.commit:false,auto.commit.interval.ms:6000,auto.offset.reset:earliest'
+$keyList = ' . group.id:cgrp1
+$keyList = $keyList . ,
+$keyList = $keyList . enable.auto.commit:false
+#$keyList = $keyList . ,
+#$keyList = $keyList . auto.commit.interval.ms:6000
+#$keyList = $keyList . ,
+#$keyList = $keyList . auto.offset.reset:earliest
+$keyList = $keyList . '
+print ========== key list: $keyList
+
+$cdb_index = 0
+#=============================== start consume =============================#
+
+print ================ test consume from stb
+$loop_cnt = 0
+loop_consume_diff_topic_from_stb:
+
+#######################################################################################
+# clear consume info and consume result
+#run tsim/tmq/clearConsume.sim
+# because drop table function no stable, so by create new db for consume info and result. Modify it later
+$cdb_index = $cdb_index + 1
+$cdbName = cdb . $cdb_index
+sql create database $cdbName vgroups 1
+sleep 500
+sql use $cdbName
+
+print == create consume info table and consume result table
+sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
+sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
+
+sql show tables
+if $rows != 2 then
+ return -1
+endi
+#######################################################################################
+
+if $loop_cnt == 0 then
+ print == scenario 1: topic_stb_column
+ $topicList = ' . topic_stb_column
+ $topicList = $topicList . '
+elif $loop_cnt == 1 then
+ print == scenario 2: topic_stb_all
+ $topicList = ' . topic_stb_all
+ $topicList = $topicList . '
+elif $loop_cnt == 2 then
+ print == scenario 3: topic_stb_function
+ $topicList = ' . topic_stb_function
+ $topicList = $topicList . '
+else
+ goto loop_consume_diff_topic_from_stb_end
+endi
+
+$consumerId = 0
+$totalMsgOfStb = $ctbNum * $rowsPerCtb
+$expectmsgcnt = $totalMsgOfStb
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+$consumerId = 1
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+
+print == start consumer to pull msgs from stb
+print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
+system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
+
+print == check consume result
+wait_consumer_end_from_stb:
+sql select * from consumeresult
+print ==> rows: $rows
+print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print ==> rows[1]: $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+if $rows != 2 then
+ sleep 1000
+ goto wait_consumer_end_from_stb
+endi
+if $data[0][1] == 0 then
+ if $data[1][1] != 1 then
+ return -1
+ endi
+endi
+if $data[0][1] == 1 then
+ if $data[1][1] != 0 then
+ return -1
+ endi
+endi
+
+if $data[0][2] <= 0 then
+ return -1
+endi
+if $data[0][2] >= $expectmsgcnt then
+ return -1
+endi
+
+if $data[1][2] <= 0 then
+ return -1
+endi
+if $data[1][2] >= $expectmsgcnt then
+ return -1
+endi
+
+$sumOfMsgCnt = $data[0][2] + $data[1][2]
+if $sumOfMsgCnt != $expectmsgcnt then
+ return -1
+endi
+
+
+if $data[0][3] <= 0 then
+ return -1
+endi
+if $data[0][3] >= $expectmsgcnt then
+ return -1
+endi
+
+if $data[1][3] <= 0 then
+ return -1
+endi
+if $data[1][3] >= $expectmsgcnt then
+ return -1
+endi
+
+$sumOfMsgRows = $data[0][3] + $data[1][3]
+if $sumOfMsgRows != $expectmsgcnt then
+ return -1
+endi
+
+$loop_cnt = $loop_cnt + 1
+goto loop_consume_diff_topic_from_stb
+loop_consume_diff_topic_from_stb_end:
+
+print ================ test consume from ctb
+$loop_cnt = 0
+loop_consume_diff_topic_from_ctb:
+
+#######################################################################################
+# clear consume info and consume result
+#run tsim/tmq/clearConsume.sim
+# because drop table function no stable, so by create new db for consume info and result. Modify it later
+$cdb_index = $cdb_index + 1
+$cdbName = cdb . $cdb_index
+sql create database $cdbName vgroups 1
+sleep 500
+sql use $cdbName
+
+print == create consume info table and consume result table
+sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
+sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
+
+sql show tables
+if $rows != 2 then
+ return -1
+endi
+#######################################################################################
+
+if $loop_cnt == 0 then
+ print == scenario 1: topic_ctb_column
+ $topicList = ' . topic_ctb_column
+ $topicList = $topicList . '
+elif $loop_cnt == 1 then
+ print == scenario 2: topic_ctb_all
+ $topicList = ' . topic_ctb_all
+ $topicList = $topicList . '
+elif $loop_cnt == 2 then
+ print == scenario 3: topic_ctb_function
+ $topicList = ' . topic_ctb_function
+ $topicList = $topicList . '
+else
+ goto loop_consume_diff_topic_from_ctb_end
+endi
+
+$consumerId = 0
+$totalMsgOfCtb = $rowsPerCtb
+$expectmsgcnt = $totalMsgOfCtb
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+$consumerId = 1
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+
+print == start consumer to pull msgs from ctb
+print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
+system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
+
+print == check consume result
+wait_consumer_end_from_ctb:
+sql select * from consumeresult
+print ==> rows: $rows
+print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print ==> rows[1]: $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+if $rows != 2 then
+ sleep 1000
+ goto wait_consumer_end_from_ctb
+endi
+if $data[0][1] == 0 then
+ if $data[1][1] != 1 then
+ return -1
+ endi
+endi
+if $data[0][1] == 1 then
+ if $data[1][1] != 0 then
+ return -1
+ endi
+endi
+
+# either $data[0][2] == $totalMsgOfCtb and $data[1][2] == 0
+# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfCtb
+if $data[0][2] == $totalMsgOfCtb then
+ if $data[1][2] == 0 then
+ goto check_ok_0
+ endi
+elif $data[1][2] == $totalMsgOfCtb then
+ if $data[0][2] == 0 then
+ goto check_ok_0
+ endi
+endi
+return -1
+check_ok_0:
+
+if $data[0][3] == $totalMsgOfCtb then
+ if $data[1][3] == 0 then
+ goto check_ok_1
+ endi
+elif $data[1][3] == $totalMsgOfCtb then
+ if $data[0][3] == 0 then
+ goto check_ok_1
+ endi
+endi
+return -1
+check_ok_1:
+
+$loop_cnt = $loop_cnt + 1
+goto loop_consume_diff_topic_from_ctb
+loop_consume_diff_topic_from_ctb_end:
+
+print ================ test consume from ntb
+$loop_cnt = 0
+loop_consume_diff_topic_from_ntb:
+
+#######################################################################################
+# clear consume info and consume result
+#run tsim/tmq/clearConsume.sim
+# because drop table function no stable, so by create new db for consume info and result. Modify it later
+$cdb_index = $cdb_index + 1
+$cdbName = cdb . $cdb_index
+sql create database $cdbName vgroups 1
+sleep 500
+sql use $cdbName
+
+print == create consume info table and consume result table
+sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
+sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
+
+sql show tables
+if $rows != 2 then
+ return -1
+endi
+#######################################################################################
+
+if $loop_cnt == 0 then
+ print == scenario 1: topic_ntb_column
+ $topicList = ' . topic_ntb_column
+ $topicList = $topicList . '
+elif $loop_cnt == 1 then
+ print == scenario 2: topic_ntb_all
+ $topicList = ' . topic_ntb_all
+ $topicList = $topicList . '
+elif $loop_cnt == 2 then
+ print == scenario 3: topic_ntb_function
+ $topicList = ' . topic_ntb_function
+ $topicList = $topicList . '
+else
+ goto loop_consume_diff_topic_from_ntb_end
+endi
+
+$consumerId = 0
+$totalMsgOfNtb = $rowsPerCtb
+$expectmsgcnt = $totalMsgOfNtb
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+$consumerId = 1
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+
+print == start consumer to pull msgs from ntb
+print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
+system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
+
+print == check consume result from ntb
+wait_consumer_end_from_ntb:
+sql select * from consumeresult
+print ==> rows: $rows
+print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print ==> rows[1]: $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+if $rows != 2 then
+ sleep 1000
+ goto wait_consumer_end_from_ntb
+endi
+if $data[0][1] == 0 then
+ if $data[1][1] != 1 then
+ return -1
+ endi
+endi
+if $data[1][1] == 0 then
+ if $data[0][1] != 1 then
+ return -1
+ endi
+endi
+
+# either $data[0][2] == $totalMsgOfNtb and $data[1][2] == 0
+# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfNtb
+if $data[0][2] == $totalMsgOfNtb then
+ if $data[1][2] == 0 then
+ goto check_ok_2
+ endi
+elif $data[1][2] == $totalMsgOfNtb then
+ if $data[0][2] == 0 then
+ goto check_ok_2
+ endi
+endi
+return -1
+check_ok_2:
+
+if $data[0][3] == $totalMsgOfNtb then
+ if $data[1][3] == 0 then
+ goto check_ok_3
+ endi
+elif $data[1][3] == $totalMsgOfNtb then
+ if $data[0][3] == 0 then
+ goto check_ok_3
+ endi
+endi
+return -1
+check_ok_3:
+
+$loop_cnt = $loop_cnt + 1
+goto loop_consume_diff_topic_from_ntb
+loop_consume_diff_topic_from_ntb_end:
+
+#------ not need stop consumer, because it exit after pull msg overthan expect msg
+#system tsim/tmq/consume.sh -s stop -x SIGINT
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/tmq/basic4.sim b/tests/script/tsim/tmq/basic4.sim
index 42023bda7e..9b418f12f2 100644
--- a/tests/script/tsim/tmq/basic4.sim
+++ b/tests/script/tsim/tmq/basic4.sim
@@ -1,216 +1,226 @@
-#### test scenario, please refer to https://jira.taosdata.com:18090/pages/viewpage.action?pageId=135120406
-#basic1.sim: vgroups=1, one topic for one consumer, firstly insert data, then start consume. Include six topics
-#basic2.sim: vgroups=1, multi topics for one consumer, firstly insert data, then start consume. Include six topics
-#basic3.sim: vgroups=4, one topic for one consumer, firstly insert data, then start consume. Include six topics
-#basic4.sim: vgroups=4, multi topics for one consumer, firstly insert data, then start consume. Include six topics
-
-# notes1: Scalar function: ABS/ACOS/ASIN/ATAN/CEIL/COS/FLOOR/LOG/POW/ROUND/SIN/SQRT/TAN
-# The above use cases are combined with where filter conditions, such as: where ts > "2017-08-12 18:25:58.128Z" and sin(a) > 0.5;
-#
-# notes2: not support aggregate functions(such as sum/count/min/max) and time-windows(interval).
-#
-
-run tsim/tmq/prepareBasicEnv-4vgrp.sim
-
-#---- global parameters start ----#
-$dbName = db
-$vgroups = 4
-$stbPrefix = stb
-$ctbPrefix = ctb
-$ntbPrefix = ntb
-$stbNum = 1
-$ctbNum = 10
-$ntbNum = 10
-$rowsPerCtb = 10
-$tstart = 1640966400000 # 2022-01-01 00:00:00.000
-#---- global parameters end ----#
-
-$pullDelay = 3
-$ifcheckdata = 1
-$showMsg = 1
-$showRow = 0
-
-sql connect
-sql use $dbName
-
-print == create topics from super table
-sql create topic topic_stb_column as select ts, c3 from stb
-sql create topic topic_stb_all as select ts, c1, c2, c3 from stb
-sql create topic topic_stb_function as select ts, abs(c1), sin(c2) from stb
-
-print == create topics from child table
-sql create topic topic_ctb_column as select ts, c3 from ctb0
-sql create topic topic_ctb_all as select * from ctb0
-sql create topic topic_ctb_function as select ts, abs(c1), sin(c2) from ctb0
-
-print == create topics from normal table
-sql create topic topic_ntb_column as select ts, c3 from ntb0
-sql create topic topic_ntb_all as select * from ntb0
-sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb0
-
-#sql show topics
-#if $rows != 9 then
-# return -1
-#endi
-
-$keyList = ' . group.id:cgrp1
-$keyList = $keyList . '
-
-$topicNum = 3
-
-print ================ test consume from stb
-print == multi toipcs: topic_stb_column + topic_stb_all + topic_stb_function
-$topicList = ' . topic_stb_column
-$topicList = $topicList . ,
-$topicList = $topicList . topic_stb_all
-$topicList = $topicList . ,
-$topicList = $topicList . topic_stb_function
-$topicList = $topicList . '
-
-$consumerId = 0
-$totalMsgOfStb = $ctbNum * $rowsPerCtb
-$totalMsgOfStb = $totalMsgOfStb * $topicNum
-$expectmsgcnt = $totalMsgOfStb
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-
-print == start consumer to pull msgs from stb
-print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $dbName -s start
-system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $dbName -s start
-
-print == check consume result
-wait_consumer_end_from_stb:
-sql select * from consumeresult
-print ==> rows: $rows
-print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
-if $rows != 1 then
- sleep 1000
- goto wait_consumer_end_from_stb
-endi
-if $data[0][1] != $consumerId then
- return -1
-endi
-if $data[0][2] != $expectmsgcnt then
- return -1
-endi
-if $data[0][3] != $expectmsgcnt then
- return -1
-endi
-
-#######################################################################################
-# clear consume info and consume result
-#run tsim/tmq/clearConsume.sim
-# because drop table function no stable, so by create new db for consume info and result. Modify it later
-$cdbName = cdb1
-sql create database $cdbName vgroups 1
-sleep 500
-sql use $cdbName
-
-print == create consume info table and consume result table
-sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
-sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
-
-sql show tables
-if $rows != 2 then
- return -1
-endi
-#######################################################################################
-
-
-print ================ test consume from ctb
-print == multi toipcs: topic_ctb_column + topic_ctb_all + topic_ctb_function
-$topicList = ' . topic_ctb_column
-$topicList = $topicList . ,
-$topicList = $topicList . topic_ctb_all
-$topicList = $topicList . ,
-$topicList = $topicList . topic_ctb_function
-$topicList = $topicList . '
-
-$consumerId = 0
-$totalMsgOfCtb = $rowsPerCtb * $topicNum
-$expectmsgcnt = $totalMsgOfCtb
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-
-print == start consumer to pull msgs from ctb
-print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
-system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
-
-print == check consume result
-wait_consumer_end_from_ctb:
-sql select * from consumeresult
-print ==> rows: $rows
-print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
-if $rows != 1 then
- sleep 1000
- goto wait_consumer_end_from_ctb
-endi
-if $data[0][1] != $consumerId then
- return -1
-endi
-if $data[0][2] != $totalMsgOfCtb then
- return -1
-endi
-if $data[0][3] != $totalMsgOfCtb then
- return -1
-endi
-
-#######################################################################################
-# clear consume info and consume result
-#run tsim/tmq/clearConsume.sim
-# because drop table function no stable, so by create new db for consume info and result. Modify it later
-$cdbName = cdb2
-sql create database $cdbName vgroups 1
-sleep 500
-sql use $cdbName
-
-print == create consume info table and consume result table
-sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
-sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
-
-sql show tables
-if $rows != 2 then
- return -1
-endi
-#######################################################################################
-
-
-print ================ test consume from ntb
-print == multi toipcs: topic_ntb_column + topic_ntb_all + topic_ntb_function
-$topicList = ' . topic_ntb_column
-$topicList = $topicList . ,
-$topicList = $topicList . topic_ntb_all
-$topicList = $topicList . ,
-$topicList = $topicList . topic_ntb_function
-$topicList = $topicList . '
-
-$consumerId = 0
-$totalMsgOfNtb = $rowsPerCtb * $topicNum
-$expectmsgcnt = $totalMsgOfNtb
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-
-print == start consumer to pull msgs from ntb
-print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
-system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
-
-print == check consume result from ntb
-wait_consumer_end_from_ntb:
-sql select * from consumeresult
-print ==> rows: $rows
-print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
-if $rows != 1 then
- sleep 1000
- goto wait_consumer_end_from_ntb
-endi
-if $data[0][1] != $consumerId then
- return -1
-endi
-if $data[0][2] != $totalMsgOfNtb then
- return -1
-endi
-if $data[0][3] != $totalMsgOfNtb then
- return -1
-endi
-
-#------ not need stop consumer, because it exit after pull msg overthan expect msg
-#system tsim/tmq/consume.sh -s stop -x SIGINT
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
+#### test scenario, please refer to https://jira.taosdata.com:18090/pages/viewpage.action?pageId=135120406
+#basic1.sim: vgroups=1, one topic for one consumer, firstly insert data, then start consume. Include six topics
+#basic2.sim: vgroups=1, multi topics for one consumer, firstly insert data, then start consume. Include six topics
+#basic3.sim: vgroups=4, one topic for one consumer, firstly insert data, then start consume. Include six topics
+#basic4.sim: vgroups=4, multi topics for one consumer, firstly insert data, then start consume. Include six topics
+
+# notes1: Scalar function: ABS/ACOS/ASIN/ATAN/CEIL/COS/FLOOR/LOG/POW/ROUND/SIN/SQRT/TAN
+# The above use cases are combined with where filter conditions, such as: where ts > "2017-08-12 18:25:58.128Z" and sin(a) > 0.5;
+#
+# notes2: not support aggregate functions(such as sum/count/min/max) and time-windows(interval).
+#
+
+run tsim/tmq/prepareBasicEnv-4vgrp.sim
+
+#---- global parameters start ----#
+$dbName = db
+$vgroups = 4
+$stbPrefix = stb
+$ctbPrefix = ctb
+$ntbPrefix = ntb
+$stbNum = 1
+$ctbNum = 10
+$ntbNum = 10
+$rowsPerCtb = 10
+$tstart = 1640966400000 # 2022-01-01 00:00:00.000
+#---- global parameters end ----#
+
+$pullDelay = 3
+$ifcheckdata = 1
+$ifmanualcommit = 1
+$showMsg = 1
+$showRow = 0
+
+sql connect
+sql use $dbName
+
+print == create topics from super table
+sql create topic topic_stb_column as select ts, c3 from stb
+sql create topic topic_stb_all as select ts, c1, c2, c3 from stb
+sql create topic topic_stb_function as select ts, abs(c1), sin(c2) from stb
+
+print == create topics from child table
+sql create topic topic_ctb_column as select ts, c3 from ctb0
+sql create topic topic_ctb_all as select * from ctb0
+sql create topic topic_ctb_function as select ts, abs(c1), sin(c2) from ctb0
+
+print == create topics from normal table
+sql create topic topic_ntb_column as select ts, c3 from ntb0
+sql create topic topic_ntb_all as select * from ntb0
+sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb0
+
+#sql show topics
+#if $rows != 9 then
+# return -1
+#endi
+
+#'group.id:cgrp1,enable.auto.commit:false,auto.commit.interval.ms:6000,auto.offset.reset:earliest'
+$keyList = ' . group.id:cgrp1
+$keyList = $keyList . ,
+$keyList = $keyList . enable.auto.commit:false
+#$keyList = $keyList . ,
+#$keyList = $keyList . auto.commit.interval.ms:6000
+#$keyList = $keyList . ,
+#$keyList = $keyList . auto.offset.reset:earliest
+$keyList = $keyList . '
+print ========== key list: $keyList
+
+
+$topicNum = 3
+
+print ================ test consume from stb
+print == multi toipcs: topic_stb_column + topic_stb_all + topic_stb_function
+$topicList = ' . topic_stb_column
+$topicList = $topicList . ,
+$topicList = $topicList . topic_stb_all
+$topicList = $topicList . ,
+$topicList = $topicList . topic_stb_function
+$topicList = $topicList . '
+
+$consumerId = 0
+$totalMsgOfStb = $ctbNum * $rowsPerCtb
+$totalMsgOfStb = $totalMsgOfStb * $topicNum
+$expectmsgcnt = $totalMsgOfStb
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+
+print == start consumer to pull msgs from stb
+print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $dbName -s start
+system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $dbName -s start
+
+print == check consume result
+wait_consumer_end_from_stb:
+sql select * from consumeresult
+print ==> rows: $rows
+print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+if $rows != 1 then
+ sleep 1000
+ goto wait_consumer_end_from_stb
+endi
+if $data[0][1] != $consumerId then
+ return -1
+endi
+if $data[0][2] != $expectmsgcnt then
+ return -1
+endi
+if $data[0][3] != $expectmsgcnt then
+ return -1
+endi
+
+#######################################################################################
+# clear consume info and consume result
+#run tsim/tmq/clearConsume.sim
+# because drop table function no stable, so by create new db for consume info and result. Modify it later
+$cdbName = cdb1
+sql create database $cdbName vgroups 1
+sleep 500
+sql use $cdbName
+
+print == create consume info table and consume result table
+sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
+sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
+
+sql show tables
+if $rows != 2 then
+ return -1
+endi
+#######################################################################################
+
+
+print ================ test consume from ctb
+print == multi toipcs: topic_ctb_column + topic_ctb_all + topic_ctb_function
+$topicList = ' . topic_ctb_column
+$topicList = $topicList . ,
+$topicList = $topicList . topic_ctb_all
+$topicList = $topicList . ,
+$topicList = $topicList . topic_ctb_function
+$topicList = $topicList . '
+
+$consumerId = 0
+$totalMsgOfCtb = $rowsPerCtb * $topicNum
+$expectmsgcnt = $totalMsgOfCtb
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+
+print == start consumer to pull msgs from ctb
+print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
+system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
+
+print == check consume result
+wait_consumer_end_from_ctb:
+sql select * from consumeresult
+print ==> rows: $rows
+print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+if $rows != 1 then
+ sleep 1000
+ goto wait_consumer_end_from_ctb
+endi
+if $data[0][1] != $consumerId then
+ return -1
+endi
+if $data[0][2] != $totalMsgOfCtb then
+ return -1
+endi
+if $data[0][3] != $totalMsgOfCtb then
+ return -1
+endi
+
+#######################################################################################
+# clear consume info and consume result
+#run tsim/tmq/clearConsume.sim
+# because drop table function no stable, so by create new db for consume info and result. Modify it later
+$cdbName = cdb2
+sql create database $cdbName vgroups 1
+sleep 500
+sql use $cdbName
+
+print == create consume info table and consume result table
+sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
+sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
+
+sql show tables
+if $rows != 2 then
+ return -1
+endi
+#######################################################################################
+
+
+print ================ test consume from ntb
+print == multi toipcs: topic_ntb_column + topic_ntb_all + topic_ntb_function
+$topicList = ' . topic_ntb_column
+$topicList = $topicList . ,
+$topicList = $topicList . topic_ntb_all
+$topicList = $topicList . ,
+$topicList = $topicList . topic_ntb_function
+$topicList = $topicList . '
+
+$consumerId = 0
+$totalMsgOfNtb = $rowsPerCtb * $topicNum
+$expectmsgcnt = $totalMsgOfNtb
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+
+print == start consumer to pull msgs from ntb
+print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
+system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
+
+print == check consume result from ntb
+wait_consumer_end_from_ntb:
+sql select * from consumeresult
+print ==> rows: $rows
+print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+if $rows != 1 then
+ sleep 1000
+ goto wait_consumer_end_from_ntb
+endi
+if $data[0][1] != $consumerId then
+ return -1
+endi
+if $data[0][2] != $totalMsgOfNtb then
+ return -1
+endi
+if $data[0][3] != $totalMsgOfNtb then
+ return -1
+endi
+
+#------ not need stop consumer, because it exit after pull msg overthan expect msg
+#system tsim/tmq/consume.sh -s stop -x SIGINT
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/tmq/basic4Of2Cons.sim b/tests/script/tsim/tmq/basic4Of2Cons.sim
index a17d8a2f45..510aaf0e1a 100644
--- a/tests/script/tsim/tmq/basic4Of2Cons.sim
+++ b/tests/script/tsim/tmq/basic4Of2Cons.sim
@@ -1,319 +1,328 @@
-#### test scenario, please refer to https://jira.taosdata.com:18090/pages/viewpage.action?pageId=135120406
-#basic1Of2Cons.sim: vgroups=1, one topic for 2 consumers, firstly insert data, then start consume. Include six topics
-#basic2Of2Cons.sim: vgroups=1, multi topics for 2 consumers, firstly insert data, then start consume. Include six topics
-#basic3Of2Cons.sim: vgroups=4, one topic for 2 consumers, firstly insert data, then start consume. Include six topics
-#basic4Of2Cons.sim: vgroups=4, multi topics for 2 consumers, firstly insert data, then start consume. Include six topics
-
-# notes1: Scalar function: ABS/ACOS/ASIN/ATAN/CEIL/COS/FLOOR/LOG/POW/ROUND/SIN/SQRT/TAN
-# The above use cases are combined with where filter conditions, such as: where ts > "2017-08-12 18:25:58.128Z" and sin(a) > 0.5;
-#
-# notes2: not support aggregate functions(such as sum/count/min/max) and time-windows(interval).
-#
-
-run tsim/tmq/prepareBasicEnv-4vgrp.sim
-
-#---- global parameters start ----#
-$dbName = db
-$vgroups = 4
-$stbPrefix = stb
-$ctbPrefix = ctb
-$ntbPrefix = ntb
-$stbNum = 1
-$ctbNum = 10
-$ntbNum = 10
-$rowsPerCtb = 10
-$tstart = 1640966400000 # 2022-01-01 00:00:00.000
-#---- global parameters end ----#
-
-$pullDelay = 5
-$ifcheckdata = 1
-$showMsg = 1
-$showRow = 0
-
-sql connect
-sql use $dbName
-
-print == create topics from super table
-sql create topic topic_stb_column as select ts, c3 from stb
-sql create topic topic_stb_all as select ts, c1, c2, c3 from stb
-sql create topic topic_stb_function as select ts, abs(c1), sin(c2) from stb
-
-print == create topics from child table
-sql create topic topic_ctb_column as select ts, c3 from ctb0
-sql create topic topic_ctb_all as select * from ctb0
-sql create topic topic_ctb_function as select ts, abs(c1), sin(c2) from ctb0
-
-print == create topics from normal table
-sql create topic topic_ntb_column as select ts, c3 from ntb0
-sql create topic topic_ntb_all as select * from ntb0
-sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb0
-
-#sql show topics
-#if $rows != 9 then
-# return -1
-#endi
-
-$keyList = ' . group.id:cgrp1
-$keyList = $keyList . '
-
-$topicNum = 3
-
-print ================ test consume from stb
-print == multi toipcs: topic_stb_column + topic_stb_all + topic_stb_function
-$topicList = ' . topic_stb_column
-$topicList = $topicList . ,
-$topicList = $topicList . topic_stb_all
-$topicList = $topicList . ,
-$topicList = $topicList . topic_stb_function
-$topicList = $topicList . '
-
-$consumerId = 0
-$totalMsgOfStb = $ctbNum * $rowsPerCtb
-$totalMsgOfStb = $totalMsgOfStb * $topicNum
-$expectmsgcnt = $totalMsgOfStb
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-$consumerId = 1
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-
-print == start consumer to pull msgs from stb
-print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $dbName -s start
-system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $dbName -s start
-
-print == check consume result
-wait_consumer_end_from_stb:
-sql select * from consumeresult
-print ==> rows: $rows
-print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
-print ==> rows[1]: $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
-if $rows != 2 then
- sleep 1000
- goto wait_consumer_end_from_stb
-endi
-if $data[0][1] == 0 then
- if $data[1][1] != 1 then
- return -1
- endi
-endi
-if $data[0][1] == 1 then
- if $data[1][1] != 0 then
- return -1
- endi
-endi
-
-if $data[0][2] <= 0 then
- return -1
-endi
-if $data[0][2] >= $expectmsgcnt then
- return -1
-endi
-
-if $data[1][2] <= 0 then
- return -1
-endi
-if $data[1][2] >= $expectmsgcnt then
- return -1
-endi
-
-$sumOfConsMsg = $data[0][2] + $data[1][2]
-if $sumOfConsMsg != $expectmsgcnt then
- return -1
-endi
-
-if $data[0][3] <= 0 then
- return -1
-endi
-if $data[0][3] >= $expectmsgcnt then
- return -1
-endi
-
-if $data[1][3] <= 0 then
- return -1
-endi
-if $data[1][3] >= $expectmsgcnt then
- return -1
-endi
-
-$sumOfConsRow = $data[0][3] + $data[1][3]
-if $sumOfConsRow != $expectmsgcnt then
- return -1
-endi
-
-#######################################################################################
-# clear consume info and consume result
-#run tsim/tmq/clearConsume.sim
-# because drop table function no stable, so by create new db for consume info and result. Modify it later
-$cdbName = cdb1
-sql create database $cdbName vgroups 1
-sleep 500
-sql use $cdbName
-
-print == create consume info table and consume result table
-sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
-sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
-
-sql show tables
-if $rows != 2 then
- return -1
-endi
-#######################################################################################
-
-
-print ================ test consume from ctb
-print == multi toipcs: topic_ctb_column + topic_ctb_all + topic_ctb_function
-$topicList = ' . topic_ctb_column
-$topicList = $topicList . ,
-$topicList = $topicList . topic_ctb_all
-$topicList = $topicList . ,
-$topicList = $topicList . topic_ctb_function
-$topicList = $topicList . '
-
-$consumerId = 0
-$totalMsgOfCtb = $rowsPerCtb * $topicNum
-$expectmsgcnt = $totalMsgOfCtb
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-$consumerId = 1
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-
-print == start consumer to pull msgs from ctb
-print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
-system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
-
-print == check consume result
-wait_consumer_end_from_ctb:
-sql select * from consumeresult
-print ==> rows: $rows
-print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
-print ==> rows[1]: $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
-if $rows != 2 then
- sleep 1000
- goto wait_consumer_end_from_ctb
-endi
-if $data[0][1] == 0 then
- if $data[1][1] != 1 then
- return -1
- endi
-endi
-if $data[0][1] == 1 then
- if $data[1][1] != 0 then
- return -1
- endi
-endi
-
-# either $data[0][2] == $totalMsgOfCtb and $data[1][2] == 0
-# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfCtb
-if $data[0][2] == $totalMsgOfCtb then
- if $data[1][2] == 0 then
- goto check_ok_0
- endi
-elif $data[0][2] == 0 then
- if $data[1][2] == $totalMsgOfCtb then
- goto check_ok_0
- endi
-endi
-return -1
-check_ok_0:
-
-if $data[0][3] == $totalMsgOfCtb then
- if $data[1][3] == 0 then
- goto check_ok_1
- endi
-elif $data[0][3] == 0 then
- if $data[1][3] == $totalMsgOfCtb then
- goto check_ok_1
- endi
-endi
-return -1
-check_ok_1:
-
-
-#######################################################################################
-# clear consume info and consume result
-#run tsim/tmq/clearConsume.sim
-# because drop table function no stable, so by create new db for consume info and result. Modify it later
-$cdbName = cdb2
-sql create database $cdbName vgroups 1
-sleep 500
-sql use $cdbName
-
-print == create consume info table and consume result table
-sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
-sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
-
-sql show tables
-if $rows != 2 then
- return -1
-endi
-#######################################################################################
-
-
-print ================ test consume from ntb
-print == multi toipcs: topic_ntb_column + topic_ntb_all + topic_ntb_function
-$topicList = ' . topic_ntb_column
-$topicList = $topicList . ,
-$topicList = $topicList . topic_ntb_all
-$topicList = $topicList . ,
-$topicList = $topicList . topic_ntb_function
-$topicList = $topicList . '
-
-$consumerId = 0
-$totalMsgOfNtb = $rowsPerCtb * $topicNum
-$expectmsgcnt = $totalMsgOfNtb
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-$consumerId = 1
-sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
-
-print == start consumer to pull msgs from ntb
-print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
-system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
-
-print == check consume result from ntb
-wait_consumer_end_from_ntb:
-sql select * from consumeresult
-print ==> rows: $rows
-print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
-print ==> rows[1]: $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
-if $rows != 2 then
- sleep 1000
- goto wait_consumer_end_from_ntb
-endi
-if $data[0][1] == 0 then
- if $data[1][1] != 1 then
- return -1
- endi
-endi
-if $data[1][1] == 0 then
- if $data[0][1] != 1 then
- return -1
- endi
-endi
-
-# either $data[0][2] == $totalMsgOfNtb and $data[1][2] == 0
-# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfNtb
-if $data[0][2] == $totalMsgOfNtb then
- if $data[1][2] == 0 then
- goto check_ok_2
- endi
-elif $data[0][2] == 0 then
- if $data[1][2] == $totalMsgOfNtb then
- goto check_ok_2
- endi
-endi
-return -1
-check_ok_2:
-
-if $data[0][3] == $totalMsgOfNtb then
- if $data[1][3] == 0 then
- goto check_ok_3
- endi
-elif $data[0][3] == 0 then
- if $data[1][3] == $totalMsgOfNtb then
- goto check_ok_3
- endi
-endi
-return -1
-check_ok_3:
-
-#------ not need stop consumer, because it exit after pull msg overthan expect msg
-#system tsim/tmq/consume.sh -s stop -x SIGINT
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
+#### test scenario, please refer to https://jira.taosdata.com:18090/pages/viewpage.action?pageId=135120406
+#basic1Of2Cons.sim: vgroups=1, one topic for 2 consumers, firstly insert data, then start consume. Include six topics
+#basic2Of2Cons.sim: vgroups=1, multi topics for 2 consumers, firstly insert data, then start consume. Include six topics
+#basic3Of2Cons.sim: vgroups=4, one topic for 2 consumers, firstly insert data, then start consume. Include six topics
+#basic4Of2Cons.sim: vgroups=4, multi topics for 2 consumers, firstly insert data, then start consume. Include six topics
+
+# notes1: Scalar function: ABS/ACOS/ASIN/ATAN/CEIL/COS/FLOOR/LOG/POW/ROUND/SIN/SQRT/TAN
+# The above use cases are combined with where filter conditions, such as: where ts > "2017-08-12 18:25:58.128Z" and sin(a) > 0.5;
+#
+# notes2: not support aggregate functions(such as sum/count/min/max) and time-windows(interval).
+#
+
+run tsim/tmq/prepareBasicEnv-4vgrp.sim
+
+#---- global parameters start ----#
+$dbName = db
+$vgroups = 4
+$stbPrefix = stb
+$ctbPrefix = ctb
+$ntbPrefix = ntb
+$stbNum = 1
+$ctbNum = 10
+$ntbNum = 10
+$rowsPerCtb = 10
+$tstart = 1640966400000 # 2022-01-01 00:00:00.000
+#---- global parameters end ----#
+
+$pullDelay = 5
+$ifcheckdata = 1
+$ifmanualcommit = 1
+$showMsg = 1
+$showRow = 0
+
+sql connect
+sql use $dbName
+
+print == create topics from super table
+sql create topic topic_stb_column as select ts, c3 from stb
+sql create topic topic_stb_all as select ts, c1, c2, c3 from stb
+sql create topic topic_stb_function as select ts, abs(c1), sin(c2) from stb
+
+print == create topics from child table
+sql create topic topic_ctb_column as select ts, c3 from ctb0
+sql create topic topic_ctb_all as select * from ctb0
+sql create topic topic_ctb_function as select ts, abs(c1), sin(c2) from ctb0
+
+print == create topics from normal table
+sql create topic topic_ntb_column as select ts, c3 from ntb0
+sql create topic topic_ntb_all as select * from ntb0
+sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb0
+
+#sql show topics
+#if $rows != 9 then
+# return -1
+#endi
+
+#'group.id:cgrp1,enable.auto.commit:false,auto.commit.interval.ms:6000,auto.offset.reset:earliest'
+$keyList = ' . group.id:cgrp1
+$keyList = $keyList . ,
+$keyList = $keyList . enable.auto.commit:false
+#$keyList = $keyList . ,
+#$keyList = $keyList . auto.commit.interval.ms:6000
+#$keyList = $keyList . ,
+#$keyList = $keyList . auto.offset.reset:earliest
+$keyList = $keyList . '
+print ========== key list: $keyList
+
+$topicNum = 3
+
+print ================ test consume from stb
+print == multi toipcs: topic_stb_column + topic_stb_all + topic_stb_function
+$topicList = ' . topic_stb_column
+$topicList = $topicList . ,
+$topicList = $topicList . topic_stb_all
+$topicList = $topicList . ,
+$topicList = $topicList . topic_stb_function
+$topicList = $topicList . '
+
+$consumerId = 0
+$totalMsgOfStb = $ctbNum * $rowsPerCtb
+$totalMsgOfStb = $totalMsgOfStb * $topicNum
+$expectmsgcnt = $totalMsgOfStb
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+$consumerId = 1
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+
+print == start consumer to pull msgs from stb
+print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $dbName -s start
+system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $dbName -s start
+
+print == check consume result
+wait_consumer_end_from_stb:
+sql select * from consumeresult
+print ==> rows: $rows
+print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print ==> rows[1]: $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+if $rows != 2 then
+ sleep 1000
+ goto wait_consumer_end_from_stb
+endi
+if $data[0][1] == 0 then
+ if $data[1][1] != 1 then
+ return -1
+ endi
+endi
+if $data[0][1] == 1 then
+ if $data[1][1] != 0 then
+ return -1
+ endi
+endi
+
+if $data[0][2] <= 0 then
+ return -1
+endi
+if $data[0][2] >= $expectmsgcnt then
+ return -1
+endi
+
+if $data[1][2] <= 0 then
+ return -1
+endi
+if $data[1][2] >= $expectmsgcnt then
+ return -1
+endi
+
+$sumOfConsMsg = $data[0][2] + $data[1][2]
+if $sumOfConsMsg != $expectmsgcnt then
+ return -1
+endi
+
+if $data[0][3] <= 0 then
+ return -1
+endi
+if $data[0][3] >= $expectmsgcnt then
+ return -1
+endi
+
+if $data[1][3] <= 0 then
+ return -1
+endi
+if $data[1][3] >= $expectmsgcnt then
+ return -1
+endi
+
+$sumOfConsRow = $data[0][3] + $data[1][3]
+if $sumOfConsRow != $expectmsgcnt then
+ return -1
+endi
+
+#######################################################################################
+# clear consume info and consume result
+#run tsim/tmq/clearConsume.sim
+# because drop table function no stable, so by create new db for consume info and result. Modify it later
+$cdbName = cdb1
+sql create database $cdbName vgroups 1
+sleep 500
+sql use $cdbName
+
+print == create consume info table and consume result table
+sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
+sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
+
+sql show tables
+if $rows != 2 then
+ return -1
+endi
+#######################################################################################
+
+
+print ================ test consume from ctb
+print == multi toipcs: topic_ctb_column + topic_ctb_all + topic_ctb_function
+$topicList = ' . topic_ctb_column
+$topicList = $topicList . ,
+$topicList = $topicList . topic_ctb_all
+$topicList = $topicList . ,
+$topicList = $topicList . topic_ctb_function
+$topicList = $topicList . '
+
+$consumerId = 0
+$totalMsgOfCtb = $rowsPerCtb * $topicNum
+$expectmsgcnt = $totalMsgOfCtb
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+$consumerId = 1
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+
+print == start consumer to pull msgs from ctb
+print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
+system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
+
+print == check consume result
+wait_consumer_end_from_ctb:
+sql select * from consumeresult
+print ==> rows: $rows
+print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print ==> rows[1]: $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+if $rows != 2 then
+ sleep 1000
+ goto wait_consumer_end_from_ctb
+endi
+if $data[0][1] == 0 then
+ if $data[1][1] != 1 then
+ return -1
+ endi
+endi
+if $data[0][1] == 1 then
+ if $data[1][1] != 0 then
+ return -1
+ endi
+endi
+
+# either $data[0][2] == $totalMsgOfCtb and $data[1][2] == 0
+# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfCtb
+if $data[0][2] == $totalMsgOfCtb then
+ if $data[1][2] == 0 then
+ goto check_ok_0
+ endi
+elif $data[0][2] == 0 then
+ if $data[1][2] == $totalMsgOfCtb then
+ goto check_ok_0
+ endi
+endi
+return -1
+check_ok_0:
+
+if $data[0][3] == $totalMsgOfCtb then
+ if $data[1][3] == 0 then
+ goto check_ok_1
+ endi
+elif $data[0][3] == 0 then
+ if $data[1][3] == $totalMsgOfCtb then
+ goto check_ok_1
+ endi
+endi
+return -1
+check_ok_1:
+
+
+#######################################################################################
+# clear consume info and consume result
+#run tsim/tmq/clearConsume.sim
+# because drop table function no stable, so by create new db for consume info and result. Modify it later
+$cdbName = cdb2
+sql create database $cdbName vgroups 1
+sleep 500
+sql use $cdbName
+
+print == create consume info table and consume result table
+sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
+sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
+
+sql show tables
+if $rows != 2 then
+ return -1
+endi
+#######################################################################################
+
+
+print ================ test consume from ntb
+print == multi toipcs: topic_ntb_column + topic_ntb_all + topic_ntb_function
+$topicList = ' . topic_ntb_column
+$topicList = $topicList . ,
+$topicList = $topicList . topic_ntb_all
+$topicList = $topicList . ,
+$topicList = $topicList . topic_ntb_function
+$topicList = $topicList . '
+
+$consumerId = 0
+$totalMsgOfNtb = $rowsPerCtb * $topicNum
+$expectmsgcnt = $totalMsgOfNtb
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+$consumerId = 1
+sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
+
+print == start consumer to pull msgs from ntb
+print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
+system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
+
+print == check consume result from ntb
+wait_consumer_end_from_ntb:
+sql select * from consumeresult
+print ==> rows: $rows
+print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print ==> rows[1]: $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+if $rows != 2 then
+ sleep 1000
+ goto wait_consumer_end_from_ntb
+endi
+if $data[0][1] == 0 then
+ if $data[1][1] != 1 then
+ return -1
+ endi
+endi
+if $data[1][1] == 0 then
+ if $data[0][1] != 1 then
+ return -1
+ endi
+endi
+
+# either $data[0][2] == $totalMsgOfNtb and $data[1][2] == 0
+# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfNtb
+if $data[0][2] == $totalMsgOfNtb then
+ if $data[1][2] == 0 then
+ goto check_ok_2
+ endi
+elif $data[0][2] == 0 then
+ if $data[1][2] == $totalMsgOfNtb then
+ goto check_ok_2
+ endi
+endi
+return -1
+check_ok_2:
+
+if $data[0][3] == $totalMsgOfNtb then
+ if $data[1][3] == 0 then
+ goto check_ok_3
+ endi
+elif $data[0][3] == 0 then
+ if $data[1][3] == $totalMsgOfNtb then
+ goto check_ok_3
+ endi
+endi
+return -1
+check_ok_3:
+
+#------ not need stop consumer, because it exit after pull msg overthan expect msg
+#system tsim/tmq/consume.sh -s stop -x SIGINT
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/tmq/prepareBasicEnv-1vgrp.sim b/tests/script/tsim/tmq/prepareBasicEnv-1vgrp.sim
index e32a1df802..fbdea96f93 100644
--- a/tests/script/tsim/tmq/prepareBasicEnv-1vgrp.sim
+++ b/tests/script/tsim/tmq/prepareBasicEnv-1vgrp.sim
@@ -1,88 +1,88 @@
-# stop all dnodes before start this case
-system sh/stop_dnodes.sh
-
-# deploy dnode 1
-system sh/deploy.sh -n dnode1 -i 1
-
-# add some config items for this case
-#system sh/cfg.sh -n dnode1 -c supportVnodes -v 0
-
-# start dnode 1
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-
-#---- global parameters start ----#
-$dbName = db
-$vgroups = 1
-$stbPrefix = stb
-$ctbPrefix = ctb
-$ntbPrefix = ntb
-$stbNum = 1
-$ctbNum = 10
-$ntbNum = 10
-$rowsPerCtb = 10
-$tstart = 1640966400000 # 2022-01-01 00:00:00.000
-#---- global parameters end ----#
-
-print == create database $dbName vgroups $vgroups
-sql create database $dbName vgroups $vgroups
-
-#wait database ready
-$loop_cnt = 0
-check_db_ready:
-if $loop_cnt == 10 then
- print ====> database not ready!
- return -1
-endi
-sql show databases
-print ==> rows: $rows
-print ==> $data(db)[0] $data(db)[1] $data(db)[2] $data(db)[3] $data(db)[4] $data(db)[5] $data(db)[6] $data(db)[7] $data(db)[8] $data(db)[9] $data(db)[10] $data(db)[11] $data(db)[12]
-print $data(db)[13] $data(db)[14] $data(db)[15] $data(db)[16] $data(db)[17] $data(db)[18] $data(db)[19] $data(db)[20]
-if $data(db)[19] != nostrict then
- sleep 100
- $loop_cnt = $loop_cnt + 1
- goto check_db_ready
-endi
-
-sql use $dbName
-
-print == create consume info table and consume result table
-sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
-sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
-
-sql show tables
-if $rows != 2 then
- return -1
-endi
-
-print == create super table
-sql create table $stbPrefix (ts timestamp, c1 int, c2 float, c3 binary(16)) tags (t1 int)
-sql show stables
-if $rows != 1 then
- return -1
-endi
-
-print == create child table, normal table and insert data
-$i = 0
-while $i < $ctbNum
- $ctb = $ctbPrefix . $i
- $ntb = $ntbPrefix . $i
- sql create table $ctb using $stbPrefix tags( $i )
- sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(16))
-
- $x = 0
- while $x < $rowsPerCtb
- $binary = ' . binary-
- $binary = $binary . $i
- $binary = $binary . '
-
- sql insert into $ctb values ($tstart , $i , $x , $binary )
- sql insert into $ntb values ($tstart , $i , $x , $binary )
- $tstart = $tstart + 1
- $x = $x + 1
- endw
-
- $i = $i + 1
- $tstart = 1640966400000
-endw
+# stop all dnodes before start this case
+system sh/stop_dnodes.sh
+
+# deploy dnode 1
+system sh/deploy.sh -n dnode1 -i 1
+
+# add some config items for this case
+#system sh/cfg.sh -n dnode1 -c supportVnodes -v 0
+
+# start dnode 1
+system sh/exec.sh -n dnode1 -s start
+
+sql connect
+
+#---- global parameters start ----#
+$dbName = db
+$vgroups = 1
+$stbPrefix = stb
+$ctbPrefix = ctb
+$ntbPrefix = ntb
+$stbNum = 1
+$ctbNum = 10
+$ntbNum = 10
+$rowsPerCtb = 10
+$tstart = 1640966400000 # 2022-01-01 00:00:00.000
+#---- global parameters end ----#
+
+print == create database $dbName vgroups $vgroups
+sql create database $dbName vgroups $vgroups
+
+#wait database ready
+$loop_cnt = 0
+check_db_ready:
+if $loop_cnt == 10 then
+ print ====> database not ready!
+ return -1
+endi
+sql show databases
+print ==> rows: $rows
+print ==> $data(db)[0] $data(db)[1] $data(db)[2] $data(db)[3] $data(db)[4] $data(db)[5] $data(db)[6] $data(db)[7] $data(db)[8] $data(db)[9] $data(db)[10] $data(db)[11] $data(db)[12]
+print $data(db)[13] $data(db)[14] $data(db)[15] $data(db)[16] $data(db)[17] $data(db)[18] $data(db)[19] $data(db)[20]
+if $data(db)[19] != nostrict then
+ sleep 100
+ $loop_cnt = $loop_cnt + 1
+ goto check_db_ready
+endi
+
+sql use $dbName
+
+print == create consume info table and consume result table
+sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
+sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
+
+sql show tables
+if $rows != 2 then
+ return -1
+endi
+
+print == create super table
+sql create table $stbPrefix (ts timestamp, c1 int, c2 float, c3 binary(16)) tags (t1 int)
+sql show stables
+if $rows != 1 then
+ return -1
+endi
+
+print == create child table, normal table and insert data
+$i = 0
+while $i < $ctbNum
+ $ctb = $ctbPrefix . $i
+ $ntb = $ntbPrefix . $i
+ sql create table $ctb using $stbPrefix tags( $i )
+ sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(16))
+
+ $x = 0
+ while $x < $rowsPerCtb
+ $binary = ' . binary-
+ $binary = $binary . $i
+ $binary = $binary . '
+
+ sql insert into $ctb values ($tstart , $i , $x , $binary )
+ sql insert into $ntb values ($tstart , $i , $x , $binary )
+ $tstart = $tstart + 1
+ $x = $x + 1
+ endw
+
+ $i = $i + 1
+ $tstart = 1640966400000
+endw
diff --git a/tests/script/tsim/tmq/prepareBasicEnv-4vgrp.sim b/tests/script/tsim/tmq/prepareBasicEnv-4vgrp.sim
index 4750aab214..8b5573486d 100644
--- a/tests/script/tsim/tmq/prepareBasicEnv-4vgrp.sim
+++ b/tests/script/tsim/tmq/prepareBasicEnv-4vgrp.sim
@@ -1,88 +1,88 @@
-# stop all dnodes before start this case
-system sh/stop_dnodes.sh
-
-# deploy dnode 1
-system sh/deploy.sh -n dnode1 -i 1
-
-# add some config items for this case
-#system sh/cfg.sh -n dnode1 -c supportVnodes -v 0
-
-# start dnode 1
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-
-#---- global parameters start ----#
-$dbName = db
-$vgroups = 4
-$stbPrefix = stb
-$ctbPrefix = ctb
-$ntbPrefix = ntb
-$stbNum = 1
-$ctbNum = 10
-$ntbNum = 10
-$rowsPerCtb = 10
-$tstart = 1640966400000 # 2022-01-01 00:00:00.000
-#---- global parameters end ----#
-
-print == create database $dbName vgroups $vgroups
-sql create database $dbName vgroups $vgroups
-
-#wait database ready
-$loop_cnt = 0
-check_db_ready:
-if $loop_cnt == 10 then
- print ====> database not ready!
- return -1
-endi
-sql show databases
-print ==> rows: $rows
-print ==> $data(db)[0] $data(db)[1] $data(db)[2] $data(db)[3] $data(db)[4] $data(db)[5] $data(db)[6] $data(db)[7] $data(db)[8] $data(db)[9] $data(db)[10] $data(db)[11] $data(db)[12]
-print $data(db)[13] $data(db)[14] $data(db)[15] $data(db)[16] $data(db)[17] $data(db)[18] $data(db)[19] $data(db)[20]
-if $data(db)[19] != nostrict then
- sleep 100
- $loop_cnt = $loop_cnt + 1
- goto check_db_ready
-endi
-
-sql use $dbName
-
-print == create consume info table and consume result table
-sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
-sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
-
-sql show tables
-if $rows != 2 then
- return -1
-endi
-
-print == create super table
-sql create table $stbPrefix (ts timestamp, c1 int, c2 float, c3 binary(16)) tags (t1 int)
-sql show stables
-if $rows != 1 then
- return -1
-endi
-
-print == create child table, normal table and insert data
-$i = 0
-while $i < $ctbNum
- $ctb = $ctbPrefix . $i
- $ntb = $ntbPrefix . $i
- sql create table $ctb using $stbPrefix tags( $i )
- sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(16))
-
- $x = 0
- while $x < $rowsPerCtb
- $binary = ' . binary-
- $binary = $binary . $i
- $binary = $binary . '
-
- sql insert into $ctb values ($tstart , $i , $x , $binary )
- sql insert into $ntb values ($tstart , $i , $x , $binary )
- $tstart = $tstart + 1
- $x = $x + 1
- endw
-
- $i = $i + 1
- $tstart = 1640966400000
-endw
+# stop all dnodes before start this case
+system sh/stop_dnodes.sh
+
+# deploy dnode 1
+system sh/deploy.sh -n dnode1 -i 1
+
+# add some config items for this case
+#system sh/cfg.sh -n dnode1 -c supportVnodes -v 0
+
+# start dnode 1
+system sh/exec.sh -n dnode1 -s start
+
+sql connect
+
+#---- global parameters start ----#
+$dbName = db
+$vgroups = 4
+$stbPrefix = stb
+$ctbPrefix = ctb
+$ntbPrefix = ntb
+$stbNum = 1
+$ctbNum = 10
+$ntbNum = 10
+$rowsPerCtb = 10
+$tstart = 1640966400000 # 2022-01-01 00:00:00.000
+#---- global parameters end ----#
+
+print == create database $dbName vgroups $vgroups
+sql create database $dbName vgroups $vgroups
+
+#wait database ready
+$loop_cnt = 0
+check_db_ready:
+if $loop_cnt == 10 then
+ print ====> database not ready!
+ return -1
+endi
+sql show databases
+print ==> rows: $rows
+print ==> $data(db)[0] $data(db)[1] $data(db)[2] $data(db)[3] $data(db)[4] $data(db)[5] $data(db)[6] $data(db)[7] $data(db)[8] $data(db)[9] $data(db)[10] $data(db)[11] $data(db)[12]
+print $data(db)[13] $data(db)[14] $data(db)[15] $data(db)[16] $data(db)[17] $data(db)[18] $data(db)[19] $data(db)[20]
+if $data(db)[19] != nostrict then
+ sleep 100
+ $loop_cnt = $loop_cnt + 1
+ goto check_db_ready
+endi
+
+sql use $dbName
+
+print == create consume info table and consume result table
+sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
+sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
+
+sql show tables
+if $rows != 2 then
+ return -1
+endi
+
+print == create super table
+sql create table $stbPrefix (ts timestamp, c1 int, c2 float, c3 binary(16)) tags (t1 int)
+sql show stables
+if $rows != 1 then
+ return -1
+endi
+
+print == create child table, normal table and insert data
+$i = 0
+while $i < $ctbNum
+ $ctb = $ctbPrefix . $i
+ $ntb = $ntbPrefix . $i
+ sql create table $ctb using $stbPrefix tags( $i )
+ sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(16))
+
+ $x = 0
+ while $x < $rowsPerCtb
+ $binary = ' . binary-
+ $binary = $binary . $i
+ $binary = $binary . '
+
+ sql insert into $ctb values ($tstart , $i , $x , $binary )
+ sql insert into $ntb values ($tstart , $i , $x , $binary )
+ $tstart = $tstart + 1
+ $x = $x + 1
+ endw
+
+ $i = $i + 1
+ $tstart = 1640966400000
+endw
diff --git a/tests/script/tsim/tstream/basic1.sim b/tests/script/tsim/tstream/basic1.sim
index 3bb5943b3b..37f9cb94c9 100644
--- a/tests/script/tsim/tstream/basic1.sim
+++ b/tests/script/tsim/tstream/basic1.sim
@@ -136,7 +136,7 @@ if $data35 != 3 then
endi
sql insert into t1 values(1648791223001,12,14,13,11.1);
-sleep 100
+sleep 500
sql select _wstartts, c1, c2 ,c3 ,c4, c5 from streamt;
if $rows != 4 then
diff --git a/tests/system-test/0-others/udfTest.py b/tests/system-test/0-others/udfTest.py
new file mode 100644
index 0000000000..0a998aee2b
--- /dev/null
+++ b/tests/system-test/0-others/udfTest.py
@@ -0,0 +1,544 @@
+import taos
+import sys
+import time
+import os
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+import subprocess
+
+class TDTestCase:
+
+ def init(self, conn, logSql):
+ tdLog.debug(f"start to excute {__file__}")
+ tdSql.init(conn.cursor())
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root) - len("/build/bin")]
+ break
+ return buildPath
+
+ def prepare_udf_so(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+ print(projPath)
+
+ libudf1 = subprocess.Popen('find %s -name "libudf1.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
+ libudf2 = subprocess.Popen('find %s -name "libudf2.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
+ os.system("mkdir /tmp/udf/")
+ os.system("sudo cp %s /tmp/udf/ "%libudf1.replace("\n" ,""))
+ os.system("sudo cp %s /tmp/udf/ "%libudf2.replace("\n" ,""))
+
+
+ def prepare_data(self):
+
+ tdSql.execute("use db")
+ tdSql.execute(
+ '''create table stb1
+ (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
+ tags (t1 int)
+ '''
+ )
+
+ tdSql.execute(
+ '''
+ create table t1
+ (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
+ '''
+ )
+ for i in range(4):
+ tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+
+ for i in range(9):
+ tdSql.execute(
+ f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ )
+ tdSql.execute(
+ f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ )
+ tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+
+ tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+
+ tdSql.execute(
+ f'''insert into t1 values
+ ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
+ ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
+ ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
+ ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
+ ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
+ ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
+ ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
+ ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
+ ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
+ ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ '''
+ )
+
+ tdSql.execute("create table tb (ts timestamp , num1 int , num2 int, num3 double , num4 binary(30))")
+ tdSql.execute(
+ f'''insert into tb values
+ ( '2020-04-21 01:01:01.000', NULL, 1, 1, "binary1" )
+ ( '2020-10-21 01:01:01.000', 1, 1, 1.11, "binary1" )
+ ( '2020-12-31 01:01:01.000', 2, 22222, 22, "binary1" )
+ ( '2021-01-01 01:01:06.000', 3, 33333, 33, "binary1" )
+ ( '2021-05-07 01:01:10.000', 4, 44444, 44, "binary1" )
+ ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, "binary1" )
+ ( '2021-09-30 01:01:16.000', 5, 55555, 55, "binary1" )
+ ( '2022-02-01 01:01:20.000', 6, 66666, 66, "binary1" )
+ ( '2022-10-28 01:01:26.000', 0, 00000, 00, "binary1" )
+ ( '2022-12-01 01:01:30.000', 8, -88888, -88, "binary1" )
+ ( '2022-12-31 01:01:36.000', 9, -9999999, -99, "binary1" )
+ ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, "binary1" )
+ '''
+ )
+
+
+ def create_udf_function(self):
+
+ for i in range(10):
+ # create scalar functions
+ tdSql.execute("create function udf1 as '/tmp/udf/libudf1.so' outputtype int bufSize 8;")
+
+ # create aggregate functions
+
+ tdSql.execute("create aggregate function udf2 as '/tmp/udf/libudf2.so' outputtype double bufSize 8;")
+
+ functions = tdSql.getResult("show functions")
+ function_nums = len(functions)
+ if function_nums == 2:
+ tdLog.info("create two udf functions success ")
+
+ # drop functions
+
+ tdSql.execute("drop function udf1")
+ tdSql.execute("drop function udf2")
+
+ functions = tdSql.getResult("show functions")
+ for function in functions:
+ if "udf1" in function[0] or "udf2" in function[0]:
+ tdLog.info("drop udf functions failed ")
+ tdLog.exit("drop udf functions failed")
+
+ tdLog.info("drop two udf functions success ")
+
+ # create scalar functions
+ tdSql.execute("create function udf1 as '/tmp/udf/libudf1.so' outputtype int bufSize 8;")
+
+ # create aggregate functions
+
+ tdSql.execute("create aggregate function udf2 as '/tmp/udf/libudf2.so' outputtype double bufSize 8;")
+
+ functions = tdSql.getResult("show functions")
+ function_nums = len(functions)
+ if function_nums == 2:
+ tdLog.info("create two udf functions success ")
+
+ def basic_udf_query(self):
+
+ # scalar functions
+
+ tdSql.execute("use db ")
+ tdSql.query("select num1 , udf1(num1) ,num2 ,udf1(num2),num3 ,udf1(num3),num4 ,udf1(num4) from tb")
+ tdSql.checkData(0,0,None)
+ tdSql.checkData(0,1,None)
+ tdSql.checkData(0,2,1)
+ tdSql.checkData(0,3,88)
+ tdSql.checkData(0,4,1.000000000)
+ tdSql.checkData(0,5,88)
+ tdSql.checkData(0,6,"binary1")
+ tdSql.checkData(0,7,88)
+
+ tdSql.checkData(3,0,3)
+ tdSql.checkData(3,1,88)
+ tdSql.checkData(3,2,33333)
+ tdSql.checkData(3,3,88)
+ tdSql.checkData(3,4,33.000000000)
+ tdSql.checkData(3,5,88)
+ tdSql.checkData(3,6,"binary1")
+ tdSql.checkData(3,7,88)
+
+ tdSql.checkData(11,0,None)
+ tdSql.checkData(11,1,None)
+ tdSql.checkData(11,2,None)
+ tdSql.checkData(11,3,None)
+ tdSql.checkData(11,4,None)
+ tdSql.checkData(11,5,None)
+ tdSql.checkData(11,6,"binary1")
+ tdSql.checkData(11,7,88)
+
+ tdSql.query("select c1 , udf1(c1) ,c2 ,udf1(c2), c3 ,udf1(c3), c4 ,udf1(c4) from stb1 order by c1")
+ tdSql.checkData(0,0,None)
+ tdSql.checkData(0,1,None)
+ tdSql.checkData(0,2,None)
+ tdSql.checkData(0,3,None)
+ tdSql.checkData(0,4,None)
+ tdSql.checkData(0,5,None)
+ tdSql.checkData(0,6,None)
+ tdSql.checkData(0,7,None)
+
+ tdSql.checkData(20,0,8)
+ tdSql.checkData(20,1,88)
+ tdSql.checkData(20,2,88888)
+ tdSql.checkData(20,3,88)
+ tdSql.checkData(20,4,888)
+ tdSql.checkData(20,5,88)
+ tdSql.checkData(20,6,88)
+ tdSql.checkData(20,7,88)
+
+
+ # aggregate functions
+ tdSql.query("select udf2(num1) ,udf2(num2), udf2(num3) from tb")
+ tdSql.checkData(0,0,15.362291496)
+ tdSql.checkData(0,1,10000949.553189287)
+ tdSql.checkData(0,2,168.633425216)
+
+ # Arithmetic compute
+ tdSql.query("select udf2(num1)+100 ,udf2(num2)-100, udf2(num3)*100 ,udf2(num3)/100 from tb")
+ tdSql.checkData(0,0,115.362291496)
+ tdSql.checkData(0,1,10000849.553189287)
+ tdSql.checkData(0,2,16863.342521576)
+ tdSql.checkData(0,3,1.686334252)
+
+ tdSql.query("select udf2(c1) ,udf2(c6) from stb1 ")
+ tdSql.checkData(0,0,25.514701644)
+ tdSql.checkData(0,1,265.247614504)
+
+ tdSql.query("select udf2(c1)+100 ,udf2(c6)-100 ,udf2(c1)*100 ,udf2(c6)/100 from stb1 ")
+ tdSql.checkData(0,0,125.514701644)
+ tdSql.checkData(0,1,165.247614504)
+ tdSql.checkData(0,2,2551.470164435)
+ tdSql.checkData(0,3,2.652476145)
+
+ # # bug for crash when query sub table
+ tdSql.query("select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from ct1")
+ tdSql.checkData(0,0,378.215547010)
+ tdSql.checkData(0,1,353.808067460)
+ tdSql.checkData(0,2,2114.237451187)
+ tdSql.checkData(0,3,2.125468151)
+
+ tdSql.query("select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from stb1 ")
+ tdSql.checkData(0,0,490.358032462)
+ tdSql.checkData(0,1,400.460106627)
+ tdSql.checkData(0,2,2551.470164435)
+ tdSql.checkData(0,3,2.652476145)
+
+
+ # regular table with aggregate functions
+
+ tdSql.error("select udf1(num1) , count(num1) from tb;")
+ tdSql.error("select udf1(num1) , avg(num1) from tb;")
+ tdSql.error("select udf1(num1) , twa(num1) from tb;")
+ tdSql.error("select udf1(num1) , irate(num1) from tb;")
+ tdSql.error("select udf1(num1) , sum(num1) from tb;")
+ tdSql.error("select udf1(num1) , stddev(num1) from tb;")
+ tdSql.error("select udf1(num1) , mode(num1) from tb;")
+ tdSql.error("select udf1(num1) , HYPERLOGLOG(num1) from tb;")
+ # stable
+ tdSql.error("select udf1(c1) , count(c1) from stb1;")
+ tdSql.error("select udf1(c1) , avg(c1) from stb1;")
+ tdSql.error("select udf1(c1) , twa(c1) from stb1;")
+ tdSql.error("select udf1(c1) , irate(c1) from stb1;")
+ tdSql.error("select udf1(c1) , sum(c1) from stb1;")
+ tdSql.error("select udf1(c1) , stddev(c1) from stb1;")
+ tdSql.error("select udf1(c1) , mode(c1) from stb1;")
+ tdSql.error("select udf1(c1) , HYPERLOGLOG(c1) from stb1;")
+
+ # regular table with select functions
+
+ tdSql.query("select udf1(num1) , max(num1) from tb;")
+ tdSql.checkRows(1)
+ tdSql.query("select floor(num1) , max(num1) from tb;")
+ tdSql.checkRows(1)
+ tdSql.query("select udf1(num1) , min(num1) from tb;")
+ tdSql.checkRows(1)
+ tdSql.query("select ceil(num1) , min(num1) from tb;")
+ tdSql.checkRows(1)
+ tdSql.error("select udf1(num1) , first(num1) from tb;")
+
+ tdSql.error("select abs(num1) , first(num1) from tb;")
+
+ tdSql.error("select udf1(num1) , last(num1) from tb;")
+
+ tdSql.error("select round(num1) , last(num1) from tb;")
+
+ tdSql.query("select udf1(num1) , top(num1,1) from tb;")
+ tdSql.checkRows(1)
+ tdSql.query("select udf1(num1) , bottom(num1,1) from tb;")
+ tdSql.checkRows(1)
+ tdSql.error("select udf1(num1) , last_row(num1) from tb;")
+
+ tdSql.error("select round(num1) , last_row(num1) from tb;")
+
+
+ # stable
+ tdSql.query("select udf1(c1) , max(c1) from stb1;")
+ tdSql.checkRows(1)
+ tdSql.query("select abs(c1) , max(c1) from stb1;")
+ tdSql.checkRows(1)
+ tdSql.query("select udf1(c1) , min(c1) from stb1;")
+ tdSql.checkRows(1)
+ tdSql.query("select floor(c1) , min(c1) from stb1;")
+ tdSql.checkRows(1)
+ tdSql.error("select udf1(c1) , first(c1) from stb1;")
+
+ tdSql.error("select udf1(c1) , last(c1) from stb1;")
+
+ tdSql.query("select udf1(c1) , top(c1 ,1) from stb1;")
+ tdSql.checkRows(1)
+ tdSql.query("select abs(c1) , top(c1 ,1) from stb1;")
+ tdSql.checkRows(1)
+ tdSql.query("select udf1(c1) , bottom(c1,1) from stb1;")
+ tdSql.checkRows(1)
+ tdSql.query("select ceil(c1) , bottom(c1,1) from stb1;")
+ tdSql.checkRows(1)
+
+ tdSql.error("select udf1(c1) , last_row(c1) from stb1;")
+ tdSql.error("select ceil(c1) , last_row(c1) from stb1;")
+
+ # regular table with compute functions
+
+ tdSql.query("select udf1(num1) , abs(num1) from tb;")
+ tdSql.checkRows(12)
+ tdSql.query("select floor(num1) , abs(num1) from tb;")
+ tdSql.checkRows(12)
+
+ # # bug need fix
+
+ tdSql.query("select udf1(num1) , csum(num1) from tb;")
+ tdSql.checkRows(9)
+ tdSql.query("select ceil(num1) , csum(num1) from tb;")
+ tdSql.checkRows(9)
+ tdSql.query("select udf1(c1) , csum(c1) from stb1;")
+ tdSql.checkRows(22)
+ tdSql.query("select floor(c1) , csum(c1) from stb1;")
+ tdSql.checkRows(22)
+
+ # stable with compute functions
+ tdSql.query("select udf1(c1) , abs(c1) from stb1;")
+ tdSql.checkRows(25)
+ tdSql.query("select abs(c1) , ceil(c1) from stb1;")
+ tdSql.checkRows(25)
+
+ # nest query
+ tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from stb1 order by ts;")
+ tdSql.checkRows(25)
+ tdSql.checkData(0,0,None)
+ tdSql.checkData(0,1,None)
+ tdSql.checkData(1,0,88)
+ tdSql.checkData(1,1,8)
+
+ tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from ct1 order by ts;")
+ tdSql.checkRows(13)
+ tdSql.checkData(0,0,88)
+ tdSql.checkData(0,1,8)
+ tdSql.checkData(1,0,88)
+ tdSql.checkData(1,1,7)
+
+ # bug fix for crash
+ # order by udf function result
+ for _ in range(50):
+ tdSql.query("select udf2(c1) from stb1 group by 1-udf1(c1)")
+ print(tdSql.queryResult)
+
+ # udf functions with filter
+
+ tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from stb1 where c1 is null order by ts;")
+ tdSql.checkRows(3)
+ tdSql.checkData(0,0,None)
+ tdSql.checkData(0,1,None)
+
+ tdSql.query("select c1 ,udf1(c1) , c6 ,udf1(c6) from stb1 where c1 > 8 order by ts")
+ tdSql.checkRows(3)
+ tdSql.checkData(0,0,9)
+ tdSql.checkData(0,1,88)
+ tdSql.checkData(0,2,-99.990000000)
+ tdSql.checkData(0,3,88)
+
+ # udf functions with join
+ ts_start = 1652517451000
+ tdSql.execute("create stable st (ts timestamp , c1 int , c2 int ,c3 double ,c4 double ) tags(ind int)")
+ tdSql.execute("create table sub1 using st tags(1)")
+ tdSql.execute("create table sub2 using st tags(2)")
+
+ for i in range(10):
+ ts = ts_start + i *1000
+ tdSql.execute(" insert into sub1 values({} , {},{},{},{})".format(ts,i ,i*10,i*100.0,i*1000.0))
+ tdSql.execute(" insert into sub2 values({} , {},{},{},{})".format(ts,i ,i*10,i*100.0,i*1000.0))
+
+ tdSql.query("select sub1.c1, sub2.c2 from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
+ tdSql.checkData(0,0,0)
+ tdSql.checkData(0,1,0)
+ tdSql.checkData(1,0,1)
+ tdSql.checkData(1,1,10)
+
+ tdSql.query("select udf1(sub1.c1), udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
+ tdSql.checkData(0,0,88)
+ tdSql.checkData(0,1,88)
+ tdSql.checkData(1,0,88)
+ tdSql.checkData(1,1,88)
+
+ tdSql.query("select sub1.c1 , udf1(sub1.c1), sub2.c2 ,udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
+ tdSql.checkData(0,0,0)
+ tdSql.checkData(0,1,88)
+ tdSql.checkData(0,2,0)
+ tdSql.checkData(0,3,88)
+ tdSql.checkData(1,0,1)
+ tdSql.checkData(1,1,88)
+ tdSql.checkData(1,2,10)
+ tdSql.checkData(1,3,88)
+
+ tdSql.query("select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
+ tdSql.checkData(0,0,16.881943016)
+ tdSql.checkData(0,1,168.819430161)
+ tdSql.error("select sub1.c1 , udf2(sub1.c1), sub2.c2 ,udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
+
+ # udf functions with group by
+ tdSql.query("select udf1(c1) from ct1 group by c1")
+ tdSql.checkRows(10)
+ tdSql.query("select udf1(c1) from stb1 group by c1")
+ tdSql.checkRows(11)
+ tdSql.query("select c1,c2, udf1(c1,c2) from ct1 group by c1,c2")
+ tdSql.checkRows(10)
+ tdSql.query("select c1,c2, udf1(c1,c2) from stb1 group by c1,c2")
+ tdSql.checkRows(11)
+
+ tdSql.query("select udf2(c1) from ct1 group by c1")
+ tdSql.checkRows(10)
+ tdSql.query("select udf2(c1) from stb1 group by c1")
+ tdSql.checkRows(11)
+ tdSql.query("select c1,c2, udf2(c1,c6) from ct1 group by c1,c2")
+ tdSql.checkRows(10)
+ tdSql.query("select c1,c2, udf2(c1,c6) from stb1 group by c1,c2")
+ tdSql.checkRows(11)
+ tdSql.query("select udf2(c1) from stb1 group by udf1(c1)")
+ tdSql.checkRows(2)
+ tdSql.query("select udf2(c1) from stb1 group by floor(c1)")
+ tdSql.checkRows(11)
+
+ # udf mix with order by
+ tdSql.query("select udf2(c1) from stb1 group by floor(c1) order by udf2(c1)")
+ tdSql.checkRows(11)
+
+
+ def multi_cols_udf(self):
+ tdSql.query("select num1,num2,num3,udf1(num1,num2,num3) from tb")
+ tdSql.checkData(0,0,None)
+ tdSql.checkData(0,1,1)
+ tdSql.checkData(0,2,1.000000000)
+ tdSql.checkData(0,3,None)
+ tdSql.checkData(1,0,1)
+ tdSql.checkData(1,1,1)
+ tdSql.checkData(1,2,1.110000000)
+ tdSql.checkData(1,3,88)
+
+ tdSql.query("select c1,c6,udf1(c1,c6) from stb1 order by ts")
+ tdSql.checkData(1,0,8)
+ tdSql.checkData(1,1,88.880000000)
+ tdSql.checkData(1,2,88)
+
+ tdSql.query("select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;")
+ tdSql.checkRows(22)
+
+ tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
+ tdSql.checkData(0,0,169.661427555)
+ tdSql.checkData(0,1,169.661427555)
+
+
+ def unexpected_create(self):
+
+ tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
+
+ def loop_kill_udfd(self):
+
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ tdLog.exit("taosd not found!")
+ else:
+ tdLog.info("taosd found in %s" % buildPath)
+
+ cfgPath = buildPath + "/../sim/dnode1/cfg"
+ udfdPath = buildPath +'/build/bin/udfd'
+
+ for i in range(5):
+
+ tdLog.info(" loop restart udfd %d_th" % i)
+
+ tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
+ tdSql.checkData(0,0,169.661427555)
+ tdSql.checkData(0,1,169.661427555)
+ # stop udfd cmds
+ get_processID = "ps -ef | grep -w udfd | grep 'root' | grep -v grep| grep -v defunct | awk '{print $2}'"
+ processID = subprocess.check_output(get_processID, shell=True).decode("utf-8")
+ stop_udfd = " kill -9 %s" % processID
+ os.system(stop_udfd)
+
+ time.sleep(2)
+
+ tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
+ tdSql.checkData(0,0,169.661427555)
+ tdSql.checkData(0,1,169.661427555)
+
+ # # start udfd cmds
+ # start_udfd = "nohup " + udfdPath +'-c' +cfgPath +" > /dev/null 2>&1 &"
+ # tdLog.info("start udfd : %s " % start_udfd)
+
+
+ def restart_taosd_query_udf(self):
+
+ for i in range(5):
+ time.sleep(5)
+ tdLog.info(" this is %d_th restart taosd " %i)
+ tdSql.execute("use db ")
+ tdSql.query("select count(*) from stb1")
+ tdSql.checkRows(1)
+ tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
+ tdSql.checkData(0,0,169.661427555)
+ tdSql.checkData(0,1,169.661427555)
+ tdDnodes.stop(1)
+ time.sleep(2)
+ tdDnodes.start(1)
+ time.sleep(5)
+
+
+
+ def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
+ tdSql.prepare()
+
+ self.prepare_udf_so()
+ self.prepare_data()
+ self.create_udf_function()
+ self.basic_udf_query()
+ self.loop_kill_udfd()
+ # self.restart_taosd_query_udf()
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/2-query/query_cols_tags_and_or.py b/tests/system-test/2-query/query_cols_tags_and_or.py
index 77e91aa983..5dc5a2f123 100644
--- a/tests/system-test/2-query/query_cols_tags_and_or.py
+++ b/tests/system-test/2-query/query_cols_tags_and_or.py
@@ -22,18 +22,6 @@ class TDTestCase:
tdSql.init(conn.cursor(), logSql)
def insertData(self, tb_name):
- # insert_sql_list = [f'insert into {tb_name} values ("2021-01-01 12:00:00", 1, 1, 1, 3, 1.1, 1.1, "binary", "nchar", true, 1)',
- # f'insert into {tb_name} values ("2021-01-05 12:00:00", 2, 2, 1, 3, 1.1, 1.1, "binary", "nchar", true, 2)',
- # f'insert into {tb_name} values ("2021-01-07 12:00:00", 1, 3, 1, 2, 1.1, 1.1, "binary", "nchar", true, 3)',
- # f'insert into {tb_name} values ("2021-01-09 12:00:00", 1, 2, 4, 3, 1.1, 1.1, "binary", "nchar", true, 4)',
- # f'insert into {tb_name} values ("2021-01-11 12:00:00", 1, 2, 5, 5, 1.1, 1.1, "binary", "nchar", true, 5)',
- # f'insert into {tb_name} values ("2021-01-13 12:00:00", 1, 2, 1, 3, 6.6, 1.1, "binary", "nchar", true, 6)',
- # f'insert into {tb_name} values ("2021-01-15 12:00:00", 1, 2, 1, 3, 1.1, 7.7, "binary", "nchar", true, 7)',
- # f'insert into {tb_name} values ("2021-01-17 12:00:00", 1, 2, 1, 3, 1.1, 1.1, "binary8", "nchar", true, 8)',
- # f'insert into {tb_name} values ("2021-01-19 12:00:00", 1, 2, 1, 3, 1.1, 1.1, "binary", "nchar9", true, 9)',
- # f'insert into {tb_name} values ("2021-01-21 12:00:00", 1, 2, 1, 3, 1.1, 1.1, "binary", "nchar", false, 10)',
- # f'insert into {tb_name} values ("2021-01-23 12:00:00", 1, 3, 1, 3, 1.1, 1.1, Null, Null, false, 11)'
- # ]
insert_sql_list = [f'insert into {tb_name} values ("2021-01-01 12:00:00", 1, 1, 1, 3, 1.1, 1.1, "binary", "nchar", true, 1, 2, 3, 4)',
f'insert into {tb_name} values ("2021-01-05 12:00:00", 2, 2, 1, 3, 1.1, 1.1, "binary", "nchar", true, 2, 3, 4, 5)',
f'insert into {tb_name} values ("2021-01-07 12:00:00", 1, 3, 1, 2, 1.1, 1.1, "binary", "nchar", true, 3, 4, 5, 6)',
@@ -54,7 +42,6 @@ class TDTestCase:
tb_name = tdCom.getLongName(8, "letters")
tdSql.execute(
f"CREATE TABLE {tb_name} (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned)")
- # f"CREATE TABLE {tb_name} (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 int)")
self.insertData(tb_name)
return tb_name
@@ -95,6 +82,31 @@ class TDTestCase:
def queryTsCol(self, tb_name, check_elm=None):
select_elm = "*" if check_elm is None else check_elm
+ # ts in
+ query_sql = f'select {select_elm} from {tb_name} where ts in ("2021-01-11 12:00:00", "2021-01-13 12:00:00")'
+ tdSql.query(query_sql)
+ tdSql.checkRows(2)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 6) if select_elm == "*" else False
+ # ts not in
+ query_sql = f'select {select_elm} from {tb_name} where ts not in ("2021-01-11 12:00:00", "2021-01-13 12:00:00")'
+ tdSql.query(query_sql)
+ tdSql.checkRows(9)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
+ # ts not null
+ query_sql = f'select {select_elm} from {tb_name} where ts is not Null'
+ tdSql.query(query_sql)
+ tdSql.checkRows(11)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
+ # ts null
+ query_sql = f'select {select_elm} from {tb_name} where ts is Null'
+ tdSql.query(query_sql)
+ tdSql.checkRows(0)
+ # not support like not like match nmatch
+ tdSql.error(f'select {select_elm} from {tb_name} where ts like ("2021-01-11 12:00:00%")')
+ tdSql.error(f'select {select_elm} from {tb_name} where ts not like ("2021-01-11 12:00:0_")')
+ tdSql.error(f'select {select_elm} from {tb_name} where ts match "2021-01-11 12:00:00%"')
+ tdSql.error(f'select {select_elm} from {tb_name} where ts nmatch "2021-01-11 12:00:00%"')
+
# ts and ts
query_sql = f'select {select_elm} from {tb_name} where ts > "2021-01-11 12:00:00" or ts < "2021-01-13 12:00:00"'
tdSql.query(query_sql)
@@ -1422,9 +1434,9 @@ class TDTestCase:
tdSql.query(query_sql)
tdSql.checkRows(11)
tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
- query_sql = f'select c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13 from {tb_name} where c9 > "binary" and c9 >= "binary8" or c9 < "binary9" and c9 <= "binary" and c9 != 2 and c9 <> 2 and c9 = 4 or c9 is not null and c9 between 2 and 4 and c9 not between 1 and 2 and c9 in (2,4) and c9 not in (1,2) or c9 match "binary[28]" or c9 nmatch "binary"'
+ query_sql = f'select c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13 from {tb_name} where c9 > "binary" and c9 >= "binary8" or c9 < "binary9" and c9 <= "binary" and c9 != 2 and c9 <> 2 and c9 = 4 or c9 is not null and c9 between 2 and 4 and c9 not between 1 and 2 and c9 in (2,4) and c9 not in (1,2)'
tdSql.query(query_sql)
- tdSql.checkRows(11)
+ tdSql.checkRows(9)
def queryFullColType(self, tb_name, check_elm=None):
select_elm = "*" if check_elm is None else check_elm
diff --git a/tests/system-test/7-tmq/basic5.py b/tests/system-test/7-tmq/basic5.py
index 65840349ba..c2fe25efc4 100644
--- a/tests/system-test/7-tmq/basic5.py
+++ b/tests/system-test/7-tmq/basic5.py
@@ -52,7 +52,7 @@ class TDTestCase:
def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl):
tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups))
tsql.execute("use %s" %dbName)
- tsql.execute("create table %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName)
+ tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName)
pre_create = "create table"
sql = pre_create
#tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname))
@@ -345,11 +345,11 @@ class TDTestCase:
after starting consumer, create ctables ")
# create and start thread
parameterDict = {'cfg': '', \
- 'dbName': 'db2', \
+ 'dbName': 'db3', \
'vgroups': 1, \
'stbName': 'stb', \
'ctbNum': 10, \
- 'rowsPerTbl': 10000, \
+ 'rowsPerTbl': 30000, \
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
@@ -374,22 +374,33 @@ class TDTestCase:
break
else:
time.sleep(1)
-
+
+ tdLog.info("create stable2 for the seconde topic")
+ parameterDict2 = {'cfg': '', \
+ 'dbName': 'db3', \
+ 'vgroups': 1, \
+ 'stbName': 'stb2', \
+ 'ctbNum': 10, \
+ 'rowsPerTbl': 30000, \
+ 'batchNum': 100, \
+ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000
+ parameterDict2['cfg'] = cfgPath
+ tdSql.execute("create stable if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(parameterDict2['dbName'], parameterDict2['stbName']))
+
tdLog.info("create topics from super table")
- topicFromStb = 'topic_stb_column2'
- topicFromCtb = 'topic_ctb_column2'
+ topicFromStb = 'topic_stb_column3'
+ topicFromStb2 = 'topic_stb_column32'
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb, parameterDict['dbName'], parameterDict['stbName']))
- tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s_0" %(topicFromCtb, parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb2, parameterDict2['dbName'], parameterDict2['stbName']))
- time.sleep(1)
tdSql.query("show topics")
topic1 = tdSql.getData(0 , 0)
topic2 = tdSql.getData(1 , 0)
tdLog.info("show topics: %s, %s"%(topic1, topic2))
- if topic1 != topicFromStb and topic1 != topicFromCtb:
+ if topic1 != topicFromStb and topic1 != topicFromStb2:
tdLog.exit("topic error1")
- if topic2 != topicFromStb and topic2 != topicFromCtb:
+ if topic2 != topicFromStb and topic2 != topicFromStb2:
tdLog.exit("topic error2")
tdLog.info("create consume info table and consume result table")
@@ -397,10 +408,9 @@ class TDTestCase:
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)"%cdbName)
tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
- rowsOfNewCtb = 1000
consumerId = 0
- expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + rowsOfNewCtb
- topicList = topicFromStb
+ expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"]
+ topicList = topicFromStb + ',' + topicFromStb2
ifcheckdata = 0
keyList = 'group.id:cgrp1,\
enable.auto.commit:false,\
@@ -432,17 +442,13 @@ class TDTestCase:
tdLog.info(shellCmd)
os.system(shellCmd)
- # create new child table and insert data
- newCtbName = 'newctb'
- tdSql.query("create table %s.%s using %s.%s tags(9999)"%(parameterDict["dbName"], newCtbName, parameterDict["dbName"], parameterDict["stbName"]))
- startTs = parameterDict["startTs"]
- for j in range(rowsOfNewCtb):
- sql = "insert into %s.%s values (%d, %d, 'tmqrow_%d') "%(parameterDict["dbName"], newCtbName, startTs + j, j, j)
- tdSql.execute(sql)
- tdLog.debug("insert data into new child table ............ [OK]")
+ # start the second thread to create new child table and insert data
+ prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2)
+ prepareEnvThread2.start()
# wait for data ready
prepareEnvThread.join()
+ prepareEnvThread2.join()
tdLog.info("insert process end, and start to check consume result")
while 1:
@@ -457,7 +463,7 @@ class TDTestCase:
tdSql.checkData(0 , 3, expectrowcnt)
tdSql.query("drop topic %s"%topicFromStb)
- tdSql.query("drop topic %s"%topicFromCtb)
+ tdSql.query("drop topic %s"%topicFromStb2)
tdLog.printNoPrefix("======== test case 3 end ...... ")
@@ -474,7 +480,7 @@ class TDTestCase:
self.tmqCase1(cfgPath, buildPath)
self.tmqCase2(cfgPath, buildPath)
- #self.tmqCase3(cfgPath, buildPath)
+ self.tmqCase3(cfgPath, buildPath)
def stop(self):
tdSql.close()
diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh
index c80206abbc..585acb733a 100755
--- a/tests/system-test/fulltest.sh
+++ b/tests/system-test/fulltest.sh
@@ -7,6 +7,7 @@ python3 ./test.py -f 0-others/taosShellError.py
python3 ./test.py -f 0-others/taosShellNetChk.py
python3 ./test.py -f 0-others/telemetry.py
python3 ./test.py -f 0-others/taosdMonitor.py
+python3 ./test.py -f 0-others/udfTest.py
python3 ./test.py -f 0-others/user_control.py
@@ -52,6 +53,6 @@ python3 ./test.py -f 2-query/tan.py
python3 ./test.py -f 2-query/arcsin.py
python3 ./test.py -f 2-query/arccos.py
python3 ./test.py -f 2-query/arctan.py
-# python3 ./test.py -f 2-query/query_cols_tags_and_or.py
+python3 ./test.py -f 2-query/query_cols_tags_and_or.py
python3 ./test.py -f 7-tmq/basic5.py
diff --git a/tests/test/c/tmqSim.c b/tests/test/c/tmqSim.c
index 1228d6174c..b3dba695a7 100644
--- a/tests/test/c/tmqSim.c
+++ b/tests/test/c/tmqSim.c
@@ -37,9 +37,10 @@ typedef struct {
TdThread thread;
int32_t consumerId;
- int32_t autoCommitIntervalMs; // 1000 ms
- char autoCommit[8]; // true, false
- char autoOffsetRest[16]; // none, earliest, latest
+ int32_t ifManualCommit;
+ //int32_t autoCommitIntervalMs; // 1000 ms
+ //char autoCommit[8]; // true, false
+ //char autoOffsetRest[16]; // none, earliest, latest
int32_t ifCheckData;
int64_t expectMsgCnt;
@@ -136,9 +137,9 @@ void saveConfigToLogFile() {
for (int32_t i = 0; i < g_stConfInfo.numOfThread; i++) {
taosFprintfFile(g_fp, "# consumer %d info:\n", g_stConfInfo.stThreads[i].consumerId);
- taosFprintfFile(g_fp, " auto commit: %s\n", g_stConfInfo.stThreads[i].autoCommit);
- taosFprintfFile(g_fp, " auto commit interval ms: %d\n", g_stConfInfo.stThreads[i].autoCommitIntervalMs);
- taosFprintfFile(g_fp, " auto offset rest: %s\n", g_stConfInfo.stThreads[i].autoOffsetRest);
+ //taosFprintfFile(g_fp, " auto commit: %s\n", g_stConfInfo.stThreads[i].autoCommit);
+ //taosFprintfFile(g_fp, " auto commit interval ms: %d\n", g_stConfInfo.stThreads[i].autoCommitIntervalMs);
+ //taosFprintfFile(g_fp, " auto offset rest: %s\n", g_stConfInfo.stThreads[i].autoOffsetRest);
taosFprintfFile(g_fp, " Topics: ");
for (int j = 0; j < g_stConfInfo.stThreads[i].numOfTopic; j++) {
taosFprintfFile(g_fp, "%s, ", g_stConfInfo.stThreads[i].topics[j]);
@@ -232,13 +233,18 @@ static int32_t msg_process(TAOS_RES* msg, int64_t msgIndex, int32_t threadLable)
while (1) {
TAOS_ROW row = taos_fetch_row(msg);
- if (row == NULL) break;
- if (0 != g_stConfInfo.showRowFlag) {
- TAOS_FIELD* fields = taos_fetch_fields(msg);
- int32_t numOfFields = taos_field_count(msg);
- taos_print_row(buf, row, fields, numOfFields);
+
+ if (row == NULL) break;
+
+ TAOS_FIELD* fields = taos_fetch_fields(msg);
+ int32_t numOfFields = taos_field_count(msg);
+
+ taos_print_row(buf, row, fields, numOfFields);
+
+ if (0 != g_stConfInfo.showRowFlag) {
taosFprintfFile(g_fp, "rows[%d]: %s\n", totalRows, buf);
}
+
totalRows++;
}
@@ -316,6 +322,8 @@ int32_t saveConsumeResult(SThreadInfo* pInfo) {
sprintf(sqlStr, "insert into %s.consumeresult values (now, %d, %" PRId64 ", %" PRId64 ", %d)", g_stConfInfo.cdbName,
pInfo->consumerId, pInfo->consumeMsgCnt, pInfo->consumeRowCnt, pInfo->checkresult);
+ taosFprintfFile(g_fp, "== save result sql: %s \n", sqlStr);
+
TAOS_RES* pRes = taos_query(pConn, sqlStr);
if (taos_errno(pRes) != 0) {
pError("error in save consumeinfo, reason:%s\n", taos_errstr(pRes));
@@ -384,8 +392,12 @@ void* consumeThreadFunc(void* param) {
loop_consume(pInfo);
- tmq_commit(pInfo->tmq, NULL, 0);
-
+ if (pInfo->ifManualCommit) {
+ taosFprintfFile(g_fp, "tmq_commit() manual commit when consume end.\n");
+ pPrint("tmq_commit() manual commit when consume end.\n");
+ tmq_commit(pInfo->tmq, NULL, 0);
+ }
+
err = tmq_unsubscribe(pInfo->tmq);
if (err) {
pError("tmq_unsubscribe() fail, reason: %s\n", tmq_err2str(err));
@@ -470,9 +482,9 @@ int32_t getConsumeInfo() {
int32_t* lengths = taos_fetch_lengths(pRes);
// set default value
- g_stConfInfo.stThreads[numOfThread].autoCommitIntervalMs = 5000;
- memcpy(g_stConfInfo.stThreads[numOfThread].autoCommit, "true", strlen("true"));
- memcpy(g_stConfInfo.stThreads[numOfThread].autoOffsetRest, "earlieast", strlen("earlieast"));
+ //g_stConfInfo.stThreads[numOfThread].autoCommitIntervalMs = 5000;
+ //memcpy(g_stConfInfo.stThreads[numOfThread].autoCommit, "true", strlen("true"));
+ //memcpy(g_stConfInfo.stThreads[numOfThread].autoOffsetRest, "earlieast", strlen("earlieast"));
for (int i = 0; i < num_fields; ++i) {
if (row[i] == NULL || 0 == i) {
@@ -489,12 +501,8 @@ int32_t getConsumeInfo() {
g_stConfInfo.stThreads[numOfThread].expectMsgCnt = *((int64_t*)row[i]);
} else if ((5 == i) && (fields[i].type == TSDB_DATA_TYPE_INT)) {
g_stConfInfo.stThreads[numOfThread].ifCheckData = *((int32_t*)row[i]);
- } else if ((6 == i) && (fields[i].type == TSDB_DATA_TYPE_BINARY)) {
- memcpy(g_stConfInfo.stThreads[numOfThread].autoCommit, row[i], lengths[i]);
- } else if ((7 == i) && (fields[i].type == TSDB_DATA_TYPE_INT)) {
- g_stConfInfo.stThreads[numOfThread].autoCommitIntervalMs = *((int32_t*)row[i]);
- } else if ((8 == i) && (fields[i].type == TSDB_DATA_TYPE_BINARY)) {
- memcpy(g_stConfInfo.stThreads[numOfThread].autoOffsetRest, row[i], lengths[i]);
+ } else if ((6 == i) && (fields[i].type == TSDB_DATA_TYPE_INT)) {
+ g_stConfInfo.stThreads[numOfThread].ifManualCommit = *((int32_t*)row[i]);
}
}
numOfThread++;
diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c
index 39b97004ff..4825aae699 100644
--- a/tools/shell/src/shellEngine.c
+++ b/tools/shell/src/shellEngine.c
@@ -604,6 +604,7 @@ int32_t shellCalcColWidth(TAOS_FIELD *field, int32_t precision) {
case TSDB_DATA_TYPE_DOUBLE:
return TMAX(25, width);
+ case TSDB_DATA_TYPE_JSON:
case TSDB_DATA_TYPE_BINARY:
if (field->bytes > shell.args.displayWidth) {
return TMAX(shell.args.displayWidth, width);