diff --git a/include/common/tcommon.h b/include/common/tcommon.h index 3d15e8b087..d8264ac5b5 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -108,6 +108,7 @@ typedef struct SDataBlockInfo { // TODO: optimize and remove following int32_t childId; // used for stream, do not serialize EStreamType type; // used for stream, do not serialize + STimeWindow calWin; // used for stream, do not serialize } SDataBlockInfo; typedef struct SSDataBlock { diff --git a/include/common/tdataformat.h b/include/common/tdataformat.h index eaa8ac5cc4..1011b90ce8 100644 --- a/include/common/tdataformat.h +++ b/include/common/tdataformat.h @@ -64,18 +64,22 @@ int32_t tPutValue(uint8_t *p, SValue *pValue, int8_t type); int32_t tGetValue(uint8_t *p, SValue *pValue, int8_t type); int tValueCmprFn(const SValue *pValue1, const SValue *pValue2, int8_t type); -// STSRow2 +// SColVal #define COL_VAL_NONE(CID, TYPE) ((SColVal){.cid = (CID), .type = (TYPE), .isNone = 1}) #define COL_VAL_NULL(CID, TYPE) ((SColVal){.cid = (CID), .type = (TYPE), .isNull = 1}) #define COL_VAL_VALUE(CID, TYPE, V) ((SColVal){.cid = (CID), .type = (TYPE), .value = (V)}) +// STSRow2 +#define TSROW_LEN(PROW, V) tGetI32v((uint8_t *)(PROW)->data, (V) ? &(V) : NULL) +#define TSROW_SVER(PROW, V) tGetI32v((PROW)->data + TSROW_LEN(PROW, NULL), (V) ? &(V) : NULL) + int32_t tTSRowNew(STSRowBuilder *pBuilder, SArray *pArray, STSchema *pTSchema, STSRow2 **ppRow); int32_t tTSRowClone(const STSRow2 *pRow, STSRow2 **ppRow); void tTSRowFree(STSRow2 *pRow); void tTSRowGet(STSRow2 *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal); int32_t tTSRowToArray(STSRow2 *pRow, STSchema *pTSchema, SArray **ppArray); int32_t tPutTSRow(uint8_t *p, STSRow2 *pRow); -int32_t tGetTSRow(uint8_t *p, STSRow2 *pRow); +int32_t tGetTSRow(uint8_t *p, STSRow2 **ppRow); // STSRowBuilder #define tsRowBuilderInit() ((STSRowBuilder){0}) @@ -97,7 +101,7 @@ int32_t tEncodeTag(SEncoder *pEncoder, const STag *pTag); int32_t tDecodeTag(SDecoder *pDecoder, STag **ppTag); int32_t tTagToValArray(const STag *pTag, SArray **ppArray); void debugPrintSTag(STag *pTag, const char *tag, int32_t ln); // TODO: remove -int32_t parseJsontoTagData(const char* json, SArray* pTagVals, STag** ppTag, void* pMsgBuf); +int32_t parseJsontoTagData(const char *json, SArray *pTagVals, STag **ppTag, void *pMsgBuf); // STRUCT ================= struct STColumn { @@ -123,16 +127,16 @@ struct STSchema { #define TSROW_KV_SMALL ((uint8_t)0x10U) #define TSROW_KV_MID ((uint8_t)0x20U) #define TSROW_KV_BIG ((uint8_t)0x40U) +#pragma pack(push, 1) struct STSRow2 { - TSKEY ts; - uint8_t flags; - int32_t sver; - uint32_t nData; - uint8_t *pData; + TSKEY ts; + uint8_t flags; + uint8_t data[]; }; +#pragma pack(pop) struct STSRowBuilder { - STSRow2 tsRow; + // STSRow2 tsRow; int32_t szBuf; uint8_t *pBuf; }; @@ -226,50 +230,6 @@ struct STag { memcpy(varDataVal(x), (str), (_size)); \ } while (0); -// ----------------- TSDB COLUMN DEFINITION - -#define colType(col) ((col)->type) -#define colFlags(col) ((col)->flags) -#define colColId(col) ((col)->colId) -#define colBytes(col) ((col)->bytes) -#define colOffset(col) ((col)->offset) - -#define colSetType(col, t) (colType(col) = (t)) -#define colSetFlags(col, f) (colFlags(col) = (f)) -#define colSetColId(col, id) (colColId(col) = (id)) -#define colSetBytes(col, b) (colBytes(col) = (b)) -#define colSetOffset(col, o) (colOffset(col) = (o)) - -// ----------------- TSDB SCHEMA DEFINITION - -#define schemaNCols(s) ((s)->numOfCols) -#define schemaVersion(s) 
((s)->version) -#define schemaTLen(s) ((s)->tlen) -#define schemaFLen(s) ((s)->flen) -#define schemaVLen(s) ((s)->vlen) -#define schemaColAt(s, i) ((s)->columns + i) -#define tdFreeSchema(s) taosMemoryFreeClear((s)) - -STSchema *tdDupSchema(const STSchema *pSchema); -int32_t tdEncodeSchema(void **buf, STSchema *pSchema); -void *tdDecodeSchema(void *buf, STSchema **pRSchema); - -static FORCE_INLINE int32_t comparColId(const void *key1, const void *key2) { - if (*(int16_t *)key1 > ((STColumn *)key2)->colId) { - return 1; - } else if (*(int16_t *)key1 < ((STColumn *)key2)->colId) { - return -1; - } else { - return 0; - } -} - -static FORCE_INLINE STColumn *tdGetColOfID(STSchema *pSchema, int16_t colId) { - void *ptr = bsearch(&colId, (void *)pSchema->columns, schemaNCols(pSchema), sizeof(STColumn), comparColId); - if (ptr == NULL) return NULL; - return (STColumn *)ptr; -} - // ----------------- SCHEMA BUILDER DEFINITION typedef struct { int32_t tCols; @@ -299,141 +259,6 @@ void tdResetTSchemaBuilder(STSchemaBuilder *pBuilder, schema_ver_t version) int32_t tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int8_t flags, col_id_t colId, col_bytes_t bytes); STSchema *tdGetSchemaFromBuilder(STSchemaBuilder *pBuilder); -// ----------------- Semantic timestamp key definition -// typedef uint64_t TKEY; -#define TKEY TSKEY - -#define TKEY_INVALID UINT64_MAX -#define TKEY_NULL TKEY_INVALID -#define TKEY_NEGATIVE_FLAG (((TKEY)1) << 63) -#define TKEY_VALUE_FILTER (~(TKEY_NEGATIVE_FLAG)) - -#define TKEY_IS_NEGATIVE(tkey) (((tkey)&TKEY_NEGATIVE_FLAG) != 0) -#define TKEY_IS_DELETED(tkey) (false) - -#define tdGetTKEY(key) (key) -#define tdGetKey(tskey) (tskey) - -#define MIN_TS_KEY ((TSKEY)0x8000000000000001) -#define MAX_TS_KEY ((TSKEY)0x7fffffffffffffff) - -#define TD_TO_TKEY(key) tdGetTKEY(((key) < MIN_TS_KEY) ? MIN_TS_KEY : (((key) > MAX_TS_KEY) ? 
MAX_TS_KEY : key)) - -static FORCE_INLINE TKEY keyToTkey(TSKEY key) { - TSKEY lkey = key; - if (key > MAX_TS_KEY) { - lkey = MAX_TS_KEY; - } else if (key < MIN_TS_KEY) { - lkey = MIN_TS_KEY; - } - - return tdGetTKEY(lkey); -} - -static FORCE_INLINE int32_t tkeyComparFn(const void *tkey1, const void *tkey2) { - TSKEY key1 = tdGetKey(*(TKEY *)tkey1); - TSKEY key2 = tdGetKey(*(TKEY *)tkey2); - - if (key1 < key2) { - return -1; - } else if (key1 > key2) { - return 1; - } else { - return 0; - } -} - -// ----------------- Data column structure -// SDataCol arrangement: data => bitmap => dataOffset -typedef struct SDataCol { - int8_t type; // column type - uint8_t bitmap : 1; // 0: no bitmap if all rows are NORM, 1: has bitmap if has NULL/NORM rows - uint8_t reserve : 7; - int16_t colId; // column ID - int32_t bytes; // column data bytes defined - int32_t offset; // data offset in a SDataRow (including the header size) - int32_t spaceSize; // Total space size for this column - int32_t len; // column data length - VarDataOffsetT *dataOff; // For binary and nchar data, the offset in the data column - void *pData; // Actual data pointer - void *pBitmap; // Bitmap pointer - TSKEY ts; // only used in last NULL column -} SDataCol; - -#define isAllRowsNull(pCol) ((pCol)->len == 0) -#define isAllRowsNone(pCol) ((pCol)->len == 0) -static FORCE_INLINE void dataColReset(SDataCol *pDataCol) { pDataCol->len = 0; } - -int32_t tdAllocMemForCol(SDataCol *pCol, int32_t maxPoints); - -void dataColInit(SDataCol *pDataCol, STColumn *pCol, int32_t maxPoints); -int32_t dataColAppendVal(SDataCol *pCol, const void *value, int32_t numOfRows, int32_t maxPoints); -void *dataColSetOffset(SDataCol *pCol, int32_t nEle); - -bool isNEleNull(SDataCol *pCol, int32_t nEle); - -typedef struct { - col_id_t maxCols; // max number of columns - col_id_t numOfCols; // Total number of cols - int32_t maxPoints; // max number of points - int32_t numOfRows; - int32_t bitmapMode : 1; // default is 0(2 bits), otherwise 1(1 bit) - int32_t sversion : 31; // TODO: set sversion(not used yet) - SDataCol *cols; -} SDataCols; - -static FORCE_INLINE bool tdDataColsIsBitmapI(SDataCols *pCols) { return pCols->bitmapMode != TSDB_BITMODE_DEFAULT; } -static FORCE_INLINE void tdDataColsSetBitmapI(SDataCols *pCols) { pCols->bitmapMode = TSDB_BITMODE_ONE_BIT; } -static FORCE_INLINE bool tdIsBitmapModeI(int8_t bitmapMode) { return bitmapMode != TSDB_BITMODE_DEFAULT; } - -#define keyCol(pCols) (&((pCols)->cols[0])) // Key column -#define dataColsTKeyAt(pCols, idx) ((TKEY *)(keyCol(pCols)->pData))[(idx)] // the idx row of column-wised data -#define dataColsKeyAt(pCols, idx) tdGetKey(dataColsTKeyAt(pCols, idx)) -static FORCE_INLINE TKEY dataColsTKeyFirst(SDataCols *pCols) { - if (pCols->numOfRows) { - return dataColsTKeyAt(pCols, 0); - } else { - return TKEY_INVALID; - } -} - -static FORCE_INLINE TSKEY dataColsKeyAtRow(SDataCols *pCols, int32_t row) { - assert(row < pCols->numOfRows); - return dataColsKeyAt(pCols, row); -} - -static FORCE_INLINE TSKEY dataColsKeyFirst(SDataCols *pCols) { - if (pCols->numOfRows) { - return dataColsKeyAt(pCols, 0); - } else { - return TSDB_DATA_TIMESTAMP_NULL; - } -} - -static FORCE_INLINE TKEY dataColsTKeyLast(SDataCols *pCols) { - if (pCols->numOfRows) { - return dataColsTKeyAt(pCols, pCols->numOfRows - 1); - } else { - return TKEY_INVALID; - } -} - -static FORCE_INLINE TSKEY dataColsKeyLast(SDataCols *pCols) { - if (pCols->numOfRows) { - return dataColsKeyAt(pCols, pCols->numOfRows - 1); - } else { - return 
TSDB_DATA_TIMESTAMP_NULL; - } -} - -SDataCols *tdNewDataCols(int32_t maxCols, int32_t maxRows); -void tdResetDataCols(SDataCols *pCols); -int32_t tdInitDataCols(SDataCols *pCols, STSchema *pSchema); -SDataCols *tdDupDataCols(SDataCols *pCols, bool keepData); -SDataCols *tdFreeDataCols(SDataCols *pCols); -int32_t tdMergeDataCols(SDataCols *target, SDataCols *source, int32_t rowsToMerge, int32_t *pOffset, bool update, - TDRowVerT maxVer); - #endif #ifdef __cplusplus diff --git a/include/common/trow.h b/include/common/trow.h index 086a6ce6fb..807a4c0f0a 100644 --- a/include/common/trow.h +++ b/include/common/trow.h @@ -168,7 +168,7 @@ typedef struct { // N.B. If without STSchema, getExtendedRowSize() is used to get the rowMaxBytes and // (int32_t)ceil((double)nCols/TD_VTYPE_PARTS) should be added if TD_SUPPORT_BITMAP defined. -#define TD_ROW_MAX_BYTES_FROM_SCHEMA(s) (schemaTLen(s) + TD_ROW_HEAD_LEN) +#define TD_ROW_MAX_BYTES_FROM_SCHEMA(s) ((s)->tlen + TD_ROW_HEAD_LEN) #define TD_ROW_SET_INFO(r, i) (TD_ROW_INFO(r) = (i)) #define TD_ROW_SET_TYPE(r, t) (TD_ROW_TYPE(r) = (t)) @@ -223,9 +223,10 @@ int32_t tdSetBitmapValTypeN(void *pBitmap, int16_t nEle, TDR static FORCE_INLINE int32_t tdGetBitmapValType(const void *pBitmap, int16_t colIdx, TDRowValT *pValType, int8_t bitmapMode); bool tdIsBitmapBlkNorm(const void *pBitmap, int32_t numOfBits, int8_t bitmapMode); -int32_t tdAppendValToDataCol(SDataCol *pCol, TDRowValT valType, const void *val, int32_t numOfRows, int32_t maxPoints, - int8_t bitmapMode, bool isMerge); -int32_t tdAppendSTSRowToDataCol(STSRow *pRow, STSchema *pSchema, SDataCols *pCols, bool isMerge); +// int32_t tdAppendValToDataCol(SDataCol *pCol, TDRowValT valType, const void *val, int32_t numOfRows, int32_t +// maxPoints, +// int8_t bitmapMode, bool isMerge); +// int32_t tdAppendSTSRowToDataCol(STSRow *pRow, STSchema *pSchema, SDataCols *pCols, bool isMerge); int32_t tdGetBitmapValTypeII(const void *pBitmap, int16_t colIdx, TDRowValT *pValType); int32_t tdSetBitmapValTypeI(void *pBitmap, int16_t colIdx, TDRowValT valType); @@ -318,12 +319,9 @@ bool tdSTSRowGetVal(STSRowIter *pIter, col_id_t colId, col_type_t colType, SC bool tdGetTpRowDataOfCol(STSRowIter *pIter, col_type_t colType, int32_t offset, SCellVal *pVal); bool tdGetKvRowValOfColEx(STSRowIter *pIter, col_id_t colId, col_type_t colType, col_id_t *nIdx, SCellVal *pVal); bool tdSTSRowIterNext(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCellVal *pVal); -STSRow *mergeTwoRows(void *buffer, STSRow *row1, STSRow *row2, STSchema *pSchema1, STSchema *pSchema2); -int32_t tdGetColDataOfRow(SCellVal *pVal, SDataCol *pCol, int32_t row, int8_t bitmapMode); bool tdSTpRowGetVal(STSRow *pRow, col_id_t colId, col_type_t colType, int32_t flen, uint32_t offset, col_id_t colIdx, SCellVal *pVal); bool tdSKvRowGetVal(STSRow *pRow, col_id_t colId, col_id_t colIdx, SCellVal *pVal); -int32_t dataColGetNEleLen(SDataCol *pDataCol, int32_t rows, int8_t bitmapMode); void tdSCellValPrint(SCellVal *pVal, int8_t colType); void tdSRowPrint(STSRow *row, STSchema *pSchema, const char *tag); diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h index 783193db49..630e983f81 100644 --- a/include/libs/executor/executor.h +++ b/include/libs/executor/executor.h @@ -42,25 +42,28 @@ typedef struct SReadHandle { bool initTqReader; } SReadHandle; +// in queue mode, data streams are seperated by msg typedef enum { OPTR_EXEC_MODEL_BATCH = 0x1, OPTR_EXEC_MODEL_STREAM = 0x2, + OPTR_EXEC_MODEL_QUEUE = 0x3, } EOPTR_EXEC_MODEL; /** - * 
Create the exec task for streaming mode + * Create the exec task for stream mode * @param pMsg - * @param streamReadHandle + * @param SReadHandle * @return */ qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers); /** - * Switch the stream scan to snapshot mode - * @param tinfo + * Create the exec task for queue mode + * @param pMsg + * @param SReadHandle * @return */ -int32_t qStreamScanSnapshot(qTaskInfo_t tinfo); +qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers); /** * Set the input data block for the stream scan. @@ -111,7 +114,7 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId, * @return */ int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* tableName, int32_t* sversion, - int32_t* tversion); + int32_t* tversion); /** * The main task execution function, including query on both table and multiple tables, diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index d6cb2c27b0..ac9784b91b 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -14,6 +14,7 @@ */ #include "os.h" +#include "query.h" #include "tdatablock.h" #include "tmsg.h" #include "tmsgcb.h" @@ -119,6 +120,7 @@ static FORCE_INLINE void* streamQueueCurItem(SStreamQueue* queue) { return queue static FORCE_INLINE void* streamQueueNextItem(SStreamQueue* queue) { int8_t dequeueFlag = atomic_exchange_8(&queue->status, STREAM_QUEUE__PROCESSING); if (dequeueFlag == STREAM_QUEUE__FAILED) { + ASSERT(0); ASSERT(queue->qItem != NULL); return streamQueueCurItem(queue); } else { @@ -305,6 +307,7 @@ static FORCE_INLINE int32_t streamTaskInput(SStreamTask* pTask, SStreamQueueItem atomic_store_8(&pTask->inputStatus, TASK_INPUT_STATUS__FAILED); return -1; } + qInfo("task %d %p submit enqueue %p %p %p", pTask->taskId, pTask, pItem, pSubmitClone, pSubmitClone->data); taosWriteQitem(pTask->inputQueue->queue, pSubmitClone); } else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE) { taosWriteQitem(pTask->inputQueue->queue, pItem); diff --git a/include/util/tlog.h b/include/util/tlog.h index a8c9eeabde..d186c32841 100644 --- a/include/util/tlog.h +++ b/include/util/tlog.h @@ -94,7 +94,7 @@ void taosPrintLongString(const char *flags, ELogLevel level, int32_t dflag, cons #define pError(...) { taosPrintLog("APP ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); } #define pPrint(...) 
{ taosPrintLog("APP ", DEBUG_INFO, 255, __VA_ARGS__); } // clang-format on -#define BUF_PAGE_DEBUG +//#define BUF_PAGE_DEBUG #ifdef __cplusplus } #endif diff --git a/include/util/tqueue.h b/include/util/tqueue.h index 466c577c00..0f4f1db9ee 100644 --- a/include/util/tqueue.h +++ b/include/util/tqueue.h @@ -44,6 +44,8 @@ typedef struct STaosQset STaosQset; typedef struct STaosQall STaosQall; typedef struct { void *ahandle; + void *fp; + void *queue; int32_t workerId; int32_t threadNum; int64_t timestamp; @@ -65,6 +67,7 @@ void taosFreeQitem(void *pItem); void taosWriteQitem(STaosQueue *queue, void *pItem); int32_t taosReadQitem(STaosQueue *queue, void **ppItem); bool taosQueueEmpty(STaosQueue *queue); +void taosUpdateItemSize(STaosQueue *queue, int32_t items); int32_t taosQueueItemSize(STaosQueue *queue); int64_t taosQueueMemorySize(STaosQueue *queue); @@ -81,8 +84,8 @@ int32_t taosAddIntoQset(STaosQset *qset, STaosQueue *queue, void *ahandle); void taosRemoveFromQset(STaosQset *qset, STaosQueue *queue); int32_t taosGetQueueNumber(STaosQset *qset); -int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, int64_t *ts, void **ahandle, FItem *itemFp); -int32_t taosReadAllQitemsFromQset(STaosQset *qset, STaosQall *qall, void **ahandle, FItems *itemsFp); +int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, SQueueInfo *qinfo); +int32_t taosReadAllQitemsFromQset(STaosQset *qset, STaosQall *qall, SQueueInfo *qinfo); void taosResetQsetThread(STaosQset *qset, void *pItem); extern int64_t tsRpcQueueMemoryAllowed; diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h index 641d235dd1..700a4d9daf 100644 --- a/source/client/inc/clientInt.h +++ b/source/client/inc/clientInt.h @@ -223,8 +223,8 @@ typedef struct SRequestObj { SArray* tableList; SQueryExecMetric metric; SRequestSendRecvBody body; - bool stableQuery; // todo refactor - bool validateOnly; // todo refactor + bool stableQuery; // todo refactor + bool validateOnly; // todo refactor bool killed; uint32_t prevCode; // previous error code: todo refactor, add update flag for catalog @@ -325,7 +325,8 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC int32_t getPlan(SRequestObj* pRequest, SQuery* pQuery, SQueryPlan** pPlan, SArray* pNodeList); -int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param, bool validateSql, SRequestObj** pRequest); +int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param, bool validateSql, + SRequestObj** pRequest); void taos_close_internal(void* taos); @@ -359,9 +360,6 @@ int32_t removeMeta(STscObj* pTscObj, SArray* tbList); // todo move to clie int32_t handleAlterTbExecRes(void* res, struct SCatalog* pCatalog); // todo move to xxx bool qnodeRequired(SRequestObj* pRequest); -void initTscQhandle(); -void cleanupTscQhandle(); - #ifdef __cplusplus } #endif diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index 6805e4d501..207ac01a2c 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -35,22 +35,10 @@ SAppInfo appInfo; int32_t clientReqRefPool = -1; int32_t clientConnRefPool = -1; -void *tscQhandle = NULL; - static TdThreadOnce tscinit = PTHREAD_ONCE_INIT; volatile int32_t tscInitRes = 0; -void initTscQhandle() { - // init handle - tscQhandle = taosInitScheduler(4096, 5, "tsc"); -} - -void cleanupTscQhandle() { - // destroy handle - taosCleanUpScheduler(tscQhandle); -} - -static int32_t registerRequest(SRequestObj *pRequest, STscObj* pTscObj) { +static 
int32_t registerRequest(SRequestObj *pRequest, STscObj *pTscObj) { // connection has been released already, abort creating request. pRequest->self = taosAddRef(clientReqRefPool, pRequest); @@ -72,7 +60,7 @@ static int32_t registerRequest(SRequestObj *pRequest, STscObj* pTscObj) { static void deregisterRequest(SRequestObj *pRequest) { assert(pRequest != NULL); - STscObj * pTscObj = pRequest->pTscObj; + STscObj *pTscObj = pRequest->pTscObj; SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary; int32_t currentInst = atomic_sub_fetch_64((int64_t *)&pActivity->currentRequests, 1); @@ -97,7 +85,8 @@ void closeTransporter(SAppInstInfo *pAppInfo) { static bool clientRpcRfp(int32_t code, tmsg_t msgType) { if (NEED_REDIRECT_ERROR(code)) { - if (msgType == TDMT_SCH_QUERY || msgType == TDMT_SCH_MERGE_QUERY || msgType == TDMT_SCH_FETCH || msgType == TDMT_SCH_MERGE_FETCH) { + if (msgType == TDMT_SCH_QUERY || msgType == TDMT_SCH_MERGE_QUERY || msgType == TDMT_SCH_FETCH || + msgType == TDMT_SCH_MERGE_FETCH) { return false; } return true; @@ -251,7 +240,7 @@ void *createRequest(uint64_t connId, int32_t type) { return NULL; } - STscObj* pTscObj = acquireTscObj(connId); + STscObj *pTscObj = acquireTscObj(connId); if (pTscObj == NULL) { terrno = TSDB_CODE_TSC_DISCONNECTED; return NULL; @@ -348,7 +337,6 @@ void taos_init_imp(void) { // In the APIs of other program language, taos_cleanup is not available yet. // So, to make sure taos_cleanup will be invoked to clean up the allocated resource to suppress the valgrind warning. atexit(taos_cleanup); - initTscQhandle(); errno = TSDB_CODE_SUCCESS; taosSeedRand(taosGetTimestampSec()); @@ -407,7 +395,7 @@ int taos_options_imp(TSDB_OPTION option, const char *str) { return 0; } - SConfig * pCfg = taosGetCfg(); + SConfig *pCfg = taosGetCfg(); SConfigItem *pItem = NULL; switch (option) { diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 3605f49a5c..32c11983c2 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -1274,8 +1274,8 @@ typedef struct SchedArg { SEpSet* pEpset; } SchedArg; -void doProcessMsgFromServer(SSchedMsg* schedMsg) { - SchedArg* arg = (SchedArg*)schedMsg->ahandle; +int32_t doProcessMsgFromServer(void* param) { + SchedArg* arg = (SchedArg*)param; SRpcMsg* pMsg = &arg->msg; SEpSet* pEpSet = arg->pEpset; @@ -1328,11 +1328,10 @@ void doProcessMsgFromServer(SSchedMsg* schedMsg) { rpcFreeCont(pMsg->pCont); destroySendMsgInfo(pSendInfo); taosMemoryFree(arg); + return TSDB_CODE_SUCCESS; } void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { - SSchedMsg schedMsg = {0}; - SEpSet* tEpSet = NULL; if (pEpSet != NULL) { tEpSet = taosMemoryCalloc(1, sizeof(SEpSet)); @@ -1343,9 +1342,7 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { arg->msg = *pMsg; arg->pEpset = tEpSet; - schedMsg.fp = doProcessMsgFromServer; - schedMsg.ahandle = arg; - taosScheduleTask(tscQhandle, &schedMsg); + taosAsyncExec(doProcessMsgFromServer, arg, NULL); } TAOS* taos_connect_auth(const char* ip, const char* user, const char* auth, const char* db, uint16_t port) { diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index d3fdfa7a39..73def5b9b1 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -72,7 +72,6 @@ void taos_cleanup(void) { catalogDestroy(); schedulerDestroy(); - cleanupTscQhandle(); rpcCleanup(); tscInfo("all local resources released"); taosCleanupCfg(); @@ -242,7 +241,7 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) 
{ #endif } else if (TD_RES_TMQ(res)) { - SMqRspObj * msg = ((SMqRspObj *)res); + SMqRspObj *msg = ((SMqRspObj *)res); SReqResultInfo *pResultInfo; if (msg->resIter == -1) { pResultInfo = tmqGetNextResInfo(res, true); @@ -418,7 +417,7 @@ int taos_affected_rows(TAOS_RES *res) { return 0; } - SRequestObj * pRequest = (SRequestObj *)res; + SRequestObj *pRequest = (SRequestObj *)res; SReqResultInfo *pResInfo = &pRequest->body.resInfo; return pResInfo->numOfRows; } @@ -601,7 +600,7 @@ int *taos_get_column_data_offset(TAOS_RES *res, int columnIndex) { } SReqResultInfo *pResInfo = tscGetCurResInfo(res); - TAOS_FIELD * pField = &pResInfo->userFields[columnIndex]; + TAOS_FIELD *pField = &pResInfo->userFields[columnIndex]; if (!IS_VAR_DATA_TYPE(pField->type)) { return 0; } @@ -645,8 +644,8 @@ const char *taos_get_server_info(TAOS *taos) { typedef struct SqlParseWrapper { SParseContext *pCtx; SCatalogReq catalogReq; - SRequestObj * pRequest; - SQuery * pQuery; + SRequestObj *pRequest; + SQuery *pQuery; } SqlParseWrapper; static void destorySqlParseWrapper(SqlParseWrapper *pWrapper) { @@ -665,8 +664,8 @@ static void destorySqlParseWrapper(SqlParseWrapper *pWrapper) { void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) { SqlParseWrapper *pWrapper = (SqlParseWrapper *)param; - SQuery * pQuery = pWrapper->pQuery; - SRequestObj * pRequest = pWrapper->pRequest; + SQuery *pQuery = pWrapper->pQuery; + SRequestObj *pRequest = pWrapper->pRequest; if (code == TSDB_CODE_SUCCESS) { code = qAnalyseSqlSemantic(pWrapper->pCtx, &pWrapper->catalogReq, pResultMeta, pQuery); @@ -684,7 +683,8 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) { destorySqlParseWrapper(pWrapper); - tscDebug("0x%"PRIx64" analysis semantics completed, start async query, reqId:0x%"PRIx64, pRequest->self, pRequest->requestId); + tscDebug("0x%" PRIx64 " analysis semantics completed, start async query, reqId:0x%" PRIx64, pRequest->self, + pRequest->requestId); launchAsyncQuery(pRequest, pQuery, pResultMeta); } else { destorySqlParseWrapper(pWrapper); @@ -705,7 +705,7 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) { } void taos_query_a(TAOS *taos, const char *sql, __taos_async_fn_t fp, void *param) { - int64_t connId = *(int64_t*)taos; + int64_t connId = *(int64_t *)taos; taosAsyncQueryImpl(connId, sql, fp, param, false); } @@ -739,7 +739,7 @@ int32_t createParseContext(const SRequestObj *pRequest, SParseContext **pCxt) { void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) { SParseContext *pCxt = NULL; - STscObj * pTscObj = pRequest->pTscObj; + STscObj *pTscObj = pRequest->pTscObj; int32_t code = 0; if (pRequest->retry++ > REQUEST_TOTAL_EXEC_TIMES) { @@ -874,9 +874,9 @@ void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) { SSchedulerReq req = { - .syncReq = false, - .fetchFp = fetchCallback, - .cbParam = pRequest, + .syncReq = false, + .fetchFp = fetchCallback, + .cbParam = pRequest, }; schedulerFetchRows(pRequest->body.queryJob, &req); @@ -886,7 +886,7 @@ void taos_fetch_raw_block_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) { ASSERT(res != NULL && fp != NULL); ASSERT(TD_RES_QUERY(res)); - SRequestObj *pRequest = res; + SRequestObj *pRequest = res; SReqResultInfo *pResultInfo = &pRequest->body.resInfo; // set the current block is all consumed @@ -928,7 +928,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) { int64_t connId = *(int64_t *)taos; const int32_t MAX_TABLE_NAME_LENGTH = 12 * 1024 * 1024; // 
12MB list int32_t code = 0; - SRequestObj * pRequest = NULL; + SRequestObj *pRequest = NULL; SCatalogReq catalogReq = {0}; if (NULL == tableNameList) { @@ -950,7 +950,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) { goto _return; } - STscObj* pTscObj = pRequest->pTscObj; + STscObj *pTscObj = pRequest->pTscObj; code = transferTableNameList(tableNameList, pTscObj->acctId, pTscObj->db, &catalogReq.pTableMeta); if (code) { goto _return; @@ -972,7 +972,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) { goto _return; } - SSyncQueryParam* pParam = pRequest->body.param; + SSyncQueryParam *pParam = pRequest->body.param; tsem_wait(&pParam->sem); _return: diff --git a/source/client/src/clientMsgHandler.c b/source/client/src/clientMsgHandler.c index dcccbb17c9..94bd5dd787 100644 --- a/source/client/src/clientMsgHandler.c +++ b/source/client/src/clientMsgHandler.c @@ -179,7 +179,6 @@ int32_t processUseDbRsp(void* param, SDataBuf* pMsg, int32_t code) { if (code != 0) { terrno = code; if (output.dbVgroup) taosHashCleanup(output.dbVgroup->vgHash); - taosMemoryFreeClear(output.dbVgroup); tscError("0x%" PRIx64 " failed to build use db output since %s", pRequest->requestId, terrstr()); } else if (output.dbVgroup && output.dbVgroup->vgHash) { @@ -189,12 +188,14 @@ int32_t processUseDbRsp(void* param, SDataBuf* pMsg, int32_t code) { if (code1 != TSDB_CODE_SUCCESS) { tscWarn("catalogGetHandle failed, clusterId:%" PRIx64 ", error:%s", pRequest->pTscObj->pAppInfo->clusterId, tstrerror(code1)); - taosMemoryFreeClear(output.dbVgroup); } else { catalogUpdateDBVgInfo(pCatalog, output.db, output.dbId, output.dbVgroup); + output.dbVgroup = NULL; } } + taosMemoryFreeClear(output.dbVgroup); + tFreeSUsedbRsp(&usedbRsp); char db[TSDB_DB_NAME_LEN] = {0}; diff --git a/source/client/src/tmq.c b/source/client/src/tmq.c index 110b839216..b0542e350f 100644 --- a/source/client/src/tmq.c +++ b/source/client/src/tmq.c @@ -1149,11 +1149,10 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) { tDecoderInit(&decoder, POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), pMsg->len - sizeof(SMqRspHead)); tDecodeSMqDataRsp(&decoder, &pRspWrapper->dataRsp); memcpy(&pRspWrapper->dataRsp, pMsg->pData, sizeof(SMqRspHead)); - /*tDecodeSMqDataBlkRsp(POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), &pRspWrapper->dataRsp);*/ } else { ASSERT(rspType == TMQ_MSG_TYPE__POLL_META_RSP); - memcpy(&pRspWrapper->metaRsp, pMsg->pData, sizeof(SMqRspHead)); tDecodeSMqMetaRsp(POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), &pRspWrapper->metaRsp); + memcpy(&pRspWrapper->metaRsp, pMsg->pData, sizeof(SMqRspHead)); } taosMemoryFree(pMsg->pData); @@ -2427,15 +2426,15 @@ static void destroyCreateTbReqBatch(void* data) { taosArrayDestroy(pTbBatch->req.pArray); } -static int32_t taosCreateTable(TAOS *taos, void *meta, int32_t metaLen){ - SVCreateTbBatchReq req = {0}; - SDecoder coder = {0}; - int32_t code = TSDB_CODE_SUCCESS; - SRequestObj *pRequest = NULL; - SQuery *pQuery = NULL; - SHashObj *pVgroupHashmap = NULL; +static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) { + SVCreateTbBatchReq req = {0}; + SDecoder coder = {0}; + int32_t code = TSDB_CODE_SUCCESS; + SRequestObj* pRequest = NULL; + SQuery* pQuery = NULL; + SHashObj* pVgroupHashmap = NULL; - code = buildRequest(*(int64_t*) taos, "", 0, NULL, false, &pRequest); + code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest); if (code != TSDB_CODE_SUCCESS) { goto end; } @@ -2455,8 +2454,8 @@ static int32_t taosCreateTable(TAOS *taos, 
void *meta, int32_t metaLen){ STscObj* pTscObj = pRequest->pTscObj; - SVCreateTbReq *pCreateReq = NULL; - SCatalog* pCatalog = NULL; + SVCreateTbReq* pCreateReq = NULL; + SCatalog* pCatalog = NULL; code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog); if (code != TSDB_CODE_SUCCESS) { goto end; @@ -2540,13 +2539,13 @@ static void destroyDropTbReqBatch(void* data) { taosArrayDestroy(pTbBatch->req.pArray); } -static int32_t taosDropTable(TAOS *taos, void *meta, int32_t metaLen){ - SVDropTbBatchReq req = {0}; - SDecoder coder = {0}; - int32_t code = TSDB_CODE_SUCCESS; - SRequestObj *pRequest = NULL; - SQuery *pQuery = NULL; - SHashObj *pVgroupHashmap = NULL; +static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) { + SVDropTbBatchReq req = {0}; + SDecoder coder = {0}; + int32_t code = TSDB_CODE_SUCCESS; + SRequestObj* pRequest = NULL; + SQuery* pQuery = NULL; + SHashObj* pVgroupHashmap = NULL; code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest); if (code != TSDB_CODE_SUCCESS) { @@ -2568,8 +2567,8 @@ static int32_t taosDropTable(TAOS *taos, void *meta, int32_t metaLen){ STscObj* pTscObj = pRequest->pTscObj; - SVDropTbReq *pDropReq = NULL; - SCatalog *pCatalog = NULL; + SVDropTbReq* pDropReq = NULL; + SCatalog* pCatalog = NULL; code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog); if (code != TSDB_CODE_SUCCESS) { goto end; @@ -2640,17 +2639,16 @@ end: return code; } -static int32_t taosAlterTable(TAOS *taos, void *meta, int32_t metaLen){ - SVAlterTbReq req = {0}; - SDecoder coder = {0}; - int32_t code = TSDB_CODE_SUCCESS; - SRequestObj *pRequest = NULL; - SQuery *pQuery = NULL; - SArray *pArray = NULL; - SVgDataBlocks *pVgData = NULL; +static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) { + SVAlterTbReq req = {0}; + SDecoder coder = {0}; + int32_t code = TSDB_CODE_SUCCESS; + SRequestObj* pRequest = NULL; + SQuery* pQuery = NULL; + SArray* pArray = NULL; + SVgDataBlocks* pVgData = NULL; - - code = buildRequest(*(int64_t*) taos, "", 0, NULL, false, &pRequest); + code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest); if (code != TSDB_CODE_SUCCESS) { goto end; } diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 38f46b9b11..9b65e08d29 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -320,7 +320,9 @@ int32_t colDataAssign(SColumnInfoData* pColumnInfoData, const SColumnInfoData* p memcpy(pColumnInfoData->pData, pSource->pData, pSource->varmeta.length); } else { memcpy(pColumnInfoData->nullbitmap, pSource->nullbitmap, BitmapLen(numOfRows)); - memcpy(pColumnInfoData->pData, pSource->pData, pSource->info.bytes * numOfRows); + if (pSource->pData) { + memcpy(pColumnInfoData->pData, pSource->pData, pSource->info.bytes * numOfRows); + } } pColumnInfoData->hasNull = pSource->hasNull; @@ -1736,56 +1738,57 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf) int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock); int32_t rows = pDataBlock->info.rows; int32_t len = 0; - len += snprintf(dumpBuf + len, size - len, "\n%s |block type %d |child id %d|group id:%" PRIu64 "|\n", flag, - (int32_t)pDataBlock->info.type, pDataBlock->info.childId, pDataBlock->info.groupId); + len += snprintf(dumpBuf + len, size - len, "\n%s |block type %d |child id %d|group id:%" PRIu64 "| uid:%ld\n", flag, + (int32_t)pDataBlock->info.type, pDataBlock->info.childId, pDataBlock->info.groupId, + pDataBlock->info.uid); if (len >= size - 1) return dumpBuf; for 
(int32_t j = 0; j < rows; j++) { len += snprintf(dumpBuf + len, size - len, "%s |", flag); - if (len >= size -1) return dumpBuf; + if (len >= size - 1) return dumpBuf; for (int32_t k = 0; k < colNum; k++) { SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, k); void* var = POINTER_SHIFT(pColInfoData->pData, j * pColInfoData->info.bytes); if (colDataIsNull(pColInfoData, rows, j, NULL) || !pColInfoData->pData) { len += snprintf(dumpBuf + len, size - len, " %15s |", "NULL"); - if (len >= size -1) return dumpBuf; + if (len >= size - 1) return dumpBuf; continue; } switch (pColInfoData->info.type) { case TSDB_DATA_TYPE_TIMESTAMP: formatTimestamp(pBuf, *(uint64_t*)var, TSDB_TIME_PRECISION_MILLI); len += snprintf(dumpBuf + len, size - len, " %25s |", pBuf); - if (len >= size -1) return dumpBuf; + if (len >= size - 1) return dumpBuf; break; case TSDB_DATA_TYPE_INT: len += snprintf(dumpBuf + len, size - len, " %15d |", *(int32_t*)var); - if (len >= size -1) return dumpBuf; + if (len >= size - 1) return dumpBuf; break; case TSDB_DATA_TYPE_UINT: len += snprintf(dumpBuf + len, size - len, " %15u |", *(uint32_t*)var); - if (len >= size -1) return dumpBuf; + if (len >= size - 1) return dumpBuf; break; case TSDB_DATA_TYPE_BIGINT: len += snprintf(dumpBuf + len, size - len, " %15ld |", *(int64_t*)var); - if (len >= size -1) return dumpBuf; + if (len >= size - 1) return dumpBuf; break; case TSDB_DATA_TYPE_UBIGINT: len += snprintf(dumpBuf + len, size - len, " %15lu |", *(uint64_t*)var); - if (len >= size -1) return dumpBuf; + if (len >= size - 1) return dumpBuf; break; case TSDB_DATA_TYPE_FLOAT: len += snprintf(dumpBuf + len, size - len, " %15f |", *(float*)var); - if (len >= size -1) return dumpBuf; + if (len >= size - 1) return dumpBuf; break; case TSDB_DATA_TYPE_DOUBLE: len += snprintf(dumpBuf + len, size - len, " %15lf |", *(double*)var); - if (len >= size -1) return dumpBuf; + if (len >= size - 1) return dumpBuf; break; } } len += snprintf(dumpBuf + len, size - len, "\n"); - if (len >= size -1) return dumpBuf; + if (len >= size - 1) return dumpBuf; } len += snprintf(dumpBuf + len, size - len, "%s |end\n", flag); return dumpBuf; diff --git a/source/common/src/tdataformat.c b/source/common/src/tdataformat.c index ec7be79934..8eeab77a15 100644 --- a/source/common/src/tdataformat.c +++ b/source/common/src/tdataformat.c @@ -175,7 +175,8 @@ static void setBitMap(uint8_t *pb, uint8_t v, int32_t idx, uint8_t flags) { } while (0) int32_t tTSRowNew(STSRowBuilder *pBuilder, SArray *pArray, STSchema *pTSchema, STSRow2 **ppRow) { - int32_t code = 0; + int32_t code = 0; +#if 0 STColumn *pTColumn; SColVal *pColVal; int32_t nColVal = taosArrayGetSize(pArray); @@ -462,30 +463,22 @@ int32_t tTSRowNew(STSRowBuilder *pBuilder, SArray *pArray, STSchema *pTSchema, S } } +#endif _exit: return code; } int32_t tTSRowClone(const STSRow2 *pRow, STSRow2 **ppRow) { int32_t code = 0; + int32_t rLen; - (*ppRow) = (STSRow2 *)taosMemoryMalloc(sizeof(**ppRow)); + TSROW_LEN(pRow, rLen); + (*ppRow) = (STSRow2 *)taosMemoryMalloc(rLen); if (*ppRow == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; goto _exit; } - **ppRow = *pRow; - (*ppRow)->pData = NULL; - - if (pRow->nData) { - (*ppRow)->pData = taosMemoryMalloc(pRow->nData); - if ((*ppRow)->pData == NULL) { - taosMemoryFree(*ppRow); - code = TSDB_CODE_OUT_OF_MEMORY; - goto _exit; - } - memcpy((*ppRow)->pData, pRow->pData, pRow->nData); - } + memcpy(*ppRow, pRow, rLen); _exit: return code; @@ -493,12 +486,12 @@ _exit: void tTSRowFree(STSRow2 *pRow) { if (pRow) { - if 
(pRow->pData) taosMemoryFree(pRow->pData); taosMemoryFree(pRow); } } void tTSRowGet(STSRow2 *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal) { +#if 0 uint8_t isTuple = ((pRow->flags & 0xf0) == 0) ? 1 : 0; STColumn *pTColumn = &pTSchema->columns[iCol]; uint8_t flags = pRow->flags & (uint8_t)0xf; @@ -643,10 +636,12 @@ _return_null: _return_value: *pColVal = COL_VAL_VALUE(pTColumn->colId, pTColumn->type, value); return; +#endif } int32_t tTSRowToArray(STSRow2 *pRow, STSchema *pTSchema, SArray **ppArray) { int32_t code = 0; +#if 0 SColVal cv; (*ppArray) = taosArrayInit(pTSchema->numOfCols, sizeof(SColVal)); @@ -660,52 +655,27 @@ int32_t tTSRowToArray(STSRow2 *pRow, STSchema *pTSchema, SArray **ppArray) { taosArrayPush(*ppArray, &cv); } +#endif _exit: return code; } int32_t tPutTSRow(uint8_t *p, STSRow2 *pRow) { - int32_t n = 0; + int32_t n; - n += tPutI64(p ? p + n : p, pRow->ts); - n += tPutI8(p ? p + n : p, pRow->flags); - n += tPutI32v(p ? p + n : p, pRow->sver); - - ASSERT(pRow->flags & 0xf); - - switch (pRow->flags & 0xf) { - case TSROW_HAS_NONE: - case TSROW_HAS_NULL: - ASSERT(pRow->nData == 0); - ASSERT(pRow->pData == NULL); - break; - default: - ASSERT(pRow->nData && pRow->pData); - n += tPutBinary(p ? p + n : p, pRow->pData, pRow->nData); - break; + TSROW_LEN(pRow, n); + if (p) { + memcpy(p, pRow, n); } return n; } -int32_t tGetTSRow(uint8_t *p, STSRow2 *pRow) { - int32_t n = 0; +int32_t tGetTSRow(uint8_t *p, STSRow2 **ppRow) { + int32_t n; - n += tGetI64(p + n, &pRow->ts); - n += tGetI8(p + n, &pRow->flags); - n += tGetI32v(p + n, &pRow->sver); - - ASSERT(pRow->flags); - switch (pRow->flags & 0xf) { - case TSROW_HAS_NONE: - case TSROW_HAS_NULL: - pRow->nData = 0; - pRow->pData = NULL; - break; - default: - n += tGetBinary(p + n, &pRow->pData, &pRow->nData); - break; - } + *ppRow = (STSRow2 *)p; + TSROW_LEN(*ppRow, n); return n; } @@ -904,15 +874,13 @@ static int32_t tGetTagVal(uint8_t *p, STagVal *pTagVal, int8_t isJson) { return n; } -bool tTagIsJson(const void *pTag){ - return (((const STag *)pTag)->flags & TD_TAG_JSON); -} +bool tTagIsJson(const void *pTag) { return (((const STag *)pTag)->flags & TD_TAG_JSON); } -bool tTagIsJsonNull(void *data){ - STag *pTag = (STag*)data; - int8_t isJson = tTagIsJson(pTag); - if(!isJson) return false; - return ((STag*)data)->nTag == 0; +bool tTagIsJsonNull(void *data) { + STag *pTag = (STag *)data; + int8_t isJson = tTagIsJson(pTag); + if (!isJson) return false; + return ((STag *)data)->nTag == 0; } int32_t tTagNew(SArray *pArray, int32_t version, int8_t isJson, STag **ppTag) { @@ -1097,112 +1065,6 @@ _err: } #if 1 // =================================================================================================================== -static void dataColSetNEleNull(SDataCol *pCol, int nEle); -int tdAllocMemForCol(SDataCol *pCol, int maxPoints) { - int spaceNeeded = pCol->bytes * maxPoints; - if (IS_VAR_DATA_TYPE(pCol->type)) { - spaceNeeded += sizeof(VarDataOffsetT) * maxPoints; - } -#ifdef TD_SUPPORT_BITMAP - int32_t nBitmapBytes = (int32_t)TD_BITMAP_BYTES(maxPoints); - spaceNeeded += (int)nBitmapBytes; - // TODO: Currently, the compression of bitmap parts is affiliated to the column data parts, thus allocate 1 more - // TYPE_BYTES as to comprise complete TYPE_BYTES. Otherwise, invalid read/write would be triggered. 
- // spaceNeeded += TYPE_BYTES[pCol->type]; // the bitmap part is append as a single part since 2022.04.03, thus - // remove the additional space -#endif - - if (pCol->spaceSize < spaceNeeded) { - void *ptr = taosMemoryRealloc(pCol->pData, spaceNeeded); - if (ptr == NULL) { - uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)spaceNeeded, strerror(errno)); - return -1; - } else { - pCol->pData = ptr; - pCol->spaceSize = spaceNeeded; - } - } -#ifdef TD_SUPPORT_BITMAP - - if (IS_VAR_DATA_TYPE(pCol->type)) { - pCol->pBitmap = POINTER_SHIFT(pCol->pData, pCol->bytes * maxPoints); - pCol->dataOff = POINTER_SHIFT(pCol->pBitmap, nBitmapBytes); - } else { - pCol->pBitmap = POINTER_SHIFT(pCol->pData, pCol->bytes * maxPoints); - } -#else - if (IS_VAR_DATA_TYPE(pCol->type)) { - pCol->dataOff = POINTER_SHIFT(pCol->pData, pCol->bytes * maxPoints); - } -#endif - return 0; -} - -/** - * Duplicate the schema and return a new object - */ -STSchema *tdDupSchema(const STSchema *pSchema) { - int tlen = sizeof(STSchema) + sizeof(STColumn) * schemaNCols(pSchema); - STSchema *tSchema = (STSchema *)taosMemoryMalloc(tlen); - if (tSchema == NULL) return NULL; - - memcpy((void *)tSchema, (void *)pSchema, tlen); - - return tSchema; -} - -/** - * Encode a schema to dst, and return the next pointer - */ -int tdEncodeSchema(void **buf, STSchema *pSchema) { - int tlen = 0; - tlen += taosEncodeFixedI32(buf, schemaVersion(pSchema)); - tlen += taosEncodeFixedI32(buf, schemaNCols(pSchema)); - - for (int i = 0; i < schemaNCols(pSchema); i++) { - STColumn *pCol = schemaColAt(pSchema, i); - tlen += taosEncodeFixedI8(buf, colType(pCol)); - tlen += taosEncodeFixedI8(buf, colFlags(pCol)); - tlen += taosEncodeFixedI16(buf, colColId(pCol)); - tlen += taosEncodeFixedI16(buf, colBytes(pCol)); - } - - return tlen; -} - -/** - * Decode a schema from a binary. 
- */ -void *tdDecodeSchema(void *buf, STSchema **pRSchema) { - int version = 0; - int numOfCols = 0; - STSchemaBuilder schemaBuilder; - - buf = taosDecodeFixedI32(buf, &version); - buf = taosDecodeFixedI32(buf, &numOfCols); - - if (tdInitTSchemaBuilder(&schemaBuilder, version) < 0) return NULL; - - for (int i = 0; i < numOfCols; i++) { - col_type_t type = 0; - int8_t flags = 0; - col_id_t colId = 0; - col_bytes_t bytes = 0; - buf = taosDecodeFixedI8(buf, &type); - buf = taosDecodeFixedI8(buf, &flags); - buf = taosDecodeFixedI16(buf, &colId); - buf = taosDecodeFixedI32(buf, &bytes); - if (tdAddColToSchema(&schemaBuilder, type, flags, colId, bytes) < 0) { - tdDestroyTSchemaBuilder(&schemaBuilder); - return NULL; - } - } - - *pRSchema = tdGetSchemaFromBuilder(&schemaBuilder); - tdDestroyTSchemaBuilder(&schemaBuilder); - return buf; -} - int tdInitTSchemaBuilder(STSchemaBuilder *pBuilder, schema_ver_t version) { if (pBuilder == NULL) return -1; @@ -1239,22 +1101,22 @@ int32_t tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int8_t flags, c } STColumn *pCol = &(pBuilder->columns[pBuilder->nCols]); - colSetType(pCol, type); - colSetColId(pCol, colId); - colSetFlags(pCol, flags); + pCol->type = type; + pCol->colId = colId; + pCol->flags = flags; if (pBuilder->nCols == 0) { - colSetOffset(pCol, 0); + pCol->offset = 0; } else { STColumn *pTCol = &(pBuilder->columns[pBuilder->nCols - 1]); - colSetOffset(pCol, pTCol->offset + TYPE_BYTES[pTCol->type]); + pCol->offset = pTCol->offset + TYPE_BYTES[pTCol->type]; } if (IS_VAR_DATA_TYPE(type)) { - colSetBytes(pCol, bytes); + pCol->bytes = bytes; pBuilder->tlen += (TYPE_BYTES[type] + bytes); pBuilder->vlen += bytes - sizeof(VarDataLenT); } else { - colSetBytes(pCol, TYPE_BYTES[type]); + pCol->bytes = TYPE_BYTES[type]; pBuilder->tlen += TYPE_BYTES[type]; pBuilder->vlen += TYPE_BYTES[type]; } @@ -1275,151 +1137,19 @@ STSchema *tdGetSchemaFromBuilder(STSchemaBuilder *pBuilder) { STSchema *pSchema = (STSchema *)taosMemoryMalloc(tlen); if (pSchema == NULL) return NULL; - schemaVersion(pSchema) = pBuilder->version; - schemaNCols(pSchema) = pBuilder->nCols; - schemaTLen(pSchema) = pBuilder->tlen; - schemaFLen(pSchema) = pBuilder->flen; - schemaVLen(pSchema) = pBuilder->vlen; + pSchema->version = pBuilder->version; + pSchema->numOfCols = pBuilder->nCols; + pSchema->tlen = pBuilder->tlen; + pSchema->flen = pBuilder->flen; + pSchema->vlen = pBuilder->vlen; #ifdef TD_SUPPORT_BITMAP - schemaTLen(pSchema) += (int)TD_BITMAP_BYTES(schemaNCols(pSchema)); + pSchema->tlen += (int)TD_BITMAP_BYTES(pSchema->numOfCols); #endif - memcpy(schemaColAt(pSchema, 0), pBuilder->columns, sizeof(STColumn) * pBuilder->nCols); + memcpy(&pSchema->columns[0], pBuilder->columns, sizeof(STColumn) * pBuilder->nCols); return pSchema; } -void dataColInit(SDataCol *pDataCol, STColumn *pCol, int maxPoints) { - pDataCol->type = colType(pCol); - pDataCol->colId = colColId(pCol); - pDataCol->bytes = colBytes(pCol); - pDataCol->offset = colOffset(pCol) + 0; // TD_DATA_ROW_HEAD_SIZE; - - pDataCol->len = 0; -} - -static FORCE_INLINE const void *tdGetColDataOfRowUnsafe(SDataCol *pCol, int row) { - if (IS_VAR_DATA_TYPE(pCol->type)) { - return POINTER_SHIFT(pCol->pData, pCol->dataOff[row]); - } else { - return POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * row); - } -} - -bool isNEleNull(SDataCol *pCol, int nEle) { - if (isAllRowsNull(pCol)) return true; - for (int i = 0; i < nEle; ++i) { - if (!isNull(tdGetColDataOfRowUnsafe(pCol, i), pCol->type)) return false; - } - return true; -} - -void 
*dataColSetOffset(SDataCol *pCol, int nEle) { - ASSERT(((pCol->type == TSDB_DATA_TYPE_BINARY) || (pCol->type == TSDB_DATA_TYPE_NCHAR))); - - void *tptr = pCol->pData; - // char *tptr = (char *)(pCol->pData); - - VarDataOffsetT offset = 0; - for (int i = 0; i < nEle; ++i) { - pCol->dataOff[i] = offset; - offset += varDataTLen(tptr); - tptr = POINTER_SHIFT(tptr, varDataTLen(tptr)); - } - return POINTER_SHIFT(tptr, varDataTLen(tptr)); -} - -SDataCols *tdNewDataCols(int maxCols, int maxRows) { - SDataCols *pCols = (SDataCols *)taosMemoryCalloc(1, sizeof(SDataCols)); - if (pCols == NULL) { - uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)sizeof(SDataCols), strerror(errno)); - return NULL; - } - - pCols->maxPoints = maxRows; - pCols->maxCols = maxCols; - pCols->numOfRows = 0; - pCols->numOfCols = 0; - pCols->bitmapMode = TSDB_BITMODE_DEFAULT; - - if (maxCols > 0) { - pCols->cols = (SDataCol *)taosMemoryCalloc(maxCols, sizeof(SDataCol)); - if (pCols->cols == NULL) { - uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)sizeof(SDataCol) * maxCols, - strerror(errno)); - tdFreeDataCols(pCols); - return NULL; - } -#if 0 // no need as calloc used - int i; - for (i = 0; i < maxCols; i++) { - pCols->cols[i].spaceSize = 0; - pCols->cols[i].len = 0; - pCols->cols[i].pData = NULL; - pCols->cols[i].dataOff = NULL; - } -#endif - } - - return pCols; -} - -int tdInitDataCols(SDataCols *pCols, STSchema *pSchema) { - int i; - int oldMaxCols = pCols->maxCols; - if (schemaNCols(pSchema) > oldMaxCols) { - pCols->maxCols = schemaNCols(pSchema); - void *ptr = (SDataCol *)taosMemoryRealloc(pCols->cols, sizeof(SDataCol) * pCols->maxCols); - if (ptr == NULL) return -1; - pCols->cols = ptr; - for (i = oldMaxCols; i < pCols->maxCols; ++i) { - pCols->cols[i].pData = NULL; - pCols->cols[i].dataOff = NULL; - pCols->cols[i].pBitmap = NULL; - pCols->cols[i].spaceSize = 0; - } - } -#if 0 - tdResetDataCols(pCols); // redundant loop to reset len/blen to 0, already reset in following dataColInit(...) 
-#endif - - pCols->numOfRows = 0; - pCols->bitmapMode = TSDB_BITMODE_DEFAULT; - pCols->numOfCols = schemaNCols(pSchema); - - for (i = 0; i < schemaNCols(pSchema); ++i) { - dataColInit(pCols->cols + i, schemaColAt(pSchema, i), pCols->maxPoints); - } - - return 0; -} - -SDataCols *tdFreeDataCols(SDataCols *pCols) { - int i; - if (pCols) { - if (pCols->cols) { - int maxCols = pCols->maxCols; - for (i = 0; i < maxCols; ++i) { - SDataCol *pCol = &pCols->cols[i]; - taosMemoryFreeClear(pCol->pData); - } - taosMemoryFree(pCols->cols); - pCols->cols = NULL; - } - taosMemoryFree(pCols); - } - return NULL; -} - -void tdResetDataCols(SDataCols *pCols) { - if (pCols != NULL) { - pCols->numOfRows = 0; - pCols->bitmapMode = 0; - for (int i = 0; i < pCols->maxCols; ++i) { - dataColReset(pCols->cols + i); - } - } -} - #endif \ No newline at end of file diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 357f258951..24bf4e5c2d 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -412,7 +412,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { tsNumOfVnodeQueryThreads = TMAX(tsNumOfVnodeQueryThreads, 2); if (cfgAddInt32(pCfg, "numOfVnodeQueryThreads", tsNumOfVnodeQueryThreads, 1, 1024, 0) != 0) return -1; - tsNumOfVnodeFetchThreads = TRANGE(tsNumOfVnodeFetchThreads, 1, 1); + tsNumOfVnodeFetchThreads = tsNumOfCores / 4; + tsNumOfVnodeFetchThreads = TMAX(tsNumOfVnodeFetchThreads, 4); if (cfgAddInt32(pCfg, "numOfVnodeFetchThreads", tsNumOfVnodeFetchThreads, 1, 1024, 0) != 0) return -1; tsNumOfVnodeWriteThreads = tsNumOfCores; diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index e5b9d8176f..eca8d49b6a 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -5526,6 +5526,11 @@ bool tOffsetEqual(const STqOffsetVal *pLeft, const STqOffsetVal *pRight) { ASSERT(0); // TODO return pLeft->uid == pRight->uid && pLeft->ts == pRight->ts; + } else { + ASSERT(0); + /*ASSERT(pLeft->type == TMQ_OFFSET__RESET_NONE || pLeft->type == TMQ_OFFSET__RESET_EARLIEAST ||*/ + /*pLeft->type == TMQ_OFFSET__RESET_LATEST);*/ + /*return true;*/ } } return false; diff --git a/source/common/src/trow.c b/source/common/src/trow.c index 052b6ffe58..f64250bce6 100644 --- a/source/common/src/trow.c +++ b/source/common/src/trow.c @@ -32,28 +32,10 @@ const uint8_t tdVTypeByte[2][3] = {{ }; // declaration -static uint8_t tdGetBitmapByte(uint8_t byte); -static int32_t tdCompareColId(const void *arg1, const void *arg2); +static uint8_t tdGetBitmapByte(uint8_t byte); +static int32_t tdCompareColId(const void *arg1, const void *arg2); static FORCE_INLINE int32_t compareKvRowColId(const void *key1, const void *key2); -// static void dataColSetNEleNull(SDataCol *pCol, int nEle); - -/** - * @brief src2 data has more priority than src1 - * - * @param target - * @param src1 - * @param iter1 - * @param limit1 - * @param src2 - * @param iter2 - * @param limit2 - * @param tRows - * @param update - */ -static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2, - int limit2, int tRows, bool update); - // implementation /** * @brief Compress bitmap bytes comprised of 2-bits to counterpart of 1-bit. 
@@ -287,33 +269,6 @@ void tdMergeBitmap(uint8_t *srcBitmap, int32_t nBits, uint8_t *dstBitmap) { } } -static FORCE_INLINE void dataColSetNullAt(SDataCol *pCol, int index, bool setBitmap, int8_t bitmapMode) { - if (IS_VAR_DATA_TYPE(pCol->type)) { - pCol->dataOff[index] = pCol->len; - char *ptr = POINTER_SHIFT(pCol->pData, pCol->len); - setVardataNull(ptr, pCol->type); - pCol->len += varDataTLen(ptr); - } else { - setNull(POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * index), pCol->type, pCol->bytes); - pCol->len += TYPE_BYTES[pCol->type]; - } - if (setBitmap) { - tdSetBitmapValType(pCol->pBitmap, index, TD_VTYPE_NONE, bitmapMode); - } -} - -// static void dataColSetNEleNull(SDataCol *pCol, int nEle) { -// if (IS_VAR_DATA_TYPE(pCol->type)) { -// pCol->len = 0; -// for (int i = 0; i < nEle; i++) { -// dataColSetNullAt(pCol, i); -// } -// } else { -// setNullN(pCol->pData, pCol->type, pCol->bytes, nEle); -// pCol->len = TYPE_BYTES[pCol->type] * nEle; -// } -// } - /** * @brief Set bitmap area by byte preferentially and then by bit. * @@ -362,56 +317,6 @@ bool tdIsBitmapBlkNorm(const void *pBitmap, int32_t numOfBits, int8_t bitmapMode return true; } -static FORCE_INLINE void dataColSetNoneAt(SDataCol *pCol, int index, bool setBitmap, int8_t bitmapMode) { - if (IS_VAR_DATA_TYPE(pCol->type)) { - pCol->dataOff[index] = pCol->len; - char *ptr = POINTER_SHIFT(pCol->pData, pCol->len); - setVardataNull(ptr, pCol->type); - pCol->len += varDataTLen(ptr); - } else { - setNull(POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * index), pCol->type, pCol->bytes); - pCol->len += TYPE_BYTES[pCol->type]; - } - if (setBitmap) { - tdSetBitmapValType(pCol->pBitmap, index, TD_VTYPE_NONE, bitmapMode); - } -} - -static void dataColSetNEleNone(SDataCol *pCol, int nEle, int8_t bitmapMode) { - if (IS_VAR_DATA_TYPE(pCol->type)) { - pCol->len = 0; - for (int i = 0; i < nEle; ++i) { - dataColSetNoneAt(pCol, i, false, bitmapMode); - } - } else { - setNullN(pCol->pData, pCol->type, pCol->bytes, nEle); - pCol->len = TYPE_BYTES[pCol->type] * nEle; - } -#ifdef TD_SUPPORT_BITMAP - tdSetBitmapValTypeN(pCol->pBitmap, nEle, TD_VTYPE_NONE, bitmapMode); -#endif -} - -#if 0 -void trbSetRowInfo(SRowBuilder *pRB, bool del, uint16_t sver) { - // TODO -} - -void trbSetRowVersion(SRowBuilder *pRB, uint64_t ver) { - // TODO -} - -void trbSetRowTS(SRowBuilder *pRB, TSKEY ts) { - // TODO -} - -int trbWriteCol(SRowBuilder *pRB, void *pData, col_id_t cid) { - // TODO - return 0; -} - -#endif - STSRow *tdRowDup(STSRow *row) { STSRow *trow = taosMemoryMalloc(TD_ROW_LEN(row)); if (trow == NULL) return NULL; @@ -420,511 +325,6 @@ STSRow *tdRowDup(STSRow *row) { return trow; } -/** - * @brief - * - * @param pCol - * @param valType - * @param val - * @param numOfRows - * @param maxPoints - * @param bitmapMode default is 0(2 bits), otherwise 1(1 bit) - * @param isMerge merge to current row - * @return int - */ -int tdAppendValToDataCol(SDataCol *pCol, TDRowValT valType, const void *val, int numOfRows, int maxPoints, - int8_t bitmapMode, bool isMerge) { - TASSERT(pCol != NULL); - - // Assume that the columns not specified during insert/upsert mean None. 
- if (isAllRowsNone(pCol)) { - if (tdValIsNone(valType)) { - // all None value yet, just return - return 0; - } - - if (tdAllocMemForCol(pCol, maxPoints) < 0) return -1; - if (numOfRows > 0) { - // Find the first not None value, fill all previous values as None - dataColSetNEleNone(pCol, numOfRows, bitmapMode); - } - } - const void *value = val; - if (!tdValTypeIsNorm(valType) || !val) { - // TODO: - // 1. back compatibility and easy to debug with codes of 2.0 to save NULL values. - // 2. later on, considering further optimization, don't save Null/None for VarType. - value = getNullValue(pCol->type); - } - if (!isMerge) { - if (IS_VAR_DATA_TYPE(pCol->type)) { - // set offset - pCol->dataOff[numOfRows] = pCol->len; - // Copy data - memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, varDataTLen(value)); - // Update the length - pCol->len += varDataTLen(value); - } else { - ASSERT(pCol->len == TYPE_BYTES[pCol->type] * numOfRows); - memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, pCol->bytes); - pCol->len += pCol->bytes; - } - } else if (!tdValTypeIsNone(valType)) { - if (IS_VAR_DATA_TYPE(pCol->type)) { - // keep the last offset - // discard the last var data - int32_t lastVarLen = varDataTLen(POINTER_SHIFT(pCol->pData, pCol->dataOff[numOfRows])); - pCol->len -= lastVarLen; - // Copy data - memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, varDataTLen(value)); - // Update the length - pCol->len += varDataTLen(value); - } else { - ASSERT(pCol->len - TYPE_BYTES[pCol->type] == TYPE_BYTES[pCol->type] * numOfRows); - memcpy(POINTER_SHIFT(pCol->pData, pCol->len - TYPE_BYTES[pCol->type]), value, pCol->bytes); - } - } - -#ifdef TD_SUPPORT_BITMAP - if (!isMerge || !tdValTypeIsNone(valType)) { - tdSetBitmapValType(pCol->pBitmap, numOfRows, valType, bitmapMode); - } -#endif - return 0; -} - -// internal -static int32_t tdAppendTpRowToDataCol(STSRow *pRow, STSchema *pSchema, SDataCols *pCols, bool isMerge) { -#if 0 - ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < TD_ROW_KEY(pRow)); -#endif - - // Multi-Version rows with the same key and different versions supported - ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) <= TD_ROW_KEY(pRow)); - - int rcol = 1; - int dcol = 1; - void *pBitmap = tdGetBitmapAddrTp(pRow, pSchema->flen); - - SDataCol *pDataCol = &(pCols->cols[0]); - ASSERT(pDataCol->colId == PRIMARYKEY_TIMESTAMP_COL_ID); - tdAppendValToDataCol(pDataCol, TD_VTYPE_NORM, &pRow->ts, pCols->numOfRows, pCols->maxPoints, pCols->bitmapMode, - isMerge); - - while (dcol < pCols->numOfCols) { - pDataCol = &(pCols->cols[dcol]); - if (rcol >= schemaNCols(pSchema)) { - tdAppendValToDataCol(pDataCol, TD_VTYPE_NULL, NULL, pCols->numOfRows, pCols->maxPoints, pCols->bitmapMode, - isMerge); - ++dcol; - continue; - } - - STColumn *pRowCol = schemaColAt(pSchema, rcol); - SCellVal sVal = {0}; - if (pRowCol->colId == pDataCol->colId) { - if (tdGetTpRowValOfCol(&sVal, pRow, pBitmap, pRowCol->type, pRowCol->offset - sizeof(TSKEY), rcol - 1) < 0) { - return terrno; - } - tdAppendValToDataCol(pDataCol, sVal.valType, sVal.val, pCols->numOfRows, pCols->maxPoints, pCols->bitmapMode, - isMerge); - ++dcol; - ++rcol; - } else if (pRowCol->colId < pDataCol->colId) { - ++rcol; - } else { - tdAppendValToDataCol(pDataCol, TD_VTYPE_NULL, NULL, pCols->numOfRows, pCols->maxPoints, pCols->bitmapMode, - isMerge); - ++dcol; - } - } -#if 0 - ++pCols->numOfRows; -#endif - - return TSDB_CODE_SUCCESS; -} -// internal -static int32_t tdAppendKvRowToDataCol(STSRow *pRow, STSchema *pSchema, SDataCols *pCols, bool isMerge) 
{ - ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < TD_ROW_KEY(pRow)); - - int rcol = 0; - int dcol = 1; - int tRowCols = tdRowGetNCols(pRow) - 1; // the primary TS key not included in kvRowColIdx part - int tSchemaCols = schemaNCols(pSchema) - 1; - void *pBitmap = tdGetBitmapAddrKv(pRow, tdRowGetNCols(pRow)); - - SDataCol *pDataCol = &(pCols->cols[0]); - ASSERT(pDataCol->colId == PRIMARYKEY_TIMESTAMP_COL_ID); - tdAppendValToDataCol(pDataCol, TD_VTYPE_NORM, &pRow->ts, pCols->numOfRows, pCols->maxPoints, pCols->bitmapMode, - isMerge); - - while (dcol < pCols->numOfCols) { - pDataCol = &(pCols->cols[dcol]); - if (rcol >= tRowCols || rcol >= tSchemaCols) { - tdAppendValToDataCol(pDataCol, TD_VTYPE_NULL, NULL, pCols->numOfRows, pCols->maxPoints, pCols->bitmapMode, - isMerge); - ++dcol; - continue; - } - - SKvRowIdx *pIdx = tdKvRowColIdxAt(pRow, rcol); - int16_t colIdx = -1; - if (pIdx) { - colIdx = POINTER_DISTANCE(pIdx, TD_ROW_COL_IDX(pRow)) / sizeof(SKvRowIdx); - } - TASSERT(colIdx >= 0); - SCellVal sVal = {0}; - if (pIdx->colId == pDataCol->colId) { - if (tdGetKvRowValOfCol(&sVal, pRow, pBitmap, pIdx->offset, colIdx) < 0) { - return terrno; - } - tdAppendValToDataCol(pDataCol, sVal.valType, sVal.val, pCols->numOfRows, pCols->maxPoints, pCols->bitmapMode, - isMerge); - ++dcol; - ++rcol; - } else if (pIdx->colId < pDataCol->colId) { - ++rcol; - } else { - tdAppendValToDataCol(pDataCol, TD_VTYPE_NULL, NULL, pCols->numOfRows, pCols->maxPoints, pCols->bitmapMode, - isMerge); - ++dcol; - } - } -#if 0 - ++pCols->numOfRows; -#endif - - return TSDB_CODE_SUCCESS; -} - -/** - * @brief exposed - * - * @param pRow - * @param pSchema - * @param pCols - */ -int32_t tdAppendSTSRowToDataCol(STSRow *pRow, STSchema *pSchema, SDataCols *pCols, bool isMerge) { -#ifdef TD_DEBUG_PRINT_TSDB_LOAD_DCOLS - printf("%s:%d ts: %" PRIi64 " sver:%d maxCols:%" PRIi16 " nCols:%" PRIi16 ", nRows:%d\n", __func__, __LINE__, - TD_ROW_KEY(pRow), TD_ROW_SVER(pRow), pCols->maxCols, pCols->numOfCols, pCols->numOfRows); -#endif - if (TD_IS_TP_ROW(pRow)) { - return tdAppendTpRowToDataCol(pRow, pSchema, pCols, isMerge); - } else if (TD_IS_KV_ROW(pRow)) { - return tdAppendKvRowToDataCol(pRow, pSchema, pCols, isMerge); - } else { - ASSERT(0); - } - return TSDB_CODE_SUCCESS; -} - -/** - * @brief source data has more priority than target - * - * @param target - * @param source - * @param rowsToMerge - * @param pOffset - * @param update - * @param maxVer - * @return int - */ -int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset, bool update, - TDRowVerT maxVer) { - ASSERT(rowsToMerge > 0 && rowsToMerge <= source->numOfRows); - ASSERT(target->numOfCols == source->numOfCols); - int offset = 0; - - if (pOffset == NULL) { - pOffset = &offset; - } - - SDataCols *pTarget = NULL; - - if ((target->numOfRows == 0) || (dataColsKeyLast(target) < dataColsKeyAtRow(source, *pOffset))) { // No overlap - ASSERT(target->numOfRows + rowsToMerge <= target->maxPoints); - // TODO: filter the maxVer - TSKEY lastKey = TSKEY_INITIAL_VAL; - for (int i = 0; i < rowsToMerge; ++i) { - bool merge = false; - for (int j = 0; j < source->numOfCols; j++) { - if (source->cols[j].len > 0 || target->cols[j].len > 0) { - SCellVal sVal = {0}; - if (tdGetColDataOfRow(&sVal, source->cols + j, i + (*pOffset), source->bitmapMode) < 0) { - TASSERT(0); - } - - if (j == 0) { - if (lastKey == *(TSKEY *)sVal.val) { - if (!update) { - break; - } - merge = true; - } else if (lastKey != TSKEY_INITIAL_VAL) { - ++target->numOfRows; - } - - lastKey 
= *(TSKEY *)sVal.val; - } - if (i == 0) { - (target->cols + j)->bitmap = (source->cols + j)->bitmap; - } - - tdAppendValToDataCol(target->cols + j, sVal.valType, sVal.val, target->numOfRows, target->maxPoints, - target->bitmapMode, merge); - } - } - } - if (lastKey != TSKEY_INITIAL_VAL) { - ++target->numOfRows; - } - (*pOffset) += rowsToMerge; - } else { - pTarget = tdDupDataCols(target, true); - if (pTarget == NULL) goto _err; - - int iter1 = 0; - tdMergeTwoDataCols(target, pTarget, &iter1, pTarget->numOfRows, source, pOffset, source->numOfRows, - pTarget->numOfRows + rowsToMerge, update); - } - - tdFreeDataCols(pTarget); - return 0; - -_err: - tdFreeDataCols(pTarget); - return -1; -} - -static void tdAppendValToDataCols(SDataCols *target, SDataCols *src, int iter, bool isMerge) { - for (int i = 0; i < src->numOfCols; ++i) { - ASSERT(target->cols[i].type == src->cols[i].type); - if (src->cols[i].len > 0 || target->cols[i].len > 0) { - SCellVal sVal = {0}; - if (tdGetColDataOfRow(&sVal, src->cols + i, iter, src->bitmapMode) < 0) { - TASSERT(0); - } - if (isMerge) { - if (!tdValTypeIsNone(sVal.valType)) { - tdAppendValToDataCol(&(target->cols[i]), sVal.valType, sVal.val, target->numOfRows, target->maxPoints, - target->bitmapMode, isMerge); - } else { - // Keep the origin value for None - } - } else { - tdAppendValToDataCol(&(target->cols[i]), sVal.valType, sVal.val, target->numOfRows, target->maxPoints, - target->bitmapMode, isMerge); - } - } - } -} -/** - * @brief src2 data has more priority than src1 - * - * @param target - * @param src1 - * @param iter1 - * @param limit1 - * @param src2 - * @param iter2 - * @param limit2 - * @param tRows - * @param update - */ -static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2, - int limit2, int tRows, bool update) { - tdResetDataCols(target); - target->bitmapMode = src1->bitmapMode; - ASSERT(limit1 <= src1->numOfRows && limit2 <= src2->numOfRows); - int32_t nRows = 0; - - // TODO: filter the maxVer - // TODO: handle the delete function - TSKEY lastKey = TSKEY_INITIAL_VAL; - while (nRows < tRows) { - if (*iter1 >= limit1 && *iter2 >= limit2) break; - - TSKEY key1 = (*iter1 >= limit1) ? INT64_MAX : dataColsKeyAt(src1, *iter1); - // TKEY tkey1 = (*iter1 >= limit1) ? TKEY_NULL : dataColsTKeyAt(src1, *iter1); - TSKEY key2 = (*iter2 >= limit2) ? INT64_MAX : dataColsKeyAt(src2, *iter2); - // TKEY tkey2 = (*iter2 >= limit2) ? 
TKEY_NULL : dataColsTKeyAt(src2, *iter2); - - // ASSERT(tkey1 == TKEY_NULL || (!TKEY_IS_DELETED(tkey1))); - - if (key1 <= key2) { - // select key1 if not delete - if (update && (lastKey == key1)) { - tdAppendValToDataCols(target, src1, *iter1, true); - } else if (lastKey != key1) { - if (lastKey != TSKEY_INITIAL_VAL) { - ++target->numOfRows; - } - tdAppendValToDataCols(target, src1, *iter1, false); - } - ++nRows; - ++(*iter1); - lastKey = key1; - } else { - // use key2 if not deleted - // TODO: handle the delete function - if (update && (lastKey == key2)) { - tdAppendValToDataCols(target, src2, *iter2, true); - } else if (lastKey != key2) { - if (lastKey != TSKEY_INITIAL_VAL) { - ++target->numOfRows; - } - tdAppendValToDataCols(target, src2, *iter2, false); - } - - ++nRows; - ++(*iter2); - lastKey = key2; - } - - ASSERT(target->numOfRows <= target->maxPoints - 1); - } - if (lastKey != TSKEY_INITIAL_VAL) { - ++target->numOfRows; - } -} - -STSRow *mergeTwoRows(void *buffer, STSRow *row1, STSRow *row2, STSchema *pSchema1, STSchema *pSchema2) { -#if 0 - ASSERT(TD_ROW_KEY(row1) == TD_ROW_KEY(row2)); - ASSERT(schemaVersion(pSchema1) == TD_ROW_SVER(row1)); - ASSERT(schemaVersion(pSchema2) == TD_ROW_SVER(row2)); - ASSERT(schemaVersion(pSchema1) >= schemaVersion(pSchema2)); -#endif - -#if 0 - SArray *stashRow = taosArrayInit(pSchema1->numOfCols, sizeof(SColInfo)); - if (stashRow == NULL) { - return NULL; - } - - STSRow pRow = buffer; - STpRow dataRow = memRowDataBody(pRow); - memRowSetType(pRow, SMEM_ROW_DATA); - dataRowSetVersion(dataRow, schemaVersion(pSchema1)); // use latest schema version - dataRowSetLen(dataRow, (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + pSchema1->flen)); - - TDRowLenT dataLen = 0, kvLen = TD_MEM_ROW_KV_HEAD_SIZE; - - int32_t i = 0; // row1 - int32_t j = 0; // row2 - int32_t nCols1 = schemaNCols(pSchema1); - int32_t nCols2 = schemaNCols(pSchema2); - SColInfo colInfo = {0}; - int32_t kvIdx1 = 0, kvIdx2 = 0; - - while (i < nCols1) { - STColumn *pCol = schemaColAt(pSchema1, i); - void * val1 = tdGetMemRowDataOfColEx(row1, pCol->colId, pCol->type, TD_DATA_ROW_HEAD_SIZE + pCol->offset, &kvIdx1); - // if val1 != NULL, use val1; - if (val1 != NULL && !isNull(val1, pCol->type)) { - tdAppendColVal(dataRow, val1, pCol->type, pCol->offset); - kvLen += tdGetColAppendLen(SMEM_ROW_KV, val1, pCol->type); - setSColInfo(&colInfo, pCol->colId, pCol->type, val1); - taosArrayPush(stashRow, &colInfo); - ++i; // next col - continue; - } - - void *val2 = NULL; - while (j < nCols2) { - STColumn *tCol = schemaColAt(pSchema2, j); - if (tCol->colId < pCol->colId) { - ++j; - continue; - } - if (tCol->colId == pCol->colId) { - val2 = tdGetMemRowDataOfColEx(row2, tCol->colId, tCol->type, TD_DATA_ROW_HEAD_SIZE + tCol->offset, &kvIdx2); - } else if (tCol->colId > pCol->colId) { - // set NULL - } - break; - } // end of while(jtype); - } - tdAppendColVal(dataRow, val2, pCol->type, pCol->offset); - if (!isNull(val2, pCol->type)) { - kvLen += tdGetColAppendLen(SMEM_ROW_KV, val2, pCol->type); - setSColInfo(&colInfo, pCol->colId, pCol->type, val2); - taosArrayPush(stashRow, &colInfo); - } - - ++i; // next col - } - - dataLen = TD_ROW_LEN(pRow); - - if (kvLen < dataLen) { - // scan stashRow and generate SKVRow - memset(buffer, 0, sizeof(dataLen)); - STSRow tRow = buffer; - memRowSetType(tRow, SMEM_ROW_KV); - SKVRow kvRow = (SKVRow)memRowKvBody(tRow); - int16_t nKvNCols = (int16_t) taosArrayGetSize(stashRow); - kvRowSetLen(kvRow, (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nKvNCols)); - kvRowSetNCols(kvRow, 
nKvNCols); - memRowSetKvVersion(tRow, pSchema1->version); - - int32_t toffset = 0; - int16_t k; - for (k = 0; k < nKvNCols; ++k) { - SColInfo *pColInfo = taosArrayGet(stashRow, k); - tdAppendKvColVal(kvRow, pColInfo->colVal, true, pColInfo->colId, pColInfo->colType, toffset); - toffset += sizeof(SColIdx); - } - ASSERT(kvLen == TD_ROW_LEN(tRow)); - } - taosArrayDestroy(stashRow); - return buffer; -#endif - return NULL; -} - -SDataCols *tdDupDataCols(SDataCols *pDataCols, bool keepData) { - SDataCols *pRet = tdNewDataCols(pDataCols->maxCols, pDataCols->maxPoints); - if (pRet == NULL) return NULL; - - pRet->numOfCols = pDataCols->numOfCols; - pRet->bitmapMode = pDataCols->bitmapMode; - pRet->sversion = pDataCols->sversion; - if (keepData) pRet->numOfRows = pDataCols->numOfRows; - - for (int i = 0; i < pDataCols->numOfCols; ++i) { - pRet->cols[i].type = pDataCols->cols[i].type; - pRet->cols[i].bitmap = pDataCols->cols[i].bitmap; - pRet->cols[i].colId = pDataCols->cols[i].colId; - pRet->cols[i].bytes = pDataCols->cols[i].bytes; - pRet->cols[i].offset = pDataCols->cols[i].offset; - - if (keepData) { - if (pDataCols->cols[i].len > 0) { - if (tdAllocMemForCol(&pRet->cols[i], pRet->maxPoints) < 0) { - tdFreeDataCols(pRet); - return NULL; - } - pRet->cols[i].len = pDataCols->cols[i].len; - memcpy(pRet->cols[i].pData, pDataCols->cols[i].pData, pDataCols->cols[i].len); - if (IS_VAR_DATA_TYPE(pRet->cols[i].type)) { - int dataOffSize = sizeof(VarDataOffsetT) * pDataCols->maxPoints; - memcpy(pRet->cols[i].dataOff, pDataCols->cols[i].dataOff, dataOffSize); - } - if (!TD_COL_ROWS_NORM(pRet->cols + i)) { - memcpy(pRet->cols[i].pBitmap, pDataCols->cols[i].pBitmap, TD_BITMAP_BYTES(pDataCols->numOfRows)); - } - } - } - } - - return pRet; -} - void tdSRowPrint(STSRow *row, STSchema *pSchema, const char *tag) { STSRowIter iter = {0}; tdSTSRowIterInit(&iter, pSchema); @@ -1020,32 +420,6 @@ void tdSCellValPrint(SCellVal *pVal, int8_t colType) { } } -int32_t dataColGetNEleLen(SDataCol *pDataCol, int32_t rows, int8_t bitmapMode) { - ASSERT(rows > 0); - int32_t result = 0; - - if (IS_VAR_DATA_TYPE(pDataCol->type)) { - result += pDataCol->dataOff[rows - 1]; - SCellVal val = {0}; - if (tdGetColDataOfRow(&val, pDataCol, rows - 1, bitmapMode) < 0) { - TASSERT(0); - } - - // Currently, count the varDataTLen in of Null/None cols considering back compatibility test for 2.4 - result += varDataTLen(val.val); - // TODO: later on, don't save Null/None for VarDataT for 3.0 - // if (tdValTypeIsNorm(val.valType)) { - // result += varDataTLen(val.val); - // } - } else { - result += TYPE_BYTES[pDataCol->type] * rows; - } - - ASSERT(pDataCol->len == result); - - return result; -} - bool tdSKvRowGetVal(STSRow *pRow, col_id_t colId, col_id_t colIdx, SCellVal *pVal) { if (colId == PRIMARYKEY_TIMESTAMP_COL_ID) { tdRowSetVal(pVal, TD_VTYPE_NORM, TD_ROW_KEY_ADDR(pRow)); @@ -1082,40 +456,6 @@ bool tdSTpRowGetVal(STSRow *pRow, col_id_t colId, col_type_t colType, int32_t fl return true; } -int32_t tdGetColDataOfRow(SCellVal *pVal, SDataCol *pCol, int32_t row, int8_t bitmapMode) { - if (isAllRowsNone(pCol)) { - pVal->valType = TD_VTYPE_NONE; -#ifdef TD_SUPPORT_READ2 - pVal->val = (void *)getNullValue(pCol->type); -#else - pVal->val = NULL; -#endif - return TSDB_CODE_SUCCESS; - } - - if (TD_COL_ROWS_NORM(pCol)) { - pVal->valType = TD_VTYPE_NORM; - } else if (tdGetBitmapValType(pCol->pBitmap, row, &(pVal->valType), bitmapMode) < 0) { - return terrno; - } - - if (tdValTypeIsNorm(pVal->valType)) { - if (IS_VAR_DATA_TYPE(pCol->type)) { - pVal->val 
= POINTER_SHIFT(pCol->pData, pCol->dataOff[row]); - } else { - pVal->val = POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * row); - } - } else { - pVal->valType = TD_VTYPE_NULL; -#ifdef TD_SUPPORT_READ2 - pVal->val = (void *)getNullValue(pCol->type); -#else - pVal->val = NULL; -#endif - } - return TSDB_CODE_SUCCESS; -} - bool tdSTSRowIterNext(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCellVal *pVal) { if (colId == PRIMARYKEY_TIMESTAMP_COL_ID) { pVal->val = &pIter->pRow->ts; diff --git a/source/common/test/dataformatTest.cpp b/source/common/test/dataformatTest.cpp index a52bb6b516..d16e35ff07 100644 --- a/source/common/test/dataformatTest.cpp +++ b/source/common/test/dataformatTest.cpp @@ -285,8 +285,8 @@ int32_t debugPrintSColVal(SColVal *cv, int8_t type) { } void debugPrintTSRow(STSRow2 *row, STSchema *pTSchema, const char *tags, int32_t ln) { - printf("%s:%d %s:v%d:%d ", tags, ln, (row->flags & 0xf0) ? "KV" : "TP", row->sver, row->nData); - for (int16_t i = 0; i < schemaNCols(pTSchema); ++i) { + // printf("%s:%d %s:v%d:%d ", tags, ln, (row->flags & 0xf0) ? "KV" : "TP", row->sver, row->nData); + for (int16_t i = 0; i < pTSchema->numOfCols; ++i) { SColVal cv = {0}; tTSRowGet(row, pTSchema, i, &cv); debugPrintSColVal(&cv, pTSchema->columns[i].type); @@ -393,7 +393,7 @@ static int32_t checkSColVal(const char *rawVal, SColVal *cv, int8_t type) { } static void checkTSRow(const char **data, STSRow2 *row, STSchema *pTSchema) { - for (int16_t i = 0; i < schemaNCols(pTSchema); ++i) { + for (int16_t i = 0; i < pTSchema->numOfCols; ++i) { SColVal cv = {0}; tTSRowGet(row, pTSchema, i, &cv); checkSColVal(data[i], &cv, pTSchema->columns[i].type); diff --git a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h index 6f00767eb0..6fc0ab4e5d 100644 --- a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h +++ b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h @@ -31,7 +31,7 @@ typedef struct SVnodeMgmt { const char *path; const char *name; SQWorkerPool queryPool; - SQWorkerPool fetchPool; + SWWorkerPool fetchPool; SWWorkerPool syncPool; SWWorkerPool writePool; SWWorkerPool applyPool; diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c index 90f852eed1..cbcb541200 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c @@ -31,7 +31,7 @@ SVnodeObj **vmGetVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes) { SVnodeObj *pVnode = *ppVnode; if (pVnode && num < size) { int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1); - // dTrace("vgId:%d, acquire vnode, refCount:%d", pVnode->vgId, refCount); + // dTrace("vgId:%d, acquire vnode list, ref:%d", pVnode->vgId, refCount); pVnodes[num++] = (*ppVnode); pIter = taosHashIterate(pMgmt->hash, pIter); } else { diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index eac9052289..36f6fab699 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -23,6 +23,7 @@ SVnodeObj *vmAcquireVnode(SVnodeMgmt *pMgmt, int32_t vgId) { taosHashGetDup(pMgmt->hash, &vgId, sizeof(int32_t), (void *)&pVnode); if (pVnode == NULL || pVnode->dropped) { terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; + pVnode = NULL; } else { int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1); // dTrace("vgId:%d, acquire vnode, ref:%d", pVnode->vgId, refCount); @@ -82,6 +83,7 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) { taosThreadRwlockUnlock(&pMgmt->lock); 
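The vmInt.c hunks here tighten vnode lifetime handling: vmAcquireVnode now returns NULL when the lookup fails or the vnode is dropped instead of handing back a stale pointer, and vmCloseVnode (continued just below) releases its own reference and then waits for the count to drain before tearing the vnode down. A minimal sketch of that acquire/release/drain pattern, using C11 atomics and hypothetical names rather than the taos wrappers:

// Illustrative only: simplified acquire/release/close with standard C11
// atomics and hypothetical names (SObj, obj_acquire, ...); the real code
// uses atomic_add_fetch_32 and taosMsleep.
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct {
  atomic_int refCount;
  bool       dropped;
} SObj;

static SObj *obj_acquire(SObj *pObj) {
  if (pObj == NULL || pObj->dropped) {
    return NULL;                        // report failure with NULL, never a stale pointer
  }
  atomic_fetch_add(&pObj->refCount, 1);
  return pObj;
}

static void obj_release(SObj *pObj) { atomic_fetch_sub(&pObj->refCount, 1); }

static void obj_close(SObj *pObj) {
  obj_release(pObj);                    // drop the closer's own reference first
  while (atomic_load(&pObj->refCount) > 0) {
    sched_yield();                      // wait until every in-flight user has released
  }
  // safe to free the object's resources here
}

int main(void) {
  SObj obj = {.dropped = false};
  atomic_init(&obj.refCount, 1);        // creator holds one reference
  SObj *user = obj_acquire(&obj);       // a worker takes a reference
  obj_release(user);                    // ...and gives it back
  obj_close(&obj);                      // close drops the creator's ref and waits
  return 0;
}

The close path must release its own reference before waiting, otherwise the drain loop can never terminate.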
vmReleaseVnode(pMgmt, pVnode); + dTrace("vgId:%d, wait for vnode ref become 0", pVnode->vgId); while (pVnode->refCount > 0) taosMsleep(10); dTrace("vgId:%d, wait for vnode queue is empty", pVnode->vgId); diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c index 1d795c74f2..93f93b1ab7 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c @@ -81,21 +81,26 @@ static void vmProcessQueryQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) { taosFreeQitem(pMsg); } -static void vmProcessFetchQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) { - SVnodeObj *pVnode = pInfo->ahandle; - const STraceId *trace = &pMsg->info.traceId; +static void vmProcessFetchQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) { + SVnodeObj *pVnode = pInfo->ahandle; + SRpcMsg *pMsg = NULL; - dGTrace("vgId:%d, msg:%p get from vnode-fetch queue", pVnode->vgId, pMsg); - int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, pMsg, pInfo); - if (code != 0) { - if (terrno != 0) code = terrno; - dGError("vgId:%d, msg:%p failed to fetch since %s", pVnode->vgId, pMsg, terrstr()); - vmSendRsp(pMsg, code); + for (int32_t i = 0; i < numOfMsgs; ++i) { + if (taosGetQitem(qall, (void **)&pMsg) == 0) continue; + const STraceId *trace = &pMsg->info.traceId; + dGTrace("vgId:%d, msg:%p get from vnode-fetch queue", pVnode->vgId, pMsg); + + int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, pMsg, pInfo); + if (code != 0) { + if (terrno != 0) code = terrno; + dGError("vgId:%d, msg:%p failed to fetch since %s", pVnode->vgId, pMsg, terrstr()); + vmSendRsp(pMsg, code); + } + + dGTrace("vgId:%d, msg:%p is freed, code:0x%x", pVnode->vgId, pMsg, code); + rpcFreeCont(pMsg->pCont); + taosFreeQitem(pMsg); } - - dGTrace("vgId:%d, msg:%p is freed, code:0x%x", pVnode->vgId, pMsg, code); - rpcFreeCont(pMsg->pCont); - taosFreeQitem(pMsg); } static void vmProcessSyncQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) { @@ -201,9 +206,9 @@ int32_t vmPutRpcMsgToQueue(SVnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) { int32_t code = vmPutMsgToQueue(pMgmt, pMsg, qtype); if (code != 0) { dTrace("msg:%p, is freed", pMsg); - taosFreeQitem(pMsg); rpcFreeCont(pMsg->pCont); pRpc->pCont = NULL; + taosFreeQitem(pMsg); } return code; @@ -232,8 +237,8 @@ int32_t vmGetQueueSize(SVnodeMgmt *pMgmt, int32_t vgId, EQueueType qtype) { default: break; } + vmReleaseVnode(pMgmt, pVnode); } - vmReleaseVnode(pMgmt, pVnode); return size; } @@ -242,7 +247,7 @@ int32_t vmAllocQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) { pVnode->pSyncQ = tWWorkerAllocQueue(&pMgmt->syncPool, pVnode, (FItems)vmProcessSyncQueue); pVnode->pApplyQ = tWWorkerAllocQueue(&pMgmt->applyPool, pVnode->pImpl, (FItems)vnodeApplyWriteMsg); pVnode->pQueryQ = tQWorkerAllocQueue(&pMgmt->queryPool, pVnode, (FItem)vmProcessQueryQueue); - pVnode->pFetchQ = tQWorkerAllocQueue(&pMgmt->fetchPool, pVnode, (FItem)vmProcessFetchQueue); + pVnode->pFetchQ = tWWorkerAllocQueue(&pMgmt->fetchPool, pVnode, (FItems)vmProcessFetchQueue); if (pVnode->pWriteQ == NULL || pVnode->pSyncQ == NULL || pVnode->pApplyQ == NULL || pVnode->pQueryQ == NULL || pVnode->pFetchQ == NULL) { @@ -250,7 +255,11 @@ int32_t vmAllocQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) { return -1; } - dDebug("vgId:%d, queue is alloced", pVnode->vgId); + dDebug("vgId:%d, write-queue:%p is alloced", pVnode->vgId, pVnode->pWriteQ); + dDebug("vgId:%d, sync-queue:%p is alloced", pVnode->vgId, pVnode->pSyncQ); + dDebug("vgId:%d, apply-queue:%p is alloced", pVnode->vgId, 
pVnode->pApplyQ); + dDebug("vgId:%d, query-queue:%p is alloced", pVnode->vgId, pVnode->pQueryQ); + dDebug("vgId:%d, fetch-queue:%p is alloced", pVnode->vgId, pVnode->pFetchQ); return 0; } @@ -259,7 +268,7 @@ void vmFreeQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) { tWWorkerFreeQueue(&pMgmt->applyPool, pVnode->pApplyQ); tWWorkerFreeQueue(&pMgmt->syncPool, pVnode->pSyncQ); tQWorkerFreeQueue(&pMgmt->queryPool, pVnode->pQueryQ); - tQWorkerFreeQueue(&pMgmt->fetchPool, pVnode->pFetchQ); + tWWorkerFreeQueue(&pMgmt->fetchPool, pVnode->pFetchQ); pVnode->pWriteQ = NULL; pVnode->pSyncQ = NULL; pVnode->pApplyQ = NULL; @@ -275,11 +284,10 @@ int32_t vmStartWorker(SVnodeMgmt *pMgmt) { pQPool->max = tsNumOfVnodeQueryThreads; if (tQWorkerInit(pQPool) != 0) return -1; - SQWorkerPool *pFPool = &pMgmt->fetchPool; + SWWorkerPool *pFPool = &pMgmt->fetchPool; pFPool->name = "vnode-fetch"; - pFPool->min = tsNumOfVnodeFetchThreads; pFPool->max = tsNumOfVnodeFetchThreads; - if (tQWorkerInit(pFPool) != 0) return -1; + if (tWWorkerInit(pFPool) != 0) return -1; SWWorkerPool *pWPool = &pMgmt->writePool; pWPool->name = "vnode-write"; @@ -325,6 +333,6 @@ void vmStopWorker(SVnodeMgmt *pMgmt) { tWWorkerCleanup(&pMgmt->applyPool); tWWorkerCleanup(&pMgmt->syncPool); tQWorkerCleanup(&pMgmt->queryPool); - tQWorkerCleanup(&pMgmt->fetchPool); + tWWorkerCleanup(&pMgmt->fetchPool); dDebug("vnode workers are closed"); } diff --git a/source/dnode/mnode/impl/src/mndSync.c b/source/dnode/mnode/impl/src/mndSync.c index bcf926e5ee..3c3864b620 100644 --- a/source/dnode/mnode/impl/src/mndSync.c +++ b/source/dnode/mnode/impl/src/mndSync.c @@ -56,20 +56,22 @@ void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbM sdbSetApplyInfo(pMnode->pSdb, cbMeta.index, cbMeta.term, cbMeta.lastConfigIndex); } - if (pMgmt->transId == transId) { + if (pMgmt->transId == transId && transId != 0) { if (pMgmt->errCode != 0) { mError("trans:%d, failed to propose since %s", transId, tstrerror(pMgmt->errCode)); } pMgmt->transId = 0; tsem_post(&pMgmt->syncSem); } else { +#if 1 + mError("trans:%d, invalid commit msg since trandId not match with %d", transId, pMgmt->transId); +#else STrans *pTrans = mndAcquireTrans(pMnode, transId); if (pTrans != NULL) { mndTransExecute(pMnode, pTrans); mndReleaseTrans(pMnode, pTrans); } -#if 0 - sdbWriteFile(pMnode->pSdb, SDB_WRITE_DELTA); + // sdbWriteFile(pMnode->pSdb, SDB_WRITE_DELTA); #endif } } diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index 8f2d3bde09..9f32964fa9 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -137,8 +137,7 @@ int64_t tsdbGetNumOfRowsInMemTable(STsdbReader *pHandle); void *tsdbGetIdx(SMeta *pMeta); void *tsdbGetIvtIdx(SMeta *pMeta); -int32_t tsdbLastRowReaderOpen(void *pVnode, int32_t type, SArray *pTableIdList, int32_t *colId, int32_t numOfCols, - void **pReader); +int32_t tsdbLastRowReaderOpen(void *pVnode, int32_t type, SArray *pTableIdList, int32_t numOfCols, void **pReader); int32_t tsdbRetrieveLastRow(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds); int32_t tsdbLastrowReaderClose(void *pReader); int32_t tsdbGetTableSchema(SVnode *pVnode, int64_t uid, STSchema **pSchema, int64_t *suid); diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h index 8abaac6dff..c62b7e95bf 100644 --- a/source/dnode/vnode/src/inc/tq.h +++ b/source/dnode/vnode/src/inc/tq.h @@ -89,8 +89,6 @@ typedef struct { STqExecTb execTb; STqExecDb execDb; }; - // TODO remove it - int64_t tsdbEndVer; } 
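The vmWorker.c changes completed above move the vnode fetch queue from the query-style pool (an FItem callback handling one SRpcMsg at a time) to the write-style pool (an FItems callback that drains a batch from an STaosQall), so vmProcessFetchQueue now loops over numOfMsgs items and frees each one after responding. A minimal, self-contained sketch of that batch-callback shape, with hypothetical types standing in for SRpcMsg and STaosQall:

// Illustrative only: hypothetical Msg/Batch types; the loop mirrors the
// FItems-style batch callback the fetch queue switches to in this patch.
#include <stdio.h>

typedef struct { int id; } Msg;
typedef struct { Msg *items[8]; int n; int pos; } Batch;

static int batch_next(Batch *b, Msg **out) {     // returns 0 when the batch is exhausted
  if (b->pos >= b->n) return 0;
  *out = b->items[b->pos++];
  return 1;
}

static int handle_fetch(Msg *m) { return (m->id % 2) ? -1 : 0; }  // fake handler

static void process_fetch_batch(Batch *b, int numOfMsgs) {
  Msg *m = NULL;
  for (int i = 0; i < numOfMsgs; ++i) {
    if (batch_next(b, &m) == 0) continue;        // same guard as the taosGetQitem call
    int code = handle_fetch(m);
    if (code != 0) {
      printf("msg %d failed, rsp code %d\n", m->id, code);  // stand-in for vmSendRsp
    }
    // the request memory is released here in the patch (rpcFreeCont/taosFreeQitem)
  }
}

int main(void) {
  Msg a = {1}, c = {2};
  Batch b = {.items = {&a, &c}, .n = 2, .pos = 0};
  process_fetch_batch(&b, b.n);
  return 0;
}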
STqExecHandle; @@ -101,6 +99,8 @@ typedef struct { int32_t epoch; int8_t fetchMeta; + int64_t snapshotVer; + // TODO remove SWalReader* pWalReader; @@ -131,7 +131,7 @@ typedef struct { static STqMgmt tqMgmt = {0}; // tqRead -int64_t tqScan(STQ* pTq, const STqExecHandle* pExec, SMqDataRsp* pRsp, STqOffsetVal* offset); +int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* offset); int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHead** pHeadWithCkSum); // tqExec diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index fbb972fafe..df80a4c218 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -244,6 +244,9 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) { STqOffsetVal reqOffset = pReq->reqOffset; STqOffsetVal fetchOffsetNew; + // todo + workerId = 0; + // 1.find handle STqHandle* pHandle = taosHashGet(pTq->handles, pReq->subKey, strlen(pReq->subKey)); /*ASSERT(pHandle);*/ @@ -284,7 +287,8 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) { fetchOffsetNew = pOffset->val; char formatBuf[80]; tFormatOffset(formatBuf, 80, &fetchOffsetNew); - tqDebug("tmq poll: consumer %" PRId64 ", subkey %s, offset reset to %s", consumerId, pHandle->subKey, formatBuf); + tqDebug("tmq poll: consumer %" PRId64 ", subkey %s, vg %d, offset reset to %s", consumerId, pHandle->subKey, + TD_VID(pTq->pVnode), formatBuf); } else { if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEAST) { if (pReq->useSnapshot && pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { @@ -299,8 +303,8 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) { } } else if (reqOffset.type == TMQ_OFFSET__RESET_LATEST) { tqOffsetResetToLog(&dataRsp.rspOffset, walGetLastVer(pTq->pVnode->pWal)); - tqDebug("tmq poll: consumer %ld, subkey %s, offset reset to %ld", consumerId, pHandle->subKey, - dataRsp.rspOffset.version); + tqDebug("tmq poll: consumer %ld, subkey %s, vg %d, offset reset to %ld", consumerId, pHandle->subKey, + TD_VID(pTq->pVnode), dataRsp.rspOffset.version); if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) { code = -1; } @@ -318,10 +322,10 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) { // 3.query if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { - if (fetchOffsetNew.type == TMQ_OFFSET__LOG) { - fetchOffsetNew.version++; - } - if (tqScan(pTq, &pHandle->execHandle, &dataRsp, &fetchOffsetNew) < 0) { + /*if (fetchOffsetNew.type == TMQ_OFFSET__LOG) {*/ + /*fetchOffsetNew.version++;*/ + /*}*/ + if (tqScan(pTq, pHandle, &dataRsp, &fetchOffsetNew) < 0) { ASSERT(0); code = -1; goto OVER; @@ -480,30 +484,28 @@ int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) { pHandle->fetchMeta = req.withMeta; pHandle->pWalReader = walOpenReader(pTq->pVnode->pWal, NULL); - /*for (int32_t i = 0; i < 5; i++) {*/ - /*pHandle->execHandle.pExecReader[i] = tqOpenReader(pTq->pVnode);*/ - /*}*/ + + // TODO version should be assigned in preprocess int64_t ver = walGetCommittedVer(pTq->pVnode->pWal); if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { pHandle->execHandle.execCol.qmsg = req.qmsg; + pHandle->snapshotVer = ver; req.qmsg = NULL; for (int32_t i = 0; i < 5; i++) { SReadHandle handle = { - .tqReader = pHandle->execHandle.pExecReader[i], .meta = pTq->pVnode->pMeta, .vnode = pTq->pVnode, .initTableReader = true, .initTqReader = true, .version = ver, }; - pHandle->execHandle.execCol.task[i] = 
qCreateStreamExecTaskInfo(pHandle->execHandle.execCol.qmsg, &handle); + pHandle->execHandle.execCol.task[i] = qCreateQueueExecTaskInfo(pHandle->execHandle.execCol.qmsg, &handle); ASSERT(pHandle->execHandle.execCol.task[i]); void* scanner = NULL; qExtractStreamScanner(pHandle->execHandle.execCol.task[i], &scanner); ASSERT(scanner); pHandle->execHandle.pExecReader[i] = qExtractReaderFromStreamScanner(scanner); ASSERT(pHandle->execHandle.pExecReader[i]); - pHandle->execHandle.tsdbEndVer = ver; } } else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB) { for (int32_t i = 0; i < 5; i++) { diff --git a/source/dnode/vnode/src/tq/tqExec.c b/source/dnode/vnode/src/tq/tqExec.c index 54e46e7b9a..0bdbe82b77 100644 --- a/source/dnode/vnode/src/tq/tqExec.c +++ b/source/dnode/vnode/src/tq/tqExec.c @@ -59,13 +59,13 @@ static int32_t tqAddTbNameToRsp(const STQ* pTq, int64_t uid, SMqDataRsp* pRsp) { return 0; } -int64_t tqScan(STQ* pTq, const STqExecHandle* pExec, SMqDataRsp* pRsp, STqOffsetVal* pOffset) { - qTaskInfo_t task = pExec->execCol.task[0]; +int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* pOffset) { + const STqExecHandle* pExec = &pHandle->execHandle; + qTaskInfo_t task = pExec->execCol.task[0]; if (qStreamPrepareScan(task, pOffset) < 0) { ASSERT(pOffset->type == TMQ_OFFSET__LOG); pRsp->rspOffset = *pOffset; - pRsp->rspOffset.version--; return 0; } @@ -73,9 +73,11 @@ int64_t tqScan(STQ* pTq, const STqExecHandle* pExec, SMqDataRsp* pRsp, STqOffset while (1) { SSDataBlock* pDataBlock = NULL; uint64_t ts = 0; + tqDebug("task start to execute"); if (qExecTask(task, &pDataBlock, &ts) < 0) { ASSERT(0); } + tqDebug("task execute end, get %p", pDataBlock); if (pDataBlock != NULL) { tqAddBlockDataToRsp(pDataBlock, pRsp); @@ -97,7 +99,7 @@ int64_t tqScan(STQ* pTq, const STqExecHandle* pExec, SMqDataRsp* pRsp, STqOffset } if (pRsp->blockNum == 0 && pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) { - tqOffsetResetToLog(pOffset, pExec->tsdbEndVer + 1); + tqOffsetResetToLog(pOffset, pHandle->snapshotVer + 1); qStreamPrepareScan(task, pOffset); continue; } @@ -116,7 +118,7 @@ int64_t tqScan(STQ* pTq, const STqExecHandle* pExec, SMqDataRsp* pRsp, STqOffset if (pRsp->reqOffset.type == TMQ_OFFSET__LOG) { ASSERT(pRsp->rspOffset.version + 1 >= pRsp->reqOffset.version); } - + tqDebug("task exec exited"); break; } diff --git a/source/dnode/vnode/src/tq/tqMeta.c b/source/dnode/vnode/src/tq/tqMeta.c index 67fa4ed166..e6df58696d 100644 --- a/source/dnode/vnode/src/tq/tqMeta.c +++ b/source/dnode/vnode/src/tq/tqMeta.c @@ -19,6 +19,7 @@ static int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle) { if (tStartEncode(pEncoder) < 0) return -1; if (tEncodeCStr(pEncoder, pHandle->subKey) < 0) return -1; if (tEncodeI64(pEncoder, pHandle->consumerId) < 0) return -1; + if (tEncodeI64(pEncoder, pHandle->snapshotVer) < 0) return -1; if (tEncodeI32(pEncoder, pHandle->epoch) < 0) return -1; if (tEncodeI8(pEncoder, pHandle->execHandle.subType) < 0) return -1; if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { @@ -32,6 +33,7 @@ static int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle) { if (tStartDecode(pDecoder) < 0) return -1; if (tDecodeCStrTo(pDecoder, pHandle->subKey) < 0) return -1; if (tDecodeI64(pDecoder, &pHandle->consumerId) < 0) return -1; + if (tDecodeI64(pDecoder, &pHandle->snapshotVer) < 0) return -1; if (tDecodeI32(pDecoder, &pHandle->epoch) < 0) return -1; if (tDecodeI8(pDecoder, &pHandle->execHandle.subType) < 0) return -1; if 
(pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { @@ -78,19 +80,25 @@ int32_t tqMetaOpen(STQ* pTq) { tDecoderInit(&decoder, (uint8_t*)pVal, vLen); tDecodeSTqHandle(&decoder, &handle); handle.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL); - for (int32_t i = 0; i < 5; i++) { - handle.execHandle.pExecReader[i] = tqOpenReader(pTq->pVnode); - } + /*for (int32_t i = 0; i < 5; i++) {*/ + /*handle.execHandle.pExecReader[i] = tqOpenReader(pTq->pVnode);*/ + /*}*/ if (handle.execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { for (int32_t i = 0; i < 5; i++) { SReadHandle reader = { - .tqReader = handle.execHandle.pExecReader[i], .meta = pTq->pVnode->pMeta, - .pMsgCb = &pTq->pVnode->msgCb, .vnode = pTq->pVnode, + .initTableReader = true, + .initTqReader = true, + .version = handle.snapshotVer, }; - handle.execHandle.execCol.task[i] = qCreateStreamExecTaskInfo(handle.execHandle.execCol.qmsg, &reader); + handle.execHandle.execCol.task[i] = qCreateQueueExecTaskInfo(handle.execHandle.execCol.qmsg, &reader); ASSERT(handle.execHandle.execCol.task[i]); + void* scanner = NULL; + qExtractStreamScanner(handle.execHandle.execCol.task[i], &scanner); + ASSERT(scanner); + handle.execHandle.pExecReader[i] = qExtractReaderFromStreamScanner(scanner); + ASSERT(handle.execHandle.pExecReader[i]); } } else { handle.execHandle.execDb.pFilterOutTbUid = diff --git a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c index 150ed620bf..5c09c7663f 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c @@ -22,7 +22,6 @@ typedef struct SLastrowReader { SVnode* pVnode; STSchema* pSchema; uint64_t uid; - // int32_t* pSlotIds; char** transferBuf; // todo remove it soon int32_t numOfCols; int32_t type; @@ -31,25 +30,27 @@ typedef struct SLastrowReader { } SLastrowReader; static void saveOneRow(STSRow* pRow, SSDataBlock* pBlock, SLastrowReader* pReader, const int32_t* slotIds) { + ASSERT(pReader->numOfCols <= taosArrayGetSize(pBlock->pDataBlock)); int32_t numOfRows = pBlock->info.rows; - size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock); SColVal colVal = {0}; - for (int32_t i = 0; i < numOfCols; ++i) { + for (int32_t i = 0; i < pReader->numOfCols; ++i) { SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i); if (slotIds[i] == -1) { colDataAppend(pColInfoData, numOfRows, (const char*)&pRow->ts, false); } else { - tTSRowGetVal(pRow, pReader->pSchema, slotIds[i], &colVal); + int32_t slotId = slotIds[i]; + + tTSRowGetVal(pRow, pReader->pSchema, slotId, &colVal); if (IS_VAR_DATA_TYPE(colVal.type)) { if (colVal.isNull || colVal.isNone) { colDataAppendNULL(pColInfoData, numOfRows); } else { - varDataSetLen(pReader->transferBuf[i], colVal.value.nData); - memcpy(varDataVal(pReader->transferBuf[i]), colVal.value.pData, colVal.value.nData); - colDataAppend(pColInfoData, numOfRows, pReader->transferBuf[i], false); + varDataSetLen(pReader->transferBuf[slotId], colVal.value.nData); + memcpy(varDataVal(pReader->transferBuf[slotId]), colVal.value.pData, colVal.value.nData); + colDataAppend(pColInfoData, numOfRows, pReader->transferBuf[slotId], false); } } else { colDataAppend(pColInfoData, numOfRows, (const char*)&colVal.value, colVal.isNull || colVal.isNone); @@ -60,8 +61,7 @@ static void saveOneRow(STSRow* pRow, SSDataBlock* pBlock, SLastrowReader* pReade pBlock->info.rows += 1; } -int32_t tsdbLastRowReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList, int32_t* colId, int32_t numOfCols, - void** pReader) { +int32_t 
tsdbLastRowReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList, int32_t numOfCols, void** pReader) { SLastrowReader* p = taosMemoryCalloc(1, sizeof(SLastrowReader)); if (p == NULL) { return TSDB_CODE_OUT_OF_MEMORY; @@ -70,13 +70,18 @@ int32_t tsdbLastRowReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList, p->type = type; p->pVnode = pVnode; p->numOfCols = numOfCols; - p->transferBuf = taosMemoryCalloc(p->numOfCols, POINTER_BYTES); + + if (taosArrayGetSize(pTableIdList) == 0) { + *pReader = p; + return TSDB_CODE_SUCCESS; + } STableKeyInfo* pKeyInfo = taosArrayGet(pTableIdList, 0); p->pSchema = metaGetTbTSchema(p->pVnode->pMeta, pKeyInfo->uid, -1); p->pTableList = pTableIdList; - for (int32_t i = 0; i < p->numOfCols; ++i) { + p->transferBuf = taosMemoryCalloc(p->pSchema->numOfCols, POINTER_BYTES); + for (int32_t i = 0; i < p->pSchema->numOfCols; ++i) { if (IS_VAR_DATA_TYPE(p->pSchema->columns[i].type)) { p->transferBuf[i] = taosMemoryMalloc(p->pSchema->columns[i].bytes); } @@ -89,10 +94,11 @@ int32_t tsdbLastRowReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList, int32_t tsdbLastrowReaderClose(void* pReader) { SLastrowReader* p = pReader; - for (int32_t i = 0; i < p->numOfCols; ++i) { + for (int32_t i = 0; i < p->pSchema->numOfCols; ++i) { taosMemoryFreeClear(p->transferBuf[i]); } + taosMemoryFree(p->pSchema); taosMemoryFree(p->transferBuf); taosMemoryFree(pReader); return TSDB_CODE_SUCCESS; diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 6e68d30a08..e86a14a30f 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -71,7 +71,7 @@ typedef struct SFilesetIter { typedef struct SFileDataBlockInfo { int32_t - tbBlockIdx; // index position in STableBlockScanInfo in order to check whether neighbor block overlaps with it + tbBlockIdx; // index position in STableBlockScanInfo in order to check whether neighbor block overlaps with it uint64_t uid; } SFileDataBlockInfo; @@ -119,10 +119,10 @@ struct STsdbReader { int32_t type; // query type: 1. retrieve all data blocks, 2. 
retrieve direct prev|next rows SBlockLoadSuppInfo suppInfo; - SIOCostSummary cost; - STSchema* pSchema; - SDataFReader* pFileReader; - SVersionRange verRange; + SIOCostSummary cost; + STSchema* pSchema; + SDataFReader* pFileReader; + SVersionRange verRange; }; static SFileDataBlockInfo* getCurrentBlockInfo(SDataBlockIter* pBlockIter); @@ -287,9 +287,7 @@ static int32_t initFilesetIterator(SFilesetIter* pIter, const STsdbFSState* pFSt return TSDB_CODE_SUCCESS; } -static void cleanupFilesetIterator(SFilesetIter* pIter) { - taosArrayDestroy(pIter->pFileList); -} +static void cleanupFilesetIterator(SFilesetIter* pIter) { taosArrayDestroy(pIter->pFileList); } static bool filesetIteratorNext(SFilesetIter* pIter, STsdbReader* pReader) { bool asc = ASCENDING_TRAVERSE(pIter->order); @@ -304,6 +302,10 @@ static bool filesetIteratorNext(SFilesetIter* pIter, STsdbReader* pReader) { STimeWindow win = {0}; while (1) { +// if (pReader->pFileReader != NULL) { +// tsdbDataFReaderClose(&pReader->pFileReader); +// } + pReader->status.pCurrentFileset = (SDFileSet*)taosArrayGet(pIter->pFileList, pIter->index); int32_t code = tsdbDataFReaderOpen(&pReader->pFileReader, pReader->pTsdb, pReader->status.pCurrentFileset); @@ -349,9 +351,7 @@ static void resetDataBlockIterator(SDataBlockIter* pIter, int32_t order) { } } -static void cleanupDataBlockIterator(SDataBlockIter* pIter) { - taosArrayDestroy(pIter->blockList); -} +static void cleanupDataBlockIterator(SDataBlockIter* pIter) { taosArrayDestroy(pIter->blockList); } static void initReaderStatus(SReaderStatus* pStatus) { pStatus->pTableIter = NULL; @@ -392,8 +392,7 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsd initReaderStatus(&pReader->status); - pReader->pTsdb = - getTsdbByRetentions(pVnode, pCond->twindows.skey, pVnode->config.tsdbCfg.retentions, idstr, &level); + pReader->pTsdb = getTsdbByRetentions(pVnode, pCond->twindows.skey, pVnode->config.tsdbCfg.retentions, idstr, &level); pReader->suid = pCond->suid; pReader->order = pCond->order; pReader->capacity = 4096; @@ -833,7 +832,7 @@ static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockI uint8_t *pb = NULL, *pb1 = NULL; int32_t code = tsdbReadColData(pReader->pFileReader, &pBlockScanInfo->blockIdx, pBlock, pSupInfo->colIds, numOfCols, - pBlockData, &pb, &pb1); + pBlockData, &pb, &pb1); if (code != TSDB_CODE_SUCCESS) { goto _error; } @@ -1459,18 +1458,18 @@ static bool overlapWithDelSkyline(STableBlockScanInfo* pBlockScanInfo, const SBl } TSDBKEY* pFirst = taosArrayGet(pBlockScanInfo->delSkyline, 0); - TSDBKEY* pLast = taosArrayGetLast(pBlockScanInfo->delSkyline); + TSDBKEY* pLast = taosArrayGetLast(pBlockScanInfo->delSkyline); // ts is not overlap if (pBlock->minKey.ts > pLast->ts || pBlock->maxKey.ts < pFirst->ts) { return false; } - int32_t step = ASCENDING_TRAVERSE(order)? 1:-1; + int32_t step = ASCENDING_TRAVERSE(order) ? 1 : -1; // version is not overlap size_t num = taosArrayGetSize(pBlockScanInfo->delSkyline); - for(int32_t i = pBlockScanInfo->fileDelIndex; i < num; i += step) { + for (int32_t i = pBlockScanInfo->fileDelIndex; i < num; i += step) { TSDBKEY* p = taosArrayGet(pBlockScanInfo->delSkyline, i); if (p->ts >= pBlock->minKey.ts && p->ts <= pBlock->maxKey.ts) { if (p->version >= pBlock->minVersion) { @@ -1502,8 +1501,8 @@ static bool fileBlockShouldLoad(STsdbReader* pReader, SFileDataBlockInfo* pFBloc } // has duplicated ts of different version in this block - bool hasDup = (pBlock->nSubBlock == 1)? 
pBlock->hasDup:true; - bool overlapWithDel= overlapWithDelSkyline(pScanInfo, pBlock, pReader->order); + bool hasDup = (pBlock->nSubBlock == 1) ? pBlock->hasDup : true; + bool overlapWithDel = overlapWithDelSkyline(pScanInfo, pBlock, pReader->order); return (overlapWithNeighbor || hasDup || dataBlockPartiallyRequired(&pReader->window, &pReader->verRange, pBlock) || keyOverlapFileBlock(key, pBlock, &pReader->verRange) || (pBlock->nRow > pReader->capacity) || overlapWithDel); @@ -2220,17 +2219,18 @@ static STsdb* getTsdbByRetentions(SVnode* pVnode, TSKEY winSKey, SRetention* ret } SVersionRange getQueryVerRange(SVnode* pVnode, SQueryTableDataCond* pCond, int8_t level) { - int64_t startVer = (pCond->startVersion == -1)? 0:pCond->startVersion; + int64_t startVer = (pCond->startVersion == -1) ? 0 : pCond->startVersion; if (VND_IS_RSMA(pVnode)) { return (SVersionRange){.minVer = startVer, .maxVer = tdRSmaGetMaxSubmitVer(pVnode->pSma, level)}; } int64_t endVer = 0; - if (pCond->endVersion == -1) { // user not specified end version, set current maximum version of vnode as the endVersion + if (pCond->endVersion == + -1) { // user not specified end version, set current maximum version of vnode as the endVersion endVer = pVnode->state.applied; } else { - endVer = (pCond->endVersion > pVnode->state.applied)? pVnode->state.applied:pCond->endVersion; + endVer = (pCond->endVersion > pVnode->state.applied) ? pVnode->state.applied : pCond->endVersion; } return (SVersionRange){.minVer = startVer, .maxVer = endVer}; @@ -2274,9 +2274,9 @@ bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32 if (pDelList == NULL) { return false; } - size_t num = taosArrayGetSize(pDelList); - bool asc = ASCENDING_TRAVERSE(order); - int32_t step = asc? 1:-1; + size_t num = taosArrayGetSize(pDelList); + bool asc = ASCENDING_TRAVERSE(order); + int32_t step = asc ? 
1 : -1; if (asc) { if (*index >= num - 1) { @@ -2437,6 +2437,7 @@ static int32_t doMergeRowsInFileBlockImpl(SBlockData* pBlockData, int32_t rowInd SVersionRange* pVerRange, int32_t step) { while (pBlockData->aTSKEY[rowIndex] == key && rowIndex < pBlockData->nRow && rowIndex >= 0) { if (pBlockData->aVersion[rowIndex] > pVerRange->maxVer || pBlockData->aVersion[rowIndex] < pVerRange->minVer) { + rowIndex += step; continue; } @@ -2823,7 +2824,7 @@ void tsdbReaderClose(STsdbReader* pReader) { taosMemoryFree(pSupInfo->colIds); taosArrayDestroy(pSupInfo->pColAgg); - for(int32_t i = 0; i < blockDataGetNumOfCols(pReader->pResBlock); ++i) { + for (int32_t i = 0; i < blockDataGetNumOfCols(pReader->pResBlock); ++i) { if (pSupInfo->buildBuf[i] != NULL) { taosMemoryFreeClear(pSupInfo->buildBuf[i]); } @@ -2835,6 +2836,9 @@ void tsdbReaderClose(STsdbReader* pReader) { destroyBlockScanInfo(pReader->status.pTableMap); blockDataDestroy(pReader->pResBlock); + if (pReader->pFileReader != NULL) { + tsdbDataFReaderClose(&pReader->pFileReader); + } #if 0 // if (pReader->status.pTableScanInfo != NULL) { @@ -3011,8 +3015,8 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) { return TSDB_CODE_SUCCESS; } - pReader->order = pCond->order; - pReader->type = BLOCK_LOAD_OFFSET_ORDER; + pReader->order = pCond->order; + pReader->type = BLOCK_LOAD_OFFSET_ORDER; pReader->status.loadFromFile = true; pReader->status.pTableIter = NULL; @@ -3023,11 +3027,14 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) { memset(pReader->suppInfo.plist, 0, POINTER_BYTES); pReader->suppInfo.tsColAgg.colId = PRIMARYKEY_TIMESTAMP_COL_ID; + tsdbDataFReaderClose(&pReader->pFileReader); // todo set the correct numOfTables int32_t numOfTables = 1; SDataBlockIter* pBlockIter = &pReader->status.blockIter; + tsdbDataFReaderClose(&pReader->pFileReader); + STsdbFSState* pFState = pReader->pTsdb->fs->cState; initFilesetIterator(&pReader->status.fileIter, pFState, pReader->order, pReader->idStr); resetDataBlockIterator(&pReader->status.blockIter, pReader->order); @@ -3114,13 +3121,12 @@ int32_t tsdbGetFileBlocksDistInfo(STsdbReader* pReader, STableBlockDistInfo* pTa pTableBlockInfo->numOfBlocks += pBlockIter->numOfBlocks; } -/* - hasNext = blockIteratorNext(&pStatus->blockIter); -*/ + /* + hasNext = blockIteratorNext(&pStatus->blockIter); + */ - -// tsdbDebug("%p %d blocks found in file for %d table(s), fid:%d, %s", pReader, numOfBlocks, numOfTables, -// pReader->pFileGroup->fid, pReader->idStr); + // tsdbDebug("%p %d blocks found in file for %d table(s), fid:%d, %s", pReader, numOfBlocks, numOfTables, + // pReader->pFileGroup->fid, pReader->idStr); } return code; @@ -3158,7 +3164,7 @@ int64_t tsdbGetNumOfRowsInMemTable(STsdbReader* pReader) { return rows; } -int32_t tsdbGetTableSchema(SVnode* pVnode, int64_t uid, STSchema** pSchema, int64_t *suid) { +int32_t tsdbGetTableSchema(SVnode* pVnode, int64_t uid, STSchema** pSchema, int64_t* suid) { int32_t sversion = 1; SMetaReader mr = {0}; @@ -3171,7 +3177,7 @@ int32_t tsdbGetTableSchema(SVnode* pVnode, int64_t uid, STSchema** pSchema, int6 } *suid = 0; - + if (mr.me.type == TSDB_CHILD_TABLE) { *suid = mr.me.ctbEntry.suid; code = metaGetTableEntryByUid(&mr, *suid); @@ -3188,8 +3194,7 @@ int32_t tsdbGetTableSchema(SVnode* pVnode, int64_t uid, STSchema** pSchema, int6 metaReaderClear(&mr); *pSchema = metaGetTbTSchema(pVnode->pMeta, uid, sversion); - + return TSDB_CODE_SUCCESS; } - diff --git a/source/libs/catalog/inc/catalogInt.h 
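One of the tsdbRead.c fixes above is in doMergeRowsInFileBlockImpl: when a row's version falls outside the query range, the loop now advances rowIndex before continuing; without the increment the same row would be re-tested forever. A minimal sketch of the skip-and-advance pattern on a plain array, with hypothetical field names:

// Illustrative only: filtering rows by a version range while walking rows
// that share one timestamp key. Advancing the index inside the skip branch
// is what the patch adds; omitting it spins on the same row.
#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t ts; int64_t ver; } Row;

static int merge_rows_for_key(const Row *rows, int nRows, int idx, int step,
                              int64_t key, int64_t minVer, int64_t maxVer) {
  int merged = 0;
  while (idx >= 0 && idx < nRows && rows[idx].ts == key) {
    if (rows[idx].ver < minVer || rows[idx].ver > maxVer) {
      idx += step;          // the fix: move on, otherwise this loops forever
      continue;
    }
    ++merged;               // a real reader would merge the row's values here
    idx += step;
  }
  return merged;
}

int main(void) {
  Row rows[] = {{100, 1}, {100, 5}, {100, 9}, {200, 2}};
  // only versions 2..6 are visible to this query
  printf("merged %d rows\n", merge_rows_for_key(rows, 4, 0, 1, 100, 2, 6));
  return 0;
}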
b/source/libs/catalog/inc/catalogInt.h index dce7adfea9..fb9f588bae 100644 --- a/source/libs/catalog/inc/catalogInt.h +++ b/source/libs/catalog/inc/catalogInt.h @@ -642,6 +642,7 @@ void ctgFreeSTableIndex(void *info); void ctgClearSubTaskRes(SCtgSubRes *pRes); void ctgFreeQNode(SCtgQNode *node); void ctgClearHandle(SCatalog* pCtg); +void ctgFreeTbCacheImpl(SCtgTbCache *pCache); extern SCatalogMgmt gCtgMgmt; diff --git a/source/libs/catalog/src/ctgCache.c b/source/libs/catalog/src/ctgCache.c index 77c1e5b8b1..499ce77276 100644 --- a/source/libs/catalog/src/ctgCache.c +++ b/source/libs/catalog/src/ctgCache.c @@ -647,6 +647,8 @@ int32_t ctgEnqueue(SCatalog* pCtg, SCtgCacheOperation *operation) { CTG_RET(TSDB_CODE_OUT_OF_MEMORY); } + bool syncOp = operation->syncOp; + char* opName = gCtgCacheOperation[operation->opId].name; if (operation->syncOp) { tsem_init(&operation->rspSem, 0, 0); } @@ -664,14 +666,14 @@ int32_t ctgEnqueue(SCatalog* pCtg, SCtgCacheOperation *operation) { gCtgMgmt.queue.tail = node; CTG_UNLOCK(CTG_WRITE, &gCtgMgmt.queue.qlock); + ctgDebug("action [%s] added into queue", opName); + CTG_QUEUE_INC(); CTG_RT_STAT_INC(numOfOpEnqueue, 1); tsem_post(&gCtgMgmt.queue.reqSem); - ctgDebug("action [%s] added into queue", gCtgCacheOperation[operation->opId].name); - - if (operation->syncOp) { + if (syncOp) { tsem_wait(&operation->rspSem); taosMemoryFree(operation); } @@ -840,6 +842,7 @@ _return: ctgFreeVgInfo(dbInfo); taosMemoryFreeClear(op->data); + taosMemoryFreeClear(op); CTG_RET(code); } @@ -852,7 +855,7 @@ int32_t ctgUpdateTbMetaEnqueue(SCatalog* pCtg, STableMetaOutput *output, bool sy SCtgUpdateTbMetaMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateTbMetaMsg)); if (NULL == msg) { ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateTbMetaMsg)); - CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY); } char *p = strchr(output->dbFName, '.'); @@ -871,6 +874,11 @@ int32_t ctgUpdateTbMetaEnqueue(SCatalog* pCtg, STableMetaOutput *output, bool sy _return: + if (output) { + taosMemoryFree(output->tbMeta); + taosMemoryFree(output); + } + taosMemoryFreeClear(msg); CTG_RET(code); @@ -1753,6 +1761,16 @@ int32_t ctgOpDropStbMeta(SCtgCacheOperation *operation) { CTG_CACHE_STAT_DEC(numOfStb, 1); } + SCtgTbCache* pTbCache = taosHashGet(dbCache->tbCache, msg->stbName, strlen(msg->stbName)); + if (NULL == pTbCache) { + ctgDebug("stb %s already not in cache", msg->stbName); + goto _return; + } + + CTG_LOCK(CTG_WRITE, &pTbCache->metaLock); + ctgFreeTbCacheImpl(pTbCache); + CTG_UNLOCK(CTG_WRITE, &pTbCache->metaLock); + if (taosHashRemove(dbCache->tbCache, msg->stbName, strlen(msg->stbName))) { ctgError("stb not exist in cache, dbFName:%s, stb:%s, suid:0x%"PRIx64, msg->dbFName, msg->stbName, msg->suid); } else { @@ -1780,14 +1798,24 @@ int32_t ctgOpDropTbMeta(SCtgCacheOperation *operation) { SCtgDBCache *dbCache = NULL; ctgGetDBCache(pCtg, msg->dbFName, &dbCache); if (NULL == dbCache) { - return TSDB_CODE_SUCCESS; + goto _return; } if (dbCache->dbId != msg->dbId) { ctgDebug("dbId 0x%" PRIx64 " not match with curId 0x%"PRIx64", dbFName:%s, tbName:%s", msg->dbId, dbCache->dbId, msg->dbFName, msg->tbName); - return TSDB_CODE_SUCCESS; + goto _return; } + SCtgTbCache* pTbCache = taosHashGet(dbCache->tbCache, msg->tbName, strlen(msg->tbName)); + if (NULL == pTbCache) { + ctgDebug("tb %s already not in cache", msg->tbName); + goto _return; + } + + CTG_LOCK(CTG_WRITE, &pTbCache->metaLock); + ctgFreeTbCacheImpl(pTbCache); + CTG_UNLOCK(CTG_WRITE, &pTbCache->metaLock); + if 
(taosHashRemove(dbCache->tbCache, msg->tbName, strlen(msg->tbName))) { ctgError("tb %s not exist in cache, dbFName:%s", msg->tbName, msg->dbFName); CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR); @@ -2063,6 +2091,8 @@ void* ctgUpdateThreadFunc(void* param) { if (operation->syncOp) { tsem_post(&operation->rspSem); + } else { + taosMemoryFreeClear(operation); } CTG_RT_STAT_INC(numOfOpDequeue, 1); diff --git a/source/libs/catalog/src/ctgRemote.c b/source/libs/catalog/src/ctgRemote.c index 59ad009527..8e0a5b7de3 100644 --- a/source/libs/catalog/src/ctgRemote.c +++ b/source/libs/catalog/src/ctgRemote.c @@ -261,6 +261,8 @@ int32_t ctgHandleMsgCallback(void *param, SDataBuf *pMsg, int32_t rspCode) { _return: + taosMemoryFree(pMsg->pData); + if (pJob) { taosReleaseRef(gCtgMgmt.jobPool, cbParam->refId); } diff --git a/source/libs/catalog/src/ctgUtil.c b/source/libs/catalog/src/ctgUtil.c index ad73fe40d2..a996a70973 100644 --- a/source/libs/catalog/src/ctgUtil.c +++ b/source/libs/catalog/src/ctgUtil.c @@ -152,6 +152,7 @@ void ctgFreeStbMetaCache(SCtgDBCache *dbCache) { } void ctgFreeTbCacheImpl(SCtgTbCache *pCache) { + qDebug("tbMeta freed, p:%p", pCache->pMeta); taosMemoryFreeClear(pCache->pMeta); if (pCache->pIndex) { taosArrayDestroyEx(pCache->pIndex->pIndex, tFreeSTableIndexInfo); @@ -831,6 +832,7 @@ int32_t ctgCloneMetaOutput(STableMetaOutput *output, STableMetaOutput **pOutput) if (output->tbMeta) { int32_t metaSize = CTG_META_SIZE(output->tbMeta); (*pOutput)->tbMeta = taosMemoryMalloc(metaSize); + qDebug("tbMeta cloned, size:%d, p:%p", metaSize, (*pOutput)->tbMeta); if (NULL == (*pOutput)->tbMeta) { qError("malloc %d failed", (int32_t)sizeof(STableMetaOutput)); taosMemoryFreeClear(*pOutput); diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index 69ba88916a..aab2f51421 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -389,6 +389,7 @@ typedef struct SStreamScanInfo { SSDataBlock* pPullDataRes; // pull data SSDataBlock SSDataBlock* pDeleteDataRes; // delete data SSDataBlock int32_t deleteDataIndex; + STimeWindow updateWin; // status for tmq // SSchemaWrapper schema; diff --git a/source/libs/executor/src/cachescanoperator.c b/source/libs/executor/src/cachescanoperator.c index b6ad3b6cc0..7b1351a024 100644 --- a/source/libs/executor/src/cachescanoperator.c +++ b/source/libs/executor/src/cachescanoperator.c @@ -46,7 +46,7 @@ SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pScanNode, SRead pInfo->pColMatchInfo = extractColMatchInfo(pScanNode->pScanCols, pScanNode->node.pOutputDataBlockDesc, &numOfCols, COL_MATCH_FROM_COL_ID); int32_t* pCols = taosMemoryMalloc(numOfCols * sizeof(int32_t)); - for (int32_t i = 0; i < numOfCols; ++i) { + for (int32_t i = 0; i < taosArrayGetSize(pInfo->pColMatchInfo); ++i) { SColMatchInfo* pColMatch = taosArrayGet(pInfo->pColMatchInfo, i); pCols[i] = pColMatch->colId; } @@ -56,7 +56,7 @@ SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pScanNode, SRead goto _error; } - tsdbLastRowReaderOpen(readHandle->vnode, LASTROW_RETRIEVE_TYPE_ALL, pTableList, pCols, numOfCols, + tsdbLastRowReaderOpen(readHandle->vnode, LASTROW_RETRIEVE_TYPE_ALL, pTableList, taosArrayGetSize(pInfo->pColMatchInfo), &pInfo->pLastrowReader); taosMemoryFree(pCols); diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 2da8811e5e..2469062d09 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -191,6 
+191,7 @@ SSDataBlock* createResDataBlock(SDataBlockDescNode* pNode) { pBlock->info.blockId = pNode->dataBlockId; pBlock->info.type = STREAM_INVALID; + pBlock->info.calWin = (STimeWindow){.skey = INT64_MIN, .ekey = INT64_MAX}; for (int32_t i = 0; i < numOfCols; ++i) { SSlotDescNode* pDescNode = (SSlotDescNode*)nodesListGetNode(pNode->pSlots, i); diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 9f6b12c13a..6e4f02527f 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -106,6 +106,30 @@ int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numO return code; } +qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers) { + if (msg == NULL) { + // TODO create raw scan + return NULL; + } + + struct SSubplan* plan = NULL; + int32_t code = qStringToSubplan(msg, &plan); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + return NULL; + } + + qTaskInfo_t pTaskInfo = NULL; + code = qCreateExecTask(readers, 0, 0, plan, &pTaskInfo, NULL, NULL, OPTR_EXEC_MODEL_QUEUE); + if (code != TSDB_CODE_SUCCESS) { + // TODO: destroy SSubplan & pTaskInfo + terrno = code; + return NULL; + } + + return pTaskInfo; +} + qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers) { if (msg == NULL) { return NULL; @@ -186,7 +210,7 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo } int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* tableName, int32_t* sversion, - int32_t* tversion) { + int32_t* tversion) { ASSERT(tinfo != NULL && dbName != NULL && tableName != NULL); SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; diff --git a/source/libs/executor/src/executorMain.c b/source/libs/executor/src/executorMain.c index b30800680b..5d9fea523c 100644 --- a/source/libs/executor/src/executorMain.c +++ b/source/libs/executor/src/executorMain.c @@ -269,13 +269,13 @@ const STqOffset* qExtractStatusFromStreamScanner(void* scanner) { void* qStreamExtractMetaMsg(qTaskInfo_t tinfo) { SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; - ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM); + ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE); return pTaskInfo->streamInfo.metaBlk; } int32_t qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset) { SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; - ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM); + ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE); memcpy(pOffset, &pTaskInfo->streamInfo.lastStatus, sizeof(STqOffsetVal)); return 0; } @@ -283,35 +283,41 @@ int32_t qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset) { int32_t qStreamPrepareScan(qTaskInfo_t tinfo, const STqOffsetVal* pOffset) { SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; SOperatorInfo* pOperator = pTaskInfo->pRoot; - ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM); + ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE); pTaskInfo->streamInfo.prepareStatus = *pOffset; - // TODO: optimize - /*if (pTaskInfo->streamInfo.lastStatus.type != pOffset->type ||*/ - /*pTaskInfo->streamInfo.prepareStatus.version != pTaskInfo->streamInfo.lastStatus.version) {*/ - while (1) { - uint8_t type = pOperator->operatorType; - pOperator->status = OP_OPENED; - if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { - SStreamScanInfo* pInfo = pOperator->info; - if (pOffset->type == TMQ_OFFSET__LOG) { - if (tqSeekVer(pInfo->tqReader, pOffset->version) < 0) { - return -1; - } - 
ASSERT(pInfo->tqReader->pWalReader->curVersion == pOffset->version); - } else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) { - /*pInfo->blockType = STREAM_INPUT__TABLE_SCAN;*/ - int64_t uid = pOffset->uid; - int64_t ts = pOffset->ts; - - if (uid == 0) { - if (taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList) != 0) { - STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, 0); - uid = pTableInfo->uid; - ts = INT64_MIN; + if (!tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus)) { + while (1) { + uint8_t type = pOperator->operatorType; + pOperator->status = OP_OPENED; + if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { + SStreamScanInfo* pInfo = pOperator->info; + if (pOffset->type == TMQ_OFFSET__LOG) { +#if 0 + if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus) && + pInfo->tqReader->pWalReader->curVersion != pOffset->version) { + qError("prepare scan ver %ld actual ver %ld, last %ld", pOffset->version, + pInfo->tqReader->pWalReader->curVersion, pTaskInfo->streamInfo.lastStatus.version); + ASSERT(0); } - } - if (pTaskInfo->streamInfo.lastStatus.type != TMQ_OFFSET__SNAPSHOT_DATA || - pTaskInfo->streamInfo.lastStatus.uid != uid || pTaskInfo->streamInfo.lastStatus.ts != ts) { +#endif + if (tqSeekVer(pInfo->tqReader, pOffset->version + 1) < 0) { + return -1; + } + ASSERT(pInfo->tqReader->pWalReader->curVersion == pOffset->version + 1); + } else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) { + /*pInfo->blockType = STREAM_INPUT__TABLE_SCAN;*/ + int64_t uid = pOffset->uid; + int64_t ts = pOffset->ts; + + if (uid == 0) { + if (taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList) != 0) { + STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, 0); + uid = pTableInfo->uid; + ts = INT64_MIN; + } + } + /*if (pTaskInfo->streamInfo.lastStatus.type != TMQ_OFFSET__SNAPSHOT_DATA ||*/ + /*pTaskInfo->streamInfo.lastStatus.uid != uid || pTaskInfo->streamInfo.lastStatus.ts != ts) {*/ STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info; int32_t tableSz = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList); bool found = false; @@ -320,6 +326,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, const STqOffsetVal* pOffset) { if (pTableInfo->uid == uid) { found = true; pTableScanInfo->currentTable = i; + break; } } @@ -335,18 +342,18 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, const STqOffsetVal* pOffset) { qDebug("tsdb reader offset seek to uid %ld ts %ld, table cur set to %d , all table num %d", uid, ts, pTableScanInfo->currentTable, tableSz); - } + /*}*/ + } else { + ASSERT(0); + } + return 0; } else { - ASSERT(0); + ASSERT(pOperator->numOfDownstream == 1); + pOperator = pOperator->pDownstream[0]; } - return 0; - } else { - ASSERT(pOperator->numOfDownstream == 1); - pOperator = pOperator->pDownstream[0]; } } - /*}*/ return 0; } diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 2ae09e0434..6f4a6b805d 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -594,10 +594,14 @@ int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBloc SColumnInfoData* pColInfoData = taosArrayGet(pResult->pDataBlock, outputSlotId); int32_t offset = createNewColModel ? 
0 : pResult->info.rows; - for (int32_t i = 0; i < pSrcBlock->info.rows; ++i) { - colDataAppend(pColInfoData, i + offset, - taosVariantGet(&pExpr[k].base.pParam[0].param, pExpr[k].base.pParam[0].param.nType), - TSDB_DATA_TYPE_NULL == pExpr[k].base.pParam[0].param.nType); + + int32_t type = pExpr[k].base.pParam[0].param.nType; + if (TSDB_DATA_TYPE_NULL == type) { + colDataAppendNNULL(pColInfoData, offset, pSrcBlock->info.rows); + } else { + for (int32_t i = 0; i < pSrcBlock->info.rows; ++i) { + colDataAppend(pColInfoData, i + offset, taosVariantGet(&pExpr[k].base.pParam[0].param, type), false); + } } numOfRows = pSrcBlock->info.rows; @@ -3235,6 +3239,10 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) { SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; if (pOperator->status == OP_EXEC_DONE) { + if (pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE) { + pOperator->status = OP_OPENED; + return NULL; + } return NULL; } @@ -3268,11 +3276,15 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) { while (1) { // The downstream exec may change the value of the newgroup, so use a local variable instead. + qDebug("projection call next"); SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); if (pBlock == NULL) { - // TODO optimize - /*if (pTaskInfo->execModel != OPTR_EXEC_MODEL_STREAM) {*/ + qDebug("projection get null"); + + /*if (pTaskInfo->execModel == OPTR_EXEC_MODEL_BATCH) {*/ doSetOperatorCompleted(pOperator); + /*} else if (pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE) {*/ + /*pOperator->status = OP_RES_TO_RETURN;*/ /*}*/ break; } diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index f3c240b6f2..64c740decf 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -884,6 +884,28 @@ static bool prepareRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_ return true; } +static STimeWindow getSlidingWindow(TSKEY* tsCol, SInterval* pInterval, SDataBlockInfo* pDataBlockInfo, int32_t* pRowIndex) { + SResultRowInfo dumyInfo; + dumyInfo.cur.pageId = -1; + STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCol[*pRowIndex], pInterval, + TSDB_ORDER_ASC); + STimeWindow endWin = win; + STimeWindow preWin = win; + while (1) { + (*pRowIndex) += getNumOfRowsInTimeWindow(pDataBlockInfo, tsCol, *pRowIndex, endWin.ekey, + binarySearchForKey, NULL, TSDB_ORDER_ASC); + do { + preWin = endWin; + getNextTimeWindow(pInterval, &endWin, TSDB_ORDER_ASC); + } while (tsCol[(*pRowIndex) - 1] >= endWin.skey); + endWin = preWin; + if (win.ekey == endWin.ekey || (*pRowIndex) == pDataBlockInfo->rows ) { + win.ekey = endWin.ekey; + return win; + } + win.ekey = endWin.ekey; + } +} static bool prepareDataScan(SStreamScanInfo* pInfo, SSDataBlock* pSDB, int32_t tsColIndex, int32_t* pRowIndex) { STimeWindow win = { .skey = INT64_MIN, @@ -905,10 +927,13 @@ static bool prepareDataScan(SStreamScanInfo* pInfo, SSDataBlock* pSDB, int32_t t setGroupId(pInfo, pSDB, GROUPID_COLUMN_INDEX, *pRowIndex); (*pRowIndex) += updateSessionWindowInfo(pCurWin, tsCols, NULL, pSDB->info.rows, *pRowIndex, gap, NULL); } else { - win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[*pRowIndex], &pInfo->interval, TSDB_ORDER_ASC); setGroupId(pInfo, pSDB, GROUPID_COLUMN_INDEX, *pRowIndex); - (*pRowIndex) += - getNumOfRowsInTimeWindow(&pSDB->info, tsCols, *pRowIndex, win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC); + pInfo->updateWin.skey = tsCols[*pRowIndex]; + win = getSlidingWindow(tsCols, &pInfo->interval, 
&pSDB->info, pRowIndex); + pInfo->updateWin.ekey = tsCols[*pRowIndex - 1]; + // win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[*pRowIndex], &pInfo->interval, TSDB_ORDER_ASC); + // (*pRowIndex) += + // getNumOfRowsInTimeWindow(&pSDB->info, tsCols, *pRowIndex, win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC); } needRead = true; } else if (isStateWindow(pInfo)) { @@ -974,10 +999,12 @@ static SSDataBlock* doDataScan(SStreamScanInfo* pInfo, SSDataBlock* pSDB, int32_ } } if (!pResult) { + pInfo->updateWin = (STimeWindow){.skey = INT64_MIN, .ekey = INT64_MAX}; return NULL; } if (pResult->info.groupId == pInfo->groupId) { + pResult->info.calWin = pInfo->updateWin; return pResult; } } @@ -1209,6 +1236,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { /*return NULL;*/ /*}*/ + qDebug("stream scan called"); if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) { while (1) { SFetchRet ret = {0}; @@ -1220,6 +1248,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { } // TODO clean data block if (pInfo->pRes->info.rows > 0) { + qDebug("stream scan log return %d rows", pInfo->pRes->info.rows); return pInfo->pRes; } } else if (ret.fetchType == FETCH_TYPE__META) { @@ -1230,6 +1259,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { } else if (ret.fetchType == FETCH_TYPE__NONE) { pTaskInfo->streamInfo.lastStatus = ret.offset; ASSERT(pTaskInfo->streamInfo.lastStatus.version + 1 >= pTaskInfo->streamInfo.prepareStatus.version); + qDebug("stream scan log return null"); return NULL; } else { ASSERT(0); @@ -1237,7 +1267,12 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { } } else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA) { SSDataBlock* pResult = doTableScan(pInfo->pTableScanOp); - return pResult && pResult->info.rows > 0 ? pResult : NULL; + if (pResult && pResult->info.rows > 0) { + qDebug("stream scan tsdb return %d rows", pResult->info.rows); + return pResult; + } + qDebug("stream scan tsdb return null"); + return NULL; } else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_META) { // TODO scan meta ASSERT(0); @@ -1256,8 +1291,13 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { int32_t current = pInfo->validBlockIndex++; SSDataBlock* pBlock = taosArrayGetP(pInfo->pBlockLists, current); // TODO move into scan + pBlock->info.calWin.skey = INT64_MIN; + pBlock->info.calWin.ekey = INT64_MAX; blockDataUpdateTsWindow(pBlock, 0); switch (pBlock->info.type) { + case STREAM_NORMAL: + case STREAM_GET_ALL: + return pBlock; case STREAM_RETRIEVE: { pInfo->blockType = STREAM_INPUT__DATA_SUBMIT; pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RETRIEVE; @@ -1287,6 +1327,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { } return pBlock; } else if (pInfo->blockType == STREAM_INPUT__DATA_SUBMIT) { + qDebug("scan mode %d", pInfo->scanMode); if (pInfo->scanMode == STREAM_SCAN_FROM_RES) { blockDataDestroy(pInfo->pUpdateRes); pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; @@ -1381,7 +1422,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { } } } - + qDebug("scan rows: %d", pBlockInfo->rows); return (pBlockInfo->rows == 0) ? 
NULL : pInfo->pRes; #if 0 @@ -1533,6 +1574,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys pInfo->pStreamScanOp = pOperator; pInfo->deleteDataIndex = 0; pInfo->pDeleteDataRes = createPullDataBlock(); + pInfo->updateWin = (STimeWindow){.skey = INT64_MAX, .ekey = INT64_MAX}; pOperator->name = "StreamScanOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN; diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 78775073a4..773484a9b3 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -419,6 +419,14 @@ static bool setTimeWindowInterpolationEndTs(SIntervalAggOperatorInfo* pInfo, SEx return true; } +bool inSlidingWindow(SInterval* pInterval, STimeWindow* pWin, SDataBlockInfo* pBlockInfo) { + if (pInterval->interval != pInterval->sliding && + (pWin->ekey < pBlockInfo->calWin.skey || pWin->skey > pBlockInfo->calWin.ekey)) { + return false; + } + return true; +} + static int32_t getNextQualifiedWindow(SInterval* pInterval, STimeWindow* pNext, SDataBlockInfo* pDataBlockInfo, TSKEY* primaryKeys, int32_t prevPosition, int32_t order) { bool ascQuery = (order == TSDB_ORDER_ASC); @@ -432,6 +440,10 @@ static int32_t getNextQualifiedWindow(SInterval* pInterval, STimeWindow* pNext, return -1; } + if (!inSlidingWindow(pInterval, pNext, pDataBlockInfo) && order == TSDB_ORDER_ASC) { + return -1; + } + TSKEY skey = ascQuery ? pNext->skey : pNext->ekey; int32_t startPos = 0; @@ -801,7 +813,8 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul STimeWindow win = getActiveTimeWindow(pInfo->aggSup.pResultBuf, pResultRowInfo, ts, &pInfo->interval, pInfo->order); int32_t ret = TSDB_CODE_SUCCESS; - if (!pInfo->ignoreExpiredData || !isCloseWindow(&win, &pInfo->twAggSup)) { + if ((!pInfo->ignoreExpiredData || !isCloseWindow(&win, &pInfo->twAggSup)) && + inSlidingWindow(&pInfo->interval, &win, &pBlock->info)) { ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS || pResult == NULL) { @@ -834,7 +847,8 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul doWindowBorderInterpolation(pInfo, pBlock, pResult, &win, startPos, forwardRows, pSup); } - if (!pInfo->ignoreExpiredData || !isCloseWindow(&win, &pInfo->twAggSup)) { + if ((!pInfo->ignoreExpiredData || !isCloseWindow(&win, &pInfo->twAggSup)) && + inSlidingWindow(&pInfo->interval, &win, &pBlock->info)) { updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &win, true); doApplyFunctions(pTaskInfo, pSup->pCtx, &win, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, tsCols, pBlock->info.rows, numOfOutput, pInfo->order); @@ -1278,18 +1292,25 @@ static void doClearWindows(SAggSupporter* pAggSup, SExprSupp* pSup1, SInterval* SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX); pGpDatas = (uint64_t*)pGpCol->pData; } - int32_t step = 0; - for (int32_t i = 0; i < pBlock->info.rows; i += step) { - SResultRowInfo dumyInfo; - dumyInfo.cur.pageId = -1; - STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[i], pInterval, TSDB_ORDER_ASC); - step = getNumOfRowsInTimeWindow(&pBlock->info, tsCols, i, win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC); - uint64_t winGpId = pGpDatas ? 
pGpDatas[i] : pBlock->info.groupId; - bool res = doClearWindow(pAggSup, pSup1, (char*)&win.skey, sizeof(TKEY), winGpId, numOfOutput); + int32_t step = 0; + int32_t startPos = 0; + SResultRowInfo dumyInfo; + dumyInfo.cur.pageId = -1; + STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[0], pInterval, TSDB_ORDER_ASC); + while (1) { + step = + getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC); + uint64_t winGpId = pGpDatas ? pGpDatas[startPos] : pBlock->info.groupId; + bool res = doClearWindow(pAggSup, pSup1, (char*)&win.skey, sizeof(TSKEY), winGpId, numOfOutput); if (pUpWins && res) { SWinRes winRes = {.ts = win.skey, .groupId = winGpId}; taosArrayPush(pUpWins, &winRes); } + int32_t prevEndPos = step - 1 + startPos; + startPos = getNextQualifiedWindow(pInterval, &win, &pBlock->info, tsCols, prevEndPos, TSDB_ORDER_ASC); + if (startPos < 0) { + break; + } } } @@ -1332,13 +1353,13 @@ static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup, if (chIds && pPullDataMap) { SArray* chAy = *(SArray**)chIds; int32_t size = taosArrayGetSize(chAy); - qInfo("window %" PRId64 " wait child size:%d", win.skey, size); + qDebug("window %" PRId64 " wait child size:%d", win.skey, size); for (int32_t i = 0; i < size; i++) { - qInfo("window %" PRId64 " wait chid id:%d", win.skey, *(int32_t*)taosArrayGet(chAy, i)); + qDebug("window %" PRId64 " wait chid id:%d", win.skey, *(int32_t*)taosArrayGet(chAy, i)); } continue; } else if (pPullDataMap) { - qInfo("close window %" PRId64, win.skey); + qDebug("close window %" PRId64, win.skey); } SResultRowPosition* pPos = (SResultRowPosition*)pIte; if (pSup->calTrigger == STREAM_TRIGGER_WINDOW_CLOSE) { @@ -2434,7 +2455,7 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc } while (1) { bool isClosed = isCloseWindow(&nextWin, &pInfo->twAggSup); - if (pInfo->ignoreExpiredData && isClosed) { + if ((pInfo->ignoreExpiredData && isClosed) || !inSlidingWindow(&pInfo->interval, &nextWin, &pSDataBlock->info)) { startPos = getNexWindowPos(&pInfo->interval, &pSDataBlock->info, tsCols, startPos, nextWin.ekey, &nextWin); if (startPos < 0) { break; @@ -2491,8 +2512,8 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc if (IS_FINAL_OP(pInfo)) { forwardRows = 1; } else { - forwardRows = getNumOfRowsInTimeWindow(&pSDataBlock->info, tsCols, startPos, nextWin.ekey, binarySearchForKey, NULL, - TSDB_ORDER_ASC); + forwardRows = getNumOfRowsInTimeWindow(&pSDataBlock->info, tsCols, startPos, nextWin.ekey, binarySearchForKey, + NULL, TSDB_ORDER_ASC); } if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE && pUpdated) { saveResultRow(pResult, tableGroupId, pUpdated); @@ -2609,6 +2630,8 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { SExprSupp* pSup = &pOperator->exprSupp; + qDebug("interval status %d %s", pOperator->status, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi"); + if (pOperator->status == OP_EXEC_DONE) { return NULL; } else if (pOperator->status == OP_RES_TO_RETURN) { @@ -2659,7 +2682,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { clearSpecialDataBlock(pInfo->pUpdateRes); removeDeleteResults(pUpdated, pInfo->pDelWins); pOperator->status = OP_RES_TO_RETURN; - qInfo("%s return data", IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi"); + qDebug("%s return data", IS_FINAL_OP(pInfo) ? 
"interval Final" : "interval Semi"); break; } printDataBlock(pBlock, IS_FINAL_OP(pInfo) ? "interval Final recv" : "interval Semi recv"); @@ -3101,12 +3124,7 @@ int64_t getSessionWindowEndkey(void* data, int32_t index) { } bool isInTimeWindow(STimeWindow* pWin, TSKEY ts, int64_t gap) { - int64_t sGap = ts - pWin->skey + gap; - int64_t eGap = pWin->ekey - ts + gap; - // if ((sGap < 0 && sGap >= -gap) || (eGap < 0 && eGap >= -gap) || (sGap >= 0 && eGap >= 0)) { - // return true; - // } - if (sGap >= 0 && eGap >= 0) { + if (ts + gap >= pWin->skey && ts - gap <= pWin->ekey) { return true; } return false; diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index a735edafab..1ed6dcad39 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -1976,7 +1976,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "leastsquares", .type = FUNCTION_TYPE_LEASTSQUARES, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_FORBID_STREAM_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC, .translateFunc = translateLeastSQR, .getEnvFunc = getLeastSQRFuncEnv, .initFunc = leastSQRFunctionSetup, @@ -2217,7 +2217,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "last_row", .type = FUNCTION_TYPE_LAST_ROW, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC, .translateFunc = translateFirstLast, .getEnvFunc = getFirstLastFuncEnv, .initFunc = functionSetup, diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 194d75a8a0..ccf28bfd78 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -80,8 +80,10 @@ typedef struct STopBotRes { } STopBotRes; typedef struct SFirstLastRes { - bool hasResult; - bool isNull; // used for last_row function only + bool hasResult; + // used for last_row function only, isNullRes in SResultRowEntry can not be passed to downstream.So, + // this attribute is required + bool isNull; int32_t bytes; char buf[]; } SFirstLastRes; @@ -1144,9 +1146,9 @@ static void copyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBl static int32_t findRowIndex(int32_t start, int32_t num, SColumnInfoData* pCol, const char* tval) { // the data is loaded, not only the block SMA value - for(int32_t i = start; i < num + start; ++i) { + for (int32_t i = start; i < num + start; ++i) { char* p = colDataGetData(pCol, i); - if (memcpy((void*)tval, p, pCol->info.bytes) == 0) { + if (memcpy((void*)tval, p, pCol->info.bytes) == 0) { return i; } } @@ -1154,7 +1156,6 @@ static int32_t findRowIndex(int32_t start, int32_t num, SColumnInfoData* pCol, c ASSERT(0); } - int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { int32_t numOfElems = 0; @@ -1631,10 +1632,14 @@ void setNullSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t } void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuplePos* pTuplePos, int32_t rowIndex) { + if (pCtx->subsidiaries.num <= 0) { + return; + } + int32_t pageId = pTuplePos->pageId; int32_t offset = pTuplePos->offset; - if (pTuplePos->pageId != -1 && pCtx->subsidiaries.num > 0) { + if (pTuplePos->pageId != -1) { int32_t numOfCols = pCtx->subsidiaries.num; SFilePage* pPage = getBufPage(pCtx->pBuf, pageId); @@ 
-1934,7 +1939,7 @@ int32_t stddevFunctionMerge(SqlFunctionCtx* pCtx) { SStddevRes* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); - for(int32_t i = pInput->startRowIndex; i < pInput->startRowIndex + pInput->numOfRows; ++i) { + for (int32_t i = pInput->startRowIndex; i < pInput->startRowIndex + pInput->numOfRows; ++i) { char* data = colDataGetData(pCol, i); SStddevRes* pInputInfo = (SStddevRes*)varDataVal(data); stddevTransferInfo(pInputInfo, pInfo); @@ -2945,7 +2950,7 @@ int32_t firstLastFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { pResInfo->isNullRes = (pResInfo->numOfRes == 0) ? 1 : 0; SFirstLastRes* pRes = GET_ROWCELL_INTERBUF(pResInfo); - colDataAppend(pCol, pBlock->info.rows, pRes->buf, pResInfo->isNullRes); + colDataAppend(pCol, pBlock->info.rows, pRes->buf, pRes->isNull||pResInfo->isNullRes); // handle selectivity STuplePos* pTuplePos = (STuplePos*)(pRes->buf + pRes->bytes + sizeof(TSKEY)); setSelectivityValue(pCtx, pBlock, pTuplePos, pBlock->info.rows); @@ -3508,8 +3513,7 @@ void saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pS setBufPageDirty(pPage, true); releaseBufPage(pCtx->pBuf, pPage); #ifdef BUF_PAGE_DEBUG - qDebug("page_saveTuple pos:%p,pageId:%d, offset:%d\n", pPos, pPos->pageId, - pPos->offset); + qDebug("page_saveTuple pos:%p,pageId:%d, offset:%d\n", pPos, pPos->pageId, pPos->offset); #endif } @@ -3810,7 +3814,7 @@ bool elapsedFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInfo SElapsedInfo* pInfo = GET_ROWCELL_INTERBUF(pResultInfo); pInfo->result = 0; - pInfo->min = MAX_TS_KEY; + pInfo->min = TSKEY_MAX; pInfo->max = 0; if (pCtx->numOfParams > 1) { @@ -3837,7 +3841,7 @@ int32_t elapsedFunction(SqlFunctionCtx* pCtx) { } if (pInput->colDataAggIsSet) { - if (pInfo->min == MAX_TS_KEY) { + if (pInfo->min == TSKEY_MAX) { pInfo->min = GET_INT64_VAL(&pAgg->min); pInfo->max = GET_INT64_VAL(&pAgg->max); } else { @@ -5986,24 +5990,28 @@ int32_t lastrowFunction(SqlFunctionCtx* pCtx) { int32_t type = pInputCol->info.type; int32_t bytes = pInputCol->info.bytes; + pInfo->bytes = bytes; + // last_row function does not ignore the null value for (int32_t i = pInput->numOfRows + pInput->startRowIndex - 1; i >= pInput->startRowIndex; --i) { - if (pInputCol->hasNull && colDataIsNull_s(pInputCol, i)) { - continue; - } - numOfElems++; char* data = colDataGetData(pInputCol, i); TSKEY cts = getRowPTs(pInput->pPTS, i); if (pResInfo->numOfRes == 0 || *(TSKEY*)(pInfo->buf + bytes) < cts) { - if (IS_VAR_DATA_TYPE(type)) { - bytes = varDataTLen(data); - pInfo->bytes = bytes; + + if (colDataIsNull_s(pInputCol, i)) { + pInfo->isNull = true; + } else { + if (IS_VAR_DATA_TYPE(type)) { + bytes = varDataTLen(data); + pInfo->bytes = bytes; + } + + memcpy(pInfo->buf, data, bytes); } - memcpy(pInfo->buf, data, bytes); *(TSKEY*)(pInfo->buf + bytes) = cts; pInfo->hasResult = true; diff --git a/source/libs/parser/src/parCalcConst.c b/source/libs/parser/src/parCalcConst.c index 4dff42592a..b799ae5fb1 100644 --- a/source/libs/parser/src/parCalcConst.c +++ b/source/libs/parser/src/parCalcConst.c @@ -218,7 +218,7 @@ static SNode* createConstantValue() { static int32_t calcConstProjections(SCalcConstContext* pCxt, SSelectStmt* pSelect, bool subquery) { SNode* pProj = NULL; WHERE_EACH(pProj, pSelect->pProjectionList) { - if (subquery && isUselessCol((SExprNode*)pProj)) { + if (subquery && !pSelect->isDistinct && isUselessCol((SExprNode*)pProj)) { ERASE_NODE(pSelect->pProjectionList); continue; } diff --git a/source/libs/parser/src/parTranslater.c 
b/source/libs/parser/src/parTranslater.c index da393bb883..ca000fcf2d 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -4757,8 +4757,13 @@ static int32_t extractQueryResultSchema(const SNodeList* pProjections, int32_t* int32_t index = 0; FOREACH(pNode, pProjections) { SExprNode* pExpr = (SExprNode*)pNode; - (*pSchema)[index].type = pExpr->resType.type; - (*pSchema)[index].bytes = pExpr->resType.bytes; + if (TSDB_DATA_TYPE_NULL == pExpr->resType.type) { + (*pSchema)[index].type = TSDB_DATA_TYPE_VARCHAR; + (*pSchema)[index].bytes = 0; + } else { + (*pSchema)[index].type = pExpr->resType.type; + (*pSchema)[index].bytes = pExpr->resType.bytes; + } (*pSchema)[index].colId = index + 1; if ('\0' != pExpr->userAlias[0]) { strcpy((*pSchema)[index].name, pExpr->userAlias); diff --git a/source/libs/qcom/src/queryUtil.c b/source/libs/qcom/src/queryUtil.c index b4e217ef74..eeb44c4f82 100644 --- a/source/libs/qcom/src/queryUtil.c +++ b/source/libs/qcom/src/queryUtil.c @@ -282,7 +282,7 @@ int32_t dataConverToStr(char* str, int type, void* buf, int32_t bufSize, int32_t } *str = '"'; - int32_t length = taosUcs4ToMbs((TdUcs4 *)buf, bufSize, str + 1); + int32_t length = taosUcs4ToMbs((TdUcs4*)buf, bufSize, str + 1); if (length <= 0) { return TSDB_CODE_TSC_INVALID_VALUE; } @@ -310,15 +310,15 @@ int32_t dataConverToStr(char* str, int type, void* buf, int32_t bufSize, int32_t return TSDB_CODE_TSC_INVALID_VALUE; } - if(len) *len = n; + if (len) *len = n; return TSDB_CODE_SUCCESS; } char* parseTagDatatoJson(void* p) { - char* string = NULL; + char* string = NULL; SArray* pTagVals = NULL; - cJSON* json = NULL; + cJSON* json = NULL; if (tTagToValArray((const STag*)p, &pTagVals) != 0) { goto end; } @@ -327,7 +327,7 @@ char* parseTagDatatoJson(void* p) { if (nCols == 0) { goto end; } - char tagJsonKey[256] = {0}; + char tagJsonKey[256] = {0}; json = cJSON_CreateObject(); if (json == NULL) { goto end; @@ -390,7 +390,7 @@ char* parseTagDatatoJson(void* p) { end: cJSON_Delete(json); taosArrayDestroy(pTagVals); - if(string == NULL){ + if (string == NULL) { string = strdup(TSDB_DATA_NULL_STR_L); } return string; diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c index dd55894266..e610fcb62e 100644 --- a/source/libs/scalar/src/scalar.c +++ b/source/libs/scalar/src/scalar.c @@ -729,6 +729,7 @@ EDealRes sclRewriteFunction(SNode** pNode, SScalarCtx *ctx) { if (colDataIsNull_s(output.columnData, 0)) { res->node.resType.type = TSDB_DATA_TYPE_NULL; + res->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_NULL].bytes; } else { res->node.resType.type = output.columnData->info.type; res->node.resType.bytes = output.columnData->info.bytes; @@ -819,6 +820,7 @@ EDealRes sclRewriteOperator(SNode** pNode, SScalarCtx *ctx) { if (colDataIsNull_s(output.columnData, 0)) { if(node->node.resType.type != TSDB_DATA_TYPE_JSON){ res->node.resType.type = TSDB_DATA_TYPE_NULL; + res->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_NULL].bytes; }else{ res->node.resType = node->node.resType; res->isNull = true; diff --git a/source/libs/scheduler/src/schTask.c b/source/libs/scheduler/src/schTask.c index 6bb8db593c..71d123b799 100644 --- a/source/libs/scheduler/src/schTask.c +++ b/source/libs/scheduler/src/schTask.c @@ -41,6 +41,8 @@ void schFreeTask(SSchJob *pJob, SSchTask *pTask) { if (pTask->execNodes) { taosHashCleanup(pTask->execNodes); } + + taosMemoryFree(pTask->profile.execTime); } int32_t schInitTask(SSchJob *pJob, SSchTask *pTask, SSubplan *pPlan, SSchLevel *pLevel, 
int32_t levelNum) { diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c index 29e0f7ded0..becfac0cac 100644 --- a/source/libs/stream/src/stream.c +++ b/source/libs/stream/src/stream.c @@ -173,7 +173,8 @@ int32_t streamTaskEnqueueRetrieve(SStreamTask* pTask, SStreamRetrieveReq* pReq, } int32_t streamProcessDispatchReq(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pRsp) { - qInfo("task %d receive dispatch req from node %d task %d", pTask->taskId, pReq->upstreamNodeId, pReq->upstreamTaskId); + qDebug("task %d receive dispatch req from node %d task %d", pTask->taskId, pReq->upstreamNodeId, + pReq->upstreamTaskId); // 1. handle input streamTaskEnqueue(pTask, pReq, pRsp); diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index d178c19615..36885e73c0 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -26,10 +26,12 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, void* data, SArray* pRes) } else if (pItem->type == STREAM_INPUT__DATA_SUBMIT) { ASSERT(pTask->isDataScan); SStreamDataSubmit* pSubmit = (SStreamDataSubmit*)data; + qDebug("task %d %p set submit input %p %p %d", pTask->taskId, pTask, pSubmit, pSubmit->data, *pSubmit->dataRef); qSetStreamInput(exec, pSubmit->data, STREAM_INPUT__DATA_SUBMIT, false); } else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE) { SStreamDataBlock* pBlock = (SStreamDataBlock*)data; SArray* blocks = pBlock->blocks; + qDebug("task %d %p set ssdata input", pTask->taskId, pTask); qSetMultiStreamInput(exec, blocks->pData, blocks->size, STREAM_INPUT__DATA_BLOCK, false); } else if (pItem->type == STREAM_INPUT__DROP) { // TODO exec drop diff --git a/source/libs/sync/src/syncIO.c b/source/libs/sync/src/syncIO.c index 663745a7d7..d9f11ba80f 100644 --- a/source/libs/sync/src/syncIO.c +++ b/source/libs/sync/src/syncIO.c @@ -242,13 +242,13 @@ static int32_t syncIOStopInternal(SSyncIO *io) { } static void *syncIOConsumerFunc(void *param) { - SSyncIO * io = param; - STaosQall *qall; - SRpcMsg * pRpcMsg, rpcMsg; - qall = taosAllocateQall(); + SSyncIO *io = param; + STaosQall *qall = taosAllocateQall(); + SRpcMsg *pRpcMsg, rpcMsg; + SQueueInfo qinfo = {0}; while (1) { - int numOfMsgs = taosReadAllQitemsFromQset(io->pQset, qall, NULL, NULL); + int numOfMsgs = taosReadAllQitemsFromQset(io->pQset, qall, &qinfo); sTrace("syncIOConsumerFunc %d msgs are received", numOfMsgs); if (numOfMsgs <= 0) { break; @@ -369,6 +369,8 @@ static void *syncIOConsumerFunc(void *param) { taosFreeQitem(pRpcMsg); } + + taosUpdateItemSize(qinfo.queue, numOfMsgs); } taosFreeQall(qall); diff --git a/source/libs/transport/test/pushServer.c b/source/libs/transport/test/pushServer.c index 6a4ff213d0..754433a5e6 100644 --- a/source/libs/transport/test/pushServer.c +++ b/source/libs/transport/test/pushServer.c @@ -31,12 +31,12 @@ void processShellMsg() { STaosQall *qall; SRpcMsg * pRpcMsg, rpcMsg; int type; - void * pvnode; + SQueueInfo qinfo = {0}; qall = taosAllocateQall(); while (1) { - int numOfMsgs = taosReadAllQitemsFromQset(qset, qall, &pvnode, NULL); + int numOfMsgs = taosReadAllQitemsFromQset(qset, qall, &qinfo); tDebug("%d shell msgs are received", numOfMsgs); if (numOfMsgs <= 0) break; @@ -86,6 +86,8 @@ void processShellMsg() { rpcSendResponse(&nRpcMsg); } } + + taosUpdateItemSize(qinfo.queue, numOfMsgs); } taosFreeQall(qall); diff --git a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c index eb0c7f56bd..908523f2a6 100644 --- 
a/source/libs/wal/src/walRead.c +++ b/source/libs/wal/src/walRead.c @@ -66,6 +66,7 @@ void walCloseReader(SWalReader *pRead) { } int32_t walNextValidMsg(SWalReader *pRead) { + wDebug("vgId:%d wal start to fetch", pRead->pWal->cfg.vgId); int64_t fetchVer = pRead->curVersion; int64_t endVer = pRead->cond.scanUncommited ? walGetLastVer(pRead->pWal) : walGetCommittedVer(pRead->pWal); while (fetchVer <= endVer) { @@ -176,7 +177,7 @@ int32_t walReadSeekVerImpl(SWalReader *pRead, int64_t ver) { return -1; } - wDebug("wal version reset from %ld to %ld", pRead->curVersion, ver); + wDebug("wal version reset from %ld(invalid: %d) to %ld", pRead->curVersion, pRead->curInvalid, ver); pRead->curVersion = ver; return 0; @@ -242,6 +243,7 @@ static int32_t walFetchHeadNew(SWalReader *pRead, int64_t fetchVer) { return -1; } } + pRead->curInvalid = 0; return 0; } @@ -301,6 +303,7 @@ static int32_t walSkipFetchBodyNew(SWalReader *pRead) { int64_t code; ASSERT(pRead->curVersion == pRead->pHead->head.version); + ASSERT(pRead->curInvalid == 0); code = taosLSeekFile(pRead->pLogFile, pRead->pHead->head.bodyLen, SEEK_CUR); if (code < 0) { @@ -404,6 +407,7 @@ int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead) { } int32_t walReadVer(SWalReader *pRead, int64_t ver) { + wDebug("vgId:%d wal start to read ver %ld", pRead->pWal->cfg.vgId, ver); int64_t contLen; bool seeked = false; diff --git a/source/util/src/tqueue.c b/source/util/src/tqueue.c index 94311bc435..50beba8a9b 100644 --- a/source/util/src/tqueue.c +++ b/source/util/src/tqueue.c @@ -115,7 +115,7 @@ bool taosQueueEmpty(STaosQueue *queue) { bool empty = false; taosThreadMutexLock(&queue->mutex); - if (queue->head == NULL && queue->tail == NULL) { + if (queue->head == NULL && queue->tail == NULL && queue->numOfItems == 0 && queue->memOfItems == 0) { empty = true; } taosThreadMutexUnlock(&queue->mutex); @@ -123,6 +123,14 @@ bool taosQueueEmpty(STaosQueue *queue) { return empty; } +void taosUpdateItemSize(STaosQueue *queue, int32_t items) { + if (queue == NULL) return; + + taosThreadMutexLock(&queue->mutex); + queue->numOfItems -= items; + taosThreadMutexUnlock(&queue->mutex); +} + int32_t taosQueueItemSize(STaosQueue *queue) { if (queue == NULL) return 0; @@ -257,6 +265,7 @@ int32_t taosReadAllQitems(STaosQueue *queue, STaosQall *qall) { queue->tail = NULL; queue->numOfItems = 0; queue->memOfItems = 0; + uTrace("read %d items from queue:%p, items:%d mem:%" PRId64, code, queue, queue->numOfItems, queue->memOfItems); if (queue->qset) atomic_sub_fetch_32(&queue->qset->numOfItems, qall->numOfItems); } @@ -397,7 +406,7 @@ void taosRemoveFromQset(STaosQset *qset, STaosQueue *queue) { int32_t taosGetQueueNumber(STaosQset *qset) { return qset->numOfQueues; } -int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, int64_t *ts, void **ahandle, FItem *itemFp) { +int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, SQueueInfo *qinfo) { STaosQnode *pNode = NULL; int32_t code = 0; @@ -417,17 +426,18 @@ int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, int64_t *ts, void if (queue->head) { pNode = queue->head; *ppItem = pNode->item; - if (ahandle) *ahandle = queue->ahandle; - if (itemFp) *itemFp = queue->itemFp; - if (ts) *ts = pNode->timestamp; + qinfo->ahandle = queue->ahandle; + qinfo->fp = queue->itemFp; + qinfo->queue = queue; + qinfo->timestamp = pNode->timestamp; queue->head = pNode->next; if (queue->head == NULL) queue->tail = NULL; - queue->numOfItems--; + // queue->numOfItems--; queue->memOfItems -= pNode->size; 
atomic_sub_fetch_32(&qset->numOfItems, 1); code = 1; - uTrace("item:%p is read out from queue:%p, items:%d mem:%" PRId64, *ppItem, queue, queue->numOfItems, + uTrace("item:%p is read out from queue:%p, items:%d mem:%" PRId64, *ppItem, queue, queue->numOfItems - 1, queue->memOfItems); } @@ -440,7 +450,7 @@ int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, int64_t *ts, void return code; } -int32_t taosReadAllQitemsFromQset(STaosQset *qset, STaosQall *qall, void **ahandle, FItems *itemsFp) { +int32_t taosReadAllQitemsFromQset(STaosQset *qset, STaosQall *qall, SQueueInfo *qinfo) { STaosQueue *queue; int32_t code = 0; @@ -461,13 +471,16 @@ int32_t taosReadAllQitemsFromQset(STaosQset *qset, STaosQall *qall, void **ahand qall->start = queue->head; qall->numOfItems = queue->numOfItems; code = qall->numOfItems; - if (ahandle) *ahandle = queue->ahandle; - if (itemsFp) *itemsFp = queue->itemsFp; + qinfo->ahandle = queue->ahandle; + qinfo->fp = queue->itemsFp; + qinfo->queue = queue; queue->head = NULL; queue->tail = NULL; - queue->numOfItems = 0; + // queue->numOfItems = 0; queue->memOfItems = 0; + uTrace("read %d items from queue:%p, items:0 mem:%" PRId64, code, queue, queue->memOfItems); + atomic_sub_fetch_32(&qset->numOfItems, qall->numOfItems); for (int32_t j = 1; j < qall->numOfItems; ++j) { tsem_wait(&qset->sem); diff --git a/source/util/src/tworker.c b/source/util/src/tworker.c index 88bd36f0cb..1f0731812c 100644 --- a/source/util/src/tworker.c +++ b/source/util/src/tworker.c @@ -70,27 +70,27 @@ void tQWorkerCleanup(SQWorkerPool *pool) { static void *tQWorkerThreadFp(SQWorker *worker) { SQWorkerPool *pool = worker->pool; - FItem fp = NULL; - - void *msg = NULL; - void *ahandle = NULL; - int32_t code = 0; - int64_t ts = 0; + SQueueInfo qinfo = {0}; + void *msg = NULL; + int32_t code = 0; taosBlockSIGPIPE(); setThreadName(pool->name); uDebug("worker:%s:%d is running", pool->name, worker->id); while (1) { - if (taosReadQitemFromQset(pool->qset, (void **)&msg, &ts, &ahandle, &fp) == 0) { + if (taosReadQitemFromQset(pool->qset, (void **)&msg, &qinfo) == 0) { uDebug("worker:%s:%d qset:%p, got no message and exiting", pool->name, worker->id, pool->qset); break; } - if (fp != NULL) { - SQueueInfo info = {.ahandle = ahandle, .workerId = worker->id, .threadNum = pool->num, .timestamp = ts}; - (*fp)(&info, msg); + if (qinfo.fp != NULL) { + qinfo.workerId = worker->id; + qinfo.threadNum = pool->num; + (*((FItem)qinfo.fp))(&qinfo, msg); } + + taosUpdateItemSize(qinfo.queue, 1); } return NULL; @@ -195,28 +195,28 @@ void tWWorkerCleanup(SWWorkerPool *pool) { static void *tWWorkerThreadFp(SWWorker *worker) { SWWorkerPool *pool = worker->pool; - FItems fp = NULL; - - void *msg = NULL; - void *ahandle = NULL; - int32_t numOfMsgs = 0; - int32_t qtype = 0; + SQueueInfo qinfo = {0}; + void *msg = NULL; + int32_t code = 0; + int32_t numOfMsgs = 0; taosBlockSIGPIPE(); setThreadName(pool->name); uDebug("worker:%s:%d is running", pool->name, worker->id); while (1) { - numOfMsgs = taosReadAllQitemsFromQset(worker->qset, worker->qall, &ahandle, &fp); + numOfMsgs = taosReadAllQitemsFromQset(worker->qset, worker->qall, &qinfo); if (numOfMsgs == 0) { uDebug("worker:%s:%d qset:%p, got no message and exiting", pool->name, worker->id, worker->qset); break; } - if (fp != NULL) { - SQueueInfo info = {.ahandle = ahandle, .workerId = worker->id, .threadNum = pool->num}; - (*fp)(&info, worker->qall, numOfMsgs); + if (qinfo.fp != NULL) { + qinfo.workerId = worker->id; + qinfo.threadNum = pool->num; + 
(*((FItems)qinfo.fp))(&qinfo, worker->qall, numOfMsgs); } + taosUpdateItemSize(qinfo.queue, numOfMsgs); } return NULL; diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 6826258151..50d4c04a93 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -96,11 +96,11 @@ ./test.sh -f tsim/stream/basic2.sim ./test.sh -f tsim/stream/drop_stream.sim ./test.sh -f tsim/stream/distributeInterval0.sim -# ./test.sh -f tsim/stream/distributeIntervalRetrive0.sim +./test.sh -f tsim/stream/distributeIntervalRetrive0.sim # ./test.sh -f tsim/stream/distributesession0.sim ./test.sh -f tsim/stream/session0.sim ./test.sh -f tsim/stream/session1.sim -# ./test.sh -f tsim/stream/state0.sim +./test.sh -f tsim/stream/state0.sim ./test.sh -f tsim/stream/triggerInterval0.sim # ./test.sh -f tsim/stream/triggerSession0.sim ./test.sh -f tsim/stream/partitionby.sim @@ -170,6 +170,7 @@ # --- valgrind ./test.sh -f tsim/valgrind/checkError1.sim ./test.sh -f tsim/valgrind/checkError2.sim +./test.sh -f tsim/valgrind/checkError3.sim # --- vnode # ./test.sh -f tsim/vnode/replica3_basic.sim diff --git a/tests/script/tsim/query/scalarNull.sim b/tests/script/tsim/query/scalarNull.sim index 07bd5e57cd..ae4b2a9624 100644 --- a/tests/script/tsim/query/scalarNull.sim +++ b/tests/script/tsim/query/scalarNull.sim @@ -89,5 +89,10 @@ endi #TODO: MOVE IT TO NORMAL CASE sql_error select * from tb1 where not (null); +sql select sum(1/0) from tb1; +if $rows != 1 then + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/distributeIntervalRetrive0.sim b/tests/script/tsim/stream/distributeIntervalRetrive0.sim index cde5c7058f..32170e3c41 100644 --- a/tests/script/tsim/stream/distributeIntervalRetrive0.sim +++ b/tests/script/tsim/stream/distributeIntervalRetrive0.sim @@ -76,7 +76,7 @@ if $data01 != 5 then goto loop1 endi -if $data02 != 14 then +if $data02 != 38 then print =====data02=$data02 goto loop1 endi @@ -134,7 +134,7 @@ if $data01 != 6 then goto loop2 endi -if $data02 != 18 then +if $data02 != 42 then print =====data02=$data02 goto loop2 endi @@ -192,7 +192,7 @@ if $data01 != 7 then goto loop3 endi -if $data02 != 22 then +if $data02 != 46 then print =====data02=$data02 goto loop3 endi @@ -232,60 +232,4 @@ endi print loop3 over -$loop_count = 0 -loop4: -sleep 1000 -sql select * from streamtST1; - -$loop_count = $loop_count + 1 -if $loop_count == 10 then - return -1 -endi - -# row 0 -if $data01 != 7 then - print =====data01=$data01 - goto loop4 -endi - -if $data02 != 22 then - print =====data02=$data02 - goto loop4 -endi - -# row 1 -if $data11 != 3 then - print =====data11=$data11 - goto loop4 -endi - -if $data12 != 10 then - print =====data12=$data12 - goto loop4 -endi - -#row2 -if $data21 != 3 then - print =====data21=$data21 - goto loop4 -endi - -if $data22 != 11 then - print =====data22=$data22 - goto loop4 -endi - -#row 3 -if $data31 != 5 then - print =====data31=$data31 - goto loop4 -endi - -if $data32 != 60 then - print =====data32=$data32 - goto loop4 -endi - -print loop4 over - system sh/stop_dnodes.sh diff --git a/tests/system-test/1-insert/alter_table.py b/tests/system-test/1-insert/alter_table.py index f2de7c6bae..4a9cfd30c7 100644 --- a/tests/system-test/1-insert/alter_table.py +++ b/tests/system-test/1-insert/alter_table.py @@ -210,10 +210,11 @@ class TDTestCase: self.tag_check(i,k,tag_unint) for error in [constant.INT_UN_MIN-1,constant.INT_UN_MAX+1]: tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}') - 
elif v.lower() == 'bigint unsigned': - self.tag_check(i,k,tag_unbigint) - for error in [constant.BIGINT_UN_MIN-1,constant.BIGINT_UN_MAX+1]: - tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}') + #! bug TD-17106 + # elif v.lower() == 'bigint unsigned': + # self.tag_check(i,k,tag_unbigint) + # for error in [constant.BIGINT_UN_MIN-1,constant.BIGINT_UN_MAX+1]: + # tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}') elif v.lower() == 'bool': self.tag_check(i,k,tag_bool) elif v.lower() == 'float': @@ -223,7 +224,8 @@ class TDTestCase: tdSql.checkEqual(tdSql.queryResult[0][0],tdSql.queryResult[0][0]) else: tdLog.exit(f'select {k} from {self.stbname}_{i},data check failure') - # for error in [constant.FLOAT_MIN*10,constant.FLOAT_MAX*10]: + #! bug TD-17106 + # for error in [constant.FLOAT_MIN*1.1,constant.FLOAT_MAX*1.1]: # tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}') elif v.lower() == 'double': tdSql.execute(f'alter table {self.stbname}_{i} set tag {k} = {tag_double}') @@ -232,7 +234,7 @@ class TDTestCase: tdSql.checkEqual(tdSql.queryResult[0][0],tdSql.queryResult[0][0]) else: tdLog.exit(f'select {k} from {self.stbname}_{i},data check failure') - for error in [constant.DOUBLE_MIN-1,constant.DOUBLE_MAX+1]: + for error in [constant.DOUBLE_MIN*1.1,constant.DOUBLE_MAX*1.1]: tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}') elif 'binary' in v.lower(): tag_binary_error = tdCom.getLongName(self.binary_length+1) @@ -242,7 +244,8 @@ class TDTestCase: tdSql.checkData(0,0,tag_binary) elif 'nchar' in v.lower(): tag_nchar_error = tdCom.getLongName(self.nchar_length+1) - tdSql.execute(f'alter table {self.stbname}_{i} set tag {k} = "{tag_nchar_error}"') + tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = "{tag_nchar_error}"') + tdSql.execute(f'alter table {self.stbname}_{i} set tag {k} = "{tag_nchar}"') tdSql.query(f'select {k} from {self.stbname}_{i}') tdSql.checkData(0,0,tag_nchar) diff --git a/tests/system-test/2-query/percentile.py b/tests/system-test/2-query/percentile.py index 21bb8763dc..43b5c5610a 100644 --- a/tests/system-test/2-query/percentile.py +++ b/tests/system-test/2-query/percentile.py @@ -134,31 +134,16 @@ class TDTestCase: tdSql.query(f'select percentile({k}, {param}) from {self.stbname}_{i}') tdSql.checkData(0, 0, np.percentile(floatData, param)) - #!bug TD-17119 - # for k,v in self.tag_dict.items(): - # for param in self.param: - # if v.lower() in ['timestamp','bool'] or 'binary' in v.lower() or 'nchar' in v.lower(): - # tdSql.error(f'select percentile({k},{param}) from {self.stbname}_{i}') - # elif v.lower() == 'tinyint': - # self.check_tags(k,param,i,self.tag_tinyint) - # elif v.lower() == 'smallint': - # self.check_tags(k,param,i,self.tag_smallint) - # elif v.lower() == 'int': - # self.check_tags(k,param,i,self.tag_int) - # elif v.lower() == 'bigint': - # self.check_tags(k,param,i,self.tag_bigint) - # elif v.lower() == 'tinyint unsigned': - # self.check_tags(k,param,i,self.tag_utint) - # elif v.lower() == 'smallint unsigned': - # self.check_tags(k,param,i,self.tag_usint) - # elif v.lower() == 'int unsigned': - # self.check_tags(k,param,i,self.tag_uint) - # elif v.lower() == 'bigint unsigned': - # self.check_tags(k,param,i,self.tag_ubint) - # elif v.lower() == 'float': - # self.check_tags(k,param,i,self.tag_float) - # elif v.lower() == 'double': - # self.check_tags(k,param,i,self.tag_double) + for k,v in self.tag_dict.items(): + for param in self.param: + if v.lower() in ['timestamp','bool'] or 'binary' 
in v.lower() or 'nchar' in v.lower(): + tdSql.error(f'select percentile({k},{param}) from {self.stbname}_{i}') + else: + tdSql.query(f'select {k} from {self.stbname}_{i}') + data_num = tdSql.queryResult[0][0] + tdSql.query(f'select percentile({k},{param}) from {self.stbname}_{i}') + tdSql.checkData(0,0,data_num) + def run(self): self.function_check_ntb() self.function_check_ctb() diff --git a/tests/system-test/7-tmq/dataFromTsdbNWal.py b/tests/system-test/7-tmq/dataFromTsdbNWal.py new file mode 100644 index 0000000000..a55fbbfd18 --- /dev/null +++ b/tests/system-test/7-tmq/dataFromTsdbNWal.py @@ -0,0 +1,254 @@ + +import taos +import sys +import time +import socket +import os +import threading +import math + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def __init__(self): + self.vgroups = 1 + self.ctbNum = 100 + self.rowsPerTbl = 10000 + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 1000, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 10, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + + tdLog.info("insert data") + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + tdLog.info("restart taosd to ensure that the data falls into the disk") + # tdDnodes.stop(1) + # tdDnodes.start(1) + tdSql.query("flush database %s"%(paraDict['dbName'])) + return + + def tmqCase1(self): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 
'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + paraDict['batchNum'] = 100 + paraDict['startTs'] = paraDict['startTs'] + self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + # tdSql.query(queryString) + # expectRowsList.append(tdSql.getRows()) + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 0 + expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2 + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + + # after start consume, continue insert some data + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + # + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + + tdLog.info("wait the consume result") + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + if expectRowsList[0] != resultList[0]: + tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + tmqCom.checkFileContent(consumerId, queryString) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 1 end ...... 
") + + def tmqCase2(self): + tdLog.printNoPrefix("======== test case 2: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 1, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 10, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + topicNameList = ['topic1'] + expectRowsList = [] + tmqCom.initConsumerTable() + + tdLog.info("create topics from stb with filter") + queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicNameList[0], queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) + totalRowsInserted = expectRowsList[0] + + # init consume info, and start tmq_sim, then check consume result + tdLog.info("insert consume info to consume processor") + consumerId = 1 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] / 3) + topicList = topicNameList[0] + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 0") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]): + tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + firstConsumeRows = resultList[0] + + # reinit consume info, and start tmq_sim, then check consume result + tmqCom.initConsumerTable() + consumerId = 2 + expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3) + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor 1") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + tdLog.info("wait the consume result") + + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + + actConsumeTotalRows = firstConsumeRows + resultList[0] + + if not (expectrowcnt >= resultList[0] and totalRowsInserted == actConsumeTotalRows): + tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0])) + tdLog.info("and sum of two consume rows: %d 
should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) + tdLog.exit("%d tmq consume rows error!"%consumerId) + + time.sleep(10) + for i in range(len(topicNameList)): + tdSql.query("drop topic %s"%topicNameList[i]) + + tdLog.printNoPrefix("======== test case 2 end ...... ") + + def run(self): + tdSql.prepare() + self.prepareTestEnv() + self.tmqCase1() + # self.tmqCase2() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqUpdate-1ctb.py b/tests/system-test/7-tmq/tmqUpdate-1ctb.py new file mode 100644 index 0000000000..9e2d06d1bd --- /dev/null +++ b/tests/system-test/7-tmq/tmqUpdate-1ctb.py @@ -0,0 +1,259 @@ + +import taos +import sys +import time +import socket +import os +import threading +from enum import Enum + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def __init__(self): + self.snapshot = 0 + self.vgroups = 2 + self.ctbNum = 1 + self.rowsPerTbl = 100000 + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 4, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 1, + 'rowsPerTbl': 100000, + 'batchNum': 1200, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 0} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("insert data") + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + # tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx", + # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + # tdLog.info("restart taosd to ensure that the data falls into the disk") + # tdSql.query("flush database %s"%(paraDict['dbName'])) + return + 
+
+    # data has been inserted with auto-created tables; start consuming
+    def tmqCase1(self):
+        tdLog.printNoPrefix("======== test case 1: ")
+        paraDict = {'dbName': 'dbt',
+                    'dropFlag': 1,
+                    'event': '',
+                    'vgroups': 4,
+                    'stbName': 'stb',
+                    'colPrefix': 'c',
+                    'tagPrefix': 't',
+                    'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
+                    'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
+                    'ctbPrefix': 'ctb',
+                    'ctbStartIdx': 0,
+                    'ctbNum': 1,
+                    'rowsPerTbl': 100000,
+                    'batchNum': 3000,
+                    'startTs': 1640966400000,  # 2022-01-01 00:00:00.000
+                    'pollDelay': 5,
+                    'showMsg': 1,
+                    'showRow': 1,
+                    'snapshot': 0}
+        paraDict['snapshot'] = self.snapshot
+        paraDict['vgroups'] = self.vgroups
+        paraDict['ctbNum'] = self.ctbNum
+        paraDict['rowsPerTbl'] = self.rowsPerTbl
+
+        # update to half tables
+        paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
+        # tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
+        #                                       ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
+        #                                       startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+        tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
+                                               ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
+                                               startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+
+        tdLog.info("create topics from stb1")
+        topicFromStb1 = 'topic_stb1'
+        queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
+        sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
+        tdLog.info("create topic sql: %s"%sqlString)
+        tdSql.execute(sqlString)
+
+        # paraDict['ctbNum'] = self.ctbNum
+        paraDict['rowsPerTbl'] = self.rowsPerTbl
+        consumerId = 0
+        expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 3/2)
+        topicList = topicFromStb1
+        ifcheckdata = 1
+        ifManualCommit = 1
+        keyList = 'group.id:cgrp1,\
+                   enable.auto.commit:true,\
+                   auto.commit.interval.ms:1000,\
+                   auto.offset.reset:earliest'
+        tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+
+        tdLog.info("start consume processor")
+        tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
+
+        tdLog.info("insert process end, and start to check consume result")
+        expectRows = 1
+        resultList = tmqCom.selectConsumeResult(expectRows)
+        totalConsumeRows = 0
+        for i in range(expectRows):
+            totalConsumeRows += resultList[i]
+
+        tdSql.query(queryString)
+        totalRowsInserted = tdSql.getRows()
+
+        tdLog.info("act consume rows: %d, expect consume rows: %d, act insert rows: %d"%(totalConsumeRows, expectrowcnt, totalRowsInserted))
+        if totalConsumeRows != expectrowcnt:
+            tdLog.exit("tmq consume rows error!")
+
+        tmqCom.checkFileContent(consumerId, queryString)
+
+        tdSql.query("drop topic %s"%topicFromStb1)
+        tdLog.printNoPrefix("======== test case 1 end ...... 
") + + def tmqCase2(self): + tdLog.printNoPrefix("======== test case 2: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 4, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 1, + 'rowsPerTbl': 10000, + 'batchNum': 5000, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 5, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 0} + + paraDict['snapshot'] = self.snapshot + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tdLog.info("restart taosd to ensure that the data falls into the disk") + tdSql.query("flush database %s"%(paraDict['dbName'])) + + # update to half tables + paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl / 2) + paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2) + tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + # tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + tmqCom.initConsumerTable() + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicFromStb1, queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + + # paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + consumerId = 1 + expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2) + topicList = topicFromStb1 + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:true,\ + auto.commit.interval.ms:1000,\ + auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + + tdLog.info("insert process end, and start to check consume result") + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + tdSql.query(queryString) + totalRowsInserted = tdSql.getRows() + + tdLog.info("act consume rows: %d, act insert rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsInserted, expectrowcnt)) + + if totalConsumeRows != expectrowcnt: + tdLog.exit("tmq consume rows error!") + + # tmqCom.checkFileContent(consumerId, queryString) + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 2 end ...... 
") + + def run(self): + tdSql.prepare() + self.prepareTestEnv() + tdLog.printNoPrefix("=============================================") + tdLog.printNoPrefix("======== snapshot is 0: only consume from wal") + self.tmqCase1() + self.tmqCase2() + + self.prepareTestEnv() + tdLog.printNoPrefix("====================================================================") + tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal") + self.snapshot = 1 + self.tmqCase1() + self.tmqCase2() + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqUpdate-multiCtb.py b/tests/system-test/7-tmq/tmqUpdate-multiCtb.py new file mode 100644 index 0000000000..9e2d06d1bd --- /dev/null +++ b/tests/system-test/7-tmq/tmqUpdate-multiCtb.py @@ -0,0 +1,259 @@ + +import taos +import sys +import time +import socket +import os +import threading +from enum import Enum + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def __init__(self): + self.snapshot = 0 + self.vgroups = 2 + self.ctbNum = 1 + self.rowsPerTbl = 100000 + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 4, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 1, + 'rowsPerTbl': 100000, + 'batchNum': 1200, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 0} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("insert data") + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + # tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx", + # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + # tdLog.info("restart 
taosd to ensure that the data falls into the disk")
+        # tdSql.query("flush database %s"%(paraDict['dbName']))
+        return
+
+    # data insertion (auto-created tables) is done, start consuming
+    def tmqCase1(self):
+        tdLog.printNoPrefix("======== test case 1: ")
+        paraDict = {'dbName': 'dbt',
+                    'dropFlag': 1,
+                    'event': '',
+                    'vgroups': 4,
+                    'stbName': 'stb',
+                    'colPrefix': 'c',
+                    'tagPrefix': 't',
+                    'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
+                    'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
+                    'ctbPrefix': 'ctb',
+                    'ctbStartIdx': 0,
+                    'ctbNum': 1,
+                    'rowsPerTbl': 100000,
+                    'batchNum': 3000,
+                    'startTs': 1640966400000,  # 2022-01-01 00:00:00.000
+                    'pollDelay': 5,
+                    'showMsg': 1,
+                    'showRow': 1,
+                    'snapshot': 0}
+        paraDict['snapshot'] = self.snapshot
+        paraDict['vgroups'] = self.vgroups
+        paraDict['ctbNum'] = self.ctbNum
+        paraDict['rowsPerTbl'] = self.rowsPerTbl
+
+        # update: re-insert half of the rows into the existing child table(s)
+        paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
+        # tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
+        #         ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
+        #         startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+        tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
+                ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
+                startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+
+        tdLog.info("create topics from stb1")
+        topicFromStb1 = 'topic_stb1'
+        queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
+        sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
+        tdLog.info("create topic sql: %s"%sqlString)
+        tdSql.execute(sqlString)
+
+        # paraDict['ctbNum'] = self.ctbNum
+        paraDict['rowsPerTbl'] = self.rowsPerTbl
+        consumerId = 0
+        expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 3/2)
+        topicList = topicFromStb1
+        ifcheckdata = 1
+        ifManualCommit = 1
+        keyList = 'group.id:cgrp1,\
+                   enable.auto.commit:true,\
+                   auto.commit.interval.ms:1000,\
+                   auto.offset.reset:earliest'
+        tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+
+        tdLog.info("start consume processor")
+        tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
+
+        tdLog.info("insert process end, and start to check consume result")
+        expectRows = 1
+        resultList = tmqCom.selectConsumeResult(expectRows)
+        totalConsumeRows = 0
+        for i in range(expectRows):
+            totalConsumeRows += resultList[i]
+
+        tdSql.query(queryString)
+        totalRowsInserted = tdSql.getRows()
+
+        tdLog.info("act consume rows: %d, expect consume rows: %d, act insert rows: %d"%(totalConsumeRows, expectrowcnt, totalRowsInserted))
+        if totalConsumeRows != expectrowcnt:
+            tdLog.exit("tmq consume rows error!")
+
+        tmqCom.checkFileContent(consumerId, queryString)
+
+        tdSql.query("drop topic %s"%topicFromStb1)
+        tdLog.printNoPrefix("======== test case 1 end ...... 
") + + def tmqCase2(self): + tdLog.printNoPrefix("======== test case 2: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 4, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 1, + 'rowsPerTbl': 10000, + 'batchNum': 5000, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 5, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 0} + + paraDict['snapshot'] = self.snapshot + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tdLog.info("restart taosd to ensure that the data falls into the disk") + tdSql.query("flush database %s"%(paraDict['dbName'])) + + # update to half tables + paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl / 2) + paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2) + tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + # tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + tmqCom.initConsumerTable() + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicFromStb1, queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + + # paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + consumerId = 1 + expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2) + topicList = topicFromStb1 + ifcheckdata = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:true,\ + auto.commit.interval.ms:1000,\ + auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + + tdLog.info("insert process end, and start to check consume result") + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + tdSql.query(queryString) + totalRowsInserted = tdSql.getRows() + + tdLog.info("act consume rows: %d, act insert rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsInserted, expectrowcnt)) + + if totalConsumeRows != expectrowcnt: + tdLog.exit("tmq consume rows error!") + + # tmqCom.checkFileContent(consumerId, queryString) + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 2 end ...... 
") + + def run(self): + tdSql.prepare() + self.prepareTestEnv() + tdLog.printNoPrefix("=============================================") + tdLog.printNoPrefix("======== snapshot is 0: only consume from wal") + self.tmqCase1() + self.tmqCase2() + + self.prepareTestEnv() + tdLog.printNoPrefix("====================================================================") + tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal") + self.snapshot = 1 + self.tmqCase1() + self.tmqCase2() + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqUpdate1.py b/tests/system-test/7-tmq/tmqUpdate1.py new file mode 100644 index 0000000000..5f11090385 --- /dev/null +++ b/tests/system-test/7-tmq/tmqUpdate1.py @@ -0,0 +1,175 @@ + +import taos +import sys +import time +import socket +import os +import threading +from enum import Enum + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + def __init__(self): + self.vgroups = 4 + self.ctbNum = 1000 + self.rowsPerTbl = 1000 + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 4, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 1000, + 'rowsPerTbl': 1000, + 'batchNum': 400, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 3, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 0} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) + tdLog.info("create stb") + tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"]) + tdLog.info("create ctb") + tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("insert data") + paraDict['ctbNum'] = int(self.ctbNum / 2) + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx", + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + + # tdLog.info("restart 
taosd to ensure that the data falls into the disk") + # tdSql.query("flush database %s"%(paraDict['dbName'])) + return + + def tmqCase1(self): + tdLog.printNoPrefix("======== test case 1: ") + paraDict = {'dbName': 'dbt', + 'dropFlag': 1, + 'event': '', + 'vgroups': 4, + 'stbName': 'stb', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbStartIdx': 0, + 'ctbNum': 1000, + 'rowsPerTbl': 1000, + 'batchNum': 1000, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'pollDelay': 5, + 'showMsg': 1, + 'showRow': 1, + 'snapshot': 1} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tdLog.info("restart taosd to ensure that the data falls into the disk") + tdSql.query("flush database %s"%(paraDict['dbName'])) + + # update to half tables + paraDict['ctbNum'] = int(self.ctbNum / 4) + tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx", + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+paraDict['ctbNum']) + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+paraDict['ctbNum']) + + tmqCom.initConsumerTable() + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topicFromStb1, queryString) + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + + paraDict['ctbNum'] = self.ctbNum + consumerId = 0 + expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 + 1) + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:true,\ + auto.commit.interval.ms:1000,\ + auto.offset.reset:earliest' + tmqCom.insertConsumerInfo(consumerId, expectrowcnt + paraDict["rowsPerTbl"] * paraDict["ctbNum"],topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) + + paraDict['ctbNum'] = int(self.ctbNum / 2) + paraDict['ctbStartIdx'] += paraDict['ctbNum'] + _ = tmqCom.asyncInsertDataByInterlace(paraDict) + time.sleep(3) + pthread = tmqCom.asyncInsertDataByInterlace(paraDict) + pthread.join() + + tdLog.info("insert process end, and start to check consume result") + expectRows = 1 + resultList = tmqCom.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + tdSql.query(queryString) + totalRowsInserted = tdSql.getRows() + + tdLog.info("act consume rows: %d, act insert rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsInserted, expectrowcnt)) + + if 
totalConsumeRows <= totalRowsInserted or totalConsumeRows != expectrowcnt: + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 1 end ...... ") + + + def run(self): + tdSql.prepare() + self.prepareTestEnv() + self.tmqCase1() + # self.tmqCase2() + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index 3e1d6b217a..269f83a139 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -21,7 +21,7 @@ python3 ./test.py -f 1-insert/opentsdb_json_taosc_insert.py python3 ./test.py -f 1-insert/test_stmt_muti_insert_query.py python3 ./test.py -f 1-insert/test_stmt_set_tbname_tag.py python3 ./test.py -f 1-insert/alter_stable.py -#python3 ./test.py -f 1-insert/alter_table.py +python3 ./test.py -f 1-insert/alter_table.py python3 ./test.py -f 1-insert/insertWithMoreVgroup.py python3 ./test.py -f 1-insert/table_comment.py python3 ./test.py -f 1-insert/time_range_wise.py diff --git a/tools/taosws-rs b/tools/taosws-rs index 6dccac192a..7a94ffab45 160000 --- a/tools/taosws-rs +++ b/tools/taosws-rs @@ -1 +1 @@ -Subproject commit 6dccac192a2ae7dd78718ab926201aab5419327a +Subproject commit 7a94ffab45f08e16f09b3f430fe75d717054adb6
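The new tmqUpdate cases follow the same invocation pattern as the entries in tests/system-test/fulltest.sh. A minimal sketch of driving them through the test.py harness (illustrative only; it assumes the commands are issued from the tests/system-test directory, and the loop itself is not part of this patch):

# illustrative only: run each new tmq update case via test.py, mirroring the fulltest.sh style
import subprocess

for case in ("7-tmq/tmqUpdate-1ctb.py", "7-tmq/tmqUpdate-multiCtb.py", "7-tmq/tmqUpdate1.py"):
    subprocess.run(["python3", "./test.py", "-f", case], check=True)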