commit d16430765f
Merge branch '3.0' into 3.0test/jcy
jiacy-jcy 2022-07-13 08:46:37 +08:00
80 changed files with 2346 additions and 2604 deletions

View File

@@ -108,6 +108,7 @@ typedef struct SDataBlockInfo {
  // TODO: optimize and remove following
  int32_t     childId;  // used for stream, do not serialize
  EStreamType type;     // used for stream, do not serialize
+ STimeWindow calWin;   // used for stream, do not serialize
} SDataBlockInfo;

typedef struct SSDataBlock {

View File

@@ -64,18 +64,22 @@ int32_t tPutValue(uint8_t *p, SValue *pValue, int8_t type);
int32_t tGetValue(uint8_t *p, SValue *pValue, int8_t type);
int     tValueCmprFn(const SValue *pValue1, const SValue *pValue2, int8_t type);

-// STSRow2
+// SColVal
#define COL_VAL_NONE(CID, TYPE)     ((SColVal){.cid = (CID), .type = (TYPE), .isNone = 1})
#define COL_VAL_NULL(CID, TYPE)     ((SColVal){.cid = (CID), .type = (TYPE), .isNull = 1})
#define COL_VAL_VALUE(CID, TYPE, V) ((SColVal){.cid = (CID), .type = (TYPE), .value = (V)})

+// STSRow2
+#define TSROW_LEN(PROW, V)  tGetI32v((uint8_t *)(PROW)->data, (V) ? &(V) : NULL)
+#define TSROW_SVER(PROW, V) tGetI32v((PROW)->data + TSROW_LEN(PROW, NULL), (V) ? &(V) : NULL)
+
int32_t tTSRowNew(STSRowBuilder *pBuilder, SArray *pArray, STSchema *pTSchema, STSRow2 **ppRow);
int32_t tTSRowClone(const STSRow2 *pRow, STSRow2 **ppRow);
void    tTSRowFree(STSRow2 *pRow);
void    tTSRowGet(STSRow2 *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal);
int32_t tTSRowToArray(STSRow2 *pRow, STSchema *pTSchema, SArray **ppArray);
int32_t tPutTSRow(uint8_t *p, STSRow2 *pRow);
-int32_t tGetTSRow(uint8_t *p, STSRow2 *pRow);
+int32_t tGetTSRow(uint8_t *p, STSRow2 **ppRow);

// STSRowBuilder
#define tsRowBuilderInit() ((STSRowBuilder){0})
@@ -97,7 +101,7 @@ int32_t tEncodeTag(SEncoder *pEncoder, const STag *pTag);
int32_t tDecodeTag(SDecoder *pDecoder, STag **ppTag);
int32_t tTagToValArray(const STag *pTag, SArray **ppArray);
void    debugPrintSTag(STag *pTag, const char *tag, int32_t ln);  // TODO: remove
-int32_t parseJsontoTagData(const char* json, SArray* pTagVals, STag** ppTag, void* pMsgBuf);
+int32_t parseJsontoTagData(const char *json, SArray *pTagVals, STag **ppTag, void *pMsgBuf);

// STRUCT =================
struct STColumn {
@@ -123,16 +127,16 @@ struct STSchema {
#define TSROW_KV_SMALL ((uint8_t)0x10U)
#define TSROW_KV_MID   ((uint8_t)0x20U)
#define TSROW_KV_BIG   ((uint8_t)0x40U)

+#pragma pack(push, 1)
struct STSRow2 {
  TSKEY   ts;
  uint8_t flags;
- int32_t  sver;
- uint32_t nData;
- uint8_t *pData;
+ uint8_t data[];
};
+#pragma pack(pop)

struct STSRowBuilder {
- STSRow2  tsRow;
+ // STSRow2  tsRow;
  int32_t  szBuf;
  uint8_t *pBuf;
};
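With this change STSRow2 becomes a packed header (ts, flags) followed by a flexible data[] payload, and the new TSROW_LEN/TSROW_SVER macros read the varint-encoded total length and schema version from the front of data[]. A minimal sketch of how a caller might inspect a serialized row (hypothetical helper, not part of this commit; assumes <stdio.h>/<inttypes.h> and the macros above):

    static void tsRowDebugPrint(const STSRow2 *pRow) {
      int32_t len = -1, sver = -1;  // must start non-zero: the macros only store through &V when (V) is truthy
      TSROW_LEN(pRow, len);         // total serialized length, varint at the start of data[]
      TSROW_SVER(pRow, sver);       // schema version, varint stored right after the length
      printf("ts:%" PRIi64 " flags:0x%x len:%d sver:%d\n", pRow->ts, pRow->flags, len, sver);
    }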
@@ -226,50 +230,6 @@ struct STag {
    memcpy(varDataVal(x), (str), (_size)); \
  } while (0);
// ----------------- TSDB COLUMN DEFINITION
#define colType(col) ((col)->type)
#define colFlags(col) ((col)->flags)
#define colColId(col) ((col)->colId)
#define colBytes(col) ((col)->bytes)
#define colOffset(col) ((col)->offset)
#define colSetType(col, t) (colType(col) = (t))
#define colSetFlags(col, f) (colFlags(col) = (f))
#define colSetColId(col, id) (colColId(col) = (id))
#define colSetBytes(col, b) (colBytes(col) = (b))
#define colSetOffset(col, o) (colOffset(col) = (o))
// ----------------- TSDB SCHEMA DEFINITION
#define schemaNCols(s) ((s)->numOfCols)
#define schemaVersion(s) ((s)->version)
#define schemaTLen(s) ((s)->tlen)
#define schemaFLen(s) ((s)->flen)
#define schemaVLen(s) ((s)->vlen)
#define schemaColAt(s, i) ((s)->columns + i)
#define tdFreeSchema(s) taosMemoryFreeClear((s))
STSchema *tdDupSchema(const STSchema *pSchema);
int32_t tdEncodeSchema(void **buf, STSchema *pSchema);
void *tdDecodeSchema(void *buf, STSchema **pRSchema);
static FORCE_INLINE int32_t comparColId(const void *key1, const void *key2) {
if (*(int16_t *)key1 > ((STColumn *)key2)->colId) {
return 1;
} else if (*(int16_t *)key1 < ((STColumn *)key2)->colId) {
return -1;
} else {
return 0;
}
}
static FORCE_INLINE STColumn *tdGetColOfID(STSchema *pSchema, int16_t colId) {
void *ptr = bsearch(&colId, (void *)pSchema->columns, schemaNCols(pSchema), sizeof(STColumn), comparColId);
if (ptr == NULL) return NULL;
return (STColumn *)ptr;
}
// ----------------- SCHEMA BUILDER DEFINITION
typedef struct {
  int32_t tCols;
@@ -299,141 +259,6 @@ void tdResetTSchemaBuilder(STSchemaBuilder *pBuilder, schema_ver_t version)
int32_t   tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int8_t flags, col_id_t colId, col_bytes_t bytes);
STSchema *tdGetSchemaFromBuilder(STSchemaBuilder *pBuilder);
// ----------------- Semantic timestamp key definition
// typedef uint64_t TKEY;
#define TKEY TSKEY
#define TKEY_INVALID UINT64_MAX
#define TKEY_NULL TKEY_INVALID
#define TKEY_NEGATIVE_FLAG (((TKEY)1) << 63)
#define TKEY_VALUE_FILTER (~(TKEY_NEGATIVE_FLAG))
#define TKEY_IS_NEGATIVE(tkey) (((tkey)&TKEY_NEGATIVE_FLAG) != 0)
#define TKEY_IS_DELETED(tkey) (false)
#define tdGetTKEY(key) (key)
#define tdGetKey(tskey) (tskey)
#define MIN_TS_KEY ((TSKEY)0x8000000000000001)
#define MAX_TS_KEY ((TSKEY)0x7fffffffffffffff)
#define TD_TO_TKEY(key) tdGetTKEY(((key) < MIN_TS_KEY) ? MIN_TS_KEY : (((key) > MAX_TS_KEY) ? MAX_TS_KEY : key))
static FORCE_INLINE TKEY keyToTkey(TSKEY key) {
TSKEY lkey = key;
if (key > MAX_TS_KEY) {
lkey = MAX_TS_KEY;
} else if (key < MIN_TS_KEY) {
lkey = MIN_TS_KEY;
}
return tdGetTKEY(lkey);
}
static FORCE_INLINE int32_t tkeyComparFn(const void *tkey1, const void *tkey2) {
TSKEY key1 = tdGetKey(*(TKEY *)tkey1);
TSKEY key2 = tdGetKey(*(TKEY *)tkey2);
if (key1 < key2) {
return -1;
} else if (key1 > key2) {
return 1;
} else {
return 0;
}
}
// ----------------- Data column structure
// SDataCol arrangement: data => bitmap => dataOffset
typedef struct SDataCol {
int8_t type; // column type
uint8_t bitmap : 1; // 0: no bitmap if all rows are NORM, 1: has bitmap if has NULL/NORM rows
uint8_t reserve : 7;
int16_t colId; // column ID
int32_t bytes; // column data bytes defined
int32_t offset; // data offset in a SDataRow (including the header size)
int32_t spaceSize; // Total space size for this column
int32_t len; // column data length
VarDataOffsetT *dataOff; // For binary and nchar data, the offset in the data column
void *pData; // Actual data pointer
void *pBitmap; // Bitmap pointer
TSKEY ts; // only used in last NULL column
} SDataCol;
#define isAllRowsNull(pCol) ((pCol)->len == 0)
#define isAllRowsNone(pCol) ((pCol)->len == 0)
static FORCE_INLINE void dataColReset(SDataCol *pDataCol) { pDataCol->len = 0; }
int32_t tdAllocMemForCol(SDataCol *pCol, int32_t maxPoints);
void dataColInit(SDataCol *pDataCol, STColumn *pCol, int32_t maxPoints);
int32_t dataColAppendVal(SDataCol *pCol, const void *value, int32_t numOfRows, int32_t maxPoints);
void *dataColSetOffset(SDataCol *pCol, int32_t nEle);
bool isNEleNull(SDataCol *pCol, int32_t nEle);
typedef struct {
col_id_t maxCols; // max number of columns
col_id_t numOfCols; // Total number of cols
int32_t maxPoints; // max number of points
int32_t numOfRows;
int32_t bitmapMode : 1; // default is 0(2 bits), otherwise 1(1 bit)
int32_t sversion : 31; // TODO: set sversion(not used yet)
SDataCol *cols;
} SDataCols;
static FORCE_INLINE bool tdDataColsIsBitmapI(SDataCols *pCols) { return pCols->bitmapMode != TSDB_BITMODE_DEFAULT; }
static FORCE_INLINE void tdDataColsSetBitmapI(SDataCols *pCols) { pCols->bitmapMode = TSDB_BITMODE_ONE_BIT; }
static FORCE_INLINE bool tdIsBitmapModeI(int8_t bitmapMode) { return bitmapMode != TSDB_BITMODE_DEFAULT; }
#define keyCol(pCols) (&((pCols)->cols[0])) // Key column
#define dataColsTKeyAt(pCols, idx) ((TKEY *)(keyCol(pCols)->pData))[(idx)] // the idx row of column-wised data
#define dataColsKeyAt(pCols, idx) tdGetKey(dataColsTKeyAt(pCols, idx))
static FORCE_INLINE TKEY dataColsTKeyFirst(SDataCols *pCols) {
if (pCols->numOfRows) {
return dataColsTKeyAt(pCols, 0);
} else {
return TKEY_INVALID;
}
}
static FORCE_INLINE TSKEY dataColsKeyAtRow(SDataCols *pCols, int32_t row) {
assert(row < pCols->numOfRows);
return dataColsKeyAt(pCols, row);
}
static FORCE_INLINE TSKEY dataColsKeyFirst(SDataCols *pCols) {
if (pCols->numOfRows) {
return dataColsKeyAt(pCols, 0);
} else {
return TSDB_DATA_TIMESTAMP_NULL;
}
}
static FORCE_INLINE TKEY dataColsTKeyLast(SDataCols *pCols) {
if (pCols->numOfRows) {
return dataColsTKeyAt(pCols, pCols->numOfRows - 1);
} else {
return TKEY_INVALID;
}
}
static FORCE_INLINE TSKEY dataColsKeyLast(SDataCols *pCols) {
if (pCols->numOfRows) {
return dataColsKeyAt(pCols, pCols->numOfRows - 1);
} else {
return TSDB_DATA_TIMESTAMP_NULL;
}
}
SDataCols *tdNewDataCols(int32_t maxCols, int32_t maxRows);
void tdResetDataCols(SDataCols *pCols);
int32_t tdInitDataCols(SDataCols *pCols, STSchema *pSchema);
SDataCols *tdDupDataCols(SDataCols *pCols, bool keepData);
SDataCols *tdFreeDataCols(SDataCols *pCols);
int32_t tdMergeDataCols(SDataCols *target, SDataCols *source, int32_t rowsToMerge, int32_t *pOffset, bool update,
TDRowVerT maxVer);
#endif

#ifdef __cplusplus

View File

@@ -168,7 +168,7 @@ typedef struct {
// N.B. If without STSchema, getExtendedRowSize() is used to get the rowMaxBytes and
// (int32_t)ceil((double)nCols/TD_VTYPE_PARTS) should be added if TD_SUPPORT_BITMAP defined.
-#define TD_ROW_MAX_BYTES_FROM_SCHEMA(s) (schemaTLen(s) + TD_ROW_HEAD_LEN)
+#define TD_ROW_MAX_BYTES_FROM_SCHEMA(s) ((s)->tlen + TD_ROW_HEAD_LEN)

#define TD_ROW_SET_INFO(r, i) (TD_ROW_INFO(r) = (i))
#define TD_ROW_SET_TYPE(r, t) (TD_ROW_TYPE(r) = (t))
@@ -223,9 +223,10 @@ int32_t tdSetBitmapValTypeN(void *pBitmap, int16_t nEle, TDR
static FORCE_INLINE int32_t tdGetBitmapValType(const void *pBitmap, int16_t colIdx, TDRowValT *pValType,
                                               int8_t bitmapMode);
bool tdIsBitmapBlkNorm(const void *pBitmap, int32_t numOfBits, int8_t bitmapMode);
-int32_t tdAppendValToDataCol(SDataCol *pCol, TDRowValT valType, const void *val, int32_t numOfRows, int32_t maxPoints,
-                             int8_t bitmapMode, bool isMerge);
-int32_t tdAppendSTSRowToDataCol(STSRow *pRow, STSchema *pSchema, SDataCols *pCols, bool isMerge);
+// int32_t tdAppendValToDataCol(SDataCol *pCol, TDRowValT valType, const void *val, int32_t numOfRows, int32_t
+// maxPoints,
+//                              int8_t bitmapMode, bool isMerge);
+// int32_t tdAppendSTSRowToDataCol(STSRow *pRow, STSchema *pSchema, SDataCols *pCols, bool isMerge);

int32_t tdGetBitmapValTypeII(const void *pBitmap, int16_t colIdx, TDRowValT *pValType);
int32_t tdSetBitmapValTypeI(void *pBitmap, int16_t colIdx, TDRowValT valType);
@@ -318,12 +319,9 @@ bool tdSTSRowGetVal(STSRowIter *pIter, col_id_t colId, col_type_t colType, SC
bool tdGetTpRowDataOfCol(STSRowIter *pIter, col_type_t colType, int32_t offset, SCellVal *pVal);
bool tdGetKvRowValOfColEx(STSRowIter *pIter, col_id_t colId, col_type_t colType, col_id_t *nIdx, SCellVal *pVal);
bool tdSTSRowIterNext(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCellVal *pVal);
-STSRow *mergeTwoRows(void *buffer, STSRow *row1, STSRow *row2, STSchema *pSchema1, STSchema *pSchema2);
-int32_t tdGetColDataOfRow(SCellVal *pVal, SDataCol *pCol, int32_t row, int8_t bitmapMode);
bool tdSTpRowGetVal(STSRow *pRow, col_id_t colId, col_type_t colType, int32_t flen, uint32_t offset, col_id_t colIdx,
                    SCellVal *pVal);
bool tdSKvRowGetVal(STSRow *pRow, col_id_t colId, col_id_t colIdx, SCellVal *pVal);
-int32_t dataColGetNEleLen(SDataCol *pDataCol, int32_t rows, int8_t bitmapMode);
void tdSCellValPrint(SCellVal *pVal, int8_t colType);
void tdSRowPrint(STSRow *row, STSchema *pSchema, const char *tag);

View File

@@ -42,25 +42,28 @@ typedef struct SReadHandle {
  bool initTqReader;
} SReadHandle;

+// in queue mode, data streams are separated by msg
typedef enum {
  OPTR_EXEC_MODEL_BATCH = 0x1,
  OPTR_EXEC_MODEL_STREAM = 0x2,
+ OPTR_EXEC_MODEL_QUEUE = 0x3,
} EOPTR_EXEC_MODEL;

/**
- * Create the exec task for streaming mode
+ * Create the exec task for stream mode
 * @param pMsg
- * @param streamReadHandle
+ * @param SReadHandle
 * @return
 */
qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers);

/**
- * Switch the stream scan to snapshot mode
- * @param tinfo
+ * Create the exec task for queue mode
+ * @param pMsg
+ * @param SReadHandle
 * @return
 */
-int32_t qStreamScanSnapshot(qTaskInfo_t tinfo);
+qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers);

/**
 * Set the input data block for the stream scan.
@@ -111,7 +114,7 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
 * @return
 */
int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* tableName, int32_t* sversion,
                                    int32_t* tversion);

/**
 * The main task execution function, including query on both table and multiple tables,
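The two constructors above differ only in execution model: stream mode keeps consuming blocks that are pushed in, while the new queue mode (OPTR_EXEC_MODEL_QUEUE) drains data one message at a time. A minimal usage sketch, not taken from the commit (assumptions: pPlanMsg is a serialized physical plan and the SReadHandle fields have been filled in by the caller; error handling omitted):

    SReadHandle handle = {0};
    // ... populate handle (meta/vnode readers, initTqReader, ...) before use ...
    qTaskInfo_t pStreamTask = qCreateStreamExecTaskInfo(pPlanMsg, &handle);  // continuous stream execution
    qTaskInfo_t pQueueTask  = qCreateQueueExecTaskInfo(pPlanMsg, &handle);   // queue mode: one msg per poll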

View File

@@ -14,6 +14,7 @@
 */
#include "os.h"
+#include "query.h"
#include "tdatablock.h"
#include "tmsg.h"
#include "tmsgcb.h"
@@ -119,6 +120,7 @@ static FORCE_INLINE void* streamQueueCurItem(SStreamQueue* queue) { return queue
static FORCE_INLINE void* streamQueueNextItem(SStreamQueue* queue) {
  int8_t dequeueFlag = atomic_exchange_8(&queue->status, STREAM_QUEUE__PROCESSING);
  if (dequeueFlag == STREAM_QUEUE__FAILED) {
+   ASSERT(0);
    ASSERT(queue->qItem != NULL);
    return streamQueueCurItem(queue);
  } else {
@@ -305,6 +307,7 @@ static FORCE_INLINE int32_t streamTaskInput(SStreamTask* pTask, SStreamQueueItem
      atomic_store_8(&pTask->inputStatus, TASK_INPUT_STATUS__FAILED);
      return -1;
    }
+   qInfo("task %d %p submit enqueue %p %p %p", pTask->taskId, pTask, pItem, pSubmitClone, pSubmitClone->data);
    taosWriteQitem(pTask->inputQueue->queue, pSubmitClone);
  } else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE) {
    taosWriteQitem(pTask->inputQueue->queue, pItem);

View File

@@ -94,7 +94,7 @@ void taosPrintLongString(const char *flags, ELogLevel level, int32_t dflag, cons
#define pError(...) { taosPrintLog("APP ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }
#define pPrint(...) { taosPrintLog("APP ", DEBUG_INFO, 255, __VA_ARGS__); }
// clang-format on
-#define BUF_PAGE_DEBUG
+//#define BUF_PAGE_DEBUG
#ifdef __cplusplus
}
#endif

View File

@@ -44,6 +44,8 @@ typedef struct STaosQset STaosQset;
typedef struct STaosQall STaosQall;

typedef struct {
  void   *ahandle;
+ void   *fp;
+ void   *queue;
  int32_t workerId;
  int32_t threadNum;
  int64_t timestamp;
@@ -65,6 +67,7 @@ void taosFreeQitem(void *pItem);
void    taosWriteQitem(STaosQueue *queue, void *pItem);
int32_t taosReadQitem(STaosQueue *queue, void **ppItem);
bool    taosQueueEmpty(STaosQueue *queue);
+void    taosUpdateItemSize(STaosQueue *queue, int32_t items);
int32_t taosQueueItemSize(STaosQueue *queue);
int64_t taosQueueMemorySize(STaosQueue *queue);
@@ -81,8 +84,8 @@ int32_t taosAddIntoQset(STaosQset *qset, STaosQueue *queue, void *ahandle);
void    taosRemoveFromQset(STaosQset *qset, STaosQueue *queue);
int32_t taosGetQueueNumber(STaosQset *qset);

-int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, int64_t *ts, void **ahandle, FItem *itemFp);
-int32_t taosReadAllQitemsFromQset(STaosQset *qset, STaosQall *qall, void **ahandle, FItems *itemsFp);
+int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, SQueueInfo *qinfo);
+int32_t taosReadAllQitemsFromQset(STaosQset *qset, STaosQall *qall, SQueueInfo *qinfo);
void    taosResetQsetThread(STaosQset *qset, void *pItem);

extern int64_t tsRpcQueueMemoryAllowed;
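With these changes the per-queue context (ahandle, callback fp, owning queue) is returned in a single SQueueInfo instead of separate out-parameters. A sketch of a consumer loop against the new taosReadQitemFromQset signature (assumptions, not verified against the commit: fp holds an FItem-style callback and a positive return value means an item was read):

    void      *pItem = NULL;
    SQueueInfo qinfo = {0};
    while (taosReadQitemFromQset(qset, &pItem, &qinfo) > 0) {
      FItem fp = (FItem)qinfo.fp;         // callback registered for the source queue
      if (fp != NULL) fp(&qinfo, pItem);  // dispatch with the full queue context
    }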

View File

@@ -179,7 +179,6 @@ int32_t processUseDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
  if (code != 0) {
    terrno = code;
    if (output.dbVgroup) taosHashCleanup(output.dbVgroup->vgHash);
-   taosMemoryFreeClear(output.dbVgroup);

    tscError("0x%" PRIx64 " failed to build use db output since %s", pRequest->requestId, terrstr());
  } else if (output.dbVgroup && output.dbVgroup->vgHash) {
@@ -189,12 +188,14 @@ int32_t processUseDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
    if (code1 != TSDB_CODE_SUCCESS) {
      tscWarn("catalogGetHandle failed, clusterId:%" PRIx64 ", error:%s", pRequest->pTscObj->pAppInfo->clusterId,
              tstrerror(code1));
-     taosMemoryFreeClear(output.dbVgroup);
    } else {
      catalogUpdateDBVgInfo(pCatalog, output.db, output.dbId, output.dbVgroup);
+     output.dbVgroup = NULL;
    }
  }

+ taosMemoryFreeClear(output.dbVgroup);

  tFreeSUsedbRsp(&usedbRsp);

  char db[TSDB_DB_NAME_LEN] = {0};

View File

@@ -1149,11 +1149,10 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
      tDecoderInit(&decoder, POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), pMsg->len - sizeof(SMqRspHead));
      tDecodeSMqDataRsp(&decoder, &pRspWrapper->dataRsp);
      memcpy(&pRspWrapper->dataRsp, pMsg->pData, sizeof(SMqRspHead));
-     /*tDecodeSMqDataBlkRsp(POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), &pRspWrapper->dataRsp);*/
    } else {
      ASSERT(rspType == TMQ_MSG_TYPE__POLL_META_RSP);
-     memcpy(&pRspWrapper->metaRsp, pMsg->pData, sizeof(SMqRspHead));
      tDecodeSMqMetaRsp(POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), &pRspWrapper->metaRsp);
+     memcpy(&pRspWrapper->metaRsp, pMsg->pData, sizeof(SMqRspHead));
    }

    taosMemoryFree(pMsg->pData);
@@ -2427,15 +2426,15 @@ static void destroyCreateTbReqBatch(void* data) {
  taosArrayDestroy(pTbBatch->req.pArray);
}

static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
  SVCreateTbBatchReq req = {0};
  SDecoder           coder = {0};
  int32_t            code = TSDB_CODE_SUCCESS;
  SRequestObj*       pRequest = NULL;
  SQuery*            pQuery = NULL;
  SHashObj*          pVgroupHashmap = NULL;

  code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
  if (code != TSDB_CODE_SUCCESS) {
    goto end;
  }
@@ -2455,8 +2454,8 @@ static int32_t taosCreateTable(TAOS *taos, void *meta, int32_t metaLen){
  STscObj*       pTscObj = pRequest->pTscObj;
  SVCreateTbReq* pCreateReq = NULL;
  SCatalog*      pCatalog = NULL;
  code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
  if (code != TSDB_CODE_SUCCESS) {
    goto end;
@@ -2540,13 +2539,13 @@ static void destroyDropTbReqBatch(void* data) {
  taosArrayDestroy(pTbBatch->req.pArray);
}

static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) {
  SVDropTbBatchReq req = {0};
  SDecoder         coder = {0};
  int32_t          code = TSDB_CODE_SUCCESS;
  SRequestObj*     pRequest = NULL;
  SQuery*          pQuery = NULL;
  SHashObj*        pVgroupHashmap = NULL;

  code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
  if (code != TSDB_CODE_SUCCESS) {
@@ -2568,8 +2567,8 @@ static int32_t taosDropTable(TAOS *taos, void *meta, int32_t metaLen){
  STscObj*     pTscObj = pRequest->pTscObj;
  SVDropTbReq* pDropReq = NULL;
  SCatalog*    pCatalog = NULL;
  code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
  if (code != TSDB_CODE_SUCCESS) {
    goto end;
@@ -2640,17 +2639,16 @@ end:
  return code;
}

static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) {
  SVAlterTbReq   req = {0};
  SDecoder       coder = {0};
  int32_t        code = TSDB_CODE_SUCCESS;
  SRequestObj*   pRequest = NULL;
  SQuery*        pQuery = NULL;
  SArray*        pArray = NULL;
  SVgDataBlocks* pVgData = NULL;

  code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
  if (code != TSDB_CODE_SUCCESS) {
    goto end;
  }

View File

@@ -320,7 +320,9 @@ int32_t colDataAssign(SColumnInfoData* pColumnInfoData, const SColumnInfoData* p
    memcpy(pColumnInfoData->pData, pSource->pData, pSource->varmeta.length);
  } else {
    memcpy(pColumnInfoData->nullbitmap, pSource->nullbitmap, BitmapLen(numOfRows));
-   memcpy(pColumnInfoData->pData, pSource->pData, pSource->info.bytes * numOfRows);
+   if (pSource->pData) {
+     memcpy(pColumnInfoData->pData, pSource->pData, pSource->info.bytes * numOfRows);
+   }
  }

  pColumnInfoData->hasNull = pSource->hasNull;
@@ -1736,56 +1738,57 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf)
  int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock);
  int32_t rows = pDataBlock->info.rows;
  int32_t len = 0;
- len += snprintf(dumpBuf + len, size - len, "\n%s |block type %d |child id %d|group id:%" PRIu64 "|\n", flag,
-                 (int32_t)pDataBlock->info.type, pDataBlock->info.childId, pDataBlock->info.groupId);
+ len += snprintf(dumpBuf + len, size - len, "\n%s |block type %d |child id %d|group id:%" PRIu64 "| uid:%ld\n", flag,
+                 (int32_t)pDataBlock->info.type, pDataBlock->info.childId, pDataBlock->info.groupId,
+                 pDataBlock->info.uid);
  if (len >= size - 1) return dumpBuf;

  for (int32_t j = 0; j < rows; j++) {
    len += snprintf(dumpBuf + len, size - len, "%s |", flag);
    if (len >= size - 1) return dumpBuf;
    for (int32_t k = 0; k < colNum; k++) {
      SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, k);
      void* var = POINTER_SHIFT(pColInfoData->pData, j * pColInfoData->info.bytes);
      if (colDataIsNull(pColInfoData, rows, j, NULL) || !pColInfoData->pData) {
        len += snprintf(dumpBuf + len, size - len, " %15s |", "NULL");
        if (len >= size - 1) return dumpBuf;
        continue;
      }
      switch (pColInfoData->info.type) {
        case TSDB_DATA_TYPE_TIMESTAMP:
          formatTimestamp(pBuf, *(uint64_t*)var, TSDB_TIME_PRECISION_MILLI);
          len += snprintf(dumpBuf + len, size - len, " %25s |", pBuf);
          if (len >= size - 1) return dumpBuf;
          break;
        case TSDB_DATA_TYPE_INT:
          len += snprintf(dumpBuf + len, size - len, " %15d |", *(int32_t*)var);
          if (len >= size - 1) return dumpBuf;
          break;
        case TSDB_DATA_TYPE_UINT:
          len += snprintf(dumpBuf + len, size - len, " %15u |", *(uint32_t*)var);
          if (len >= size - 1) return dumpBuf;
          break;
        case TSDB_DATA_TYPE_BIGINT:
          len += snprintf(dumpBuf + len, size - len, " %15ld |", *(int64_t*)var);
          if (len >= size - 1) return dumpBuf;
          break;
        case TSDB_DATA_TYPE_UBIGINT:
          len += snprintf(dumpBuf + len, size - len, " %15lu |", *(uint64_t*)var);
          if (len >= size - 1) return dumpBuf;
          break;
        case TSDB_DATA_TYPE_FLOAT:
          len += snprintf(dumpBuf + len, size - len, " %15f |", *(float*)var);
          if (len >= size - 1) return dumpBuf;
          break;
        case TSDB_DATA_TYPE_DOUBLE:
          len += snprintf(dumpBuf + len, size - len, " %15lf |", *(double*)var);
          if (len >= size - 1) return dumpBuf;
          break;
      }
    }
    len += snprintf(dumpBuf + len, size - len, "\n");
    if (len >= size - 1) return dumpBuf;
  }

  len += snprintf(dumpBuf + len, size - len, "%s |end\n", flag);
  return dumpBuf;

View File

@@ -175,7 +175,8 @@ static void setBitMap(uint8_t *pb, uint8_t v, int32_t idx, uint8_t flags) {
  } while (0)

int32_t tTSRowNew(STSRowBuilder *pBuilder, SArray *pArray, STSchema *pTSchema, STSRow2 **ppRow) {
  int32_t code = 0;
+#if 0
  STColumn *pTColumn;
  SColVal  *pColVal;
  int32_t   nColVal = taosArrayGetSize(pArray);
@@ -462,30 +463,22 @@ int32_t tTSRowNew(STSRowBuilder *pBuilder, SArray *pArray, STSchema *pTSchema, S
    }
  }
+#endif

_exit:
  return code;
}
int32_t tTSRowClone(const STSRow2 *pRow, STSRow2 **ppRow) {
  int32_t code = 0;
+ int32_t rLen;

- (*ppRow) = (STSRow2 *)taosMemoryMalloc(sizeof(**ppRow));
+ TSROW_LEN(pRow, rLen);
+ (*ppRow) = (STSRow2 *)taosMemoryMalloc(rLen);
  if (*ppRow == NULL) {
    code = TSDB_CODE_OUT_OF_MEMORY;
    goto _exit;
  }
- **ppRow = *pRow;
- (*ppRow)->pData = NULL;
-
- if (pRow->nData) {
-   (*ppRow)->pData = taosMemoryMalloc(pRow->nData);
-   if ((*ppRow)->pData == NULL) {
-     taosMemoryFree(*ppRow);
-     code = TSDB_CODE_OUT_OF_MEMORY;
-     goto _exit;
-   }
-   memcpy((*ppRow)->pData, pRow->pData, pRow->nData);
- }
+ memcpy(*ppRow, pRow, rLen);

_exit:
  return code;
@@ -493,12 +486,12 @@ _exit:
void tTSRowFree(STSRow2 *pRow) {
  if (pRow) {
-   if (pRow->pData) taosMemoryFree(pRow->pData);
    taosMemoryFree(pRow);
  }
}

void tTSRowGet(STSRow2 *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal) {
+#if 0
  uint8_t   isTuple = ((pRow->flags & 0xf0) == 0) ? 1 : 0;
  STColumn *pTColumn = &pTSchema->columns[iCol];
  uint8_t   flags = pRow->flags & (uint8_t)0xf;
@@ -643,10 +636,12 @@ _return_null:
_return_value:
  *pColVal = COL_VAL_VALUE(pTColumn->colId, pTColumn->type, value);
  return;
+#endif
}

int32_t tTSRowToArray(STSRow2 *pRow, STSchema *pTSchema, SArray **ppArray) {
  int32_t code = 0;
+#if 0
  SColVal cv;

  (*ppArray) = taosArrayInit(pTSchema->numOfCols, sizeof(SColVal));
@@ -660,52 +655,27 @@ int32_t tTSRowToArray(STSRow2 *pRow, STSchema *pTSchema, SArray **ppArray) {
    taosArrayPush(*ppArray, &cv);
  }
+#endif

_exit:
  return code;
}
int32_t tPutTSRow(uint8_t *p, STSRow2 *pRow) {
- int32_t n = 0;
+ int32_t n;

- n += tPutI64(p ? p + n : p, pRow->ts);
- n += tPutI8(p ? p + n : p, pRow->flags);
- n += tPutI32v(p ? p + n : p, pRow->sver);
-
- ASSERT(pRow->flags & 0xf);
- switch (pRow->flags & 0xf) {
-   case TSROW_HAS_NONE:
-   case TSROW_HAS_NULL:
-     ASSERT(pRow->nData == 0);
-     ASSERT(pRow->pData == NULL);
-     break;
-   default:
-     ASSERT(pRow->nData && pRow->pData);
-     n += tPutBinary(p ? p + n : p, pRow->pData, pRow->nData);
-     break;
- }
+ TSROW_LEN(pRow, n);
+ if (p) {
+   memcpy(p, pRow, n);
+ }

  return n;
}

-int32_t tGetTSRow(uint8_t *p, STSRow2 *pRow) {
- int32_t n = 0;
+int32_t tGetTSRow(uint8_t *p, STSRow2 **ppRow) {
+ int32_t n;

- n += tGetI64(p + n, &pRow->ts);
- n += tGetI8(p + n, &pRow->flags);
- n += tGetI32v(p + n, &pRow->sver);
-
- ASSERT(pRow->flags);
- switch (pRow->flags & 0xf) {
-   case TSROW_HAS_NONE:
-   case TSROW_HAS_NULL:
-     pRow->nData = 0;
-     pRow->pData = NULL;
-     break;
-   default:
-     n += tGetBinary(p + n, &pRow->pData, &pRow->nData);
-     break;
- }
+ *ppRow = (STSRow2 *)p;
+ TSROW_LEN(*ppRow, n);

  return n;
}
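Note the ownership change here: tPutTSRow now copies the whole self-describing row verbatim, and tGetTSRow no longer decodes into a caller-owned struct but simply points *ppRow into the input buffer. A short usage sketch (assumption: buf holds one serialized row; the clone is only needed if the row must outlive the buffer):

    STSRow2 *pRow = NULL, *pOwned = NULL;
    int32_t  n = tGetTSRow(buf, &pRow);     // zero-copy: pRow aliases buf
    if (tTSRowClone(pRow, &pOwned) == 0) {  // deep copy via the serialized length (TSROW_LEN)
      // ... use pOwned after buf is reused or freed ...
      tTSRowFree(pOwned);
    }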
@@ -904,15 +874,13 @@ static int32_t tGetTagVal(uint8_t *p, STagVal *pTagVal, int8_t isJson) {
  return n;
}

-bool tTagIsJson(const void *pTag){
-  return (((const STag *)pTag)->flags & TD_TAG_JSON);
-}
+bool tTagIsJson(const void *pTag) { return (((const STag *)pTag)->flags & TD_TAG_JSON); }

bool tTagIsJsonNull(void *data) {
  STag  *pTag = (STag *)data;
  int8_t isJson = tTagIsJson(pTag);
  if (!isJson) return false;
  return ((STag *)data)->nTag == 0;
}

int32_t tTagNew(SArray *pArray, int32_t version, int8_t isJson, STag **ppTag) {
@@ -1097,112 +1065,6 @@ _err:
}

#if 1  // ===================================================================================================================
static void dataColSetNEleNull(SDataCol *pCol, int nEle);
int tdAllocMemForCol(SDataCol *pCol, int maxPoints) {
int spaceNeeded = pCol->bytes * maxPoints;
if (IS_VAR_DATA_TYPE(pCol->type)) {
spaceNeeded += sizeof(VarDataOffsetT) * maxPoints;
}
#ifdef TD_SUPPORT_BITMAP
int32_t nBitmapBytes = (int32_t)TD_BITMAP_BYTES(maxPoints);
spaceNeeded += (int)nBitmapBytes;
// TODO: Currently, the compression of bitmap parts is affiliated to the column data parts, thus allocate 1 more
// TYPE_BYTES as to comprise complete TYPE_BYTES. Otherwise, invalid read/write would be triggered.
// spaceNeeded += TYPE_BYTES[pCol->type]; // the bitmap part is append as a single part since 2022.04.03, thus
// remove the additional space
#endif
if (pCol->spaceSize < spaceNeeded) {
void *ptr = taosMemoryRealloc(pCol->pData, spaceNeeded);
if (ptr == NULL) {
uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)spaceNeeded, strerror(errno));
return -1;
} else {
pCol->pData = ptr;
pCol->spaceSize = spaceNeeded;
}
}
#ifdef TD_SUPPORT_BITMAP
if (IS_VAR_DATA_TYPE(pCol->type)) {
pCol->pBitmap = POINTER_SHIFT(pCol->pData, pCol->bytes * maxPoints);
pCol->dataOff = POINTER_SHIFT(pCol->pBitmap, nBitmapBytes);
} else {
pCol->pBitmap = POINTER_SHIFT(pCol->pData, pCol->bytes * maxPoints);
}
#else
if (IS_VAR_DATA_TYPE(pCol->type)) {
pCol->dataOff = POINTER_SHIFT(pCol->pData, pCol->bytes * maxPoints);
}
#endif
return 0;
}
/**
* Duplicate the schema and return a new object
*/
STSchema *tdDupSchema(const STSchema *pSchema) {
int tlen = sizeof(STSchema) + sizeof(STColumn) * schemaNCols(pSchema);
STSchema *tSchema = (STSchema *)taosMemoryMalloc(tlen);
if (tSchema == NULL) return NULL;
memcpy((void *)tSchema, (void *)pSchema, tlen);
return tSchema;
}
/**
* Encode a schema to dst, and return the next pointer
*/
int tdEncodeSchema(void **buf, STSchema *pSchema) {
int tlen = 0;
tlen += taosEncodeFixedI32(buf, schemaVersion(pSchema));
tlen += taosEncodeFixedI32(buf, schemaNCols(pSchema));
for (int i = 0; i < schemaNCols(pSchema); i++) {
STColumn *pCol = schemaColAt(pSchema, i);
tlen += taosEncodeFixedI8(buf, colType(pCol));
tlen += taosEncodeFixedI8(buf, colFlags(pCol));
tlen += taosEncodeFixedI16(buf, colColId(pCol));
tlen += taosEncodeFixedI16(buf, colBytes(pCol));
}
return tlen;
}
/**
* Decode a schema from a binary.
*/
void *tdDecodeSchema(void *buf, STSchema **pRSchema) {
int version = 0;
int numOfCols = 0;
STSchemaBuilder schemaBuilder;
buf = taosDecodeFixedI32(buf, &version);
buf = taosDecodeFixedI32(buf, &numOfCols);
if (tdInitTSchemaBuilder(&schemaBuilder, version) < 0) return NULL;
for (int i = 0; i < numOfCols; i++) {
col_type_t type = 0;
int8_t flags = 0;
col_id_t colId = 0;
col_bytes_t bytes = 0;
buf = taosDecodeFixedI8(buf, &type);
buf = taosDecodeFixedI8(buf, &flags);
buf = taosDecodeFixedI16(buf, &colId);
buf = taosDecodeFixedI32(buf, &bytes);
if (tdAddColToSchema(&schemaBuilder, type, flags, colId, bytes) < 0) {
tdDestroyTSchemaBuilder(&schemaBuilder);
return NULL;
}
}
*pRSchema = tdGetSchemaFromBuilder(&schemaBuilder);
tdDestroyTSchemaBuilder(&schemaBuilder);
return buf;
}
int tdInitTSchemaBuilder(STSchemaBuilder *pBuilder, schema_ver_t version) {
  if (pBuilder == NULL) return -1;
@@ -1239,22 +1101,22 @@ int32_t tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int8_t flags, c
  }

  STColumn *pCol = &(pBuilder->columns[pBuilder->nCols]);
- colSetType(pCol, type);
- colSetColId(pCol, colId);
- colSetFlags(pCol, flags);
+ pCol->type = type;
+ pCol->colId = colId;
+ pCol->flags = flags;
  if (pBuilder->nCols == 0) {
-   colSetOffset(pCol, 0);
+   pCol->offset = 0;
  } else {
    STColumn *pTCol = &(pBuilder->columns[pBuilder->nCols - 1]);
-   colSetOffset(pCol, pTCol->offset + TYPE_BYTES[pTCol->type]);
+   pCol->offset = pTCol->offset + TYPE_BYTES[pTCol->type];
  }

  if (IS_VAR_DATA_TYPE(type)) {
-   colSetBytes(pCol, bytes);
+   pCol->bytes = bytes;
    pBuilder->tlen += (TYPE_BYTES[type] + bytes);
    pBuilder->vlen += bytes - sizeof(VarDataLenT);
  } else {
-   colSetBytes(pCol, TYPE_BYTES[type]);
+   pCol->bytes = TYPE_BYTES[type];
    pBuilder->tlen += TYPE_BYTES[type];
    pBuilder->vlen += TYPE_BYTES[type];
  }
@@ -1275,151 +1137,19 @@ STSchema *tdGetSchemaFromBuilder(STSchemaBuilder *pBuilder) {
  STSchema *pSchema = (STSchema *)taosMemoryMalloc(tlen);
  if (pSchema == NULL) return NULL;

- schemaVersion(pSchema) = pBuilder->version;
- schemaNCols(pSchema) = pBuilder->nCols;
- schemaTLen(pSchema) = pBuilder->tlen;
- schemaFLen(pSchema) = pBuilder->flen;
- schemaVLen(pSchema) = pBuilder->vlen;
+ pSchema->version = pBuilder->version;
+ pSchema->numOfCols = pBuilder->nCols;
+ pSchema->tlen = pBuilder->tlen;
+ pSchema->flen = pBuilder->flen;
+ pSchema->vlen = pBuilder->vlen;

#ifdef TD_SUPPORT_BITMAP
- schemaTLen(pSchema) += (int)TD_BITMAP_BYTES(schemaNCols(pSchema));
+ pSchema->tlen += (int)TD_BITMAP_BYTES(pSchema->numOfCols);
#endif

- memcpy(schemaColAt(pSchema, 0), pBuilder->columns, sizeof(STColumn) * pBuilder->nCols);
+ memcpy(&pSchema->columns[0], pBuilder->columns, sizeof(STColumn) * pBuilder->nCols);

  return pSchema;
}
void dataColInit(SDataCol *pDataCol, STColumn *pCol, int maxPoints) {
pDataCol->type = colType(pCol);
pDataCol->colId = colColId(pCol);
pDataCol->bytes = colBytes(pCol);
pDataCol->offset = colOffset(pCol) + 0; // TD_DATA_ROW_HEAD_SIZE;
pDataCol->len = 0;
}
static FORCE_INLINE const void *tdGetColDataOfRowUnsafe(SDataCol *pCol, int row) {
if (IS_VAR_DATA_TYPE(pCol->type)) {
return POINTER_SHIFT(pCol->pData, pCol->dataOff[row]);
} else {
return POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * row);
}
}
bool isNEleNull(SDataCol *pCol, int nEle) {
if (isAllRowsNull(pCol)) return true;
for (int i = 0; i < nEle; ++i) {
if (!isNull(tdGetColDataOfRowUnsafe(pCol, i), pCol->type)) return false;
}
return true;
}
void *dataColSetOffset(SDataCol *pCol, int nEle) {
ASSERT(((pCol->type == TSDB_DATA_TYPE_BINARY) || (pCol->type == TSDB_DATA_TYPE_NCHAR)));
void *tptr = pCol->pData;
// char *tptr = (char *)(pCol->pData);
VarDataOffsetT offset = 0;
for (int i = 0; i < nEle; ++i) {
pCol->dataOff[i] = offset;
offset += varDataTLen(tptr);
tptr = POINTER_SHIFT(tptr, varDataTLen(tptr));
}
return POINTER_SHIFT(tptr, varDataTLen(tptr));
}
SDataCols *tdNewDataCols(int maxCols, int maxRows) {
SDataCols *pCols = (SDataCols *)taosMemoryCalloc(1, sizeof(SDataCols));
if (pCols == NULL) {
uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)sizeof(SDataCols), strerror(errno));
return NULL;
}
pCols->maxPoints = maxRows;
pCols->maxCols = maxCols;
pCols->numOfRows = 0;
pCols->numOfCols = 0;
pCols->bitmapMode = TSDB_BITMODE_DEFAULT;
if (maxCols > 0) {
pCols->cols = (SDataCol *)taosMemoryCalloc(maxCols, sizeof(SDataCol));
if (pCols->cols == NULL) {
uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)sizeof(SDataCol) * maxCols,
strerror(errno));
tdFreeDataCols(pCols);
return NULL;
}
#if 0 // no need as calloc used
int i;
for (i = 0; i < maxCols; i++) {
pCols->cols[i].spaceSize = 0;
pCols->cols[i].len = 0;
pCols->cols[i].pData = NULL;
pCols->cols[i].dataOff = NULL;
}
#endif
}
return pCols;
}
int tdInitDataCols(SDataCols *pCols, STSchema *pSchema) {
int i;
int oldMaxCols = pCols->maxCols;
if (schemaNCols(pSchema) > oldMaxCols) {
pCols->maxCols = schemaNCols(pSchema);
void *ptr = (SDataCol *)taosMemoryRealloc(pCols->cols, sizeof(SDataCol) * pCols->maxCols);
if (ptr == NULL) return -1;
pCols->cols = ptr;
for (i = oldMaxCols; i < pCols->maxCols; ++i) {
pCols->cols[i].pData = NULL;
pCols->cols[i].dataOff = NULL;
pCols->cols[i].pBitmap = NULL;
pCols->cols[i].spaceSize = 0;
}
}
#if 0
tdResetDataCols(pCols); // redundant loop to reset len/blen to 0, already reset in following dataColInit(...)
#endif
pCols->numOfRows = 0;
pCols->bitmapMode = TSDB_BITMODE_DEFAULT;
pCols->numOfCols = schemaNCols(pSchema);
for (i = 0; i < schemaNCols(pSchema); ++i) {
dataColInit(pCols->cols + i, schemaColAt(pSchema, i), pCols->maxPoints);
}
return 0;
}
SDataCols *tdFreeDataCols(SDataCols *pCols) {
int i;
if (pCols) {
if (pCols->cols) {
int maxCols = pCols->maxCols;
for (i = 0; i < maxCols; ++i) {
SDataCol *pCol = &pCols->cols[i];
taosMemoryFreeClear(pCol->pData);
}
taosMemoryFree(pCols->cols);
pCols->cols = NULL;
}
taosMemoryFree(pCols);
}
return NULL;
}
void tdResetDataCols(SDataCols *pCols) {
if (pCols != NULL) {
pCols->numOfRows = 0;
pCols->bitmapMode = 0;
for (int i = 0; i < pCols->maxCols; ++i) {
dataColReset(pCols->cols + i);
}
}
}
#endif

View File

@@ -412,7 +412,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
  tsNumOfVnodeQueryThreads = TMAX(tsNumOfVnodeQueryThreads, 2);
  if (cfgAddInt32(pCfg, "numOfVnodeQueryThreads", tsNumOfVnodeQueryThreads, 1, 1024, 0) != 0) return -1;

- tsNumOfVnodeFetchThreads = TRANGE(tsNumOfVnodeFetchThreads, 1, 1);
+ tsNumOfVnodeFetchThreads = tsNumOfCores / 4;
+ tsNumOfVnodeFetchThreads = TMAX(tsNumOfVnodeFetchThreads, 4);
  if (cfgAddInt32(pCfg, "numOfVnodeFetchThreads", tsNumOfVnodeFetchThreads, 1, 1024, 0) != 0) return -1;

  tsNumOfVnodeWriteThreads = tsNumOfCores;

View File

@@ -4962,7 +4962,7 @@ int tDecodeSVCreateTbReq(SDecoder *pCoder, SVCreateTbReq *pReq) {
    if (tDecodeI64(pCoder, &pReq->ctb.suid) < 0) return -1;
    if (tDecodeTag(pCoder, (STag **)&pReq->ctb.pTag) < 0) return -1;
  } else if (pReq->type == TSDB_NORMAL_TABLE) {
-   if (tDecodeSSchemaWrapper(pCoder, &pReq->ntb.schemaRow) < 0) return -1;
+   if (tDecodeSSchemaWrapperEx(pCoder, &pReq->ntb.schemaRow) < 0) return -1;
  } else {
    ASSERT(0);
  }
@@ -5526,6 +5526,11 @@ bool tOffsetEqual(const STqOffsetVal *pLeft, const STqOffsetVal *pRight) {
      ASSERT(0);
      // TODO
      return pLeft->uid == pRight->uid && pLeft->ts == pRight->ts;
+   } else {
+     ASSERT(0);
+     /*ASSERT(pLeft->type == TMQ_OFFSET__RESET_NONE || pLeft->type == TMQ_OFFSET__RESET_EARLIEAST ||*/
+     /*pLeft->type == TMQ_OFFSET__RESET_LATEST);*/
+     /*return true;*/
    }
  }
  return false;

View File

@@ -32,28 +32,10 @@ const uint8_t tdVTypeByte[2][3] = {{
};

// declaration
static uint8_t tdGetBitmapByte(uint8_t byte);
static int32_t tdCompareColId(const void *arg1, const void *arg2);
static FORCE_INLINE int32_t compareKvRowColId(const void *key1, const void *key2);
// static void dataColSetNEleNull(SDataCol *pCol, int nEle);
/**
* @brief src2 data has more priority than src1
*
* @param target
* @param src1
* @param iter1
* @param limit1
* @param src2
* @param iter2
* @param limit2
* @param tRows
* @param update
*/
static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2,
int limit2, int tRows, bool update);
// implementation
/**
 * @brief Compress bitmap bytes comprised of 2-bits to counterpart of 1-bit.
@@ -287,33 +269,6 @@ void tdMergeBitmap(uint8_t *srcBitmap, int32_t nBits, uint8_t *dstBitmap) {
  }
}
static FORCE_INLINE void dataColSetNullAt(SDataCol *pCol, int index, bool setBitmap, int8_t bitmapMode) {
if (IS_VAR_DATA_TYPE(pCol->type)) {
pCol->dataOff[index] = pCol->len;
char *ptr = POINTER_SHIFT(pCol->pData, pCol->len);
setVardataNull(ptr, pCol->type);
pCol->len += varDataTLen(ptr);
} else {
setNull(POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * index), pCol->type, pCol->bytes);
pCol->len += TYPE_BYTES[pCol->type];
}
if (setBitmap) {
tdSetBitmapValType(pCol->pBitmap, index, TD_VTYPE_NONE, bitmapMode);
}
}
// static void dataColSetNEleNull(SDataCol *pCol, int nEle) {
// if (IS_VAR_DATA_TYPE(pCol->type)) {
// pCol->len = 0;
// for (int i = 0; i < nEle; i++) {
// dataColSetNullAt(pCol, i);
// }
// } else {
// setNullN(pCol->pData, pCol->type, pCol->bytes, nEle);
// pCol->len = TYPE_BYTES[pCol->type] * nEle;
// }
// }
/**
 * @brief Set bitmap area by byte preferentially and then by bit.
 *
@@ -362,56 +317,6 @@ bool tdIsBitmapBlkNorm(const void *pBitmap, int32_t numOfBits, int8_t bitmapMode
  return true;
}
static FORCE_INLINE void dataColSetNoneAt(SDataCol *pCol, int index, bool setBitmap, int8_t bitmapMode) {
if (IS_VAR_DATA_TYPE(pCol->type)) {
pCol->dataOff[index] = pCol->len;
char *ptr = POINTER_SHIFT(pCol->pData, pCol->len);
setVardataNull(ptr, pCol->type);
pCol->len += varDataTLen(ptr);
} else {
setNull(POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * index), pCol->type, pCol->bytes);
pCol->len += TYPE_BYTES[pCol->type];
}
if (setBitmap) {
tdSetBitmapValType(pCol->pBitmap, index, TD_VTYPE_NONE, bitmapMode);
}
}
static void dataColSetNEleNone(SDataCol *pCol, int nEle, int8_t bitmapMode) {
if (IS_VAR_DATA_TYPE(pCol->type)) {
pCol->len = 0;
for (int i = 0; i < nEle; ++i) {
dataColSetNoneAt(pCol, i, false, bitmapMode);
}
} else {
setNullN(pCol->pData, pCol->type, pCol->bytes, nEle);
pCol->len = TYPE_BYTES[pCol->type] * nEle;
}
#ifdef TD_SUPPORT_BITMAP
tdSetBitmapValTypeN(pCol->pBitmap, nEle, TD_VTYPE_NONE, bitmapMode);
#endif
}
#if 0
void trbSetRowInfo(SRowBuilder *pRB, bool del, uint16_t sver) {
// TODO
}
void trbSetRowVersion(SRowBuilder *pRB, uint64_t ver) {
// TODO
}
void trbSetRowTS(SRowBuilder *pRB, TSKEY ts) {
// TODO
}
int trbWriteCol(SRowBuilder *pRB, void *pData, col_id_t cid) {
// TODO
return 0;
}
#endif
STSRow *tdRowDup(STSRow *row) {
  STSRow *trow = taosMemoryMalloc(TD_ROW_LEN(row));
  if (trow == NULL) return NULL;
@@ -420,511 +325,6 @@ STSRow *tdRowDup(STSRow *row) {
  return trow;
}
/**
* @brief
*
* @param pCol
* @param valType
* @param val
* @param numOfRows
* @param maxPoints
* @param bitmapMode default is 0(2 bits), otherwise 1(1 bit)
* @param isMerge merge to current row
* @return int
*/
int tdAppendValToDataCol(SDataCol *pCol, TDRowValT valType, const void *val, int numOfRows, int maxPoints,
int8_t bitmapMode, bool isMerge) {
TASSERT(pCol != NULL);
// Assume that the columns not specified during insert/upsert mean None.
if (isAllRowsNone(pCol)) {
if (tdValIsNone(valType)) {
// all None value yet, just return
return 0;
}
if (tdAllocMemForCol(pCol, maxPoints) < 0) return -1;
if (numOfRows > 0) {
// Find the first not None value, fill all previous values as None
dataColSetNEleNone(pCol, numOfRows, bitmapMode);
}
}
const void *value = val;
if (!tdValTypeIsNorm(valType) || !val) {
// TODO:
// 1. back compatibility and easy to debug with codes of 2.0 to save NULL values.
// 2. later on, considering further optimization, don't save Null/None for VarType.
value = getNullValue(pCol->type);
}
if (!isMerge) {
if (IS_VAR_DATA_TYPE(pCol->type)) {
// set offset
pCol->dataOff[numOfRows] = pCol->len;
// Copy data
memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, varDataTLen(value));
// Update the length
pCol->len += varDataTLen(value);
} else {
ASSERT(pCol->len == TYPE_BYTES[pCol->type] * numOfRows);
memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, pCol->bytes);
pCol->len += pCol->bytes;
}
} else if (!tdValTypeIsNone(valType)) {
if (IS_VAR_DATA_TYPE(pCol->type)) {
// keep the last offset
// discard the last var data
int32_t lastVarLen = varDataTLen(POINTER_SHIFT(pCol->pData, pCol->dataOff[numOfRows]));
pCol->len -= lastVarLen;
// Copy data
memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, varDataTLen(value));
// Update the length
pCol->len += varDataTLen(value);
} else {
ASSERT(pCol->len - TYPE_BYTES[pCol->type] == TYPE_BYTES[pCol->type] * numOfRows);
memcpy(POINTER_SHIFT(pCol->pData, pCol->len - TYPE_BYTES[pCol->type]), value, pCol->bytes);
}
}
#ifdef TD_SUPPORT_BITMAP
if (!isMerge || !tdValTypeIsNone(valType)) {
tdSetBitmapValType(pCol->pBitmap, numOfRows, valType, bitmapMode);
}
#endif
return 0;
}
// internal
static int32_t tdAppendTpRowToDataCol(STSRow *pRow, STSchema *pSchema, SDataCols *pCols, bool isMerge) {
#if 0
ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < TD_ROW_KEY(pRow));
#endif
// Multi-Version rows with the same key and different versions supported
ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) <= TD_ROW_KEY(pRow));
int rcol = 1;
int dcol = 1;
void *pBitmap = tdGetBitmapAddrTp(pRow, pSchema->flen);
SDataCol *pDataCol = &(pCols->cols[0]);
ASSERT(pDataCol->colId == PRIMARYKEY_TIMESTAMP_COL_ID);
tdAppendValToDataCol(pDataCol, TD_VTYPE_NORM, &pRow->ts, pCols->numOfRows, pCols->maxPoints, pCols->bitmapMode,
isMerge);
while (dcol < pCols->numOfCols) {
pDataCol = &(pCols->cols[dcol]);
if (rcol >= schemaNCols(pSchema)) {
tdAppendValToDataCol(pDataCol, TD_VTYPE_NULL, NULL, pCols->numOfRows, pCols->maxPoints, pCols->bitmapMode,
isMerge);
++dcol;
continue;
}
STColumn *pRowCol = schemaColAt(pSchema, rcol);
SCellVal sVal = {0};
if (pRowCol->colId == pDataCol->colId) {
if (tdGetTpRowValOfCol(&sVal, pRow, pBitmap, pRowCol->type, pRowCol->offset - sizeof(TSKEY), rcol - 1) < 0) {
return terrno;
}
tdAppendValToDataCol(pDataCol, sVal.valType, sVal.val, pCols->numOfRows, pCols->maxPoints, pCols->bitmapMode,
isMerge);
++dcol;
++rcol;
} else if (pRowCol->colId < pDataCol->colId) {
++rcol;
} else {
tdAppendValToDataCol(pDataCol, TD_VTYPE_NULL, NULL, pCols->numOfRows, pCols->maxPoints, pCols->bitmapMode,
isMerge);
++dcol;
}
}
#if 0
++pCols->numOfRows;
#endif
return TSDB_CODE_SUCCESS;
}
// internal
static int32_t tdAppendKvRowToDataCol(STSRow *pRow, STSchema *pSchema, SDataCols *pCols, bool isMerge) {
ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < TD_ROW_KEY(pRow));
int rcol = 0;
int dcol = 1;
int tRowCols = tdRowGetNCols(pRow) - 1; // the primary TS key not included in kvRowColIdx part
int tSchemaCols = schemaNCols(pSchema) - 1;
void *pBitmap = tdGetBitmapAddrKv(pRow, tdRowGetNCols(pRow));
SDataCol *pDataCol = &(pCols->cols[0]);
ASSERT(pDataCol->colId == PRIMARYKEY_TIMESTAMP_COL_ID);
tdAppendValToDataCol(pDataCol, TD_VTYPE_NORM, &pRow->ts, pCols->numOfRows, pCols->maxPoints, pCols->bitmapMode,
isMerge);
while (dcol < pCols->numOfCols) {
pDataCol = &(pCols->cols[dcol]);
if (rcol >= tRowCols || rcol >= tSchemaCols) {
tdAppendValToDataCol(pDataCol, TD_VTYPE_NULL, NULL, pCols->numOfRows, pCols->maxPoints, pCols->bitmapMode,
isMerge);
++dcol;
continue;
}
SKvRowIdx *pIdx = tdKvRowColIdxAt(pRow, rcol);
int16_t colIdx = -1;
if (pIdx) {
colIdx = POINTER_DISTANCE(pIdx, TD_ROW_COL_IDX(pRow)) / sizeof(SKvRowIdx);
}
TASSERT(colIdx >= 0);
SCellVal sVal = {0};
if (pIdx->colId == pDataCol->colId) {
if (tdGetKvRowValOfCol(&sVal, pRow, pBitmap, pIdx->offset, colIdx) < 0) {
return terrno;
}
tdAppendValToDataCol(pDataCol, sVal.valType, sVal.val, pCols->numOfRows, pCols->maxPoints, pCols->bitmapMode,
isMerge);
++dcol;
++rcol;
} else if (pIdx->colId < pDataCol->colId) {
++rcol;
} else {
tdAppendValToDataCol(pDataCol, TD_VTYPE_NULL, NULL, pCols->numOfRows, pCols->maxPoints, pCols->bitmapMode,
isMerge);
++dcol;
}
}
#if 0
++pCols->numOfRows;
#endif
return TSDB_CODE_SUCCESS;
}
/**
* @brief exposed
*
* @param pRow
* @param pSchema
* @param pCols
*/
int32_t tdAppendSTSRowToDataCol(STSRow *pRow, STSchema *pSchema, SDataCols *pCols, bool isMerge) {
#ifdef TD_DEBUG_PRINT_TSDB_LOAD_DCOLS
printf("%s:%d ts: %" PRIi64 " sver:%d maxCols:%" PRIi16 " nCols:%" PRIi16 ", nRows:%d\n", __func__, __LINE__,
TD_ROW_KEY(pRow), TD_ROW_SVER(pRow), pCols->maxCols, pCols->numOfCols, pCols->numOfRows);
#endif
if (TD_IS_TP_ROW(pRow)) {
return tdAppendTpRowToDataCol(pRow, pSchema, pCols, isMerge);
} else if (TD_IS_KV_ROW(pRow)) {
return tdAppendKvRowToDataCol(pRow, pSchema, pCols, isMerge);
} else {
ASSERT(0);
}
return TSDB_CODE_SUCCESS;
}
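// Illustrative sketch (not part of this change): a minimal caller of the exposed append API
// above. It assumes pRow, pSchema and pCols were prepared elsewhere and simply propagates the
// error code; the function and variable names here are hypothetical.
static int32_t demoAppendOneRow(STSRow *pRow, STSchema *pSchema, SDataCols *pCols) {
  int32_t code = tdAppendSTSRowToDataCol(pRow, pSchema, pCols, false /* plain append, not a merge */);
  if (code != TSDB_CODE_SUCCESS) {
    // terrno carries the detailed reason when a per-column getter fails
    return code;
  }
  return TSDB_CODE_SUCCESS;
}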
/**
 * @brief Merge rows from source into target; on duplicate keys the source data takes priority
 *        over what is already in target.
 *
 * @param target      destination SDataCols
 * @param source      source SDataCols
 * @param rowsToMerge number of rows to take from source
 * @param pOffset     starting row offset in source, advanced as rows are consumed; may be NULL (treated as 0)
 * @param update      whether rows with duplicate timestamps overwrite existing values
 * @param maxVer      maximum row version to keep (version filtering is still a TODO)
 * @return int        0 on success, -1 on failure
 */
int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset, bool update,
TDRowVerT maxVer) {
ASSERT(rowsToMerge > 0 && rowsToMerge <= source->numOfRows);
ASSERT(target->numOfCols == source->numOfCols);
int offset = 0;
if (pOffset == NULL) {
pOffset = &offset;
}
SDataCols *pTarget = NULL;
if ((target->numOfRows == 0) || (dataColsKeyLast(target) < dataColsKeyAtRow(source, *pOffset))) { // No overlap
ASSERT(target->numOfRows + rowsToMerge <= target->maxPoints);
// TODO: filter the maxVer
TSKEY lastKey = TSKEY_INITIAL_VAL;
for (int i = 0; i < rowsToMerge; ++i) {
bool merge = false;
for (int j = 0; j < source->numOfCols; j++) {
if (source->cols[j].len > 0 || target->cols[j].len > 0) {
SCellVal sVal = {0};
if (tdGetColDataOfRow(&sVal, source->cols + j, i + (*pOffset), source->bitmapMode) < 0) {
TASSERT(0);
}
if (j == 0) {
if (lastKey == *(TSKEY *)sVal.val) {
if (!update) {
break;
}
merge = true;
} else if (lastKey != TSKEY_INITIAL_VAL) {
++target->numOfRows;
}
lastKey = *(TSKEY *)sVal.val;
}
if (i == 0) {
(target->cols + j)->bitmap = (source->cols + j)->bitmap;
}
tdAppendValToDataCol(target->cols + j, sVal.valType, sVal.val, target->numOfRows, target->maxPoints,
target->bitmapMode, merge);
}
}
}
if (lastKey != TSKEY_INITIAL_VAL) {
++target->numOfRows;
}
(*pOffset) += rowsToMerge;
} else {
pTarget = tdDupDataCols(target, true);
if (pTarget == NULL) goto _err;
int iter1 = 0;
tdMergeTwoDataCols(target, pTarget, &iter1, pTarget->numOfRows, source, pOffset, source->numOfRows,
pTarget->numOfRows + rowsToMerge, update);
}
tdFreeDataCols(pTarget);
return 0;
_err:
tdFreeDataCols(pTarget);
return -1;
}
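// Illustrative sketch (not part of this change): merging every row of `src` into `dst` with
// duplicate-key updates enabled. Both containers must share the same column layout; passing
// NULL for pOffset starts from row 0 of the source. Names here are hypothetical.
static int demoMergeAll(SDataCols *dst, SDataCols *src, bool update, TDRowVerT maxVer) {
  if (src->numOfRows == 0) return 0;  // nothing to merge
  return tdMergeDataCols(dst, src, src->numOfRows, NULL /* offset defaults to 0 */, update, maxVer);
}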
static void tdAppendValToDataCols(SDataCols *target, SDataCols *src, int iter, bool isMerge) {
for (int i = 0; i < src->numOfCols; ++i) {
ASSERT(target->cols[i].type == src->cols[i].type);
if (src->cols[i].len > 0 || target->cols[i].len > 0) {
SCellVal sVal = {0};
if (tdGetColDataOfRow(&sVal, src->cols + i, iter, src->bitmapMode) < 0) {
TASSERT(0);
}
if (isMerge) {
if (!tdValTypeIsNone(sVal.valType)) {
tdAppendValToDataCol(&(target->cols[i]), sVal.valType, sVal.val, target->numOfRows, target->maxPoints,
target->bitmapMode, isMerge);
} else {
// Keep the original value for None
}
} else {
tdAppendValToDataCol(&(target->cols[i]), sVal.valType, sVal.val, target->numOfRows, target->maxPoints,
target->bitmapMode, isMerge);
}
}
}
}
/**
 * @brief Merge src1 and src2 into target ordered by timestamp; when the same key exists in both,
 *        the src2 data takes priority over src1.
 *
 * @param target destination SDataCols, reset before merging
 * @param src1   first source
 * @param iter1  in/out row cursor into src1
 * @param limit1 number of rows of src1 eligible for merging
 * @param src2   second source
 * @param iter2  in/out row cursor into src2
 * @param limit2 number of rows of src2 eligible for merging
 * @param tRows  upper bound on the number of source rows to process
 * @param update whether rows with duplicate timestamps overwrite earlier values
 */
static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2,
int limit2, int tRows, bool update) {
tdResetDataCols(target);
target->bitmapMode = src1->bitmapMode;
ASSERT(limit1 <= src1->numOfRows && limit2 <= src2->numOfRows);
int32_t nRows = 0;
// TODO: filter the maxVer
// TODO: handle the delete function
TSKEY lastKey = TSKEY_INITIAL_VAL;
while (nRows < tRows) {
if (*iter1 >= limit1 && *iter2 >= limit2) break;
TSKEY key1 = (*iter1 >= limit1) ? INT64_MAX : dataColsKeyAt(src1, *iter1);
// TKEY tkey1 = (*iter1 >= limit1) ? TKEY_NULL : dataColsTKeyAt(src1, *iter1);
TSKEY key2 = (*iter2 >= limit2) ? INT64_MAX : dataColsKeyAt(src2, *iter2);
// TKEY tkey2 = (*iter2 >= limit2) ? TKEY_NULL : dataColsTKeyAt(src2, *iter2);
// ASSERT(tkey1 == TKEY_NULL || (!TKEY_IS_DELETED(tkey1)));
if (key1 <= key2) {
// select key1 if not deleted
if (update && (lastKey == key1)) {
tdAppendValToDataCols(target, src1, *iter1, true);
} else if (lastKey != key1) {
if (lastKey != TSKEY_INITIAL_VAL) {
++target->numOfRows;
}
tdAppendValToDataCols(target, src1, *iter1, false);
}
++nRows;
++(*iter1);
lastKey = key1;
} else {
// use key2 if not deleted
// TODO: handle the delete function
if (update && (lastKey == key2)) {
tdAppendValToDataCols(target, src2, *iter2, true);
} else if (lastKey != key2) {
if (lastKey != TSKEY_INITIAL_VAL) {
++target->numOfRows;
}
tdAppendValToDataCols(target, src2, *iter2, false);
}
++nRows;
++(*iter2);
lastKey = key2;
}
ASSERT(target->numOfRows <= target->maxPoints - 1);
}
if (lastKey != TSKEY_INITIAL_VAL) {
++target->numOfRows;
}
}
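// Illustrative sketch (not part of this change): the deferred row-count pattern used by the
// merge above, shown on a plain key array. A new output row is only counted when the key
// changes, so consecutive duplicates collapse into a single (possibly merged) row. Names are
// hypothetical.
static int demoCountMergedRows(const TSKEY *keys, int n, bool update) {
  int   rows = 0;
  TSKEY lastKey = TSKEY_INITIAL_VAL;
  for (int i = 0; i < n; ++i) {
    if (keys[i] == lastKey) {
      if (!update) continue;  // duplicate key and updates disabled: the row is skipped
      // with update enabled the duplicate is merged into the current row, not counted again
    } else {
      if (lastKey != TSKEY_INITIAL_VAL) ++rows;  // close the previous row
      lastKey = keys[i];
    }
  }
  if (lastKey != TSKEY_INITIAL_VAL) ++rows;  // close the last open row
  return rows;
}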
STSRow *mergeTwoRows(void *buffer, STSRow *row1, STSRow *row2, STSchema *pSchema1, STSchema *pSchema2) {
#if 0
ASSERT(TD_ROW_KEY(row1) == TD_ROW_KEY(row2));
ASSERT(schemaVersion(pSchema1) == TD_ROW_SVER(row1));
ASSERT(schemaVersion(pSchema2) == TD_ROW_SVER(row2));
ASSERT(schemaVersion(pSchema1) >= schemaVersion(pSchema2));
#endif
#if 0
SArray *stashRow = taosArrayInit(pSchema1->numOfCols, sizeof(SColInfo));
if (stashRow == NULL) {
return NULL;
}
STSRow pRow = buffer;
STpRow dataRow = memRowDataBody(pRow);
memRowSetType(pRow, SMEM_ROW_DATA);
dataRowSetVersion(dataRow, schemaVersion(pSchema1)); // use latest schema version
dataRowSetLen(dataRow, (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + pSchema1->flen));
TDRowLenT dataLen = 0, kvLen = TD_MEM_ROW_KV_HEAD_SIZE;
int32_t i = 0; // row1
int32_t j = 0; // row2
int32_t nCols1 = schemaNCols(pSchema1);
int32_t nCols2 = schemaNCols(pSchema2);
SColInfo colInfo = {0};
int32_t kvIdx1 = 0, kvIdx2 = 0;
while (i < nCols1) {
STColumn *pCol = schemaColAt(pSchema1, i);
void * val1 = tdGetMemRowDataOfColEx(row1, pCol->colId, pCol->type, TD_DATA_ROW_HEAD_SIZE + pCol->offset, &kvIdx1);
// if val1 != NULL, use val1;
if (val1 != NULL && !isNull(val1, pCol->type)) {
tdAppendColVal(dataRow, val1, pCol->type, pCol->offset);
kvLen += tdGetColAppendLen(SMEM_ROW_KV, val1, pCol->type);
setSColInfo(&colInfo, pCol->colId, pCol->type, val1);
taosArrayPush(stashRow, &colInfo);
++i; // next col
continue;
}
void *val2 = NULL;
while (j < nCols2) {
STColumn *tCol = schemaColAt(pSchema2, j);
if (tCol->colId < pCol->colId) {
++j;
continue;
}
if (tCol->colId == pCol->colId) {
val2 = tdGetMemRowDataOfColEx(row2, tCol->colId, tCol->type, TD_DATA_ROW_HEAD_SIZE + tCol->offset, &kvIdx2);
} else if (tCol->colId > pCol->colId) {
// set NULL
}
break;
} // end of while(j<nCols2)
if (val2 == NULL) {
val2 = (void *)getNullValue(pCol->type);
}
tdAppendColVal(dataRow, val2, pCol->type, pCol->offset);
if (!isNull(val2, pCol->type)) {
kvLen += tdGetColAppendLen(SMEM_ROW_KV, val2, pCol->type);
setSColInfo(&colInfo, pCol->colId, pCol->type, val2);
taosArrayPush(stashRow, &colInfo);
}
++i; // next col
}
dataLen = TD_ROW_LEN(pRow);
if (kvLen < dataLen) {
// scan stashRow and generate SKVRow
memset(buffer, 0, sizeof(dataLen));
STSRow tRow = buffer;
memRowSetType(tRow, SMEM_ROW_KV);
SKVRow kvRow = (SKVRow)memRowKvBody(tRow);
int16_t nKvNCols = (int16_t) taosArrayGetSize(stashRow);
kvRowSetLen(kvRow, (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nKvNCols));
kvRowSetNCols(kvRow, nKvNCols);
memRowSetKvVersion(tRow, pSchema1->version);
int32_t toffset = 0;
int16_t k;
for (k = 0; k < nKvNCols; ++k) {
SColInfo *pColInfo = taosArrayGet(stashRow, k);
tdAppendKvColVal(kvRow, pColInfo->colVal, true, pColInfo->colId, pColInfo->colType, toffset);
toffset += sizeof(SColIdx);
}
ASSERT(kvLen == TD_ROW_LEN(tRow));
}
taosArrayDestroy(stashRow);
return buffer;
#endif
return NULL;
}
SDataCols *tdDupDataCols(SDataCols *pDataCols, bool keepData) {
SDataCols *pRet = tdNewDataCols(pDataCols->maxCols, pDataCols->maxPoints);
if (pRet == NULL) return NULL;
pRet->numOfCols = pDataCols->numOfCols;
pRet->bitmapMode = pDataCols->bitmapMode;
pRet->sversion = pDataCols->sversion;
if (keepData) pRet->numOfRows = pDataCols->numOfRows;
for (int i = 0; i < pDataCols->numOfCols; ++i) {
pRet->cols[i].type = pDataCols->cols[i].type;
pRet->cols[i].bitmap = pDataCols->cols[i].bitmap;
pRet->cols[i].colId = pDataCols->cols[i].colId;
pRet->cols[i].bytes = pDataCols->cols[i].bytes;
pRet->cols[i].offset = pDataCols->cols[i].offset;
if (keepData) {
if (pDataCols->cols[i].len > 0) {
if (tdAllocMemForCol(&pRet->cols[i], pRet->maxPoints) < 0) {
tdFreeDataCols(pRet);
return NULL;
}
pRet->cols[i].len = pDataCols->cols[i].len;
memcpy(pRet->cols[i].pData, pDataCols->cols[i].pData, pDataCols->cols[i].len);
if (IS_VAR_DATA_TYPE(pRet->cols[i].type)) {
int dataOffSize = sizeof(VarDataOffsetT) * pDataCols->maxPoints;
memcpy(pRet->cols[i].dataOff, pDataCols->cols[i].dataOff, dataOffSize);
}
if (!TD_COL_ROWS_NORM(pRet->cols + i)) {
memcpy(pRet->cols[i].pBitmap, pDataCols->cols[i].pBitmap, TD_BITMAP_BYTES(pDataCols->numOfRows));
}
}
}
}
return pRet;
}
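// Illustrative sketch (not part of this change): deep-copying a SDataCols container and
// releasing the copy with the matching free routine. Names are hypothetical.
static int32_t demoSnapshotDataCols(SDataCols *pCols) {
  SDataCols *pCopy = tdDupDataCols(pCols, true /* keep the row data, not just the layout */);
  if (pCopy == NULL) {
    return -1;  // allocation failed inside the duplication
  }
  // ... read from pCopy while pCols keeps being modified ...
  tdFreeDataCols(pCopy);
  return 0;
}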
void tdSRowPrint(STSRow *row, STSchema *pSchema, const char *tag) { void tdSRowPrint(STSRow *row, STSchema *pSchema, const char *tag) {
STSRowIter iter = {0}; STSRowIter iter = {0};
tdSTSRowIterInit(&iter, pSchema); tdSTSRowIterInit(&iter, pSchema);
@ -1020,32 +420,6 @@ void tdSCellValPrint(SCellVal *pVal, int8_t colType) {
} }
} }
int32_t dataColGetNEleLen(SDataCol *pDataCol, int32_t rows, int8_t bitmapMode) {
ASSERT(rows > 0);
int32_t result = 0;
if (IS_VAR_DATA_TYPE(pDataCol->type)) {
result += pDataCol->dataOff[rows - 1];
SCellVal val = {0};
if (tdGetColDataOfRow(&val, pDataCol, rows - 1, bitmapMode) < 0) {
TASSERT(0);
}
// Currently, the varDataTLen of Null/None cols is also counted, for backward compatibility with 2.4
result += varDataTLen(val.val);
// TODO: later on, don't save Null/None for VarDataT for 3.0
// if (tdValTypeIsNorm(val.valType)) {
// result += varDataTLen(val.val);
// }
} else {
result += TYPE_BYTES[pDataCol->type] * rows;
}
ASSERT(pDataCol->len == result);
return result;
}
bool tdSKvRowGetVal(STSRow *pRow, col_id_t colId, col_id_t colIdx, SCellVal *pVal) { bool tdSKvRowGetVal(STSRow *pRow, col_id_t colId, col_id_t colIdx, SCellVal *pVal) {
if (colId == PRIMARYKEY_TIMESTAMP_COL_ID) { if (colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
tdRowSetVal(pVal, TD_VTYPE_NORM, TD_ROW_KEY_ADDR(pRow)); tdRowSetVal(pVal, TD_VTYPE_NORM, TD_ROW_KEY_ADDR(pRow));
@ -1082,40 +456,6 @@ bool tdSTpRowGetVal(STSRow *pRow, col_id_t colId, col_type_t colType, int32_t fl
return true; return true;
} }
int32_t tdGetColDataOfRow(SCellVal *pVal, SDataCol *pCol, int32_t row, int8_t bitmapMode) {
if (isAllRowsNone(pCol)) {
pVal->valType = TD_VTYPE_NONE;
#ifdef TD_SUPPORT_READ2
pVal->val = (void *)getNullValue(pCol->type);
#else
pVal->val = NULL;
#endif
return TSDB_CODE_SUCCESS;
}
if (TD_COL_ROWS_NORM(pCol)) {
pVal->valType = TD_VTYPE_NORM;
} else if (tdGetBitmapValType(pCol->pBitmap, row, &(pVal->valType), bitmapMode) < 0) {
return terrno;
}
if (tdValTypeIsNorm(pVal->valType)) {
if (IS_VAR_DATA_TYPE(pCol->type)) {
pVal->val = POINTER_SHIFT(pCol->pData, pCol->dataOff[row]);
} else {
pVal->val = POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * row);
}
} else {
pVal->valType = TD_VTYPE_NULL;
#ifdef TD_SUPPORT_READ2
pVal->val = (void *)getNullValue(pCol->type);
#else
pVal->val = NULL;
#endif
}
return TSDB_CODE_SUCCESS;
}
bool tdSTSRowIterNext(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCellVal *pVal) { bool tdSTSRowIterNext(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCellVal *pVal) {
if (colId == PRIMARYKEY_TIMESTAMP_COL_ID) { if (colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
pVal->val = &pIter->pRow->ts; pVal->val = &pIter->pRow->ts;

View File

@ -285,8 +285,8 @@ int32_t debugPrintSColVal(SColVal *cv, int8_t type) {
} }
void debugPrintTSRow(STSRow2 *row, STSchema *pTSchema, const char *tags, int32_t ln) { void debugPrintTSRow(STSRow2 *row, STSchema *pTSchema, const char *tags, int32_t ln) {
printf("%s:%d %s:v%d:%d ", tags, ln, (row->flags & 0xf0) ? "KV" : "TP", row->sver, row->nData); // printf("%s:%d %s:v%d:%d ", tags, ln, (row->flags & 0xf0) ? "KV" : "TP", row->sver, row->nData);
for (int16_t i = 0; i < schemaNCols(pTSchema); ++i) { for (int16_t i = 0; i < pTSchema->numOfCols; ++i) {
SColVal cv = {0}; SColVal cv = {0};
tTSRowGet(row, pTSchema, i, &cv); tTSRowGet(row, pTSchema, i, &cv);
debugPrintSColVal(&cv, pTSchema->columns[i].type); debugPrintSColVal(&cv, pTSchema->columns[i].type);
@ -393,7 +393,7 @@ static int32_t checkSColVal(const char *rawVal, SColVal *cv, int8_t type) {
} }
static void checkTSRow(const char **data, STSRow2 *row, STSchema *pTSchema) { static void checkTSRow(const char **data, STSRow2 *row, STSchema *pTSchema) {
for (int16_t i = 0; i < schemaNCols(pTSchema); ++i) { for (int16_t i = 0; i < pTSchema->numOfCols; ++i) {
SColVal cv = {0}; SColVal cv = {0};
tTSRowGet(row, pTSchema, i, &cv); tTSRowGet(row, pTSchema, i, &cv);
checkSColVal(data[i], &cv, pTSchema->columns[i].type); checkSColVal(data[i], &cv, pTSchema->columns[i].type);

View File

@ -31,7 +31,7 @@ typedef struct SVnodeMgmt {
const char *path; const char *path;
const char *name; const char *name;
SQWorkerPool queryPool; SQWorkerPool queryPool;
SQWorkerPool fetchPool; SWWorkerPool fetchPool;
SWWorkerPool syncPool; SWWorkerPool syncPool;
SWWorkerPool writePool; SWWorkerPool writePool;
SWWorkerPool applyPool; SWWorkerPool applyPool;

View File

@ -31,7 +31,7 @@ SVnodeObj **vmGetVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes) {
SVnodeObj *pVnode = *ppVnode; SVnodeObj *pVnode = *ppVnode;
if (pVnode && num < size) { if (pVnode && num < size) {
int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1); int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1);
// dTrace("vgId:%d, acquire vnode, refCount:%d", pVnode->vgId, refCount); // dTrace("vgId:%d, acquire vnode list, ref:%d", pVnode->vgId, refCount);
pVnodes[num++] = (*ppVnode); pVnodes[num++] = (*ppVnode);
pIter = taosHashIterate(pMgmt->hash, pIter); pIter = taosHashIterate(pMgmt->hash, pIter);
} else { } else {

View File

@ -23,6 +23,7 @@ SVnodeObj *vmAcquireVnode(SVnodeMgmt *pMgmt, int32_t vgId) {
taosHashGetDup(pMgmt->hash, &vgId, sizeof(int32_t), (void *)&pVnode); taosHashGetDup(pMgmt->hash, &vgId, sizeof(int32_t), (void *)&pVnode);
if (pVnode == NULL || pVnode->dropped) { if (pVnode == NULL || pVnode->dropped) {
terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; terrno = TSDB_CODE_VND_INVALID_VGROUP_ID;
pVnode = NULL;
} else { } else {
int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1); int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1);
// dTrace("vgId:%d, acquire vnode, ref:%d", pVnode->vgId, refCount); // dTrace("vgId:%d, acquire vnode, ref:%d", pVnode->vgId, refCount);
@ -82,6 +83,7 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
taosThreadRwlockUnlock(&pMgmt->lock); taosThreadRwlockUnlock(&pMgmt->lock);
vmReleaseVnode(pMgmt, pVnode); vmReleaseVnode(pMgmt, pVnode);
dTrace("vgId:%d, wait for vnode ref become 0", pVnode->vgId);
while (pVnode->refCount > 0) taosMsleep(10); while (pVnode->refCount > 0) taosMsleep(10);
dTrace("vgId:%d, wait for vnode queue is empty", pVnode->vgId); dTrace("vgId:%d, wait for vnode queue is empty", pVnode->vgId);

View File

@ -81,21 +81,26 @@ static void vmProcessQueryQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
taosFreeQitem(pMsg); taosFreeQitem(pMsg);
} }
static void vmProcessFetchQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) { static void vmProcessFetchQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
SVnodeObj *pVnode = pInfo->ahandle; SVnodeObj *pVnode = pInfo->ahandle;
const STraceId *trace = &pMsg->info.traceId; SRpcMsg *pMsg = NULL;
dGTrace("vgId:%d, msg:%p get from vnode-fetch queue", pVnode->vgId, pMsg); for (int32_t i = 0; i < numOfMsgs; ++i) {
int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, pMsg, pInfo); if (taosGetQitem(qall, (void **)&pMsg) == 0) continue;
if (code != 0) { const STraceId *trace = &pMsg->info.traceId;
if (terrno != 0) code = terrno; dGTrace("vgId:%d, msg:%p get from vnode-fetch queue", pVnode->vgId, pMsg);
dGError("vgId:%d, msg:%p failed to fetch since %s", pVnode->vgId, pMsg, terrstr());
vmSendRsp(pMsg, code); int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, pMsg, pInfo);
if (code != 0) {
if (terrno != 0) code = terrno;
dGError("vgId:%d, msg:%p failed to fetch since %s", pVnode->vgId, pMsg, terrstr());
vmSendRsp(pMsg, code);
}
dGTrace("vgId:%d, msg:%p is freed, code:0x%x", pVnode->vgId, pMsg, code);
rpcFreeCont(pMsg->pCont);
taosFreeQitem(pMsg);
} }
dGTrace("vgId:%d, msg:%p is freed, code:0x%x", pVnode->vgId, pMsg, code);
rpcFreeCont(pMsg->pCont);
taosFreeQitem(pMsg);
} }
static void vmProcessSyncQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) { static void vmProcessSyncQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
@ -201,9 +206,9 @@ int32_t vmPutRpcMsgToQueue(SVnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
int32_t code = vmPutMsgToQueue(pMgmt, pMsg, qtype); int32_t code = vmPutMsgToQueue(pMgmt, pMsg, qtype);
if (code != 0) { if (code != 0) {
dTrace("msg:%p, is freed", pMsg); dTrace("msg:%p, is freed", pMsg);
taosFreeQitem(pMsg);
rpcFreeCont(pMsg->pCont); rpcFreeCont(pMsg->pCont);
pRpc->pCont = NULL; pRpc->pCont = NULL;
taosFreeQitem(pMsg);
} }
return code; return code;
@ -232,8 +237,8 @@ int32_t vmGetQueueSize(SVnodeMgmt *pMgmt, int32_t vgId, EQueueType qtype) {
default: default:
break; break;
} }
vmReleaseVnode(pMgmt, pVnode);
} }
vmReleaseVnode(pMgmt, pVnode);
return size; return size;
} }
@ -242,7 +247,7 @@ int32_t vmAllocQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
pVnode->pSyncQ = tWWorkerAllocQueue(&pMgmt->syncPool, pVnode, (FItems)vmProcessSyncQueue); pVnode->pSyncQ = tWWorkerAllocQueue(&pMgmt->syncPool, pVnode, (FItems)vmProcessSyncQueue);
pVnode->pApplyQ = tWWorkerAllocQueue(&pMgmt->applyPool, pVnode->pImpl, (FItems)vnodeApplyWriteMsg); pVnode->pApplyQ = tWWorkerAllocQueue(&pMgmt->applyPool, pVnode->pImpl, (FItems)vnodeApplyWriteMsg);
pVnode->pQueryQ = tQWorkerAllocQueue(&pMgmt->queryPool, pVnode, (FItem)vmProcessQueryQueue); pVnode->pQueryQ = tQWorkerAllocQueue(&pMgmt->queryPool, pVnode, (FItem)vmProcessQueryQueue);
pVnode->pFetchQ = tQWorkerAllocQueue(&pMgmt->fetchPool, pVnode, (FItem)vmProcessFetchQueue); pVnode->pFetchQ = tWWorkerAllocQueue(&pMgmt->fetchPool, pVnode, (FItems)vmProcessFetchQueue);
if (pVnode->pWriteQ == NULL || pVnode->pSyncQ == NULL || pVnode->pApplyQ == NULL || pVnode->pQueryQ == NULL || if (pVnode->pWriteQ == NULL || pVnode->pSyncQ == NULL || pVnode->pApplyQ == NULL || pVnode->pQueryQ == NULL ||
pVnode->pFetchQ == NULL) { pVnode->pFetchQ == NULL) {
@ -250,7 +255,11 @@ int32_t vmAllocQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
return -1; return -1;
} }
dDebug("vgId:%d, queue is alloced", pVnode->vgId); dDebug("vgId:%d, write-queue:%p is alloced", pVnode->vgId, pVnode->pWriteQ);
dDebug("vgId:%d, sync-queue:%p is alloced", pVnode->vgId, pVnode->pSyncQ);
dDebug("vgId:%d, apply-queue:%p is alloced", pVnode->vgId, pVnode->pApplyQ);
dDebug("vgId:%d, query-queue:%p is alloced", pVnode->vgId, pVnode->pQueryQ);
dDebug("vgId:%d, fetch-queue:%p is alloced", pVnode->vgId, pVnode->pFetchQ);
return 0; return 0;
} }
@ -259,7 +268,7 @@ void vmFreeQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
tWWorkerFreeQueue(&pMgmt->applyPool, pVnode->pApplyQ); tWWorkerFreeQueue(&pMgmt->applyPool, pVnode->pApplyQ);
tWWorkerFreeQueue(&pMgmt->syncPool, pVnode->pSyncQ); tWWorkerFreeQueue(&pMgmt->syncPool, pVnode->pSyncQ);
tQWorkerFreeQueue(&pMgmt->queryPool, pVnode->pQueryQ); tQWorkerFreeQueue(&pMgmt->queryPool, pVnode->pQueryQ);
tQWorkerFreeQueue(&pMgmt->fetchPool, pVnode->pFetchQ); tWWorkerFreeQueue(&pMgmt->fetchPool, pVnode->pFetchQ);
pVnode->pWriteQ = NULL; pVnode->pWriteQ = NULL;
pVnode->pSyncQ = NULL; pVnode->pSyncQ = NULL;
pVnode->pApplyQ = NULL; pVnode->pApplyQ = NULL;
@ -275,11 +284,10 @@ int32_t vmStartWorker(SVnodeMgmt *pMgmt) {
pQPool->max = tsNumOfVnodeQueryThreads; pQPool->max = tsNumOfVnodeQueryThreads;
if (tQWorkerInit(pQPool) != 0) return -1; if (tQWorkerInit(pQPool) != 0) return -1;
SQWorkerPool *pFPool = &pMgmt->fetchPool; SWWorkerPool *pFPool = &pMgmt->fetchPool;
pFPool->name = "vnode-fetch"; pFPool->name = "vnode-fetch";
pFPool->min = tsNumOfVnodeFetchThreads;
pFPool->max = tsNumOfVnodeFetchThreads; pFPool->max = tsNumOfVnodeFetchThreads;
if (tQWorkerInit(pFPool) != 0) return -1; if (tWWorkerInit(pFPool) != 0) return -1;
SWWorkerPool *pWPool = &pMgmt->writePool; SWWorkerPool *pWPool = &pMgmt->writePool;
pWPool->name = "vnode-write"; pWPool->name = "vnode-write";
@ -325,6 +333,6 @@ void vmStopWorker(SVnodeMgmt *pMgmt) {
tWWorkerCleanup(&pMgmt->applyPool); tWWorkerCleanup(&pMgmt->applyPool);
tWWorkerCleanup(&pMgmt->syncPool); tWWorkerCleanup(&pMgmt->syncPool);
tQWorkerCleanup(&pMgmt->queryPool); tQWorkerCleanup(&pMgmt->queryPool);
tQWorkerCleanup(&pMgmt->fetchPool); tWWorkerCleanup(&pMgmt->fetchPool);
dDebug("vnode workers are closed"); dDebug("vnode workers are closed");
} }

View File

@ -137,8 +137,7 @@ int64_t tsdbGetNumOfRowsInMemTable(STsdbReader *pHandle);
void *tsdbGetIdx(SMeta *pMeta); void *tsdbGetIdx(SMeta *pMeta);
void *tsdbGetIvtIdx(SMeta *pMeta); void *tsdbGetIvtIdx(SMeta *pMeta);
int32_t tsdbLastRowReaderOpen(void *pVnode, int32_t type, SArray *pTableIdList, int32_t *colId, int32_t numOfCols, int32_t tsdbLastRowReaderOpen(void *pVnode, int32_t type, SArray *pTableIdList, int32_t numOfCols, void **pReader);
void **pReader);
int32_t tsdbRetrieveLastRow(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds); int32_t tsdbRetrieveLastRow(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds);
int32_t tsdbLastrowReaderClose(void *pReader); int32_t tsdbLastrowReaderClose(void *pReader);
int32_t tsdbGetTableSchema(SVnode *pVnode, int64_t uid, STSchema **pSchema, int64_t *suid); int32_t tsdbGetTableSchema(SVnode *pVnode, int64_t uid, STSchema **pSchema, int64_t *suid);

View File

@ -89,8 +89,6 @@ typedef struct {
STqExecTb execTb; STqExecTb execTb;
STqExecDb execDb; STqExecDb execDb;
}; };
// TODO remove it
int64_t tsdbEndVer;
} STqExecHandle; } STqExecHandle;
@ -101,6 +99,8 @@ typedef struct {
int32_t epoch; int32_t epoch;
int8_t fetchMeta; int8_t fetchMeta;
int64_t snapshotVer;
// TODO remove // TODO remove
SWalReader* pWalReader; SWalReader* pWalReader;
@ -131,7 +131,7 @@ typedef struct {
static STqMgmt tqMgmt = {0}; static STqMgmt tqMgmt = {0};
// tqRead // tqRead
int64_t tqScan(STQ* pTq, const STqExecHandle* pExec, SMqDataRsp* pRsp, STqOffsetVal* offset); int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* offset);
int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHead** pHeadWithCkSum); int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHead** pHeadWithCkSum);
// tqExec // tqExec

View File

@ -244,6 +244,9 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
STqOffsetVal reqOffset = pReq->reqOffset; STqOffsetVal reqOffset = pReq->reqOffset;
STqOffsetVal fetchOffsetNew; STqOffsetVal fetchOffsetNew;
// todo
workerId = 0;
// 1.find handle // 1.find handle
STqHandle* pHandle = taosHashGet(pTq->handles, pReq->subKey, strlen(pReq->subKey)); STqHandle* pHandle = taosHashGet(pTq->handles, pReq->subKey, strlen(pReq->subKey));
/*ASSERT(pHandle);*/ /*ASSERT(pHandle);*/
@ -284,7 +287,8 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
fetchOffsetNew = pOffset->val; fetchOffsetNew = pOffset->val;
char formatBuf[80]; char formatBuf[80];
tFormatOffset(formatBuf, 80, &fetchOffsetNew); tFormatOffset(formatBuf, 80, &fetchOffsetNew);
tqDebug("tmq poll: consumer %" PRId64 ", subkey %s, offset reset to %s", consumerId, pHandle->subKey, formatBuf); tqDebug("tmq poll: consumer %" PRId64 ", subkey %s, vg %d, offset reset to %s", consumerId, pHandle->subKey,
TD_VID(pTq->pVnode), formatBuf);
} else { } else {
if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEAST) { if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEAST) {
if (pReq->useSnapshot && pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { if (pReq->useSnapshot && pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
@ -299,8 +303,8 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
} }
} else if (reqOffset.type == TMQ_OFFSET__RESET_LATEST) { } else if (reqOffset.type == TMQ_OFFSET__RESET_LATEST) {
tqOffsetResetToLog(&dataRsp.rspOffset, walGetLastVer(pTq->pVnode->pWal)); tqOffsetResetToLog(&dataRsp.rspOffset, walGetLastVer(pTq->pVnode->pWal));
tqDebug("tmq poll: consumer %ld, subkey %s, offset reset to %ld", consumerId, pHandle->subKey, tqDebug("tmq poll: consumer %ld, subkey %s, vg %d, offset reset to %ld", consumerId, pHandle->subKey,
dataRsp.rspOffset.version); TD_VID(pTq->pVnode), dataRsp.rspOffset.version);
if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) { if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) {
code = -1; code = -1;
} }
@ -318,10 +322,10 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
// 3.query // 3.query
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
if (fetchOffsetNew.type == TMQ_OFFSET__LOG) { /*if (fetchOffsetNew.type == TMQ_OFFSET__LOG) {*/
fetchOffsetNew.version++; /*fetchOffsetNew.version++;*/
} /*}*/
if (tqScan(pTq, &pHandle->execHandle, &dataRsp, &fetchOffsetNew) < 0) { if (tqScan(pTq, pHandle, &dataRsp, &fetchOffsetNew) < 0) {
ASSERT(0); ASSERT(0);
code = -1; code = -1;
goto OVER; goto OVER;
@ -480,30 +484,28 @@ int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) {
pHandle->fetchMeta = req.withMeta; pHandle->fetchMeta = req.withMeta;
pHandle->pWalReader = walOpenReader(pTq->pVnode->pWal, NULL); pHandle->pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
/*for (int32_t i = 0; i < 5; i++) {*/
/*pHandle->execHandle.pExecReader[i] = tqOpenReader(pTq->pVnode);*/ // TODO version should be assigned in preprocess
/*}*/
int64_t ver = walGetCommittedVer(pTq->pVnode->pWal); int64_t ver = walGetCommittedVer(pTq->pVnode->pWal);
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
pHandle->execHandle.execCol.qmsg = req.qmsg; pHandle->execHandle.execCol.qmsg = req.qmsg;
pHandle->snapshotVer = ver;
req.qmsg = NULL; req.qmsg = NULL;
for (int32_t i = 0; i < 5; i++) { for (int32_t i = 0; i < 5; i++) {
SReadHandle handle = { SReadHandle handle = {
.tqReader = pHandle->execHandle.pExecReader[i],
.meta = pTq->pVnode->pMeta, .meta = pTq->pVnode->pMeta,
.vnode = pTq->pVnode, .vnode = pTq->pVnode,
.initTableReader = true, .initTableReader = true,
.initTqReader = true, .initTqReader = true,
.version = ver, .version = ver,
}; };
pHandle->execHandle.execCol.task[i] = qCreateStreamExecTaskInfo(pHandle->execHandle.execCol.qmsg, &handle); pHandle->execHandle.execCol.task[i] = qCreateQueueExecTaskInfo(pHandle->execHandle.execCol.qmsg, &handle);
ASSERT(pHandle->execHandle.execCol.task[i]); ASSERT(pHandle->execHandle.execCol.task[i]);
void* scanner = NULL; void* scanner = NULL;
qExtractStreamScanner(pHandle->execHandle.execCol.task[i], &scanner); qExtractStreamScanner(pHandle->execHandle.execCol.task[i], &scanner);
ASSERT(scanner); ASSERT(scanner);
pHandle->execHandle.pExecReader[i] = qExtractReaderFromStreamScanner(scanner); pHandle->execHandle.pExecReader[i] = qExtractReaderFromStreamScanner(scanner);
ASSERT(pHandle->execHandle.pExecReader[i]); ASSERT(pHandle->execHandle.pExecReader[i]);
pHandle->execHandle.tsdbEndVer = ver;
} }
} else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB) { } else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB) {
for (int32_t i = 0; i < 5; i++) { for (int32_t i = 0; i < 5; i++) {

View File

@ -59,13 +59,13 @@ static int32_t tqAddTbNameToRsp(const STQ* pTq, int64_t uid, SMqDataRsp* pRsp) {
return 0; return 0;
} }
int64_t tqScan(STQ* pTq, const STqExecHandle* pExec, SMqDataRsp* pRsp, STqOffsetVal* pOffset) { int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* pOffset) {
qTaskInfo_t task = pExec->execCol.task[0]; const STqExecHandle* pExec = &pHandle->execHandle;
qTaskInfo_t task = pExec->execCol.task[0];
if (qStreamPrepareScan(task, pOffset) < 0) { if (qStreamPrepareScan(task, pOffset) < 0) {
ASSERT(pOffset->type == TMQ_OFFSET__LOG); ASSERT(pOffset->type == TMQ_OFFSET__LOG);
pRsp->rspOffset = *pOffset; pRsp->rspOffset = *pOffset;
pRsp->rspOffset.version--;
return 0; return 0;
} }
@ -73,9 +73,11 @@ int64_t tqScan(STQ* pTq, const STqExecHandle* pExec, SMqDataRsp* pRsp, STqOffset
while (1) { while (1) {
SSDataBlock* pDataBlock = NULL; SSDataBlock* pDataBlock = NULL;
uint64_t ts = 0; uint64_t ts = 0;
tqDebug("task start to execute");
if (qExecTask(task, &pDataBlock, &ts) < 0) { if (qExecTask(task, &pDataBlock, &ts) < 0) {
ASSERT(0); ASSERT(0);
} }
tqDebug("task execute end, get %p", pDataBlock);
if (pDataBlock != NULL) { if (pDataBlock != NULL) {
tqAddBlockDataToRsp(pDataBlock, pRsp); tqAddBlockDataToRsp(pDataBlock, pRsp);
@ -97,7 +99,7 @@ int64_t tqScan(STQ* pTq, const STqExecHandle* pExec, SMqDataRsp* pRsp, STqOffset
} }
if (pRsp->blockNum == 0 && pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) { if (pRsp->blockNum == 0 && pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
tqOffsetResetToLog(pOffset, pExec->tsdbEndVer + 1); tqOffsetResetToLog(pOffset, pHandle->snapshotVer + 1);
qStreamPrepareScan(task, pOffset); qStreamPrepareScan(task, pOffset);
continue; continue;
} }
@ -116,7 +118,7 @@ int64_t tqScan(STQ* pTq, const STqExecHandle* pExec, SMqDataRsp* pRsp, STqOffset
if (pRsp->reqOffset.type == TMQ_OFFSET__LOG) { if (pRsp->reqOffset.type == TMQ_OFFSET__LOG) {
ASSERT(pRsp->rspOffset.version + 1 >= pRsp->reqOffset.version); ASSERT(pRsp->rspOffset.version + 1 >= pRsp->reqOffset.version);
} }
tqDebug("task exec exited");
break; break;
} }

View File

@ -19,6 +19,7 @@ static int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle) {
if (tStartEncode(pEncoder) < 0) return -1; if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeCStr(pEncoder, pHandle->subKey) < 0) return -1; if (tEncodeCStr(pEncoder, pHandle->subKey) < 0) return -1;
if (tEncodeI64(pEncoder, pHandle->consumerId) < 0) return -1; if (tEncodeI64(pEncoder, pHandle->consumerId) < 0) return -1;
if (tEncodeI64(pEncoder, pHandle->snapshotVer) < 0) return -1;
if (tEncodeI32(pEncoder, pHandle->epoch) < 0) return -1; if (tEncodeI32(pEncoder, pHandle->epoch) < 0) return -1;
if (tEncodeI8(pEncoder, pHandle->execHandle.subType) < 0) return -1; if (tEncodeI8(pEncoder, pHandle->execHandle.subType) < 0) return -1;
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
@ -32,6 +33,7 @@ static int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle) {
if (tStartDecode(pDecoder) < 0) return -1; if (tStartDecode(pDecoder) < 0) return -1;
if (tDecodeCStrTo(pDecoder, pHandle->subKey) < 0) return -1; if (tDecodeCStrTo(pDecoder, pHandle->subKey) < 0) return -1;
if (tDecodeI64(pDecoder, &pHandle->consumerId) < 0) return -1; if (tDecodeI64(pDecoder, &pHandle->consumerId) < 0) return -1;
if (tDecodeI64(pDecoder, &pHandle->snapshotVer) < 0) return -1;
if (tDecodeI32(pDecoder, &pHandle->epoch) < 0) return -1; if (tDecodeI32(pDecoder, &pHandle->epoch) < 0) return -1;
if (tDecodeI8(pDecoder, &pHandle->execHandle.subType) < 0) return -1; if (tDecodeI8(pDecoder, &pHandle->execHandle.subType) < 0) return -1;
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
@ -78,19 +80,25 @@ int32_t tqMetaOpen(STQ* pTq) {
tDecoderInit(&decoder, (uint8_t*)pVal, vLen); tDecoderInit(&decoder, (uint8_t*)pVal, vLen);
tDecodeSTqHandle(&decoder, &handle); tDecodeSTqHandle(&decoder, &handle);
handle.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL); handle.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
for (int32_t i = 0; i < 5; i++) { /*for (int32_t i = 0; i < 5; i++) {*/
handle.execHandle.pExecReader[i] = tqOpenReader(pTq->pVnode); /*handle.execHandle.pExecReader[i] = tqOpenReader(pTq->pVnode);*/
} /*}*/
if (handle.execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { if (handle.execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
for (int32_t i = 0; i < 5; i++) { for (int32_t i = 0; i < 5; i++) {
SReadHandle reader = { SReadHandle reader = {
.tqReader = handle.execHandle.pExecReader[i],
.meta = pTq->pVnode->pMeta, .meta = pTq->pVnode->pMeta,
.pMsgCb = &pTq->pVnode->msgCb,
.vnode = pTq->pVnode, .vnode = pTq->pVnode,
.initTableReader = true,
.initTqReader = true,
.version = handle.snapshotVer,
}; };
handle.execHandle.execCol.task[i] = qCreateStreamExecTaskInfo(handle.execHandle.execCol.qmsg, &reader); handle.execHandle.execCol.task[i] = qCreateQueueExecTaskInfo(handle.execHandle.execCol.qmsg, &reader);
ASSERT(handle.execHandle.execCol.task[i]); ASSERT(handle.execHandle.execCol.task[i]);
void* scanner = NULL;
qExtractStreamScanner(handle.execHandle.execCol.task[i], &scanner);
ASSERT(scanner);
handle.execHandle.pExecReader[i] = qExtractReaderFromStreamScanner(scanner);
ASSERT(handle.execHandle.pExecReader[i]);
} }
} else { } else {
handle.execHandle.execDb.pFilterOutTbUid = handle.execHandle.execDb.pFilterOutTbUid =

View File

@ -22,7 +22,6 @@ typedef struct SLastrowReader {
SVnode* pVnode; SVnode* pVnode;
STSchema* pSchema; STSchema* pSchema;
uint64_t uid; uint64_t uid;
// int32_t* pSlotIds;
char** transferBuf; // todo remove it soon char** transferBuf; // todo remove it soon
int32_t numOfCols; int32_t numOfCols;
int32_t type; int32_t type;
@ -31,25 +30,27 @@ typedef struct SLastrowReader {
} SLastrowReader; } SLastrowReader;
static void saveOneRow(STSRow* pRow, SSDataBlock* pBlock, SLastrowReader* pReader, const int32_t* slotIds) { static void saveOneRow(STSRow* pRow, SSDataBlock* pBlock, SLastrowReader* pReader, const int32_t* slotIds) {
ASSERT(pReader->numOfCols <= taosArrayGetSize(pBlock->pDataBlock));
int32_t numOfRows = pBlock->info.rows; int32_t numOfRows = pBlock->info.rows;
size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
SColVal colVal = {0}; SColVal colVal = {0};
for (int32_t i = 0; i < numOfCols; ++i) { for (int32_t i = 0; i < pReader->numOfCols; ++i) {
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i); SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
if (slotIds[i] == -1) { if (slotIds[i] == -1) {
colDataAppend(pColInfoData, numOfRows, (const char*)&pRow->ts, false); colDataAppend(pColInfoData, numOfRows, (const char*)&pRow->ts, false);
} else { } else {
tTSRowGetVal(pRow, pReader->pSchema, slotIds[i], &colVal); int32_t slotId = slotIds[i];
tTSRowGetVal(pRow, pReader->pSchema, slotId, &colVal);
if (IS_VAR_DATA_TYPE(colVal.type)) { if (IS_VAR_DATA_TYPE(colVal.type)) {
if (colVal.isNull || colVal.isNone) { if (colVal.isNull || colVal.isNone) {
colDataAppendNULL(pColInfoData, numOfRows); colDataAppendNULL(pColInfoData, numOfRows);
} else { } else {
varDataSetLen(pReader->transferBuf[i], colVal.value.nData); varDataSetLen(pReader->transferBuf[slotId], colVal.value.nData);
memcpy(varDataVal(pReader->transferBuf[i]), colVal.value.pData, colVal.value.nData); memcpy(varDataVal(pReader->transferBuf[slotId]), colVal.value.pData, colVal.value.nData);
colDataAppend(pColInfoData, numOfRows, pReader->transferBuf[i], false); colDataAppend(pColInfoData, numOfRows, pReader->transferBuf[slotId], false);
} }
} else { } else {
colDataAppend(pColInfoData, numOfRows, (const char*)&colVal.value, colVal.isNull || colVal.isNone); colDataAppend(pColInfoData, numOfRows, (const char*)&colVal.value, colVal.isNull || colVal.isNone);
@ -60,8 +61,7 @@ static void saveOneRow(STSRow* pRow, SSDataBlock* pBlock, SLastrowReader* pReade
pBlock->info.rows += 1; pBlock->info.rows += 1;
} }
int32_t tsdbLastRowReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList, int32_t* colId, int32_t numOfCols, int32_t tsdbLastRowReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList, int32_t numOfCols, void** pReader) {
void** pReader) {
SLastrowReader* p = taosMemoryCalloc(1, sizeof(SLastrowReader)); SLastrowReader* p = taosMemoryCalloc(1, sizeof(SLastrowReader));
if (p == NULL) { if (p == NULL) {
return TSDB_CODE_OUT_OF_MEMORY; return TSDB_CODE_OUT_OF_MEMORY;
@ -70,13 +70,18 @@ int32_t tsdbLastRowReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList,
p->type = type; p->type = type;
p->pVnode = pVnode; p->pVnode = pVnode;
p->numOfCols = numOfCols; p->numOfCols = numOfCols;
p->transferBuf = taosMemoryCalloc(p->numOfCols, POINTER_BYTES);
if (taosArrayGetSize(pTableIdList) == 0) {
*pReader = p;
return TSDB_CODE_SUCCESS;
}
STableKeyInfo* pKeyInfo = taosArrayGet(pTableIdList, 0); STableKeyInfo* pKeyInfo = taosArrayGet(pTableIdList, 0);
p->pSchema = metaGetTbTSchema(p->pVnode->pMeta, pKeyInfo->uid, -1); p->pSchema = metaGetTbTSchema(p->pVnode->pMeta, pKeyInfo->uid, -1);
p->pTableList = pTableIdList; p->pTableList = pTableIdList;
for (int32_t i = 0; i < p->numOfCols; ++i) { p->transferBuf = taosMemoryCalloc(p->pSchema->numOfCols, POINTER_BYTES);
for (int32_t i = 0; i < p->pSchema->numOfCols; ++i) {
if (IS_VAR_DATA_TYPE(p->pSchema->columns[i].type)) { if (IS_VAR_DATA_TYPE(p->pSchema->columns[i].type)) {
p->transferBuf[i] = taosMemoryMalloc(p->pSchema->columns[i].bytes); p->transferBuf[i] = taosMemoryMalloc(p->pSchema->columns[i].bytes);
} }
@ -89,10 +94,11 @@ int32_t tsdbLastRowReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList,
int32_t tsdbLastrowReaderClose(void* pReader) { int32_t tsdbLastrowReaderClose(void* pReader) {
SLastrowReader* p = pReader; SLastrowReader* p = pReader;
for (int32_t i = 0; i < p->numOfCols; ++i) { for (int32_t i = 0; i < p->pSchema->numOfCols; ++i) {
taosMemoryFreeClear(p->transferBuf[i]); taosMemoryFreeClear(p->transferBuf[i]);
} }
taosMemoryFree(p->pSchema);
taosMemoryFree(p->transferBuf); taosMemoryFree(p->transferBuf);
taosMemoryFree(pReader); taosMemoryFree(pReader);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;

View File

@ -71,7 +71,7 @@ typedef struct SFilesetIter {
typedef struct SFileDataBlockInfo { typedef struct SFileDataBlockInfo {
int32_t int32_t
tbBlockIdx; // index position in STableBlockScanInfo in order to check whether neighbor block overlaps with it tbBlockIdx; // index position in STableBlockScanInfo in order to check whether neighbor block overlaps with it
uint64_t uid; uint64_t uid;
} SFileDataBlockInfo; } SFileDataBlockInfo;
@ -119,10 +119,10 @@ struct STsdbReader {
int32_t type; // query type: 1. retrieve all data blocks, 2. retrieve direct prev|next rows int32_t type; // query type: 1. retrieve all data blocks, 2. retrieve direct prev|next rows
SBlockLoadSuppInfo suppInfo; SBlockLoadSuppInfo suppInfo;
SIOCostSummary cost; SIOCostSummary cost;
STSchema* pSchema; STSchema* pSchema;
SDataFReader* pFileReader; SDataFReader* pFileReader;
SVersionRange verRange; SVersionRange verRange;
}; };
static SFileDataBlockInfo* getCurrentBlockInfo(SDataBlockIter* pBlockIter); static SFileDataBlockInfo* getCurrentBlockInfo(SDataBlockIter* pBlockIter);
@ -287,9 +287,7 @@ static int32_t initFilesetIterator(SFilesetIter* pIter, const STsdbFSState* pFSt
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
static void cleanupFilesetIterator(SFilesetIter* pIter) { static void cleanupFilesetIterator(SFilesetIter* pIter) { taosArrayDestroy(pIter->pFileList); }
taosArrayDestroy(pIter->pFileList);
}
static bool filesetIteratorNext(SFilesetIter* pIter, STsdbReader* pReader) { static bool filesetIteratorNext(SFilesetIter* pIter, STsdbReader* pReader) {
bool asc = ASCENDING_TRAVERSE(pIter->order); bool asc = ASCENDING_TRAVERSE(pIter->order);
@ -304,6 +302,10 @@ static bool filesetIteratorNext(SFilesetIter* pIter, STsdbReader* pReader) {
STimeWindow win = {0}; STimeWindow win = {0};
while (1) { while (1) {
// if (pReader->pFileReader != NULL) {
// tsdbDataFReaderClose(&pReader->pFileReader);
// }
pReader->status.pCurrentFileset = (SDFileSet*)taosArrayGet(pIter->pFileList, pIter->index); pReader->status.pCurrentFileset = (SDFileSet*)taosArrayGet(pIter->pFileList, pIter->index);
int32_t code = tsdbDataFReaderOpen(&pReader->pFileReader, pReader->pTsdb, pReader->status.pCurrentFileset); int32_t code = tsdbDataFReaderOpen(&pReader->pFileReader, pReader->pTsdb, pReader->status.pCurrentFileset);
@ -349,9 +351,7 @@ static void resetDataBlockIterator(SDataBlockIter* pIter, int32_t order) {
} }
} }
static void cleanupDataBlockIterator(SDataBlockIter* pIter) { static void cleanupDataBlockIterator(SDataBlockIter* pIter) { taosArrayDestroy(pIter->blockList); }
taosArrayDestroy(pIter->blockList);
}
static void initReaderStatus(SReaderStatus* pStatus) { static void initReaderStatus(SReaderStatus* pStatus) {
pStatus->pTableIter = NULL; pStatus->pTableIter = NULL;
@ -392,8 +392,7 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsd
initReaderStatus(&pReader->status); initReaderStatus(&pReader->status);
pReader->pTsdb = pReader->pTsdb = getTsdbByRetentions(pVnode, pCond->twindows.skey, pVnode->config.tsdbCfg.retentions, idstr, &level);
getTsdbByRetentions(pVnode, pCond->twindows.skey, pVnode->config.tsdbCfg.retentions, idstr, &level);
pReader->suid = pCond->suid; pReader->suid = pCond->suid;
pReader->order = pCond->order; pReader->order = pCond->order;
pReader->capacity = 4096; pReader->capacity = 4096;
@ -833,7 +832,7 @@ static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockI
uint8_t *pb = NULL, *pb1 = NULL; uint8_t *pb = NULL, *pb1 = NULL;
int32_t code = tsdbReadColData(pReader->pFileReader, &pBlockScanInfo->blockIdx, pBlock, pSupInfo->colIds, numOfCols, int32_t code = tsdbReadColData(pReader->pFileReader, &pBlockScanInfo->blockIdx, pBlock, pSupInfo->colIds, numOfCols,
pBlockData, &pb, &pb1); pBlockData, &pb, &pb1);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
goto _error; goto _error;
} }
@ -1459,18 +1458,18 @@ static bool overlapWithDelSkyline(STableBlockScanInfo* pBlockScanInfo, const SBl
} }
TSDBKEY* pFirst = taosArrayGet(pBlockScanInfo->delSkyline, 0); TSDBKEY* pFirst = taosArrayGet(pBlockScanInfo->delSkyline, 0);
TSDBKEY* pLast = taosArrayGetLast(pBlockScanInfo->delSkyline); TSDBKEY* pLast = taosArrayGetLast(pBlockScanInfo->delSkyline);
// ts is not overlap // ts is not overlap
if (pBlock->minKey.ts > pLast->ts || pBlock->maxKey.ts < pFirst->ts) { if (pBlock->minKey.ts > pLast->ts || pBlock->maxKey.ts < pFirst->ts) {
return false; return false;
} }
int32_t step = ASCENDING_TRAVERSE(order)? 1:-1; int32_t step = ASCENDING_TRAVERSE(order) ? 1 : -1;
// version is not overlap // version is not overlap
size_t num = taosArrayGetSize(pBlockScanInfo->delSkyline); size_t num = taosArrayGetSize(pBlockScanInfo->delSkyline);
for(int32_t i = pBlockScanInfo->fileDelIndex; i < num; i += step) { for (int32_t i = pBlockScanInfo->fileDelIndex; i < num; i += step) {
TSDBKEY* p = taosArrayGet(pBlockScanInfo->delSkyline, i); TSDBKEY* p = taosArrayGet(pBlockScanInfo->delSkyline, i);
if (p->ts >= pBlock->minKey.ts && p->ts <= pBlock->maxKey.ts) { if (p->ts >= pBlock->minKey.ts && p->ts <= pBlock->maxKey.ts) {
if (p->version >= pBlock->minVersion) { if (p->version >= pBlock->minVersion) {
@ -1502,8 +1501,8 @@ static bool fileBlockShouldLoad(STsdbReader* pReader, SFileDataBlockInfo* pFBloc
} }
// has duplicated ts of different version in this block // has duplicated ts of different version in this block
bool hasDup = (pBlock->nSubBlock == 1)? pBlock->hasDup:true; bool hasDup = (pBlock->nSubBlock == 1) ? pBlock->hasDup : true;
bool overlapWithDel= overlapWithDelSkyline(pScanInfo, pBlock, pReader->order); bool overlapWithDel = overlapWithDelSkyline(pScanInfo, pBlock, pReader->order);
return (overlapWithNeighbor || hasDup || dataBlockPartiallyRequired(&pReader->window, &pReader->verRange, pBlock) || return (overlapWithNeighbor || hasDup || dataBlockPartiallyRequired(&pReader->window, &pReader->verRange, pBlock) ||
keyOverlapFileBlock(key, pBlock, &pReader->verRange) || (pBlock->nRow > pReader->capacity) || overlapWithDel); keyOverlapFileBlock(key, pBlock, &pReader->verRange) || (pBlock->nRow > pReader->capacity) || overlapWithDel);
@ -2220,17 +2219,18 @@ static STsdb* getTsdbByRetentions(SVnode* pVnode, TSKEY winSKey, SRetention* ret
} }
SVersionRange getQueryVerRange(SVnode* pVnode, SQueryTableDataCond* pCond, int8_t level) { SVersionRange getQueryVerRange(SVnode* pVnode, SQueryTableDataCond* pCond, int8_t level) {
int64_t startVer = (pCond->startVersion == -1)? 0:pCond->startVersion; int64_t startVer = (pCond->startVersion == -1) ? 0 : pCond->startVersion;
if (VND_IS_RSMA(pVnode)) { if (VND_IS_RSMA(pVnode)) {
return (SVersionRange){.minVer = startVer, .maxVer = tdRSmaGetMaxSubmitVer(pVnode->pSma, level)}; return (SVersionRange){.minVer = startVer, .maxVer = tdRSmaGetMaxSubmitVer(pVnode->pSma, level)};
} }
int64_t endVer = 0; int64_t endVer = 0;
if (pCond->endVersion == -1) { // user not specified end version, set current maximum version of vnode as the endVersion if (pCond->endVersion ==
-1) { // user not specified end version, set current maximum version of vnode as the endVersion
endVer = pVnode->state.applied; endVer = pVnode->state.applied;
} else { } else {
endVer = (pCond->endVersion > pVnode->state.applied)? pVnode->state.applied:pCond->endVersion; endVer = (pCond->endVersion > pVnode->state.applied) ? pVnode->state.applied : pCond->endVersion;
} }
return (SVersionRange){.minVer = startVer, .maxVer = endVer}; return (SVersionRange){.minVer = startVer, .maxVer = endVer};
@ -2274,9 +2274,9 @@ bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32
if (pDelList == NULL) { if (pDelList == NULL) {
return false; return false;
} }
size_t num = taosArrayGetSize(pDelList); size_t num = taosArrayGetSize(pDelList);
bool asc = ASCENDING_TRAVERSE(order); bool asc = ASCENDING_TRAVERSE(order);
int32_t step = asc? 1:-1; int32_t step = asc ? 1 : -1;
if (asc) { if (asc) {
if (*index >= num - 1) { if (*index >= num - 1) {
@ -2437,6 +2437,7 @@ static int32_t doMergeRowsInFileBlockImpl(SBlockData* pBlockData, int32_t rowInd
SVersionRange* pVerRange, int32_t step) { SVersionRange* pVerRange, int32_t step) {
while (pBlockData->aTSKEY[rowIndex] == key && rowIndex < pBlockData->nRow && rowIndex >= 0) { while (pBlockData->aTSKEY[rowIndex] == key && rowIndex < pBlockData->nRow && rowIndex >= 0) {
if (pBlockData->aVersion[rowIndex] > pVerRange->maxVer || pBlockData->aVersion[rowIndex] < pVerRange->minVer) { if (pBlockData->aVersion[rowIndex] > pVerRange->maxVer || pBlockData->aVersion[rowIndex] < pVerRange->minVer) {
rowIndex += step;
continue; continue;
} }
@ -2823,7 +2824,7 @@ void tsdbReaderClose(STsdbReader* pReader) {
taosMemoryFree(pSupInfo->colIds); taosMemoryFree(pSupInfo->colIds);
taosArrayDestroy(pSupInfo->pColAgg); taosArrayDestroy(pSupInfo->pColAgg);
for(int32_t i = 0; i < blockDataGetNumOfCols(pReader->pResBlock); ++i) { for (int32_t i = 0; i < blockDataGetNumOfCols(pReader->pResBlock); ++i) {
if (pSupInfo->buildBuf[i] != NULL) { if (pSupInfo->buildBuf[i] != NULL) {
taosMemoryFreeClear(pSupInfo->buildBuf[i]); taosMemoryFreeClear(pSupInfo->buildBuf[i]);
} }
@ -2835,6 +2836,9 @@ void tsdbReaderClose(STsdbReader* pReader) {
destroyBlockScanInfo(pReader->status.pTableMap); destroyBlockScanInfo(pReader->status.pTableMap);
blockDataDestroy(pReader->pResBlock); blockDataDestroy(pReader->pResBlock);
if (pReader->pFileReader != NULL) {
tsdbDataFReaderClose(&pReader->pFileReader);
}
#if 0 #if 0
// if (pReader->status.pTableScanInfo != NULL) { // if (pReader->status.pTableScanInfo != NULL) {
@ -3011,8 +3015,8 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
pReader->order = pCond->order; pReader->order = pCond->order;
pReader->type = BLOCK_LOAD_OFFSET_ORDER; pReader->type = BLOCK_LOAD_OFFSET_ORDER;
pReader->status.loadFromFile = true; pReader->status.loadFromFile = true;
pReader->status.pTableIter = NULL; pReader->status.pTableIter = NULL;
@ -3023,11 +3027,14 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) {
memset(pReader->suppInfo.plist, 0, POINTER_BYTES); memset(pReader->suppInfo.plist, 0, POINTER_BYTES);
pReader->suppInfo.tsColAgg.colId = PRIMARYKEY_TIMESTAMP_COL_ID; pReader->suppInfo.tsColAgg.colId = PRIMARYKEY_TIMESTAMP_COL_ID;
tsdbDataFReaderClose(&pReader->pFileReader);
// todo set the correct numOfTables // todo set the correct numOfTables
int32_t numOfTables = 1; int32_t numOfTables = 1;
SDataBlockIter* pBlockIter = &pReader->status.blockIter; SDataBlockIter* pBlockIter = &pReader->status.blockIter;
tsdbDataFReaderClose(&pReader->pFileReader);
STsdbFSState* pFState = pReader->pTsdb->fs->cState; STsdbFSState* pFState = pReader->pTsdb->fs->cState;
initFilesetIterator(&pReader->status.fileIter, pFState, pReader->order, pReader->idStr); initFilesetIterator(&pReader->status.fileIter, pFState, pReader->order, pReader->idStr);
resetDataBlockIterator(&pReader->status.blockIter, pReader->order); resetDataBlockIterator(&pReader->status.blockIter, pReader->order);
@ -3114,13 +3121,12 @@ int32_t tsdbGetFileBlocksDistInfo(STsdbReader* pReader, STableBlockDistInfo* pTa
pTableBlockInfo->numOfBlocks += pBlockIter->numOfBlocks; pTableBlockInfo->numOfBlocks += pBlockIter->numOfBlocks;
} }
/* /*
hasNext = blockIteratorNext(&pStatus->blockIter); hasNext = blockIteratorNext(&pStatus->blockIter);
*/ */
// tsdbDebug("%p %d blocks found in file for %d table(s), fid:%d, %s", pReader, numOfBlocks, numOfTables,
// tsdbDebug("%p %d blocks found in file for %d table(s), fid:%d, %s", pReader, numOfBlocks, numOfTables, // pReader->pFileGroup->fid, pReader->idStr);
// pReader->pFileGroup->fid, pReader->idStr);
} }
return code; return code;
@ -3158,7 +3164,7 @@ int64_t tsdbGetNumOfRowsInMemTable(STsdbReader* pReader) {
return rows; return rows;
} }
int32_t tsdbGetTableSchema(SVnode* pVnode, int64_t uid, STSchema** pSchema, int64_t *suid) { int32_t tsdbGetTableSchema(SVnode* pVnode, int64_t uid, STSchema** pSchema, int64_t* suid) {
int32_t sversion = 1; int32_t sversion = 1;
SMetaReader mr = {0}; SMetaReader mr = {0};
@ -3171,7 +3177,7 @@ int32_t tsdbGetTableSchema(SVnode* pVnode, int64_t uid, STSchema** pSchema, int6
} }
*suid = 0; *suid = 0;
if (mr.me.type == TSDB_CHILD_TABLE) { if (mr.me.type == TSDB_CHILD_TABLE) {
*suid = mr.me.ctbEntry.suid; *suid = mr.me.ctbEntry.suid;
code = metaGetTableEntryByUid(&mr, *suid); code = metaGetTableEntryByUid(&mr, *suid);
@ -3188,8 +3194,7 @@ int32_t tsdbGetTableSchema(SVnode* pVnode, int64_t uid, STSchema** pSchema, int6
metaReaderClear(&mr); metaReaderClear(&mr);
*pSchema = metaGetTbTSchema(pVnode->pMeta, uid, sversion); *pSchema = metaGetTbTSchema(pVnode->pMeta, uid, sversion);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }

View File

@ -642,6 +642,7 @@ void ctgFreeSTableIndex(void *info);
void ctgClearSubTaskRes(SCtgSubRes *pRes); void ctgClearSubTaskRes(SCtgSubRes *pRes);
void ctgFreeQNode(SCtgQNode *node); void ctgFreeQNode(SCtgQNode *node);
void ctgClearHandle(SCatalog* pCtg); void ctgClearHandle(SCatalog* pCtg);
void ctgFreeTbCacheImpl(SCtgTbCache *pCache);
extern SCatalogMgmt gCtgMgmt; extern SCatalogMgmt gCtgMgmt;

View File

@ -647,6 +647,8 @@ int32_t ctgEnqueue(SCatalog* pCtg, SCtgCacheOperation *operation) {
CTG_RET(TSDB_CODE_OUT_OF_MEMORY); CTG_RET(TSDB_CODE_OUT_OF_MEMORY);
} }
bool syncOp = operation->syncOp;
char* opName = gCtgCacheOperation[operation->opId].name;
if (operation->syncOp) { if (operation->syncOp) {
tsem_init(&operation->rspSem, 0, 0); tsem_init(&operation->rspSem, 0, 0);
} }
@ -664,14 +666,14 @@ int32_t ctgEnqueue(SCatalog* pCtg, SCtgCacheOperation *operation) {
gCtgMgmt.queue.tail = node; gCtgMgmt.queue.tail = node;
CTG_UNLOCK(CTG_WRITE, &gCtgMgmt.queue.qlock); CTG_UNLOCK(CTG_WRITE, &gCtgMgmt.queue.qlock);
ctgDebug("action [%s] added into queue", opName);
CTG_QUEUE_INC(); CTG_QUEUE_INC();
CTG_RT_STAT_INC(numOfOpEnqueue, 1); CTG_RT_STAT_INC(numOfOpEnqueue, 1);
tsem_post(&gCtgMgmt.queue.reqSem); tsem_post(&gCtgMgmt.queue.reqSem);
ctgDebug("action [%s] added into queue", gCtgCacheOperation[operation->opId].name); if (syncOp) {
if (operation->syncOp) {
tsem_wait(&operation->rspSem); tsem_wait(&operation->rspSem);
taosMemoryFree(operation); taosMemoryFree(operation);
} }
@ -840,6 +842,7 @@ _return:
ctgFreeVgInfo(dbInfo); ctgFreeVgInfo(dbInfo);
taosMemoryFreeClear(op->data); taosMemoryFreeClear(op->data);
taosMemoryFreeClear(op);
CTG_RET(code); CTG_RET(code);
} }
@ -852,7 +855,7 @@ int32_t ctgUpdateTbMetaEnqueue(SCatalog* pCtg, STableMetaOutput *output, bool sy
SCtgUpdateTbMetaMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateTbMetaMsg)); SCtgUpdateTbMetaMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateTbMetaMsg));
if (NULL == msg) { if (NULL == msg) {
ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateTbMetaMsg)); ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateTbMetaMsg));
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
} }
char *p = strchr(output->dbFName, '.'); char *p = strchr(output->dbFName, '.');
@ -871,6 +874,11 @@ int32_t ctgUpdateTbMetaEnqueue(SCatalog* pCtg, STableMetaOutput *output, bool sy
_return: _return:
if (output) {
taosMemoryFree(output->tbMeta);
taosMemoryFree(output);
}
taosMemoryFreeClear(msg); taosMemoryFreeClear(msg);
CTG_RET(code); CTG_RET(code);
@ -1753,6 +1761,16 @@ int32_t ctgOpDropStbMeta(SCtgCacheOperation *operation) {
CTG_CACHE_STAT_DEC(numOfStb, 1); CTG_CACHE_STAT_DEC(numOfStb, 1);
} }
SCtgTbCache* pTbCache = taosHashGet(dbCache->tbCache, msg->stbName, strlen(msg->stbName));
if (NULL == pTbCache) {
ctgDebug("stb %s already not in cache", msg->stbName);
goto _return;
}
CTG_LOCK(CTG_WRITE, &pTbCache->metaLock);
ctgFreeTbCacheImpl(pTbCache);
CTG_UNLOCK(CTG_WRITE, &pTbCache->metaLock);
if (taosHashRemove(dbCache->tbCache, msg->stbName, strlen(msg->stbName))) { if (taosHashRemove(dbCache->tbCache, msg->stbName, strlen(msg->stbName))) {
ctgError("stb not exist in cache, dbFName:%s, stb:%s, suid:0x%"PRIx64, msg->dbFName, msg->stbName, msg->suid); ctgError("stb not exist in cache, dbFName:%s, stb:%s, suid:0x%"PRIx64, msg->dbFName, msg->stbName, msg->suid);
} else { } else {
@ -1780,14 +1798,24 @@ int32_t ctgOpDropTbMeta(SCtgCacheOperation *operation) {
SCtgDBCache *dbCache = NULL; SCtgDBCache *dbCache = NULL;
ctgGetDBCache(pCtg, msg->dbFName, &dbCache); ctgGetDBCache(pCtg, msg->dbFName, &dbCache);
if (NULL == dbCache) { if (NULL == dbCache) {
return TSDB_CODE_SUCCESS; goto _return;
} }
if (dbCache->dbId != msg->dbId) { if (dbCache->dbId != msg->dbId) {
ctgDebug("dbId 0x%" PRIx64 " not match with curId 0x%"PRIx64", dbFName:%s, tbName:%s", msg->dbId, dbCache->dbId, msg->dbFName, msg->tbName); ctgDebug("dbId 0x%" PRIx64 " not match with curId 0x%"PRIx64", dbFName:%s, tbName:%s", msg->dbId, dbCache->dbId, msg->dbFName, msg->tbName);
return TSDB_CODE_SUCCESS; goto _return;
} }
SCtgTbCache* pTbCache = taosHashGet(dbCache->tbCache, msg->tbName, strlen(msg->tbName));
if (NULL == pTbCache) {
ctgDebug("tb %s already not in cache", msg->tbName);
goto _return;
}
CTG_LOCK(CTG_WRITE, &pTbCache->metaLock);
ctgFreeTbCacheImpl(pTbCache);
CTG_UNLOCK(CTG_WRITE, &pTbCache->metaLock);
if (taosHashRemove(dbCache->tbCache, msg->tbName, strlen(msg->tbName))) { if (taosHashRemove(dbCache->tbCache, msg->tbName, strlen(msg->tbName))) {
ctgError("tb %s not exist in cache, dbFName:%s", msg->tbName, msg->dbFName); ctgError("tb %s not exist in cache, dbFName:%s", msg->tbName, msg->dbFName);
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR); CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
@ -2063,6 +2091,8 @@ void* ctgUpdateThreadFunc(void* param) {
if (operation->syncOp) { if (operation->syncOp) {
tsem_post(&operation->rspSem); tsem_post(&operation->rspSem);
} else {
taosMemoryFreeClear(operation);
} }
CTG_RT_STAT_INC(numOfOpDequeue, 1); CTG_RT_STAT_INC(numOfOpDequeue, 1);

View File

@ -261,6 +261,8 @@ int32_t ctgHandleMsgCallback(void *param, SDataBuf *pMsg, int32_t rspCode) {
_return: _return:
taosMemoryFree(pMsg->pData);
if (pJob) { if (pJob) {
taosReleaseRef(gCtgMgmt.jobPool, cbParam->refId); taosReleaseRef(gCtgMgmt.jobPool, cbParam->refId);
} }

View File

@ -152,6 +152,7 @@ void ctgFreeStbMetaCache(SCtgDBCache *dbCache) {
} }
void ctgFreeTbCacheImpl(SCtgTbCache *pCache) { void ctgFreeTbCacheImpl(SCtgTbCache *pCache) {
qDebug("tbMeta freed, p:%p", pCache->pMeta);
taosMemoryFreeClear(pCache->pMeta); taosMemoryFreeClear(pCache->pMeta);
if (pCache->pIndex) { if (pCache->pIndex) {
taosArrayDestroyEx(pCache->pIndex->pIndex, tFreeSTableIndexInfo); taosArrayDestroyEx(pCache->pIndex->pIndex, tFreeSTableIndexInfo);
@ -831,6 +832,7 @@ int32_t ctgCloneMetaOutput(STableMetaOutput *output, STableMetaOutput **pOutput)
if (output->tbMeta) { if (output->tbMeta) {
int32_t metaSize = CTG_META_SIZE(output->tbMeta); int32_t metaSize = CTG_META_SIZE(output->tbMeta);
(*pOutput)->tbMeta = taosMemoryMalloc(metaSize); (*pOutput)->tbMeta = taosMemoryMalloc(metaSize);
qDebug("tbMeta cloned, size:%d, p:%p", metaSize, (*pOutput)->tbMeta);
if (NULL == (*pOutput)->tbMeta) { if (NULL == (*pOutput)->tbMeta) {
qError("malloc %d failed", (int32_t)sizeof(STableMetaOutput)); qError("malloc %d failed", (int32_t)sizeof(STableMetaOutput));
taosMemoryFreeClear(*pOutput); taosMemoryFreeClear(*pOutput);


@ -389,6 +389,7 @@ typedef struct SStreamScanInfo {
SSDataBlock* pPullDataRes; // pull data SSDataBlock SSDataBlock* pPullDataRes; // pull data SSDataBlock
SSDataBlock* pDeleteDataRes; // delete data SSDataBlock SSDataBlock* pDeleteDataRes; // delete data SSDataBlock
int32_t deleteDataIndex; int32_t deleteDataIndex;
STimeWindow updateWin;
// status for tmq // status for tmq
// SSchemaWrapper schema; // SSchemaWrapper schema;


@@ -46,7 +46,7 @@ SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pScanNode, SRead
   pInfo->pColMatchInfo = extractColMatchInfo(pScanNode->pScanCols, pScanNode->node.pOutputDataBlockDesc, &numOfCols,
                                              COL_MATCH_FROM_COL_ID);
   int32_t* pCols = taosMemoryMalloc(numOfCols * sizeof(int32_t));
-  for (int32_t i = 0; i < numOfCols; ++i) {
+  for (int32_t i = 0; i < taosArrayGetSize(pInfo->pColMatchInfo); ++i) {
     SColMatchInfo* pColMatch = taosArrayGet(pInfo->pColMatchInfo, i);
     pCols[i] = pColMatch->colId;
   }
@@ -56,7 +56,7 @@ SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pScanNode, SRead
     goto _error;
   }
 
-  tsdbLastRowReaderOpen(readHandle->vnode, LASTROW_RETRIEVE_TYPE_ALL, pTableList, pCols, numOfCols,
+  tsdbLastRowReaderOpen(readHandle->vnode, LASTROW_RETRIEVE_TYPE_ALL, pTableList, taosArrayGetSize(pInfo->pColMatchInfo),
                         &pInfo->pLastrowReader);
   taosMemoryFree(pCols);


@ -191,6 +191,7 @@ SSDataBlock* createResDataBlock(SDataBlockDescNode* pNode) {
pBlock->info.blockId = pNode->dataBlockId; pBlock->info.blockId = pNode->dataBlockId;
pBlock->info.type = STREAM_INVALID; pBlock->info.type = STREAM_INVALID;
pBlock->info.calWin = (STimeWindow){.skey = INT64_MIN, .ekey = INT64_MAX};
for (int32_t i = 0; i < numOfCols; ++i) { for (int32_t i = 0; i < numOfCols; ++i) {
SSlotDescNode* pDescNode = (SSlotDescNode*)nodesListGetNode(pNode->pSlots, i); SSlotDescNode* pDescNode = (SSlotDescNode*)nodesListGetNode(pNode->pSlots, i);


@ -106,6 +106,30 @@ int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numO
return code; return code;
} }
qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers) {
if (msg == NULL) {
// TODO create raw scan
return NULL;
}
struct SSubplan* plan = NULL;
int32_t code = qStringToSubplan(msg, &plan);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
return NULL;
}
qTaskInfo_t pTaskInfo = NULL;
code = qCreateExecTask(readers, 0, 0, plan, &pTaskInfo, NULL, NULL, OPTR_EXEC_MODEL_QUEUE);
if (code != TSDB_CODE_SUCCESS) {
// TODO: destroy SSubplan & pTaskInfo
terrno = code;
return NULL;
}
return pTaskInfo;
}
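
qCreateQueueExecTaskInfo() above follows a parse-then-build factory shape: deserialize the subplan, create the task, and on failure record the error in terrno and return NULL (the TODO notes the subplan/task cleanup is still missing). A self-contained sketch of that shape with stand-in types; parse_plan, build_task and g_errno are placeholders, not the executor API:

#include <stdio.h>
#include <stdlib.h>

typedef struct { int dummy; } plan_t;
typedef struct { plan_t *plan; } task_t;

static int g_errno = 0;                                  // stands in for terrno

static int parse_plan(const char *msg, plan_t **plan) {  // stands in for qStringToSubplan()
  if (msg[0] == '\0') return -1;
  *plan = calloc(1, sizeof(plan_t));
  return 0;
}

static int build_task(plan_t *plan, task_t **task) {     // stands in for qCreateExecTask()
  *task = calloc(1, sizeof(task_t));
  (*task)->plan = plan;
  return 0;
}

static task_t *create_queue_task(const char *msg) {
  if (msg == NULL) return NULL;                          // the "raw scan" case is left as a TODO above
  plan_t *plan = NULL;
  int code = parse_plan(msg, &plan);
  if (code != 0) { g_errno = code; return NULL; }
  task_t *task = NULL;
  code = build_task(plan, &task);
  if (code != 0) { free(plan); g_errno = code; return NULL; }
  return task;
}

int main(void) {
  task_t *t = create_queue_task("{\"plan\":1}");
  printf("task created: %s, errno=%d\n", t ? "yes" : "no", g_errno);
  return 0;
}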
qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers) { qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers) {
if (msg == NULL) { if (msg == NULL) {
return NULL; return NULL;
@ -186,7 +210,7 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo
} }
int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* tableName, int32_t* sversion, int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* tableName, int32_t* sversion,
int32_t* tversion) { int32_t* tversion) {
ASSERT(tinfo != NULL && dbName != NULL && tableName != NULL); ASSERT(tinfo != NULL && dbName != NULL && tableName != NULL);
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;


@@ -269,13 +269,13 @@ const STqOffset* qExtractStatusFromStreamScanner(void* scanner) {
 void* qStreamExtractMetaMsg(qTaskInfo_t tinfo) {
   SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
-  ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM);
+  ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE);
   return pTaskInfo->streamInfo.metaBlk;
 }
 
 int32_t qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset) {
   SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
-  ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM);
+  ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE);
   memcpy(pOffset, &pTaskInfo->streamInfo.lastStatus, sizeof(STqOffsetVal));
   return 0;
 }
@@ -283,35 +283,41 @@ int32_t qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset) {
 int32_t qStreamPrepareScan(qTaskInfo_t tinfo, const STqOffsetVal* pOffset) {
   SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
   SOperatorInfo* pOperator = pTaskInfo->pRoot;
-  ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM);
+  ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE);
   pTaskInfo->streamInfo.prepareStatus = *pOffset;
-  // TODO: optimize
-  /*if (pTaskInfo->streamInfo.lastStatus.type != pOffset->type ||*/
-  /*pTaskInfo->streamInfo.prepareStatus.version != pTaskInfo->streamInfo.lastStatus.version) {*/
-  while (1) {
-    uint8_t type = pOperator->operatorType;
-    pOperator->status = OP_OPENED;
-    if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
-      SStreamScanInfo* pInfo = pOperator->info;
-      if (pOffset->type == TMQ_OFFSET__LOG) {
-        if (tqSeekVer(pInfo->tqReader, pOffset->version) < 0) {
-          return -1;
-        }
-        ASSERT(pInfo->tqReader->pWalReader->curVersion == pOffset->version);
-      } else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
-        /*pInfo->blockType = STREAM_INPUT__TABLE_SCAN;*/
-        int64_t uid = pOffset->uid;
-        int64_t ts = pOffset->ts;
-        if (uid == 0) {
-          if (taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList) != 0) {
-            STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, 0);
-            uid = pTableInfo->uid;
-            ts = INT64_MIN;
-          }
-        }
-        if (pTaskInfo->streamInfo.lastStatus.type != TMQ_OFFSET__SNAPSHOT_DATA ||
-            pTaskInfo->streamInfo.lastStatus.uid != uid || pTaskInfo->streamInfo.lastStatus.ts != ts) {
+  if (!tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus)) {
+    while (1) {
+      uint8_t type = pOperator->operatorType;
+      pOperator->status = OP_OPENED;
+      if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
+        SStreamScanInfo* pInfo = pOperator->info;
+        if (pOffset->type == TMQ_OFFSET__LOG) {
+#if 0
+          if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus) &&
+              pInfo->tqReader->pWalReader->curVersion != pOffset->version) {
+            qError("prepare scan ver %ld actual ver %ld, last %ld", pOffset->version,
+                   pInfo->tqReader->pWalReader->curVersion, pTaskInfo->streamInfo.lastStatus.version);
+            ASSERT(0);
+          }
+#endif
+          if (tqSeekVer(pInfo->tqReader, pOffset->version + 1) < 0) {
+            return -1;
+          }
+          ASSERT(pInfo->tqReader->pWalReader->curVersion == pOffset->version + 1);
+        } else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
+          /*pInfo->blockType = STREAM_INPUT__TABLE_SCAN;*/
+          int64_t uid = pOffset->uid;
+          int64_t ts = pOffset->ts;
+          if (uid == 0) {
+            if (taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList) != 0) {
+              STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, 0);
+              uid = pTableInfo->uid;
+              ts = INT64_MIN;
+            }
+          }
+          /*if (pTaskInfo->streamInfo.lastStatus.type != TMQ_OFFSET__SNAPSHOT_DATA ||*/
+          /*pTaskInfo->streamInfo.lastStatus.uid != uid || pTaskInfo->streamInfo.lastStatus.ts != ts) {*/
           STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
           int32_t tableSz = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList);
           bool found = false;
@@ -320,6 +326,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, const STqOffsetVal* pOffset) {
             if (pTableInfo->uid == uid) {
               found = true;
               pTableScanInfo->currentTable = i;
+              break;
             }
           }
@@ -335,18 +342,18 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, const STqOffsetVal* pOffset) {
           qDebug("tsdb reader offset seek to uid %ld ts %ld, table cur set to %d , all table num %d", uid, ts,
                  pTableScanInfo->currentTable, tableSz);
-        }
-      } else {
-        ASSERT(0);
-      }
-      return 0;
-    } else {
-      ASSERT(pOperator->numOfDownstream == 1);
-      pOperator = pOperator->pDownstream[0];
-    }
-  }
-  /*}*/
-  return 0;
-}
+          /*}*/
+        } else {
+          ASSERT(0);
+        }
+        return 0;
+      } else {
+        ASSERT(pOperator->numOfDownstream == 1);
+        pOperator = pOperator->pDownstream[0];
+      }
+    }
+  }
+  return 0;
+}
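
The seek target moving from pOffset->version to pOffset->version + 1 is consistent with treating a stored offset as the last version already handed out, so replay resumes one past it; that reading is inferred from the assert on curVersion, not stated in the commit. A toy illustration of the convention, with a plain array in place of the WAL reader:

#include <stdint.h>
#include <stdio.h>

// A consumer that has acknowledged "committed_offset" resumes at the next
// version, mirroring tqSeekVer(pInfo->tqReader, pOffset->version + 1).
static int64_t next_fetch_version(int64_t committed_offset) {
  return committed_offset + 1;
}

int main(void) {
  int64_t log[] = {100, 101, 102, 103};        // payloads for versions 0..3
  int64_t committed = 1;                       // versions 0 and 1 already consumed

  for (int64_t v = next_fetch_version(committed); v < 4; v++) {
    printf("replay version %lld -> payload %lld\n", (long long)v, (long long)log[v]);
  }
  return 0;
}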


@@ -594,10 +594,14 @@ int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBloc
       SColumnInfoData* pColInfoData = taosArrayGet(pResult->pDataBlock, outputSlotId);
       int32_t offset = createNewColModel ? 0 : pResult->info.rows;
 
-      for (int32_t i = 0; i < pSrcBlock->info.rows; ++i) {
-        colDataAppend(pColInfoData, i + offset,
-                      taosVariantGet(&pExpr[k].base.pParam[0].param, pExpr[k].base.pParam[0].param.nType),
-                      TSDB_DATA_TYPE_NULL == pExpr[k].base.pParam[0].param.nType);
+      int32_t type = pExpr[k].base.pParam[0].param.nType;
+      if (TSDB_DATA_TYPE_NULL == type) {
+        colDataAppendNNULL(pColInfoData, offset, pSrcBlock->info.rows);
+      } else {
+        for (int32_t i = 0; i < pSrcBlock->info.rows; ++i) {
+          colDataAppend(pColInfoData, i + offset, taosVariantGet(&pExpr[k].base.pParam[0].param, type), false);
+        }
       }
 
       numOfRows = pSrcBlock->info.rows;
@@ -3235,6 +3239,10 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
   SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
 
   if (pOperator->status == OP_EXEC_DONE) {
+    if (pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE) {
+      pOperator->status = OP_OPENED;
+      return NULL;
+    }
     return NULL;
   }
 
@@ -3268,11 +3276,15 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
   while (1) {
     // The downstream exec may change the value of the newgroup, so use a local variable instead.
+    qDebug("projection call next");
     SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
     if (pBlock == NULL) {
-      // TODO optimize
-      /*if (pTaskInfo->execModel != OPTR_EXEC_MODEL_STREAM) {*/
+      qDebug("projection get null");
+      /*if (pTaskInfo->execModel == OPTR_EXEC_MODEL_BATCH) {*/
       doSetOperatorCompleted(pOperator);
+      /*} else if (pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE) {*/
+      /*pOperator->status = OP_RES_TO_RETURN;*/
       /*}*/
       break;
     }
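
The projectApplyFunctions() hunk above hoists the constant's type check out of the per-row loop and appends NULLs in bulk when the literal is NULL. A reduced model of that fast path with a plain bitmap column instead of SColumnInfoData; colDataAppendNNULL itself is not used here:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum { TYPE_NULL = 0, TYPE_INT = 1 };

typedef struct {
  int32_t vals[64];
  bool    null[64];
  int32_t rows;
} column_t;

// Bulk NULL append: one memset-style pass instead of a per-row branch.
static void col_append_n_null(column_t *col, int32_t offset, int32_t n) {
  memset(&col->null[offset], 1, (size_t)n);
  col->rows = offset + n;
}

static void project_constant(column_t *out, int32_t offset, int32_t rows, int type, int32_t value) {
  if (type == TYPE_NULL) {
    col_append_n_null(out, offset, rows);        // fast path, cf. colDataAppendNNULL()
  } else {
    for (int32_t i = 0; i < rows; i++) {         // slow path, one append per row
      out->vals[offset + i] = value;
      out->null[offset + i] = false;
    }
    out->rows = offset + rows;
  }
}

int main(void) {
  column_t c = {0};
  project_constant(&c, 0, 4, TYPE_NULL, 0);
  project_constant(&c, 4, 4, TYPE_INT, 7);
  for (int32_t i = 0; i < c.rows; i++) printf("%d:%s\n", i, c.null[i] ? "NULL" : "7");
  return 0;
}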


@ -884,6 +884,28 @@ static bool prepareRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_
return true; return true;
} }
static STimeWindow getSlidingWindow(TSKEY* tsCol, SInterval* pInterval, SDataBlockInfo* pDataBlockInfo, int32_t* pRowIndex) {
SResultRowInfo dumyInfo;
dumyInfo.cur.pageId = -1;
STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCol[*pRowIndex], pInterval,
TSDB_ORDER_ASC);
STimeWindow endWin = win;
STimeWindow preWin = win;
while (1) {
(*pRowIndex) += getNumOfRowsInTimeWindow(pDataBlockInfo, tsCol, *pRowIndex, endWin.ekey,
binarySearchForKey, NULL, TSDB_ORDER_ASC);
do {
preWin = endWin;
getNextTimeWindow(pInterval, &endWin, TSDB_ORDER_ASC);
} while (tsCol[(*pRowIndex) - 1] >= endWin.skey);
endWin = preWin;
if (win.ekey == endWin.ekey || (*pRowIndex) == pDataBlockInfo->rows ) {
win.ekey = endWin.ekey;
return win;
}
win.ekey = endWin.ekey;
}
}
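
getSlidingWindow() above keeps extending the calculation window while rows in the block still fall into later sliding windows. As a simpler reference for how interval/sliding windows overlap at all, the sketch below enumerates every window that contains a timestamp; it assumes plain millisecond arithmetic and ignores calendar units, unlike the real getActiveTimeWindow()/getNextTimeWindow():

#include <stdint.h>
#include <stdio.h>

// Enumerate every window [start, start + interval - 1] on a sliding grid that
// contains ts. Positive values only; this is a model, not the executor code.
static void windows_covering(int64_t ts, int64_t interval, int64_t sliding) {
  int64_t k_max = ts / sliding;                              // last grid start <= ts
  for (int64_t k = k_max; k >= 0 && k * sliding + interval - 1 >= ts; k--) {
    printf("[%lld, %lld]\n", (long long)(k * sliding), (long long)(k * sliding + interval - 1));
  }
}

int main(void) {
  // interval 10, sliding 4: ts = 13 falls into [12,21], [8,17] and [4,13].
  windows_covering(13, 10, 4);
  return 0;
}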
static bool prepareDataScan(SStreamScanInfo* pInfo, SSDataBlock* pSDB, int32_t tsColIndex, int32_t* pRowIndex) { static bool prepareDataScan(SStreamScanInfo* pInfo, SSDataBlock* pSDB, int32_t tsColIndex, int32_t* pRowIndex) {
STimeWindow win = { STimeWindow win = {
.skey = INT64_MIN, .skey = INT64_MIN,
@@ -905,10 +927,13 @@ static bool prepareDataScan(SStreamScanInfo* pInfo, SSDataBlock* pSDB, int32_t t
       setGroupId(pInfo, pSDB, GROUPID_COLUMN_INDEX, *pRowIndex);
       (*pRowIndex) += updateSessionWindowInfo(pCurWin, tsCols, NULL, pSDB->info.rows, *pRowIndex, gap, NULL);
     } else {
-      win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[*pRowIndex], &pInfo->interval, TSDB_ORDER_ASC);
       setGroupId(pInfo, pSDB, GROUPID_COLUMN_INDEX, *pRowIndex);
-      (*pRowIndex) +=
-          getNumOfRowsInTimeWindow(&pSDB->info, tsCols, *pRowIndex, win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC);
+      pInfo->updateWin.skey = tsCols[*pRowIndex];
+      win = getSlidingWindow(tsCols, &pInfo->interval, &pSDB->info, pRowIndex);
+      pInfo->updateWin.ekey = tsCols[*pRowIndex - 1];
+      // win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[*pRowIndex], &pInfo->interval, TSDB_ORDER_ASC);
+      // (*pRowIndex) +=
+      //     getNumOfRowsInTimeWindow(&pSDB->info, tsCols, *pRowIndex, win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC);
     }
     needRead = true;
   } else if (isStateWindow(pInfo)) {
@ -974,10 +999,12 @@ static SSDataBlock* doDataScan(SStreamScanInfo* pInfo, SSDataBlock* pSDB, int32_
} }
} }
if (!pResult) { if (!pResult) {
pInfo->updateWin = (STimeWindow){.skey = INT64_MIN, .ekey = INT64_MAX};
return NULL; return NULL;
} }
if (pResult->info.groupId == pInfo->groupId) { if (pResult->info.groupId == pInfo->groupId) {
pResult->info.calWin = pInfo->updateWin;
return pResult; return pResult;
} }
} }
@ -1209,6 +1236,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
/*return NULL;*/ /*return NULL;*/
/*}*/ /*}*/
qDebug("stream scan called");
if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) { if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) {
while (1) { while (1) {
SFetchRet ret = {0}; SFetchRet ret = {0};
@ -1220,6 +1248,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
} }
// TODO clean data block // TODO clean data block
if (pInfo->pRes->info.rows > 0) { if (pInfo->pRes->info.rows > 0) {
qDebug("stream scan log return %d rows", pInfo->pRes->info.rows);
return pInfo->pRes; return pInfo->pRes;
} }
} else if (ret.fetchType == FETCH_TYPE__META) { } else if (ret.fetchType == FETCH_TYPE__META) {
@ -1230,6 +1259,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
} else if (ret.fetchType == FETCH_TYPE__NONE) { } else if (ret.fetchType == FETCH_TYPE__NONE) {
pTaskInfo->streamInfo.lastStatus = ret.offset; pTaskInfo->streamInfo.lastStatus = ret.offset;
ASSERT(pTaskInfo->streamInfo.lastStatus.version + 1 >= pTaskInfo->streamInfo.prepareStatus.version); ASSERT(pTaskInfo->streamInfo.lastStatus.version + 1 >= pTaskInfo->streamInfo.prepareStatus.version);
qDebug("stream scan log return null");
return NULL; return NULL;
} else { } else {
ASSERT(0); ASSERT(0);
@@ -1237,7 +1267,12 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
     }
   } else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA) {
     SSDataBlock* pResult = doTableScan(pInfo->pTableScanOp);
-    return pResult && pResult->info.rows > 0 ? pResult : NULL;
+    if (pResult && pResult->info.rows > 0) {
+      qDebug("stream scan tsdb return %d rows", pResult->info.rows);
+      return pResult;
+    }
+    qDebug("stream scan tsdb return null");
+    return NULL;
   } else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_META) {
     // TODO scan meta
     ASSERT(0);
@@ -1256,8 +1291,13 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
     int32_t current = pInfo->validBlockIndex++;
     SSDataBlock* pBlock = taosArrayGetP(pInfo->pBlockLists, current);
     // TODO move into scan
+    pBlock->info.calWin.skey = INT64_MIN;
+    pBlock->info.calWin.ekey = INT64_MAX;
     blockDataUpdateTsWindow(pBlock, 0);
     switch (pBlock->info.type) {
+      case STREAM_NORMAL:
+      case STREAM_GET_ALL:
+        return pBlock;
       case STREAM_RETRIEVE: {
         pInfo->blockType = STREAM_INPUT__DATA_SUBMIT;
         pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RETRIEVE;
@ -1287,6 +1327,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
} }
return pBlock; return pBlock;
} else if (pInfo->blockType == STREAM_INPUT__DATA_SUBMIT) { } else if (pInfo->blockType == STREAM_INPUT__DATA_SUBMIT) {
qDebug("scan mode %d", pInfo->scanMode);
if (pInfo->scanMode == STREAM_SCAN_FROM_RES) { if (pInfo->scanMode == STREAM_SCAN_FROM_RES) {
blockDataDestroy(pInfo->pUpdateRes); blockDataDestroy(pInfo->pUpdateRes);
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
@ -1381,7 +1422,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
} }
} }
} }
qDebug("scan rows: %d", pBlockInfo->rows);
return (pBlockInfo->rows == 0) ? NULL : pInfo->pRes; return (pBlockInfo->rows == 0) ? NULL : pInfo->pRes;
#if 0 #if 0
@ -1533,6 +1574,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
pInfo->pStreamScanOp = pOperator; pInfo->pStreamScanOp = pOperator;
pInfo->deleteDataIndex = 0; pInfo->deleteDataIndex = 0;
pInfo->pDeleteDataRes = createPullDataBlock(); pInfo->pDeleteDataRes = createPullDataBlock();
pInfo->updateWin = (STimeWindow){.skey = INT64_MAX, .ekey = INT64_MAX};
pOperator->name = "StreamScanOperator"; pOperator->name = "StreamScanOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN;

View File

@ -419,6 +419,14 @@ static bool setTimeWindowInterpolationEndTs(SIntervalAggOperatorInfo* pInfo, SEx
return true; return true;
} }
bool inSlidingWindow(SInterval* pInterval, STimeWindow* pWin, SDataBlockInfo* pBlockInfo) {
if (pInterval->interval != pInterval->sliding &&
(pWin->ekey < pBlockInfo->calWin.skey || pWin->skey > pBlockInfo->calWin.ekey)) {
return false;
}
return true;
}
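
inSlidingWindow() above lets the interval operators skip windows that lie entirely outside the calculation range (calWin) carried on the incoming block, and only when interval != sliding, i.e. when windows overlap. The same predicate in isolation, with plain structs standing in for SInterval/SDataBlockInfo:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t skey, ekey; } twindow_t;

// Returns false only when windows overlap (interval != sliding) and the window
// lies entirely outside the block's calculation range.
static bool in_sliding_window(int64_t interval, int64_t sliding, twindow_t win, twindow_t cal) {
  if (interval != sliding && (win.ekey < cal.skey || win.skey > cal.ekey)) {
    return false;
  }
  return true;
}

int main(void) {
  twindow_t cal = {.skey = 100, .ekey = 200};
  printf("%d\n", in_sliding_window(10, 5, (twindow_t){50, 59}, cal));   // 0: prune
  printf("%d\n", in_sliding_window(10, 10, (twindow_t){50, 59}, cal));  // 1: tumbling, keep
  printf("%d\n", in_sliding_window(10, 5, (twindow_t){195, 204}, cal)); // 1: overlaps range, keep
  return 0;
}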
static int32_t getNextQualifiedWindow(SInterval* pInterval, STimeWindow* pNext, SDataBlockInfo* pDataBlockInfo, static int32_t getNextQualifiedWindow(SInterval* pInterval, STimeWindow* pNext, SDataBlockInfo* pDataBlockInfo,
TSKEY* primaryKeys, int32_t prevPosition, int32_t order) { TSKEY* primaryKeys, int32_t prevPosition, int32_t order) {
bool ascQuery = (order == TSDB_ORDER_ASC); bool ascQuery = (order == TSDB_ORDER_ASC);
@ -432,6 +440,10 @@ static int32_t getNextQualifiedWindow(SInterval* pInterval, STimeWindow* pNext,
return -1; return -1;
} }
if (!inSlidingWindow(pInterval, pNext, pDataBlockInfo) && order == TSDB_ORDER_ASC) {
return -1;
}
TSKEY skey = ascQuery ? pNext->skey : pNext->ekey; TSKEY skey = ascQuery ? pNext->skey : pNext->ekey;
int32_t startPos = 0; int32_t startPos = 0;
@@ -801,7 +813,8 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
   STimeWindow win = getActiveTimeWindow(pInfo->aggSup.pResultBuf, pResultRowInfo, ts, &pInfo->interval, pInfo->order);
   int32_t ret = TSDB_CODE_SUCCESS;
-  if (!pInfo->ignoreExpiredData || !isCloseWindow(&win, &pInfo->twAggSup)) {
+  if ((!pInfo->ignoreExpiredData || !isCloseWindow(&win, &pInfo->twAggSup)) &&
+      inSlidingWindow(&pInfo->interval, &win, &pBlock->info)) {
     ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx,
                                  numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
     if (ret != TSDB_CODE_SUCCESS || pResult == NULL) {
@@ -834,7 +847,8 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
     doWindowBorderInterpolation(pInfo, pBlock, pResult, &win, startPos, forwardRows, pSup);
   }
 
-  if (!pInfo->ignoreExpiredData || !isCloseWindow(&win, &pInfo->twAggSup)) {
+  if ((!pInfo->ignoreExpiredData || !isCloseWindow(&win, &pInfo->twAggSup)) &&
+      inSlidingWindow(&pInfo->interval, &win, &pBlock->info)) {
     updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &win, true);
     doApplyFunctions(pTaskInfo, pSup->pCtx, &win, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, tsCols,
                      pBlock->info.rows, numOfOutput, pInfo->order);
@@ -1278,18 +1292,25 @@ static void doClearWindows(SAggSupporter* pAggSup, SExprSupp* pSup1, SInterval*
     SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
     pGpDatas = (uint64_t*)pGpCol->pData;
   }
   int32_t step = 0;
-  for (int32_t i = 0; i < pBlock->info.rows; i += step) {
-    SResultRowInfo dumyInfo;
-    dumyInfo.cur.pageId = -1;
-    STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[i], pInterval, TSDB_ORDER_ASC);
-    step = getNumOfRowsInTimeWindow(&pBlock->info, tsCols, i, win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC);
-    uint64_t winGpId = pGpDatas ? pGpDatas[i] : pBlock->info.groupId;
-    bool res = doClearWindow(pAggSup, pSup1, (char*)&win.skey, sizeof(TKEY), winGpId, numOfOutput);
+  int32_t startPos = 0;
+  SResultRowInfo dumyInfo;
+  dumyInfo.cur.pageId = -1;
+  STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[0], pInterval, TSDB_ORDER_ASC);
+  while (1) {
+    step =
+        getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC);
+    uint64_t winGpId = pGpDatas ? pGpDatas[startPos] : pBlock->info.groupId;
+    bool res = doClearWindow(pAggSup, pSup1, (char*)&win.skey, sizeof(TSKEY), winGpId, numOfOutput);
     if (pUpWins && res) {
       SWinRes winRes = {.ts = win.skey, .groupId = winGpId};
       taosArrayPush(pUpWins, &winRes);
     }
+    int32_t prevEndPos = step - 1 + startPos;
+    startPos = getNextQualifiedWindow(pInterval, &win, &pBlock->info, tsCols, prevEndPos, TSDB_ORDER_ASC);
+    if (startPos < 0) {
+      break;
+    }
   }
 }
@@ -1332,13 +1353,13 @@ static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup,
     if (chIds && pPullDataMap) {
       SArray* chAy = *(SArray**)chIds;
       int32_t size = taosArrayGetSize(chAy);
-      qInfo("window %" PRId64 " wait child size:%d", win.skey, size);
+      qDebug("window %" PRId64 " wait child size:%d", win.skey, size);
       for (int32_t i = 0; i < size; i++) {
-        qInfo("window %" PRId64 " wait chid id:%d", win.skey, *(int32_t*)taosArrayGet(chAy, i));
+        qDebug("window %" PRId64 " wait chid id:%d", win.skey, *(int32_t*)taosArrayGet(chAy, i));
       }
       continue;
     } else if (pPullDataMap) {
-      qInfo("close window %" PRId64, win.skey);
+      qDebug("close window %" PRId64, win.skey);
     }
     SResultRowPosition* pPos = (SResultRowPosition*)pIte;
     if (pSup->calTrigger == STREAM_TRIGGER_WINDOW_CLOSE) {
@@ -2434,7 +2455,7 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc
   }
   while (1) {
     bool isClosed = isCloseWindow(&nextWin, &pInfo->twAggSup);
-    if (pInfo->ignoreExpiredData && isClosed) {
+    if ((pInfo->ignoreExpiredData && isClosed) || !inSlidingWindow(&pInfo->interval, &nextWin, &pSDataBlock->info)) {
       startPos = getNexWindowPos(&pInfo->interval, &pSDataBlock->info, tsCols, startPos, nextWin.ekey, &nextWin);
       if (startPos < 0) {
         break;
@@ -2491,8 +2512,8 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc
     if (IS_FINAL_OP(pInfo)) {
       forwardRows = 1;
     } else {
-      forwardRows = getNumOfRowsInTimeWindow(&pSDataBlock->info, tsCols, startPos, nextWin.ekey, binarySearchForKey, NULL,
-                                             TSDB_ORDER_ASC);
+      forwardRows = getNumOfRowsInTimeWindow(&pSDataBlock->info, tsCols, startPos, nextWin.ekey, binarySearchForKey,
+                                             NULL, TSDB_ORDER_ASC);
     }
     if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE && pUpdated) {
       saveResultRow(pResult, tableGroupId, pUpdated);
@ -2609,6 +2630,8 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
SExprSupp* pSup = &pOperator->exprSupp; SExprSupp* pSup = &pOperator->exprSupp;
qDebug("interval status %d %s", pOperator->status, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
if (pOperator->status == OP_EXEC_DONE) { if (pOperator->status == OP_EXEC_DONE) {
return NULL; return NULL;
} else if (pOperator->status == OP_RES_TO_RETURN) { } else if (pOperator->status == OP_RES_TO_RETURN) {
@ -2659,7 +2682,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
clearSpecialDataBlock(pInfo->pUpdateRes); clearSpecialDataBlock(pInfo->pUpdateRes);
removeDeleteResults(pUpdated, pInfo->pDelWins); removeDeleteResults(pUpdated, pInfo->pDelWins);
pOperator->status = OP_RES_TO_RETURN; pOperator->status = OP_RES_TO_RETURN;
qInfo("%s return data", IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi"); qDebug("%s return data", IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
break; break;
} }
printDataBlock(pBlock, IS_FINAL_OP(pInfo) ? "interval Final recv" : "interval Semi recv"); printDataBlock(pBlock, IS_FINAL_OP(pInfo) ? "interval Final recv" : "interval Semi recv");
@@ -3101,12 +3124,7 @@ int64_t getSessionWindowEndkey(void* data, int32_t index) {
 }
 
 bool isInTimeWindow(STimeWindow* pWin, TSKEY ts, int64_t gap) {
-  int64_t sGap = ts - pWin->skey + gap;
-  int64_t eGap = pWin->ekey - ts + gap;
-  // if ((sGap < 0 && sGap >= -gap) || (eGap < 0 && eGap >= -gap) || (sGap >= 0 && eGap >= 0)) {
-  //   return true;
-  // }
-  if (sGap >= 0 && eGap >= 0) {
+  if (ts + gap >= pWin->skey && ts - gap <= pWin->ekey) {
     return true;
   }
   return false;
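
The rewritten isInTimeWindow() states the session condition directly: ts belongs to the window when it is within gap of either edge or inside it. Away from integer overflow the old arithmetic form and the new comparison form are algebraically identical; a quick brute-force check of that claim:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t skey, ekey; } twindow_t;

static bool old_form(const twindow_t *w, int64_t ts, int64_t gap) {
  int64_t sGap = ts - w->skey + gap;
  int64_t eGap = w->ekey - ts + gap;
  return sGap >= 0 && eGap >= 0;
}

static bool new_form(const twindow_t *w, int64_t ts, int64_t gap) {
  return ts + gap >= w->skey && ts - gap <= w->ekey;
}

int main(void) {
  twindow_t w = {.skey = 100, .ekey = 200};
  for (int64_t ts = 80; ts <= 220; ts++) {
    for (int64_t gap = 0; gap <= 15; gap++) {
      assert(old_form(&w, ts, gap) == new_form(&w, ts, gap));
    }
  }
  printf("both forms agree on the sampled range\n");
  return 0;
}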


@@ -2217,7 +2217,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
   {
     .name = "last_row",
     .type = FUNCTION_TYPE_LAST_ROW,
-    .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC,
+    .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
     .translateFunc = translateFirstLast,
     .getEnvFunc = getFirstLastFuncEnv,
     .initFunc = functionSetup,


@@ -80,8 +80,10 @@ typedef struct STopBotRes {
 } STopBotRes;
 
 typedef struct SFirstLastRes {
   bool hasResult;
-  bool isNull;  // used for last_row function only
+  // used for last_row function only, isNullRes in SResultRowEntry can not be passed to downstream.So,
+  // this attribute is required
+  bool isNull;
   int32_t bytes;
   char buf[];
 } SFirstLastRes;
@ -1144,9 +1146,9 @@ static void copyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBl
static int32_t findRowIndex(int32_t start, int32_t num, SColumnInfoData* pCol, const char* tval) { static int32_t findRowIndex(int32_t start, int32_t num, SColumnInfoData* pCol, const char* tval) {
// the data is loaded, not only the block SMA value // the data is loaded, not only the block SMA value
for(int32_t i = start; i < num + start; ++i) { for (int32_t i = start; i < num + start; ++i) {
char* p = colDataGetData(pCol, i); char* p = colDataGetData(pCol, i);
if (memcpy((void*)tval, p, pCol->info.bytes) == 0) { if (memcpy((void*)tval, p, pCol->info.bytes) == 0) {
return i; return i;
} }
} }
@ -1154,7 +1156,6 @@ static int32_t findRowIndex(int32_t start, int32_t num, SColumnInfoData* pCol, c
ASSERT(0); ASSERT(0);
} }
int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
int32_t numOfElems = 0; int32_t numOfElems = 0;
@@ -1631,10 +1632,14 @@ void setNullSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t
 }
 
 void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuplePos* pTuplePos, int32_t rowIndex) {
+  if (pCtx->subsidiaries.num <= 0) {
+    return;
+  }
+
   int32_t pageId = pTuplePos->pageId;
   int32_t offset = pTuplePos->offset;
-  if (pTuplePos->pageId != -1 && pCtx->subsidiaries.num > 0) {
+  if (pTuplePos->pageId != -1) {
     int32_t numOfCols = pCtx->subsidiaries.num;
     SFilePage* pPage = getBufPage(pCtx->pBuf, pageId);
@ -1934,7 +1939,7 @@ int32_t stddevFunctionMerge(SqlFunctionCtx* pCtx) {
SStddevRes* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); SStddevRes* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
for(int32_t i = pInput->startRowIndex; i < pInput->startRowIndex + pInput->numOfRows; ++i) { for (int32_t i = pInput->startRowIndex; i < pInput->startRowIndex + pInput->numOfRows; ++i) {
char* data = colDataGetData(pCol, i); char* data = colDataGetData(pCol, i);
SStddevRes* pInputInfo = (SStddevRes*)varDataVal(data); SStddevRes* pInputInfo = (SStddevRes*)varDataVal(data);
stddevTransferInfo(pInputInfo, pInfo); stddevTransferInfo(pInputInfo, pInfo);
@@ -2945,7 +2950,7 @@ int32_t firstLastFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
   pResInfo->isNullRes = (pResInfo->numOfRes == 0) ? 1 : 0;
 
   SFirstLastRes* pRes = GET_ROWCELL_INTERBUF(pResInfo);
-  colDataAppend(pCol, pBlock->info.rows, pRes->buf, pResInfo->isNullRes);
+  colDataAppend(pCol, pBlock->info.rows, pRes->buf, pRes->isNull||pResInfo->isNullRes);
 
   // handle selectivity
   STuplePos* pTuplePos = (STuplePos*)(pRes->buf + pRes->bytes + sizeof(TSKEY));
   setSelectivityValue(pCtx, pBlock, pTuplePos, pBlock->info.rows);
@ -3508,8 +3513,7 @@ void saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pS
setBufPageDirty(pPage, true); setBufPageDirty(pPage, true);
releaseBufPage(pCtx->pBuf, pPage); releaseBufPage(pCtx->pBuf, pPage);
#ifdef BUF_PAGE_DEBUG #ifdef BUF_PAGE_DEBUG
qDebug("page_saveTuple pos:%p,pageId:%d, offset:%d\n", pPos, pPos->pageId, qDebug("page_saveTuple pos:%p,pageId:%d, offset:%d\n", pPos, pPos->pageId, pPos->offset);
pPos->offset);
#endif #endif
} }
@ -3810,7 +3814,7 @@ bool elapsedFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInfo
SElapsedInfo* pInfo = GET_ROWCELL_INTERBUF(pResultInfo); SElapsedInfo* pInfo = GET_ROWCELL_INTERBUF(pResultInfo);
pInfo->result = 0; pInfo->result = 0;
pInfo->min = MAX_TS_KEY; pInfo->min = TSKEY_MAX;
pInfo->max = 0; pInfo->max = 0;
if (pCtx->numOfParams > 1) { if (pCtx->numOfParams > 1) {
@ -3837,7 +3841,7 @@ int32_t elapsedFunction(SqlFunctionCtx* pCtx) {
} }
if (pInput->colDataAggIsSet) { if (pInput->colDataAggIsSet) {
if (pInfo->min == MAX_TS_KEY) { if (pInfo->min == TSKEY_MAX) {
pInfo->min = GET_INT64_VAL(&pAgg->min); pInfo->min = GET_INT64_VAL(&pAgg->min);
pInfo->max = GET_INT64_VAL(&pAgg->max); pInfo->max = GET_INT64_VAL(&pAgg->max);
} else { } else {
@@ -5986,24 +5990,28 @@ int32_t lastrowFunction(SqlFunctionCtx* pCtx) {
   int32_t type = pInputCol->info.type;
   int32_t bytes = pInputCol->info.bytes;
   pInfo->bytes = bytes;
 
+  // last_row function does not ignore the null value
   for (int32_t i = pInput->numOfRows + pInput->startRowIndex - 1; i >= pInput->startRowIndex; --i) {
-    if (pInputCol->hasNull && colDataIsNull_s(pInputCol, i)) {
-      continue;
-    }
-
     numOfElems++;
 
     char* data = colDataGetData(pInputCol, i);
     TSKEY cts = getRowPTs(pInput->pPTS, i);
     if (pResInfo->numOfRes == 0 || *(TSKEY*)(pInfo->buf + bytes) < cts) {
-      if (IS_VAR_DATA_TYPE(type)) {
-        bytes = varDataTLen(data);
-        pInfo->bytes = bytes;
+      if (colDataIsNull_s(pInputCol, i)) {
+        pInfo->isNull = true;
+      } else {
+        if (IS_VAR_DATA_TYPE(type)) {
+          bytes = varDataTLen(data);
+          pInfo->bytes = bytes;
+        }
+        memcpy(pInfo->buf, data, bytes);
       }
-      memcpy(pInfo->buf, data, bytes);
       *(TSKEY*)(pInfo->buf + bytes) = cts;
       pInfo->hasResult = true;
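
With this change last_row keeps tracking the newest row by timestamp even when its value is NULL, recording only the NULL flag instead of copying a payload, which is what the new isNull field in SFirstLastRes is for. Stripped of SqlFunctionCtx, the selection logic is roughly:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
  bool    has_result;
  bool    is_null;
  int64_t ts;
  int32_t value;
} last_row_t;

// Keep the row with the largest timestamp; NULL values are not skipped, they
// just mark the result as NULL (unlike last(), which ignores them).
static void last_row_update(last_row_t *res, const int64_t *ts, const int32_t *val,
                            const bool *is_null, int32_t rows) {
  for (int32_t i = rows - 1; i >= 0; i--) {
    if (!res->has_result || ts[i] > res->ts) {
      res->ts = ts[i];
      res->is_null = is_null[i];
      if (!is_null[i]) res->value = val[i];
      res->has_result = true;
    }
  }
}

int main(void) {
  int64_t ts[]    = {1, 2, 3};
  int32_t val[]   = {10, 20, 0};
  bool    nulls[] = {false, false, true};   // the newest row is NULL
  last_row_t res = {0};
  last_row_update(&res, ts, val, nulls, 3);
  printf("ts=%lld null=%d\n", (long long)res.ts, res.is_null);   // ts=3 null=1
  return 0;
}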


@ -729,6 +729,7 @@ EDealRes sclRewriteFunction(SNode** pNode, SScalarCtx *ctx) {
if (colDataIsNull_s(output.columnData, 0)) { if (colDataIsNull_s(output.columnData, 0)) {
res->node.resType.type = TSDB_DATA_TYPE_NULL; res->node.resType.type = TSDB_DATA_TYPE_NULL;
res->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_NULL].bytes;
} else { } else {
res->node.resType.type = output.columnData->info.type; res->node.resType.type = output.columnData->info.type;
res->node.resType.bytes = output.columnData->info.bytes; res->node.resType.bytes = output.columnData->info.bytes;
@ -819,6 +820,7 @@ EDealRes sclRewriteOperator(SNode** pNode, SScalarCtx *ctx) {
if (colDataIsNull_s(output.columnData, 0)) { if (colDataIsNull_s(output.columnData, 0)) {
if(node->node.resType.type != TSDB_DATA_TYPE_JSON){ if(node->node.resType.type != TSDB_DATA_TYPE_JSON){
res->node.resType.type = TSDB_DATA_TYPE_NULL; res->node.resType.type = TSDB_DATA_TYPE_NULL;
res->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_NULL].bytes;
}else{ }else{
res->node.resType = node->node.resType; res->node.resType = node->node.resType;
res->isNull = true; res->isNull = true;


@ -41,6 +41,8 @@ void schFreeTask(SSchJob *pJob, SSchTask *pTask) {
if (pTask->execNodes) { if (pTask->execNodes) {
taosHashCleanup(pTask->execNodes); taosHashCleanup(pTask->execNodes);
} }
taosMemoryFree(pTask->profile.execTime);
} }
int32_t schInitTask(SSchJob *pJob, SSchTask *pTask, SSubplan *pPlan, SSchLevel *pLevel, int32_t levelNum) { int32_t schInitTask(SSchJob *pJob, SSchTask *pTask, SSubplan *pPlan, SSchLevel *pLevel, int32_t levelNum) {


@@ -173,7 +173,8 @@ int32_t streamTaskEnqueueRetrieve(SStreamTask* pTask, SStreamRetrieveReq* pReq,
 }
 
 int32_t streamProcessDispatchReq(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pRsp) {
-  qInfo("task %d receive dispatch req from node %d task %d", pTask->taskId, pReq->upstreamNodeId, pReq->upstreamTaskId);
+  qDebug("task %d receive dispatch req from node %d task %d", pTask->taskId, pReq->upstreamNodeId,
+         pReq->upstreamTaskId);
 
   // 1. handle input
   streamTaskEnqueue(pTask, pReq, pRsp);


@ -26,10 +26,12 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, void* data, SArray* pRes)
} else if (pItem->type == STREAM_INPUT__DATA_SUBMIT) { } else if (pItem->type == STREAM_INPUT__DATA_SUBMIT) {
ASSERT(pTask->isDataScan); ASSERT(pTask->isDataScan);
SStreamDataSubmit* pSubmit = (SStreamDataSubmit*)data; SStreamDataSubmit* pSubmit = (SStreamDataSubmit*)data;
qDebug("task %d %p set submit input %p %p %d", pTask->taskId, pTask, pSubmit, pSubmit->data, *pSubmit->dataRef);
qSetStreamInput(exec, pSubmit->data, STREAM_INPUT__DATA_SUBMIT, false); qSetStreamInput(exec, pSubmit->data, STREAM_INPUT__DATA_SUBMIT, false);
} else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE) { } else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE) {
SStreamDataBlock* pBlock = (SStreamDataBlock*)data; SStreamDataBlock* pBlock = (SStreamDataBlock*)data;
SArray* blocks = pBlock->blocks; SArray* blocks = pBlock->blocks;
qDebug("task %d %p set ssdata input", pTask->taskId, pTask);
qSetMultiStreamInput(exec, blocks->pData, blocks->size, STREAM_INPUT__DATA_BLOCK, false); qSetMultiStreamInput(exec, blocks->pData, blocks->size, STREAM_INPUT__DATA_BLOCK, false);
} else if (pItem->type == STREAM_INPUT__DROP) { } else if (pItem->type == STREAM_INPUT__DROP) {
// TODO exec drop // TODO exec drop


@@ -242,13 +242,13 @@ static int32_t syncIOStopInternal(SSyncIO *io) {
 }
 
 static void *syncIOConsumerFunc(void *param) {
-  SSyncIO   *io = param;
-  STaosQall *qall;
-  SRpcMsg   *pRpcMsg, rpcMsg;
-  qall = taosAllocateQall();
+  SSyncIO   *io = param;
+  STaosQall *qall = taosAllocateQall();
+  SRpcMsg   *pRpcMsg, rpcMsg;
+  SQueueInfo qinfo = {0};
 
   while (1) {
-    int numOfMsgs = taosReadAllQitemsFromQset(io->pQset, qall, NULL, NULL);
+    int numOfMsgs = taosReadAllQitemsFromQset(io->pQset, qall, &qinfo);
     sTrace("syncIOConsumerFunc %d msgs are received", numOfMsgs);
     if (numOfMsgs <= 0) {
       break;
@@ -369,6 +369,8 @@ static void *syncIOConsumerFunc(void *param) {
       taosFreeQitem(pRpcMsg);
     }
+
+    taosUpdateItemSize(qinfo.queue, numOfMsgs);
   }
 
   taosFreeQall(qall);


@@ -31,12 +31,12 @@ void processShellMsg() {
   STaosQall *qall;
   SRpcMsg   *pRpcMsg, rpcMsg;
   int        type;
-  void      *pvnode;
+  SQueueInfo qinfo = {0};
 
   qall = taosAllocateQall();
 
   while (1) {
-    int numOfMsgs = taosReadAllQitemsFromQset(qset, qall, &pvnode, NULL);
+    int numOfMsgs = taosReadAllQitemsFromQset(qset, qall, &qinfo);
     tDebug("%d shell msgs are received", numOfMsgs);
     if (numOfMsgs <= 0) break;
@@ -86,6 +86,8 @@ void processShellMsg() {
       rpcSendResponse(&nRpcMsg);
     }
   }
+
+    taosUpdateItemSize(qinfo.queue, numOfMsgs);
   }
 
   taosFreeQall(qall);


@ -66,6 +66,7 @@ void walCloseReader(SWalReader *pRead) {
} }
int32_t walNextValidMsg(SWalReader *pRead) { int32_t walNextValidMsg(SWalReader *pRead) {
wDebug("vgId:%d wal start to fetch", pRead->pWal->cfg.vgId);
int64_t fetchVer = pRead->curVersion; int64_t fetchVer = pRead->curVersion;
int64_t endVer = pRead->cond.scanUncommited ? walGetLastVer(pRead->pWal) : walGetCommittedVer(pRead->pWal); int64_t endVer = pRead->cond.scanUncommited ? walGetLastVer(pRead->pWal) : walGetCommittedVer(pRead->pWal);
while (fetchVer <= endVer) { while (fetchVer <= endVer) {
@ -176,7 +177,7 @@ int32_t walReadSeekVerImpl(SWalReader *pRead, int64_t ver) {
return -1; return -1;
} }
wDebug("wal version reset from %ld to %ld", pRead->curVersion, ver); wDebug("wal version reset from %ld(invalid: %d) to %ld", pRead->curVersion, pRead->curInvalid, ver);
pRead->curVersion = ver; pRead->curVersion = ver;
return 0; return 0;
@ -242,6 +243,7 @@ static int32_t walFetchHeadNew(SWalReader *pRead, int64_t fetchVer) {
return -1; return -1;
} }
} }
pRead->curInvalid = 0;
return 0; return 0;
} }
@ -301,6 +303,7 @@ static int32_t walSkipFetchBodyNew(SWalReader *pRead) {
int64_t code; int64_t code;
ASSERT(pRead->curVersion == pRead->pHead->head.version); ASSERT(pRead->curVersion == pRead->pHead->head.version);
ASSERT(pRead->curInvalid == 0);
code = taosLSeekFile(pRead->pLogFile, pRead->pHead->head.bodyLen, SEEK_CUR); code = taosLSeekFile(pRead->pLogFile, pRead->pHead->head.bodyLen, SEEK_CUR);
if (code < 0) { if (code < 0) {
@ -404,6 +407,7 @@ int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead) {
} }
int32_t walReadVer(SWalReader *pRead, int64_t ver) { int32_t walReadVer(SWalReader *pRead, int64_t ver) {
wDebug("vgId:%d wal start to read ver %ld", pRead->pWal->cfg.vgId, ver);
int64_t contLen; int64_t contLen;
bool seeked = false; bool seeked = false;


@@ -115,7 +115,7 @@ bool taosQueueEmpty(STaosQueue *queue) {
   bool empty = false;
   taosThreadMutexLock(&queue->mutex);
-  if (queue->head == NULL && queue->tail == NULL) {
+  if (queue->head == NULL && queue->tail == NULL && queue->numOfItems == 0 && queue->memOfItems == 0) {
     empty = true;
   }
   taosThreadMutexUnlock(&queue->mutex);
@@ -123,6 +123,14 @@ bool taosQueueEmpty(STaosQueue *queue) {
   return empty;
 }
 
+void taosUpdateItemSize(STaosQueue *queue, int32_t items) {
+  if (queue == NULL) return;
+
+  taosThreadMutexLock(&queue->mutex);
+  queue->numOfItems -= items;
+  taosThreadMutexUnlock(&queue->mutex);
+}
+
 int32_t taosQueueItemSize(STaosQueue *queue) {
   if (queue == NULL) return 0;
@ -257,6 +265,7 @@ int32_t taosReadAllQitems(STaosQueue *queue, STaosQall *qall) {
queue->tail = NULL; queue->tail = NULL;
queue->numOfItems = 0; queue->numOfItems = 0;
queue->memOfItems = 0; queue->memOfItems = 0;
uTrace("read %d items from queue:%p, items:%d mem:%" PRId64, code, queue, queue->numOfItems, queue->memOfItems);
if (queue->qset) atomic_sub_fetch_32(&queue->qset->numOfItems, qall->numOfItems); if (queue->qset) atomic_sub_fetch_32(&queue->qset->numOfItems, qall->numOfItems);
} }
@@ -397,7 +406,7 @@ void taosRemoveFromQset(STaosQset *qset, STaosQueue *queue) {
 int32_t taosGetQueueNumber(STaosQset *qset) { return qset->numOfQueues; }
 
-int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, int64_t *ts, void **ahandle, FItem *itemFp) {
+int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, SQueueInfo *qinfo) {
   STaosQnode *pNode = NULL;
   int32_t code = 0;
@@ -417,17 +426,18 @@ int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, int64_t *ts, void
     if (queue->head) {
       pNode = queue->head;
       *ppItem = pNode->item;
-      if (ahandle) *ahandle = queue->ahandle;
-      if (itemFp) *itemFp = queue->itemFp;
-      if (ts) *ts = pNode->timestamp;
+      qinfo->ahandle = queue->ahandle;
+      qinfo->fp = queue->itemFp;
+      qinfo->queue = queue;
+      qinfo->timestamp = pNode->timestamp;
 
       queue->head = pNode->next;
       if (queue->head == NULL) queue->tail = NULL;
-      queue->numOfItems--;
+      // queue->numOfItems--;
       queue->memOfItems -= pNode->size;
       atomic_sub_fetch_32(&qset->numOfItems, 1);
       code = 1;
-      uTrace("item:%p is read out from queue:%p, items:%d mem:%" PRId64, *ppItem, queue, queue->numOfItems,
+      uTrace("item:%p is read out from queue:%p, items:%d mem:%" PRId64, *ppItem, queue, queue->numOfItems - 1,
             queue->memOfItems);
     }
@@ -440,7 +450,7 @@ int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, int64_t *ts, void
   return code;
 }
 
-int32_t taosReadAllQitemsFromQset(STaosQset *qset, STaosQall *qall, void **ahandle, FItems *itemsFp) {
+int32_t taosReadAllQitemsFromQset(STaosQset *qset, STaosQall *qall, SQueueInfo *qinfo) {
   STaosQueue *queue;
   int32_t code = 0;
@@ -461,13 +471,16 @@ int32_t taosReadAllQitemsFromQset(STaosQset *qset, STaosQall *qall, void **ahand
       qall->start = queue->head;
       qall->numOfItems = queue->numOfItems;
       code = qall->numOfItems;
-      if (ahandle) *ahandle = queue->ahandle;
-      if (itemsFp) *itemsFp = queue->itemsFp;
+      qinfo->ahandle = queue->ahandle;
+      qinfo->fp = queue->itemsFp;
+      qinfo->queue = queue;
 
       queue->head = NULL;
       queue->tail = NULL;
-      queue->numOfItems = 0;
+      // queue->numOfItems = 0;
       queue->memOfItems = 0;
+      uTrace("read %d items from queue:%p, items:0 mem:%" PRId64, code, queue, queue->memOfItems);
+
       atomic_sub_fetch_32(&qset->numOfItems, qall->numOfItems);
 
       for (int32_t j = 1; j < qall->numOfItems; ++j) {
         tsem_wait(&qset->sem);
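
The queue reads above no longer decrement numOfItems at dequeue time; the consumer acknowledges finished work afterwards via taosUpdateItemSize(), so taosQueueEmpty()/taosQueueItemSize() keep counting in-flight items. A self-contained model of that read, dispatch, then acknowledge flow (a toy one-item queue, not the STaosQset API):

#include <stdint.h>
#include <stdio.h>

// Toy queue: one pending item and a counter that is only decremented once the
// consumer has finished with the item, so "empty" checks do not race with
// in-flight work.
typedef struct {
  const char *item;
  int32_t     num_items;
} queue_t;

static int read_item(queue_t *q, const char **item) {   // cf. taosReadQitemFromQset()
  if (q->num_items == 0 || q->item == NULL) return 0;
  *item = q->item;
  q->item = NULL;              // removed from the list, but num_items is untouched
  return 1;
}

static void update_item_size(queue_t *q, int32_t n) {   // cf. taosUpdateItemSize()
  q->num_items -= n;
}

int main(void) {
  queue_t q = {.item = "msg", .num_items = 1};
  const char *item = NULL;
  while (read_item(&q, &item)) {
    printf("processing %s (queued: %d)\n", item, q.num_items);  // still counted as queued
    update_item_size(&q, 1);
  }
  printf("queued after ack: %d\n", q.num_items);
  return 0;
}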


@@ -70,27 +70,27 @@ void tQWorkerCleanup(SQWorkerPool *pool) {
 static void *tQWorkerThreadFp(SQWorker *worker) {
   SQWorkerPool *pool = worker->pool;
-  FItem   fp = NULL;
-  void   *msg = NULL;
-  void   *ahandle = NULL;
-  int32_t code = 0;
-  int64_t ts = 0;
+  SQueueInfo qinfo = {0};
+  void      *msg = NULL;
+  int32_t    code = 0;
 
   taosBlockSIGPIPE();
   setThreadName(pool->name);
   uDebug("worker:%s:%d is running", pool->name, worker->id);
 
   while (1) {
-    if (taosReadQitemFromQset(pool->qset, (void **)&msg, &ts, &ahandle, &fp) == 0) {
+    if (taosReadQitemFromQset(pool->qset, (void **)&msg, &qinfo) == 0) {
       uDebug("worker:%s:%d qset:%p, got no message and exiting", pool->name, worker->id, pool->qset);
       break;
     }
 
-    if (fp != NULL) {
-      SQueueInfo info = {.ahandle = ahandle, .workerId = worker->id, .threadNum = pool->num, .timestamp = ts};
-      (*fp)(&info, msg);
+    if (qinfo.fp != NULL) {
+      qinfo.workerId = worker->id;
+      qinfo.threadNum = pool->num;
+      (*((FItem)qinfo.fp))(&qinfo, msg);
     }
+
+    taosUpdateItemSize(qinfo.queue, 1);
   }
 
   return NULL;
@@ -195,28 +195,28 @@ void tWWorkerCleanup(SWWorkerPool *pool) {
 static void *tWWorkerThreadFp(SWWorker *worker) {
   SWWorkerPool *pool = worker->pool;
-  FItems  fp = NULL;
-  void   *msg = NULL;
-  void   *ahandle = NULL;
-  int32_t numOfMsgs = 0;
-  int32_t qtype = 0;
+  SQueueInfo qinfo = {0};
+  void      *msg = NULL;
+  int32_t    code = 0;
+  int32_t    numOfMsgs = 0;
 
   taosBlockSIGPIPE();
   setThreadName(pool->name);
   uDebug("worker:%s:%d is running", pool->name, worker->id);
 
   while (1) {
-    numOfMsgs = taosReadAllQitemsFromQset(worker->qset, worker->qall, &ahandle, &fp);
+    numOfMsgs = taosReadAllQitemsFromQset(worker->qset, worker->qall, &qinfo);
     if (numOfMsgs == 0) {
       uDebug("worker:%s:%d qset:%p, got no message and exiting", pool->name, worker->id, worker->qset);
       break;
     }
 
-    if (fp != NULL) {
-      SQueueInfo info = {.ahandle = ahandle, .workerId = worker->id, .threadNum = pool->num};
-      (*fp)(&info, worker->qall, numOfMsgs);
+    if (qinfo.fp != NULL) {
+      qinfo.workerId = worker->id;
+      qinfo.threadNum = pool->num;
+      (*((FItems)qinfo.fp))(&qinfo, worker->qall, numOfMsgs);
    }
+
+    taosUpdateItemSize(qinfo.queue, numOfMsgs);
  }
 
   return NULL;


@@ -96,11 +96,11 @@
 ./test.sh -f tsim/stream/basic2.sim
 ./test.sh -f tsim/stream/drop_stream.sim
 ./test.sh -f tsim/stream/distributeInterval0.sim
-# ./test.sh -f tsim/stream/distributeIntervalRetrive0.sim
+./test.sh -f tsim/stream/distributeIntervalRetrive0.sim
 # ./test.sh -f tsim/stream/distributesession0.sim
 ./test.sh -f tsim/stream/session0.sim
 ./test.sh -f tsim/stream/session1.sim
-# ./test.sh -f tsim/stream/state0.sim
+./test.sh -f tsim/stream/state0.sim
 ./test.sh -f tsim/stream/triggerInterval0.sim
 # ./test.sh -f tsim/stream/triggerSession0.sim
 ./test.sh -f tsim/stream/partitionby.sim
@@ -170,6 +170,7 @@
 # --- valgrind
 ./test.sh -f tsim/valgrind/checkError1.sim
 ./test.sh -f tsim/valgrind/checkError2.sim
+./test.sh -f tsim/valgrind/checkError3.sim
 
 # --- vnode
 # ./test.sh -f tsim/vnode/replica3_basic.sim


@ -89,5 +89,10 @@ endi
#TODO: MOVE IT TO NORMAL CASE #TODO: MOVE IT TO NORMAL CASE
sql_error select * from tb1 where not (null); sql_error select * from tb1 where not (null);
sql select sum(1/0) from tb1;
if $rows != 1 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode1 -s stop -x SIGINT


@@ -76,7 +76,7 @@ if $data01 != 5 then
   goto loop1
 endi
 
-if $data02 != 14 then
+if $data02 != 38 then
   print =====data02=$data02
   goto loop1
 endi
@@ -134,7 +134,7 @@ if $data01 != 6 then
   goto loop2
 endi
 
-if $data02 != 18 then
+if $data02 != 42 then
   print =====data02=$data02
   goto loop2
 endi
@@ -192,7 +192,7 @@ if $data01 != 7 then
   goto loop3
 endi
 
-if $data02 != 22 then
+if $data02 != 46 then
   print =====data02=$data02
   goto loop3
 endi
@ -232,60 +232,4 @@ endi
print loop3 over print loop3 over
$loop_count = 0
loop4:
sleep 1000
sql select * from streamtST1;
$loop_count = $loop_count + 1
if $loop_count == 10 then
return -1
endi
# row 0
if $data01 != 7 then
print =====data01=$data01
goto loop4
endi
if $data02 != 22 then
print =====data02=$data02
goto loop4
endi
# row 1
if $data11 != 3 then
print =====data11=$data11
goto loop4
endi
if $data12 != 10 then
print =====data12=$data12
goto loop4
endi
#row2
if $data21 != 3 then
print =====data21=$data21
goto loop4
endi
if $data22 != 11 then
print =====data22=$data22
goto loop4
endi
#row 3
if $data31 != 5 then
print =====data31=$data31
goto loop4
endi
if $data32 != 60 then
print =====data32=$data32
goto loop4
endi
print loop4 over
system sh/stop_dnodes.sh system sh/stop_dnodes.sh


@ -16,18 +16,18 @@ class TDTestCase:
self.rowNum = 10 self.rowNum = 10
self.ts = 1640966400000 # 2022-1-1 00:00:00.000 self.ts = 1640966400000 # 2022-1-1 00:00:00.000
def check_customize_param_ms(self): def check_customize_param_ms(self):
time_zone = time.strftime('%z') time_zone = time.strftime('%z')
tdSql.execute('create database db1 precision "ms"') tdSql.execute('create database db1 precision "ms"')
tdSql.execute('use db1') tdSql.execute('use db1')
tdSql.execute('create table if not exists ntb(ts timestamp, c1 int, c2 timestamp)') tdSql.execute('create table if not exists ntb(ts timestamp, c1 int, c2 timestamp)')
for i in range(self.rowNum): for i in range(self.rowNum):
tdSql.execute("insert into ntb values(%d, %d, %d)" tdSql.execute("insert into ntb values(%d, %d, %d)"
% (self.ts + i, i + 1, self.ts + i)) % (self.ts + i, i + 1, self.ts + i))
tdSql.query('select to_iso8601(ts) from ntb')
for i in range(self.rowNum):
tdSql.checkEqual(tdSql.queryResult[i][0],f'2022-01-01T00:00:00.00{i}{time_zone}')
timezone_list = ['+0000','+0100','+0200','+0300','+0330','+0400','+0500','+0530','+0600','+0700','+0800','+0900','+1000','+1100','+1200',\
'+00','+01','+02','+03','+04','+05','+06','+07','+08','+09','+10','+11','+12',\
'+00:00','+01:00','+02:00','+03:00','+03:30','+04:00','+05:00','+05:30','+06:00','+07:00','+08:00','+09:00','+10:00','+11:00','+12:00',\
@ -39,7 +39,7 @@ class TDTestCase:
tdSql.query(f'select to_iso8601(ts,"{j}") from ntb')
for i in range(self.rowNum):
tdSql.checkEqual(tdSql.queryResult[i][0],f'2022-01-01T00:00:00.00{i}{j}')
error_param_list = [0,100.5,'a','!']
for i in error_param_list:
tdSql.error(f'select to_iso8601(ts,"{i}") from ntb')
@ -47,7 +47,7 @@ class TDTestCase:
error_timezone_param = ['+13','-13','+1300','-1300','+0001','-0001','-0330','-0530']
for i in error_timezone_param:
tdSql.error(f'select to_iso8601(ts,"{i}") from ntb')
def check_base_function(self):
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create tables==========")
@ -75,12 +75,12 @@ class TDTestCase:
tdSql.query("select to_iso8601(ts) from ntb")
tdSql.checkRows(3)
tdSql.query("select to_iso8601(ts) from db.ntb")
tdSql.query("select to_iso8601(today()) from ntb")
tdSql.checkRows(3)
tdSql.query("select to_iso8601(now()) from ntb")
tdSql.checkRows(3)
tdSql.error("select to_iso8601(timezone()) from ntb")
tdSql.error("select to_iso8601('abc') from ntb")
@ -104,7 +104,7 @@ class TDTestCase:
for i in err_param:
tdSql.error(f"select to_iso8601({i}) from ntb")
tdSql.error(f"select to_iso8601({i}) from db.ntb")
tdSql.query("select to_iso8601(now) from stb")
tdSql.checkRows(3)
tdSql.query("select to_iso8601(now()) from stb")
@ -126,7 +126,7 @@ class TDTestCase:
tdSql.query(f"select to_iso8601(today()) {i}null from db.stb")
tdSql.checkRows(3)
tdSql.checkData(0,0,None)
def run(self): # sourcery skip: extract-duplicate-method
self.check_base_function()
self.check_customize_param_ms()

View File

@ -58,7 +58,7 @@ class TDTestCase:
tag_sql += f"{k} {v},"
create_stb_sql = f'create table {stbname} ({column_sql[:-1]}) tags({tag_sql[:-1]})'
return create_stb_sql
def data_check(self,column_dict={},tbname = '',values_list = [],tb_num = 1,tb = 'tb',precision = 'ms'):
for k,v in column_dict.items():
num_up = 0
@ -175,7 +175,7 @@ class TDTestCase:
tdSql.execute('drop database db')
def run(self): # sourcery skip: extract-duplicate-method
self.today_check_ntb()
self.today_check_stb_tb()

View File

@ -47,7 +47,7 @@ class TDTestCase:
c9 = "'nchar_val'"
c10 = ts
tdSql.execute(f" insert into {tbname} values ({ts},{c1},{c2},{c3},{c4},{c5},{c6},{c7},{c8},{c9},{c10})")
tdSql.execute("use test")
tbnames = ["stb", "sub_tb_1"]
support_types = ["BIGINT", "SMALLINT", "TINYINT", "FLOAT", "DOUBLE", "INT"]
@ -60,7 +60,7 @@ class TDTestCase:
origin_sql = "select {} from {} order by tbname".format(colname, tbname)
if coltype[1] in support_types:
self.check_result_auto(origin_sql , abs_sql)
def prepare_datas(self):
tdSql.execute(

View File

@ -47,7 +47,7 @@ class TDTestCase:
c9 = "'nchar_val'"
c10 = ts
tdSql.execute(f" insert into {tbname} values ({ts},{c1},{c2},{c3},{c4},{c5},{c6},{c7},{c8},{c9},{c10})")
tdSql.execute("use test")
tbnames = ["stb", "sub_tb_1"]
support_types = ["BIGINT", "SMALLINT", "TINYINT", "FLOAT", "DOUBLE", "INT"]
@ -62,7 +62,7 @@ class TDTestCase:
cols = random.sample(colnames,3)
self.check_function("&",False,tbname,cols[0],cols[1],cols[2])
self.check_function("|",False,tbname,cols[0],cols[1],cols[2])
def prepare_datas(self):
tdSql.execute(
@ -215,14 +215,14 @@ class TDTestCase:
"abs value check pass , it work as expected ,sql is \"%s\" " % abs_query)
def check_function(self, opera ,agg, tbname , *args):
if opera =="&":
pass
elif opera =="|":
pass
else:
pass
work_sql = " select "
for ind , arg in enumerate(args):
if ind ==len(args)-1:
work_sql += f"cast({arg} as bigint) "
@ -235,7 +235,7 @@ class TDTestCase:
work_sql+= f" from {tbname} "
tdSql.query(work_sql)
work_result = tdSql.queryResult
origin_sql = " select "
for ind , arg in enumerate(args):
if ind ==len(args)-1:
@ -323,7 +323,7 @@ class TDTestCase:
tdSql.checkData(0,0,None)
tdSql.checkData(1,0,640)
tdSql.checkData(10,0,0)
# used for regular table
tdSql.query("select abs(c1)&c3&c3 from t1")
tdSql.checkData(0, 0, None)
@ -349,7 +349,7 @@ class TDTestCase:
self.check_function("&",False,"stb1","c1","floor(t1)","abs(c1+c2)","t1+1")
self.check_function("&",True,"stb1","max(c1)","min(floor(t1))","sum(abs(c1+c2))","last(t1)+1")
self.check_function("&",False,"stb1","abs(abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))))","floor(t1)","abs(c1+c2)","t1+1")
# mix with common col
tdSql.query("select c1&abs(c1)&c2&c3 ,c1,c2, t1 from ct1")
tdSql.checkData(0, 0, 8)
@ -388,7 +388,7 @@ class TDTestCase:
# agg functions mix with agg functions
tdSql.query("select sum(c1&abs(c1)&c2&c3) ,max(c5), count(c5) from stb1")
tdSql.query("select max(c1)&max(c2)|first(ts), count(c5) from ct1")
# bug fix for compute
@ -409,7 +409,7 @@ class TDTestCase:
tdSql.checkData(1, 2, 894.900000000)
def check_boundary_values(self):
@ -490,7 +490,7 @@ class TDTestCase:
self.check_function("&", False ,"ct4","123","abs(c1)","t1","abs(t2)","abs(t3)","abs(t4)","t5")
self.check_function("&", False ,"ct4","c1+2","abs(t2+2)","t3","abs(t4)","abs(t5)","abs(c1)","t5")
tdSql.query(" select sum(c1) from stb1 where t1+10 >1; ")
tdSql.query("select c1 ,t1 from stb1 where t1 =0 ")
tdSql.checkRows(13)
self.check_function("&", False ,"t1","c1+2","abs(c2)")
@ -534,7 +534,7 @@ class TDTestCase:
self.support_super_table_test()
self.insert_datas_and_check_abs(self.tb_nums,self.row_nums,self.time_step)
def stop(self):
tdSql.close()

View File

@ -44,7 +44,7 @@ class TDTestCase:
'col12': f'binary({self.binary_length})',
'col13': f'nchar({self.nchar_length})'
}
self.tag_dict = {
'ts_tag' : 'timestamp',
't1': 'tinyint',
@ -79,9 +79,9 @@ class TDTestCase:
self.tag_values = [
f'{self.tag_ts},{self.tag_tinyint},{self.tag_smallint},{self.tag_int},{self.tag_bigint},\
{self.tag_utint},{self.tag_usint},{self.tag_uint},{self.tag_ubint},{self.tag_float},{self.tag_double},{self.tag_bool},"{self.binary_str}","{self.nchar_str}"'
]
self.percent = [1,50,100]
self.param_list = ['default','t-digest']
def insert_data(self,column_dict,tbname,row_num):
@ -90,7 +90,7 @@ class TDTestCase:
insert_list = []
self.setsql.insert_values(column_dict,i,insert_sql,insert_list,self.ts)
def function_check_ntb(self):
tdSql.prepare()
tdSql.execute(self.setsql.set_create_normaltable_sql(self.ntbname,self.column_dict))
@ -126,7 +126,7 @@ class TDTestCase:
def run(self):
self.function_check_ntb()
self.function_check_stb()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)

View File

@ -48,7 +48,7 @@ class TDTestCase:
'col12': 'binary(20)',
'col13': 'nchar(20)'
}
self.param_list = [1,100]
def insert_data(self,column_dict,tbname,row_num):
insert_sql = self.setsql.set_insertsql(column_dict,tbname,self.binary_str,self.nchar_str)
@ -125,11 +125,11 @@ class TDTestCase:
self.bottom_check_data(f'{stbname}_{i}','child_table')
self.bottom_check_data(f'{stbname}','stable')
tdSql.execute(f'drop database {self.dbname}')
def run(self):
self.bottom_check_ntb()
self.bottom_check_stb()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)

View File

@ -9,14 +9,14 @@ from util.sql import *
from util.cases import *
class TDTestCase:
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
def prepare_datas(self):
tdSql.execute(
'''create table stb1
@ -24,7 +24,7 @@ class TDTestCase:
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
@ -66,14 +66,14 @@ class TDTestCase:
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
def check_result_auto(self ,origin_query , ceil_query):
pass
ceil_result = tdSql.getResult(ceil_query)
origin_result = tdSql.getResult(origin_query)
auto_result =[]
for row in origin_result:
row_check = []
for elem in row:
@ -88,13 +88,13 @@ class TDTestCase:
for row_index , row in enumerate(ceil_result):
for col_index , elem in enumerate(row):
if auto_result[row_index][col_index] != elem:
check_status = False
if not check_status:
tdLog.notice("ceil function value has not as expected , sql is \"%s\" "%ceil_query )
sys.exit(1)
else:
tdLog.info("ceil value check pass , it work as expected ,sql is \"%s\" "%ceil_query )
def test_errors(self):
error_sql_lists = [
"select ceil from t1",
@ -128,42 +128,42 @@ class TDTestCase:
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
def support_types(self):
type_error_sql_lists = [
"select ceil(ts) from t1" ,
"select ceil(c7) from t1",
"select ceil(c8) from t1",
"select ceil(c9) from t1",
"select ceil(ts) from ct1" ,
"select ceil(c7) from ct1",
"select ceil(c8) from ct1",
"select ceil(c9) from ct1",
"select ceil(ts) from ct3" ,
"select ceil(c7) from ct3",
"select ceil(c8) from ct3",
"select ceil(c9) from ct3",
"select ceil(ts) from ct4" ,
"select ceil(c7) from ct4",
"select ceil(c8) from ct4",
"select ceil(c9) from ct4",
"select ceil(ts) from stb1" ,
"select ceil(c7) from stb1",
"select ceil(c8) from stb1",
"select ceil(c9) from stb1" ,
"select ceil(ts) from stbbb1" ,
"select ceil(c7) from stbbb1",
"select ceil(ts) from tbname",
"select ceil(c9) from tbname"
]
for type_sql in type_error_sql_lists:
tdSql.error(type_sql)
type_sql_lists = [
"select ceil(c1) from t1",
"select ceil(c2) from t1",
@ -193,16 +193,16 @@ class TDTestCase:
"select ceil(c5) from stb1",
"select ceil(c6) from stb1",
"select ceil(c6) as alisb from stb1",
"select ceil(c6) alisb from stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
def basic_ceil_function(self):
# basic query
tdSql.query("select c1 from ct3")
tdSql.checkRows(0)
tdSql.query("select c1 from t1")
@ -222,7 +222,7 @@ class TDTestCase:
tdSql.query("select ceil(c5) from ct3")
tdSql.checkRows(0)
tdSql.query("select ceil(c6) from ct3")
# used for regular table
tdSql.query("select ceil(c1) from t1")
tdSql.checkData(0, 0, None)
@ -240,7 +240,7 @@ class TDTestCase:
tdSql.checkData(5, 5, None)
self.check_result_auto( "select c1, c2, c3 , c4, c5 from t1", "select (c1), ceil(c2) ,ceil(c3), ceil(c4), ceil(c5) from t1")
# used for sub table
tdSql.query("select ceil(c1) from ct1")
tdSql.checkData(0, 0, 8)
@ -252,20 +252,20 @@ class TDTestCase:
self.check_result_auto( "select c1, c2, c3 , c4, c5 from ct1", "select (c1), ceil(c2) ,ceil(c3), ceil(c4), ceil(c5) from ct1")
self.check_result_auto("select ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(c1)))))))))) nest_col_func from ct1;","select c1 from ct1" )
# used for stable table
tdSql.query("select ceil(c1) from stb1")
tdSql.checkRows(25)
self.check_result_auto( "select c1, c2, c3 , c4, c5 from ct4 ", "select (c1), ceil(c2) ,ceil(c3), ceil(c4), ceil(c5) from ct4")
self.check_result_auto("select ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(c1)))))))))) nest_col_func from ct4;" , "select c1 from ct4" )
# used for not exists table
tdSql.error("select ceil(c1) from stbbb1")
tdSql.error("select ceil(c1) from tbname")
tdSql.error("select ceil(c1) from ct5")
# mix with common col
tdSql.query("select c1, ceil(c1) from ct1")
tdSql.checkData(0 , 0 ,8)
tdSql.checkData(0 , 1 ,8)
@ -290,7 +290,7 @@ class TDTestCase:
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(0 , 2 ,None)
tdSql.checkData(0 , 3 ,None)
tdSql.checkData(3 , 0 , 6)
tdSql.checkData(3 , 1 , 6)
tdSql.checkData(3 , 2 ,6.66000)
@ -311,7 +311,7 @@ class TDTestCase:
tdSql.query("select max(c5), count(c5) from stb1")
tdSql.query("select max(c5), count(c5) from ct1")
# bug fix for count
tdSql.query("select count(c1) from ct4 ")
tdSql.checkData(0,0,9)
@ -322,7 +322,7 @@ class TDTestCase:
tdSql.query("select count(*) from stb1 ")
tdSql.checkData(0,0,25)
# bug fix for compute
tdSql.query("select c1, abs(c1) -0 ,ceil(c1)-0 from ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
@ -373,10 +373,10 @@ class TDTestCase:
tdSql.checkData(0,3,8.000000000)
tdSql.checkData(0,4,7.900000000)
tdSql.checkData(0,5,3.000000000)
def ceil_Arithmetic(self):
pass
def check_boundary_values(self):
tdSql.execute("drop database if exists bound_test")
@ -405,14 +405,14 @@ class TDTestCase:
tdSql.execute(
f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
self.check_result_auto( "select c1, c2, c3 , c4, c5 ,c6 from sub1_bound ", "select ceil(c1), ceil(c2) ,ceil(c3), ceil(c4), ceil(c5) ,ceil(c6) from sub1_bound")
self.check_result_auto( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select ceil(c1), ceil(c2) ,ceil(c3), ceil(c3), ceil(c2) ,ceil(c1) from sub1_bound")
self.check_result_auto("select ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(c1)))))))))) nest_col_func from sub1_bound;" , "select ceil(c1) from sub1_bound" )
# check basic elem for table per row
tdSql.query("select ceil(c1+0.2) ,ceil(c2) , ceil(c3+0.3) , ceil(c4-0.3), ceil(c5/2), ceil(c6/2) from sub1_bound ")
tdSql.checkData(0, 0, 2147483648.000000000)
@ -426,7 +426,7 @@ class TDTestCase:
tdSql.checkData(4, 4, -169499995645668991474575059260979281920.000000000)
self.check_result_auto("select c1+1 ,c2 , c3*1 , c4/2, c5/2, c6 from sub1_bound" ,"select ceil(c1+1) ,ceil(c2) , ceil(c3*1) , ceil(c4/2), ceil(c5)/2, ceil(c6) from sub1_bound ")
def support_super_table_test(self):
tdSql.execute(" use db ")
self.check_result_auto( " select c5 from stb1 order by ts " , "select ceil(c5) from stb1 order by ts" )
@ -444,26 +444,26 @@ class TDTestCase:
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table ==============")
self.prepare_datas()
tdLog.printNoPrefix("==========step2:test errors ==============")
self.test_errors()
tdLog.printNoPrefix("==========step3:support types ============")
self.support_types()
tdLog.printNoPrefix("==========step4: ceil basic query ============")
self.basic_ceil_function()
tdLog.printNoPrefix("==========step5: ceil boundary query ============")
self.check_boundary_values()
tdLog.printNoPrefix("==========step6: ceil filter query ============")
self.abs_func_filter()

View File

@ -9,13 +9,13 @@ from util.cases import *
class TDTestCase:
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, powSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
def prepare_datas(self):
tdSql.execute(
'''create table stb1
@ -23,7 +23,7 @@ class TDTestCase:
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
@ -65,14 +65,14 @@ class TDTestCase:
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
def check_result_auto_cos(self ,origin_query , pow_query):
pow_result = tdSql.getResult(pow_query)
origin_result = tdSql.getResult(origin_query)
auto_result =[]
for row in origin_result:
row_check = []
for elem in row:
@ -90,7 +90,7 @@ class TDTestCase:
if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None):
check_status = False
elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
check_status = False
else:
pass
if not check_status:
@ -98,7 +98,7 @@ class TDTestCase:
sys.exit(1)
else:
tdLog.info("cos value check pass , it work as expected ,sql is \"%s\" "%pow_query )
def test_errors(self):
error_sql_lists = [
"select cos from t1",
@ -132,42 +132,42 @@ class TDTestCase:
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
def support_types(self):
type_error_sql_lists = [
"select cos(ts) from t1" ,
"select cos(c7) from t1",
"select cos(c8) from t1",
"select cos(c9) from t1",
"select cos(ts) from ct1" ,
"select cos(c7) from ct1",
"select cos(c8) from ct1",
"select cos(c9) from ct1",
"select cos(ts) from ct3" ,
"select cos(c7) from ct3",
"select cos(c8) from ct3",
"select cos(c9) from ct3",
"select cos(ts) from ct4" ,
"select cos(c7) from ct4",
"select cos(c8) from ct4",
"select cos(c9) from ct4",
"select cos(ts) from stb1" ,
"select cos(c7) from stb1",
"select cos(c8) from stb1",
"select cos(c9) from stb1" ,
"select cos(ts) from stbbb1" ,
"select cos(c7) from stbbb1",
"select cos(ts) from tbname",
"select cos(c9) from tbname"
]
for type_sql in type_error_sql_lists:
tdSql.error(type_sql)
type_sql_lists = [
"select cos(c1) from t1",
"select cos(c2) from t1",
@ -197,16 +197,16 @@ class TDTestCase:
"select cos(c5) from stb1",
"select cos(c6) from stb1",
"select cos(c6) as alisb from stb1",
"select cos(c6) alisb from stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
def basic_cosin_function(self):
# basic query
tdSql.query("select c1 from ct3")
tdSql.checkRows(0)
tdSql.query("select c1 from t1")
@ -247,7 +247,7 @@ class TDTestCase:
tdSql.checkData(5, 5, None)
self.check_result_auto_cos( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from t1", "select cos(abs(c1)), cos(abs(c2)) ,cos(abs(c3)), cos(abs(c4)), cos(abs(c5)) from t1")
# used for sub table
tdSql.query("select c2 ,cos(c2) from ct1")
tdSql.checkData(0, 1, 0.975339851)
@ -263,7 +263,7 @@ class TDTestCase:
tdSql.checkData(5 , 2, None)
self.check_result_auto_cos( "select c1, c2, c3 , c4, c5 from ct1", "select cos(c1), cos(c2) ,cos(c3), cos(c4), cos(c5) from ct1")
# nest query for cos functions
tdSql.query("select c4 , cos(c4) ,cos(cos(c4)) , cos(cos(cos(c4))) from ct1;")
tdSql.checkData(0 , 0 , 88)
@ -281,21 +281,21 @@ class TDTestCase:
tdSql.checkData(11 , 2 , 0.999207254)
tdSql.checkData(11 , 3 , 0.540969209)
# used for stable table
tdSql.query("select cos(c1) from stb1")
tdSql.checkRows(25)
# used for not exists table
tdSql.error("select cos(c1) from stbbb1")
tdSql.error("select cos(c1) from tbname")
tdSql.error("select cos(c1) from ct5")
# mix with common col
tdSql.query("select c1, cos(c1) from ct1")
tdSql.query("select c2, cos(c2) from ct4")
# mix with common functions
tdSql.query("select c1, cos(c1),cos(c1), cos(cos(c1)) from ct4 ")
@ -303,7 +303,7 @@ class TDTestCase:
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(0 , 2 ,None)
tdSql.checkData(0 , 3 ,None)
tdSql.checkData(3 , 0 , 6)
tdSql.checkData(3 , 1 ,0.960170287)
tdSql.checkData(3 , 2 ,0.960170287)
@ -324,8 +324,8 @@ class TDTestCase:
tdSql.query("select max(c5), count(c5) from stb1")
tdSql.query("select max(c5), count(c5) from ct1")
# # bug fix for compute
tdSql.query("select c1, cos(c1) -0 ,cos(c1-4)-0 from ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
@ -394,10 +394,10 @@ class TDTestCase:
tdSql.checkData(0,3,8.000000000)
tdSql.checkData(0,4,7.900000000)
tdSql.checkData(0,5,0.000000000)
def pow_Arithmetic(self):
pass
def check_boundary_values(self):
PI=3.1415926
@ -426,11 +426,11 @@ class TDTestCase:
f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
self.check_result_auto_cos( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from sub1_bound ", "select cos(abs(c1)), cos(abs(c2)) ,cos(abs(c3)), cos(abs(c4)), cos(abs(c5)) from sub1_bound")
self.check_result_auto_cos( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select cos(c1), cos(c2) ,cos(c3), cos(c3), cos(c2) ,cos(c1) from sub1_bound")
self.check_result_auto_cos("select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from sub1_bound" , "select cos(abs(c1)) from sub1_bound" )
# check basic elem for table per row
tdSql.query("select cos(abs(c1)) ,cos(abs(c2)) , cos(abs(c3)) , cos(abs(c4)), cos(abs(c5)), cos(abs(c6)) from sub1_bound ")
tdSql.checkData(0,0,math.cos(2147483647))
@ -489,36 +489,36 @@ class TDTestCase:
self.check_result_auto_cos( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select cos(t1) ,cos(c5) from stb1 where c1 > 0 order by tbname" )
self.check_result_auto_cos( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select cos(t1) , cos(c5) from stb1 where c1 > 0 order by tbname" )
pass
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table ==============")
self.prepare_datas()
tdLog.printNoPrefix("==========step2:test errors ==============")
self.test_errors()
tdLog.printNoPrefix("==========step3:support types ============")
self.support_types()
tdLog.printNoPrefix("==========step4: cos basic query ============")
self.basic_cosin_function()
tdLog.printNoPrefix("==========step5: big number cos query ============")
self.test_big_number()
tdLog.printNoPrefix("==========step6: cos boundary query ============")
self.check_boundary_values()
tdLog.printNoPrefix("==========step7: cos filter query ============")
self.abs_func_filter()

View File

@ -13,24 +13,24 @@ class TDTestCase:
def run(self):
tdSql.prepare()
tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
tdSql.execute("create table stb_1 using stb tags('beijing')")
tdSql.execute("create table stb_2 using stb tags('shanghai')")
tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''')
for i in range(self.rowNum):
tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
tdSql.execute("insert into stb_2 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
for i in range(self.rowNum):
tdSql.execute("insert into ntb values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
tdSql.query("select count(*) from stb")
tdSql.checkData(0,0,20)
tdSql.query("select count(*) from db.stb")
@ -95,7 +95,7 @@ class TDTestCase:
tdSql.query("select count(ts) from db.stb_1")
tdSql.checkData(0,0,10)
tdSql.query("select count(ts) from db.stb_1")
tdSql.checkData(0,0,10)
tdSql.query("select count(col1) from stb_1")
@ -171,7 +171,7 @@ class TDTestCase:
tdSql.query("select count(col1),count(ts) from stb")
tdSql.checkData(0,0,20)
tdSql.checkData(0,1,21)
tdSql.query("select count(col1) from db.stb")
tdSql.checkData(0,0,20)
tdSql.query("select count(col1),count(ts) from db.stb")
@ -184,7 +184,7 @@ class TDTestCase:
tdSql.query("select count(col1) from stb group by col7")
tdSql.checkRows(3)
def stop(self):
@ -193,4 +193,4 @@ class TDTestCase:
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -419,7 +419,7 @@ class TDTestCase:
tdSql.checkData(3,0,4)
tdSql.query("select csum(abs(c1))+2 from t1 ")
tdSql.checkRows(4)
def csum_support_stable(self):
tdSql.query(" select csum(1) from stb1 ")
tdSql.checkRows(70)
@ -434,17 +434,17 @@ class TDTestCase:
tdSql.query("select csum(st1+c1) from stb1 partition by tbname")
tdSql.checkRows(40)
# # bug need fix
# tdSql.query("select csum(st1+c1) from stb1 partition by tbname slimit 1 ")
# tdSql.checkRows(4)
# tdSql.error("select csum(st1+c1) from stb1 partition by tbname limit 1 ")
# bug need fix
tdSql.query("select csum(st1+c1) from stb1 partition by tbname")
tdSql.checkRows(40)
# bug need fix
# tdSql.query("select tbname , csum(c1) from stb1 partition by tbname")
# tdSql.checkRows(40)
# tdSql.query("select tbname , csum(st1) from stb1 partition by tbname")
@ -452,7 +452,7 @@ class TDTestCase:
# tdSql.query("select tbname , csum(st1) from stb1 partition by tbname slimit 1")
# tdSql.checkRows(7)
# partition by tags
# tdSql.query("select st1 , csum(c1) from stb1 partition by st1")
# tdSql.checkRows(40)
# tdSql.query("select csum(c1) from stb1 partition by st1")
@ -491,4 +491,4 @@ class TDTestCase:
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -16,7 +16,7 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
def prepare_datas(self):
tdSql.execute(
'''create table stb1
@ -24,7 +24,7 @@ class TDTestCase:
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
@ -66,14 +66,14 @@ class TDTestCase:
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
def check_result_auto(self ,origin_query , floor_query):
pass
floor_result = tdSql.getResult(floor_query)
origin_result = tdSql.getResult(origin_query)
auto_result =[]
for row in origin_result:
row_check = []
for elem in row:
@ -88,13 +88,13 @@ class TDTestCase:
for row_index , row in enumerate(floor_result):
for col_index , elem in enumerate(row):
if auto_result[row_index][col_index] != elem:
check_status = False
if not check_status:
tdLog.notice("floor function value has not as expected , sql is \"%s\" "%floor_query )
sys.exit(1)
else:
tdLog.info("floor value check pass , it work as expected ,sql is \"%s\" "%floor_query )
def test_errors(self):
error_sql_lists = [
"select floor from t1",
@ -128,42 +128,42 @@ class TDTestCase:
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
def support_types(self):
type_error_sql_lists = [
"select floor(ts) from t1" ,
"select floor(c7) from t1",
"select floor(c8) from t1",
"select floor(c9) from t1",
"select floor(ts) from ct1" ,
"select floor(c7) from ct1",
"select floor(c8) from ct1",
"select floor(c9) from ct1",
"select floor(ts) from ct3" ,
"select floor(c7) from ct3",
"select floor(c8) from ct3",
"select floor(c9) from ct3",
"select floor(ts) from ct4" ,
"select floor(c7) from ct4",
"select floor(c8) from ct4",
"select floor(c9) from ct4",
"select floor(ts) from stb1" ,
"select floor(c7) from stb1",
"select floor(c8) from stb1",
"select floor(c9) from stb1" ,
"select floor(ts) from stbbb1" ,
"select floor(c7) from stbbb1",
"select floor(ts) from tbname",
"select floor(c9) from tbname"
]
for type_sql in type_error_sql_lists:
tdSql.error(type_sql)
type_sql_lists = [
"select floor(c1) from t1",
"select floor(c2) from t1",
@ -193,16 +193,16 @@ class TDTestCase:
"select floor(c5) from stb1",
"select floor(c6) from stb1",
"select floor(c6) as alisb from stb1",
"select floor(c6) alisb from stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
def basic_floor_function(self):
# basic query
tdSql.query("select c1 from ct3")
tdSql.checkRows(0)
tdSql.query("select c1 from t1")
@ -222,7 +222,7 @@ class TDTestCase:
tdSql.query("select floor(c5) from ct3")
tdSql.checkRows(0)
tdSql.query("select floor(c6) from ct3")
# used for regular table
tdSql.query("select floor(c1) from t1")
tdSql.checkData(0, 0, None)
@ -240,7 +240,7 @@ class TDTestCase:
tdSql.checkData(5, 5, None)
self.check_result_auto( "select c1, c2, c3 , c4, c5 from t1", "select (c1), floor(c2) ,floor(c3), floor(c4), floor(c5) from t1")
# used for sub table
tdSql.query("select floor(c1) from ct1")
tdSql.checkData(0, 0, 8)
@ -252,20 +252,20 @@ class TDTestCase:
self.check_result_auto( "select c1, c2, c3 , c4, c5 from ct1", "select (c1), floor(c2) ,floor(c3), floor(c4), floor(c5) from ct1")
self.check_result_auto("select floor(floor(floor(floor(floor(floor(floor(floor(floor(floor(c1)))))))))) nest_col_func from ct1;","select c1 from ct1" )
# used for stable table
tdSql.query("select floor(c1) from stb1")
tdSql.checkRows(25)
self.check_result_auto( "select c1, c2, c3 , c4, c5 from ct4 ", "select (c1), floor(c2) ,floor(c3), floor(c4), floor(c5) from ct4")
self.check_result_auto("select floor(floor(floor(floor(floor(floor(floor(floor(floor(floor(c1)))))))))) nest_col_func from ct4;" , "select c1 from ct4" )
# used for not exists table
tdSql.error("select floor(c1) from stbbb1")
tdSql.error("select floor(c1) from tbname")
tdSql.error("select floor(c1) from ct5")
# mix with common col
tdSql.query("select c1, floor(c1) from ct1")
tdSql.checkData(0 , 0 ,8)
tdSql.checkData(0 , 1 ,8)
@ -290,7 +290,7 @@ class TDTestCase:
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(0 , 2 ,None)
tdSql.checkData(0 , 3 ,None)
tdSql.checkData(3 , 0 , 6)
tdSql.checkData(3 , 1 , 6)
tdSql.checkData(3 , 2 ,6.66000)
@ -311,7 +311,7 @@ class TDTestCase:
tdSql.query("select max(c5), count(c5) from stb1")
tdSql.query("select max(c5), count(c5) from ct1")
# bug fix for count
tdSql.query("select count(c1) from ct4 ")
tdSql.checkData(0,0,9)
@ -322,7 +322,7 @@ class TDTestCase:
tdSql.query("select count(*) from stb1 ")
tdSql.checkData(0,0,25)
# bug fix for compute
tdSql.query("select c1, abs(c1) -0 ,floor(c1)-0 from ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
@ -373,10 +373,10 @@ class TDTestCase:
tdSql.checkData(0,3,8.000000000)
tdSql.checkData(0,4,7.900000000)
tdSql.checkData(0,5,3.000000000)
def floor_Arithmetic(self):
pass
def check_boundary_values(self):
tdSql.execute("drop database if exists bound_test")
@ -405,14 +405,14 @@ class TDTestCase:
tdSql.execute(
f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
self.check_result_auto( "select c1, c2, c3 , c4, c5 ,c6 from sub1_bound ", "select floor(c1), floor(c2) ,floor(c3), floor(c4), floor(c5) ,floor(c6) from sub1_bound")
self.check_result_auto( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select floor(c1), floor(c2) ,floor(c3), floor(c3), floor(c2) ,floor(c1) from sub1_bound")
self.check_result_auto("select floor(floor(floor(floor(floor(floor(floor(floor(floor(floor(c1)))))))))) nest_col_func from sub1_bound;" , "select floor(c1) from sub1_bound" )
# check basic elem for table per row
tdSql.query("select floor(c1+0.2) ,floor(c2) , floor(c3+0.3) , floor(c4-0.3), floor(c5/2), floor(c6/2) from sub1_bound ")
tdSql.checkData(0, 0, 2147483647.000000000)
@ -444,26 +444,26 @@ class TDTestCase:
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table ==============")
self.prepare_datas()
tdLog.printNoPrefix("==========step2:test errors ==============")
self.test_errors()
tdLog.printNoPrefix("==========step3:support types ============")
self.support_types()
tdLog.printNoPrefix("==========step4: floor basic query ============")
self.basic_floor_function()
tdLog.printNoPrefix("==========step5: floor boundary query ============")
self.check_boundary_values()
tdLog.printNoPrefix("==========step6: floor filter query ============")
self.abs_func_filter()

View File

@ -370,17 +370,17 @@ class TDTestCase:
tdSql.query("select diff(st1+c1) from stb1 partition by tbname")
tdSql.checkRows(190)
# # bug need fix
# tdSql.query("select diff(st1+c1) from stb1 partition by tbname slimit 1 ")
# tdSql.checkRows(19)
# tdSql.error("select diff(st1+c1) from stb1 partition by tbname limit 1 ")
# bug need fix
tdSql.query("select diff(st1+c1) from stb1 partition by tbname")
tdSql.checkRows(190)
# bug need fix
# tdSql.query("select tbname , diff(c1) from stb1 partition by tbname")
# tdSql.checkRows(199)
# tdSql.query("select tbname , diff(st1) from stb1 partition by tbname")
@ -388,7 +388,7 @@ class TDTestCase:
# tdSql.query("select tbname , diff(st1) from stb1 partition by tbname slimit 1")
# tdSql.checkRows(19)
# partition by tags
# tdSql.query("select st1 , diff(c1) from stb1 partition by st1")
# tdSql.checkRows(199)
# tdSql.query("select diff(c1) from stb1 partition by st1")
@ -488,4 +488,4 @@ class TDTestCase:
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@@ -11,7 +11,7 @@ from util.sql import *
from util.cases import *
class TDTestCase:
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
@@ -388,11 +388,11 @@ class TDTestCase:
tdSql.execute(
f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.query("select stateduration(c1,'GT',1,1s) from sub1_bound")
tdSql.checkRows(5)
@@ -400,29 +400,29 @@ class TDTestCase:
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table ==============")
self.prepare_datas()
tdLog.printNoPrefix("==========step2:test errors ==============")
self.test_errors()
tdLog.printNoPrefix("==========step3:support types ============")
self.support_types()
tdLog.printNoPrefix("==========step4:support opers ============")
self.support_opers()
tdLog.printNoPrefix("==========step5: stateduration basic query ============")
self.basic_stateduration_function()
tdLog.printNoPrefix("==========step6: stateduration boundary query ============")
self.check_boundary_values()
tdLog.printNoPrefix("==========step6: stateduration unit time test ============")
self.check_unit_time()
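
Editor's note: for readers unfamiliar with stateduration(col, 'GT', 1, 1s), here is a rough Python model of the behavior the assertions above appear to rely on: the elapsed time while the condition keeps holding, reported in the given unit, and -1 once the condition breaks. This is my reading of the function, not code from the test suite.

def state_duration(rows, threshold, unit_ms):
    """rows: list of (ts_ms, value). Emits one output per input row:
    time since the current 'value > threshold' state began, in units of
    unit_ms, or -1 while the condition does not hold."""
    out, start = [], None
    for ts, val in rows:
        if val > threshold:
            start = ts if start is None else start
            out.append((ts - start) // unit_ms)
        else:
            start = None
            out.append(-1)
    return out

# e.g. state_duration([(0, 2), (1000, 3), (2000, 0)], 1, 1000) -> [0, 1, -1]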

View File

@@ -34,7 +34,7 @@ class TDTestCase:
tag_sql += f"{k} {v},"
create_stb_sql = f'create table {stbname} (ts timestamp,{column_sql[:-1]}) tags({tag_sql[:-1]})'
return create_stb_sql
def last_check_stb_tb_base(self):
tdSql.prepare()
stbname = tdCom.getLongName(5, "letters")
@@ -201,7 +201,7 @@ class TDTestCase:
tdSql.execute(f'use {dbname}')
# build 20 child tables,every table insert 10 rows
tdSql.execute(f'''create table {stbname}(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''')
for i in range(self.tbnum):
tdSql.execute(

View File

@@ -159,7 +159,7 @@ class TDTestCase:
return tdSql.error(self.mavg_query_form(
sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
table_expr=table_expr, condition=condition
))
if all(["group" in condition.lower(), "tbname" not in condition.lower()]):
print(f"case in {line}: ", end='')
@@ -295,7 +295,7 @@ class TDTestCase:
pre_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
if (platform.system().lower() == 'windows' and pre_result.dtype == 'int32'):
pre_result = np.array(pre_result, dtype = 'int64')
pre_mavg = pre_mavg = np.convolve(pre_result, np.ones(k), "valid")[offset_val:]/k
tdSql.query(self.mavg_query_form(
sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
@@ -669,7 +669,7 @@ class TDTestCase:
tdSql.checkData(0,0,1.000000000)
tdSql.checkData(1,0,1.000000000)
tdSql.checkData(5,0,1.000000000)
tdSql.query("select mavg(abs(c1),1) from t1")
tdSql.checkRows(4)
@@ -688,17 +688,17 @@ class TDTestCase:
tdSql.query("select mavg(st1+c1,3) from stb1 partition by tbname")
tdSql.checkRows(20)
# # bug need fix
# tdSql.query("select mavg(st1+c1,3) from stb1 partition by tbname slimit 1 ")
# tdSql.checkRows(2)
# tdSql.error("select mavg(st1+c1,3) from stb1 partition by tbname limit 1 ")
# bug need fix
tdSql.query("select mavg(st1+c1,3) from stb1 partition by tbname")
tdSql.checkRows(20)
# bug need fix
# tdSql.query("select tbname , mavg(c1,3) from stb1 partition by tbname")
# tdSql.checkRows(38)
# tdSql.query("select tbname , mavg(st1,3) from stb1 partition by tbname")
@@ -706,7 +706,7 @@ class TDTestCase:
# tdSql.query("select tbname , mavg(st1,3) from stb1 partition by tbname slimit 1")
# tdSql.checkRows(2)
# partition by tags
# tdSql.query("select st1 , mavg(c1,3) from stb1 partition by st1")
# tdSql.checkRows(38)
# tdSql.query("select mavg(c1,3) from stb1 partition by st1")
@@ -743,4 +743,4 @@ class TDTestCase:
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
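
Editor's note: the expected moving-average values in the hunk above are built with np.convolve; a quick standalone illustration of that identity (the numbers below are made up for the example, not taken from the test data).

import numpy as np

values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
k = 3
# Convolving with a window of k ones and dividing by k yields the
# k-point simple moving average; "valid" mode drops the padded edges.
mavg = np.convolve(values, np.ones(k), "valid") / k
print(mavg)  # [2. 3. 4.]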

View File

@@ -5,7 +5,7 @@ import numpy as np
class TDTestCase:
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143,
"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
@@ -19,15 +19,15 @@ class TDTestCase:
self.nchar_str = '涛思数据'
def max_check_stb_and_tb_base(self):
tdSql.prepare()
intData = []
floatData = []
tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''')
tdSql.execute("create table stb_1 using stb tags('beijing')")
for i in range(self.rowNum):
tdSql.execute(f"insert into stb_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
intData.append(i + 1)
floatData.append(i + 0.1)
for i in ['ts','col11','col12','col13']:
for j in ['db.stb','stb','db.stb_1','stb_1']:
@@ -45,17 +45,17 @@ class TDTestCase:
tdSql.query("select max(col1) from stb where col2<=5")
tdSql.checkData(0,0,5)
tdSql.execute('drop database db')
def max_check_ntb_base(self):
tdSql.prepare()
intData = []
floatData = []
tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20))''')
for i in range(self.rowNum):
tdSql.execute(f"insert into ntb values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
intData.append(i + 1)
floatData.append(i + 0.1)
for i in ['ts','col11','col12','col13']:
for j in ['db.ntb','ntb']:
@@ -79,7 +79,7 @@ class TDTestCase:
same_sql = f"select {col_name} from {tbname} order by {col_name} desc limit 1"
tdSql.query(max_sql)
max_result = tdSql.queryResult
tdSql.query(same_sql)
same_result = tdSql.queryResult
@@ -91,7 +91,7 @@ class TDTestCase:
def support_distributed_aggregate(self):
# prepate datas for 20 tables distributed at different vgroups
tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
tdSql.execute(" use testdb ")
@@ -161,17 +161,17 @@ class TDTestCase:
vgroups = tdSql.queryResult
vnode_tables={}
for vgroup_id in vgroups:
vnode_tables[vgroup_id[0]]=[]
# check sub_table of per vnode ,make sure sub_table has been distributed
tdSql.query("show tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
vnode_tables[table_name[6]].append(table_name[0])
count = 0
for k ,v in vnode_tables.items():
@@ -180,8 +180,8 @@ class TDTestCase:
if count < 2:
tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")
# check max function work status
tdSql.query("show tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
@@ -190,23 +190,23 @@ class TDTestCase:
tdSql.query("desc stb1")
col_names = tdSql.queryResult
colnames = []
for col_name in col_names:
if col_name[1] in ["INT" ,"BIGINT" ,"SMALLINT" ,"TINYINT" , "FLOAT" ,"DOUBLE"]:
colnames.append(col_name[0])
for tablename in tablenames:
for colname in colnames:
self.check_max_functions(tablename,colname)
# max function with basic filter
print(vnode_tables)
def run(self):
# max verifacation
self.max_check_stb_and_tb_base()
self.max_check_ntb_base()
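
Editor's note: check_max_functions (only partially visible above) compares max() against an ORDER BY ... DESC LIMIT 1 probe. A minimal sketch of that cross-check, assuming a DB-API style cursor such as the one returned by taos.connect().cursor(); the function name and arguments here are illustrative, not the project's helpers.

def check_max_against_order_by(cursor, tbname, colname):
    # Aggregate MAX should agree with a descending sort plus LIMIT 1.
    cursor.execute(f"select max({colname}) from {tbname}")
    max_row = cursor.fetchall()
    cursor.execute(f"select {colname} from {tbname} order by {colname} desc limit 1")
    top_row = cursor.fetchall()
    return max_row[0][0] == top_row[0][0]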

View File

@@ -12,30 +12,30 @@ class TDTestCase:
self.rowNum = 10
self.ts = 1537146000000
def run(self):
tdSql.prepare()
intData = []
floatData = []
tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
tdSql.execute("create table stb_1 using stb tags('beijing')")
tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''')
for i in range(self.rowNum):
tdSql.execute("insert into ntb values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
intData.append(i + 1)
floatData.append(i + 0.1)
for i in range(self.rowNum):
tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
intData.append(i + 1)
floatData.append(i + 0.1)
# max verifacation
tdSql.error("select min(ts) from stb_1")
tdSql.error("select min(ts) from db.stb_1")
tdSql.error("select min(col7) from stb_1")
@@ -206,7 +206,7 @@ class TDTestCase:
tdSql.query("select min(col1) from ntb where col2>=5")
tdSql.checkData(0,0,5)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)

File diff suppressed because it is too large

View File

@@ -8,14 +8,14 @@ from util.sql import *
from util.cases import *
class TDTestCase:
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
def prepare_datas(self):
tdSql.execute(
'''create table stb1
@@ -23,7 +23,7 @@ class TDTestCase:
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
@@ -65,14 +65,14 @@ class TDTestCase:
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
def check_result_auto(self ,origin_query , round_query):
pass
round_result = tdSql.getResult(round_query)
origin_result = tdSql.getResult(origin_query)
auto_result =[]
for row in origin_result:
row_check = []
for elem in row:
@@ -87,13 +87,13 @@ class TDTestCase:
for row_index , row in enumerate(round_result):
for col_index , elem in enumerate(row):
if auto_result[row_index][col_index] != elem:
check_status = False
if not check_status:
tdLog.notice("round function value has not as expected , sql is \"%s\" "%round_query )
sys.exit(1)
else:
tdLog.info("round value check pass , it work as expected ,sql is \"%s\" "%round_query )
def test_errors(self):
error_sql_lists = [
"select round from t1",
@@ -127,42 +127,42 @@ class TDTestCase:
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
def support_types(self):
type_error_sql_lists = [
"select round(ts) from t1" ,
"select round(c7) from t1",
"select round(c8) from t1",
"select round(c9) from t1",
"select round(ts) from ct1" ,
"select round(c7) from ct1",
"select round(c8) from ct1",
"select round(c9) from ct1",
"select round(ts) from ct3" ,
"select round(c7) from ct3",
"select round(c8) from ct3",
"select round(c9) from ct3",
"select round(ts) from ct4" ,
"select round(c7) from ct4",
"select round(c8) from ct4",
"select round(c9) from ct4",
"select round(ts) from stb1" ,
"select round(c7) from stb1",
"select round(c8) from stb1",
"select round(c9) from stb1" ,
"select round(ts) from stbbb1" ,
"select round(c7) from stbbb1",
"select round(ts) from tbname",
"select round(c9) from tbname"
]
for type_sql in type_error_sql_lists:
tdSql.error(type_sql)
type_sql_lists = [
"select round(c1) from t1",
"select round(c2) from t1",
@@ -192,16 +192,16 @@ class TDTestCase:
"select round(c5) from stb1",
"select round(c6) from stb1",
"select round(c6) as alisb from stb1",
"select round(c6) alisb from stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
def basic_round_function(self):
# basic query
tdSql.query("select c1 from ct3")
tdSql.checkRows(0)
tdSql.query("select c1 from t1")
@@ -221,7 +221,7 @@ class TDTestCase:
tdSql.query("select round(c5) from ct3")
tdSql.checkRows(0)
tdSql.query("select round(c6) from ct3")
# used for regular table
tdSql.query("select round(c1) from t1")
tdSql.checkData(0, 0, None)
@@ -239,7 +239,7 @@ class TDTestCase:
tdSql.checkData(5, 5, None)
self.check_result_auto( "select c1, c2, c3 , c4, c5 from t1", "select (c1), round(c2) ,round(c3), round(c4), round(c5) from t1")
# used for sub table
tdSql.query("select round(c1) from ct1")
tdSql.checkData(0, 0, 8)
@@ -251,20 +251,20 @@ class TDTestCase:
self.check_result_auto( "select c1, c2, c3 , c4, c5 from ct1", "select (c1), round(c2) ,round(c3), round(c4), round(c5) from ct1")
self.check_result_auto("select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from ct1;","select c1 from ct1" )
# used for stable table
tdSql.query("select round(c1) from stb1")
tdSql.checkRows(25)
self.check_result_auto( "select c1, c2, c3 , c4, c5 from ct4 ", "select (c1), round(c2) ,round(c3), round(c4), round(c5) from ct4")
self.check_result_auto("select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from ct4;" , "select c1 from ct4" )
# used for not exists table
tdSql.error("select round(c1) from stbbb1")
tdSql.error("select round(c1) from tbname")
tdSql.error("select round(c1) from ct5")
# mix with common col
tdSql.query("select c1, round(c1) from ct1")
tdSql.checkData(0 , 0 ,8)
tdSql.checkData(0 , 1 ,8)
@@ -289,7 +289,7 @@ class TDTestCase:
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(0 , 2 ,None)
tdSql.checkData(0 , 3 ,None)
tdSql.checkData(3 , 0 , 6)
tdSql.checkData(3 , 1 , 6)
tdSql.checkData(3 , 2 ,6.66000)
@@ -315,7 +315,7 @@ class TDTestCase:
tdSql.query("select max(c5), count(c5) from stb1")
tdSql.query("select max(c5), count(c5) from ct1")
# bug fix for count
tdSql.query("select count(c1) from ct4 ")
tdSql.checkData(0,0,9)
@@ -326,7 +326,7 @@ class TDTestCase:
tdSql.query("select count(*) from stb1 ")
tdSql.checkData(0,0,25)
# bug fix for compute
tdSql.query("select c1, abs(c1) -0 ,round(c1)-0 from ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
@@ -378,10 +378,10 @@ class TDTestCase:
tdSql.checkData(0,4,7.900000000)
tdSql.checkData(0,5,3.000000000)
tdSql.checkData(0,6,7.500000000)
def round_Arithmetic(self):
pass
def check_boundary_values(self):
tdSql.execute("drop database if exists bound_test")
@@ -410,14 +410,14 @@ class TDTestCase:
tdSql.execute(
f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
self.check_result_auto( "select c1, c2, c3 , c4, c5 ,c6 from sub1_bound ", "select round(c1), round(c2) ,round(c3), round(c4), round(c5) ,round(c6) from sub1_bound")
self.check_result_auto( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select round(c1), round(c2) ,round(c3), round(c3), round(c2) ,round(c1) from sub1_bound")
self.check_result_auto("select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from sub1_bound;" , "select round(c1) from sub1_bound" )
# check basic elem for table per row
tdSql.query("select round(c1+0.2) ,round(c2) , round(c3+0.3) , round(c4-0.3), round(c5/2), round(c6/2) from sub1_bound ")
tdSql.checkData(0, 0, 2147483647.000000000)
@@ -444,32 +444,32 @@ class TDTestCase:
self.check_result_auto( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select round(t1) ,round(c5) from stb1 where c1 > 0 order by tbname" )
self.check_result_auto( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select round(t1) , round(c5) from stb1 where c1 > 0 order by tbname" )
pass
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table ==============")
self.prepare_datas()
tdLog.printNoPrefix("==========step2:test errors ==============")
self.test_errors()
tdLog.printNoPrefix("==========step3:support types ============")
self.support_types()
tdLog.printNoPrefix("==========step4: round basic query ============")
self.basic_round_function()
tdLog.printNoPrefix("==========step5: round boundary query ============")
self.check_boundary_values()
tdLog.printNoPrefix("==========step6: round filter query ============")
self.abs_func_filter()
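
Editor's note: one caveat if you recompute the expected values above in plain Python: the built-in round() uses banker's rounding, while SQL-style ROUND implementations usually round halves away from zero (I have not verified TDengine's exact tie-breaking rule), so an expected-value helper along these lines may be needed. The helper below is my own illustration, not part of the test file.

import decimal

def sql_style_round(x):
    # Half-away-from-zero rounding; Python's built-in round() would give
    # round(2.5) == 2 instead of 3 because it rounds ties to even.
    if x is None:
        return None
    return float(decimal.Decimal(str(x)).quantize(decimal.Decimal("1"),
                                                   rounding=decimal.ROUND_HALF_UP))

print(sql_style_round(2.5), round(2.5))  # 3.0 vs 2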

View File

@@ -9,13 +9,13 @@ from util.cases import *
class TDTestCase:
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, powSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
def prepare_datas(self):
tdSql.execute(
'''create table stb1
@@ -23,7 +23,7 @@ class TDTestCase:
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
@@ -65,14 +65,14 @@ class TDTestCase:
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
def check_result_auto_sqrt(self ,origin_query , pow_query):
pow_result = tdSql.getResult(pow_query)
origin_result = tdSql.getResult(origin_query)
auto_result =[]
for row in origin_result:
row_check = []
for elem in row:
@@ -92,7 +92,7 @@ class TDTestCase:
if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None):
check_status = False
elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
check_status = False
else:
pass
if not check_status:
@@ -100,7 +100,7 @@ class TDTestCase:
sys.exit(1)
else:
tdLog.info("sqrt value check pass , it work as expected ,sql is \"%s\" "%pow_query )
def test_errors(self):
error_sql_lists = [
"select sqrt from t1",
@@ -134,42 +134,42 @@ class TDTestCase:
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
def support_types(self):
type_error_sql_lists = [
"select sqrt(ts) from t1" ,
"select sqrt(c7) from t1",
"select sqrt(c8) from t1",
"select sqrt(c9) from t1",
"select sqrt(ts) from ct1" ,
"select sqrt(c7) from ct1",
"select sqrt(c8) from ct1",
"select sqrt(c9) from ct1",
"select sqrt(ts) from ct3" ,
"select sqrt(c7) from ct3",
"select sqrt(c8) from ct3",
"select sqrt(c9) from ct3",
"select sqrt(ts) from ct4" ,
"select sqrt(c7) from ct4",
"select sqrt(c8) from ct4",
"select sqrt(c9) from ct4",
"select sqrt(ts) from stb1" ,
"select sqrt(c7) from stb1",
"select sqrt(c8) from stb1",
"select sqrt(c9) from stb1" ,
"select sqrt(ts) from stbbb1" ,
"select sqrt(c7) from stbbb1",
"select sqrt(ts) from tbname",
"select sqrt(c9) from tbname"
]
for type_sql in type_error_sql_lists:
tdSql.error(type_sql)
type_sql_lists = [
"select sqrt(c1) from t1",
"select sqrt(c2) from t1",
@@ -199,16 +199,16 @@ class TDTestCase:
"select sqrt(c5) from stb1",
"select sqrt(c6) from stb1",
"select sqrt(c6) as alisb from stb1",
"select sqrt(c6) alisb from stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
def basic_sqrt_function(self):
# basic query
tdSql.query("select c1 from ct3")
tdSql.checkRows(0)
tdSql.query("select c1 from t1")
@@ -249,7 +249,7 @@ class TDTestCase:
tdSql.checkData(5, 5, None)
self.check_result_auto_sqrt( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from t1", "select sqrt(abs(c1)), sqrt(abs(c2)) ,sqrt(abs(c3)), sqrt(abs(c4)), sqrt(abs(c5)) from t1")
# used for sub table
tdSql.query("select c2 ,sqrt(c2) from ct1")
tdSql.checkData(0, 1, 298.140906284)
@@ -265,7 +265,7 @@ class TDTestCase:
tdSql.checkData(5 , 2, None)
self.check_result_auto_sqrt( "select c1, c2, c3 , c4, c5 from ct1", "select sqrt(c1), sqrt(c2) ,sqrt(c3), sqrt(c4), sqrt(c5) from ct1")
# nest query for sqrt functions
tdSql.query("select c4 , sqrt(c4) ,sqrt(sqrt(c4)) , sqrt(sqrt(sqrt(c4))) from ct1;")
tdSql.checkData(0 , 0 , 88)
@@ -283,18 +283,18 @@ class TDTestCase:
tdSql.checkData(11 , 2 , None)
tdSql.checkData(11 , 3 , None)
# used for stable table
tdSql.query("select sqrt(c1) from stb1")
tdSql.checkRows(25)
# used for not exists table
tdSql.error("select sqrt(c1) from stbbb1")
tdSql.error("select sqrt(c1) from tbname")
tdSql.error("select sqrt(c1) from ct5")
# mix with common col
tdSql.query("select c1, sqrt(c1) from ct1")
tdSql.checkData(0 , 0 ,8)
tdSql.checkData(0 , 1 ,2.828427125)
@@ -314,7 +314,7 @@ class TDTestCase:
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(0 , 2 ,None)
tdSql.checkData(0 , 3 ,None)
tdSql.checkData(3 , 0 , 6)
tdSql.checkData(3 , 1 ,2.449489743)
tdSql.checkData(3 , 2 ,2.449489743)
@@ -335,7 +335,7 @@ class TDTestCase:
tdSql.query("select max(c5), count(c5) from stb1")
tdSql.query("select max(c5), count(c5) from ct1")
# bug fix for count
tdSql.query("select count(c1) from ct4 ")
tdSql.checkData(0,0,9)
@@ -346,7 +346,7 @@ class TDTestCase:
tdSql.query("select count(*) from stb1 ")
tdSql.checkData(0,0,25)
# # bug fix for compute
tdSql.query("select c1, sqrt(c1) -0 ,sqrt(c1-4)-0 from ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
@@ -397,16 +397,16 @@ class TDTestCase:
tdSql.checkRows(13)
# # bug for compute in functions
# tdSql.query("select c1, abs(1/0) from ct1")
# tdSql.checkData(0, 0, 8)
# tdSql.checkData(0, 1, 1)
tdSql.query("select c1, sqrt(1) from ct1")
tdSql.checkData(0, 1, 1.000000000)
tdSql.checkRows(13)
# two cols start sqrt(x,y)
tdSql.query("select c1,c2, sqrt(c2) from ct1")
tdSql.checkData(0, 2, 298.140906284)
tdSql.checkData(1, 2, 278.885281074)
tdSql.checkData(4, 2, 0.000000000)
@@ -445,10 +445,10 @@ class TDTestCase:
tdSql.checkData(0,3,1.000000000)
tdSql.checkData(0,4,0.900000000)
tdSql.checkData(0,5,1.000000000)
def pow_Arithmetic(self):
pass
def check_boundary_values(self):
tdSql.execute("drop database if exists bound_test")
@@ -475,11 +475,11 @@ class TDTestCase:
f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
self.check_result_auto_sqrt( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from sub1_bound ", "select sqrt(abs(c1)), sqrt(abs(c2)) ,sqrt(abs(c3)), sqrt(abs(c4)), sqrt(abs(c5)) from sub1_bound")
self.check_result_auto_sqrt( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select sqrt(c1), sqrt(c2) ,sqrt(c3), sqrt(c3), sqrt(c2) ,sqrt(c1) from sub1_bound")
self.check_result_auto_sqrt("select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from sub1_bound" , "select sqrt(abs(c1)) from sub1_bound" )
# check basic elem for table per row
tdSql.query("select sqrt(abs(c1)) ,sqrt(abs(c2)) , sqrt(abs(c3)) , sqrt(abs(c4)), sqrt(abs(c5)), sqrt(abs(c6)) from sub1_bound ")
tdSql.checkData(0,0,math.sqrt(2147483647))
@@ -504,7 +504,7 @@ class TDTestCase:
tdSql.checkData(0,1,math.sqrt(9223372036854775807))
tdSql.checkData(0,2,math.sqrt(32767.000000000))
tdSql.checkData(0,3,math.sqrt(63.500000000))
def support_super_table_test(self):
tdSql.execute(" use db ")
self.check_result_auto_sqrt( " select c5 from stb1 order by ts " , "select sqrt(c5) from stb1 order by ts" )
@@ -522,42 +522,42 @@ class TDTestCase:
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table ==============")
self.prepare_datas()
tdLog.printNoPrefix("==========step2:test errors ==============")
self.test_errors()
tdLog.printNoPrefix("==========step3:support types ============")
self.support_types()
tdLog.printNoPrefix("==========step4: sqrt basic query ============")
self.basic_sqrt_function()
tdLog.printNoPrefix("==========step5: big number sqrt query ============")
self.test_big_number()
tdLog.printNoPrefix("==========step6: base number for sqrt query ============")
self.pow_base_test()
tdLog.printNoPrefix("==========step7: sqrt boundary query ============")
self.check_boundary_values()
tdLog.printNoPrefix("==========step8: sqrt filter query ============")
self.abs_func_filter()
tdLog.printNoPrefix("==========step9: check sqrt result of stable query ============")
self.support_super_table_test()
def stop(self):
tdSql.close()
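
Editor's note: the tolerance check in check_result_auto_sqrt above only trips when the computed value exceeds the queried one by more than 1e-8, i.e. it is one-sided. If such a check is ever rewritten, a symmetric comparison is usually safer; a minimal standard-library sketch (not the project's helper):

import math

def close_enough(expected, actual, tol=1e-8):
    # Treat a pair of NULLs as equal and compare everything else with a
    # symmetric relative/absolute tolerance instead of a one-sided difference.
    if expected is None or actual is None:
        return expected is None and actual is None
    return math.isclose(expected, actual, rel_tol=tol, abs_tol=tol)

assert close_enough(math.sqrt(2), 1.41421356237)
assert not close_enough(2.0, 2.1)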

View File

@@ -11,7 +11,7 @@ from util.sql import *
from util.cases import *
class TDTestCase:
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
@@ -476,7 +476,7 @@ class TDTestCase:
self.check_unit_time()
self.query_precision()
def stop(self):

View File

@@ -25,10 +25,10 @@ class TDTestCase:
def run(self):
tdSql.prepare()
# timestamp = 1ms , time_unit = 1s
tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''')
for i in range(self.row_num):
tdSql.execute("insert into test values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
integer_list = [1,2,3,4,11,12,13,14]
float_list = [5,6]
@@ -72,10 +72,10 @@ class TDTestCase:
tdSql.error(f"select stateduration(col1,{i},5) from test")
# timestamp = 1s, time_unit =1s
tdSql.execute('''create table test1(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''')
for i in range(self.row_num):
tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
% (self.ts + i*1000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
for i in integer_list:

View File

@@ -18,24 +18,24 @@ class TDTestCase:
self.arithmetic_operators = ['+','-','*','/']
self.arithmetic_values = [0,1,100,15.5]
# name of normal table
self.ntbname = 'ntb'
# name of stable
self.stbname = 'stb'
# structure of column
self.column_dict = {
'ts':'timestamp',
'c1':'int',
'c2':'float',
'c3':'double'
}
# structure of tag
self.tag_dict = {
't0':'int'
}
# number of child tables
self.tbnum = 2
# values of tag,the number of values should equal to tbnum
self.tag_values = [
f'10',
f'100'
]
@@ -62,7 +62,7 @@ class TDTestCase:
time_zone = time_zone_1 + " " + time_zone_2
print("expected time zone: " + time_zone)
return time_zone
def tb_type_check(self,tb_type):
if tb_type in ['normal_table','child_table']:
tdSql.checkRows(len(self.values_list))
@@ -115,7 +115,7 @@ class TDTestCase:
timezone = self.get_system_timezone()
self.timezone_check_ntb(timezone)
self.timezone_check_stb(timezone)
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")

View File

@@ -45,7 +45,7 @@ class TDTestCase:
'col12': 'binary(20)',
'col13': 'nchar(20)'
}
self.param_list = [1,100]
def insert_data(self,column_dict,tbname,row_num):
@@ -107,7 +107,7 @@ class TDTestCase:
tdSql.execute(f"create database if not exists {dbname} vgroups 2")
tdSql.execute(f'use {dbname}')
tdSql.execute(self.setsql.set_create_stable_sql(stbname,self.column_dict,tag_dict))
for i in range(self.tbnum):
tdSql.execute(f"create table {stbname}_{i} using {stbname} tags({tag_values[0]})")
self.insert_data(self.column_dict,f'{stbname}_{i}',self.rowNum)
@@ -141,7 +141,7 @@ class TDTestCase:
self.top_check_ntb()
self.top_check_stb()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)

View File

@ -0,0 +1,254 @@
import taos
import sys
import time
import socket
import os
import threading
import math
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.vgroups = 1
self.ctbNum = 100
self.rowsPerTbl = 10000
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 1000,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 10,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdDnodes.stop(1)
# tdDnodes.start(1)
tdSql.query("flush database %s"%(paraDict['dbName']))
return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
paraDict['batchNum'] = 100
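# shift startTs so the batch inserted later in this case presumably lands on fresh timestamps rather than overwriting the rows from prepareTestEnv (the per-row interval is defined in tmqCommon)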
paraDict['startTs'] = paraDict['startTs'] + self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
# tdSql.query(queryString)
# expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 0
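# the factor of 2 appears to cover the rows written in prepareTestEnv plus the second batch inserted below after the consumer starts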
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
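# consumer settings passed as a key:value list (consumer group cgrp1, auto commit every 1000 ms, start from the earliest offset); presumably forwarded to the tmq consumer by tmq_sim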
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
# after start consume, continue insert some data
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
#
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("%d tmq consume rows error!"%consumerId)
tmqCom.checkFileContent(consumerId, queryString)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 1
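# this consumer is only expected to take roughly one third of the rows; the check below merely requires the result to fall between this value and the total inserted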
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] / 3)
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
firstConsumeRows = resultList[0]
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 2
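# the second consumer should pick up the remaining rows; the check below asserts that the two consumers together cover every inserted row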
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = firstConsumeRows + resultList[0]
if not (expectrowcnt >= resultList[0] and totalRowsInserted == actConsumeTotalRows):
tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0]))
tdLog.info("and sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 2 end ...... ")
def run(self):
tdSql.prepare()
self.prepareTestEnv()
self.tmqCase1()
# self.tmqCase2()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,244 @@
import taos
import sys
import time
import socket
import os
import threading
from enum import Enum
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.vgroups = 4
self.ctbNum = 500
self.rowsPerTbl = 1000
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1000,
'rowsPerTbl': 1000,
'batchNum': 400,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
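# a second batch of the same size is written through auto-created tables (ctbx prefix), presumably to exercise auto table creation alongside the pre-created ctb tables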
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
return
# insert data through auto-created child tables, then start consuming
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1000,
'rowsPerTbl': 1000,
'batchNum': 400,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 5,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
# update to half tables
paraDict['ctbNum'] = int(self.ctbNum / 2)
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
paraDict['ctbNum'] = self.ctbNum
consumerId = 0
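# the factor of 3 appears to count the two full insert passes from prepareTestEnv plus the two half-table passes done just above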
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 3)
topicList = topicFromStb1
ifcheckdata = 0
ifManualCommit = 0
keyList = 'group.id:cgrp1,\
enable.auto.commit:true,\
auto.commit.interval.ms:1000,\
auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("insert process end, and start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
tdSql.query(queryString)
totalRowsInserted = tdSql.getRows()
tdLog.info("act consume rows: %d, expect consume rows: %d, act insert rows: %d"%(totalConsumeRows, expectrowcnt, totalRowsInserted))
if totalConsumeRows != expectrowcnt:
tdLog.exit("tmq consume rows error!")
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1000,
'rowsPerTbl': 1000,
'batchNum': 1000,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 5,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tdLog.info("restart taosd to ensure that the data falls into the disk")
tdSql.query("flush database %s"%(paraDict['dbName']))
# update to half tables
paraDict['ctbNum'] = int(self.ctbNum / 2)
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+paraDict['ctbNum'])
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+paraDict['ctbNum'])
tmqCom.initConsumerTable()
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
paraDict['ctbNum'] = self.ctbNum
consumerId = 0
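# 2 * 2 appears to match the four passes of data written so far: two full passes in prepareTestEnv plus the equivalent of one full pass each from the half-table inserts in case 1 and in this case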
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2 * 2
topicList = topicFromStb1
ifcheckdata = 0
ifManualCommit = 0
keyList = 'group.id:cgrp1,\
enable.auto.commit:true,\
auto.commit.interval.ms:1000,\
auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("insert process end, and start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
tdSql.query(queryString)
totalRowsInserted = tdSql.getRows()
tdLog.info("act consume rows: %d, act insert rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsInserted, expectrowcnt))
if totalConsumeRows != totalRowsInserted:
tdLog.exit("tmq consume rows error!")
tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 2 end ...... ")
def run(self):
tdSql.prepare()
self.prepareTestEnv()
self.tmqCase1()
self.tmqCase2()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,175 @@
import taos
import sys
import time
import socket
import os
import threading
from enum import Enum
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.vgroups = 4
self.ctbNum = 1000
self.rowsPerTbl = 1000
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1000,
'rowsPerTbl': 1000,
'batchNum': 400,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
paraDict['ctbNum'] = int(self.ctbNum / 2)
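# half of the rows go into the pre-created ctb* tables below; the auto-create call after it writes a matching batch into ctbx* tables created on the fly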
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1000,
'rowsPerTbl': 1000,
'batchNum': 1000,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 5,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tdLog.info("restart taosd to ensure that the data falls into the disk")
tdSql.query("flush database %s"%(paraDict['dbName']))
# update to half tables
paraDict['ctbNum'] = int(self.ctbNum / 4)
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+paraDict['ctbNum'])
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+paraDict['ctbNum'])
tmqCom.initConsumerTable()
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
paraDict['ctbNum'] = self.ctbNum
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 + 1)
topicList = topicFromStb1
ifcheckdata = 0
ifManualCommit = 0
keyList = 'group.id:cgrp1,\
enable.auto.commit:true,\
auto.commit.interval.ms:1000,\
auto.offset.reset:earliest'
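# the consumer is told to expect one extra full pass on top of expectrowcnt, presumably as headroom for the rows inserted asynchronously after it starts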
tmqCom.insertConsumerInfo(consumerId, expectrowcnt + paraDict["rowsPerTbl"] * paraDict["ctbNum"],topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
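# while the consumer runs, two more interleaved insert passes are launched asynchronously over half of the tables; asyncInsertDataByInterlace presumably returns a thread handle, and only the second one is joined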
paraDict['ctbNum'] = int(self.ctbNum / 2)
paraDict['ctbStartIdx'] += paraDict['ctbNum']
_ = tmqCom.asyncInsertDataByInterlace(paraDict)
time.sleep(3)
pthread = tmqCom.asyncInsertDataByInterlace(paraDict)
pthread.join()
tdLog.info("insert process end, and start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
tdSql.query(queryString)
totalRowsInserted = tdSql.getRows()
tdLog.info("act consume rows: %d, act insert rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsInserted, expectrowcnt))
if totalConsumeRows <= totalRowsInserted or totalConsumeRows != expectrowcnt:
tdLog.exit("tmq consume rows error!")
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 1 end ...... ")
def run(self):
tdSql.prepare()
self.prepareTestEnv()
self.tmqCase1()
# self.tmqCase2()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

@ -1 +1 @@
Subproject commit 6dccac192a2ae7dd78718ab926201aab5419327a Subproject commit 7a94ffab45f08e16f09b3f430fe75d717054adb6