Merge branch '3.0' into enh/stopquery_a
This commit is contained in:
commit
346c86408f
|
@ -108,6 +108,7 @@ typedef struct SDataBlockInfo {
|
|||
// TODO: optimize and remove following
|
||||
int32_t childId; // used for stream, do not serialize
|
||||
EStreamType type; // used for stream, do not serialize
|
||||
STimeWindow calWin; // used for stream, do not serialize
|
||||
} SDataBlockInfo;
|
||||
|
||||
typedef struct SSDataBlock {
|
||||
|
|
|
@ -64,18 +64,22 @@ int32_t tPutValue(uint8_t *p, SValue *pValue, int8_t type);
|
|||
int32_t tGetValue(uint8_t *p, SValue *pValue, int8_t type);
|
||||
int tValueCmprFn(const SValue *pValue1, const SValue *pValue2, int8_t type);
|
||||
|
||||
// STSRow2
|
||||
// SColVal
|
||||
#define COL_VAL_NONE(CID, TYPE) ((SColVal){.cid = (CID), .type = (TYPE), .isNone = 1})
|
||||
#define COL_VAL_NULL(CID, TYPE) ((SColVal){.cid = (CID), .type = (TYPE), .isNull = 1})
|
||||
#define COL_VAL_VALUE(CID, TYPE, V) ((SColVal){.cid = (CID), .type = (TYPE), .value = (V)})
|
||||
|
||||
// STSRow2
|
||||
#define TSROW_LEN(PROW, V) tGetI32v((uint8_t *)(PROW)->data, (V) ? &(V) : NULL)
|
||||
#define TSROW_SVER(PROW, V) tGetI32v((PROW)->data + TSROW_LEN(PROW, NULL), (V) ? &(V) : NULL)
|
||||
|
||||
int32_t tTSRowNew(STSRowBuilder *pBuilder, SArray *pArray, STSchema *pTSchema, STSRow2 **ppRow);
|
||||
int32_t tTSRowClone(const STSRow2 *pRow, STSRow2 **ppRow);
|
||||
void tTSRowFree(STSRow2 *pRow);
|
||||
void tTSRowGet(STSRow2 *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal);
|
||||
int32_t tTSRowToArray(STSRow2 *pRow, STSchema *pTSchema, SArray **ppArray);
|
||||
int32_t tPutTSRow(uint8_t *p, STSRow2 *pRow);
|
||||
int32_t tGetTSRow(uint8_t *p, STSRow2 *pRow);
|
||||
int32_t tGetTSRow(uint8_t *p, STSRow2 **ppRow);
|
||||
|
||||
// STSRowBuilder
|
||||
#define tsRowBuilderInit() ((STSRowBuilder){0})
|
||||
|
@ -97,7 +101,7 @@ int32_t tEncodeTag(SEncoder *pEncoder, const STag *pTag);
|
|||
int32_t tDecodeTag(SDecoder *pDecoder, STag **ppTag);
|
||||
int32_t tTagToValArray(const STag *pTag, SArray **ppArray);
|
||||
void debugPrintSTag(STag *pTag, const char *tag, int32_t ln); // TODO: remove
|
||||
int32_t parseJsontoTagData(const char* json, SArray* pTagVals, STag** ppTag, void* pMsgBuf);
|
||||
int32_t parseJsontoTagData(const char *json, SArray *pTagVals, STag **ppTag, void *pMsgBuf);
|
||||
|
||||
// STRUCT =================
|
||||
struct STColumn {
|
||||
|
@ -123,16 +127,16 @@ struct STSchema {
|
|||
#define TSROW_KV_SMALL ((uint8_t)0x10U)
|
||||
#define TSROW_KV_MID ((uint8_t)0x20U)
|
||||
#define TSROW_KV_BIG ((uint8_t)0x40U)
|
||||
#pragma pack(push, 1)
|
||||
struct STSRow2 {
|
||||
TSKEY ts;
|
||||
uint8_t flags;
|
||||
int32_t sver;
|
||||
uint32_t nData;
|
||||
uint8_t *pData;
|
||||
TSKEY ts;
|
||||
uint8_t flags;
|
||||
uint8_t data[];
|
||||
};
|
||||
#pragma pack(pop)
|
||||
|
||||
struct STSRowBuilder {
|
||||
STSRow2 tsRow;
|
||||
// STSRow2 tsRow;
|
||||
int32_t szBuf;
|
||||
uint8_t *pBuf;
|
||||
};
|
||||
|
@ -226,50 +230,6 @@ struct STag {
|
|||
memcpy(varDataVal(x), (str), (_size)); \
|
||||
} while (0);
|
||||
|
||||
// ----------------- TSDB COLUMN DEFINITION
|
||||
|
||||
#define colType(col) ((col)->type)
|
||||
#define colFlags(col) ((col)->flags)
|
||||
#define colColId(col) ((col)->colId)
|
||||
#define colBytes(col) ((col)->bytes)
|
||||
#define colOffset(col) ((col)->offset)
|
||||
|
||||
#define colSetType(col, t) (colType(col) = (t))
|
||||
#define colSetFlags(col, f) (colFlags(col) = (f))
|
||||
#define colSetColId(col, id) (colColId(col) = (id))
|
||||
#define colSetBytes(col, b) (colBytes(col) = (b))
|
||||
#define colSetOffset(col, o) (colOffset(col) = (o))
|
||||
|
||||
// ----------------- TSDB SCHEMA DEFINITION
|
||||
|
||||
#define schemaNCols(s) ((s)->numOfCols)
|
||||
#define schemaVersion(s) ((s)->version)
|
||||
#define schemaTLen(s) ((s)->tlen)
|
||||
#define schemaFLen(s) ((s)->flen)
|
||||
#define schemaVLen(s) ((s)->vlen)
|
||||
#define schemaColAt(s, i) ((s)->columns + i)
|
||||
#define tdFreeSchema(s) taosMemoryFreeClear((s))
|
||||
|
||||
STSchema *tdDupSchema(const STSchema *pSchema);
|
||||
int32_t tdEncodeSchema(void **buf, STSchema *pSchema);
|
||||
void *tdDecodeSchema(void *buf, STSchema **pRSchema);
|
||||
|
||||
static FORCE_INLINE int32_t comparColId(const void *key1, const void *key2) {
|
||||
if (*(int16_t *)key1 > ((STColumn *)key2)->colId) {
|
||||
return 1;
|
||||
} else if (*(int16_t *)key1 < ((STColumn *)key2)->colId) {
|
||||
return -1;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
static FORCE_INLINE STColumn *tdGetColOfID(STSchema *pSchema, int16_t colId) {
|
||||
void *ptr = bsearch(&colId, (void *)pSchema->columns, schemaNCols(pSchema), sizeof(STColumn), comparColId);
|
||||
if (ptr == NULL) return NULL;
|
||||
return (STColumn *)ptr;
|
||||
}
|
||||
|
||||
// ----------------- SCHEMA BUILDER DEFINITION
|
||||
typedef struct {
|
||||
int32_t tCols;
|
||||
|
@ -299,141 +259,6 @@ void tdResetTSchemaBuilder(STSchemaBuilder *pBuilder, schema_ver_t version)
|
|||
int32_t tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int8_t flags, col_id_t colId, col_bytes_t bytes);
|
||||
STSchema *tdGetSchemaFromBuilder(STSchemaBuilder *pBuilder);
|
||||
|
||||
// ----------------- Semantic timestamp key definition
|
||||
// typedef uint64_t TKEY;
|
||||
#define TKEY TSKEY
|
||||
|
||||
#define TKEY_INVALID UINT64_MAX
|
||||
#define TKEY_NULL TKEY_INVALID
|
||||
#define TKEY_NEGATIVE_FLAG (((TKEY)1) << 63)
|
||||
#define TKEY_VALUE_FILTER (~(TKEY_NEGATIVE_FLAG))
|
||||
|
||||
#define TKEY_IS_NEGATIVE(tkey) (((tkey)&TKEY_NEGATIVE_FLAG) != 0)
|
||||
#define TKEY_IS_DELETED(tkey) (false)
|
||||
|
||||
#define tdGetTKEY(key) (key)
|
||||
#define tdGetKey(tskey) (tskey)
|
||||
|
||||
#define MIN_TS_KEY ((TSKEY)0x8000000000000001)
|
||||
#define MAX_TS_KEY ((TSKEY)0x7fffffffffffffff)
|
||||
|
||||
#define TD_TO_TKEY(key) tdGetTKEY(((key) < MIN_TS_KEY) ? MIN_TS_KEY : (((key) > MAX_TS_KEY) ? MAX_TS_KEY : key))
|
||||
|
||||
static FORCE_INLINE TKEY keyToTkey(TSKEY key) {
|
||||
TSKEY lkey = key;
|
||||
if (key > MAX_TS_KEY) {
|
||||
lkey = MAX_TS_KEY;
|
||||
} else if (key < MIN_TS_KEY) {
|
||||
lkey = MIN_TS_KEY;
|
||||
}
|
||||
|
||||
return tdGetTKEY(lkey);
|
||||
}
|
||||
|
||||
static FORCE_INLINE int32_t tkeyComparFn(const void *tkey1, const void *tkey2) {
|
||||
TSKEY key1 = tdGetKey(*(TKEY *)tkey1);
|
||||
TSKEY key2 = tdGetKey(*(TKEY *)tkey2);
|
||||
|
||||
if (key1 < key2) {
|
||||
return -1;
|
||||
} else if (key1 > key2) {
|
||||
return 1;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
// ----------------- Data column structure
|
||||
// SDataCol arrangement: data => bitmap => dataOffset
|
||||
typedef struct SDataCol {
|
||||
int8_t type; // column type
|
||||
uint8_t bitmap : 1; // 0: no bitmap if all rows are NORM, 1: has bitmap if has NULL/NORM rows
|
||||
uint8_t reserve : 7;
|
||||
int16_t colId; // column ID
|
||||
int32_t bytes; // column data bytes defined
|
||||
int32_t offset; // data offset in a SDataRow (including the header size)
|
||||
int32_t spaceSize; // Total space size for this column
|
||||
int32_t len; // column data length
|
||||
VarDataOffsetT *dataOff; // For binary and nchar data, the offset in the data column
|
||||
void *pData; // Actual data pointer
|
||||
void *pBitmap; // Bitmap pointer
|
||||
TSKEY ts; // only used in last NULL column
|
||||
} SDataCol;
|
||||
|
||||
#define isAllRowsNull(pCol) ((pCol)->len == 0)
|
||||
#define isAllRowsNone(pCol) ((pCol)->len == 0)
|
||||
static FORCE_INLINE void dataColReset(SDataCol *pDataCol) { pDataCol->len = 0; }
|
||||
|
||||
int32_t tdAllocMemForCol(SDataCol *pCol, int32_t maxPoints);
|
||||
|
||||
void dataColInit(SDataCol *pDataCol, STColumn *pCol, int32_t maxPoints);
|
||||
int32_t dataColAppendVal(SDataCol *pCol, const void *value, int32_t numOfRows, int32_t maxPoints);
|
||||
void *dataColSetOffset(SDataCol *pCol, int32_t nEle);
|
||||
|
||||
bool isNEleNull(SDataCol *pCol, int32_t nEle);
|
||||
|
||||
typedef struct {
|
||||
col_id_t maxCols; // max number of columns
|
||||
col_id_t numOfCols; // Total number of cols
|
||||
int32_t maxPoints; // max number of points
|
||||
int32_t numOfRows;
|
||||
int32_t bitmapMode : 1; // default is 0(2 bits), otherwise 1(1 bit)
|
||||
int32_t sversion : 31; // TODO: set sversion(not used yet)
|
||||
SDataCol *cols;
|
||||
} SDataCols;
|
||||
|
||||
static FORCE_INLINE bool tdDataColsIsBitmapI(SDataCols *pCols) { return pCols->bitmapMode != TSDB_BITMODE_DEFAULT; }
|
||||
static FORCE_INLINE void tdDataColsSetBitmapI(SDataCols *pCols) { pCols->bitmapMode = TSDB_BITMODE_ONE_BIT; }
|
||||
static FORCE_INLINE bool tdIsBitmapModeI(int8_t bitmapMode) { return bitmapMode != TSDB_BITMODE_DEFAULT; }
|
||||
|
||||
#define keyCol(pCols) (&((pCols)->cols[0])) // Key column
|
||||
#define dataColsTKeyAt(pCols, idx) ((TKEY *)(keyCol(pCols)->pData))[(idx)] // the idx row of column-wised data
|
||||
#define dataColsKeyAt(pCols, idx) tdGetKey(dataColsTKeyAt(pCols, idx))
|
||||
static FORCE_INLINE TKEY dataColsTKeyFirst(SDataCols *pCols) {
|
||||
if (pCols->numOfRows) {
|
||||
return dataColsTKeyAt(pCols, 0);
|
||||
} else {
|
||||
return TKEY_INVALID;
|
||||
}
|
||||
}
|
||||
|
||||
static FORCE_INLINE TSKEY dataColsKeyAtRow(SDataCols *pCols, int32_t row) {
|
||||
assert(row < pCols->numOfRows);
|
||||
return dataColsKeyAt(pCols, row);
|
||||
}
|
||||
|
||||
static FORCE_INLINE TSKEY dataColsKeyFirst(SDataCols *pCols) {
|
||||
if (pCols->numOfRows) {
|
||||
return dataColsKeyAt(pCols, 0);
|
||||
} else {
|
||||
return TSDB_DATA_TIMESTAMP_NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static FORCE_INLINE TKEY dataColsTKeyLast(SDataCols *pCols) {
|
||||
if (pCols->numOfRows) {
|
||||
return dataColsTKeyAt(pCols, pCols->numOfRows - 1);
|
||||
} else {
|
||||
return TKEY_INVALID;
|
||||
}
|
||||
}
|
||||
|
||||
static FORCE_INLINE TSKEY dataColsKeyLast(SDataCols *pCols) {
|
||||
if (pCols->numOfRows) {
|
||||
return dataColsKeyAt(pCols, pCols->numOfRows - 1);
|
||||
} else {
|
||||
return TSDB_DATA_TIMESTAMP_NULL;
|
||||
}
|
||||
}
|
||||
|
||||
SDataCols *tdNewDataCols(int32_t maxCols, int32_t maxRows);
|
||||
void tdResetDataCols(SDataCols *pCols);
|
||||
int32_t tdInitDataCols(SDataCols *pCols, STSchema *pSchema);
|
||||
SDataCols *tdDupDataCols(SDataCols *pCols, bool keepData);
|
||||
SDataCols *tdFreeDataCols(SDataCols *pCols);
|
||||
int32_t tdMergeDataCols(SDataCols *target, SDataCols *source, int32_t rowsToMerge, int32_t *pOffset, bool update,
|
||||
TDRowVerT maxVer);
|
||||
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
@ -168,7 +168,7 @@ typedef struct {
|
|||
|
||||
// N.B. If without STSchema, getExtendedRowSize() is used to get the rowMaxBytes and
|
||||
// (int32_t)ceil((double)nCols/TD_VTYPE_PARTS) should be added if TD_SUPPORT_BITMAP defined.
|
||||
#define TD_ROW_MAX_BYTES_FROM_SCHEMA(s) (schemaTLen(s) + TD_ROW_HEAD_LEN)
|
||||
#define TD_ROW_MAX_BYTES_FROM_SCHEMA(s) ((s)->tlen + TD_ROW_HEAD_LEN)
|
||||
|
||||
#define TD_ROW_SET_INFO(r, i) (TD_ROW_INFO(r) = (i))
|
||||
#define TD_ROW_SET_TYPE(r, t) (TD_ROW_TYPE(r) = (t))
|
||||
|
@ -223,9 +223,10 @@ int32_t tdSetBitmapValTypeN(void *pBitmap, int16_t nEle, TDR
|
|||
static FORCE_INLINE int32_t tdGetBitmapValType(const void *pBitmap, int16_t colIdx, TDRowValT *pValType,
|
||||
int8_t bitmapMode);
|
||||
bool tdIsBitmapBlkNorm(const void *pBitmap, int32_t numOfBits, int8_t bitmapMode);
|
||||
int32_t tdAppendValToDataCol(SDataCol *pCol, TDRowValT valType, const void *val, int32_t numOfRows, int32_t maxPoints,
|
||||
int8_t bitmapMode, bool isMerge);
|
||||
int32_t tdAppendSTSRowToDataCol(STSRow *pRow, STSchema *pSchema, SDataCols *pCols, bool isMerge);
|
||||
// int32_t tdAppendValToDataCol(SDataCol *pCol, TDRowValT valType, const void *val, int32_t numOfRows, int32_t
|
||||
// maxPoints,
|
||||
// int8_t bitmapMode, bool isMerge);
|
||||
// int32_t tdAppendSTSRowToDataCol(STSRow *pRow, STSchema *pSchema, SDataCols *pCols, bool isMerge);
|
||||
|
||||
int32_t tdGetBitmapValTypeII(const void *pBitmap, int16_t colIdx, TDRowValT *pValType);
|
||||
int32_t tdSetBitmapValTypeI(void *pBitmap, int16_t colIdx, TDRowValT valType);
|
||||
|
@ -318,12 +319,9 @@ bool tdSTSRowGetVal(STSRowIter *pIter, col_id_t colId, col_type_t colType, SC
|
|||
bool tdGetTpRowDataOfCol(STSRowIter *pIter, col_type_t colType, int32_t offset, SCellVal *pVal);
|
||||
bool tdGetKvRowValOfColEx(STSRowIter *pIter, col_id_t colId, col_type_t colType, col_id_t *nIdx, SCellVal *pVal);
|
||||
bool tdSTSRowIterNext(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCellVal *pVal);
|
||||
STSRow *mergeTwoRows(void *buffer, STSRow *row1, STSRow *row2, STSchema *pSchema1, STSchema *pSchema2);
|
||||
int32_t tdGetColDataOfRow(SCellVal *pVal, SDataCol *pCol, int32_t row, int8_t bitmapMode);
|
||||
bool tdSTpRowGetVal(STSRow *pRow, col_id_t colId, col_type_t colType, int32_t flen, uint32_t offset, col_id_t colIdx,
|
||||
SCellVal *pVal);
|
||||
bool tdSKvRowGetVal(STSRow *pRow, col_id_t colId, col_id_t colIdx, SCellVal *pVal);
|
||||
int32_t dataColGetNEleLen(SDataCol *pDataCol, int32_t rows, int8_t bitmapMode);
|
||||
void tdSCellValPrint(SCellVal *pVal, int8_t colType);
|
||||
void tdSRowPrint(STSRow *row, STSchema *pSchema, const char *tag);
|
||||
|
||||
|
|
|
@ -52,6 +52,7 @@ SMnode *mndOpen(const char *path, const SMnodeOpt *pOption);
|
|||
* @param pMnode The mnode object to close.
|
||||
*/
|
||||
void mndClose(SMnode *pMnode);
|
||||
void mndPreClose(SMnode *pMnode);
|
||||
|
||||
/**
|
||||
* @brief Start mnode
|
||||
|
|
|
@ -42,25 +42,28 @@ typedef struct SReadHandle {
|
|||
bool initTqReader;
|
||||
} SReadHandle;
|
||||
|
||||
// in queue mode, data streams are seperated by msg
|
||||
typedef enum {
|
||||
OPTR_EXEC_MODEL_BATCH = 0x1,
|
||||
OPTR_EXEC_MODEL_STREAM = 0x2,
|
||||
OPTR_EXEC_MODEL_QUEUE = 0x3,
|
||||
} EOPTR_EXEC_MODEL;
|
||||
|
||||
/**
|
||||
* Create the exec task for streaming mode
|
||||
* Create the exec task for stream mode
|
||||
* @param pMsg
|
||||
* @param streamReadHandle
|
||||
* @param SReadHandle
|
||||
* @return
|
||||
*/
|
||||
qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers);
|
||||
|
||||
/**
|
||||
* Switch the stream scan to snapshot mode
|
||||
* @param tinfo
|
||||
* Create the exec task for queue mode
|
||||
* @param pMsg
|
||||
* @param SReadHandle
|
||||
* @return
|
||||
*/
|
||||
int32_t qStreamScanSnapshot(qTaskInfo_t tinfo);
|
||||
qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers);
|
||||
|
||||
/**
|
||||
* Set the input data block for the stream scan.
|
||||
|
@ -111,7 +114,7 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
|
|||
* @return
|
||||
*/
|
||||
int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* tableName, int32_t* sversion,
|
||||
int32_t* tversion);
|
||||
int32_t* tversion);
|
||||
|
||||
/**
|
||||
* The main task execution function, including query on both table and multiple tables,
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
*/
|
||||
|
||||
#include "os.h"
|
||||
#include "query.h"
|
||||
#include "tdatablock.h"
|
||||
#include "tmsg.h"
|
||||
#include "tmsgcb.h"
|
||||
|
@ -119,6 +120,7 @@ static FORCE_INLINE void* streamQueueCurItem(SStreamQueue* queue) { return queue
|
|||
static FORCE_INLINE void* streamQueueNextItem(SStreamQueue* queue) {
|
||||
int8_t dequeueFlag = atomic_exchange_8(&queue->status, STREAM_QUEUE__PROCESSING);
|
||||
if (dequeueFlag == STREAM_QUEUE__FAILED) {
|
||||
ASSERT(0);
|
||||
ASSERT(queue->qItem != NULL);
|
||||
return streamQueueCurItem(queue);
|
||||
} else {
|
||||
|
@ -305,6 +307,7 @@ static FORCE_INLINE int32_t streamTaskInput(SStreamTask* pTask, SStreamQueueItem
|
|||
atomic_store_8(&pTask->inputStatus, TASK_INPUT_STATUS__FAILED);
|
||||
return -1;
|
||||
}
|
||||
qInfo("task %d %p submit enqueue %p %p %p", pTask->taskId, pTask, pItem, pSubmitClone, pSubmitClone->data);
|
||||
taosWriteQitem(pTask->inputQueue->queue, pSubmitClone);
|
||||
} else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE) {
|
||||
taosWriteQitem(pTask->inputQueue->queue, pItem);
|
||||
|
|
|
@ -94,7 +94,7 @@ void taosPrintLongString(const char *flags, ELogLevel level, int32_t dflag, cons
|
|||
#define pError(...) { taosPrintLog("APP ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }
|
||||
#define pPrint(...) { taosPrintLog("APP ", DEBUG_INFO, 255, __VA_ARGS__); }
|
||||
// clang-format on
|
||||
#define BUF_PAGE_DEBUG
|
||||
//#define BUF_PAGE_DEBUG
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -44,6 +44,8 @@ typedef struct STaosQset STaosQset;
|
|||
typedef struct STaosQall STaosQall;
|
||||
typedef struct {
|
||||
void *ahandle;
|
||||
void *fp;
|
||||
void *queue;
|
||||
int32_t workerId;
|
||||
int32_t threadNum;
|
||||
int64_t timestamp;
|
||||
|
@ -65,6 +67,7 @@ void taosFreeQitem(void *pItem);
|
|||
void taosWriteQitem(STaosQueue *queue, void *pItem);
|
||||
int32_t taosReadQitem(STaosQueue *queue, void **ppItem);
|
||||
bool taosQueueEmpty(STaosQueue *queue);
|
||||
void taosUpdateItemSize(STaosQueue *queue, int32_t items);
|
||||
int32_t taosQueueItemSize(STaosQueue *queue);
|
||||
int64_t taosQueueMemorySize(STaosQueue *queue);
|
||||
|
||||
|
@ -81,8 +84,8 @@ int32_t taosAddIntoQset(STaosQset *qset, STaosQueue *queue, void *ahandle);
|
|||
void taosRemoveFromQset(STaosQset *qset, STaosQueue *queue);
|
||||
int32_t taosGetQueueNumber(STaosQset *qset);
|
||||
|
||||
int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, int64_t *ts, void **ahandle, FItem *itemFp);
|
||||
int32_t taosReadAllQitemsFromQset(STaosQset *qset, STaosQall *qall, void **ahandle, FItems *itemsFp);
|
||||
int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, SQueueInfo *qinfo);
|
||||
int32_t taosReadAllQitemsFromQset(STaosQset *qset, STaosQall *qall, SQueueInfo *qinfo);
|
||||
void taosResetQsetThread(STaosQset *qset, void *pItem);
|
||||
|
||||
extern int64_t tsRpcQueueMemoryAllowed;
|
||||
|
|
|
@ -169,6 +169,7 @@ typedef struct SReqResultInfo {
|
|||
uint32_t numOfRows;
|
||||
uint64_t totalRows;
|
||||
uint32_t current;
|
||||
bool localResultFetched;
|
||||
bool completed;
|
||||
int32_t precision;
|
||||
bool convertUcs4;
|
||||
|
@ -222,8 +223,8 @@ typedef struct SRequestObj {
|
|||
SArray* tableList;
|
||||
SQueryExecMetric metric;
|
||||
SRequestSendRecvBody body;
|
||||
bool stableQuery; // todo refactor
|
||||
bool validateOnly; // todo refactor
|
||||
bool stableQuery; // todo refactor
|
||||
bool validateOnly; // todo refactor
|
||||
|
||||
bool killed;
|
||||
uint32_t prevCode; // previous error code: todo refactor, add update flag for catalog
|
||||
|
@ -324,7 +325,8 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC
|
|||
|
||||
int32_t getPlan(SRequestObj* pRequest, SQuery* pQuery, SQueryPlan** pPlan, SArray* pNodeList);
|
||||
|
||||
int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param, bool validateSql, SRequestObj** pRequest);
|
||||
int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param, bool validateSql,
|
||||
SRequestObj** pRequest);
|
||||
|
||||
void taos_close_internal(void* taos);
|
||||
|
||||
|
@ -358,9 +360,6 @@ int32_t removeMeta(STscObj* pTscObj, SArray* tbList); // todo move to clie
|
|||
int32_t handleAlterTbExecRes(void* res, struct SCatalog* pCatalog); // todo move to xxx
|
||||
bool qnodeRequired(SRequestObj* pRequest);
|
||||
|
||||
void initTscQhandle();
|
||||
void cleanupTscQhandle();
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -35,22 +35,10 @@ SAppInfo appInfo;
|
|||
int32_t clientReqRefPool = -1;
|
||||
int32_t clientConnRefPool = -1;
|
||||
|
||||
void *tscQhandle = NULL;
|
||||
|
||||
static TdThreadOnce tscinit = PTHREAD_ONCE_INIT;
|
||||
volatile int32_t tscInitRes = 0;
|
||||
|
||||
void initTscQhandle() {
|
||||
// init handle
|
||||
tscQhandle = taosInitScheduler(4096, 5, "tscQ");
|
||||
}
|
||||
|
||||
void cleanupTscQhandle() {
|
||||
// destroy handle
|
||||
taosCleanUpScheduler(tscQhandle);
|
||||
}
|
||||
|
||||
static int32_t registerRequest(SRequestObj *pRequest, STscObj* pTscObj) {
|
||||
static int32_t registerRequest(SRequestObj *pRequest, STscObj *pTscObj) {
|
||||
// connection has been released already, abort creating request.
|
||||
pRequest->self = taosAddRef(clientReqRefPool, pRequest);
|
||||
|
||||
|
@ -72,7 +60,7 @@ static int32_t registerRequest(SRequestObj *pRequest, STscObj* pTscObj) {
|
|||
static void deregisterRequest(SRequestObj *pRequest) {
|
||||
assert(pRequest != NULL);
|
||||
|
||||
STscObj * pTscObj = pRequest->pTscObj;
|
||||
STscObj *pTscObj = pRequest->pTscObj;
|
||||
SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;
|
||||
|
||||
int32_t currentInst = atomic_sub_fetch_64((int64_t *)&pActivity->currentRequests, 1);
|
||||
|
@ -97,7 +85,8 @@ void closeTransporter(SAppInstInfo *pAppInfo) {
|
|||
|
||||
static bool clientRpcRfp(int32_t code, tmsg_t msgType) {
|
||||
if (NEED_REDIRECT_ERROR(code)) {
|
||||
if (msgType == TDMT_SCH_QUERY || msgType == TDMT_SCH_MERGE_QUERY || msgType == TDMT_SCH_FETCH || msgType == TDMT_SCH_MERGE_FETCH) {
|
||||
if (msgType == TDMT_SCH_QUERY || msgType == TDMT_SCH_MERGE_QUERY || msgType == TDMT_SCH_FETCH ||
|
||||
msgType == TDMT_SCH_MERGE_FETCH) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
|
@ -251,7 +240,7 @@ void *createRequest(uint64_t connId, int32_t type) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
STscObj* pTscObj = acquireTscObj(connId);
|
||||
STscObj *pTscObj = acquireTscObj(connId);
|
||||
if (pTscObj == NULL) {
|
||||
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
||||
return NULL;
|
||||
|
@ -348,7 +337,6 @@ void taos_init_imp(void) {
|
|||
// In the APIs of other program language, taos_cleanup is not available yet.
|
||||
// So, to make sure taos_cleanup will be invoked to clean up the allocated resource to suppress the valgrind warning.
|
||||
atexit(taos_cleanup);
|
||||
initTscQhandle();
|
||||
errno = TSDB_CODE_SUCCESS;
|
||||
taosSeedRand(taosGetTimestampSec());
|
||||
|
||||
|
@ -407,7 +395,7 @@ int taos_options_imp(TSDB_OPTION option, const char *str) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
SConfig * pCfg = taosGetCfg();
|
||||
SConfig *pCfg = taosGetCfg();
|
||||
SConfigItem *pItem = NULL;
|
||||
|
||||
switch (option) {
|
||||
|
|
|
@ -1274,8 +1274,8 @@ typedef struct SchedArg {
|
|||
SEpSet* pEpset;
|
||||
} SchedArg;
|
||||
|
||||
void doProcessMsgFromServer(SSchedMsg* schedMsg) {
|
||||
SchedArg* arg = (SchedArg*)schedMsg->ahandle;
|
||||
int32_t doProcessMsgFromServer(void* param) {
|
||||
SchedArg* arg = (SchedArg*)param;
|
||||
SRpcMsg* pMsg = &arg->msg;
|
||||
SEpSet* pEpSet = arg->pEpset;
|
||||
|
||||
|
@ -1328,11 +1328,10 @@ void doProcessMsgFromServer(SSchedMsg* schedMsg) {
|
|||
rpcFreeCont(pMsg->pCont);
|
||||
destroySendMsgInfo(pSendInfo);
|
||||
taosMemoryFree(arg);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
|
||||
SSchedMsg schedMsg = {0};
|
||||
|
||||
SEpSet* tEpSet = NULL;
|
||||
if (pEpSet != NULL) {
|
||||
tEpSet = taosMemoryCalloc(1, sizeof(SEpSet));
|
||||
|
@ -1343,9 +1342,7 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
|
|||
arg->msg = *pMsg;
|
||||
arg->pEpset = tEpSet;
|
||||
|
||||
schedMsg.fp = doProcessMsgFromServer;
|
||||
schedMsg.ahandle = arg;
|
||||
taosScheduleTask(tscQhandle, &schedMsg);
|
||||
taosAsyncExec(doProcessMsgFromServer, arg, NULL);
|
||||
}
|
||||
|
||||
TAOS* taos_connect_auth(const char* ip, const char* user, const char* auth, const char* db, uint16_t port) {
|
||||
|
@ -1905,6 +1902,10 @@ int32_t appendTbToReq(SArray* pList, int32_t pos1, int32_t len1, int32_t pos2, i
|
|||
tbLen = len1;
|
||||
}
|
||||
|
||||
if (dbLen <= 0 || tbLen <= 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (tNameSetDbName(&name, acctId, dbName, dbLen)) {
|
||||
return -1;
|
||||
}
|
||||
|
|
|
@ -72,7 +72,6 @@ void taos_cleanup(void) {
|
|||
catalogDestroy();
|
||||
schedulerDestroy();
|
||||
|
||||
cleanupTscQhandle();
|
||||
rpcCleanup();
|
||||
tscInfo("all local resources released");
|
||||
taosCleanupCfg();
|
||||
|
@ -242,7 +241,7 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) {
|
|||
#endif
|
||||
|
||||
} else if (TD_RES_TMQ(res)) {
|
||||
SMqRspObj * msg = ((SMqRspObj *)res);
|
||||
SMqRspObj *msg = ((SMqRspObj *)res);
|
||||
SReqResultInfo *pResultInfo;
|
||||
if (msg->resIter == -1) {
|
||||
pResultInfo = tmqGetNextResInfo(res, true);
|
||||
|
@ -418,7 +417,7 @@ int taos_affected_rows(TAOS_RES *res) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
SRequestObj * pRequest = (SRequestObj *)res;
|
||||
SRequestObj *pRequest = (SRequestObj *)res;
|
||||
SReqResultInfo *pResInfo = &pRequest->body.resInfo;
|
||||
return pResInfo->numOfRows;
|
||||
}
|
||||
|
@ -601,7 +600,7 @@ int *taos_get_column_data_offset(TAOS_RES *res, int columnIndex) {
|
|||
}
|
||||
|
||||
SReqResultInfo *pResInfo = tscGetCurResInfo(res);
|
||||
TAOS_FIELD * pField = &pResInfo->userFields[columnIndex];
|
||||
TAOS_FIELD *pField = &pResInfo->userFields[columnIndex];
|
||||
if (!IS_VAR_DATA_TYPE(pField->type)) {
|
||||
return 0;
|
||||
}
|
||||
|
@ -645,8 +644,8 @@ const char *taos_get_server_info(TAOS *taos) {
|
|||
typedef struct SqlParseWrapper {
|
||||
SParseContext *pCtx;
|
||||
SCatalogReq catalogReq;
|
||||
SRequestObj * pRequest;
|
||||
SQuery * pQuery;
|
||||
SRequestObj *pRequest;
|
||||
SQuery *pQuery;
|
||||
} SqlParseWrapper;
|
||||
|
||||
static void destorySqlParseWrapper(SqlParseWrapper *pWrapper) {
|
||||
|
@ -665,8 +664,8 @@ static void destorySqlParseWrapper(SqlParseWrapper *pWrapper) {
|
|||
|
||||
void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) {
|
||||
SqlParseWrapper *pWrapper = (SqlParseWrapper *)param;
|
||||
SQuery * pQuery = pWrapper->pQuery;
|
||||
SRequestObj * pRequest = pWrapper->pRequest;
|
||||
SQuery *pQuery = pWrapper->pQuery;
|
||||
SRequestObj *pRequest = pWrapper->pRequest;
|
||||
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
code = qAnalyseSqlSemantic(pWrapper->pCtx, &pWrapper->catalogReq, pResultMeta, pQuery);
|
||||
|
@ -684,7 +683,8 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) {
|
|||
|
||||
destorySqlParseWrapper(pWrapper);
|
||||
|
||||
tscDebug("0x%"PRIx64" analysis semantics completed, start async query, reqId:0x%"PRIx64, pRequest->self, pRequest->requestId);
|
||||
tscDebug("0x%" PRIx64 " analysis semantics completed, start async query, reqId:0x%" PRIx64, pRequest->self,
|
||||
pRequest->requestId);
|
||||
launchAsyncQuery(pRequest, pQuery, pResultMeta);
|
||||
} else {
|
||||
destorySqlParseWrapper(pWrapper);
|
||||
|
@ -705,7 +705,7 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) {
|
|||
}
|
||||
|
||||
void taos_query_a(TAOS *taos, const char *sql, __taos_async_fn_t fp, void *param) {
|
||||
int64_t connId = *(int64_t*)taos;
|
||||
int64_t connId = *(int64_t *)taos;
|
||||
taosAsyncQueryImpl(connId, sql, fp, param, false);
|
||||
}
|
||||
|
||||
|
@ -739,7 +739,7 @@ int32_t createParseContext(const SRequestObj *pRequest, SParseContext **pCxt) {
|
|||
|
||||
void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
|
||||
SParseContext *pCxt = NULL;
|
||||
STscObj * pTscObj = pRequest->pTscObj;
|
||||
STscObj *pTscObj = pRequest->pTscObj;
|
||||
int32_t code = 0;
|
||||
|
||||
if (pRequest->retry++ > REQUEST_TOTAL_EXEC_TIMES) {
|
||||
|
@ -852,23 +852,33 @@ void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {
|
|||
}
|
||||
|
||||
// all data has returned to App already, no need to try again
|
||||
if ((pResultInfo->pData == NULL || pResultInfo->current >= pResultInfo->numOfRows) && pResultInfo->completed) {
|
||||
if (pResultInfo->completed && (pRequest->body.queryJob != 0)) {
|
||||
pResultInfo->numOfRows = 0;
|
||||
pRequest->body.fetchFp(param, pRequest, pResultInfo->numOfRows);
|
||||
return;
|
||||
}
|
||||
|
||||
// it is a local executed query, no need to do async fetch
|
||||
if (pResultInfo->current < pResultInfo->numOfRows && pRequest->body.queryJob == 0) {
|
||||
pRequest->body.fetchFp(param, pRequest, pResultInfo->numOfRows);
|
||||
if (pRequest->body.queryJob == 0) {
|
||||
ASSERT(pResultInfo->completed && pResultInfo->numOfRows >= 0);
|
||||
if (pResultInfo->localResultFetched) {
|
||||
pResultInfo->numOfRows = 0;
|
||||
pResultInfo->current = 0;
|
||||
pRequest->body.fetchFp(param, pRequest, pResultInfo->numOfRows);
|
||||
} else {
|
||||
pResultInfo->localResultFetched = true;
|
||||
pRequest->body.fetchFp(param, pRequest, pResultInfo->numOfRows);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
SSchedulerReq req = {
|
||||
.syncReq = false,
|
||||
.fetchFp = fetchCallback,
|
||||
.cbParam = pRequest,
|
||||
.syncReq = false,
|
||||
.fetchFp = fetchCallback,
|
||||
.cbParam = pRequest,
|
||||
};
|
||||
|
||||
schedulerFetchRows(pRequest->body.queryJob, &req);
|
||||
}
|
||||
|
||||
|
@ -876,14 +886,14 @@ void taos_fetch_raw_block_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {
|
|||
ASSERT(res != NULL && fp != NULL);
|
||||
ASSERT(TD_RES_QUERY(res));
|
||||
|
||||
SRequestObj *pRequest = res;
|
||||
SRequestObj *pRequest = res;
|
||||
SReqResultInfo *pResultInfo = &pRequest->body.resInfo;
|
||||
|
||||
// set the current block is all consumed
|
||||
pResultInfo->current = pResultInfo->numOfRows;
|
||||
pResultInfo->convertUcs4 = false;
|
||||
|
||||
taos_fetch_rows_a(res, fp, param);
|
||||
// it is a local executed query, no need to do async fetch
|
||||
taos_fetch_rows_a(pRequest, fp, param);
|
||||
}
|
||||
|
||||
const void *taos_get_raw_block(TAOS_RES *res) {
|
||||
|
@ -918,7 +928,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
|
|||
int64_t connId = *(int64_t *)taos;
|
||||
const int32_t MAX_TABLE_NAME_LENGTH = 12 * 1024 * 1024; // 12MB list
|
||||
int32_t code = 0;
|
||||
SRequestObj * pRequest = NULL;
|
||||
SRequestObj *pRequest = NULL;
|
||||
SCatalogReq catalogReq = {0};
|
||||
|
||||
if (NULL == tableNameList) {
|
||||
|
@ -940,7 +950,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
|
|||
goto _return;
|
||||
}
|
||||
|
||||
STscObj* pTscObj = pRequest->pTscObj;
|
||||
STscObj *pTscObj = pRequest->pTscObj;
|
||||
code = transferTableNameList(tableNameList, pTscObj->acctId, pTscObj->db, &catalogReq.pTableMeta);
|
||||
if (code) {
|
||||
goto _return;
|
||||
|
@ -962,7 +972,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
|
|||
goto _return;
|
||||
}
|
||||
|
||||
SSyncQueryParam* pParam = pRequest->body.param;
|
||||
SSyncQueryParam *pParam = pRequest->body.param;
|
||||
tsem_wait(&pParam->sem);
|
||||
|
||||
_return:
|
||||
|
|
|
@ -179,7 +179,6 @@ int32_t processUseDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
|
|||
if (code != 0) {
|
||||
terrno = code;
|
||||
if (output.dbVgroup) taosHashCleanup(output.dbVgroup->vgHash);
|
||||
taosMemoryFreeClear(output.dbVgroup);
|
||||
|
||||
tscError("0x%" PRIx64 " failed to build use db output since %s", pRequest->requestId, terrstr());
|
||||
} else if (output.dbVgroup && output.dbVgroup->vgHash) {
|
||||
|
@ -189,12 +188,14 @@ int32_t processUseDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
|
|||
if (code1 != TSDB_CODE_SUCCESS) {
|
||||
tscWarn("catalogGetHandle failed, clusterId:%" PRIx64 ", error:%s", pRequest->pTscObj->pAppInfo->clusterId,
|
||||
tstrerror(code1));
|
||||
taosMemoryFreeClear(output.dbVgroup);
|
||||
} else {
|
||||
catalogUpdateDBVgInfo(pCatalog, output.db, output.dbId, output.dbVgroup);
|
||||
output.dbVgroup = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
taosMemoryFreeClear(output.dbVgroup);
|
||||
|
||||
tFreeSUsedbRsp(&usedbRsp);
|
||||
|
||||
char db[TSDB_DB_NAME_LEN] = {0};
|
||||
|
|
|
@ -1149,11 +1149,10 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
|
|||
tDecoderInit(&decoder, POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), pMsg->len - sizeof(SMqRspHead));
|
||||
tDecodeSMqDataRsp(&decoder, &pRspWrapper->dataRsp);
|
||||
memcpy(&pRspWrapper->dataRsp, pMsg->pData, sizeof(SMqRspHead));
|
||||
/*tDecodeSMqDataBlkRsp(POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), &pRspWrapper->dataRsp);*/
|
||||
} else {
|
||||
ASSERT(rspType == TMQ_MSG_TYPE__POLL_META_RSP);
|
||||
memcpy(&pRspWrapper->metaRsp, pMsg->pData, sizeof(SMqRspHead));
|
||||
tDecodeSMqMetaRsp(POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), &pRspWrapper->metaRsp);
|
||||
memcpy(&pRspWrapper->metaRsp, pMsg->pData, sizeof(SMqRspHead));
|
||||
}
|
||||
|
||||
taosMemoryFree(pMsg->pData);
|
||||
|
@ -2427,15 +2426,15 @@ static void destroyCreateTbReqBatch(void* data) {
|
|||
taosArrayDestroy(pTbBatch->req.pArray);
|
||||
}
|
||||
|
||||
static int32_t taosCreateTable(TAOS *taos, void *meta, int32_t metaLen){
|
||||
SVCreateTbBatchReq req = {0};
|
||||
SDecoder coder = {0};
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
SRequestObj *pRequest = NULL;
|
||||
SQuery *pQuery = NULL;
|
||||
SHashObj *pVgroupHashmap = NULL;
|
||||
static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
|
||||
SVCreateTbBatchReq req = {0};
|
||||
SDecoder coder = {0};
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
SRequestObj* pRequest = NULL;
|
||||
SQuery* pQuery = NULL;
|
||||
SHashObj* pVgroupHashmap = NULL;
|
||||
|
||||
code = buildRequest(*(int64_t*) taos, "", 0, NULL, false, &pRequest);
|
||||
code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto end;
|
||||
}
|
||||
|
@ -2455,8 +2454,8 @@ static int32_t taosCreateTable(TAOS *taos, void *meta, int32_t metaLen){
|
|||
|
||||
STscObj* pTscObj = pRequest->pTscObj;
|
||||
|
||||
SVCreateTbReq *pCreateReq = NULL;
|
||||
SCatalog* pCatalog = NULL;
|
||||
SVCreateTbReq* pCreateReq = NULL;
|
||||
SCatalog* pCatalog = NULL;
|
||||
code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto end;
|
||||
|
@ -2540,13 +2539,13 @@ static void destroyDropTbReqBatch(void* data) {
|
|||
taosArrayDestroy(pTbBatch->req.pArray);
|
||||
}
|
||||
|
||||
static int32_t taosDropTable(TAOS *taos, void *meta, int32_t metaLen){
|
||||
SVDropTbBatchReq req = {0};
|
||||
SDecoder coder = {0};
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
SRequestObj *pRequest = NULL;
|
||||
SQuery *pQuery = NULL;
|
||||
SHashObj *pVgroupHashmap = NULL;
|
||||
static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) {
|
||||
SVDropTbBatchReq req = {0};
|
||||
SDecoder coder = {0};
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
SRequestObj* pRequest = NULL;
|
||||
SQuery* pQuery = NULL;
|
||||
SHashObj* pVgroupHashmap = NULL;
|
||||
|
||||
code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
|
@ -2568,8 +2567,8 @@ static int32_t taosDropTable(TAOS *taos, void *meta, int32_t metaLen){
|
|||
|
||||
STscObj* pTscObj = pRequest->pTscObj;
|
||||
|
||||
SVDropTbReq *pDropReq = NULL;
|
||||
SCatalog *pCatalog = NULL;
|
||||
SVDropTbReq* pDropReq = NULL;
|
||||
SCatalog* pCatalog = NULL;
|
||||
code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto end;
|
||||
|
@ -2640,17 +2639,16 @@ end:
|
|||
return code;
|
||||
}
|
||||
|
||||
static int32_t taosAlterTable(TAOS *taos, void *meta, int32_t metaLen){
|
||||
SVAlterTbReq req = {0};
|
||||
SDecoder coder = {0};
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
SRequestObj *pRequest = NULL;
|
||||
SQuery *pQuery = NULL;
|
||||
SArray *pArray = NULL;
|
||||
SVgDataBlocks *pVgData = NULL;
|
||||
static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) {
|
||||
SVAlterTbReq req = {0};
|
||||
SDecoder coder = {0};
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
SRequestObj* pRequest = NULL;
|
||||
SQuery* pQuery = NULL;
|
||||
SArray* pArray = NULL;
|
||||
SVgDataBlocks* pVgData = NULL;
|
||||
|
||||
|
||||
code = buildRequest(*(int64_t*) taos, "", 0, NULL, false, &pRequest);
|
||||
code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto end;
|
||||
}
|
||||
|
|
|
@ -320,7 +320,9 @@ int32_t colDataAssign(SColumnInfoData* pColumnInfoData, const SColumnInfoData* p
|
|||
memcpy(pColumnInfoData->pData, pSource->pData, pSource->varmeta.length);
|
||||
} else {
|
||||
memcpy(pColumnInfoData->nullbitmap, pSource->nullbitmap, BitmapLen(numOfRows));
|
||||
memcpy(pColumnInfoData->pData, pSource->pData, pSource->info.bytes * numOfRows);
|
||||
if (pSource->pData) {
|
||||
memcpy(pColumnInfoData->pData, pSource->pData, pSource->info.bytes * numOfRows);
|
||||
}
|
||||
}
|
||||
|
||||
pColumnInfoData->hasNull = pSource->hasNull;
|
||||
|
@ -1736,56 +1738,57 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf)
|
|||
int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock);
|
||||
int32_t rows = pDataBlock->info.rows;
|
||||
int32_t len = 0;
|
||||
len += snprintf(dumpBuf + len, size - len, "\n%s |block type %d |child id %d|group id:%" PRIu64 "|\n", flag,
|
||||
(int32_t)pDataBlock->info.type, pDataBlock->info.childId, pDataBlock->info.groupId);
|
||||
len += snprintf(dumpBuf + len, size - len, "\n%s |block type %d |child id %d|group id:%" PRIu64 "| uid:%ld\n", flag,
|
||||
(int32_t)pDataBlock->info.type, pDataBlock->info.childId, pDataBlock->info.groupId,
|
||||
pDataBlock->info.uid);
|
||||
if (len >= size - 1) return dumpBuf;
|
||||
|
||||
for (int32_t j = 0; j < rows; j++) {
|
||||
len += snprintf(dumpBuf + len, size - len, "%s |", flag);
|
||||
if (len >= size -1) return dumpBuf;
|
||||
if (len >= size - 1) return dumpBuf;
|
||||
|
||||
for (int32_t k = 0; k < colNum; k++) {
|
||||
SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, k);
|
||||
void* var = POINTER_SHIFT(pColInfoData->pData, j * pColInfoData->info.bytes);
|
||||
if (colDataIsNull(pColInfoData, rows, j, NULL) || !pColInfoData->pData) {
|
||||
len += snprintf(dumpBuf + len, size - len, " %15s |", "NULL");
|
||||
if (len >= size -1) return dumpBuf;
|
||||
if (len >= size - 1) return dumpBuf;
|
||||
continue;
|
||||
}
|
||||
switch (pColInfoData->info.type) {
|
||||
case TSDB_DATA_TYPE_TIMESTAMP:
|
||||
formatTimestamp(pBuf, *(uint64_t*)var, TSDB_TIME_PRECISION_MILLI);
|
||||
len += snprintf(dumpBuf + len, size - len, " %25s |", pBuf);
|
||||
if (len >= size -1) return dumpBuf;
|
||||
if (len >= size - 1) return dumpBuf;
|
||||
break;
|
||||
case TSDB_DATA_TYPE_INT:
|
||||
len += snprintf(dumpBuf + len, size - len, " %15d |", *(int32_t*)var);
|
||||
if (len >= size -1) return dumpBuf;
|
||||
if (len >= size - 1) return dumpBuf;
|
||||
break;
|
||||
case TSDB_DATA_TYPE_UINT:
|
||||
len += snprintf(dumpBuf + len, size - len, " %15u |", *(uint32_t*)var);
|
||||
if (len >= size -1) return dumpBuf;
|
||||
if (len >= size - 1) return dumpBuf;
|
||||
break;
|
||||
case TSDB_DATA_TYPE_BIGINT:
|
||||
len += snprintf(dumpBuf + len, size - len, " %15ld |", *(int64_t*)var);
|
||||
if (len >= size -1) return dumpBuf;
|
||||
if (len >= size - 1) return dumpBuf;
|
||||
break;
|
||||
case TSDB_DATA_TYPE_UBIGINT:
|
||||
len += snprintf(dumpBuf + len, size - len, " %15lu |", *(uint64_t*)var);
|
||||
if (len >= size -1) return dumpBuf;
|
||||
if (len >= size - 1) return dumpBuf;
|
||||
break;
|
||||
case TSDB_DATA_TYPE_FLOAT:
|
||||
len += snprintf(dumpBuf + len, size - len, " %15f |", *(float*)var);
|
||||
if (len >= size -1) return dumpBuf;
|
||||
if (len >= size - 1) return dumpBuf;
|
||||
break;
|
||||
case TSDB_DATA_TYPE_DOUBLE:
|
||||
len += snprintf(dumpBuf + len, size - len, " %15lf |", *(double*)var);
|
||||
if (len >= size -1) return dumpBuf;
|
||||
if (len >= size - 1) return dumpBuf;
|
||||
break;
|
||||
}
|
||||
}
|
||||
len += snprintf(dumpBuf + len, size - len, "\n");
|
||||
if (len >= size -1) return dumpBuf;
|
||||
if (len >= size - 1) return dumpBuf;
|
||||
}
|
||||
len += snprintf(dumpBuf + len, size - len, "%s |end\n", flag);
|
||||
return dumpBuf;
|
||||
|
|
|
@ -175,7 +175,8 @@ static void setBitMap(uint8_t *pb, uint8_t v, int32_t idx, uint8_t flags) {
|
|||
} while (0)
|
||||
|
||||
int32_t tTSRowNew(STSRowBuilder *pBuilder, SArray *pArray, STSchema *pTSchema, STSRow2 **ppRow) {
|
||||
int32_t code = 0;
|
||||
int32_t code = 0;
|
||||
#if 0
|
||||
STColumn *pTColumn;
|
||||
SColVal *pColVal;
|
||||
int32_t nColVal = taosArrayGetSize(pArray);
|
||||
|
@ -462,30 +463,22 @@ int32_t tTSRowNew(STSRowBuilder *pBuilder, SArray *pArray, STSchema *pTSchema, S
|
|||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
_exit:
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t tTSRowClone(const STSRow2 *pRow, STSRow2 **ppRow) {
|
||||
int32_t code = 0;
|
||||
int32_t rLen;
|
||||
|
||||
(*ppRow) = (STSRow2 *)taosMemoryMalloc(sizeof(**ppRow));
|
||||
TSROW_LEN(pRow, rLen);
|
||||
(*ppRow) = (STSRow2 *)taosMemoryMalloc(rLen);
|
||||
if (*ppRow == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _exit;
|
||||
}
|
||||
**ppRow = *pRow;
|
||||
(*ppRow)->pData = NULL;
|
||||
|
||||
if (pRow->nData) {
|
||||
(*ppRow)->pData = taosMemoryMalloc(pRow->nData);
|
||||
if ((*ppRow)->pData == NULL) {
|
||||
taosMemoryFree(*ppRow);
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _exit;
|
||||
}
|
||||
memcpy((*ppRow)->pData, pRow->pData, pRow->nData);
|
||||
}
|
||||
memcpy(*ppRow, pRow, rLen);
|
||||
|
||||
_exit:
|
||||
return code;
|
||||
|
@ -493,12 +486,12 @@ _exit:
|
|||
|
||||
void tTSRowFree(STSRow2 *pRow) {
|
||||
if (pRow) {
|
||||
if (pRow->pData) taosMemoryFree(pRow->pData);
|
||||
taosMemoryFree(pRow);
|
||||
}
|
||||
}
|
||||
|
||||
void tTSRowGet(STSRow2 *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal) {
|
||||
#if 0
|
||||
uint8_t isTuple = ((pRow->flags & 0xf0) == 0) ? 1 : 0;
|
||||
STColumn *pTColumn = &pTSchema->columns[iCol];
|
||||
uint8_t flags = pRow->flags & (uint8_t)0xf;
|
||||
|
@ -643,10 +636,12 @@ _return_null:
|
|||
_return_value:
|
||||
*pColVal = COL_VAL_VALUE(pTColumn->colId, pTColumn->type, value);
|
||||
return;
|
||||
#endif
|
||||
}
|
||||
|
||||
int32_t tTSRowToArray(STSRow2 *pRow, STSchema *pTSchema, SArray **ppArray) {
|
||||
int32_t code = 0;
|
||||
#if 0
|
||||
SColVal cv;
|
||||
|
||||
(*ppArray) = taosArrayInit(pTSchema->numOfCols, sizeof(SColVal));
|
||||
|
@ -660,52 +655,27 @@ int32_t tTSRowToArray(STSRow2 *pRow, STSchema *pTSchema, SArray **ppArray) {
|
|||
taosArrayPush(*ppArray, &cv);
|
||||
}
|
||||
|
||||
#endif
|
||||
_exit:
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t tPutTSRow(uint8_t *p, STSRow2 *pRow) {
|
||||
int32_t n = 0;
|
||||
int32_t n;
|
||||
|
||||
n += tPutI64(p ? p + n : p, pRow->ts);
|
||||
n += tPutI8(p ? p + n : p, pRow->flags);
|
||||
n += tPutI32v(p ? p + n : p, pRow->sver);
|
||||
|
||||
ASSERT(pRow->flags & 0xf);
|
||||
|
||||
switch (pRow->flags & 0xf) {
|
||||
case TSROW_HAS_NONE:
|
||||
case TSROW_HAS_NULL:
|
||||
ASSERT(pRow->nData == 0);
|
||||
ASSERT(pRow->pData == NULL);
|
||||
break;
|
||||
default:
|
||||
ASSERT(pRow->nData && pRow->pData);
|
||||
n += tPutBinary(p ? p + n : p, pRow->pData, pRow->nData);
|
||||
break;
|
||||
TSROW_LEN(pRow, n);
|
||||
if (p) {
|
||||
memcpy(p, pRow, n);
|
||||
}
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
int32_t tGetTSRow(uint8_t *p, STSRow2 *pRow) {
|
||||
int32_t n = 0;
|
||||
int32_t tGetTSRow(uint8_t *p, STSRow2 **ppRow) {
|
||||
int32_t n;
|
||||
|
||||
n += tGetI64(p + n, &pRow->ts);
|
||||
n += tGetI8(p + n, &pRow->flags);
|
||||
n += tGetI32v(p + n, &pRow->sver);
|
||||
|
||||
ASSERT(pRow->flags);
|
||||
switch (pRow->flags & 0xf) {
|
||||
case TSROW_HAS_NONE:
|
||||
case TSROW_HAS_NULL:
|
||||
pRow->nData = 0;
|
||||
pRow->pData = NULL;
|
||||
break;
|
||||
default:
|
||||
n += tGetBinary(p + n, &pRow->pData, &pRow->nData);
|
||||
break;
|
||||
}
|
||||
*ppRow = (STSRow2 *)p;
|
||||
TSROW_LEN(*ppRow, n);
|
||||
|
||||
return n;
|
||||
}
|
||||
|
@ -904,15 +874,13 @@ static int32_t tGetTagVal(uint8_t *p, STagVal *pTagVal, int8_t isJson) {
|
|||
return n;
|
||||
}
|
||||
|
||||
bool tTagIsJson(const void *pTag){
|
||||
return (((const STag *)pTag)->flags & TD_TAG_JSON);
|
||||
}
|
||||
bool tTagIsJson(const void *pTag) { return (((const STag *)pTag)->flags & TD_TAG_JSON); }
|
||||
|
||||
bool tTagIsJsonNull(void *data){
|
||||
STag *pTag = (STag*)data;
|
||||
int8_t isJson = tTagIsJson(pTag);
|
||||
if(!isJson) return false;
|
||||
return ((STag*)data)->nTag == 0;
|
||||
bool tTagIsJsonNull(void *data) {
|
||||
STag *pTag = (STag *)data;
|
||||
int8_t isJson = tTagIsJson(pTag);
|
||||
if (!isJson) return false;
|
||||
return ((STag *)data)->nTag == 0;
|
||||
}
|
||||
|
||||
int32_t tTagNew(SArray *pArray, int32_t version, int8_t isJson, STag **ppTag) {
|
||||
|
@ -1097,112 +1065,6 @@ _err:
|
|||
}
|
||||
|
||||
#if 1 // ===================================================================================================================
|
||||
static void dataColSetNEleNull(SDataCol *pCol, int nEle);
|
||||
int tdAllocMemForCol(SDataCol *pCol, int maxPoints) {
|
||||
int spaceNeeded = pCol->bytes * maxPoints;
|
||||
if (IS_VAR_DATA_TYPE(pCol->type)) {
|
||||
spaceNeeded += sizeof(VarDataOffsetT) * maxPoints;
|
||||
}
|
||||
#ifdef TD_SUPPORT_BITMAP
|
||||
int32_t nBitmapBytes = (int32_t)TD_BITMAP_BYTES(maxPoints);
|
||||
spaceNeeded += (int)nBitmapBytes;
|
||||
// TODO: Currently, the compression of bitmap parts is affiliated to the column data parts, thus allocate 1 more
|
||||
// TYPE_BYTES as to comprise complete TYPE_BYTES. Otherwise, invalid read/write would be triggered.
|
||||
// spaceNeeded += TYPE_BYTES[pCol->type]; // the bitmap part is append as a single part since 2022.04.03, thus
|
||||
// remove the additional space
|
||||
#endif
|
||||
|
||||
if (pCol->spaceSize < spaceNeeded) {
|
||||
void *ptr = taosMemoryRealloc(pCol->pData, spaceNeeded);
|
||||
if (ptr == NULL) {
|
||||
uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)spaceNeeded, strerror(errno));
|
||||
return -1;
|
||||
} else {
|
||||
pCol->pData = ptr;
|
||||
pCol->spaceSize = spaceNeeded;
|
||||
}
|
||||
}
|
||||
#ifdef TD_SUPPORT_BITMAP
|
||||
|
||||
if (IS_VAR_DATA_TYPE(pCol->type)) {
|
||||
pCol->pBitmap = POINTER_SHIFT(pCol->pData, pCol->bytes * maxPoints);
|
||||
pCol->dataOff = POINTER_SHIFT(pCol->pBitmap, nBitmapBytes);
|
||||
} else {
|
||||
pCol->pBitmap = POINTER_SHIFT(pCol->pData, pCol->bytes * maxPoints);
|
||||
}
|
||||
#else
|
||||
if (IS_VAR_DATA_TYPE(pCol->type)) {
|
||||
pCol->dataOff = POINTER_SHIFT(pCol->pData, pCol->bytes * maxPoints);
|
||||
}
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Duplicate the schema and return a new object
|
||||
*/
|
||||
STSchema *tdDupSchema(const STSchema *pSchema) {
|
||||
int tlen = sizeof(STSchema) + sizeof(STColumn) * schemaNCols(pSchema);
|
||||
STSchema *tSchema = (STSchema *)taosMemoryMalloc(tlen);
|
||||
if (tSchema == NULL) return NULL;
|
||||
|
||||
memcpy((void *)tSchema, (void *)pSchema, tlen);
|
||||
|
||||
return tSchema;
|
||||
}
|
||||
|
||||
/**
|
||||
* Encode a schema to dst, and return the next pointer
|
||||
*/
|
||||
int tdEncodeSchema(void **buf, STSchema *pSchema) {
|
||||
int tlen = 0;
|
||||
tlen += taosEncodeFixedI32(buf, schemaVersion(pSchema));
|
||||
tlen += taosEncodeFixedI32(buf, schemaNCols(pSchema));
|
||||
|
||||
for (int i = 0; i < schemaNCols(pSchema); i++) {
|
||||
STColumn *pCol = schemaColAt(pSchema, i);
|
||||
tlen += taosEncodeFixedI8(buf, colType(pCol));
|
||||
tlen += taosEncodeFixedI8(buf, colFlags(pCol));
|
||||
tlen += taosEncodeFixedI16(buf, colColId(pCol));
|
||||
tlen += taosEncodeFixedI16(buf, colBytes(pCol));
|
||||
}
|
||||
|
||||
return tlen;
|
||||
}
|
||||
|
||||
/**
|
||||
* Decode a schema from a binary.
|
||||
*/
|
||||
void *tdDecodeSchema(void *buf, STSchema **pRSchema) {
|
||||
int version = 0;
|
||||
int numOfCols = 0;
|
||||
STSchemaBuilder schemaBuilder;
|
||||
|
||||
buf = taosDecodeFixedI32(buf, &version);
|
||||
buf = taosDecodeFixedI32(buf, &numOfCols);
|
||||
|
||||
if (tdInitTSchemaBuilder(&schemaBuilder, version) < 0) return NULL;
|
||||
|
||||
for (int i = 0; i < numOfCols; i++) {
|
||||
col_type_t type = 0;
|
||||
int8_t flags = 0;
|
||||
col_id_t colId = 0;
|
||||
col_bytes_t bytes = 0;
|
||||
buf = taosDecodeFixedI8(buf, &type);
|
||||
buf = taosDecodeFixedI8(buf, &flags);
|
||||
buf = taosDecodeFixedI16(buf, &colId);
|
||||
buf = taosDecodeFixedI32(buf, &bytes);
|
||||
if (tdAddColToSchema(&schemaBuilder, type, flags, colId, bytes) < 0) {
|
||||
tdDestroyTSchemaBuilder(&schemaBuilder);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
*pRSchema = tdGetSchemaFromBuilder(&schemaBuilder);
|
||||
tdDestroyTSchemaBuilder(&schemaBuilder);
|
||||
return buf;
|
||||
}
|
||||
|
||||
int tdInitTSchemaBuilder(STSchemaBuilder *pBuilder, schema_ver_t version) {
|
||||
if (pBuilder == NULL) return -1;
|
||||
|
||||
|
@ -1239,22 +1101,22 @@ int32_t tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int8_t flags, c
|
|||
}
|
||||
|
||||
STColumn *pCol = &(pBuilder->columns[pBuilder->nCols]);
|
||||
colSetType(pCol, type);
|
||||
colSetColId(pCol, colId);
|
||||
colSetFlags(pCol, flags);
|
||||
pCol->type = type;
|
||||
pCol->colId = colId;
|
||||
pCol->flags = flags;
|
||||
if (pBuilder->nCols == 0) {
|
||||
colSetOffset(pCol, 0);
|
||||
pCol->offset = 0;
|
||||
} else {
|
||||
STColumn *pTCol = &(pBuilder->columns[pBuilder->nCols - 1]);
|
||||
colSetOffset(pCol, pTCol->offset + TYPE_BYTES[pTCol->type]);
|
||||
pCol->offset = pTCol->offset + TYPE_BYTES[pTCol->type];
|
||||
}
|
||||
|
||||
if (IS_VAR_DATA_TYPE(type)) {
|
||||
colSetBytes(pCol, bytes);
|
||||
pCol->bytes = bytes;
|
||||
pBuilder->tlen += (TYPE_BYTES[type] + bytes);
|
||||
pBuilder->vlen += bytes - sizeof(VarDataLenT);
|
||||
} else {
|
||||
colSetBytes(pCol, TYPE_BYTES[type]);
|
||||
pCol->bytes = TYPE_BYTES[type];
|
||||
pBuilder->tlen += TYPE_BYTES[type];
|
||||
pBuilder->vlen += TYPE_BYTES[type];
|
||||
}
|
||||
|
@ -1275,151 +1137,19 @@ STSchema *tdGetSchemaFromBuilder(STSchemaBuilder *pBuilder) {
|
|||
STSchema *pSchema = (STSchema *)taosMemoryMalloc(tlen);
|
||||
if (pSchema == NULL) return NULL;
|
||||
|
||||
schemaVersion(pSchema) = pBuilder->version;
|
||||
schemaNCols(pSchema) = pBuilder->nCols;
|
||||
schemaTLen(pSchema) = pBuilder->tlen;
|
||||
schemaFLen(pSchema) = pBuilder->flen;
|
||||
schemaVLen(pSchema) = pBuilder->vlen;
|
||||
pSchema->version = pBuilder->version;
|
||||
pSchema->numOfCols = pBuilder->nCols;
|
||||
pSchema->tlen = pBuilder->tlen;
|
||||
pSchema->flen = pBuilder->flen;
|
||||
pSchema->vlen = pBuilder->vlen;
|
||||
|
||||
#ifdef TD_SUPPORT_BITMAP
|
||||
schemaTLen(pSchema) += (int)TD_BITMAP_BYTES(schemaNCols(pSchema));
|
||||
pSchema->tlen += (int)TD_BITMAP_BYTES(pSchema->numOfCols);
|
||||
#endif
|
||||
|
||||
memcpy(schemaColAt(pSchema, 0), pBuilder->columns, sizeof(STColumn) * pBuilder->nCols);
|
||||
memcpy(&pSchema->columns[0], pBuilder->columns, sizeof(STColumn) * pBuilder->nCols);
|
||||
|
||||
return pSchema;
|
||||
}
|
||||
|
||||
void dataColInit(SDataCol *pDataCol, STColumn *pCol, int maxPoints) {
|
||||
pDataCol->type = colType(pCol);
|
||||
pDataCol->colId = colColId(pCol);
|
||||
pDataCol->bytes = colBytes(pCol);
|
||||
pDataCol->offset = colOffset(pCol) + 0; // TD_DATA_ROW_HEAD_SIZE;
|
||||
|
||||
pDataCol->len = 0;
|
||||
}
|
||||
|
||||
static FORCE_INLINE const void *tdGetColDataOfRowUnsafe(SDataCol *pCol, int row) {
|
||||
if (IS_VAR_DATA_TYPE(pCol->type)) {
|
||||
return POINTER_SHIFT(pCol->pData, pCol->dataOff[row]);
|
||||
} else {
|
||||
return POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * row);
|
||||
}
|
||||
}
|
||||
|
||||
bool isNEleNull(SDataCol *pCol, int nEle) {
|
||||
if (isAllRowsNull(pCol)) return true;
|
||||
for (int i = 0; i < nEle; ++i) {
|
||||
if (!isNull(tdGetColDataOfRowUnsafe(pCol, i), pCol->type)) return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void *dataColSetOffset(SDataCol *pCol, int nEle) {
|
||||
ASSERT(((pCol->type == TSDB_DATA_TYPE_BINARY) || (pCol->type == TSDB_DATA_TYPE_NCHAR)));
|
||||
|
||||
void *tptr = pCol->pData;
|
||||
// char *tptr = (char *)(pCol->pData);
|
||||
|
||||
VarDataOffsetT offset = 0;
|
||||
for (int i = 0; i < nEle; ++i) {
|
||||
pCol->dataOff[i] = offset;
|
||||
offset += varDataTLen(tptr);
|
||||
tptr = POINTER_SHIFT(tptr, varDataTLen(tptr));
|
||||
}
|
||||
return POINTER_SHIFT(tptr, varDataTLen(tptr));
|
||||
}
|
||||
|
||||
SDataCols *tdNewDataCols(int maxCols, int maxRows) {
|
||||
SDataCols *pCols = (SDataCols *)taosMemoryCalloc(1, sizeof(SDataCols));
|
||||
if (pCols == NULL) {
|
||||
uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)sizeof(SDataCols), strerror(errno));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
pCols->maxPoints = maxRows;
|
||||
pCols->maxCols = maxCols;
|
||||
pCols->numOfRows = 0;
|
||||
pCols->numOfCols = 0;
|
||||
pCols->bitmapMode = TSDB_BITMODE_DEFAULT;
|
||||
|
||||
if (maxCols > 0) {
|
||||
pCols->cols = (SDataCol *)taosMemoryCalloc(maxCols, sizeof(SDataCol));
|
||||
if (pCols->cols == NULL) {
|
||||
uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)sizeof(SDataCol) * maxCols,
|
||||
strerror(errno));
|
||||
tdFreeDataCols(pCols);
|
||||
return NULL;
|
||||
}
|
||||
#if 0 // no need as calloc used
|
||||
int i;
|
||||
for (i = 0; i < maxCols; i++) {
|
||||
pCols->cols[i].spaceSize = 0;
|
||||
pCols->cols[i].len = 0;
|
||||
pCols->cols[i].pData = NULL;
|
||||
pCols->cols[i].dataOff = NULL;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
return pCols;
|
||||
}
|
||||
|
||||
int tdInitDataCols(SDataCols *pCols, STSchema *pSchema) {
|
||||
int i;
|
||||
int oldMaxCols = pCols->maxCols;
|
||||
if (schemaNCols(pSchema) > oldMaxCols) {
|
||||
pCols->maxCols = schemaNCols(pSchema);
|
||||
void *ptr = (SDataCol *)taosMemoryRealloc(pCols->cols, sizeof(SDataCol) * pCols->maxCols);
|
||||
if (ptr == NULL) return -1;
|
||||
pCols->cols = ptr;
|
||||
for (i = oldMaxCols; i < pCols->maxCols; ++i) {
|
||||
pCols->cols[i].pData = NULL;
|
||||
pCols->cols[i].dataOff = NULL;
|
||||
pCols->cols[i].pBitmap = NULL;
|
||||
pCols->cols[i].spaceSize = 0;
|
||||
}
|
||||
}
|
||||
#if 0
|
||||
tdResetDataCols(pCols); // redundant loop to reset len/blen to 0, already reset in following dataColInit(...)
|
||||
#endif
|
||||
|
||||
pCols->numOfRows = 0;
|
||||
pCols->bitmapMode = TSDB_BITMODE_DEFAULT;
|
||||
pCols->numOfCols = schemaNCols(pSchema);
|
||||
|
||||
for (i = 0; i < schemaNCols(pSchema); ++i) {
|
||||
dataColInit(pCols->cols + i, schemaColAt(pSchema, i), pCols->maxPoints);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
SDataCols *tdFreeDataCols(SDataCols *pCols) {
|
||||
int i;
|
||||
if (pCols) {
|
||||
if (pCols->cols) {
|
||||
int maxCols = pCols->maxCols;
|
||||
for (i = 0; i < maxCols; ++i) {
|
||||
SDataCol *pCol = &pCols->cols[i];
|
||||
taosMemoryFreeClear(pCol->pData);
|
||||
}
|
||||
taosMemoryFree(pCols->cols);
|
||||
pCols->cols = NULL;
|
||||
}
|
||||
taosMemoryFree(pCols);
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void tdResetDataCols(SDataCols *pCols) {
|
||||
if (pCols != NULL) {
|
||||
pCols->numOfRows = 0;
|
||||
pCols->bitmapMode = 0;
|
||||
for (int i = 0; i < pCols->maxCols; ++i) {
|
||||
dataColReset(pCols->cols + i);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
|
@ -114,7 +114,7 @@ int32_t tsMinSlidingTime = 10;
|
|||
// the maxinum number of distict query result
|
||||
int32_t tsMaxNumOfDistinctResults = 1000 * 10000;
|
||||
|
||||
// 1 us for interval time range, changed accordingly
|
||||
// 1 database precision unit for interval time range, changed accordingly
|
||||
int32_t tsMinIntervalTime = 1;
|
||||
|
||||
// 20sec, the maximum value of stream computing delay, changed accordingly
|
||||
|
@ -412,7 +412,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
|
|||
tsNumOfVnodeQueryThreads = TMAX(tsNumOfVnodeQueryThreads, 2);
|
||||
if (cfgAddInt32(pCfg, "numOfVnodeQueryThreads", tsNumOfVnodeQueryThreads, 1, 1024, 0) != 0) return -1;
|
||||
|
||||
tsNumOfVnodeFetchThreads = TRANGE(tsNumOfVnodeFetchThreads, 1, 1);
|
||||
tsNumOfVnodeFetchThreads = tsNumOfCores / 4;
|
||||
tsNumOfVnodeFetchThreads = TMAX(tsNumOfVnodeFetchThreads, 4);
|
||||
if (cfgAddInt32(pCfg, "numOfVnodeFetchThreads", tsNumOfVnodeFetchThreads, 1, 1024, 0) != 0) return -1;
|
||||
|
||||
tsNumOfVnodeWriteThreads = tsNumOfCores;
|
||||
|
|
|
@ -4962,7 +4962,7 @@ int tDecodeSVCreateTbReq(SDecoder *pCoder, SVCreateTbReq *pReq) {
    if (tDecodeI64(pCoder, &pReq->ctb.suid) < 0) return -1;
    if (tDecodeTag(pCoder, (STag **)&pReq->ctb.pTag) < 0) return -1;
  } else if (pReq->type == TSDB_NORMAL_TABLE) {
    if (tDecodeSSchemaWrapper(pCoder, &pReq->ntb.schemaRow) < 0) return -1;
    if (tDecodeSSchemaWrapperEx(pCoder, &pReq->ntb.schemaRow) < 0) return -1;
  } else {
    ASSERT(0);
  }

@ -5526,6 +5526,11 @@ bool tOffsetEqual(const STqOffsetVal *pLeft, const STqOffsetVal *pRight) {
      ASSERT(0);
      // TODO
      return pLeft->uid == pRight->uid && pLeft->ts == pRight->ts;
    } else {
      ASSERT(0);
      /*ASSERT(pLeft->type == TMQ_OFFSET__RESET_NONE || pLeft->type == TMQ_OFFSET__RESET_EARLIEAST ||*/
      /*pLeft->type == TMQ_OFFSET__RESET_LATEST);*/
      /*return true;*/
    }
  }
  return false;

@ -32,28 +32,10 @@ const uint8_t tdVTypeByte[2][3] = {{
};

// declaration
static uint8_t tdGetBitmapByte(uint8_t byte);
static int32_t tdCompareColId(const void *arg1, const void *arg2);
static uint8_t tdGetBitmapByte(uint8_t byte);
static int32_t tdCompareColId(const void *arg1, const void *arg2);
static FORCE_INLINE int32_t compareKvRowColId(const void *key1, const void *key2);

// static void dataColSetNEleNull(SDataCol *pCol, int nEle);

/**
 * @brief src2 data has more priority than src1
 *
 * @param target
 * @param src1
 * @param iter1
 * @param limit1
 * @param src2
 * @param iter2
 * @param limit2
 * @param tRows
 * @param update
 */
static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2,
                               int limit2, int tRows, bool update);

// implementation
/**
 * @brief Compress bitmap bytes comprised of 2-bits to counterpart of 1-bit.
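
The bitmap helpers this hunk touches convert a 2-bits-per-cell value-type bitmap into a 1-bit-per-cell one. The sketch below shows one plausible compression step, under the assumptions that a zero 2-bit code maps to bit 0, any non-zero code maps to bit 1, and cells are packed MSB-first; the actual tdGetBitmapByte() mapping in TDengine may differ.

#include <stdint.h>
#include <stdio.h>

static uint8_t demo_compress_2to1(uint8_t hi, uint8_t lo) {
  /* hi/lo each hold four 2-bit cells; the result holds those eight cells as single bits */
  uint8_t out = 0;
  for (int i = 0; i < 4; ++i) {
    uint8_t cell = (hi >> (6 - 2 * i)) & 0x3;
    out = (uint8_t)((out << 1) | (cell != 0));
  }
  for (int i = 0; i < 4; ++i) {
    uint8_t cell = (lo >> (6 - 2 * i)) & 0x3;
    out = (uint8_t)((out << 1) | (cell != 0));
  }
  return out;
}

int main(void) {
  /* hi = 0x24 (cells 00,10,01,00), lo = 0x04 (cells 00,00,01,00) -> prints 0x62 */
  printf("0x%02x\n", demo_compress_2to1(0x24, 0x04));
  return 0;
}
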
@ -287,33 +269,6 @@ void tdMergeBitmap(uint8_t *srcBitmap, int32_t nBits, uint8_t *dstBitmap) {
|
|||
}
|
||||
}
|
||||
|
||||
static FORCE_INLINE void dataColSetNullAt(SDataCol *pCol, int index, bool setBitmap, int8_t bitmapMode) {
|
||||
if (IS_VAR_DATA_TYPE(pCol->type)) {
|
||||
pCol->dataOff[index] = pCol->len;
|
||||
char *ptr = POINTER_SHIFT(pCol->pData, pCol->len);
|
||||
setVardataNull(ptr, pCol->type);
|
||||
pCol->len += varDataTLen(ptr);
|
||||
} else {
|
||||
setNull(POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * index), pCol->type, pCol->bytes);
|
||||
pCol->len += TYPE_BYTES[pCol->type];
|
||||
}
|
||||
if (setBitmap) {
|
||||
tdSetBitmapValType(pCol->pBitmap, index, TD_VTYPE_NONE, bitmapMode);
|
||||
}
|
||||
}
|
||||
|
||||
// static void dataColSetNEleNull(SDataCol *pCol, int nEle) {
|
||||
// if (IS_VAR_DATA_TYPE(pCol->type)) {
|
||||
// pCol->len = 0;
|
||||
// for (int i = 0; i < nEle; i++) {
|
||||
// dataColSetNullAt(pCol, i);
|
||||
// }
|
||||
// } else {
|
||||
// setNullN(pCol->pData, pCol->type, pCol->bytes, nEle);
|
||||
// pCol->len = TYPE_BYTES[pCol->type] * nEle;
|
||||
// }
|
||||
// }
|
||||
|
||||
/**
|
||||
* @brief Set bitmap area by byte preferentially and then by bit.
|
||||
*
|
||||
|
@ -362,56 +317,6 @@ bool tdIsBitmapBlkNorm(const void *pBitmap, int32_t numOfBits, int8_t bitmapMode
|
|||
return true;
|
||||
}
|
||||
|
||||
static FORCE_INLINE void dataColSetNoneAt(SDataCol *pCol, int index, bool setBitmap, int8_t bitmapMode) {
|
||||
if (IS_VAR_DATA_TYPE(pCol->type)) {
|
||||
pCol->dataOff[index] = pCol->len;
|
||||
char *ptr = POINTER_SHIFT(pCol->pData, pCol->len);
|
||||
setVardataNull(ptr, pCol->type);
|
||||
pCol->len += varDataTLen(ptr);
|
||||
} else {
|
||||
setNull(POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * index), pCol->type, pCol->bytes);
|
||||
pCol->len += TYPE_BYTES[pCol->type];
|
||||
}
|
||||
if (setBitmap) {
|
||||
tdSetBitmapValType(pCol->pBitmap, index, TD_VTYPE_NONE, bitmapMode);
|
||||
}
|
||||
}
|
||||
|
||||
static void dataColSetNEleNone(SDataCol *pCol, int nEle, int8_t bitmapMode) {
|
||||
if (IS_VAR_DATA_TYPE(pCol->type)) {
|
||||
pCol->len = 0;
|
||||
for (int i = 0; i < nEle; ++i) {
|
||||
dataColSetNoneAt(pCol, i, false, bitmapMode);
|
||||
}
|
||||
} else {
|
||||
setNullN(pCol->pData, pCol->type, pCol->bytes, nEle);
|
||||
pCol->len = TYPE_BYTES[pCol->type] * nEle;
|
||||
}
|
||||
#ifdef TD_SUPPORT_BITMAP
|
||||
tdSetBitmapValTypeN(pCol->pBitmap, nEle, TD_VTYPE_NONE, bitmapMode);
|
||||
#endif
|
||||
}
|
||||
|
||||
#if 0
|
||||
void trbSetRowInfo(SRowBuilder *pRB, bool del, uint16_t sver) {
|
||||
// TODO
|
||||
}
|
||||
|
||||
void trbSetRowVersion(SRowBuilder *pRB, uint64_t ver) {
|
||||
// TODO
|
||||
}
|
||||
|
||||
void trbSetRowTS(SRowBuilder *pRB, TSKEY ts) {
|
||||
// TODO
|
||||
}
|
||||
|
||||
int trbWriteCol(SRowBuilder *pRB, void *pData, col_id_t cid) {
|
||||
// TODO
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
STSRow *tdRowDup(STSRow *row) {
|
||||
STSRow *trow = taosMemoryMalloc(TD_ROW_LEN(row));
|
||||
if (trow == NULL) return NULL;
|
||||
|
@ -420,511 +325,6 @@ STSRow *tdRowDup(STSRow *row) {
|
|||
return trow;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief
|
||||
*
|
||||
* @param pCol
|
||||
* @param valType
|
||||
* @param val
|
||||
* @param numOfRows
|
||||
* @param maxPoints
|
||||
* @param bitmapMode default is 0(2 bits), otherwise 1(1 bit)
|
||||
* @param isMerge merge to current row
|
||||
* @return int
|
||||
*/
|
||||
int tdAppendValToDataCol(SDataCol *pCol, TDRowValT valType, const void *val, int numOfRows, int maxPoints,
|
||||
int8_t bitmapMode, bool isMerge) {
|
||||
TASSERT(pCol != NULL);
|
||||
|
||||
// Assume that the columns not specified during insert/upsert mean None.
|
||||
if (isAllRowsNone(pCol)) {
|
||||
if (tdValIsNone(valType)) {
|
||||
// all None value yet, just return
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (tdAllocMemForCol(pCol, maxPoints) < 0) return -1;
|
||||
if (numOfRows > 0) {
|
||||
// Find the first not None value, fill all previous values as None
|
||||
dataColSetNEleNone(pCol, numOfRows, bitmapMode);
|
||||
}
|
||||
}
|
||||
const void *value = val;
|
||||
if (!tdValTypeIsNorm(valType) || !val) {
|
||||
// TODO:
|
||||
// 1. back compatibility and easy to debug with codes of 2.0 to save NULL values.
|
||||
// 2. later on, considering further optimization, don't save Null/None for VarType.
|
||||
value = getNullValue(pCol->type);
|
||||
}
|
||||
if (!isMerge) {
|
||||
if (IS_VAR_DATA_TYPE(pCol->type)) {
|
||||
// set offset
|
||||
pCol->dataOff[numOfRows] = pCol->len;
|
||||
// Copy data
|
||||
memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, varDataTLen(value));
|
||||
// Update the length
|
||||
pCol->len += varDataTLen(value);
|
||||
} else {
|
||||
ASSERT(pCol->len == TYPE_BYTES[pCol->type] * numOfRows);
|
||||
memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, pCol->bytes);
|
||||
pCol->len += pCol->bytes;
|
||||
}
|
||||
} else if (!tdValTypeIsNone(valType)) {
|
||||
if (IS_VAR_DATA_TYPE(pCol->type)) {
|
||||
// keep the last offset
|
||||
// discard the last var data
|
||||
int32_t lastVarLen = varDataTLen(POINTER_SHIFT(pCol->pData, pCol->dataOff[numOfRows]));
|
||||
pCol->len -= lastVarLen;
|
||||
// Copy data
|
||||
memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, varDataTLen(value));
|
||||
// Update the length
|
||||
pCol->len += varDataTLen(value);
|
||||
} else {
|
||||
ASSERT(pCol->len - TYPE_BYTES[pCol->type] == TYPE_BYTES[pCol->type] * numOfRows);
|
||||
memcpy(POINTER_SHIFT(pCol->pData, pCol->len - TYPE_BYTES[pCol->type]), value, pCol->bytes);
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef TD_SUPPORT_BITMAP
|
||||
if (!isMerge || !tdValTypeIsNone(valType)) {
|
||||
tdSetBitmapValType(pCol->pBitmap, numOfRows, valType, bitmapMode);
|
||||
}
|
||||
#endif
|
||||
return 0;
|
||||
}
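
tdAppendValToDataCol above handles variable-length columns by recording the row's start offset in dataOff, copying the length-prefixed value, and advancing len. A stripped-down, self-contained version of that append step follows; DemoVarCol and demo_append_str are hypothetical names, not the TDengine structures.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct {
  int32_t dataOff[16];  /* per-row start offset into pData */
  uint8_t pData[256];   /* length-prefixed payloads, packed back to back */
  int32_t len;          /* bytes used in pData */
  int32_t rows;
} DemoVarCol;

static void demo_append_str(DemoVarCol *col, const char *s) {
  uint16_t n = (uint16_t)strlen(s);
  col->dataOff[col->rows] = col->len;            /* remember where this row starts */
  memcpy(col->pData + col->len, &n, sizeof(n));  /* 2-byte length prefix */
  memcpy(col->pData + col->len + sizeof(n), s, n);
  col->len += (int32_t)(sizeof(n) + n);
  col->rows += 1;
}

int main(void) {
  DemoVarCol col = {0};
  demo_append_str(&col, "beijing");
  demo_append_str(&col, "sh");
  printf("rows=%d bytes=%d second row starts at %d\n", col.rows, col.len, col.dataOff[1]);
  return 0;
}
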
|
||||
|
||||
// internal
|
||||
static int32_t tdAppendTpRowToDataCol(STSRow *pRow, STSchema *pSchema, SDataCols *pCols, bool isMerge) {
|
||||
#if 0
|
||||
ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < TD_ROW_KEY(pRow));
|
||||
#endif
|
||||
|
||||
// Multi-Version rows with the same key and different versions supported
|
||||
ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) <= TD_ROW_KEY(pRow));
|
||||
|
||||
int rcol = 1;
|
||||
int dcol = 1;
|
||||
void *pBitmap = tdGetBitmapAddrTp(pRow, pSchema->flen);
|
||||
|
||||
SDataCol *pDataCol = &(pCols->cols[0]);
|
||||
ASSERT(pDataCol->colId == PRIMARYKEY_TIMESTAMP_COL_ID);
|
||||
tdAppendValToDataCol(pDataCol, TD_VTYPE_NORM, &pRow->ts, pCols->numOfRows, pCols->maxPoints, pCols->bitmapMode,
|
||||
isMerge);
|
||||
|
||||
while (dcol < pCols->numOfCols) {
|
||||
pDataCol = &(pCols->cols[dcol]);
|
||||
if (rcol >= schemaNCols(pSchema)) {
|
||||
tdAppendValToDataCol(pDataCol, TD_VTYPE_NULL, NULL, pCols->numOfRows, pCols->maxPoints, pCols->bitmapMode,
|
||||
isMerge);
|
||||
++dcol;
|
||||
continue;
|
||||
}
|
||||
|
||||
STColumn *pRowCol = schemaColAt(pSchema, rcol);
|
||||
SCellVal sVal = {0};
|
||||
if (pRowCol->colId == pDataCol->colId) {
|
||||
if (tdGetTpRowValOfCol(&sVal, pRow, pBitmap, pRowCol->type, pRowCol->offset - sizeof(TSKEY), rcol - 1) < 0) {
|
||||
return terrno;
|
||||
}
|
||||
tdAppendValToDataCol(pDataCol, sVal.valType, sVal.val, pCols->numOfRows, pCols->maxPoints, pCols->bitmapMode,
|
||||
isMerge);
|
||||
++dcol;
|
||||
++rcol;
|
||||
} else if (pRowCol->colId < pDataCol->colId) {
|
||||
++rcol;
|
||||
} else {
|
||||
tdAppendValToDataCol(pDataCol, TD_VTYPE_NULL, NULL, pCols->numOfRows, pCols->maxPoints, pCols->bitmapMode,
|
||||
isMerge);
|
||||
++dcol;
|
||||
}
|
||||
}
|
||||
#if 0
|
||||
++pCols->numOfRows;
|
||||
#endif
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
// internal
|
||||
static int32_t tdAppendKvRowToDataCol(STSRow *pRow, STSchema *pSchema, SDataCols *pCols, bool isMerge) {
|
||||
ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < TD_ROW_KEY(pRow));
|
||||
|
||||
int rcol = 0;
|
||||
int dcol = 1;
|
||||
int tRowCols = tdRowGetNCols(pRow) - 1; // the primary TS key not included in kvRowColIdx part
|
||||
int tSchemaCols = schemaNCols(pSchema) - 1;
|
||||
void *pBitmap = tdGetBitmapAddrKv(pRow, tdRowGetNCols(pRow));
|
||||
|
||||
SDataCol *pDataCol = &(pCols->cols[0]);
|
||||
ASSERT(pDataCol->colId == PRIMARYKEY_TIMESTAMP_COL_ID);
|
||||
tdAppendValToDataCol(pDataCol, TD_VTYPE_NORM, &pRow->ts, pCols->numOfRows, pCols->maxPoints, pCols->bitmapMode,
|
||||
isMerge);
|
||||
|
||||
while (dcol < pCols->numOfCols) {
|
||||
pDataCol = &(pCols->cols[dcol]);
|
||||
if (rcol >= tRowCols || rcol >= tSchemaCols) {
|
||||
tdAppendValToDataCol(pDataCol, TD_VTYPE_NULL, NULL, pCols->numOfRows, pCols->maxPoints, pCols->bitmapMode,
|
||||
isMerge);
|
||||
++dcol;
|
||||
continue;
|
||||
}
|
||||
|
||||
SKvRowIdx *pIdx = tdKvRowColIdxAt(pRow, rcol);
|
||||
int16_t colIdx = -1;
|
||||
if (pIdx) {
|
||||
colIdx = POINTER_DISTANCE(pIdx, TD_ROW_COL_IDX(pRow)) / sizeof(SKvRowIdx);
|
||||
}
|
||||
TASSERT(colIdx >= 0);
|
||||
SCellVal sVal = {0};
|
||||
if (pIdx->colId == pDataCol->colId) {
|
||||
if (tdGetKvRowValOfCol(&sVal, pRow, pBitmap, pIdx->offset, colIdx) < 0) {
|
||||
return terrno;
|
||||
}
|
||||
tdAppendValToDataCol(pDataCol, sVal.valType, sVal.val, pCols->numOfRows, pCols->maxPoints, pCols->bitmapMode,
|
||||
isMerge);
|
||||
++dcol;
|
||||
++rcol;
|
||||
} else if (pIdx->colId < pDataCol->colId) {
|
||||
++rcol;
|
||||
} else {
|
||||
tdAppendValToDataCol(pDataCol, TD_VTYPE_NULL, NULL, pCols->numOfRows, pCols->maxPoints, pCols->bitmapMode,
|
||||
isMerge);
|
||||
++dcol;
|
||||
}
|
||||
}
|
||||
#if 0
|
||||
++pCols->numOfRows;
|
||||
#endif
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief exposed
|
||||
*
|
||||
* @param pRow
|
||||
* @param pSchema
|
||||
* @param pCols
|
||||
*/
|
||||
int32_t tdAppendSTSRowToDataCol(STSRow *pRow, STSchema *pSchema, SDataCols *pCols, bool isMerge) {
|
||||
#ifdef TD_DEBUG_PRINT_TSDB_LOAD_DCOLS
|
||||
printf("%s:%d ts: %" PRIi64 " sver:%d maxCols:%" PRIi16 " nCols:%" PRIi16 ", nRows:%d\n", __func__, __LINE__,
|
||||
TD_ROW_KEY(pRow), TD_ROW_SVER(pRow), pCols->maxCols, pCols->numOfCols, pCols->numOfRows);
|
||||
#endif
|
||||
if (TD_IS_TP_ROW(pRow)) {
|
||||
return tdAppendTpRowToDataCol(pRow, pSchema, pCols, isMerge);
|
||||
} else if (TD_IS_KV_ROW(pRow)) {
|
||||
return tdAppendKvRowToDataCol(pRow, pSchema, pCols, isMerge);
|
||||
} else {
|
||||
ASSERT(0);
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief source data has more priority than target
|
||||
*
|
||||
* @param target
|
||||
* @param source
|
||||
* @param rowsToMerge
|
||||
* @param pOffset
|
||||
* @param update
|
||||
* @param maxVer
|
||||
* @return int
|
||||
*/
|
||||
int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset, bool update,
|
||||
TDRowVerT maxVer) {
|
||||
ASSERT(rowsToMerge > 0 && rowsToMerge <= source->numOfRows);
|
||||
ASSERT(target->numOfCols == source->numOfCols);
|
||||
int offset = 0;
|
||||
|
||||
if (pOffset == NULL) {
|
||||
pOffset = &offset;
|
||||
}
|
||||
|
||||
SDataCols *pTarget = NULL;
|
||||
|
||||
if ((target->numOfRows == 0) || (dataColsKeyLast(target) < dataColsKeyAtRow(source, *pOffset))) { // No overlap
|
||||
ASSERT(target->numOfRows + rowsToMerge <= target->maxPoints);
|
||||
// TODO: filter the maxVer
|
||||
TSKEY lastKey = TSKEY_INITIAL_VAL;
|
||||
for (int i = 0; i < rowsToMerge; ++i) {
|
||||
bool merge = false;
|
||||
for (int j = 0; j < source->numOfCols; j++) {
|
||||
if (source->cols[j].len > 0 || target->cols[j].len > 0) {
|
||||
SCellVal sVal = {0};
|
||||
if (tdGetColDataOfRow(&sVal, source->cols + j, i + (*pOffset), source->bitmapMode) < 0) {
|
||||
TASSERT(0);
|
||||
}
|
||||
|
||||
if (j == 0) {
|
||||
if (lastKey == *(TSKEY *)sVal.val) {
|
||||
if (!update) {
|
||||
break;
|
||||
}
|
||||
merge = true;
|
||||
} else if (lastKey != TSKEY_INITIAL_VAL) {
|
||||
++target->numOfRows;
|
||||
}
|
||||
|
||||
lastKey = *(TSKEY *)sVal.val;
|
||||
}
|
||||
if (i == 0) {
|
||||
(target->cols + j)->bitmap = (source->cols + j)->bitmap;
|
||||
}
|
||||
|
||||
tdAppendValToDataCol(target->cols + j, sVal.valType, sVal.val, target->numOfRows, target->maxPoints,
|
||||
target->bitmapMode, merge);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (lastKey != TSKEY_INITIAL_VAL) {
|
||||
++target->numOfRows;
|
||||
}
|
||||
(*pOffset) += rowsToMerge;
|
||||
} else {
|
||||
pTarget = tdDupDataCols(target, true);
|
||||
if (pTarget == NULL) goto _err;
|
||||
|
||||
int iter1 = 0;
|
||||
tdMergeTwoDataCols(target, pTarget, &iter1, pTarget->numOfRows, source, pOffset, source->numOfRows,
|
||||
pTarget->numOfRows + rowsToMerge, update);
|
||||
}
|
||||
|
||||
tdFreeDataCols(pTarget);
|
||||
return 0;
|
||||
|
||||
_err:
|
||||
tdFreeDataCols(pTarget);
|
||||
return -1;
|
||||
}
|
||||
|
||||
static void tdAppendValToDataCols(SDataCols *target, SDataCols *src, int iter, bool isMerge) {
|
||||
for (int i = 0; i < src->numOfCols; ++i) {
|
||||
ASSERT(target->cols[i].type == src->cols[i].type);
|
||||
if (src->cols[i].len > 0 || target->cols[i].len > 0) {
|
||||
SCellVal sVal = {0};
|
||||
if (tdGetColDataOfRow(&sVal, src->cols + i, iter, src->bitmapMode) < 0) {
|
||||
TASSERT(0);
|
||||
}
|
||||
if (isMerge) {
|
||||
if (!tdValTypeIsNone(sVal.valType)) {
|
||||
tdAppendValToDataCol(&(target->cols[i]), sVal.valType, sVal.val, target->numOfRows, target->maxPoints,
|
||||
target->bitmapMode, isMerge);
|
||||
} else {
|
||||
// Keep the origin value for None
|
||||
}
|
||||
} else {
|
||||
tdAppendValToDataCol(&(target->cols[i]), sVal.valType, sVal.val, target->numOfRows, target->maxPoints,
|
||||
target->bitmapMode, isMerge);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
/**
|
||||
* @brief src2 data has more priority than src1
|
||||
*
|
||||
* @param target
|
||||
* @param src1
|
||||
* @param iter1
|
||||
* @param limit1
|
||||
* @param src2
|
||||
* @param iter2
|
||||
* @param limit2
|
||||
* @param tRows
|
||||
* @param update
|
||||
*/
|
||||
static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2,
|
||||
int limit2, int tRows, bool update) {
|
||||
tdResetDataCols(target);
|
||||
target->bitmapMode = src1->bitmapMode;
|
||||
ASSERT(limit1 <= src1->numOfRows && limit2 <= src2->numOfRows);
|
||||
int32_t nRows = 0;
|
||||
|
||||
// TODO: filter the maxVer
|
||||
// TODO: handle the delete function
|
||||
TSKEY lastKey = TSKEY_INITIAL_VAL;
|
||||
while (nRows < tRows) {
|
||||
if (*iter1 >= limit1 && *iter2 >= limit2) break;
|
||||
|
||||
TSKEY key1 = (*iter1 >= limit1) ? INT64_MAX : dataColsKeyAt(src1, *iter1);
|
||||
// TKEY tkey1 = (*iter1 >= limit1) ? TKEY_NULL : dataColsTKeyAt(src1, *iter1);
|
||||
TSKEY key2 = (*iter2 >= limit2) ? INT64_MAX : dataColsKeyAt(src2, *iter2);
|
||||
// TKEY tkey2 = (*iter2 >= limit2) ? TKEY_NULL : dataColsTKeyAt(src2, *iter2);
|
||||
|
||||
// ASSERT(tkey1 == TKEY_NULL || (!TKEY_IS_DELETED(tkey1)));
|
||||
|
||||
if (key1 <= key2) {
|
||||
// select key1 if not delete
|
||||
if (update && (lastKey == key1)) {
|
||||
tdAppendValToDataCols(target, src1, *iter1, true);
|
||||
} else if (lastKey != key1) {
|
||||
if (lastKey != TSKEY_INITIAL_VAL) {
|
||||
++target->numOfRows;
|
||||
}
|
||||
tdAppendValToDataCols(target, src1, *iter1, false);
|
||||
}
|
||||
++nRows;
|
||||
++(*iter1);
|
||||
lastKey = key1;
|
||||
} else {
|
||||
// use key2 if not deleted
|
||||
// TODO: handle the delete function
|
||||
if (update && (lastKey == key2)) {
|
||||
tdAppendValToDataCols(target, src2, *iter2, true);
|
||||
} else if (lastKey != key2) {
|
||||
if (lastKey != TSKEY_INITIAL_VAL) {
|
||||
++target->numOfRows;
|
||||
}
|
||||
tdAppendValToDataCols(target, src2, *iter2, false);
|
||||
}
|
||||
|
||||
++nRows;
|
||||
++(*iter2);
|
||||
lastKey = key2;
|
||||
}
|
||||
|
||||
ASSERT(target->numOfRows <= target->maxPoints - 1);
|
||||
}
|
||||
if (lastKey != TSKEY_INITIAL_VAL) {
|
||||
++target->numOfRows;
|
||||
}
|
||||
}
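
The merge loop above interleaves two timestamp-ordered sources and, when the same key appears in both, lets the second source overwrite the first. The standalone sketch below reproduces just that key-selection logic on plain int64_t arrays; demo_merge is an illustrative name.

#include <stdint.h>
#include <stdio.h>

static int demo_merge(const int64_t *a, int na, const int64_t *b, int nb, int64_t *out) {
  int i = 0, j = 0, n = 0;
  while (i < na || j < nb) {
    int64_t ka = (i < na) ? a[i] : INT64_MAX;
    int64_t kb = (j < nb) ? b[j] : INT64_MAX;
    if (ka < kb) {
      out[n++] = ka; ++i;
    } else if (kb < ka) {
      out[n++] = kb; ++j;
    } else {           /* same key: keep b's row, drop a's */
      out[n++] = kb; ++i; ++j;
    }
  }
  return n;
}

int main(void) {
  int64_t a[] = {1, 3, 5}, b[] = {3, 4}, out[8];
  int     n = demo_merge(a, 3, b, 2, out);
  for (int k = 0; k < n; ++k) printf("%lld ", (long long)out[k]);  /* 1 3 4 5 */
  printf("\n");
  return 0;
}
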
|
||||
|
||||
STSRow *mergeTwoRows(void *buffer, STSRow *row1, STSRow *row2, STSchema *pSchema1, STSchema *pSchema2) {
|
||||
#if 0
|
||||
ASSERT(TD_ROW_KEY(row1) == TD_ROW_KEY(row2));
|
||||
ASSERT(schemaVersion(pSchema1) == TD_ROW_SVER(row1));
|
||||
ASSERT(schemaVersion(pSchema2) == TD_ROW_SVER(row2));
|
||||
ASSERT(schemaVersion(pSchema1) >= schemaVersion(pSchema2));
|
||||
#endif
|
||||
|
||||
#if 0
|
||||
SArray *stashRow = taosArrayInit(pSchema1->numOfCols, sizeof(SColInfo));
|
||||
if (stashRow == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
STSRow pRow = buffer;
|
||||
STpRow dataRow = memRowDataBody(pRow);
|
||||
memRowSetType(pRow, SMEM_ROW_DATA);
|
||||
dataRowSetVersion(dataRow, schemaVersion(pSchema1)); // use latest schema version
|
||||
dataRowSetLen(dataRow, (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + pSchema1->flen));
|
||||
|
||||
TDRowLenT dataLen = 0, kvLen = TD_MEM_ROW_KV_HEAD_SIZE;
|
||||
|
||||
int32_t i = 0; // row1
|
||||
int32_t j = 0; // row2
|
||||
int32_t nCols1 = schemaNCols(pSchema1);
|
||||
int32_t nCols2 = schemaNCols(pSchema2);
|
||||
SColInfo colInfo = {0};
|
||||
int32_t kvIdx1 = 0, kvIdx2 = 0;
|
||||
|
||||
while (i < nCols1) {
|
||||
STColumn *pCol = schemaColAt(pSchema1, i);
|
||||
void * val1 = tdGetMemRowDataOfColEx(row1, pCol->colId, pCol->type, TD_DATA_ROW_HEAD_SIZE + pCol->offset, &kvIdx1);
|
||||
// if val1 != NULL, use val1;
|
||||
if (val1 != NULL && !isNull(val1, pCol->type)) {
|
||||
tdAppendColVal(dataRow, val1, pCol->type, pCol->offset);
|
||||
kvLen += tdGetColAppendLen(SMEM_ROW_KV, val1, pCol->type);
|
||||
setSColInfo(&colInfo, pCol->colId, pCol->type, val1);
|
||||
taosArrayPush(stashRow, &colInfo);
|
||||
++i; // next col
|
||||
continue;
|
||||
}
|
||||
|
||||
void *val2 = NULL;
|
||||
while (j < nCols2) {
|
||||
STColumn *tCol = schemaColAt(pSchema2, j);
|
||||
if (tCol->colId < pCol->colId) {
|
||||
++j;
|
||||
continue;
|
||||
}
|
||||
if (tCol->colId == pCol->colId) {
|
||||
val2 = tdGetMemRowDataOfColEx(row2, tCol->colId, tCol->type, TD_DATA_ROW_HEAD_SIZE + tCol->offset, &kvIdx2);
|
||||
} else if (tCol->colId > pCol->colId) {
|
||||
// set NULL
|
||||
}
|
||||
break;
|
||||
} // end of while(j<nCols2)
|
||||
if (val2 == NULL) {
|
||||
val2 = (void *)getNullValue(pCol->type);
|
||||
}
|
||||
tdAppendColVal(dataRow, val2, pCol->type, pCol->offset);
|
||||
if (!isNull(val2, pCol->type)) {
|
||||
kvLen += tdGetColAppendLen(SMEM_ROW_KV, val2, pCol->type);
|
||||
setSColInfo(&colInfo, pCol->colId, pCol->type, val2);
|
||||
taosArrayPush(stashRow, &colInfo);
|
||||
}
|
||||
|
||||
++i; // next col
|
||||
}
|
||||
|
||||
dataLen = TD_ROW_LEN(pRow);
|
||||
|
||||
if (kvLen < dataLen) {
|
||||
// scan stashRow and generate SKVRow
|
||||
memset(buffer, 0, sizeof(dataLen));
|
||||
STSRow tRow = buffer;
|
||||
memRowSetType(tRow, SMEM_ROW_KV);
|
||||
SKVRow kvRow = (SKVRow)memRowKvBody(tRow);
|
||||
int16_t nKvNCols = (int16_t) taosArrayGetSize(stashRow);
|
||||
kvRowSetLen(kvRow, (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nKvNCols));
|
||||
kvRowSetNCols(kvRow, nKvNCols);
|
||||
memRowSetKvVersion(tRow, pSchema1->version);
|
||||
|
||||
int32_t toffset = 0;
|
||||
int16_t k;
|
||||
for (k = 0; k < nKvNCols; ++k) {
|
||||
SColInfo *pColInfo = taosArrayGet(stashRow, k);
|
||||
tdAppendKvColVal(kvRow, pColInfo->colVal, true, pColInfo->colId, pColInfo->colType, toffset);
|
||||
toffset += sizeof(SColIdx);
|
||||
}
|
||||
ASSERT(kvLen == TD_ROW_LEN(tRow));
|
||||
}
|
||||
taosArrayDestroy(stashRow);
|
||||
return buffer;
|
||||
#endif
|
||||
return NULL;
|
||||
}
|
||||
|
||||
SDataCols *tdDupDataCols(SDataCols *pDataCols, bool keepData) {
|
||||
SDataCols *pRet = tdNewDataCols(pDataCols->maxCols, pDataCols->maxPoints);
|
||||
if (pRet == NULL) return NULL;
|
||||
|
||||
pRet->numOfCols = pDataCols->numOfCols;
|
||||
pRet->bitmapMode = pDataCols->bitmapMode;
|
||||
pRet->sversion = pDataCols->sversion;
|
||||
if (keepData) pRet->numOfRows = pDataCols->numOfRows;
|
||||
|
||||
for (int i = 0; i < pDataCols->numOfCols; ++i) {
|
||||
pRet->cols[i].type = pDataCols->cols[i].type;
|
||||
pRet->cols[i].bitmap = pDataCols->cols[i].bitmap;
|
||||
pRet->cols[i].colId = pDataCols->cols[i].colId;
|
||||
pRet->cols[i].bytes = pDataCols->cols[i].bytes;
|
||||
pRet->cols[i].offset = pDataCols->cols[i].offset;
|
||||
|
||||
if (keepData) {
|
||||
if (pDataCols->cols[i].len > 0) {
|
||||
if (tdAllocMemForCol(&pRet->cols[i], pRet->maxPoints) < 0) {
|
||||
tdFreeDataCols(pRet);
|
||||
return NULL;
|
||||
}
|
||||
pRet->cols[i].len = pDataCols->cols[i].len;
|
||||
memcpy(pRet->cols[i].pData, pDataCols->cols[i].pData, pDataCols->cols[i].len);
|
||||
if (IS_VAR_DATA_TYPE(pRet->cols[i].type)) {
|
||||
int dataOffSize = sizeof(VarDataOffsetT) * pDataCols->maxPoints;
|
||||
memcpy(pRet->cols[i].dataOff, pDataCols->cols[i].dataOff, dataOffSize);
|
||||
}
|
||||
if (!TD_COL_ROWS_NORM(pRet->cols + i)) {
|
||||
memcpy(pRet->cols[i].pBitmap, pDataCols->cols[i].pBitmap, TD_BITMAP_BYTES(pDataCols->numOfRows));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return pRet;
|
||||
}
|
||||
|
||||
void tdSRowPrint(STSRow *row, STSchema *pSchema, const char *tag) {
|
||||
STSRowIter iter = {0};
|
||||
tdSTSRowIterInit(&iter, pSchema);
|
||||
|
@ -1020,32 +420,6 @@ void tdSCellValPrint(SCellVal *pVal, int8_t colType) {
|
|||
}
|
||||
}
|
||||
|
||||
int32_t dataColGetNEleLen(SDataCol *pDataCol, int32_t rows, int8_t bitmapMode) {
|
||||
ASSERT(rows > 0);
|
||||
int32_t result = 0;
|
||||
|
||||
if (IS_VAR_DATA_TYPE(pDataCol->type)) {
|
||||
result += pDataCol->dataOff[rows - 1];
|
||||
SCellVal val = {0};
|
||||
if (tdGetColDataOfRow(&val, pDataCol, rows - 1, bitmapMode) < 0) {
|
||||
TASSERT(0);
|
||||
}
|
||||
|
||||
// Currently, count the varDataTLen in of Null/None cols considering back compatibility test for 2.4
|
||||
result += varDataTLen(val.val);
|
||||
// TODO: later on, don't save Null/None for VarDataT for 3.0
|
||||
// if (tdValTypeIsNorm(val.valType)) {
|
||||
// result += varDataTLen(val.val);
|
||||
// }
|
||||
} else {
|
||||
result += TYPE_BYTES[pDataCol->type] * rows;
|
||||
}
|
||||
|
||||
ASSERT(pDataCol->len == result);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
bool tdSKvRowGetVal(STSRow *pRow, col_id_t colId, col_id_t colIdx, SCellVal *pVal) {
|
||||
if (colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
|
||||
tdRowSetVal(pVal, TD_VTYPE_NORM, TD_ROW_KEY_ADDR(pRow));
|
||||
|
@ -1082,40 +456,6 @@ bool tdSTpRowGetVal(STSRow *pRow, col_id_t colId, col_type_t colType, int32_t fl
|
|||
return true;
|
||||
}
|
||||
|
||||
int32_t tdGetColDataOfRow(SCellVal *pVal, SDataCol *pCol, int32_t row, int8_t bitmapMode) {
|
||||
if (isAllRowsNone(pCol)) {
|
||||
pVal->valType = TD_VTYPE_NONE;
|
||||
#ifdef TD_SUPPORT_READ2
|
||||
pVal->val = (void *)getNullValue(pCol->type);
|
||||
#else
|
||||
pVal->val = NULL;
|
||||
#endif
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if (TD_COL_ROWS_NORM(pCol)) {
|
||||
pVal->valType = TD_VTYPE_NORM;
|
||||
} else if (tdGetBitmapValType(pCol->pBitmap, row, &(pVal->valType), bitmapMode) < 0) {
|
||||
return terrno;
|
||||
}
|
||||
|
||||
if (tdValTypeIsNorm(pVal->valType)) {
|
||||
if (IS_VAR_DATA_TYPE(pCol->type)) {
|
||||
pVal->val = POINTER_SHIFT(pCol->pData, pCol->dataOff[row]);
|
||||
} else {
|
||||
pVal->val = POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * row);
|
||||
}
|
||||
} else {
|
||||
pVal->valType = TD_VTYPE_NULL;
|
||||
#ifdef TD_SUPPORT_READ2
|
||||
pVal->val = (void *)getNullValue(pCol->type);
|
||||
#else
|
||||
pVal->val = NULL;
|
||||
#endif
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
bool tdSTSRowIterNext(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCellVal *pVal) {
|
||||
if (colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
|
||||
pVal->val = &pIter->pRow->ts;
|
||||
|
|
|
@ -285,8 +285,8 @@ int32_t debugPrintSColVal(SColVal *cv, int8_t type) {
|
|||
}
|
||||
|
||||
void debugPrintTSRow(STSRow2 *row, STSchema *pTSchema, const char *tags, int32_t ln) {
|
||||
printf("%s:%d %s:v%d:%d ", tags, ln, (row->flags & 0xf0) ? "KV" : "TP", row->sver, row->nData);
|
||||
for (int16_t i = 0; i < schemaNCols(pTSchema); ++i) {
|
||||
// printf("%s:%d %s:v%d:%d ", tags, ln, (row->flags & 0xf0) ? "KV" : "TP", row->sver, row->nData);
|
||||
for (int16_t i = 0; i < pTSchema->numOfCols; ++i) {
|
||||
SColVal cv = {0};
|
||||
tTSRowGet(row, pTSchema, i, &cv);
|
||||
debugPrintSColVal(&cv, pTSchema->columns[i].type);
|
||||
|
@ -393,7 +393,7 @@ static int32_t checkSColVal(const char *rawVal, SColVal *cv, int8_t type) {
|
|||
}
|
||||
|
||||
static void checkTSRow(const char **data, STSRow2 *row, STSchema *pTSchema) {
|
||||
for (int16_t i = 0; i < schemaNCols(pTSchema); ++i) {
|
||||
for (int16_t i = 0; i < pTSchema->numOfCols; ++i) {
|
||||
SColVal cv = {0};
|
||||
tTSRowGet(row, pTSchema, i, &cv);
|
||||
checkSColVal(data[i], &cv, pTSchema->columns[i].type);
|
||||
|
|
|
@ -150,6 +150,7 @@ static void mmStop(SMnodeMgmt *pMgmt) {
  dDebug("mnode-mgmt start to stop");
  taosThreadRwlockWrlock(&pMgmt->lock);
  pMgmt->stopped = 1;
  mndPreClose(pMgmt->pMnode);
  taosThreadRwlockUnlock(&pMgmt->lock);

  mndStop(pMgmt->pMnode);

@ -31,7 +31,7 @@ typedef struct SVnodeMgmt {
  const char  *path;
  const char  *name;
  SQWorkerPool queryPool;
  SQWorkerPool fetchPool;
  SWWorkerPool fetchPool;
  SWWorkerPool syncPool;
  SWWorkerPool writePool;
  SWWorkerPool applyPool;

@ -31,7 +31,7 @@ SVnodeObj **vmGetVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes) {
|
|||
SVnodeObj *pVnode = *ppVnode;
|
||||
if (pVnode && num < size) {
|
||||
int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1);
|
||||
// dTrace("vgId:%d, acquire vnode, refCount:%d", pVnode->vgId, refCount);
|
||||
// dTrace("vgId:%d, acquire vnode list, ref:%d", pVnode->vgId, refCount);
|
||||
pVnodes[num++] = (*ppVnode);
|
||||
pIter = taosHashIterate(pMgmt->hash, pIter);
|
||||
} else {
|
||||
|
|
|
@ -23,6 +23,7 @@ SVnodeObj *vmAcquireVnode(SVnodeMgmt *pMgmt, int32_t vgId) {
|
|||
taosHashGetDup(pMgmt->hash, &vgId, sizeof(int32_t), (void *)&pVnode);
|
||||
if (pVnode == NULL || pVnode->dropped) {
|
||||
terrno = TSDB_CODE_VND_INVALID_VGROUP_ID;
|
||||
pVnode = NULL;
|
||||
} else {
|
||||
int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1);
|
||||
// dTrace("vgId:%d, acquire vnode, ref:%d", pVnode->vgId, refCount);
|
||||
|
@ -75,11 +76,14 @@ int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) {
|
|||
void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
|
||||
char path[TSDB_FILENAME_LEN] = {0};
|
||||
|
||||
vnodePreClose(pVnode->pImpl);
|
||||
|
||||
taosThreadRwlockWrlock(&pMgmt->lock);
|
||||
taosHashRemove(pMgmt->hash, &pVnode->vgId, sizeof(int32_t));
|
||||
taosThreadRwlockUnlock(&pMgmt->lock);
|
||||
|
||||
vmReleaseVnode(pMgmt, pVnode);
|
||||
|
||||
dTrace("vgId:%d, wait for vnode ref become 0", pVnode->vgId);
|
||||
while (pVnode->refCount > 0) taosMsleep(10);
|
||||
dTrace("vgId:%d, wait for vnode queue is empty", pVnode->vgId);
|
||||
|
||||
|
|
|
@ -81,21 +81,26 @@ static void vmProcessQueryQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
  taosFreeQitem(pMsg);
}

static void vmProcessFetchQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
  SVnodeObj      *pVnode = pInfo->ahandle;
  const STraceId *trace = &pMsg->info.traceId;
static void vmProcessFetchQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
  SVnodeObj *pVnode = pInfo->ahandle;
  SRpcMsg   *pMsg = NULL;

  dGTrace("vgId:%d, msg:%p get from vnode-fetch queue", pVnode->vgId, pMsg);
  int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, pMsg, pInfo);
  if (code != 0) {
    if (terrno != 0) code = terrno;
    dGError("vgId:%d, msg:%p failed to fetch since %s", pVnode->vgId, pMsg, terrstr());
    vmSendRsp(pMsg, code);
  for (int32_t i = 0; i < numOfMsgs; ++i) {
    if (taosGetQitem(qall, (void **)&pMsg) == 0) continue;
    const STraceId *trace = &pMsg->info.traceId;
    dGTrace("vgId:%d, msg:%p get from vnode-fetch queue", pVnode->vgId, pMsg);

    int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, pMsg, pInfo);
    if (code != 0) {
      if (terrno != 0) code = terrno;
      dGError("vgId:%d, msg:%p failed to fetch since %s", pVnode->vgId, pMsg, terrstr());
      vmSendRsp(pMsg, code);
    }

    dGTrace("vgId:%d, msg:%p is freed, code:0x%x", pVnode->vgId, pMsg, code);
    rpcFreeCont(pMsg->pCont);
    taosFreeQitem(pMsg);
  }

  dGTrace("vgId:%d, msg:%p is freed, code:0x%x", pVnode->vgId, pMsg, code);
  rpcFreeCont(pMsg->pCont);
  taosFreeQitem(pMsg);
}

static void vmProcessSyncQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
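
The fetch queue now uses a write-worker style callback that receives a whole batch (qall, numOfMsgs) and drains it itself, rather than being invoked once per message. A toy version of that drain loop follows, with a demo queue standing in for STaosQall/taosGetQitem; all demo_* names are illustrative only.

#include <stdio.h>

typedef struct { int payload; } DemoMsg;

typedef struct {
  DemoMsg items[8];
  int     count;
  int     next;
} DemoQall;

static int demo_get_item(DemoQall *q, DemoMsg **out) {
  if (q->next >= q->count) return 0;  /* nothing left in this batch */
  *out = &q->items[q->next++];
  return 1;
}

static void demo_process_fetch_batch(DemoQall *qall, int numOfMsgs) {
  DemoMsg *msg = NULL;
  for (int i = 0; i < numOfMsgs; ++i) {
    if (demo_get_item(qall, &msg) == 0) continue;
    /* handle one fetch request; on error a response would be sent here */
    printf("processing msg %d\n", msg->payload);
  }
}

int main(void) {
  DemoQall qall = {.items = {{1}, {2}, {3}}, .count = 3, .next = 0};
  demo_process_fetch_batch(&qall, qall.count);
  return 0;
}
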
@ -201,9 +206,9 @@ int32_t vmPutRpcMsgToQueue(SVnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
|
|||
int32_t code = vmPutMsgToQueue(pMgmt, pMsg, qtype);
|
||||
if (code != 0) {
|
||||
dTrace("msg:%p, is freed", pMsg);
|
||||
taosFreeQitem(pMsg);
|
||||
rpcFreeCont(pMsg->pCont);
|
||||
pRpc->pCont = NULL;
|
||||
taosFreeQitem(pMsg);
|
||||
}
|
||||
|
||||
return code;
|
||||
|
@ -232,8 +237,8 @@ int32_t vmGetQueueSize(SVnodeMgmt *pMgmt, int32_t vgId, EQueueType qtype) {
|
|||
default:
|
||||
break;
|
||||
}
|
||||
vmReleaseVnode(pMgmt, pVnode);
|
||||
}
|
||||
vmReleaseVnode(pMgmt, pVnode);
|
||||
return size;
|
||||
}
|
||||
|
||||
|
@ -242,7 +247,7 @@ int32_t vmAllocQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
|
|||
pVnode->pSyncQ = tWWorkerAllocQueue(&pMgmt->syncPool, pVnode, (FItems)vmProcessSyncQueue);
|
||||
pVnode->pApplyQ = tWWorkerAllocQueue(&pMgmt->applyPool, pVnode->pImpl, (FItems)vnodeApplyWriteMsg);
|
||||
pVnode->pQueryQ = tQWorkerAllocQueue(&pMgmt->queryPool, pVnode, (FItem)vmProcessQueryQueue);
|
||||
pVnode->pFetchQ = tQWorkerAllocQueue(&pMgmt->fetchPool, pVnode, (FItem)vmProcessFetchQueue);
|
||||
pVnode->pFetchQ = tWWorkerAllocQueue(&pMgmt->fetchPool, pVnode, (FItems)vmProcessFetchQueue);
|
||||
|
||||
if (pVnode->pWriteQ == NULL || pVnode->pSyncQ == NULL || pVnode->pApplyQ == NULL || pVnode->pQueryQ == NULL ||
|
||||
pVnode->pFetchQ == NULL) {
|
||||
|
@ -250,7 +255,11 @@ int32_t vmAllocQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
|
|||
return -1;
|
||||
}
|
||||
|
||||
dDebug("vgId:%d, queue is alloced", pVnode->vgId);
|
||||
dDebug("vgId:%d, write-queue:%p is alloced", pVnode->vgId, pVnode->pWriteQ);
|
||||
dDebug("vgId:%d, sync-queue:%p is alloced", pVnode->vgId, pVnode->pSyncQ);
|
||||
dDebug("vgId:%d, apply-queue:%p is alloced", pVnode->vgId, pVnode->pApplyQ);
|
||||
dDebug("vgId:%d, query-queue:%p is alloced", pVnode->vgId, pVnode->pQueryQ);
|
||||
dDebug("vgId:%d, fetch-queue:%p is alloced", pVnode->vgId, pVnode->pFetchQ);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -259,7 +268,7 @@ void vmFreeQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
|
|||
tWWorkerFreeQueue(&pMgmt->applyPool, pVnode->pApplyQ);
|
||||
tWWorkerFreeQueue(&pMgmt->syncPool, pVnode->pSyncQ);
|
||||
tQWorkerFreeQueue(&pMgmt->queryPool, pVnode->pQueryQ);
|
||||
tQWorkerFreeQueue(&pMgmt->fetchPool, pVnode->pFetchQ);
|
||||
tWWorkerFreeQueue(&pMgmt->fetchPool, pVnode->pFetchQ);
|
||||
pVnode->pWriteQ = NULL;
|
||||
pVnode->pSyncQ = NULL;
|
||||
pVnode->pApplyQ = NULL;
|
||||
|
@ -275,11 +284,10 @@ int32_t vmStartWorker(SVnodeMgmt *pMgmt) {
|
|||
pQPool->max = tsNumOfVnodeQueryThreads;
|
||||
if (tQWorkerInit(pQPool) != 0) return -1;
|
||||
|
||||
SQWorkerPool *pFPool = &pMgmt->fetchPool;
|
||||
SWWorkerPool *pFPool = &pMgmt->fetchPool;
|
||||
pFPool->name = "vnode-fetch";
|
||||
pFPool->min = tsNumOfVnodeFetchThreads;
|
||||
pFPool->max = tsNumOfVnodeFetchThreads;
|
||||
if (tQWorkerInit(pFPool) != 0) return -1;
|
||||
if (tWWorkerInit(pFPool) != 0) return -1;
|
||||
|
||||
SWWorkerPool *pWPool = &pMgmt->writePool;
|
||||
pWPool->name = "vnode-write";
|
||||
|
@ -325,6 +333,6 @@ void vmStopWorker(SVnodeMgmt *pMgmt) {
|
|||
tWWorkerCleanup(&pMgmt->applyPool);
|
||||
tWWorkerCleanup(&pMgmt->syncPool);
|
||||
tQWorkerCleanup(&pMgmt->queryPool);
|
||||
tQWorkerCleanup(&pMgmt->fetchPool);
|
||||
tWWorkerCleanup(&pMgmt->fetchPool);
|
||||
dDebug("vnode workers are closed");
|
||||
}
|
||||
|
|
|
@ -366,6 +366,12 @@ SMnode *mndOpen(const char *path, const SMnodeOpt *pOption) {
  return pMnode;
}

void mndPreClose(SMnode *pMnode) {
  if (pMnode != NULL) {
    syncLeaderTransfer(pMnode->syncMgmt.sync);
  }
}

void mndClose(SMnode *pMnode) {
  if (pMnode != NULL) {
    mDebug("start to close mnode");

@ -56,20 +56,22 @@ void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbM
|
|||
sdbSetApplyInfo(pMnode->pSdb, cbMeta.index, cbMeta.term, cbMeta.lastConfigIndex);
|
||||
}
|
||||
|
||||
if (pMgmt->transId == transId) {
|
||||
if (pMgmt->transId == transId && transId != 0) {
|
||||
if (pMgmt->errCode != 0) {
|
||||
mError("trans:%d, failed to propose since %s", transId, tstrerror(pMgmt->errCode));
|
||||
}
|
||||
pMgmt->transId = 0;
|
||||
tsem_post(&pMgmt->syncSem);
|
||||
} else {
|
||||
#if 1
|
||||
mError("trans:%d, invalid commit msg since trandId not match with %d", transId, pMgmt->transId);
|
||||
#else
|
||||
STrans *pTrans = mndAcquireTrans(pMnode, transId);
|
||||
if (pTrans != NULL) {
|
||||
mndTransExecute(pMnode, pTrans);
|
||||
mndReleaseTrans(pMnode, pTrans);
|
||||
}
|
||||
#if 0
|
||||
sdbWriteFile(pMnode->pSdb, SDB_WRITE_DELTA);
|
||||
// sdbWriteFile(pMnode->pSdb, SDB_WRITE_DELTA);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
@ -199,6 +201,7 @@ int32_t mndInitSync(SMnode *pMnode) {
|
|||
}
|
||||
|
||||
// decrease election timer
|
||||
setPingTimerMS(pMgmt->sync, 5000);
|
||||
setElectTimerMS(pMgmt->sync, 600);
|
||||
setHeartbeatTimerMS(pMgmt->sync, 300);
|
||||
|
||||
|
|
|
@ -51,6 +51,7 @@ void vnodeCleanup();
|
|||
int32_t vnodeCreate(const char *path, SVnodeCfg *pCfg, STfs *pTfs);
|
||||
void vnodeDestroy(const char *path, STfs *pTfs);
|
||||
SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb);
|
||||
void vnodePreClose(SVnode *pVnode);
|
||||
void vnodeClose(SVnode *pVnode);
|
||||
|
||||
int32_t vnodeStart(SVnode *pVnode);
|
||||
|
@ -136,8 +137,7 @@ int64_t tsdbGetNumOfRowsInMemTable(STsdbReader *pHandle);
|
|||
void *tsdbGetIdx(SMeta *pMeta);
|
||||
void *tsdbGetIvtIdx(SMeta *pMeta);
|
||||
|
||||
int32_t tsdbLastRowReaderOpen(void *pVnode, int32_t type, SArray *pTableIdList, int32_t *colId, int32_t numOfCols,
|
||||
void **pReader);
|
||||
int32_t tsdbLastRowReaderOpen(void *pVnode, int32_t type, SArray *pTableIdList, int32_t numOfCols, void **pReader);
|
||||
int32_t tsdbRetrieveLastRow(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds);
|
||||
int32_t tsdbLastrowReaderClose(void *pReader);
|
||||
int32_t tsdbGetTableSchema(SVnode *pVnode, int64_t uid, STSchema **pSchema, int64_t *suid);
|
||||
|
|
|
@ -89,8 +89,6 @@ typedef struct {
|
|||
STqExecTb execTb;
|
||||
STqExecDb execDb;
|
||||
};
|
||||
// TODO remove it
|
||||
int64_t tsdbEndVer;
|
||||
|
||||
} STqExecHandle;
|
||||
|
||||
|
@ -101,6 +99,8 @@ typedef struct {
|
|||
int32_t epoch;
|
||||
int8_t fetchMeta;
|
||||
|
||||
int64_t snapshotVer;
|
||||
|
||||
// TODO remove
|
||||
SWalReader* pWalReader;
|
||||
|
||||
|
@ -131,7 +131,7 @@ typedef struct {
|
|||
static STqMgmt tqMgmt = {0};
|
||||
|
||||
// tqRead
|
||||
int64_t tqScan(STQ* pTq, const STqExecHandle* pExec, SMqDataRsp* pRsp, STqOffsetVal* offset);
|
||||
int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* offset);
|
||||
int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHead** pHeadWithCkSum);
|
||||
|
||||
// tqExec
|
||||
|
|
|
@ -244,6 +244,9 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
|
|||
STqOffsetVal reqOffset = pReq->reqOffset;
|
||||
STqOffsetVal fetchOffsetNew;
|
||||
|
||||
// todo
|
||||
workerId = 0;
|
||||
|
||||
// 1.find handle
|
||||
STqHandle* pHandle = taosHashGet(pTq->handles, pReq->subKey, strlen(pReq->subKey));
|
||||
/*ASSERT(pHandle);*/
|
||||
|
@ -284,7 +287,8 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
|
|||
fetchOffsetNew = pOffset->val;
|
||||
char formatBuf[80];
|
||||
tFormatOffset(formatBuf, 80, &fetchOffsetNew);
|
||||
tqDebug("tmq poll: consumer %" PRId64 ", subkey %s, offset reset to %s", consumerId, pHandle->subKey, formatBuf);
|
||||
tqDebug("tmq poll: consumer %" PRId64 ", subkey %s, vg %d, offset reset to %s", consumerId, pHandle->subKey,
|
||||
TD_VID(pTq->pVnode), formatBuf);
|
||||
} else {
|
||||
if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEAST) {
|
||||
if (pReq->useSnapshot && pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
|
||||
|
@ -299,8 +303,8 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
|
|||
}
|
||||
} else if (reqOffset.type == TMQ_OFFSET__RESET_LATEST) {
|
||||
tqOffsetResetToLog(&dataRsp.rspOffset, walGetLastVer(pTq->pVnode->pWal));
|
||||
tqDebug("tmq poll: consumer %ld, subkey %s, offset reset to %ld", consumerId, pHandle->subKey,
|
||||
dataRsp.rspOffset.version);
|
||||
tqDebug("tmq poll: consumer %ld, subkey %s, vg %d, offset reset to %ld", consumerId, pHandle->subKey,
|
||||
TD_VID(pTq->pVnode), dataRsp.rspOffset.version);
|
||||
if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) {
|
||||
code = -1;
|
||||
}
|
||||
|
@ -318,10 +322,10 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
|
|||
|
||||
// 3.query
|
||||
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
|
||||
if (fetchOffsetNew.type == TMQ_OFFSET__LOG) {
|
||||
fetchOffsetNew.version++;
|
||||
}
|
||||
if (tqScan(pTq, &pHandle->execHandle, &dataRsp, &fetchOffsetNew) < 0) {
|
||||
/*if (fetchOffsetNew.type == TMQ_OFFSET__LOG) {*/
|
||||
/*fetchOffsetNew.version++;*/
|
||||
/*}*/
|
||||
if (tqScan(pTq, pHandle, &dataRsp, &fetchOffsetNew) < 0) {
|
||||
ASSERT(0);
|
||||
code = -1;
|
||||
goto OVER;
|
||||
|
@ -480,30 +484,28 @@ int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) {
|
|||
pHandle->fetchMeta = req.withMeta;
|
||||
|
||||
pHandle->pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
|
||||
/*for (int32_t i = 0; i < 5; i++) {*/
|
||||
/*pHandle->execHandle.pExecReader[i] = tqOpenReader(pTq->pVnode);*/
|
||||
/*}*/
|
||||
|
||||
// TODO version should be assigned in preprocess
|
||||
int64_t ver = walGetCommittedVer(pTq->pVnode->pWal);
|
||||
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
|
||||
pHandle->execHandle.execCol.qmsg = req.qmsg;
|
||||
pHandle->snapshotVer = ver;
|
||||
req.qmsg = NULL;
|
||||
for (int32_t i = 0; i < 5; i++) {
|
||||
SReadHandle handle = {
|
||||
.tqReader = pHandle->execHandle.pExecReader[i],
|
||||
.meta = pTq->pVnode->pMeta,
|
||||
.vnode = pTq->pVnode,
|
||||
.initTableReader = true,
|
||||
.initTqReader = true,
|
||||
.version = ver,
|
||||
};
|
||||
pHandle->execHandle.execCol.task[i] = qCreateStreamExecTaskInfo(pHandle->execHandle.execCol.qmsg, &handle);
|
||||
pHandle->execHandle.execCol.task[i] = qCreateQueueExecTaskInfo(pHandle->execHandle.execCol.qmsg, &handle);
|
||||
ASSERT(pHandle->execHandle.execCol.task[i]);
|
||||
void* scanner = NULL;
|
||||
qExtractStreamScanner(pHandle->execHandle.execCol.task[i], &scanner);
|
||||
ASSERT(scanner);
|
||||
pHandle->execHandle.pExecReader[i] = qExtractReaderFromStreamScanner(scanner);
|
||||
ASSERT(pHandle->execHandle.pExecReader[i]);
|
||||
pHandle->execHandle.tsdbEndVer = ver;
|
||||
}
|
||||
} else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB) {
|
||||
for (int32_t i = 0; i < 5; i++) {
|
||||
|
|
|
@ -59,13 +59,13 @@ static int32_t tqAddTbNameToRsp(const STQ* pTq, int64_t uid, SMqDataRsp* pRsp) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
int64_t tqScan(STQ* pTq, const STqExecHandle* pExec, SMqDataRsp* pRsp, STqOffsetVal* pOffset) {
|
||||
qTaskInfo_t task = pExec->execCol.task[0];
|
||||
int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* pOffset) {
|
||||
const STqExecHandle* pExec = &pHandle->execHandle;
|
||||
qTaskInfo_t task = pExec->execCol.task[0];
|
||||
|
||||
if (qStreamPrepareScan(task, pOffset) < 0) {
|
||||
ASSERT(pOffset->type == TMQ_OFFSET__LOG);
|
||||
pRsp->rspOffset = *pOffset;
|
||||
pRsp->rspOffset.version--;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -73,9 +73,11 @@ int64_t tqScan(STQ* pTq, const STqExecHandle* pExec, SMqDataRsp* pRsp, STqOffset
|
|||
while (1) {
|
||||
SSDataBlock* pDataBlock = NULL;
|
||||
uint64_t ts = 0;
|
||||
tqDebug("task start to execute");
|
||||
if (qExecTask(task, &pDataBlock, &ts) < 0) {
|
||||
ASSERT(0);
|
||||
}
|
||||
tqDebug("task execute end, get %p", pDataBlock);
|
||||
|
||||
if (pDataBlock != NULL) {
|
||||
tqAddBlockDataToRsp(pDataBlock, pRsp);
|
||||
|
@ -97,7 +99,7 @@ int64_t tqScan(STQ* pTq, const STqExecHandle* pExec, SMqDataRsp* pRsp, STqOffset
|
|||
}
|
||||
|
||||
if (pRsp->blockNum == 0 && pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
|
||||
tqOffsetResetToLog(pOffset, pExec->tsdbEndVer + 1);
|
||||
tqOffsetResetToLog(pOffset, pHandle->snapshotVer + 1);
|
||||
qStreamPrepareScan(task, pOffset);
|
||||
continue;
|
||||
}
|
||||
|
@ -116,7 +118,7 @@ int64_t tqScan(STQ* pTq, const STqExecHandle* pExec, SMqDataRsp* pRsp, STqOffset
|
|||
if (pRsp->reqOffset.type == TMQ_OFFSET__LOG) {
|
||||
ASSERT(pRsp->rspOffset.version + 1 >= pRsp->reqOffset.version);
|
||||
}
|
||||
|
||||
tqDebug("task exec exited");
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
|
@ -19,6 +19,7 @@ static int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle) {
|
|||
if (tStartEncode(pEncoder) < 0) return -1;
|
||||
if (tEncodeCStr(pEncoder, pHandle->subKey) < 0) return -1;
|
||||
if (tEncodeI64(pEncoder, pHandle->consumerId) < 0) return -1;
|
||||
if (tEncodeI64(pEncoder, pHandle->snapshotVer) < 0) return -1;
|
||||
if (tEncodeI32(pEncoder, pHandle->epoch) < 0) return -1;
|
||||
if (tEncodeI8(pEncoder, pHandle->execHandle.subType) < 0) return -1;
|
||||
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
|
||||
|
@ -32,6 +33,7 @@ static int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle) {
|
|||
if (tStartDecode(pDecoder) < 0) return -1;
|
||||
if (tDecodeCStrTo(pDecoder, pHandle->subKey) < 0) return -1;
|
||||
if (tDecodeI64(pDecoder, &pHandle->consumerId) < 0) return -1;
|
||||
if (tDecodeI64(pDecoder, &pHandle->snapshotVer) < 0) return -1;
|
||||
if (tDecodeI32(pDecoder, &pHandle->epoch) < 0) return -1;
|
||||
if (tDecodeI8(pDecoder, &pHandle->execHandle.subType) < 0) return -1;
|
||||
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
|
||||
|
@ -78,19 +80,25 @@ int32_t tqMetaOpen(STQ* pTq) {
|
|||
tDecoderInit(&decoder, (uint8_t*)pVal, vLen);
|
||||
tDecodeSTqHandle(&decoder, &handle);
|
||||
handle.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
|
||||
for (int32_t i = 0; i < 5; i++) {
|
||||
handle.execHandle.pExecReader[i] = tqOpenReader(pTq->pVnode);
|
||||
}
|
||||
/*for (int32_t i = 0; i < 5; i++) {*/
|
||||
/*handle.execHandle.pExecReader[i] = tqOpenReader(pTq->pVnode);*/
|
||||
/*}*/
|
||||
if (handle.execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
|
||||
for (int32_t i = 0; i < 5; i++) {
|
||||
SReadHandle reader = {
|
||||
.tqReader = handle.execHandle.pExecReader[i],
|
||||
.meta = pTq->pVnode->pMeta,
|
||||
.pMsgCb = &pTq->pVnode->msgCb,
|
||||
.vnode = pTq->pVnode,
|
||||
.initTableReader = true,
|
||||
.initTqReader = true,
|
||||
.version = handle.snapshotVer,
|
||||
};
|
||||
handle.execHandle.execCol.task[i] = qCreateStreamExecTaskInfo(handle.execHandle.execCol.qmsg, &reader);
|
||||
handle.execHandle.execCol.task[i] = qCreateQueueExecTaskInfo(handle.execHandle.execCol.qmsg, &reader);
|
||||
ASSERT(handle.execHandle.execCol.task[i]);
|
||||
void* scanner = NULL;
|
||||
qExtractStreamScanner(handle.execHandle.execCol.task[i], &scanner);
|
||||
ASSERT(scanner);
|
||||
handle.execHandle.pExecReader[i] = qExtractReaderFromStreamScanner(scanner);
|
||||
ASSERT(handle.execHandle.pExecReader[i]);
|
||||
}
|
||||
} else {
|
||||
handle.execHandle.execDb.pFilterOutTbUid =
|
||||
|
|
|
@ -22,7 +22,6 @@ typedef struct SLastrowReader {
|
|||
SVnode* pVnode;
|
||||
STSchema* pSchema;
|
||||
uint64_t uid;
|
||||
// int32_t* pSlotIds;
|
||||
char** transferBuf; // todo remove it soon
|
||||
int32_t numOfCols;
|
||||
int32_t type;
|
||||
|
@ -31,25 +30,27 @@ typedef struct SLastrowReader {
|
|||
} SLastrowReader;
|
||||
|
||||
static void saveOneRow(STSRow* pRow, SSDataBlock* pBlock, SLastrowReader* pReader, const int32_t* slotIds) {
|
||||
ASSERT(pReader->numOfCols <= taosArrayGetSize(pBlock->pDataBlock));
|
||||
int32_t numOfRows = pBlock->info.rows;
|
||||
size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
|
||||
|
||||
SColVal colVal = {0};
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
for (int32_t i = 0; i < pReader->numOfCols; ++i) {
|
||||
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
|
||||
|
||||
if (slotIds[i] == -1) {
|
||||
colDataAppend(pColInfoData, numOfRows, (const char*)&pRow->ts, false);
|
||||
} else {
|
||||
tTSRowGetVal(pRow, pReader->pSchema, slotIds[i], &colVal);
|
||||
int32_t slotId = slotIds[i];
|
||||
|
||||
tTSRowGetVal(pRow, pReader->pSchema, slotId, &colVal);
|
||||
|
||||
if (IS_VAR_DATA_TYPE(colVal.type)) {
|
||||
if (colVal.isNull || colVal.isNone) {
|
||||
colDataAppendNULL(pColInfoData, numOfRows);
|
||||
} else {
|
||||
varDataSetLen(pReader->transferBuf[i], colVal.value.nData);
|
||||
memcpy(varDataVal(pReader->transferBuf[i]), colVal.value.pData, colVal.value.nData);
|
||||
colDataAppend(pColInfoData, numOfRows, pReader->transferBuf[i], false);
|
||||
varDataSetLen(pReader->transferBuf[slotId], colVal.value.nData);
|
||||
memcpy(varDataVal(pReader->transferBuf[slotId]), colVal.value.pData, colVal.value.nData);
|
||||
colDataAppend(pColInfoData, numOfRows, pReader->transferBuf[slotId], false);
|
||||
}
|
||||
} else {
|
||||
colDataAppend(pColInfoData, numOfRows, (const char*)&colVal.value, colVal.isNull || colVal.isNone);
|
||||
|
@ -60,8 +61,7 @@ static void saveOneRow(STSRow* pRow, SSDataBlock* pBlock, SLastrowReader* pReade
|
|||
pBlock->info.rows += 1;
|
||||
}
|
||||
|
||||
int32_t tsdbLastRowReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList, int32_t* colId, int32_t numOfCols,
|
||||
void** pReader) {
|
||||
int32_t tsdbLastRowReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList, int32_t numOfCols, void** pReader) {
|
||||
SLastrowReader* p = taosMemoryCalloc(1, sizeof(SLastrowReader));
|
||||
if (p == NULL) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
|
@ -70,13 +70,18 @@ int32_t tsdbLastRowReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList,
|
|||
p->type = type;
|
||||
p->pVnode = pVnode;
|
||||
p->numOfCols = numOfCols;
|
||||
p->transferBuf = taosMemoryCalloc(p->numOfCols, POINTER_BYTES);
|
||||
|
||||
if (taosArrayGetSize(pTableIdList) == 0) {
|
||||
*pReader = p;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
STableKeyInfo* pKeyInfo = taosArrayGet(pTableIdList, 0);
|
||||
p->pSchema = metaGetTbTSchema(p->pVnode->pMeta, pKeyInfo->uid, -1);
|
||||
p->pTableList = pTableIdList;
|
||||
|
||||
for (int32_t i = 0; i < p->numOfCols; ++i) {
|
||||
p->transferBuf = taosMemoryCalloc(p->pSchema->numOfCols, POINTER_BYTES);
|
||||
for (int32_t i = 0; i < p->pSchema->numOfCols; ++i) {
|
||||
if (IS_VAR_DATA_TYPE(p->pSchema->columns[i].type)) {
|
||||
p->transferBuf[i] = taosMemoryMalloc(p->pSchema->columns[i].bytes);
|
||||
}
|
||||
|
@ -89,10 +94,11 @@ int32_t tsdbLastRowReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList,
|
|||
int32_t tsdbLastrowReaderClose(void* pReader) {
|
||||
SLastrowReader* p = pReader;
|
||||
|
||||
for (int32_t i = 0; i < p->numOfCols; ++i) {
|
||||
for (int32_t i = 0; i < p->pSchema->numOfCols; ++i) {
|
||||
taosMemoryFreeClear(p->transferBuf[i]);
|
||||
}
|
||||
|
||||
taosMemoryFree(p->pSchema);
|
||||
taosMemoryFree(p->transferBuf);
|
||||
taosMemoryFree(pReader);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
|
|
@ -71,7 +71,7 @@ typedef struct SFilesetIter {
|
|||
|
||||
typedef struct SFileDataBlockInfo {
|
||||
int32_t
|
||||
tbBlockIdx; // index position in STableBlockScanInfo in order to check whether neighbor block overlaps with it
|
||||
tbBlockIdx; // index position in STableBlockScanInfo in order to check whether neighbor block overlaps with it
|
||||
uint64_t uid;
|
||||
} SFileDataBlockInfo;
|
||||
|
||||
|
@ -119,10 +119,10 @@ struct STsdbReader {
|
|||
int32_t type; // query type: 1. retrieve all data blocks, 2. retrieve direct prev|next rows
|
||||
SBlockLoadSuppInfo suppInfo;
|
||||
|
||||
SIOCostSummary cost;
|
||||
STSchema* pSchema;
|
||||
SDataFReader* pFileReader;
|
||||
SVersionRange verRange;
|
||||
SIOCostSummary cost;
|
||||
STSchema* pSchema;
|
||||
SDataFReader* pFileReader;
|
||||
SVersionRange verRange;
|
||||
};
|
||||
|
||||
static SFileDataBlockInfo* getCurrentBlockInfo(SDataBlockIter* pBlockIter);
|
||||
|
@ -287,9 +287,7 @@ static int32_t initFilesetIterator(SFilesetIter* pIter, const STsdbFSState* pFSt
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static void cleanupFilesetIterator(SFilesetIter* pIter) {
|
||||
taosArrayDestroy(pIter->pFileList);
|
||||
}
|
||||
static void cleanupFilesetIterator(SFilesetIter* pIter) { taosArrayDestroy(pIter->pFileList); }
|
||||
|
||||
static bool filesetIteratorNext(SFilesetIter* pIter, STsdbReader* pReader) {
|
||||
bool asc = ASCENDING_TRAVERSE(pIter->order);
|
||||
|
@ -304,6 +302,10 @@ static bool filesetIteratorNext(SFilesetIter* pIter, STsdbReader* pReader) {
|
|||
STimeWindow win = {0};
|
||||
|
||||
while (1) {
|
||||
// if (pReader->pFileReader != NULL) {
|
||||
// tsdbDataFReaderClose(&pReader->pFileReader);
|
||||
// }
|
||||
|
||||
pReader->status.pCurrentFileset = (SDFileSet*)taosArrayGet(pIter->pFileList, pIter->index);
|
||||
|
||||
int32_t code = tsdbDataFReaderOpen(&pReader->pFileReader, pReader->pTsdb, pReader->status.pCurrentFileset);
|
||||
|
@ -349,9 +351,7 @@ static void resetDataBlockIterator(SDataBlockIter* pIter, int32_t order) {
|
|||
}
|
||||
}
|
||||
|
||||
static void cleanupDataBlockIterator(SDataBlockIter* pIter) {
|
||||
taosArrayDestroy(pIter->blockList);
|
||||
}
|
||||
static void cleanupDataBlockIterator(SDataBlockIter* pIter) { taosArrayDestroy(pIter->blockList); }
|
||||
|
||||
static void initReaderStatus(SReaderStatus* pStatus) {
|
||||
pStatus->pTableIter = NULL;
|
||||
|
@ -392,8 +392,7 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsd
|
|||
|
||||
initReaderStatus(&pReader->status);
|
||||
|
||||
pReader->pTsdb =
|
||||
getTsdbByRetentions(pVnode, pCond->twindows.skey, pVnode->config.tsdbCfg.retentions, idstr, &level);
|
||||
pReader->pTsdb = getTsdbByRetentions(pVnode, pCond->twindows.skey, pVnode->config.tsdbCfg.retentions, idstr, &level);
|
||||
pReader->suid = pCond->suid;
|
||||
pReader->order = pCond->order;
|
||||
pReader->capacity = 4096;
|
||||
|
@ -833,7 +832,7 @@ static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockI
|
|||
|
||||
uint8_t *pb = NULL, *pb1 = NULL;
|
||||
int32_t code = tsdbReadColData(pReader->pFileReader, &pBlockScanInfo->blockIdx, pBlock, pSupInfo->colIds, numOfCols,
|
||||
pBlockData, &pb, &pb1);
|
||||
pBlockData, &pb, &pb1);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _error;
|
||||
}
|
||||
|
@ -1459,18 +1458,18 @@ static bool overlapWithDelSkyline(STableBlockScanInfo* pBlockScanInfo, const SBl
|
|||
}
|
||||
|
||||
TSDBKEY* pFirst = taosArrayGet(pBlockScanInfo->delSkyline, 0);
|
||||
TSDBKEY* pLast = taosArrayGetLast(pBlockScanInfo->delSkyline);
|
||||
TSDBKEY* pLast = taosArrayGetLast(pBlockScanInfo->delSkyline);
|
||||
|
||||
// ts is not overlap
|
||||
if (pBlock->minKey.ts > pLast->ts || pBlock->maxKey.ts < pFirst->ts) {
|
||||
return false;
|
||||
}
|
||||
|
||||
int32_t step = ASCENDING_TRAVERSE(order)? 1:-1;
|
||||
int32_t step = ASCENDING_TRAVERSE(order) ? 1 : -1;
|
||||
|
||||
// version is not overlap
|
||||
size_t num = taosArrayGetSize(pBlockScanInfo->delSkyline);
|
||||
for(int32_t i = pBlockScanInfo->fileDelIndex; i < num; i += step) {
|
||||
for (int32_t i = pBlockScanInfo->fileDelIndex; i < num; i += step) {
|
||||
TSDBKEY* p = taosArrayGet(pBlockScanInfo->delSkyline, i);
|
||||
if (p->ts >= pBlock->minKey.ts && p->ts <= pBlock->maxKey.ts) {
|
||||
if (p->version >= pBlock->minVersion) {
|
||||
|
@ -1502,8 +1501,8 @@ static bool fileBlockShouldLoad(STsdbReader* pReader, SFileDataBlockInfo* pFBloc
|
|||
}
|
||||
|
||||
// has duplicated ts of different version in this block
|
||||
bool hasDup = (pBlock->nSubBlock == 1)? pBlock->hasDup:true;
|
||||
bool overlapWithDel= overlapWithDelSkyline(pScanInfo, pBlock, pReader->order);
|
||||
bool hasDup = (pBlock->nSubBlock == 1) ? pBlock->hasDup : true;
|
||||
bool overlapWithDel = overlapWithDelSkyline(pScanInfo, pBlock, pReader->order);
|
||||
|
||||
return (overlapWithNeighbor || hasDup || dataBlockPartiallyRequired(&pReader->window, &pReader->verRange, pBlock) ||
|
||||
keyOverlapFileBlock(key, pBlock, &pReader->verRange) || (pBlock->nRow > pReader->capacity) || overlapWithDel);
|
||||
|
@ -2220,17 +2219,18 @@ static STsdb* getTsdbByRetentions(SVnode* pVnode, TSKEY winSKey, SRetention* ret
|
|||
}
|
||||
|
||||
SVersionRange getQueryVerRange(SVnode* pVnode, SQueryTableDataCond* pCond, int8_t level) {
|
||||
int64_t startVer = (pCond->startVersion == -1)? 0:pCond->startVersion;
|
||||
int64_t startVer = (pCond->startVersion == -1) ? 0 : pCond->startVersion;
|
||||
|
||||
if (VND_IS_RSMA(pVnode)) {
|
||||
return (SVersionRange){.minVer = startVer, .maxVer = tdRSmaGetMaxSubmitVer(pVnode->pSma, level)};
|
||||
}
|
||||
|
||||
int64_t endVer = 0;
|
||||
if (pCond->endVersion == -1) { // user not specified end version, set current maximum version of vnode as the endVersion
|
||||
if (pCond->endVersion ==
|
||||
-1) { // user not specified end version, set current maximum version of vnode as the endVersion
|
||||
endVer = pVnode->state.applied;
|
||||
} else {
|
||||
endVer = (pCond->endVersion > pVnode->state.applied)? pVnode->state.applied:pCond->endVersion;
|
||||
endVer = (pCond->endVersion > pVnode->state.applied) ? pVnode->state.applied : pCond->endVersion;
|
||||
}
|
||||
|
||||
return (SVersionRange){.minVer = startVer, .maxVer = endVer};
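
getQueryVerRange above normalizes the user-supplied version window: a start of -1 means 0, an end of -1 means the vnode's applied version, and an end beyond the applied version is clamped down to it. A small sketch of that clamping; demo_ver_range is an illustrative helper, not the TDengine getQueryVerRange signature.

#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t minVer, maxVer; } DemoVerRange;

static DemoVerRange demo_ver_range(int64_t start, int64_t end, int64_t applied) {
  int64_t startVer = (start == -1) ? 0 : start;
  int64_t endVer   = (end == -1) ? applied : (end > applied ? applied : end);
  return (DemoVerRange){.minVer = startVer, .maxVer = endVer};
}

int main(void) {
  DemoVerRange r = demo_ver_range(-1, 500, 300);
  printf("min=%lld max=%lld\n", (long long)r.minVer, (long long)r.maxVer);  /* min=0 max=300 */
  return 0;
}
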
|
||||
|
@ -2274,9 +2274,9 @@ bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32
if (pDelList == NULL) {
return false;
}
size_t num = taosArrayGetSize(pDelList);
bool asc = ASCENDING_TRAVERSE(order);
int32_t step = asc? 1:-1;
int32_t step = asc ? 1 : -1;

if (asc) {
if (*index >= num - 1) {

@ -2437,6 +2437,7 @@ static int32_t doMergeRowsInFileBlockImpl(SBlockData* pBlockData, int32_t rowInd
SVersionRange* pVerRange, int32_t step) {
while (pBlockData->aTSKEY[rowIndex] == key && rowIndex < pBlockData->nRow && rowIndex >= 0) {
if (pBlockData->aVersion[rowIndex] > pVerRange->maxVer || pBlockData->aVersion[rowIndex] < pVerRange->minVer) {
rowIndex += step;
continue;
}

@ -2823,7 +2824,7 @@ void tsdbReaderClose(STsdbReader* pReader) {
taosMemoryFree(pSupInfo->colIds);

taosArrayDestroy(pSupInfo->pColAgg);
for(int32_t i = 0; i < blockDataGetNumOfCols(pReader->pResBlock); ++i) {
for (int32_t i = 0; i < blockDataGetNumOfCols(pReader->pResBlock); ++i) {
if (pSupInfo->buildBuf[i] != NULL) {
taosMemoryFreeClear(pSupInfo->buildBuf[i]);
}

@ -2835,6 +2836,9 @@ void tsdbReaderClose(STsdbReader* pReader) {
destroyBlockScanInfo(pReader->status.pTableMap);
blockDataDestroy(pReader->pResBlock);

if (pReader->pFileReader != NULL) {
tsdbDataFReaderClose(&pReader->pFileReader);
}

#if 0
// if (pReader->status.pTableScanInfo != NULL) {

@ -3011,8 +3015,8 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) {
return TSDB_CODE_SUCCESS;
}

pReader->order = pCond->order;
pReader->type = BLOCK_LOAD_OFFSET_ORDER;
pReader->status.loadFromFile = true;
pReader->status.pTableIter = NULL;
@ -3023,11 +3027,14 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) {
memset(pReader->suppInfo.plist, 0, POINTER_BYTES);

pReader->suppInfo.tsColAgg.colId = PRIMARYKEY_TIMESTAMP_COL_ID;
tsdbDataFReaderClose(&pReader->pFileReader);

// todo set the correct numOfTables
int32_t numOfTables = 1;
SDataBlockIter* pBlockIter = &pReader->status.blockIter;

tsdbDataFReaderClose(&pReader->pFileReader);

STsdbFSState* pFState = pReader->pTsdb->fs->cState;
initFilesetIterator(&pReader->status.fileIter, pFState, pReader->order, pReader->idStr);
resetDataBlockIterator(&pReader->status.blockIter, pReader->order);

@ -3114,13 +3121,12 @@ int32_t tsdbGetFileBlocksDistInfo(STsdbReader* pReader, STableBlockDistInfo* pTa
pTableBlockInfo->numOfBlocks += pBlockIter->numOfBlocks;
}

/*
hasNext = blockIteratorNext(&pStatus->blockIter);
*/

// tsdbDebug("%p %d blocks found in file for %d table(s), fid:%d, %s", pReader, numOfBlocks, numOfTables,
// pReader->pFileGroup->fid, pReader->idStr);
}

return code;

@ -3158,7 +3164,7 @@ int64_t tsdbGetNumOfRowsInMemTable(STsdbReader* pReader) {
return rows;
}

int32_t tsdbGetTableSchema(SVnode* pVnode, int64_t uid, STSchema** pSchema, int64_t *suid) {
int32_t tsdbGetTableSchema(SVnode* pVnode, int64_t uid, STSchema** pSchema, int64_t* suid) {
int32_t sversion = 1;

SMetaReader mr = {0};

@ -3171,7 +3177,7 @@ int32_t tsdbGetTableSchema(SVnode* pVnode, int64_t uid, STSchema** pSchema, int6
}

*suid = 0;

if (mr.me.type == TSDB_CHILD_TABLE) {
*suid = mr.me.ctbEntry.suid;
code = metaGetTableEntryByUid(&mr, *suid);

@ -3188,8 +3194,7 @@ int32_t tsdbGetTableSchema(SVnode* pVnode, int64_t uid, STSchema** pSchema, int6
metaReaderClear(&mr);
*pSchema = metaGetTbTSchema(pVnode->pMeta, uid, sversion);

return TSDB_CODE_SUCCESS;
}
@ -175,6 +175,12 @@ _err:
return NULL;
}

void vnodePreClose(SVnode *pVnode) {
if (pVnode) {
syncLeaderTransfer(pVnode->sync);
}
}

void vnodeClose(SVnode *pVnode) {
if (pVnode) {
vnodeCommit(pVnode);

@ -569,7 +569,7 @@ int32_t vnodeSyncOpen(SVnode *pVnode, char *path) {
return -1;
}

setPingTimerMS(pVnode->sync, 3000);
setPingTimerMS(pVnode->sync, 5000);
setElectTimerMS(pVnode->sync, 500);
setHeartbeatTimerMS(pVnode->sync, 100);
return 0;

@ -642,6 +642,7 @@ void ctgFreeSTableIndex(void *info);
void ctgClearSubTaskRes(SCtgSubRes *pRes);
void ctgFreeQNode(SCtgQNode *node);
void ctgClearHandle(SCatalog* pCtg);
void ctgFreeTbCacheImpl(SCtgTbCache *pCache);

extern SCatalogMgmt gCtgMgmt;
@ -647,6 +647,8 @@ int32_t ctgEnqueue(SCatalog* pCtg, SCtgCacheOperation *operation) {
CTG_RET(TSDB_CODE_OUT_OF_MEMORY);
}

bool syncOp = operation->syncOp;
char* opName = gCtgCacheOperation[operation->opId].name;
if (operation->syncOp) {
tsem_init(&operation->rspSem, 0, 0);
}

@ -664,14 +666,14 @@ int32_t ctgEnqueue(SCatalog* pCtg, SCtgCacheOperation *operation) {
gCtgMgmt.queue.tail = node;
CTG_UNLOCK(CTG_WRITE, &gCtgMgmt.queue.qlock);

ctgDebug("action [%s] added into queue", opName);

CTG_QUEUE_INC();
CTG_RT_STAT_INC(numOfOpEnqueue, 1);

tsem_post(&gCtgMgmt.queue.reqSem);

ctgDebug("action [%s] added into queue", gCtgCacheOperation[operation->opId].name);

if (operation->syncOp) {
if (syncOp) {
tsem_wait(&operation->rspSem);
taosMemoryFree(operation);
}

@ -840,6 +842,7 @@ _return:
ctgFreeVgInfo(dbInfo);
taosMemoryFreeClear(op->data);
taosMemoryFreeClear(op);
CTG_RET(code);
}

@ -852,7 +855,7 @@ int32_t ctgUpdateTbMetaEnqueue(SCatalog* pCtg, STableMetaOutput *output, bool sy
SCtgUpdateTbMetaMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateTbMetaMsg));
if (NULL == msg) {
ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateTbMetaMsg));
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}

char *p = strchr(output->dbFName, '.');

@ -871,6 +874,11 @@ int32_t ctgUpdateTbMetaEnqueue(SCatalog* pCtg, STableMetaOutput *output, bool sy
_return:

if (output) {
taosMemoryFree(output->tbMeta);
taosMemoryFree(output);
}

taosMemoryFreeClear(msg);

CTG_RET(code);
@ -1753,6 +1761,16 @@ int32_t ctgOpDropStbMeta(SCtgCacheOperation *operation) {
CTG_CACHE_STAT_DEC(numOfStb, 1);
}

SCtgTbCache* pTbCache = taosHashGet(dbCache->tbCache, msg->stbName, strlen(msg->stbName));
if (NULL == pTbCache) {
ctgDebug("stb %s already not in cache", msg->stbName);
goto _return;
}

CTG_LOCK(CTG_WRITE, &pTbCache->metaLock);
ctgFreeTbCacheImpl(pTbCache);
CTG_UNLOCK(CTG_WRITE, &pTbCache->metaLock);

if (taosHashRemove(dbCache->tbCache, msg->stbName, strlen(msg->stbName))) {
ctgError("stb not exist in cache, dbFName:%s, stb:%s, suid:0x%"PRIx64, msg->dbFName, msg->stbName, msg->suid);
} else {

@ -1780,14 +1798,24 @@ int32_t ctgOpDropTbMeta(SCtgCacheOperation *operation) {
SCtgDBCache *dbCache = NULL;
ctgGetDBCache(pCtg, msg->dbFName, &dbCache);
if (NULL == dbCache) {
return TSDB_CODE_SUCCESS;
goto _return;
}

if (dbCache->dbId != msg->dbId) {
ctgDebug("dbId 0x%" PRIx64 " not match with curId 0x%"PRIx64", dbFName:%s, tbName:%s", msg->dbId, dbCache->dbId, msg->dbFName, msg->tbName);
return TSDB_CODE_SUCCESS;
goto _return;
}

SCtgTbCache* pTbCache = taosHashGet(dbCache->tbCache, msg->tbName, strlen(msg->tbName));
if (NULL == pTbCache) {
ctgDebug("tb %s already not in cache", msg->tbName);
goto _return;
}

CTG_LOCK(CTG_WRITE, &pTbCache->metaLock);
ctgFreeTbCacheImpl(pTbCache);
CTG_UNLOCK(CTG_WRITE, &pTbCache->metaLock);

if (taosHashRemove(dbCache->tbCache, msg->tbName, strlen(msg->tbName))) {
ctgError("tb %s not exist in cache, dbFName:%s", msg->tbName, msg->dbFName);
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);

@ -2063,6 +2091,8 @@ void* ctgUpdateThreadFunc(void* param) {
if (operation->syncOp) {
tsem_post(&operation->rspSem);
} else {
taosMemoryFreeClear(operation);
}

CTG_RT_STAT_INC(numOfOpDequeue, 1);

@ -261,6 +261,8 @@ int32_t ctgHandleMsgCallback(void *param, SDataBuf *pMsg, int32_t rspCode) {
_return:

taosMemoryFree(pMsg->pData);

if (pJob) {
taosReleaseRef(gCtgMgmt.jobPool, cbParam->refId);
}
@ -152,6 +152,7 @@ void ctgFreeStbMetaCache(SCtgDBCache *dbCache) {
}

void ctgFreeTbCacheImpl(SCtgTbCache *pCache) {
qDebug("tbMeta freed, p:%p", pCache->pMeta);
taosMemoryFreeClear(pCache->pMeta);
if (pCache->pIndex) {
taosArrayDestroyEx(pCache->pIndex->pIndex, tFreeSTableIndexInfo);

@ -831,6 +832,7 @@ int32_t ctgCloneMetaOutput(STableMetaOutput *output, STableMetaOutput **pOutput)
if (output->tbMeta) {
int32_t metaSize = CTG_META_SIZE(output->tbMeta);
(*pOutput)->tbMeta = taosMemoryMalloc(metaSize);
qDebug("tbMeta cloned, size:%d, p:%p", metaSize, (*pOutput)->tbMeta);
if (NULL == (*pOutput)->tbMeta) {
qError("malloc %d failed", (int32_t)sizeof(STableMetaOutput));
taosMemoryFreeClear(*pOutput);
@ -389,6 +389,7 @@ typedef struct SStreamScanInfo {
SSDataBlock* pPullDataRes; // pull data SSDataBlock
SSDataBlock* pDeleteDataRes; // delete data SSDataBlock
int32_t deleteDataIndex;
STimeWindow updateWin;

// status for tmq
// SSchemaWrapper schema;

@ -46,7 +46,7 @@ SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pScanNode, SRead
pInfo->pColMatchInfo = extractColMatchInfo(pScanNode->pScanCols, pScanNode->node.pOutputDataBlockDesc, &numOfCols,
COL_MATCH_FROM_COL_ID);
int32_t* pCols = taosMemoryMalloc(numOfCols * sizeof(int32_t));
for (int32_t i = 0; i < numOfCols; ++i) {
for (int32_t i = 0; i < taosArrayGetSize(pInfo->pColMatchInfo); ++i) {
SColMatchInfo* pColMatch = taosArrayGet(pInfo->pColMatchInfo, i);
pCols[i] = pColMatch->colId;
}

@ -56,7 +56,7 @@ SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pScanNode, SRead
goto _error;
}

tsdbLastRowReaderOpen(readHandle->vnode, LASTROW_RETRIEVE_TYPE_ALL, pTableList, pCols, numOfCols,
tsdbLastRowReaderOpen(readHandle->vnode, LASTROW_RETRIEVE_TYPE_ALL, pTableList, taosArrayGetSize(pInfo->pColMatchInfo),
&pInfo->pLastrowReader);
taosMemoryFree(pCols);

@ -191,6 +191,7 @@ SSDataBlock* createResDataBlock(SDataBlockDescNode* pNode) {
pBlock->info.blockId = pNode->dataBlockId;
pBlock->info.type = STREAM_INVALID;
pBlock->info.calWin = (STimeWindow){.skey = INT64_MIN, .ekey = INT64_MAX};

for (int32_t i = 0; i < numOfCols; ++i) {
SSlotDescNode* pDescNode = (SSlotDescNode*)nodesListGetNode(pNode->pSlots, i);
@ -106,6 +106,30 @@ int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numO
return code;
}

qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers) {
if (msg == NULL) {
// TODO create raw scan
return NULL;
}

struct SSubplan* plan = NULL;
int32_t code = qStringToSubplan(msg, &plan);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
return NULL;
}

qTaskInfo_t pTaskInfo = NULL;
code = qCreateExecTask(readers, 0, 0, plan, &pTaskInfo, NULL, NULL, OPTR_EXEC_MODEL_QUEUE);
if (code != TSDB_CODE_SUCCESS) {
// TODO: destroy SSubplan & pTaskInfo
terrno = code;
return NULL;
}

return pTaskInfo;
}

qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers) {
if (msg == NULL) {
return NULL;

@ -186,7 +210,7 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo
}

int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* tableName, int32_t* sversion,
int32_t* tversion) {
ASSERT(tinfo != NULL && dbName != NULL && tableName != NULL);
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;

@ -269,13 +269,13 @@ const STqOffset* qExtractStatusFromStreamScanner(void* scanner) {
void* qStreamExtractMetaMsg(qTaskInfo_t tinfo) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM);
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE);
return pTaskInfo->streamInfo.metaBlk;
}

int32_t qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM);
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE);
memcpy(pOffset, &pTaskInfo->streamInfo.lastStatus, sizeof(STqOffsetVal));
return 0;
}
@ -283,35 +283,41 @@ int32_t qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset) {
int32_t qStreamPrepareScan(qTaskInfo_t tinfo, const STqOffsetVal* pOffset) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
SOperatorInfo* pOperator = pTaskInfo->pRoot;
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM);
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE);
pTaskInfo->streamInfo.prepareStatus = *pOffset;
// TODO: optimize
/*if (pTaskInfo->streamInfo.lastStatus.type != pOffset->type ||*/
/*pTaskInfo->streamInfo.prepareStatus.version != pTaskInfo->streamInfo.lastStatus.version) {*/
while (1) {
uint8_t type = pOperator->operatorType;
pOperator->status = OP_OPENED;
if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
SStreamScanInfo* pInfo = pOperator->info;
if (pOffset->type == TMQ_OFFSET__LOG) {
if (tqSeekVer(pInfo->tqReader, pOffset->version) < 0) {
return -1;
}
ASSERT(pInfo->tqReader->pWalReader->curVersion == pOffset->version);
} else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
/*pInfo->blockType = STREAM_INPUT__TABLE_SCAN;*/
int64_t uid = pOffset->uid;
int64_t ts = pOffset->ts;

if (uid == 0) {
if (taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList) != 0) {
STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, 0);
uid = pTableInfo->uid;
ts = INT64_MIN;
if (!tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus)) {
while (1) {
uint8_t type = pOperator->operatorType;
pOperator->status = OP_OPENED;
if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
SStreamScanInfo* pInfo = pOperator->info;
if (pOffset->type == TMQ_OFFSET__LOG) {
#if 0
if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus) &&
pInfo->tqReader->pWalReader->curVersion != pOffset->version) {
qError("prepare scan ver %ld actual ver %ld, last %ld", pOffset->version,
pInfo->tqReader->pWalReader->curVersion, pTaskInfo->streamInfo.lastStatus.version);
ASSERT(0);
}
}
if (pTaskInfo->streamInfo.lastStatus.type != TMQ_OFFSET__SNAPSHOT_DATA ||
pTaskInfo->streamInfo.lastStatus.uid != uid || pTaskInfo->streamInfo.lastStatus.ts != ts) {
#endif
if (tqSeekVer(pInfo->tqReader, pOffset->version + 1) < 0) {
return -1;
}
ASSERT(pInfo->tqReader->pWalReader->curVersion == pOffset->version + 1);
} else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
/*pInfo->blockType = STREAM_INPUT__TABLE_SCAN;*/
int64_t uid = pOffset->uid;
int64_t ts = pOffset->ts;

if (uid == 0) {
if (taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList) != 0) {
STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, 0);
uid = pTableInfo->uid;
ts = INT64_MIN;
}
}
/*if (pTaskInfo->streamInfo.lastStatus.type != TMQ_OFFSET__SNAPSHOT_DATA ||*/
/*pTaskInfo->streamInfo.lastStatus.uid != uid || pTaskInfo->streamInfo.lastStatus.ts != ts) {*/
STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
int32_t tableSz = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList);
bool found = false;

@ -320,6 +326,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, const STqOffsetVal* pOffset) {
if (pTableInfo->uid == uid) {
found = true;
pTableScanInfo->currentTable = i;
break;
}
}

@ -335,18 +342,18 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, const STqOffsetVal* pOffset) {
qDebug("tsdb reader offset seek to uid %ld ts %ld, table cur set to %d , all table num %d", uid, ts,
pTableScanInfo->currentTable, tableSz);
}
/*}*/

} else {
ASSERT(0);
}
return 0;
} else {
ASSERT(0);
ASSERT(pOperator->numOfDownstream == 1);
pOperator = pOperator->pDownstream[0];
}
return 0;
} else {
ASSERT(pOperator->numOfDownstream == 1);
pOperator = pOperator->pDownstream[0];
}
}
/*}*/
return 0;
}
@ -538,7 +538,7 @@ static int32_t doSetInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCt
return code;
}

static int32_t doAggregateImpl(SOperatorInfo* pOperator, TSKEY startTs, SqlFunctionCtx* pCtx) {
static int32_t doAggregateImpl(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx) {
for (int32_t k = 0; k < pOperator->exprSupp.numOfExprs; ++k) {
if (functionNeedToExecute(&pCtx[k])) {
// todo add a dummy funtion to avoid process check

@ -594,10 +594,14 @@ int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBloc
SColumnInfoData* pColInfoData = taosArrayGet(pResult->pDataBlock, outputSlotId);

int32_t offset = createNewColModel ? 0 : pResult->info.rows;
for (int32_t i = 0; i < pSrcBlock->info.rows; ++i) {
colDataAppend(pColInfoData, i + offset,
taosVariantGet(&pExpr[k].base.pParam[0].param, pExpr[k].base.pParam[0].param.nType),
TSDB_DATA_TYPE_NULL == pExpr[k].base.pParam[0].param.nType);

int32_t type = pExpr[k].base.pParam[0].param.nType;
if (TSDB_DATA_TYPE_NULL == type) {
colDataAppendNNULL(pColInfoData, offset, pSrcBlock->info.rows);
} else {
for (int32_t i = 0; i < pSrcBlock->info.rows; ++i) {
colDataAppend(pColInfoData, i + offset, taosVariantGet(&pExpr[k].base.pParam[0].param, type), false);
}
}

numOfRows = pSrcBlock->info.rows;

@ -2969,25 +2973,10 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
// the pDataBlock are always the same one, no need to call this again
setExecutionContext(pOperator, pOperator->exprSupp.numOfExprs, pBlock->info.groupId, pAggInfo);
setInputDataBlock(pOperator, pSup->pCtx, pBlock, order, scanFlag, true);
code = doAggregateImpl(pOperator, 0, pSup->pCtx);
code = doAggregateImpl(pOperator, pSup->pCtx);
if (code != 0) {
longjmp(pTaskInfo->env, code);
}

#if 0 // test for encode/decode result info
if(pOperator->fpSet.encodeResultRow){
char *result = NULL;
int32_t length = 0;
pOperator->fpSet.encodeResultRow(pOperator, &result, &length);
SAggSupporter* pSup = &pAggInfo->aggSup;
taosHashClear(pSup->pResultRowHashTable);
pInfo->resultRowInfo.size = 0;
pOperator->fpSet.decodeResultRow(pOperator, result);
if(result){
taosMemoryFree(result);
}
}
#endif
}

closeAllResultRows(&pAggInfo->binfo.resultRowInfo);
@ -3250,6 +3239,10 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
if (pOperator->status == OP_EXEC_DONE) {
if (pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE) {
pOperator->status = OP_OPENED;
return NULL;
}
return NULL;
}

@ -3283,11 +3276,15 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
while (1) {
// The downstream exec may change the value of the newgroup, so use a local variable instead.
qDebug("projection call next");
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
if (pBlock == NULL) {
// TODO optimize
/*if (pTaskInfo->execModel != OPTR_EXEC_MODEL_STREAM) {*/
qDebug("projection get null");

/*if (pTaskInfo->execModel == OPTR_EXEC_MODEL_BATCH) {*/
doSetOperatorCompleted(pOperator);
/*} else if (pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE) {*/
/*pOperator->status = OP_RES_TO_RETURN;*/
/*}*/
break;
}
@ -884,6 +884,28 @@ static bool prepareRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_
return true;
}

static STimeWindow getSlidingWindow(TSKEY* tsCol, SInterval* pInterval, SDataBlockInfo* pDataBlockInfo, int32_t* pRowIndex) {
SResultRowInfo dumyInfo;
dumyInfo.cur.pageId = -1;
STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCol[*pRowIndex], pInterval,
TSDB_ORDER_ASC);
STimeWindow endWin = win;
STimeWindow preWin = win;
while (1) {
(*pRowIndex) += getNumOfRowsInTimeWindow(pDataBlockInfo, tsCol, *pRowIndex, endWin.ekey,
binarySearchForKey, NULL, TSDB_ORDER_ASC);
do {
preWin = endWin;
getNextTimeWindow(pInterval, &endWin, TSDB_ORDER_ASC);
} while (tsCol[(*pRowIndex) - 1] >= endWin.skey);
endWin = preWin;
if (win.ekey == endWin.ekey || (*pRowIndex) == pDataBlockInfo->rows ) {
win.ekey = endWin.ekey;
return win;
}
win.ekey = endWin.ekey;
}
}

static bool prepareDataScan(SStreamScanInfo* pInfo, SSDataBlock* pSDB, int32_t tsColIndex, int32_t* pRowIndex) {
STimeWindow win = {
.skey = INT64_MIN,

@ -905,10 +927,13 @@ static bool prepareDataScan(SStreamScanInfo* pInfo, SSDataBlock* pSDB, int32_t t
setGroupId(pInfo, pSDB, GROUPID_COLUMN_INDEX, *pRowIndex);
(*pRowIndex) += updateSessionWindowInfo(pCurWin, tsCols, NULL, pSDB->info.rows, *pRowIndex, gap, NULL);
} else {
win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[*pRowIndex], &pInfo->interval, TSDB_ORDER_ASC);
setGroupId(pInfo, pSDB, GROUPID_COLUMN_INDEX, *pRowIndex);
(*pRowIndex) +=
getNumOfRowsInTimeWindow(&pSDB->info, tsCols, *pRowIndex, win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC);
pInfo->updateWin.skey = tsCols[*pRowIndex];
win = getSlidingWindow(tsCols, &pInfo->interval, &pSDB->info, pRowIndex);
pInfo->updateWin.ekey = tsCols[*pRowIndex - 1];
// win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[*pRowIndex], &pInfo->interval, TSDB_ORDER_ASC);
// (*pRowIndex) +=
// getNumOfRowsInTimeWindow(&pSDB->info, tsCols, *pRowIndex, win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC);
}
needRead = true;
} else if (isStateWindow(pInfo)) {
@ -974,10 +999,12 @@ static SSDataBlock* doDataScan(SStreamScanInfo* pInfo, SSDataBlock* pSDB, int32_
}
}
if (!pResult) {
pInfo->updateWin = (STimeWindow){.skey = INT64_MIN, .ekey = INT64_MAX};
return NULL;
}

if (pResult->info.groupId == pInfo->groupId) {
pResult->info.calWin = pInfo->updateWin;
return pResult;
}
}

@ -1209,6 +1236,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
/*return NULL;*/
/*}*/

qDebug("stream scan called");
if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) {
while (1) {
SFetchRet ret = {0};

@ -1220,6 +1248,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
}
// TODO clean data block
if (pInfo->pRes->info.rows > 0) {
qDebug("stream scan log return %d rows", pInfo->pRes->info.rows);
return pInfo->pRes;
}
} else if (ret.fetchType == FETCH_TYPE__META) {

@ -1230,6 +1259,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
} else if (ret.fetchType == FETCH_TYPE__NONE) {
pTaskInfo->streamInfo.lastStatus = ret.offset;
ASSERT(pTaskInfo->streamInfo.lastStatus.version + 1 >= pTaskInfo->streamInfo.prepareStatus.version);
qDebug("stream scan log return null");
return NULL;
} else {
ASSERT(0);
@ -1237,7 +1267,12 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
}
} else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA) {
SSDataBlock* pResult = doTableScan(pInfo->pTableScanOp);
return pResult && pResult->info.rows > 0 ? pResult : NULL;
if (pResult && pResult->info.rows > 0) {
qDebug("stream scan tsdb return %d rows", pResult->info.rows);
return pResult;
}
qDebug("stream scan tsdb return null");
return NULL;
} else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_META) {
// TODO scan meta
ASSERT(0);

@ -1256,8 +1291,13 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
int32_t current = pInfo->validBlockIndex++;
SSDataBlock* pBlock = taosArrayGetP(pInfo->pBlockLists, current);
// TODO move into scan
pBlock->info.calWin.skey = INT64_MIN;
pBlock->info.calWin.ekey = INT64_MAX;
blockDataUpdateTsWindow(pBlock, 0);
switch (pBlock->info.type) {
case STREAM_NORMAL:
case STREAM_GET_ALL:
return pBlock;
case STREAM_RETRIEVE: {
pInfo->blockType = STREAM_INPUT__DATA_SUBMIT;
pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RETRIEVE;

@ -1287,6 +1327,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
}
return pBlock;
} else if (pInfo->blockType == STREAM_INPUT__DATA_SUBMIT) {
qDebug("scan mode %d", pInfo->scanMode);
if (pInfo->scanMode == STREAM_SCAN_FROM_RES) {
blockDataDestroy(pInfo->pUpdateRes);
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;

@ -1381,7 +1422,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
}
}
}

qDebug("scan rows: %d", pBlockInfo->rows);
return (pBlockInfo->rows == 0) ? NULL : pInfo->pRes;

#if 0

@ -1533,6 +1574,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
pInfo->pStreamScanOp = pOperator;
pInfo->deleteDataIndex = 0;
pInfo->pDeleteDataRes = createPullDataBlock();
pInfo->updateWin = (STimeWindow){.skey = INT64_MAX, .ekey = INT64_MAX};

pOperator->name = "StreamScanOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN;
@ -2860,101 +2902,3 @@ _error:
return NULL;
}

static SSDataBlock* doScanLastrow(SOperatorInfo* pOperator) {
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
}

SLastrowScanInfo* pInfo = pOperator->info;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;

int32_t size = taosArrayGetSize(pInfo->pTableList);
if (size == 0) {
setTaskStatus(pTaskInfo, TASK_COMPLETED);
return NULL;
}

// check if it is a group by tbname
if (size == taosArrayGetSize(pInfo->pTableList)) {
blockDataCleanup(pInfo->pRes);
tsdbRetrieveLastRow(pInfo->pLastrowReader, pInfo->pRes, pInfo->pSlotIds);
return (pInfo->pRes->info.rows == 0) ? NULL : pInfo->pRes;
} else {
// todo fetch the result for each group
}

return pInfo->pRes->info.rows == 0 ? NULL : pInfo->pRes;
}

static void destroyLastrowScanOperator(void* param, int32_t numOfOutput) {
SLastrowScanInfo* pInfo = (SLastrowScanInfo*)param;
blockDataDestroy(pInfo->pRes);
tsdbLastrowReaderClose(pInfo->pLastrowReader);

taosMemoryFreeClear(param);
}

SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pScanNode, SReadHandle* readHandle, SArray* pTableList,
SExecTaskInfo* pTaskInfo) {
SLastrowScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SLastrowScanInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
goto _error;
}

pInfo->pTableList = pTableList;
pInfo->readHandle = *readHandle;
pInfo->pRes = createResDataBlock(pScanNode->node.pOutputDataBlockDesc);

int32_t numOfCols = 0;
pInfo->pColMatchInfo = extractColMatchInfo(pScanNode->pScanCols, pScanNode->node.pOutputDataBlockDesc, &numOfCols,
COL_MATCH_FROM_COL_ID);
int32_t* pCols = taosMemoryMalloc(numOfCols * sizeof(int32_t));
for (int32_t i = 0; i < numOfCols; ++i) {
SColMatchInfo* pColMatch = taosArrayGet(pInfo->pColMatchInfo, i);
pCols[i] = pColMatch->colId;
}

pInfo->pSlotIds = taosMemoryMalloc(numOfCols * sizeof(pInfo->pSlotIds[0]));
for (int32_t i = 0; i < numOfCols; ++i) {
SColMatchInfo* pColMatch = taosArrayGet(pInfo->pColMatchInfo, i);
for (int32_t j = 0; j < pTaskInfo->schemaVer.sw->nCols; ++j) {
if (pColMatch->colId == pTaskInfo->schemaVer.sw->pSchema[j].colId &&
pColMatch->colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
pInfo->pSlotIds[pColMatch->targetSlotId] = -1;
break;
}

if (pColMatch->colId == pTaskInfo->schemaVer.sw->pSchema[j].colId) {
pInfo->pSlotIds[pColMatch->targetSlotId] = j;
break;
}
}
}

tsdbLastRowReaderOpen(readHandle->vnode, LASTROW_RETRIEVE_TYPE_ALL, pTableList, pCols, numOfCols,
&pInfo->pLastrowReader);
taosMemoryFree(pCols);

pOperator->name = "LastrowScanOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN;
pOperator->blocking = false;
pOperator->status = OP_NOT_OPENED;
pOperator->info = pInfo;
pOperator->pTaskInfo = pTaskInfo;
pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pRes->pDataBlock);

initResultSizeInfo(pOperator, 1024);
blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity);

pOperator->fpSet =
createOperatorFpSet(operatorDummyOpenFn, doScanLastrow, NULL, NULL, destroyLastrowScanOperator, NULL, NULL, NULL);
pOperator->cost.openCost = 0;
return pOperator;

_error:
pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY;
taosMemoryFree(pInfo);
taosMemoryFree(pOperator);
return NULL;
}
@ -419,6 +419,14 @@ static bool setTimeWindowInterpolationEndTs(SIntervalAggOperatorInfo* pInfo, SEx
return true;
}

bool inSlidingWindow(SInterval* pInterval, STimeWindow* pWin, SDataBlockInfo* pBlockInfo) {
if (pInterval->interval != pInterval->sliding &&
(pWin->ekey < pBlockInfo->calWin.skey || pWin->skey > pBlockInfo->calWin.ekey)) {
return false;
}
return true;
}

static int32_t getNextQualifiedWindow(SInterval* pInterval, STimeWindow* pNext, SDataBlockInfo* pDataBlockInfo,
TSKEY* primaryKeys, int32_t prevPosition, int32_t order) {
bool ascQuery = (order == TSDB_ORDER_ASC);

@ -432,6 +440,10 @@ static int32_t getNextQualifiedWindow(SInterval* pInterval, STimeWindow* pNext,
return -1;
}

if (!inSlidingWindow(pInterval, pNext, pDataBlockInfo) && order == TSDB_ORDER_ASC) {
return -1;
}

TSKEY skey = ascQuery ? pNext->skey : pNext->ekey;
int32_t startPos = 0;

@ -801,7 +813,8 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
STimeWindow win = getActiveTimeWindow(pInfo->aggSup.pResultBuf, pResultRowInfo, ts, &pInfo->interval, pInfo->order);
int32_t ret = TSDB_CODE_SUCCESS;
if (!pInfo->ignoreExpiredData || !isCloseWindow(&win, &pInfo->twAggSup)) {
if ((!pInfo->ignoreExpiredData || !isCloseWindow(&win, &pInfo->twAggSup)) &&
inSlidingWindow(&pInfo->interval, &win, &pBlock->info)) {
ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx,
numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
if (ret != TSDB_CODE_SUCCESS || pResult == NULL) {

@ -834,7 +847,8 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
doWindowBorderInterpolation(pInfo, pBlock, pResult, &win, startPos, forwardRows, pSup);
}

if (!pInfo->ignoreExpiredData || !isCloseWindow(&win, &pInfo->twAggSup)) {
if ((!pInfo->ignoreExpiredData || !isCloseWindow(&win, &pInfo->twAggSup)) &&
inSlidingWindow(&pInfo->interval, &win, &pBlock->info)) {
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &win, true);
doApplyFunctions(pTaskInfo, pSup->pCtx, &win, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, tsCols,
pBlock->info.rows, numOfOutput, pInfo->order);
@ -1278,18 +1292,25 @@ static void doClearWindows(SAggSupporter* pAggSup, SExprSupp* pSup1, SInterval*
SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
pGpDatas = (uint64_t*)pGpCol->pData;
}
int32_t step = 0;
for (int32_t i = 0; i < pBlock->info.rows; i += step) {
SResultRowInfo dumyInfo;
dumyInfo.cur.pageId = -1;
STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[i], pInterval, TSDB_ORDER_ASC);
step = getNumOfRowsInTimeWindow(&pBlock->info, tsCols, i, win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC);
uint64_t winGpId = pGpDatas ? pGpDatas[i] : pBlock->info.groupId;
bool res = doClearWindow(pAggSup, pSup1, (char*)&win.skey, sizeof(TKEY), winGpId, numOfOutput);
int32_t step = 0;
int32_t startPos = 0;
SResultRowInfo dumyInfo;
dumyInfo.cur.pageId = -1;
STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[0], pInterval, TSDB_ORDER_ASC);
while (1) {
step =
getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC);
uint64_t winGpId = pGpDatas ? pGpDatas[startPos] : pBlock->info.groupId;
bool res = doClearWindow(pAggSup, pSup1, (char*)&win.skey, sizeof(TSKEY), winGpId, numOfOutput);
if (pUpWins && res) {
SWinRes winRes = {.ts = win.skey, .groupId = winGpId};
taosArrayPush(pUpWins, &winRes);
}
int32_t prevEndPos = step - 1 + startPos;
startPos = getNextQualifiedWindow(pInterval, &win, &pBlock->info, tsCols, prevEndPos, TSDB_ORDER_ASC);
if (startPos < 0) {
break;
}
}
}

@ -1332,13 +1353,13 @@ static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup,
if (chIds && pPullDataMap) {
SArray* chAy = *(SArray**)chIds;
int32_t size = taosArrayGetSize(chAy);
qInfo("window %" PRId64 " wait child size:%d", win.skey, size);
qDebug("window %" PRId64 " wait child size:%d", win.skey, size);
for (int32_t i = 0; i < size; i++) {
qInfo("window %" PRId64 " wait chid id:%d", win.skey, *(int32_t*)taosArrayGet(chAy, i));
qDebug("window %" PRId64 " wait chid id:%d", win.skey, *(int32_t*)taosArrayGet(chAy, i));
}
continue;
} else if (pPullDataMap) {
qInfo("close window %" PRId64, win.skey);
qDebug("close window %" PRId64, win.skey);
}
SResultRowPosition* pPos = (SResultRowPosition*)pIte;
if (pSup->calTrigger == STREAM_TRIGGER_WINDOW_CLOSE) {
@ -2434,7 +2455,7 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc
}
while (1) {
bool isClosed = isCloseWindow(&nextWin, &pInfo->twAggSup);
if (pInfo->ignoreExpiredData && isClosed) {
if ((pInfo->ignoreExpiredData && isClosed) || !inSlidingWindow(&pInfo->interval, &nextWin, &pSDataBlock->info)) {
startPos = getNexWindowPos(&pInfo->interval, &pSDataBlock->info, tsCols, startPos, nextWin.ekey, &nextWin);
if (startPos < 0) {
break;

@ -2491,8 +2512,8 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc
if (IS_FINAL_OP(pInfo)) {
forwardRows = 1;
} else {
forwardRows = getNumOfRowsInTimeWindow(&pSDataBlock->info, tsCols, startPos, nextWin.ekey, binarySearchForKey, NULL,
TSDB_ORDER_ASC);
forwardRows = getNumOfRowsInTimeWindow(&pSDataBlock->info, tsCols, startPos, nextWin.ekey, binarySearchForKey,
NULL, TSDB_ORDER_ASC);
}
if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE && pUpdated) {
saveResultRow(pResult, tableGroupId, pUpdated);

@ -2609,6 +2630,8 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
SExprSupp* pSup = &pOperator->exprSupp;

qDebug("interval status %d %s", pOperator->status, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");

if (pOperator->status == OP_EXEC_DONE) {
return NULL;
} else if (pOperator->status == OP_RES_TO_RETURN) {

@ -2659,7 +2682,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
clearSpecialDataBlock(pInfo->pUpdateRes);
removeDeleteResults(pUpdated, pInfo->pDelWins);
pOperator->status = OP_RES_TO_RETURN;
qInfo("%s return data", IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
qDebug("%s return data", IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
break;
}
printDataBlock(pBlock, IS_FINAL_OP(pInfo) ? "interval Final recv" : "interval Semi recv");
@ -3101,12 +3124,7 @@ int64_t getSessionWindowEndkey(void* data, int32_t index) {
}

bool isInTimeWindow(STimeWindow* pWin, TSKEY ts, int64_t gap) {
int64_t sGap = ts - pWin->skey + gap;
int64_t eGap = pWin->ekey - ts + gap;
// if ((sGap < 0 && sGap >= -gap) || (eGap < 0 && eGap >= -gap) || (sGap >= 0 && eGap >= 0)) {
// return true;
// }
if (sGap >= 0 && eGap >= 0) {
if (ts + gap >= pWin->skey && ts - gap <= pWin->ekey) {
return true;
}
return false;

@ -1976,7 +1976,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "leastsquares",
.type = FUNCTION_TYPE_LEASTSQUARES,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_FORBID_STREAM_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC,
.translateFunc = translateLeastSQR,
.getEnvFunc = getLeastSQRFuncEnv,
.initFunc = leastSQRFunctionSetup,

@ -2217,7 +2217,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "last_row",
.type = FUNCTION_TYPE_LAST_ROW,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateFirstLast,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,
@ -80,8 +80,10 @@ typedef struct STopBotRes {
} STopBotRes;

typedef struct SFirstLastRes {
bool hasResult;
bool isNull; // used for last_row function only
bool hasResult;
// used for last_row function only, isNullRes in SResultRowEntry can not be passed to downstream.So,
// this attribute is required
bool isNull;
int32_t bytes;
char buf[];
} SFirstLastRes;

@ -338,6 +340,104 @@ typedef struct SGroupKeyInfo {
} \
} while (0)

#define LIST_ADD_N(_res, _col, _start, _rows, _t, numOfElem) \
do { \
_t* d = (_t*)(_col->pData); \
for (int32_t i = (_start); i < (_rows) + (_start); ++i) { \
if (((_col)->hasNull) && colDataIsNull_f((_col)->nullbitmap, i)) { \
continue; \
}; \
(_res) += (d)[i]; \
(numOfElem)++; \
} \
} while (0)

#define LIST_SUB_N(_res, _col, _start, _rows, _t, numOfElem) \
do { \
_t* d = (_t*)(_col->pData); \
for (int32_t i = (_start); i < (_rows) + (_start); ++i) { \
if (((_col)->hasNull) && colDataIsNull_f((_col)->nullbitmap, i)) { \
continue; \
}; \
(_res) -= (d)[i]; \
(numOfElem)++; \
} \
} while (0)

#define LIST_AVG_N(sumT, T) \
do { \
T* plist = (T*)pCol->pData; \
for (int32_t i = start; i < numOfRows + pInput->startRowIndex; ++i) { \
if (pCol->hasNull && colDataIsNull_f(pCol->nullbitmap, i)) { \
continue; \
} \
\
numOfElem += 1; \
pAvgRes->count -= 1; \
sumT -= plist[i]; \
} \
} while (0)

#define LIST_STDDEV_SUB_N(sumT, T) \
do { \
T* plist = (T*)pCol->pData; \
for (int32_t i = start; i < numOfRows + start; ++i) { \
if (pCol->hasNull && colDataIsNull_f(pCol->nullbitmap, i)) { \
continue; \
} \
numOfElem += 1; \
pStddevRes->count -= 1; \
sumT -= plist[i]; \
pStddevRes->quadraticISum -= plist[i] * plist[i]; \
} \
} while (0)

#define LEASTSQR_CAL(p, x, y, index, step) \
do { \
(p)[0][0] += (double)(x) * (x); \
(p)[0][1] += (double)(x); \
(p)[0][2] += (double)(x) * (y)[index]; \
(p)[1][2] += (y)[index]; \
(x) += step; \
} while (0)

#define STATE_COMP(_op, _lval, _param) STATE_COMP_IMPL(_op, _lval, GET_STATE_VAL(_param))

#define GET_STATE_VAL(param) ((param.nType == TSDB_DATA_TYPE_BIGINT) ? (param.i) : (param.d))

#define STATE_COMP_IMPL(_op, _lval, _rval) \
do { \
switch (_op) { \
case STATE_OPER_LT: \
return ((_lval) < (_rval)); \
break; \
case STATE_OPER_GT: \
return ((_lval) > (_rval)); \
break; \
case STATE_OPER_LE: \
return ((_lval) <= (_rval)); \
break; \
case STATE_OPER_GE: \
return ((_lval) >= (_rval)); \
break; \
case STATE_OPER_NE: \
return ((_lval) != (_rval)); \
break; \
case STATE_OPER_EQ: \
return ((_lval) == (_rval)); \
break; \
default: \
break; \
} \
} while (0)

#define INIT_INTP_POINT(_p, _k, _v) \
do { \
(_p).key = (_k); \
(_p).val = (_v); \
} while (0)

bool dummyGetEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* UNUSED_PARAM(pEnv)) { return true; }

bool dummyInit(SqlFunctionCtx* UNUSED_PARAM(pCtx), SResultRowEntryInfo* UNUSED_PARAM(pResultInfo)) { return true; }
@ -499,30 +599,6 @@ int32_t combineFunction(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
return TSDB_CODE_SUCCESS;
}

#define LIST_ADD_N(_res, _col, _start, _rows, _t, numOfElem) \
do { \
_t* d = (_t*)(_col->pData); \
for (int32_t i = (_start); i < (_rows) + (_start); ++i) { \
if (((_col)->hasNull) && colDataIsNull_f((_col)->nullbitmap, i)) { \
continue; \
}; \
(_res) += (d)[i]; \
(numOfElem)++; \
} \
} while (0)

#define LIST_SUB_N(_res, _col, _start, _rows, _t, numOfElem) \
do { \
_t* d = (_t*)(_col->pData); \
for (int32_t i = (_start); i < (_rows) + (_start); ++i) { \
if (((_col)->hasNull) && colDataIsNull_f((_col)->nullbitmap, i)) { \
continue; \
}; \
(_res) -= (d)[i]; \
(numOfElem)++; \
} \
} while (0)

int32_t sumFunction(SqlFunctionCtx* pCtx) {
int32_t numOfElem = 0;

@ -920,20 +996,6 @@ int32_t avgFunctionMerge(SqlFunctionCtx* pCtx) {
return TSDB_CODE_SUCCESS;
}

#define LIST_AVG_N(sumT, T) \
do { \
T* plist = (T*)pCol->pData; \
for (int32_t i = start; i < numOfRows + pInput->startRowIndex; ++i) { \
if (pCol->hasNull && colDataIsNull_f(pCol->nullbitmap, i)) { \
continue; \
} \
\
numOfElem += 1; \
pAvgRes->count -= 1; \
sumT -= plist[i]; \
} \
} while (0)

int32_t avgInvertFunction(SqlFunctionCtx* pCtx) {
int32_t numOfElem = 0;
@ -1084,9 +1146,9 @@ static void copyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBl
static int32_t findRowIndex(int32_t start, int32_t num, SColumnInfoData* pCol, const char* tval) {
// the data is loaded, not only the block SMA value
for(int32_t i = start; i < num + start; ++i) {
for (int32_t i = start; i < num + start; ++i) {
char* p = colDataGetData(pCol, i);
if (memcpy((void*)tval, p, pCol->info.bytes) == 0) {
return i;
}
}

@ -1094,7 +1156,6 @@ static int32_t findRowIndex(int32_t start, int32_t num, SColumnInfoData* pCol, c
ASSERT(0);
}

int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
int32_t numOfElems = 0;

@ -1571,10 +1632,14 @@ void setNullSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t
}

void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuplePos* pTuplePos, int32_t rowIndex) {
if (pCtx->subsidiaries.num <= 0) {
return;
}

int32_t pageId = pTuplePos->pageId;
int32_t offset = pTuplePos->offset;

if (pTuplePos->pageId != -1 && pCtx->subsidiaries.num > 0) {
if (pTuplePos->pageId != -1) {
int32_t numOfCols = pCtx->subsidiaries.num;
SFilePage* pPage = getBufPage(pCtx->pBuf, pageId);

@ -1874,7 +1939,7 @@ int32_t stddevFunctionMerge(SqlFunctionCtx* pCtx) {
SStddevRes* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));

for(int32_t i = pInput->startRowIndex; i < pInput->startRowIndex + pInput->numOfRows; ++i) {
for (int32_t i = pInput->startRowIndex; i < pInput->startRowIndex + pInput->numOfRows; ++i) {
char* data = colDataGetData(pCol, i);
SStddevRes* pInputInfo = (SStddevRes*)varDataVal(data);
stddevTransferInfo(pInputInfo, pInfo);
@ -1884,20 +1949,6 @@ int32_t stddevFunctionMerge(SqlFunctionCtx* pCtx) {
return TSDB_CODE_SUCCESS;
}

#define LIST_STDDEV_SUB_N(sumT, T) \
do { \
T* plist = (T*)pCol->pData; \
for (int32_t i = start; i < numOfRows + start; ++i) { \
if (pCol->hasNull && colDataIsNull_f(pCol->nullbitmap, i)) { \
continue; \
} \
numOfElem += 1; \
pStddevRes->count -= 1; \
sumT -= plist[i]; \
pStddevRes->quadraticISum -= plist[i] * plist[i]; \
} \
} while (0)

int32_t stddevInvertFunction(SqlFunctionCtx* pCtx) {
int32_t numOfElem = 0;

@ -2046,15 +2097,6 @@ bool leastSQRFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInf
return true;
}

#define LEASTSQR_CAL(p, x, y, index, step) \
do { \
(p)[0][0] += (double)(x) * (x); \
(p)[0][1] += (double)(x); \
(p)[0][2] += (double)(x) * (y)[index]; \
(p)[1][2] += (y)[index]; \
(x) += step; \
} while (0)

int32_t leastSQRFunction(SqlFunctionCtx* pCtx) {
int32_t numOfElem = 0;

@ -2733,7 +2775,6 @@ int32_t firstFunction(SqlFunctionCtx* pCtx) {
}
}
pInfo->hasResult = true;
// DO_UPDATE_TAG_COLUMNS(pCtx, ts);
pResInfo->numOfRes = 1;
break;
}

@ -2830,7 +2871,6 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) {
}
pInfo->hasResult = true;
pResInfo->numOfRes = 1;
// DO_UPDATE_TAG_COLUMNS(pCtx, ts);
}
break;
}
@ -2910,7 +2950,7 @@ int32_t firstLastFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
pResInfo->isNullRes = (pResInfo->numOfRes == 0) ? 1 : 0;

SFirstLastRes* pRes = GET_ROWCELL_INTERBUF(pResInfo);
colDataAppend(pCol, pBlock->info.rows, pRes->buf, pResInfo->isNullRes);
colDataAppend(pCol, pBlock->info.rows, pRes->buf, pRes->isNull||pResInfo->isNullRes);
// handle selectivity
STuplePos* pTuplePos = (STuplePos*)(pRes->buf + pRes->bytes + sizeof(TSKEY));
setSelectivityValue(pCtx, pBlock, pTuplePos, pBlock->info.rows);

@ -3473,8 +3513,7 @@ void saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pS
setBufPageDirty(pPage, true);
releaseBufPage(pCtx->pBuf, pPage);
#ifdef BUF_PAGE_DEBUG
qDebug("page_saveTuple pos:%p,pageId:%d, offset:%d\n", pPos, pPos->pageId,
pPos->offset);
qDebug("page_saveTuple pos:%p,pageId:%d, offset:%d\n", pPos, pPos->pageId, pPos->offset);
#endif
}

@ -3775,7 +3814,7 @@ bool elapsedFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInfo
SElapsedInfo* pInfo = GET_ROWCELL_INTERBUF(pResultInfo);
pInfo->result = 0;
pInfo->min = MAX_TS_KEY;
pInfo->min = TSKEY_MAX;
pInfo->max = 0;

if (pCtx->numOfParams > 1) {

@ -3802,7 +3841,7 @@ int32_t elapsedFunction(SqlFunctionCtx* pCtx) {
}

if (pInput->colDataAggIsSet) {
if (pInfo->min == MAX_TS_KEY) {
if (pInfo->min == TSKEY_MAX) {
pInfo->min = GET_INT64_VAL(&pAgg->min);
pInfo->max = GET_INT64_VAL(&pAgg->max);
} else {
@ -4477,36 +4516,6 @@ static int8_t getStateOpType(char* opStr) {
return opType;
}

#define GET_STATE_VAL(param) ((param.nType == TSDB_DATA_TYPE_BIGINT) ? (param.i) : (param.d))

#define STATE_COMP(_op, _lval, _param) STATE_COMP_IMPL(_op, _lval, GET_STATE_VAL(_param))

#define STATE_COMP_IMPL(_op, _lval, _rval) \
do { \
switch (_op) { \
case STATE_OPER_LT: \
return ((_lval) < (_rval)); \
break; \
case STATE_OPER_GT: \
return ((_lval) > (_rval)); \
break; \
case STATE_OPER_LE: \
return ((_lval) <= (_rval)); \
break; \
case STATE_OPER_GE: \
return ((_lval) >= (_rval)); \
break; \
case STATE_OPER_NE: \
return ((_lval) != (_rval)); \
break; \
case STATE_OPER_EQ: \
return ((_lval) == (_rval)); \
break; \
default: \
break; \
} \
} while (0)

static bool checkStateOp(int8_t op, SColumnInfoData* pCol, int32_t index, SVariant param) {
char* data = colDataGetData(pCol, index);
switch (pCol->info.type) {

@ -5214,12 +5223,6 @@ static double twa_get_area(SPoint1 s, SPoint1 e) {
return val;
}

#define INIT_INTP_POINT(_p, _k, _v) \
do { \
(_p).key = (_k); \
(_p).val = (_v); \
} while (0)

int32_t twaFunction(SqlFunctionCtx* pCtx) {
SInputColumnInfoData* pInput = &pCtx->input;
SColumnInfoData* pInputCol = pInput->pData[0];
@ -5987,28 +5990,41 @@ int32_t lastrowFunction(SqlFunctionCtx* pCtx) {
int32_t type = pInputCol->info.type;
int32_t bytes = pInputCol->info.bytes;

pInfo->bytes = bytes;

// last_row function does not ignore the null value
for (int32_t i = pInput->numOfRows + pInput->startRowIndex - 1; i >= pInput->startRowIndex; --i) {
if (pInputCol->hasNull && colDataIsNull_s(pInputCol, i)) {
continue;
}

numOfElems++;

char* data = colDataGetData(pInputCol, i);
TSKEY cts = getRowPTs(pInput->pPTS, i);
if (pResInfo->numOfRes == 0 || *(TSKEY*)(pInfo->buf + bytes) < cts) {
if (IS_VAR_DATA_TYPE(type)) {
bytes = varDataTLen(data);
pInfo->bytes = bytes;

if (colDataIsNull_s(pInputCol, i)) {
pInfo->isNull = true;
} else {
if (IS_VAR_DATA_TYPE(type)) {
bytes = varDataTLen(data);
pInfo->bytes = bytes;
}

memcpy(pInfo->buf, data, bytes);
}

memcpy(pInfo->buf, data, bytes);
*(TSKEY*)(pInfo->buf + bytes) = cts;

pInfo->hasResult = true;
pResInfo->numOfRes = 1;

if (pCtx->subsidiaries.num > 0) {
STuplePos* pTuplePos = (STuplePos*)(pInfo->buf + bytes + sizeof(TSKEY));
if (!pInfo->hasResult) {
saveTupleData(pCtx, i, pCtx->pSrcBlock, pTuplePos);
} else {
copyTupleData(pCtx, i, pCtx->pSrcBlock, pTuplePos);
}
}
}
}
@ -2481,7 +2481,6 @@ static int32_t jsonToSubplan(const SJson* pJson, void* pObj) {
int32_t code = tjsonToObject(pJson, jkSubplanId, jsonToSubplanId, &pNode->id);
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkSubplanType, pNode->subplanType, code);
;
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetIntValue(pJson, jkSubplanMsgType, &pNode->msgType);

@ -956,7 +956,8 @@ void nodesDestroyNode(SNode* pNode) {
}
case QUERY_NODE_PHYSICAL_SUBPLAN: {
SSubplan* pSubplan = (SSubplan*)pNode;
nodesDestroyList(pSubplan->pChildren);
// nodesDestroyList(pSubplan->pChildren);
nodesClearList(pSubplan->pChildren);
nodesDestroyNode((SNode*)pSubplan->pNode);
nodesDestroyNode((SNode*)pSubplan->pDataSink);
nodesDestroyNode((SNode*)pSubplan->pTagCond);

@ -972,7 +973,7 @@ void nodesDestroyNode(SNode* pNode) {
SNode* pElement = NULL;
FOREACH(pElement, pPlan->pSubplans) {
if (first) {
first = false;
// first = false;
nodesDestroyNode(pElement);
} else {
nodesClearList(((SNodeListNode*)pElement)->pNodeList);

@ -556,6 +556,7 @@ signed_literal(A) ::= TIMESTAMP NK_STRING(B).
signed_literal(A) ::= duration_literal(B). { A = releaseRawExprNode(pCxt, B); }
signed_literal(A) ::= NULL(B). { A = createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &B); }
signed_literal(A) ::= literal_func(B). { A = releaseRawExprNode(pCxt, B); }
signed_literal(A) ::= NK_QUESTION(B). { A = createPlaceholderValueNode(pCxt, &B); }

%type literal_list { SNodeList* }
%destructor literal_list { nodesDestroyList($$); }
@@ -218,7 +218,7 @@ static SNode* createConstantValue() {
static int32_t calcConstProjections(SCalcConstContext* pCxt, SSelectStmt* pSelect, bool subquery) {
SNode* pProj = NULL;
WHERE_EACH(pProj, pSelect->pProjectionList) {
if (subquery && isUselessCol((SExprNode*)pProj)) {
if (subquery && !pSelect->isDistinct && isUselessCol((SExprNode*)pProj)) {
ERASE_NODE(pSelect->pProjectionList);
continue;
}
@@ -133,7 +133,10 @@ static int32_t createSName(SName* pName, SToken* pTableName, int32_t acctId, con
assert(*p == TS_PATH_DELIMITER[0]);

int32_t dbLen = p - pTableName->z;
char name[TSDB_DB_FNAME_LEN] = {0};
if (dbLen <= 0) {
return buildInvalidOperationMsg(pMsgBuf, msg2);
}
char name[TSDB_DB_FNAME_LEN] = {0};
strncpy(name, pTableName->z, dbLen);
dbLen = strdequote(name);
@@ -2173,14 +2173,28 @@ static int64_t getMonthsFromTimeVal(int64_t val, int32_t fromPrecision, char uni
return -1;
}

static const char* getPrecisionStr(uint8_t precision) {
switch (precision) {
case TSDB_TIME_PRECISION_MILLI:
return TSDB_TIME_PRECISION_MILLI_STR;
case TSDB_TIME_PRECISION_MICRO:
return TSDB_TIME_PRECISION_MICRO_STR;
case TSDB_TIME_PRECISION_NANO:
return TSDB_TIME_PRECISION_NANO_STR;
default:
break;
}
return "unknown";
}

static int32_t checkIntervalWindow(STranslateContext* pCxt, SIntervalWindowNode* pInterval) {
uint8_t precision = ((SColumnNode*)pInterval->pCol)->node.resType.precision;

SValueNode* pInter = (SValueNode*)pInterval->pInterval;
bool valInter = TIME_IS_VAR_DURATION(pInter->unit);
if (pInter->datum.i <= 0 ||
(!valInter && convertTimePrecision(pInter->datum.i, precision, TSDB_TIME_PRECISION_MICRO) < tsMinIntervalTime)) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INTER_VALUE_TOO_SMALL, tsMinIntervalTime);
if (pInter->datum.i <= 0 || (!valInter && pInter->datum.i < tsMinIntervalTime)) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INTER_VALUE_TOO_SMALL, tsMinIntervalTime,
getPrecisionStr(precision));
}

if (NULL != pInterval->pOffset) {
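The two hunks above replace the hard-coded "us" unit with a precision-dependent string. A small self-contained sketch of how the new "%d %s" format pairs the minimum interval with the database precision; the enum values and unit strings below are stand-ins for the real TSDB_TIME_PRECISION_* symbols:

#include <stdint.h>
#include <stdio.h>

enum { PRECISION_MILLI = 0, PRECISION_MICRO = 1, PRECISION_NANO = 2 };

static const char *precisionStr(uint8_t precision) {
  switch (precision) {
    case PRECISION_MILLI: return "ms";
    case PRECISION_MICRO: return "us";
    case PRECISION_NANO:  return "ns";
    default:              return "unknown";
  }
}

int main(void) {
  int     minIntervalTime = 10;          // hypothetical lower bound, in the db's own precision
  uint8_t precision = PRECISION_MILLI;
  char    msg[128];
  // Mirrors the new "%d %s" error format: the unit now follows the db precision
  // instead of always being reported as "us".
  snprintf(msg, sizeof(msg), "Interval cannot be less than %d %s", minIntervalTime, precisionStr(precision));
  puts(msg);
  return 0;
}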
@@ -2754,6 +2768,11 @@ static int32_t translateInsertProject(STranslateContext* pCxt, SInsertStmt* pIns
}
}

if (NULL == pPrimaryKeyExpr) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMNS_NUM,
"Primary timestamp column can not be null");
}

return addOrderByPrimaryKeyToQuery(pCxt, pPrimaryKeyExpr, pInsert->pQuery);
}
@@ -2998,8 +3017,7 @@ static int32_t checkDatabaseOptions(STranslateContext* pCxt, const char* pDbName
int32_t code =
checkRangeOption(pCxt, "buffer", pOptions->buffer, TSDB_MIN_BUFFER_PER_VNODE, TSDB_MAX_BUFFER_PER_VNODE);
if (TSDB_CODE_SUCCESS == code) {
code = checkRangeOption(pCxt, "cacheLast", pOptions->cacheLast, TSDB_MIN_DB_CACHE_LAST,
TSDB_MAX_DB_CACHE_LAST);
code = checkRangeOption(pCxt, "cacheLast", pOptions->cacheLast, TSDB_MIN_DB_CACHE_LAST, TSDB_MAX_DB_CACHE_LAST);
}
if (TSDB_CODE_SUCCESS == code) {
code = checkRangeOption(pCxt, "cacheLastSize", pOptions->cacheLastSize, TSDB_MIN_DB_CACHE_LAST_SIZE,
@@ -4739,8 +4757,13 @@ static int32_t extractQueryResultSchema(const SNodeList* pProjections, int32_t*
int32_t index = 0;
FOREACH(pNode, pProjections) {
SExprNode* pExpr = (SExprNode*)pNode;
(*pSchema)[index].type = pExpr->resType.type;
(*pSchema)[index].bytes = pExpr->resType.bytes;
if (TSDB_DATA_TYPE_NULL == pExpr->resType.type) {
(*pSchema)[index].type = TSDB_DATA_TYPE_VARCHAR;
(*pSchema)[index].bytes = 0;
} else {
(*pSchema)[index].type = pExpr->resType.type;
(*pSchema)[index].bytes = pExpr->resType.bytes;
}
(*pSchema)[index].colId = index + 1;
if ('\0' != pExpr->userAlias[0]) {
strcpy((*pSchema)[index].name, pExpr->userAlias);
@@ -60,7 +60,7 @@ static char* getSyntaxErrFormat(int32_t errCode) {
case TSDB_CODE_PAR_EXPRIE_STATEMENT:
return "This statement is no longer supported";
case TSDB_CODE_PAR_INTER_VALUE_TOO_SMALL:
return "Interval cannot be less than %d us";
return "Interval cannot be less than %d %s";
case TSDB_CODE_PAR_DB_NOT_SPECIFIED:
return "Database not specified";
case TSDB_CODE_PAR_INVALID_IDENTIFIER_NAME:
File diff suppressed because it is too large
@@ -282,7 +282,7 @@ int32_t dataConverToStr(char* str, int type, void* buf, int32_t bufSize, int32_t
}

*str = '"';
int32_t length = taosUcs4ToMbs((TdUcs4 *)buf, bufSize, str + 1);
int32_t length = taosUcs4ToMbs((TdUcs4*)buf, bufSize, str + 1);
if (length <= 0) {
return TSDB_CODE_TSC_INVALID_VALUE;
}
@@ -310,15 +310,15 @@ int32_t dataConverToStr(char* str, int type, void* buf, int32_t bufSize, int32_t
return TSDB_CODE_TSC_INVALID_VALUE;
}

if(len) *len = n;
if (len) *len = n;

return TSDB_CODE_SUCCESS;
}

char* parseTagDatatoJson(void* p) {
char* string = NULL;
char* string = NULL;
SArray* pTagVals = NULL;
cJSON* json = NULL;
cJSON* json = NULL;
if (tTagToValArray((const STag*)p, &pTagVals) != 0) {
goto end;
}
@@ -327,7 +327,7 @@ char* parseTagDatatoJson(void* p) {
if (nCols == 0) {
goto end;
}
char tagJsonKey[256] = {0};
char tagJsonKey[256] = {0};
json = cJSON_CreateObject();
if (json == NULL) {
goto end;
@@ -390,7 +390,7 @@ char* parseTagDatatoJson(void* p) {
end:
cJSON_Delete(json);
taosArrayDestroy(pTagVals);
if(string == NULL){
if (string == NULL) {
string = strdup(TSDB_DATA_NULL_STR_L);
}
return string;
@@ -729,6 +729,7 @@ EDealRes sclRewriteFunction(SNode** pNode, SScalarCtx *ctx) {

if (colDataIsNull_s(output.columnData, 0)) {
res->node.resType.type = TSDB_DATA_TYPE_NULL;
res->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_NULL].bytes;
} else {
res->node.resType.type = output.columnData->info.type;
res->node.resType.bytes = output.columnData->info.bytes;

@@ -819,6 +820,7 @@ EDealRes sclRewriteOperator(SNode** pNode, SScalarCtx *ctx) {
if (colDataIsNull_s(output.columnData, 0)) {
if(node->node.resType.type != TSDB_DATA_TYPE_JSON){
res->node.resType.type = TSDB_DATA_TYPE_NULL;
res->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_NULL].bytes;
}else{
res->node.resType = node->node.resType;
res->isNull = true;
@@ -41,6 +41,8 @@ void schFreeTask(SSchJob *pJob, SSchTask *pTask) {
if (pTask->execNodes) {
taosHashCleanup(pTask->execNodes);
}

taosMemoryFree(pTask->profile.execTime);
}

int32_t schInitTask(SSchJob *pJob, SSchTask *pTask, SSubplan *pPlan, SSchLevel *pLevel, int32_t levelNum) {
@@ -173,7 +173,8 @@ int32_t streamTaskEnqueueRetrieve(SStreamTask* pTask, SStreamRetrieveReq* pReq,
}

int32_t streamProcessDispatchReq(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pRsp) {
qInfo("task %d receive dispatch req from node %d task %d", pTask->taskId, pReq->upstreamNodeId, pReq->upstreamTaskId);
qDebug("task %d receive dispatch req from node %d task %d", pTask->taskId, pReq->upstreamNodeId,
pReq->upstreamTaskId);

// 1. handle input
streamTaskEnqueue(pTask, pReq, pRsp);
@@ -26,10 +26,12 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, void* data, SArray* pRes)
} else if (pItem->type == STREAM_INPUT__DATA_SUBMIT) {
ASSERT(pTask->isDataScan);
SStreamDataSubmit* pSubmit = (SStreamDataSubmit*)data;
qDebug("task %d %p set submit input %p %p %d", pTask->taskId, pTask, pSubmit, pSubmit->data, *pSubmit->dataRef);
qSetStreamInput(exec, pSubmit->data, STREAM_INPUT__DATA_SUBMIT, false);
} else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE) {
SStreamDataBlock* pBlock = (SStreamDataBlock*)data;
SArray* blocks = pBlock->blocks;
qDebug("task %d %p set ssdata input", pTask->taskId, pTask);
qSetMultiStreamInput(exec, blocks->pData, blocks->size, STREAM_INPUT__DATA_BLOCK, false);
} else if (pItem->type == STREAM_INPUT__DROP) {
// TODO exec drop
@@ -30,7 +30,7 @@ extern "C" {

#define TIMER_MAX_MS 0x7FFFFFFF
#define ENV_TICK_TIMER_MS 1000
#define PING_TIMER_MS 1000
#define PING_TIMER_MS 5000
#define ELECT_TIMER_MS_MIN 1300
#define ELECT_TIMER_MS_MAX (ELECT_TIMER_MS_MIN * 2)
#define ELECT_TIMER_MS_RANGE (ELECT_TIMER_MS_MAX - ELECT_TIMER_MS_MIN)
@@ -242,13 +242,13 @@ static int32_t syncIOStopInternal(SSyncIO *io) {
}

static void *syncIOConsumerFunc(void *param) {
SSyncIO * io = param;
STaosQall *qall;
SRpcMsg * pRpcMsg, rpcMsg;
qall = taosAllocateQall();
SSyncIO *io = param;
STaosQall *qall = taosAllocateQall();
SRpcMsg *pRpcMsg, rpcMsg;
SQueueInfo qinfo = {0};

while (1) {
int numOfMsgs = taosReadAllQitemsFromQset(io->pQset, qall, NULL, NULL);
int numOfMsgs = taosReadAllQitemsFromQset(io->pQset, qall, &qinfo);
sTrace("syncIOConsumerFunc %d msgs are received", numOfMsgs);
if (numOfMsgs <= 0) {
break;

@@ -369,6 +369,8 @@ static void *syncIOConsumerFunc(void *param) {

taosFreeQitem(pRpcMsg);
}

taosUpdateItemSize(qinfo.queue, numOfMsgs);
}

taosFreeQall(qall);
@@ -273,16 +273,8 @@ int32_t syncLeaderTransfer(int64_t rid) {
}
ASSERT(rid == pSyncNode->rid);

if (pSyncNode->peersNum == 0) {
taosReleaseRef(tsNodeRefId, pSyncNode->rid);
terrno = TSDB_CODE_SYN_INTERNAL_ERROR;
return -1;
}

SNodeInfo newLeader = (pSyncNode->peersNodeInfo)[0];
int32_t ret = syncNodeLeaderTransfer(pSyncNode);
taosReleaseRef(tsNodeRefId, pSyncNode->rid);

int32_t ret = syncLeaderTransferTo(rid, newLeader);
return ret;
}

@@ -293,25 +285,8 @@ int32_t syncLeaderTransferTo(int64_t rid, SNodeInfo newLeader) {
return -1;
}
ASSERT(rid == pSyncNode->rid);
int32_t ret = 0;

if (pSyncNode->replicaNum == 1) {
sError("only one replica, cannot drop leader");
taosReleaseRef(tsNodeRefId, pSyncNode->rid);
terrno = TSDB_CODE_SYN_ONE_REPLICA;
return -1;
}

SyncLeaderTransfer* pMsg = syncLeaderTransferBuild(pSyncNode->vgId);
pMsg->newLeaderId.addr = syncUtilAddr2U64(newLeader.nodeFqdn, newLeader.nodePort);
pMsg->newLeaderId.vgId = pSyncNode->vgId;
pMsg->newNodeInfo = newLeader;
ASSERT(pMsg != NULL);
SRpcMsg rpcMsg = {0};
syncLeaderTransfer2RpcMsg(pMsg, &rpcMsg);
syncLeaderTransferDestroy(pMsg);

ret = syncNodePropose(pSyncNode, &rpcMsg, false);
int32_t ret = syncNodeLeaderTransferTo(pSyncNode, newLeader);
taosReleaseRef(tsNodeRefId, pSyncNode->rid);
return ret;
}

@@ -337,6 +312,12 @@ int32_t syncNodeLeaderTransferTo(SSyncNode* pSyncNode, SNodeInfo newLeader) {
return -1;
}

do {
char logBuf[128];
snprintf(logBuf, sizeof(logBuf), "begin leader transfer to %s:%u", newLeader.nodeFqdn, newLeader.nodePort);
syncNodeEventLog(pSyncNode, logBuf);
} while (0);

SyncLeaderTransfer* pMsg = syncLeaderTransferBuild(pSyncNode->vgId);
pMsg->newLeaderId.addr = syncUtilAddr2U64(newLeader.nodeFqdn, newLeader.nodePort);
pMsg->newLeaderId.vgId = pSyncNode->vgId;
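The refactor above narrows syncLeaderTransfer to picking the first peer and delegating, with the message build moved into syncNodeLeaderTransferTo. A rough standalone sketch of that call chain, using simplified stand-in structs rather than the real SSyncNode/SyncLeaderTransfer types:

#include <stdio.h>

typedef struct { const char *fqdn; unsigned short port; } NodeInfo;
typedef struct { int peersNum; NodeInfo peers[4]; } SyncNode;

static int nodeLeaderTransferTo(SyncNode *node, NodeInfo target) {
  // In the refactor this is where the SyncLeaderTransfer message is built and proposed.
  (void)node;
  printf("begin leader transfer to %s:%u\n", target.fqdn, target.port);
  return 0;
}

static int nodeLeaderTransfer(SyncNode *node) {
  if (node->peersNum == 0) return -1;                  // nothing to hand over to
  return nodeLeaderTransferTo(node, node->peers[0]);   // first peer, as in the hunk above
}

int main(void) {
  SyncNode n = {.peersNum = 1, .peers = {{"node2.example", 6030}}};
  return nodeLeaderTransfer(&n);
}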
@@ -1118,19 +1099,13 @@ void syncNodeStart(SSyncNode* pSyncNode) {
// Raft 3.6.2 Committing entries from previous terms
syncNodeAppendNoop(pSyncNode);
syncMaybeAdvanceCommitIndex(pSyncNode);

return;
} else {
syncNodeBecomeFollower(pSyncNode, "first start");
}

syncNodeBecomeFollower(pSyncNode, "first start");

// int32_t ret = 0;
// ret = syncNodeStartPingTimer(pSyncNode);
// ASSERT(ret == 0);

if (gRaftDetailLog) {
syncNodeLog2("==state change become leader immediately==", pSyncNode);
}
int32_t ret = 0;
ret = syncNodeStartPingTimer(pSyncNode);
ASSERT(ret == 0);
}

void syncNodeStartStandBy(SSyncNode* pSyncNode) {

@@ -1147,8 +1122,6 @@ void syncNodeStartStandBy(SSyncNode* pSyncNode) {
void syncNodeClose(SSyncNode* pSyncNode) {
syncNodeEventLog(pSyncNode, "sync close");

// leader transfer

int32_t ret;
ASSERT(pSyncNode != NULL);

@@ -1183,14 +1156,6 @@ void syncNodeClose(SSyncNode* pSyncNode) {
pSyncNode->pNewNodeReceiver = NULL;
}

/*
if (pSyncNode->pSnapshot != NULL) {
taosMemoryFree(pSyncNode->pSnapshot);
}
*/

// tsem_destroy(&pSyncNode->restoreSem);

// free memory in syncFreeNode
// taosMemoryFree(pSyncNode);
}
@@ -1255,7 +1220,7 @@ int32_t syncNodeStartPingTimer(SSyncNode* pSyncNode) {
&pSyncNode->pPingTimer);
atomic_store_64(&pSyncNode->pingTimerLogicClock, pSyncNode->pingTimerLogicClockUser);
} else {
sError("sync env is stop, syncNodeStartPingTimer");
sError("vgId:%d, start ping timer error, sync env is stop", pSyncNode->vgId);
}
return ret;
}

@@ -1276,7 +1241,7 @@ int32_t syncNodeStartElectTimer(SSyncNode* pSyncNode, int32_t ms) {
&pSyncNode->pElectTimer);
atomic_store_64(&pSyncNode->electTimerLogicClock, pSyncNode->electTimerLogicClockUser);
} else {
sError("sync env is stop, syncNodeStartElectTimer");
sError("vgId:%d, start elect timer error, sync env is stop", pSyncNode->vgId);
}
return ret;
}

@@ -1316,7 +1281,7 @@ int32_t syncNodeStartHeartbeatTimer(SSyncNode* pSyncNode) {
&pSyncNode->pHeartbeatTimer);
atomic_store_64(&pSyncNode->heartbeatTimerLogicClock, pSyncNode->heartbeatTimerLogicClockUser);
} else {
sError("sync env is stop, syncNodeStartHeartbeatTimer");
sError("vgId:%d, start heartbeat timer error, sync env is stop", pSyncNode->vgId);
}
return ret;
}
@@ -2643,7 +2608,7 @@ const char* syncStr(ESyncState state) {
static int32_t syncDoLeaderTransfer(SSyncNode* ths, SRpcMsg* pRpcMsg, SSyncRaftEntry* pEntry) {
SyncLeaderTransfer* pSyncLeaderTransfer = syncLeaderTransferFromRpcMsg2(pRpcMsg);

syncNodeEventLog(ths, "begin leader transfer");
syncNodeEventLog(ths, "do leader transfer");

bool sameId = syncUtilSameId(&(pSyncLeaderTransfer->newLeaderId), &(ths->myRaftId));
bool sameNodeInfo = strcmp(pSyncLeaderTransfer->newNodeInfo.nodeFqdn, ths->myNodeInfo.nodeFqdn) == 0 &&
@@ -17,6 +17,11 @@
#include "syncElection.h"
#include "syncReplication.h"

int32_t syncNodeTimerRoutine(SSyncNode* ths) {
syncNodeEventLog(ths, "timer routines ... ");
return 0;
}

int32_t syncNodeOnTimeoutCb(SSyncNode* ths, SyncTimeout* pMsg) {
int32_t ret = 0;
syncTimeoutLog2("==syncNodeOnTimeoutCb==", pMsg);

@@ -24,8 +29,11 @@ int32_t syncNodeOnTimeoutCb(SSyncNode* ths, SyncTimeout* pMsg) {
if (pMsg->timeoutType == SYNC_TIMEOUT_PING) {
if (atomic_load_64(&ths->pingTimerLogicClockUser) <= pMsg->logicClock) {
++(ths->pingTimerCounter);

// syncNodePingAll(ths);
syncNodePingPeers(ths);
// syncNodePingPeers(ths);

syncNodeTimerRoutine(ths);
}

} else if (pMsg->timeoutType == SYNC_TIMEOUT_ELECTION) {

@@ -40,7 +48,7 @@ int32_t syncNodeOnTimeoutCb(SSyncNode* ths, SyncTimeout* pMsg) {
syncNodeReplicate(ths);
}
} else {
sTrace("unknown timeoutType:%d", pMsg->timeoutType);
sError("vgId:%d, unknown timeout-type:%d", ths->vgId, pMsg->timeoutType);
}

return ret;
@@ -31,12 +31,12 @@ void processShellMsg() {
STaosQall *qall;
SRpcMsg * pRpcMsg, rpcMsg;
int type;
void * pvnode;
SQueueInfo qinfo = {0};

qall = taosAllocateQall();

while (1) {
int numOfMsgs = taosReadAllQitemsFromQset(qset, qall, &pvnode, NULL);
int numOfMsgs = taosReadAllQitemsFromQset(qset, qall, &qinfo);
tDebug("%d shell msgs are received", numOfMsgs);
if (numOfMsgs <= 0) break;

@@ -86,6 +86,8 @@ void processShellMsg() {
rpcSendResponse(&nRpcMsg);
}
}

taosUpdateItemSize(qinfo.queue, numOfMsgs);
}

taosFreeQall(qall);
@@ -66,6 +66,7 @@ void walCloseReader(SWalReader *pRead) {
}

int32_t walNextValidMsg(SWalReader *pRead) {
wDebug("vgId:%d wal start to fetch", pRead->pWal->cfg.vgId);
int64_t fetchVer = pRead->curVersion;
int64_t endVer = pRead->cond.scanUncommited ? walGetLastVer(pRead->pWal) : walGetCommittedVer(pRead->pWal);
while (fetchVer <= endVer) {

@@ -176,7 +177,7 @@ int32_t walReadSeekVerImpl(SWalReader *pRead, int64_t ver) {
return -1;
}

wDebug("wal version reset from %ld to %ld", pRead->curVersion, ver);
wDebug("wal version reset from %ld(invalid: %d) to %ld", pRead->curVersion, pRead->curInvalid, ver);

pRead->curVersion = ver;
return 0;

@@ -242,6 +243,7 @@ static int32_t walFetchHeadNew(SWalReader *pRead, int64_t fetchVer) {
return -1;
}
}
pRead->curInvalid = 0;
return 0;
}

@@ -301,6 +303,7 @@ static int32_t walSkipFetchBodyNew(SWalReader *pRead) {
int64_t code;

ASSERT(pRead->curVersion == pRead->pHead->head.version);
ASSERT(pRead->curInvalid == 0);

code = taosLSeekFile(pRead->pLogFile, pRead->pHead->head.bodyLen, SEEK_CUR);
if (code < 0) {

@@ -404,6 +407,7 @@ int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead) {
}

int32_t walReadVer(SWalReader *pRead, int64_t ver) {
wDebug("vgId:%d wal start to read ver %ld", pRead->pWal->cfg.vgId, ver);
int64_t contLen;
bool seeked = false;
@@ -115,7 +115,7 @@ bool taosQueueEmpty(STaosQueue *queue) {

bool empty = false;
taosThreadMutexLock(&queue->mutex);
if (queue->head == NULL && queue->tail == NULL) {
if (queue->head == NULL && queue->tail == NULL && queue->numOfItems == 0 && queue->memOfItems == 0) {
empty = true;
}
taosThreadMutexUnlock(&queue->mutex);

@@ -123,6 +123,14 @@ bool taosQueueEmpty(STaosQueue *queue) {
return empty;
}

void taosUpdateItemSize(STaosQueue *queue, int32_t items) {
if (queue == NULL) return;

taosThreadMutexLock(&queue->mutex);
queue->numOfItems -= items;
taosThreadMutexUnlock(&queue->mutex);
}

int32_t taosQueueItemSize(STaosQueue *queue) {
if (queue == NULL) return 0;

@@ -257,6 +265,7 @@ int32_t taosReadAllQitems(STaosQueue *queue, STaosQall *qall) {
queue->tail = NULL;
queue->numOfItems = 0;
queue->memOfItems = 0;
uTrace("read %d items from queue:%p, items:%d mem:%" PRId64, code, queue, queue->numOfItems, queue->memOfItems);
if (queue->qset) atomic_sub_fetch_32(&queue->qset->numOfItems, qall->numOfItems);
}

@@ -397,7 +406,7 @@ void taosRemoveFromQset(STaosQset *qset, STaosQueue *queue) {

int32_t taosGetQueueNumber(STaosQset *qset) { return qset->numOfQueues; }

int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, int64_t *ts, void **ahandle, FItem *itemFp) {
int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, SQueueInfo *qinfo) {
STaosQnode *pNode = NULL;
int32_t code = 0;

@@ -417,17 +426,18 @@ int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, int64_t *ts, void
if (queue->head) {
pNode = queue->head;
*ppItem = pNode->item;
if (ahandle) *ahandle = queue->ahandle;
if (itemFp) *itemFp = queue->itemFp;
if (ts) *ts = pNode->timestamp;
qinfo->ahandle = queue->ahandle;
qinfo->fp = queue->itemFp;
qinfo->queue = queue;
qinfo->timestamp = pNode->timestamp;

queue->head = pNode->next;
if (queue->head == NULL) queue->tail = NULL;
queue->numOfItems--;
// queue->numOfItems--;
queue->memOfItems -= pNode->size;
atomic_sub_fetch_32(&qset->numOfItems, 1);
code = 1;
uTrace("item:%p is read out from queue:%p, items:%d mem:%" PRId64, *ppItem, queue, queue->numOfItems,
uTrace("item:%p is read out from queue:%p, items:%d mem:%" PRId64, *ppItem, queue, queue->numOfItems - 1,
queue->memOfItems);
}

@@ -440,7 +450,7 @@ int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, int64_t *ts, void
return code;
}

int32_t taosReadAllQitemsFromQset(STaosQset *qset, STaosQall *qall, void **ahandle, FItems *itemsFp) {
int32_t taosReadAllQitemsFromQset(STaosQset *qset, STaosQall *qall, SQueueInfo *qinfo) {
STaosQueue *queue;
int32_t code = 0;

@@ -461,13 +471,16 @@ int32_t taosReadAllQitemsFromQset(STaosQset *qset, STaosQall *qall, void **ahand
qall->start = queue->head;
qall->numOfItems = queue->numOfItems;
code = qall->numOfItems;
if (ahandle) *ahandle = queue->ahandle;
if (itemsFp) *itemsFp = queue->itemsFp;
qinfo->ahandle = queue->ahandle;
qinfo->fp = queue->itemsFp;
qinfo->queue = queue;

queue->head = NULL;
queue->tail = NULL;
queue->numOfItems = 0;
// queue->numOfItems = 0;
queue->memOfItems = 0;
uTrace("read %d items from queue:%p, items:0 mem:%" PRId64, code, queue, queue->memOfItems);

atomic_sub_fetch_32(&qset->numOfItems, qall->numOfItems);
for (int32_t j = 1; j < qall->numOfItems; ++j) {
tsem_wait(&qset->sem);
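The queue hunks above change the read APIs to fill a caller-supplied SQueueInfo (ahandle, callback, owning queue) and defer the item-count bookkeeping to taosUpdateItemSize after the batch is processed. A compact standalone sketch of that consumer pattern with stand-in types (Queue, QueueInfo, readAllItems, updateItemSize are illustrative only, not the real tqueue API):

#include <stdio.h>

typedef struct Queue { int numOfItems; } Queue;
typedef struct { void *ahandle; void *fp; Queue *queue; } QueueInfo;

static int readAllItems(Queue *q, QueueInfo *qinfo) {
  int n = q->numOfItems;  // hand the whole batch to the caller
  qinfo->queue = q;       // remember which queue it came from
  return n;
}

static void updateItemSize(Queue *q, int items) {
  if (q) q->numOfItems -= items;  // deferred accounting, done after processing
}

int main(void) {
  Queue q = {.numOfItems = 3};
  QueueInfo qinfo = {0};
  int n = readAllItems(&q, &qinfo);
  // ... process the n dequeued items here ...
  updateItemSize(qinfo.queue, n);
  printf("left=%d\n", q.numOfItems);  // 0
  return 0;
}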
@@ -70,27 +70,27 @@ void tQWorkerCleanup(SQWorkerPool *pool) {

static void *tQWorkerThreadFp(SQWorker *worker) {
SQWorkerPool *pool = worker->pool;
FItem fp = NULL;

void *msg = NULL;
void *ahandle = NULL;
int32_t code = 0;
int64_t ts = 0;
SQueueInfo qinfo = {0};
void *msg = NULL;
int32_t code = 0;

taosBlockSIGPIPE();
setThreadName(pool->name);
uDebug("worker:%s:%d is running", pool->name, worker->id);

while (1) {
if (taosReadQitemFromQset(pool->qset, (void **)&msg, &ts, &ahandle, &fp) == 0) {
if (taosReadQitemFromQset(pool->qset, (void **)&msg, &qinfo) == 0) {
uDebug("worker:%s:%d qset:%p, got no message and exiting", pool->name, worker->id, pool->qset);
break;
}

if (fp != NULL) {
SQueueInfo info = {.ahandle = ahandle, .workerId = worker->id, .threadNum = pool->num, .timestamp = ts};
(*fp)(&info, msg);
if (qinfo.fp != NULL) {
qinfo.workerId = worker->id;
qinfo.threadNum = pool->num;
(*((FItem)qinfo.fp))(&qinfo, msg);
}

taosUpdateItemSize(qinfo.queue, 1);
}

return NULL;

@@ -195,28 +195,28 @@ void tWWorkerCleanup(SWWorkerPool *pool) {

static void *tWWorkerThreadFp(SWWorker *worker) {
SWWorkerPool *pool = worker->pool;
FItems fp = NULL;

void *msg = NULL;
void *ahandle = NULL;
int32_t numOfMsgs = 0;
int32_t qtype = 0;
SQueueInfo qinfo = {0};
void *msg = NULL;
int32_t code = 0;
int32_t numOfMsgs = 0;

taosBlockSIGPIPE();
setThreadName(pool->name);
uDebug("worker:%s:%d is running", pool->name, worker->id);

while (1) {
numOfMsgs = taosReadAllQitemsFromQset(worker->qset, worker->qall, &ahandle, &fp);
numOfMsgs = taosReadAllQitemsFromQset(worker->qset, worker->qall, &qinfo);
if (numOfMsgs == 0) {
uDebug("worker:%s:%d qset:%p, got no message and exiting", pool->name, worker->id, worker->qset);
break;
}

if (fp != NULL) {
SQueueInfo info = {.ahandle = ahandle, .workerId = worker->id, .threadNum = pool->num};
(*fp)(&info, worker->qall, numOfMsgs);
if (qinfo.fp != NULL) {
qinfo.workerId = worker->id;
qinfo.threadNum = pool->num;
(*((FItems)qinfo.fp))(&qinfo, worker->qall, numOfMsgs);
}
taosUpdateItemSize(qinfo.queue, numOfMsgs);
}

return NULL;
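In the worker loops above, the callback now travels inside the queue info as an untyped pointer and is cast back to FItem/FItems before the call. A minimal sketch of that cast-and-dispatch shape, with hypothetical names (QueueInfoSketch, ItemFn, handleMsg) standing in for the real worker types:

#include <stdio.h>

typedef struct QueueInfoSketch { void *ahandle; void *fp; int workerId; int threadNum; } QueueInfoSketch;
typedef void (*ItemFn)(QueueInfoSketch *info, void *msg);

static void handleMsg(QueueInfoSketch *info, void *msg) {
  printf("worker %d got %s\n", info->workerId, (const char *)msg);
}

int main(void) {
  QueueInfoSketch qinfo = {.fp = (void *)handleMsg};  // stored untyped, as in SQueueInfo
  qinfo.workerId = 1;
  qinfo.threadNum = 4;
  if (qinfo.fp != NULL) {
    (*((ItemFn)qinfo.fp))(&qinfo, "hello");  // same cast-and-call shape as tQWorkerThreadFp
  }
  return 0;
}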
@@ -96,11 +96,11 @@
./test.sh -f tsim/stream/basic2.sim
./test.sh -f tsim/stream/drop_stream.sim
./test.sh -f tsim/stream/distributeInterval0.sim
# ./test.sh -f tsim/stream/distributeIntervalRetrive0.sim
./test.sh -f tsim/stream/distributeIntervalRetrive0.sim
# ./test.sh -f tsim/stream/distributesession0.sim
./test.sh -f tsim/stream/session0.sim
./test.sh -f tsim/stream/session1.sim
# ./test.sh -f tsim/stream/state0.sim
./test.sh -f tsim/stream/state0.sim
./test.sh -f tsim/stream/triggerInterval0.sim
# ./test.sh -f tsim/stream/triggerSession0.sim
./test.sh -f tsim/stream/partitionby.sim

@@ -170,6 +170,7 @@
# --- valgrind
./test.sh -f tsim/valgrind/checkError1.sim
./test.sh -f tsim/valgrind/checkError2.sim
./test.sh -f tsim/valgrind/checkError3.sim

# --- vnode
# ./test.sh -f tsim/vnode/replica3_basic.sim
@@ -89,5 +89,10 @@ endi
#TODO: MOVE IT TO NORMAL CASE
sql_error select * from tb1 where not (null);

sql select sum(1/0) from tb1;
if $rows != 1 then
return -1
endi


system sh/exec.sh -n dnode1 -s stop -x SIGINT
@ -76,7 +76,7 @@ if $data01 != 5 then
|
|||
goto loop1
|
||||
endi
|
||||
|
||||
if $data02 != 14 then
|
||||
if $data02 != 38 then
|
||||
print =====data02=$data02
|
||||
goto loop1
|
||||
endi
|
||||
|
@ -134,7 +134,7 @@ if $data01 != 6 then
|
|||
goto loop2
|
||||
endi
|
||||
|
||||
if $data02 != 18 then
|
||||
if $data02 != 42 then
|
||||
print =====data02=$data02
|
||||
goto loop2
|
||||
endi
|
||||
|
@ -192,7 +192,7 @@ if $data01 != 7 then
|
|||
goto loop3
|
||||
endi
|
||||
|
||||
if $data02 != 22 then
|
||||
if $data02 != 46 then
|
||||
print =====data02=$data02
|
||||
goto loop3
|
||||
endi
|
||||
|
@ -232,60 +232,4 @@ endi
|
|||
|
||||
print loop3 over
|
||||
|
||||
$loop_count = 0
|
||||
loop4:
|
||||
sleep 1000
|
||||
sql select * from streamtST1;
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
# row 0
|
||||
if $data01 != 7 then
|
||||
print =====data01=$data01
|
||||
goto loop4
|
||||
endi
|
||||
|
||||
if $data02 != 22 then
|
||||
print =====data02=$data02
|
||||
goto loop4
|
||||
endi
|
||||
|
||||
# row 1
|
||||
if $data11 != 3 then
|
||||
print =====data11=$data11
|
||||
goto loop4
|
||||
endi
|
||||
|
||||
if $data12 != 10 then
|
||||
print =====data12=$data12
|
||||
goto loop4
|
||||
endi
|
||||
|
||||
#row2
|
||||
if $data21 != 3 then
|
||||
print =====data21=$data21
|
||||
goto loop4
|
||||
endi
|
||||
|
||||
if $data22 != 11 then
|
||||
print =====data22=$data22
|
||||
goto loop4
|
||||
endi
|
||||
|
||||
#row 3
|
||||
if $data31 != 5 then
|
||||
print =====data31=$data31
|
||||
goto loop4
|
||||
endi
|
||||
|
||||
if $data32 != 60 then
|
||||
print =====data32=$data32
|
||||
goto loop4
|
||||
endi
|
||||
|
||||
print loop4 over
|
||||
|
||||
system sh/stop_dnodes.sh
|
||||
|
|
|
@ -0,0 +1,193 @@
|
|||
system sh/stop_dnodes.sh
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/deploy.sh -n dnode2 -i 2
|
||||
system sh/deploy.sh -n dnode3 -i 3
|
||||
system sh/deploy.sh -n dnode4 -i 4
|
||||
|
||||
system sh/cfg.sh -n dnode1 -c supportVnodes -v 0
|
||||
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
system sh/exec.sh -n dnode3 -s start
|
||||
system sh/exec.sh -n dnode4 -s start
|
||||
|
||||
$loop_cnt = 0
|
||||
check_dnode_ready:
|
||||
$loop_cnt = $loop_cnt + 1
|
||||
sleep 200
|
||||
if $loop_cnt == 10 then
|
||||
print ====> dnode not ready!
|
||||
return -1
|
||||
endi
|
||||
sql show dnodes
|
||||
print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
|
||||
print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
|
||||
print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
|
||||
print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6]
|
||||
if $data[0][0] != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][4] != ready then
|
||||
goto check_dnode_ready
|
||||
endi
|
||||
|
||||
sql connect
|
||||
sql create dnode $hostname port 7200
|
||||
sql create dnode $hostname port 7300
|
||||
sql create dnode $hostname port 7400
|
||||
|
||||
$loop_cnt = 0
|
||||
check_dnode_ready_1:
|
||||
$loop_cnt = $loop_cnt + 1
|
||||
sleep 200
|
||||
if $loop_cnt == 10 then
|
||||
print ====> dnodes not ready!
|
||||
return -1
|
||||
endi
|
||||
sql show dnodes
|
||||
print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
|
||||
print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
|
||||
print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
|
||||
print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6]
|
||||
if $data[0][4] != ready then
|
||||
goto check_dnode_ready_1
|
||||
endi
|
||||
if $data[1][4] != ready then
|
||||
goto check_dnode_ready_1
|
||||
endi
|
||||
if $data[2][4] != ready then
|
||||
goto check_dnode_ready_1
|
||||
endi
|
||||
if $data[3][4] != ready then
|
||||
goto check_dnode_ready_1
|
||||
endi
|
||||
|
||||
$replica = 3
|
||||
$vgroups = 1
|
||||
|
||||
print ============= create database
|
||||
sql create database db replica $replica vgroups $vgroups
|
||||
|
||||
$loop_cnt = 0
|
||||
check_db_ready:
|
||||
$loop_cnt = $loop_cnt + 1
|
||||
sleep 200
|
||||
if $loop_cnt == 100 then
|
||||
print ====> db not ready!
|
||||
return -1
|
||||
endi
|
||||
sql show databases
|
||||
print ===> rows: $rows
|
||||
print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] $data[2][7] $data[2][8] $data[2][9] $data[2][6] $data[2][11] $data[2][12] $data[2][13] $data[2][14] $data[2][15] $data[2][16] $data[2][17] $data[2][18] $data[2][19]
|
||||
if $rows != 3 then
|
||||
return -1
|
||||
endi
|
||||
if $data[2][19] != ready then
|
||||
goto check_db_ready
|
||||
endi
|
||||
|
||||
sql use db
|
||||
|
||||
$loop_cnt = 0
|
||||
check_vg_ready:
|
||||
$loop_cnt = $loop_cnt + 1
|
||||
sleep 200
|
||||
if $loop_cnt == 300 then
|
||||
print ====> vgroups not ready!
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql show vgroups
|
||||
print ===> rows: $rows
|
||||
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11]
|
||||
|
||||
if $rows != $vgroups then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data[0][4] == leader then
|
||||
if $data[0][6] == follower then
|
||||
if $data[0][8] == follower then
|
||||
print ---- vgroup $data[0][0] leader locate on dnode $data[0][3]
|
||||
endi
|
||||
endi
|
||||
elif $data[0][6] == leader then
|
||||
if $data[0][4] == follower then
|
||||
if $data[0][8] == follower then
|
||||
print ---- vgroup $data[0][0] leader locate on dnode $data[0][5]
|
||||
endi
|
||||
endi
|
||||
elif $data[0][8] == leader then
|
||||
if $data[0][4] == follower then
|
||||
if $data[0][6] == follower then
|
||||
print ---- vgroup $data[0][0] leader locate on dnode $data[0][7]
|
||||
endi
|
||||
endi
|
||||
else
|
||||
goto check_vg_ready
|
||||
endi
|
||||
|
||||
|
||||
vg_ready:
|
||||
print ====> create stable/child table
|
||||
sql create table stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int)
|
||||
|
||||
sql show stables
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql create table ct1 using stb tags(1000)
|
||||
|
||||
|
||||
print ===> write 100 records
|
||||
$N = 100
|
||||
$count = 0
|
||||
while $count < $N
|
||||
$ms = 1591200000000 + $count
|
||||
sql insert into ct1 values( $ms , $count , 2.1, 3.1)
|
||||
$count = $count + 1
|
||||
endw
|
||||
|
||||
|
||||
#sql flush database db;
|
||||
|
||||
|
||||
sleep 3000
|
||||
|
||||
|
||||
print ===> stop dnode1 dnode2 dnode3 dnode4
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode2 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode3 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode4 -s stop -x SIGINT
|
||||
|
||||
|
||||
|
||||
########################################################
|
||||
print ===> start dnode1 dnode2 dnode3 dnode4
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
system sh/exec.sh -n dnode3 -s start
|
||||
system sh/exec.sh -n dnode4 -s start
|
||||
|
||||
sleep 3000
|
||||
|
||||
print =============== query data
|
||||
sql connect
|
||||
sql use db
|
||||
sql select * from ct1
|
||||
print rows: $rows
|
||||
print $data00 $data01 $data02
|
||||
if $rows != 100 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
#system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
#system sh/exec.sh -n dnode2 -s stop -x SIGINT
|
||||
#system sh/exec.sh -n dnode3 -s stop -x SIGINT
|
||||
#system sh/exec.sh -n dnode4 -s stop -x SIGINT
|
||||
#########################################################
|
||||
|
||||
|
||||
|
|
@ -210,10 +210,11 @@ class TDTestCase:
|
|||
self.tag_check(i,k,tag_unint)
|
||||
for error in [constant.INT_UN_MIN-1,constant.INT_UN_MAX+1]:
|
||||
tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}')
|
||||
elif v.lower() == 'bigint unsigned':
|
||||
self.tag_check(i,k,tag_unbigint)
|
||||
for error in [constant.BIGINT_UN_MIN-1,constant.BIGINT_UN_MAX+1]:
|
||||
tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}')
|
||||
#! bug TD-17106
|
||||
# elif v.lower() == 'bigint unsigned':
|
||||
# self.tag_check(i,k,tag_unbigint)
|
||||
# for error in [constant.BIGINT_UN_MIN-1,constant.BIGINT_UN_MAX+1]:
|
||||
# tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}')
|
||||
elif v.lower() == 'bool':
|
||||
self.tag_check(i,k,tag_bool)
|
||||
elif v.lower() == 'float':
|
||||
|
@ -223,7 +224,8 @@ class TDTestCase:
|
|||
tdSql.checkEqual(tdSql.queryResult[0][0],tdSql.queryResult[0][0])
|
||||
else:
|
||||
tdLog.exit(f'select {k} from {self.stbname}_{i},data check failure')
|
||||
# for error in [constant.FLOAT_MIN*10,constant.FLOAT_MAX*10]:
|
||||
#! bug TD-17106
|
||||
# for error in [constant.FLOAT_MIN*1.1,constant.FLOAT_MAX*1.1]:
|
||||
# tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}')
|
||||
elif v.lower() == 'double':
|
||||
tdSql.execute(f'alter table {self.stbname}_{i} set tag {k} = {tag_double}')
|
||||
|
@ -232,7 +234,7 @@ class TDTestCase:
|
|||
tdSql.checkEqual(tdSql.queryResult[0][0],tdSql.queryResult[0][0])
|
||||
else:
|
||||
tdLog.exit(f'select {k} from {self.stbname}_{i},data check failure')
|
||||
for error in [constant.DOUBLE_MIN-1,constant.DOUBLE_MAX+1]:
|
||||
for error in [constant.DOUBLE_MIN*1.1,constant.DOUBLE_MAX*1.1]:
|
||||
tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}')
|
||||
elif 'binary' in v.lower():
|
||||
tag_binary_error = tdCom.getLongName(self.binary_length+1)
|
||||
|
@ -242,7 +244,8 @@ class TDTestCase:
|
|||
tdSql.checkData(0,0,tag_binary)
|
||||
elif 'nchar' in v.lower():
|
||||
tag_nchar_error = tdCom.getLongName(self.nchar_length+1)
|
||||
tdSql.execute(f'alter table {self.stbname}_{i} set tag {k} = "{tag_nchar_error}"')
|
||||
tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = "{tag_nchar_error}"')
|
||||
tdSql.execute(f'alter table {self.stbname}_{i} set tag {k} = "{tag_nchar}"')
|
||||
tdSql.query(f'select {k} from {self.stbname}_{i}')
|
||||
tdSql.checkData(0,0,tag_nchar)
|
||||
|
||||
|
|
|
@ -16,18 +16,18 @@ class TDTestCase:
|
|||
self.rowNum = 10
|
||||
self.ts = 1640966400000 # 2022-1-1 00:00:00.000
|
||||
def check_customize_param_ms(self):
|
||||
|
||||
|
||||
time_zone = time.strftime('%z')
|
||||
tdSql.execute('create database db1 precision "ms"')
|
||||
tdSql.execute('use db1')
|
||||
tdSql.execute('create table if not exists ntb(ts timestamp, c1 int, c2 timestamp)')
|
||||
for i in range(self.rowNum):
|
||||
tdSql.execute("insert into ntb values(%d, %d, %d)"
|
||||
tdSql.execute("insert into ntb values(%d, %d, %d)"
|
||||
% (self.ts + i, i + 1, self.ts + i))
|
||||
tdSql.query('select to_iso8601(ts) from ntb')
|
||||
for i in range(self.rowNum):
|
||||
tdSql.checkEqual(tdSql.queryResult[i][0],f'2022-01-01T00:00:00.00{i}{time_zone}')
|
||||
|
||||
|
||||
timezone_list = ['+0000','+0100','+0200','+0300','+0330','+0400','+0500','+0530','+0600','+0700','+0800','+0900','+1000','+1100','+1200',\
|
||||
'+00','+01','+02','+03','+04','+05','+06','+07','+08','+09','+10','+11','+12',\
|
||||
'+00:00','+01:00','+02:00','+03:00','+03:30','+04:00','+05:00','+05:30','+06:00','+07:00','+08:00','+09:00','+10:00','+11:00','+12:00',\
|
||||
|
@ -39,7 +39,7 @@ class TDTestCase:
|
|||
tdSql.query(f'select to_iso8601(ts,"{j}") from ntb')
|
||||
for i in range(self.rowNum):
|
||||
tdSql.checkEqual(tdSql.queryResult[i][0],f'2022-01-01T00:00:00.00{i}{j}')
|
||||
|
||||
|
||||
error_param_list = [0,100.5,'a','!']
|
||||
for i in error_param_list:
|
||||
tdSql.error(f'select to_iso8601(ts,"{i}") from ntb')
|
||||
|
@ -47,7 +47,7 @@ class TDTestCase:
|
|||
error_timezone_param = ['+13','-13','+1300','-1300','+0001','-0001','-0330','-0530']
|
||||
for i in error_timezone_param:
|
||||
tdSql.error(f'select to_iso8601(ts,"{i}") from ntb')
|
||||
|
||||
|
||||
def check_base_function(self):
|
||||
tdSql.prepare()
|
||||
tdLog.printNoPrefix("==========step1:create tables==========")
|
||||
|
@ -75,12 +75,12 @@ class TDTestCase:
|
|||
tdSql.query("select to_iso8601(ts) from ntb")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select to_iso8601(ts) from db.ntb")
|
||||
|
||||
|
||||
tdSql.query("select to_iso8601(today()) from ntb")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select to_iso8601(now()) from ntb")
|
||||
tdSql.checkRows(3)
|
||||
|
||||
|
||||
tdSql.error("select to_iso8601(timezone()) from ntb")
|
||||
tdSql.error("select to_iso8601('abc') from ntb")
|
||||
|
||||
|
@ -104,7 +104,7 @@ class TDTestCase:
|
|||
for i in err_param:
|
||||
tdSql.error(f"select to_iso8601({i}) from ntb")
|
||||
tdSql.error(f"select to_iso8601({i}) from db.ntb")
|
||||
|
||||
|
||||
tdSql.query("select to_iso8601(now) from stb")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select to_iso8601(now()) from stb")
|
||||
|
@ -126,7 +126,7 @@ class TDTestCase:
|
|||
tdSql.query(f"select to_iso8601(today()) {i}null from db.stb")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.checkData(0,0,None)
|
||||
|
||||
|
||||
def run(self): # sourcery skip: extract-duplicate-method
|
||||
self.check_base_function()
|
||||
self.check_customize_param_ms()
|
||||
|
|
|
@ -58,7 +58,7 @@ class TDTestCase:
|
|||
tag_sql += f"{k} {v},"
|
||||
create_stb_sql = f'create table {stbname} ({column_sql[:-1]}) tags({tag_sql[:-1]})'
|
||||
return create_stb_sql
|
||||
|
||||
|
||||
def data_check(self,column_dict={},tbname = '',values_list = [],tb_num = 1,tb = 'tb',precision = 'ms'):
|
||||
for k,v in column_dict.items():
|
||||
num_up = 0
|
||||
|
@ -175,7 +175,7 @@ class TDTestCase:
|
|||
tdSql.execute('drop database db')
|
||||
|
||||
def run(self): # sourcery skip: extract-duplicate-method
|
||||
|
||||
|
||||
self.today_check_ntb()
|
||||
self.today_check_stb_tb()
|
||||
|
||||
|
|
|
@ -47,7 +47,7 @@ class TDTestCase:
|
|||
c9 = "'nchar_val'"
|
||||
c10 = ts
|
||||
tdSql.execute(f" insert into {tbname} values ({ts},{c1},{c2},{c3},{c4},{c5},{c6},{c7},{c8},{c9},{c10})")
|
||||
|
||||
|
||||
tdSql.execute("use test")
|
||||
tbnames = ["stb", "sub_tb_1"]
|
||||
support_types = ["BIGINT", "SMALLINT", "TINYINT", "FLOAT", "DOUBLE", "INT"]
|
||||
|
@ -60,7 +60,7 @@ class TDTestCase:
|
|||
origin_sql = "select {} from {} order by tbname".format(colname, tbname)
|
||||
if coltype[1] in support_types:
|
||||
self.check_result_auto(origin_sql , abs_sql)
|
||||
|
||||
|
||||
|
||||
def prepare_datas(self):
|
||||
tdSql.execute(
|
||||
|
|
|
@ -47,7 +47,7 @@ class TDTestCase:
|
|||
c9 = "'nchar_val'"
|
||||
c10 = ts
|
||||
tdSql.execute(f" insert into {tbname} values ({ts},{c1},{c2},{c3},{c4},{c5},{c6},{c7},{c8},{c9},{c10})")
|
||||
|
||||
|
||||
tdSql.execute("use test")
|
||||
tbnames = ["stb", "sub_tb_1"]
|
||||
support_types = ["BIGINT", "SMALLINT", "TINYINT", "FLOAT", "DOUBLE", "INT"]
|
||||
|
@ -62,7 +62,7 @@ class TDTestCase:
|
|||
cols = random.sample(colnames,3)
|
||||
self.check_function("&",False,tbname,cols[0],cols[1],cols[2])
|
||||
self.check_function("|",False,tbname,cols[0],cols[1],cols[2])
|
||||
|
||||
|
||||
|
||||
def prepare_datas(self):
|
||||
tdSql.execute(
|
||||
|
@ -215,14 +215,14 @@ class TDTestCase:
|
|||
"abs value check pass , it work as expected ,sql is \"%s\" " % abs_query)
|
||||
|
||||
def check_function(self, opera ,agg, tbname , *args):
|
||||
|
||||
|
||||
if opera =="&":
|
||||
pass
|
||||
elif opera =="|":
|
||||
pass
|
||||
else:
|
||||
pass
|
||||
work_sql = " select "
|
||||
work_sql = " select "
|
||||
for ind , arg in enumerate(args):
|
||||
if ind ==len(args)-1:
|
||||
work_sql += f"cast({arg} as bigint) "
|
||||
|
@ -235,7 +235,7 @@ class TDTestCase:
|
|||
work_sql+= f" from {tbname} "
|
||||
tdSql.query(work_sql)
|
||||
work_result = tdSql.queryResult
|
||||
|
||||
|
||||
origin_sql = " select "
|
||||
for ind , arg in enumerate(args):
|
||||
if ind ==len(args)-1:
|
||||
|
@ -323,7 +323,7 @@ class TDTestCase:
|
|||
tdSql.checkData(0,0,None)
|
||||
tdSql.checkData(1,0,640)
|
||||
tdSql.checkData(10,0,0)
|
||||
|
||||
|
||||
# used for regular table
|
||||
tdSql.query("select abs(c1)&c3&c3 from t1")
|
||||
tdSql.checkData(0, 0, None)
|
||||
|
@ -349,7 +349,7 @@ class TDTestCase:
|
|||
self.check_function("&",False,"stb1","c1","floor(t1)","abs(c1+c2)","t1+1")
|
||||
self.check_function("&",True,"stb1","max(c1)","min(floor(t1))","sum(abs(c1+c2))","last(t1)+1")
|
||||
self.check_function("&",False,"stb1","abs(abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))))","floor(t1)","abs(c1+c2)","t1+1")
|
||||
|
||||
|
||||
# mix with common col
|
||||
tdSql.query("select c1&abs(c1)&c2&c3 ,c1,c2, t1 from ct1")
|
||||
tdSql.checkData(0, 0, 8)
|
||||
|
@ -388,7 +388,7 @@ class TDTestCase:
|
|||
# agg functions mix with agg functions
|
||||
|
||||
tdSql.query("select sum(c1&abs(c1)&c2&c3) ,max(c5), count(c5) from stb1")
|
||||
|
||||
|
||||
tdSql.query("select max(c1)&max(c2)|first(ts), count(c5) from ct1")
|
||||
|
||||
# bug fix for compute
|
||||
|
@ -409,7 +409,7 @@ class TDTestCase:
|
|||
tdSql.checkData(1, 2, 894.900000000)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def check_boundary_values(self):
|
||||
|
||||
|
@ -490,7 +490,7 @@ class TDTestCase:
|
|||
self.check_function("&", False ,"ct4","123","abs(c1)","t1","abs(t2)","abs(t3)","abs(t4)","t5")
|
||||
self.check_function("&", False ,"ct4","c1+2","abs(t2+2)","t3","abs(t4)","abs(t5)","abs(c1)","t5")
|
||||
|
||||
tdSql.query(" select sum(c1) from stb1 where t1+10 >1; ")
|
||||
tdSql.query(" select sum(c1) from stb1 where t1+10 >1; ")
|
||||
tdSql.query("select c1 ,t1 from stb1 where t1 =0 ")
|
||||
tdSql.checkRows(13)
|
||||
self.check_function("&", False ,"t1","c1+2","abs(c2)")
|
||||
|
@ -534,7 +534,7 @@ class TDTestCase:
|
|||
self.support_super_table_test()
|
||||
self.insert_datas_and_check_abs(self.tb_nums,self.row_nums,self.time_step)
|
||||
|
||||
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
|
|
|
@ -44,7 +44,7 @@ class TDTestCase:
|
|||
'col12': f'binary({self.binary_length})',
|
||||
'col13': f'nchar({self.nchar_length})'
|
||||
}
|
||||
|
||||
|
||||
self.tag_dict = {
|
||||
'ts_tag' : 'timestamp',
|
||||
't1': 'tinyint',
|
||||
|
@ -79,9 +79,9 @@ class TDTestCase:
|
|||
self.tag_values = [
|
||||
f'{self.tag_ts},{self.tag_tinyint},{self.tag_smallint},{self.tag_int},{self.tag_bigint},\
|
||||
{self.tag_utint},{self.tag_usint},{self.tag_uint},{self.tag_ubint},{self.tag_float},{self.tag_double},{self.tag_bool},"{self.binary_str}","{self.nchar_str}"'
|
||||
|
||||
|
||||
]
|
||||
|
||||
|
||||
self.percent = [1,50,100]
|
||||
self.param_list = ['default','t-digest']
|
||||
def insert_data(self,column_dict,tbname,row_num):
|
||||
|
@ -90,7 +90,7 @@ class TDTestCase:
|
|||
insert_list = []
|
||||
self.setsql.insert_values(column_dict,i,insert_sql,insert_list,self.ts)
|
||||
|
||||
|
||||
|
||||
def function_check_ntb(self):
|
||||
tdSql.prepare()
|
||||
tdSql.execute(self.setsql.set_create_normaltable_sql(self.ntbname,self.column_dict))
|
||||
|
@ -126,7 +126,7 @@ class TDTestCase:
|
|||
def run(self):
|
||||
self.function_check_ntb()
|
||||
self.function_check_stb()
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
|
|
@ -48,7 +48,7 @@ class TDTestCase:
|
|||
'col12': 'binary(20)',
|
||||
'col13': 'nchar(20)'
|
||||
}
|
||||
|
||||
|
||||
self.param_list = [1,100]
|
||||
def insert_data(self,column_dict,tbname,row_num):
|
||||
insert_sql = self.setsql.set_insertsql(column_dict,tbname,self.binary_str,self.nchar_str)
|
||||
|
@ -125,11 +125,11 @@ class TDTestCase:
|
|||
self.bottom_check_data(f'{stbname}_{i}','child_table')
|
||||
self.bottom_check_data(f'{stbname}','stable')
|
||||
tdSql.execute(f'drop database {self.dbname}')
|
||||
|
||||
|
||||
def run(self):
|
||||
self.bottom_check_ntb()
|
||||
self.bottom_check_stb()
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
|
|
@ -9,14 +9,14 @@ from util.sql import *
|
|||
from util.cases import *
|
||||
|
||||
class TDTestCase:
|
||||
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
|
||||
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
|
||||
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
|
||||
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
|
||||
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
|
||||
def prepare_datas(self):
|
||||
tdSql.execute(
|
||||
'''create table stb1
|
||||
|
@ -24,7 +24,7 @@ class TDTestCase:
|
|||
tags (t1 int)
|
||||
'''
|
||||
)
|
||||
|
||||
|
||||
tdSql.execute(
|
||||
'''
|
||||
create table t1
|
||||
|
@ -66,14 +66,14 @@ class TDTestCase:
|
|||
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
'''
|
||||
)
|
||||
|
||||
|
||||
def check_result_auto(self ,origin_query , ceil_query):
|
||||
pass
|
||||
ceil_result = tdSql.getResult(ceil_query)
|
||||
origin_result = tdSql.getResult(origin_query)
|
||||
|
||||
auto_result =[]
|
||||
|
||||
|
||||
for row in origin_result:
|
||||
row_check = []
|
||||
for elem in row:
|
||||
|
@ -88,13 +88,13 @@ class TDTestCase:
|
|||
for row_index , row in enumerate(ceil_result):
|
||||
for col_index , elem in enumerate(row):
|
||||
if auto_result[row_index][col_index] != elem:
|
||||
check_status = False
|
||||
check_status = False
|
||||
if not check_status:
|
||||
tdLog.notice("ceil function value has not as expected , sql is \"%s\" "%ceil_query )
|
||||
sys.exit(1)
|
||||
else:
|
||||
tdLog.info("ceil value check pass , it work as expected ,sql is \"%s\" "%ceil_query )
|
||||
|
||||
|
||||
def test_errors(self):
|
||||
error_sql_lists = [
|
||||
"select ceil from t1",
|
||||
|
@ -128,42 +128,42 @@ class TDTestCase:
|
|||
]
|
||||
for error_sql in error_sql_lists:
|
||||
tdSql.error(error_sql)
|
||||
|
||||
|
||||
def support_types(self):
|
||||
type_error_sql_lists = [
|
||||
"select ceil(ts) from t1" ,
|
||||
"select ceil(ts) from t1" ,
|
||||
"select ceil(c7) from t1",
|
||||
"select ceil(c8) from t1",
|
||||
"select ceil(c9) from t1",
|
||||
"select ceil(ts) from ct1" ,
|
||||
"select ceil(ts) from ct1" ,
|
||||
"select ceil(c7) from ct1",
|
||||
"select ceil(c8) from ct1",
|
||||
"select ceil(c9) from ct1",
|
||||
"select ceil(ts) from ct3" ,
|
||||
"select ceil(ts) from ct3" ,
|
||||
"select ceil(c7) from ct3",
|
||||
"select ceil(c8) from ct3",
|
||||
"select ceil(c9) from ct3",
|
||||
"select ceil(ts) from ct4" ,
|
||||
"select ceil(ts) from ct4" ,
|
||||
"select ceil(c7) from ct4",
|
||||
"select ceil(c8) from ct4",
|
||||
"select ceil(c9) from ct4",
|
||||
"select ceil(ts) from stb1" ,
|
||||
"select ceil(ts) from stb1" ,
|
||||
"select ceil(c7) from stb1",
|
||||
"select ceil(c8) from stb1",
|
||||
"select ceil(c9) from stb1" ,
|
||||
|
||||
"select ceil(ts) from stbbb1" ,
|
||||
"select ceil(ts) from stbbb1" ,
|
||||
"select ceil(c7) from stbbb1",
|
||||
|
||||
"select ceil(ts) from tbname",
|
||||
"select ceil(c9) from tbname"
|
||||
|
||||
]
|
||||
|
||||
|
||||
for type_sql in type_error_sql_lists:
|
||||
tdSql.error(type_sql)
|
||||
|
||||
|
||||
|
||||
|
||||
type_sql_lists = [
|
||||
"select ceil(c1) from t1",
|
||||
"select ceil(c2) from t1",
|
||||
|
@ -193,16 +193,16 @@ class TDTestCase:
|
|||
"select ceil(c5) from stb1",
|
||||
"select ceil(c6) from stb1",
|
||||
|
||||
"select ceil(c6) as alisb from stb1",
|
||||
"select ceil(c6) alisb from stb1",
|
||||
"select ceil(c6) as alisb from stb1",
|
||||
"select ceil(c6) alisb from stb1",
|
||||
]
|
||||
|
||||
for type_sql in type_sql_lists:
|
||||
tdSql.query(type_sql)
|
||||
|
||||
|
||||
def basic_ceil_function(self):
|
||||
|
||||
# basic query
|
||||
# basic query
|
||||
tdSql.query("select c1 from ct3")
|
||||
tdSql.checkRows(0)
|
||||
tdSql.query("select c1 from t1")
|
||||
|
@ -222,7 +222,7 @@ class TDTestCase:
|
|||
tdSql.query("select ceil(c5) from ct3")
|
||||
tdSql.checkRows(0)
|
||||
tdSql.query("select ceil(c6) from ct3")
|
||||
|
||||
|
||||
# used for regular table
|
||||
tdSql.query("select ceil(c1) from t1")
|
||||
tdSql.checkData(0, 0, None)
|
||||
|
@ -240,7 +240,7 @@ class TDTestCase:
|
|||
tdSql.checkData(5, 5, None)
|
||||
|
||||
self.check_result_auto( "select c1, c2, c3 , c4, c5 from t1", "select (c1), ceil(c2) ,ceil(c3), ceil(c4), ceil(c5) from t1")
|
||||
|
||||
|
||||
# used for sub table
|
||||
tdSql.query("select ceil(c1) from ct1")
|
||||
tdSql.checkData(0, 0, 8)
|
||||
|
@ -252,20 +252,20 @@ class TDTestCase:
|
|||
self.check_result_auto( "select c1, c2, c3 , c4, c5 from ct1", "select (c1), ceil(c2) ,ceil(c3), ceil(c4), ceil(c5) from ct1")
|
||||
self.check_result_auto("select ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(c1)))))))))) nest_col_func from ct1;","select c1 from ct1" )
|
||||
|
||||
# used for stable table
|
||||
|
||||
# used for stable table
|
||||
|
||||
tdSql.query("select ceil(c1) from stb1")
|
||||
tdSql.checkRows(25)
|
||||
self.check_result_auto( "select c1, c2, c3 , c4, c5 from ct4 ", "select (c1), ceil(c2) ,ceil(c3), ceil(c4), ceil(c5) from ct4")
|
||||
self.check_result_auto("select ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(c1)))))))))) nest_col_func from ct4;" , "select c1 from ct4" )
|
||||
|
||||
|
||||
|
||||
# used for not exists table
|
||||
tdSql.error("select ceil(c1) from stbbb1")
|
||||
tdSql.error("select ceil(c1) from tbname")
|
||||
tdSql.error("select ceil(c1) from ct5")
|
||||
|
||||
# mix with common col
|
||||
# mix with common col
|
||||
tdSql.query("select c1, ceil(c1) from ct1")
|
||||
tdSql.checkData(0 , 0 ,8)
|
||||
tdSql.checkData(0 , 1 ,8)
|
||||
|
@ -290,7 +290,7 @@ class TDTestCase:
|
|||
tdSql.checkData(0 , 1 ,None)
|
||||
tdSql.checkData(0 , 2 ,None)
|
||||
tdSql.checkData(0 , 3 ,None)
|
||||
|
||||
|
||||
tdSql.checkData(3 , 0 , 6)
|
||||
tdSql.checkData(3 , 1 , 6)
|
||||
tdSql.checkData(3 , 2 ,6.66000)
|
||||
|
@ -311,7 +311,7 @@ class TDTestCase:
|
|||
tdSql.query("select max(c5), count(c5) from stb1")
|
||||
tdSql.query("select max(c5), count(c5) from ct1")
|
||||
|
# bug fix for count
tdSql.query("select count(c1) from ct4 ")
tdSql.checkData(0,0,9)

@ -322,7 +322,7 @@ class TDTestCase:
tdSql.query("select count(*) from stb1 ")
tdSql.checkData(0,0,25)

# bug fix for compute
tdSql.query("select c1, abs(c1) -0 ,ceil(c1)-0 from ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)

@ -373,10 +373,10 @@ class TDTestCase:
tdSql.checkData(0,3,8.000000000)
tdSql.checkData(0,4,7.900000000)
tdSql.checkData(0,5,3.000000000)

def ceil_Arithmetic(self):
pass

def check_boundary_values(self):

tdSql.execute("drop database if exists bound_test")

@ -405,14 +405,14 @@ class TDTestCase:
tdSql.execute(
f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)

tdSql.error(
f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
self.check_result_auto( "select c1, c2, c3 , c4, c5 ,c6 from sub1_bound ", "select ceil(c1), ceil(c2) ,ceil(c3), ceil(c4), ceil(c5) ,ceil(c6) from sub1_bound")
self.check_result_auto( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select ceil(c1), ceil(c2) ,ceil(c3), ceil(c3), ceil(c2) ,ceil(c1) from sub1_bound")
self.check_result_auto("select ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(c1)))))))))) nest_col_func from sub1_bound;" , "select ceil(c1) from sub1_bound" )

# check basic elem for table per row
tdSql.query("select ceil(c1+0.2) ,ceil(c2) , ceil(c3+0.3) , ceil(c4-0.3), ceil(c5/2), ceil(c6/2) from sub1_bound ")
tdSql.checkData(0, 0, 2147483648.000000000)

@ -426,7 +426,7 @@ class TDTestCase:
tdSql.checkData(4, 4, -169499995645668991474575059260979281920.000000000)

self.check_result_auto("select c1+1 ,c2 , c3*1 , c4/2, c5/2, c6 from sub1_bound" ,"select ceil(c1+1) ,ceil(c2) , ceil(c3*1) , ceil(c4/2), ceil(c5)/2, ceil(c6) from sub1_bound ")

def support_super_table_test(self):
tdSql.execute(" use db ")
self.check_result_auto( " select c5 from stb1 order by ts " , "select ceil(c5) from stb1 order by ts" )

@ -444,26 +444,26 @@ class TDTestCase:
tdSql.prepare()

tdLog.printNoPrefix("==========step1:create table ==============")

self.prepare_datas()

tdLog.printNoPrefix("==========step2:test errors ==============")

self.test_errors()

tdLog.printNoPrefix("==========step3:support types ============")

self.support_types()

tdLog.printNoPrefix("==========step4: ceil basic query ============")

self.basic_ceil_function()

tdLog.printNoPrefix("==========step5: ceil boundary query ============")

self.check_boundary_values()

tdLog.printNoPrefix("==========step6: ceil filter query ============")

self.abs_func_filter()

@ -9,13 +9,13 @@ from util.cases import *

class TDTestCase:
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, powSql):
tdLog.debug(f"start to execute {__file__}")
tdSql.init(conn.cursor())

def prepare_datas(self):
tdSql.execute(
'''create table stb1

@ -23,7 +23,7 @@ class TDTestCase:
tags (t1 int)
'''
)

tdSql.execute(
'''
create table t1

@ -65,14 +65,14 @@ class TDTestCase:
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)

def check_result_auto_cos(self ,origin_query , pow_query):
pow_result = tdSql.getResult(pow_query)
origin_result = tdSql.getResult(origin_query)

auto_result =[]

for row in origin_result:
row_check = []
for elem in row:

@ -90,7 +90,7 @@ class TDTestCase:
if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None):
check_status = False
elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
check_status = False
else:
pass
if not check_status:

@ -98,7 +98,7 @@ class TDTestCase:
sys.exit(1)
else:
tdLog.info("cos value check pass , it works as expected ,sql is \"%s\" "%pow_query )

def test_errors(self):
|
||||
error_sql_lists = [
|
||||
"select cos from t1",
|
||||
|
@ -132,42 +132,42 @@ class TDTestCase:
|
|||
]
|
||||
for error_sql in error_sql_lists:
|
||||
tdSql.error(error_sql)
|
||||
|
||||
|
||||
def support_types(self):
|
||||
type_error_sql_lists = [
"select cos(ts) from t1" ,
"select cos(c7) from t1",
"select cos(c8) from t1",
"select cos(c9) from t1",
"select cos(ts) from ct1" ,
"select cos(c7) from ct1",
"select cos(c8) from ct1",
"select cos(c9) from ct1",
"select cos(ts) from ct3" ,
"select cos(c7) from ct3",
"select cos(c8) from ct3",
"select cos(c9) from ct3",
"select cos(ts) from ct4" ,
"select cos(c7) from ct4",
"select cos(c8) from ct4",
"select cos(c9) from ct4",
"select cos(ts) from stb1" ,
"select cos(c7) from stb1",
"select cos(c8) from stb1",
"select cos(c9) from stb1" ,

"select cos(ts) from stbbb1" ,
"select cos(c7) from stbbb1",

"select cos(ts) from tbname",
"select cos(c9) from tbname"

]
|
||||
|
||||
|
||||
for type_sql in type_error_sql_lists:
|
||||
tdSql.error(type_sql)
|
||||
|
||||
|
||||
|
||||
|
||||
type_sql_lists = [
|
||||
"select cos(c1) from t1",
|
||||
"select cos(c2) from t1",
|
||||
|
@ -197,16 +197,16 @@ class TDTestCase:
|
|||
"select cos(c5) from stb1",
|
||||
"select cos(c6) from stb1",
|
||||
"select cos(c6) as alisb from stb1",
"select cos(c6) alisb from stb1",
]
|
||||
|
||||
for type_sql in type_sql_lists:
|
||||
tdSql.query(type_sql)
|
||||
|
||||
|
||||
def basic_cosin_function(self):
|
||||
|
||||
# basic query
|
||||
tdSql.query("select c1 from ct3")
|
||||
tdSql.checkRows(0)
|
||||
tdSql.query("select c1 from t1")
|
||||
|
@ -247,7 +247,7 @@ class TDTestCase:
|
|||
tdSql.checkData(5, 5, None)
|
||||
|
||||
self.check_result_auto_cos( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from t1", "select cos(abs(c1)), cos(abs(c2)) ,cos(abs(c3)), cos(abs(c4)), cos(abs(c5)) from t1")
|
||||
|
||||
|
||||
# used for sub table
|
||||
tdSql.query("select c2 ,cos(c2) from ct1")
|
||||
tdSql.checkData(0, 1, 0.975339851)
|
||||
|
@ -263,7 +263,7 @@ class TDTestCase:
|
|||
tdSql.checkData(5 , 2, None)
|
||||
|
||||
self.check_result_auto_cos( "select c1, c2, c3 , c4, c5 from ct1", "select cos(c1), cos(c2) ,cos(c3), cos(c4), cos(c5) from ct1")
|
||||
|
||||
|
||||
# nest query for cos functions
|
||||
tdSql.query("select c4 , cos(c4) ,cos(cos(c4)) , cos(cos(cos(c4))) from ct1;")
|
||||
tdSql.checkData(0 , 0 , 88)
|
||||
|
@ -281,21 +281,21 @@ class TDTestCase:
|
|||
tdSql.checkData(11 , 2 , 0.999207254)
|
||||
tdSql.checkData(11 , 3 , 0.540969209)
|
||||
|
||||
# used for stable table
|
||||
|
||||
tdSql.query("select cos(c1) from stb1")
|
||||
tdSql.checkRows(25)
|
||||
|
||||
|
||||
|
||||
# used for not exists table
|
||||
tdSql.error("select cos(c1) from stbbb1")
|
||||
tdSql.error("select cos(c1) from tbname")
|
||||
tdSql.error("select cos(c1) from ct5")
|
||||
|
||||
# mix with common col
|
||||
tdSql.query("select c1, cos(c1) from ct1")
|
||||
tdSql.query("select c2, cos(c2) from ct4")
|
||||
|
||||
|
||||
|
||||
# mix with common functions
|
||||
tdSql.query("select c1, cos(c1),cos(c1), cos(cos(c1)) from ct4 ")
|
||||
|
@ -303,7 +303,7 @@ class TDTestCase:
|
|||
tdSql.checkData(0 , 1 ,None)
|
||||
tdSql.checkData(0 , 2 ,None)
|
||||
tdSql.checkData(0 , 3 ,None)
|
||||
|
||||
|
||||
tdSql.checkData(3 , 0 , 6)
|
||||
tdSql.checkData(3 , 1 ,0.960170287)
|
||||
tdSql.checkData(3 , 2 ,0.960170287)
|
||||
|
@ -324,8 +324,8 @@ class TDTestCase:
|
|||
tdSql.query("select max(c5), count(c5) from stb1")
|
||||
tdSql.query("select max(c5), count(c5) from ct1")
|
||||
|
||||
|
||||
# # bug fix for compute
|
||||
tdSql.query("select c1, cos(c1) -0 ,cos(c1-4)-0 from ct4 ")
|
||||
tdSql.checkData(0, 0, None)
|
||||
tdSql.checkData(0, 1, None)
|
||||
|
@ -394,10 +394,10 @@ class TDTestCase:
|
|||
tdSql.checkData(0,3,8.000000000)
|
||||
tdSql.checkData(0,4,7.900000000)
|
||||
tdSql.checkData(0,5,0.000000000)
|
||||
|
||||
|
||||
def pow_Arithmetic(self):
|
||||
pass
|
||||
|
||||
|
||||
def check_boundary_values(self):
|
||||
|
||||
PI=3.1415926
|
||||
|
@ -426,11 +426,11 @@ class TDTestCase:
|
|||
f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
self.check_result_auto_cos( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from sub1_bound ", "select cos(abs(c1)), cos(abs(c2)) ,cos(abs(c3)), cos(abs(c4)), cos(abs(c5)) from sub1_bound")
|
||||
|
||||
|
||||
self.check_result_auto_cos( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select cos(c1), cos(c2) ,cos(c3), cos(c3), cos(c2) ,cos(c1) from sub1_bound")
|
||||
|
||||
self.check_result_auto_cos("select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from sub1_bound" , "select cos(abs(c1)) from sub1_bound" )
|
||||
|
||||
|
||||
# check basic elem for table per row
|
||||
tdSql.query("select cos(abs(c1)) ,cos(abs(c2)) , cos(abs(c3)) , cos(abs(c4)), cos(abs(c5)), cos(abs(c6)) from sub1_bound ")
|
||||
tdSql.checkData(0,0,math.cos(2147483647))
|
||||
|
@ -489,36 +489,36 @@ class TDTestCase:
|
|||
self.check_result_auto_cos( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select cos(t1) ,cos(c5) from stb1 where c1 > 0 order by tbname" )
|
||||
self.check_result_auto_cos( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select cos(t1) , cos(c5) from stb1 where c1 > 0 order by tbname" )
|
||||
pass
|
||||
|
||||
|
||||
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()

tdLog.printNoPrefix("==========step1:create table ==============")

self.prepare_datas()

tdLog.printNoPrefix("==========step2:test errors ==============")

self.test_errors()

tdLog.printNoPrefix("==========step3:support types ============")

self.support_types()

tdLog.printNoPrefix("==========step4: cos basic query ============")

self.basic_cosin_function()

tdLog.printNoPrefix("==========step5: big number cos query ============")

self.test_big_number()

tdLog.printNoPrefix("==========step6: cos boundary query ============")

self.check_boundary_values()

tdLog.printNoPrefix("==========step7: cos filter query ============")

self.abs_func_filter()

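Editor's note: the check_result_auto_cos helper in this file recomputes cos() on the client and accepts a result when it matches the server value within a small tolerance, treating NULLs as equal. The following is a minimal standalone sketch of that comparison idea only; it is not part of the test file, and the function name and sample rows are hypothetical.

```python
import math

def compare_with_tolerance(origin_rows, cos_rows, tol=1e-8):
    # Recompute cos() on the client side; None stands in for SQL NULL.
    expected = [[None if v is None else math.cos(v) for v in row] for row in origin_rows]
    for exp_row, got_row in zip(expected, cos_rows):
        for exp, got in zip(exp_row, got_row):
            if exp is None or got is None:
                if exp is not got:   # one side NULL, the other not: mismatch
                    return False
            elif abs(exp - got) > tol:
                return False
    return True

# Rows shaped like the lists tdSql.getResult() returns.
print(compare_with_tolerance([(0.0, None)], [(1.0, None)]))  # True: cos(0) == 1, NULL matches NULL
```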
|
@ -13,24 +13,24 @@ class TDTestCase:
|
|||
def run(self):
tdSql.prepare()

tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
tdSql.execute("create table stb_1 using stb tags('beijing')")
tdSql.execute("create table stb_2 using stb tags('shanghai')")

tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''')

for i in range(self.rowNum):
tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
tdSql.execute("insert into stb_2 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))

for i in range(self.rowNum):
tdSql.execute("insert into ntb values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))

tdSql.query("select count(*) from stb")
tdSql.checkData(0,0,20)
|
||||
tdSql.query("select count(*) from db.stb")
|
||||
|
@ -95,7 +95,7 @@ class TDTestCase:
|
|||
|
||||
|
||||
tdSql.query("select count(ts) from db.stb_1")
|
||||
tdSql.checkData(0,0,10)
|
||||
tdSql.query("select count(ts) from db.stb_1")
|
||||
tdSql.checkData(0,0,10)
|
||||
tdSql.query("select count(col1) from stb_1")
|
||||
|
@ -171,7 +171,7 @@ class TDTestCase:
|
|||
tdSql.query("select count(col1),count(ts) from stb")
|
||||
tdSql.checkData(0,0,20)
|
||||
tdSql.checkData(0,1,21)
|
||||
|
||||
|
||||
tdSql.query("select count(col1) from db.stb")
|
||||
tdSql.checkData(0,0,20)
|
||||
tdSql.query("select count(col1),count(ts) from db.stb")
|
||||
|
@ -184,7 +184,7 @@ class TDTestCase:
|
|||
tdSql.query("select count(col1) from stb group by col7")
|
||||
tdSql.checkRows(3)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def stop(self):
|
||||
|
@ -193,4 +193,4 @@ class TDTestCase:
|
|||
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
|
|
|
@ -0,0 +1,176 @@
|
|||
# author : wenzhouwww
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
self.row_nums = 10
|
||||
self.tb_nums = 10
|
||||
self.ts = 1537146000000
|
||||
|
||||
def prepare_datas(self, stb_name , tb_nums , row_nums ):
|
||||
tdSql.execute(" use db ")
|
||||
tdSql.execute(f" create stable {stb_name} (ts timestamp , c1 int , c2 bigint , c3 float , c4 double , c5 smallint , c6 tinyint , c7 bool , c8 binary(36) , c9 nchar(36) , uc1 int unsigned,\
|
||||
uc2 bigint unsigned ,uc3 smallint unsigned , uc4 tinyint unsigned ) tags(t1 timestamp , t2 int , t3 bigint , t4 float , t5 double , t6 smallint , t7 tinyint , t8 bool , t9 binary(36)\
|
||||
, t10 nchar(36) , t11 int unsigned , t12 bigint unsigned ,t13 smallint unsigned , t14 tinyint unsigned ) ")
|
||||
|
||||
for i in range(tb_nums):
|
||||
tbname = f"sub_{stb_name}_{i}"
|
||||
ts = self.ts + i*10000
|
||||
tdSql.execute(f"create table {tbname} using {stb_name} tags ({ts} , {i} , {i}*10 ,{i}*1.0,{i}*1.0 , 1 , 2, 'true', 'binary_{i}' ,'nchar_{i}',{i},{i},10,20 )")
|
||||
|
||||
for row in range(row_nums):
|
||||
ts = self.ts + row*1000
|
||||
tdSql.execute(f"insert into {tbname} values({ts} , {row} , {row} , {row} , {row} , 1 , 2 , 'true' , 'binary_{row}' , 'nchar_{row}' , {row} , {row} , 1 ,2 )")
|
||||
|
||||
for null in range(5):
|
||||
ts = self.ts + row_nums*1000 + null*1000
|
||||
tdSql.execute(f"insert into {tbname} values({ts} , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL )")
|
||||
|
||||
def basic_query(self):
|
||||
tdSql.query("select count(*) from stb")
|
||||
tdSql.checkData(0,0,(self.row_nums + 5 )*self.tb_nums)
|
||||
tdSql.query("select count(c1) from stb")
|
||||
tdSql.checkData(0,0,(self.row_nums )*self.tb_nums)
|
||||
tdSql.query(" select tbname , count(*) from stb partition by tbname ")
|
||||
tdSql.checkRows(self.tb_nums)
|
||||
tdSql.query(" select count(c1) from stb group by t1 order by t1 ")
|
||||
tdSql.checkRows(self.tb_nums)
|
||||
tdSql.error(" select count(c1) from stb group by c1 order by t1 ")
|
||||
tdSql.error(" select count(t1) from stb group by c1 order by t1 ")
|
||||
tdSql.query(" select count(c1) from stb group by tbname order by tbname ")
|
||||
tdSql.checkRows(self.tb_nums)
|
||||
# bug need fix
|
||||
# tdSql.query(" select count(t1) from stb group by t2 order by t2 ")
|
||||
# tdSql.checkRows(self.tb_nums)
|
||||
tdSql.query(" select count(c1) from stb group by c1 order by c1 ")
|
||||
tdSql.checkRows(self.row_nums+1)
|
||||
|
||||
tdSql.query(" select c1 , count(c1) from stb group by c1 order by c1 ")
|
||||
tdSql.checkRows(self.row_nums+1)
|
||||
|
||||
tdSql.query("select count(c1) from stb group by abs(c1) order by abs(c1)")
|
||||
tdSql.checkRows(self.row_nums+1)
|
||||
tdSql.query("select abs(c1+c3), count(c1+c3) from stb group by abs(c1+c3) order by abs(c1+c3)")
|
||||
tdSql.checkRows(self.row_nums+1)
|
||||
tdSql.query("select count(c1+c3)+max(c2) ,abs(c1) from stb group by abs(c1) order by abs(c1)")
|
||||
tdSql.checkRows(self.row_nums+1)
|
||||
tdSql.error("select count(c1+c3)+max(c2) ,abs(c1) ,abs(t1) from stb group by abs(c1) order by abs(t1)+c2")
|
||||
tdSql.error("select count(c1+c3)+max(c2) ,abs(c1) from stb group by abs(c1) order by abs(c1)+c2")
|
||||
tdSql.query("select abs(c1+c3)+abs(c2) , count(c1+c3)+count(c2) from stb group by abs(c1+c3)+abs(c2) order by abs(c1+c3)+abs(c2)")
|
||||
tdSql.checkRows(self.row_nums+1)
|
||||
|
||||
tdSql.query("select count(c1) , count(t2) from stb where abs(c1+t2)=1 partition by tbname")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.query("select count(c1) from stb where abs(c1+t2)=1 partition by tbname")
|
||||
tdSql.checkRows(2)
|
||||
|
||||
tdSql.query("select tbname , count(c1) from stb partition by tbname order by tbname")
|
||||
tdSql.checkRows(self.tb_nums)
|
||||
tdSql.checkData(0,1,self.row_nums)
|
||||
|
||||
tdSql.error("select tbname , count(c1) from stb partition by t1 order by t1")
|
||||
tdSql.error("select tbname , count(t1) from stb partition by t1 order by t1")
|
||||
tdSql.error("select tbname , count(t1) from stb partition by t2 order by t2")
|
||||
|
||||
# # bug need fix
|
||||
# tdSql.query("select t2 , count(t1) from stb partition by t2 order by t2")
|
||||
# tdSql.checkRows(self.tb_nums)
|
||||
|
||||
tdSql.query("select tbname , count(c1) from stb partition by tbname order by tbname")
|
||||
tdSql.checkRows(self.tb_nums)
|
||||
tdSql.checkData(0,1,self.row_nums)
|
||||
|
||||
|
||||
tdSql.error("select tbname , count(c1) from stb partition by t2 order by t2")
|
||||
|
||||
tdSql.query("select c2, count(c1) from stb partition by c2 order by c2 desc")
|
||||
tdSql.checkRows(self.tb_nums+1)
|
||||
tdSql.checkData(0,1,self.tb_nums)
|
||||
|
||||
tdSql.error("select tbname , count(c1) from stb partition by c1 order by c2")
|
||||
|
||||
|
||||
tdSql.query("select tbname , abs(t2) from stb partition by c2 order by t2")
|
||||
tdSql.checkRows(self.tb_nums*(self.row_nums+5))
|
||||
|
||||
tdSql.query("select count(c1) , count(t2) from stb partition by c2 ")
|
||||
tdSql.checkRows(self.row_nums+1)
|
||||
tdSql.checkData(0,1,self.row_nums)
|
||||
|
||||
tdSql.query("select count(c1) , count(t2) ,c2 from stb partition by c2 order by c2")
|
||||
tdSql.checkRows(self.row_nums+1)
|
||||
|
||||
tdSql.query("select count(c1) , count(t1) ,max(c2) ,tbname from stb partition by tbname order by tbname")
|
||||
tdSql.checkRows(self.tb_nums)
|
||||
tdSql.checkCols(4)
|
||||
|
||||
tdSql.query("select count(c1) , count(t2) ,t1 from stb partition by t1 order by t1")
|
||||
tdSql.checkRows(self.tb_nums)
|
||||
tdSql.checkData(0,0,self.row_nums)
|
||||
|
||||
# bug need fix
|
||||
# tdSql.query("select count(c1) , count(t1) ,abs(c1) from stb partition by abs(c1) order by abs(c1)")
|
||||
# tdSql.checkRows(self.row_nums+1)
|
||||
|
||||
|
||||
tdSql.query("select count(ceil(c2)) , count(floor(t2)) ,count(floor(c2)) from stb partition by abs(c2) order by abs(c2)")
|
||||
tdSql.checkRows(self.row_nums+1)
|
||||
|
||||
|
||||
tdSql.query("select count(ceil(c1-2)) , count(floor(t2+1)) ,max(c2-c1) from stb partition by abs(floor(c1)) order by abs(floor(c1))")
|
||||
tdSql.checkRows(self.row_nums+1)
|
||||
|
||||
|
||||
# interval
|
||||
tdSql.query("select count(c1) from stb interval(2s) sliding(1s)")
|
||||
|
||||
# bug need fix
|
||||
|
||||
tdSql.query('select max(c1) from stb where ts>="2022-07-06 16:00:00.000 " and ts < "2022-07-06 17:00:00.000 " interval(50s) sliding(30s) fill(NULL)')
|
||||
|
||||
tdSql.query(" select tbname , count(c1) from stb partition by tbname interval(10s) slimit 5 soffset 1 ")
|
||||
|
||||
tdSql.query("select tbname , count(c1) from stb partition by tbname interval(10s)")
|
||||
|
||||
tdSql.query("select tbname , count(c1) from sub_stb_1 partition by tbname interval(10s)")
|
||||
tdSql.checkData(0,0,'sub_stb_1')
|
||||
tdSql.checkData(0,1,self.row_nums)
|
||||
|
||||
# tdSql.query(" select tbname , count(c1) from stb partition by tbname order by tbname slimit 5 soffset 0 ")
|
||||
# tdSql.checkRows(5)
|
||||
|
||||
# tdSql.query(" select tbname , count(c1) from stb partition by tbname order by tbname slimit 5 soffset 1 ")
|
||||
# tdSql.checkRows(5)
|
||||
|
||||
tdSql.query(" select tbname , count(c1) from sub_stb_1 partition by tbname interval(10s) sliding(5s) ")
|
||||
|
||||
tdSql.query(f'select max(c1) from stb where ts>={self.ts} and ts < {self.ts}+10000 partition by tbname interval(50s) sliding(30s)')
|
||||
tdSql.query(f'select max(c1) from stb where ts>={self.ts} and ts < {self.ts}+10000 interval(50s) sliding(30s)')
|
||||
tdSql.query(f'select tbname , count(c1) from stb where ts>={self.ts} and ts < {self.ts}+10000 partition by tbname interval(50s) sliding(30s)')
|
||||
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
self.prepare_datas("stb",self.tb_nums,self.row_nums)
|
||||
self.basic_query()
|
||||
|
||||
# # coverage case for taosd crash about bug fix
|
||||
tdSql.query(" select sum(c1) from stb where t2+10 >1 ")
|
||||
tdSql.query(" select count(c1),count(t1) from stb where -t2<1 ")
|
||||
tdSql.query(" select tbname ,max(ceil(c1)) from stb group by tbname ")
|
||||
tdSql.query(" select avg(abs(c1)) , tbname from stb group by tbname ")
|
||||
tdSql.query(" select t1,c1 from stb where abs(t2+c1)=1 ")
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -419,7 +419,7 @@ class TDTestCase:
|
|||
tdSql.checkData(3,0,4)
|
||||
tdSql.query("select csum(abs(c1))+2 from t1 ")
|
||||
tdSql.checkRows(4)
|
||||
|
||||
|
||||
def csum_support_stable(self):
|
||||
tdSql.query(" select csum(1) from stb1 ")
|
||||
tdSql.checkRows(70)
|
||||
|
@ -434,17 +434,17 @@ class TDTestCase:
|
|||
tdSql.query("select csum(st1+c1) from stb1 partition by tbname")
|
||||
tdSql.checkRows(40)
|
||||
|
||||
# # bug need fix
|
||||
# tdSql.query("select csum(st1+c1) from stb1 partition by tbname slimit 1 ")
|
||||
# tdSql.checkRows(4)
|
||||
# tdSql.error("select csum(st1+c1) from stb1 partition by tbname limit 1 ")
|
||||
|
||||
|
||||
# bug need fix
|
||||
tdSql.query("select csum(st1+c1) from stb1 partition by tbname")
|
||||
tdSql.checkRows(40)
|
||||
|
||||
# bug need fix
|
||||
# tdSql.query("select tbname , csum(c1) from stb1 partition by tbname")
|
||||
# tdSql.checkRows(40)
|
||||
# tdSql.query("select tbname , csum(st1) from stb1 partition by tbname")
|
||||
|
@ -452,7 +452,7 @@ class TDTestCase:
|
|||
# tdSql.query("select tbname , csum(st1) from stb1 partition by tbname slimit 1")
|
||||
# tdSql.checkRows(7)
|
||||
|
||||
# partition by tags
|
||||
# tdSql.query("select st1 , csum(c1) from stb1 partition by st1")
|
||||
# tdSql.checkRows(40)
|
||||
# tdSql.query("select csum(c1) from stb1 partition by st1")
|
||||
|
@ -491,4 +491,4 @@ class TDTestCase:
|
|||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
|
|
|
@ -16,7 +16,7 @@ class TDTestCase:
|
|||
def init(self, conn, logSql):
|
||||
tdLog.debug(f"start to execute {__file__}")
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
|
||||
def prepare_datas(self):
|
||||
tdSql.execute(
|
||||
'''create table stb1
|
||||
|
@ -24,7 +24,7 @@ class TDTestCase:
|
|||
tags (t1 int)
|
||||
'''
|
||||
)
|
||||
|
||||
|
||||
tdSql.execute(
|
||||
'''
|
||||
create table t1
|
||||
|
@ -66,14 +66,14 @@ class TDTestCase:
|
|||
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
'''
|
||||
)
|
||||
|
||||
|
||||
def check_result_auto(self ,origin_query , floor_query):
|
||||
floor_result = tdSql.getResult(floor_query)
|
||||
origin_result = tdSql.getResult(origin_query)
|
||||
|
||||
auto_result =[]
|
||||
|
||||
|
||||
for row in origin_result:
|
||||
row_check = []
|
||||
for elem in row:
|
||||
|
@ -88,13 +88,13 @@ class TDTestCase:
|
|||
for row_index , row in enumerate(floor_result):
|
||||
for col_index , elem in enumerate(row):
|
||||
if auto_result[row_index][col_index] != elem:
|
||||
check_status = False
|
||||
if not check_status:
|
||||
tdLog.notice("floor function value is not as expected , sql is \"%s\" "%floor_query )
sys.exit(1)
else:
tdLog.info("floor value check pass , it works as expected ,sql is \"%s\" "%floor_query )
|
||||
|
||||
|
||||
def test_errors(self):
|
||||
error_sql_lists = [
|
||||
"select floor from t1",
|
||||
|
@ -128,42 +128,42 @@ class TDTestCase:
|
|||
]
|
||||
for error_sql in error_sql_lists:
|
||||
tdSql.error(error_sql)
|
||||
|
||||
|
||||
def support_types(self):
|
||||
type_error_sql_lists = [
"select floor(ts) from t1" ,
"select floor(c7) from t1",
"select floor(c8) from t1",
"select floor(c9) from t1",
"select floor(ts) from ct1" ,
"select floor(c7) from ct1",
"select floor(c8) from ct1",
"select floor(c9) from ct1",
"select floor(ts) from ct3" ,
"select floor(c7) from ct3",
"select floor(c8) from ct3",
"select floor(c9) from ct3",
"select floor(ts) from ct4" ,
"select floor(c7) from ct4",
"select floor(c8) from ct4",
"select floor(c9) from ct4",
"select floor(ts) from stb1" ,
"select floor(c7) from stb1",
"select floor(c8) from stb1",
"select floor(c9) from stb1" ,

"select floor(ts) from stbbb1" ,
"select floor(c7) from stbbb1",

"select floor(ts) from tbname",
"select floor(c9) from tbname"

]
|
||||
|
||||
|
||||
for type_sql in type_error_sql_lists:
|
||||
tdSql.error(type_sql)
|
||||
|
||||
|
||||
|
||||
|
||||
type_sql_lists = [
|
||||
"select floor(c1) from t1",
|
||||
"select floor(c2) from t1",
|
||||
|
@ -193,16 +193,16 @@ class TDTestCase:
|
|||
"select floor(c5) from stb1",
|
||||
"select floor(c6) from stb1",
|
||||
|
||||
"select floor(c6) as alisb from stb1",
|
||||
"select floor(c6) alisb from stb1",
|
||||
"select floor(c6) as alisb from stb1",
|
||||
"select floor(c6) alisb from stb1",
|
||||
]
|
||||
|
||||
for type_sql in type_sql_lists:
|
||||
tdSql.query(type_sql)
|
||||
|
||||
|
||||
def basic_floor_function(self):
|
||||
|
||||
# basic query
|
||||
tdSql.query("select c1 from ct3")
|
||||
tdSql.checkRows(0)
|
||||
tdSql.query("select c1 from t1")
|
||||
|
@ -222,7 +222,7 @@ class TDTestCase:
|
|||
tdSql.query("select floor(c5) from ct3")
|
||||
tdSql.checkRows(0)
|
||||
tdSql.query("select floor(c6) from ct3")
|
||||
|
||||
|
||||
# used for regular table
|
||||
tdSql.query("select floor(c1) from t1")
|
||||
tdSql.checkData(0, 0, None)
|
||||
|
@ -240,7 +240,7 @@ class TDTestCase:
|
|||
tdSql.checkData(5, 5, None)
|
||||
|
||||
self.check_result_auto( "select c1, c2, c3 , c4, c5 from t1", "select (c1), floor(c2) ,floor(c3), floor(c4), floor(c5) from t1")
|
||||
|
||||
|
||||
# used for sub table
|
||||
tdSql.query("select floor(c1) from ct1")
|
||||
tdSql.checkData(0, 0, 8)
|
||||
|
@ -252,20 +252,20 @@ class TDTestCase:
|
|||
self.check_result_auto( "select c1, c2, c3 , c4, c5 from ct1", "select (c1), floor(c2) ,floor(c3), floor(c4), floor(c5) from ct1")
|
||||
self.check_result_auto("select floor(floor(floor(floor(floor(floor(floor(floor(floor(floor(c1)))))))))) nest_col_func from ct1;","select c1 from ct1" )
|
||||
|
||||
# used for stable table
|
||||
|
||||
tdSql.query("select floor(c1) from stb1")
|
||||
tdSql.checkRows(25)
|
||||
self.check_result_auto( "select c1, c2, c3 , c4, c5 from ct4 ", "select (c1), floor(c2) ,floor(c3), floor(c4), floor(c5) from ct4")
|
||||
self.check_result_auto("select floor(floor(floor(floor(floor(floor(floor(floor(floor(floor(c1)))))))))) nest_col_func from ct4;" , "select c1 from ct4" )
|
||||
|
||||
|
||||
|
||||
# used for not exists table
|
||||
tdSql.error("select floor(c1) from stbbb1")
|
||||
tdSql.error("select floor(c1) from tbname")
|
||||
tdSql.error("select floor(c1) from ct5")
|
||||
|
||||
# mix with common col
|
||||
tdSql.query("select c1, floor(c1) from ct1")
|
||||
tdSql.checkData(0 , 0 ,8)
|
||||
tdSql.checkData(0 , 1 ,8)
|
||||
|
@ -290,7 +290,7 @@ class TDTestCase:
|
|||
tdSql.checkData(0 , 1 ,None)
|
||||
tdSql.checkData(0 , 2 ,None)
|
||||
tdSql.checkData(0 , 3 ,None)
|
||||
|
||||
|
||||
tdSql.checkData(3 , 0 , 6)
|
||||
tdSql.checkData(3 , 1 , 6)
|
||||
tdSql.checkData(3 , 2 ,6.66000)
|
||||
|
@ -311,7 +311,7 @@ class TDTestCase:
|
|||
tdSql.query("select max(c5), count(c5) from stb1")
|
||||
tdSql.query("select max(c5), count(c5) from ct1")
|
||||
|
||||
|
||||
|
||||
# bug fix for count
|
||||
tdSql.query("select count(c1) from ct4 ")
|
||||
tdSql.checkData(0,0,9)
|
||||
|
@ -322,7 +322,7 @@ class TDTestCase:
|
|||
tdSql.query("select count(*) from stb1 ")
|
||||
tdSql.checkData(0,0,25)
|
||||
|
||||
# bug fix for compute
|
||||
tdSql.query("select c1, abs(c1) -0 ,floor(c1)-0 from ct4 ")
|
||||
tdSql.checkData(0, 0, None)
|
||||
tdSql.checkData(0, 1, None)
|
||||
|
@ -373,10 +373,10 @@ class TDTestCase:
|
|||
tdSql.checkData(0,3,8.000000000)
|
||||
tdSql.checkData(0,4,7.900000000)
|
||||
tdSql.checkData(0,5,3.000000000)
|
||||
|
||||
|
||||
def floor_Arithmetic(self):
|
||||
pass
|
||||
|
||||
|
||||
def check_boundary_values(self):
|
||||
|
||||
tdSql.execute("drop database if exists bound_test")
|
||||
|
@ -405,14 +405,14 @@ class TDTestCase:
|
|||
tdSql.execute(
|
||||
f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
|
||||
tdSql.error(
|
||||
f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
self.check_result_auto( "select c1, c2, c3 , c4, c5 ,c6 from sub1_bound ", "select floor(c1), floor(c2) ,floor(c3), floor(c4), floor(c5) ,floor(c6) from sub1_bound")
|
||||
self.check_result_auto( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select floor(c1), floor(c2) ,floor(c3), floor(c3), floor(c2) ,floor(c1) from sub1_bound")
|
||||
self.check_result_auto("select floor(floor(floor(floor(floor(floor(floor(floor(floor(floor(c1)))))))))) nest_col_func from sub1_bound;" , "select floor(c1) from sub1_bound" )
|
||||
|
||||
|
||||
# check basic elem for table per row
|
||||
tdSql.query("select floor(c1+0.2) ,floor(c2) , floor(c3+0.3) , floor(c4-0.3), floor(c5/2), floor(c6/2) from sub1_bound ")
|
||||
tdSql.checkData(0, 0, 2147483647.000000000)
|
||||
|
@ -444,26 +444,26 @@ class TDTestCase:
|
|||
tdSql.prepare()
|
||||
|
||||
tdLog.printNoPrefix("==========step1:create table ==============")
|
||||
|
||||
|
||||
self.prepare_datas()
|
||||
|
||||
tdLog.printNoPrefix("==========step2:test errors ==============")
|
||||
|
||||
self.test_errors()
|
||||
|
||||
tdLog.printNoPrefix("==========step3:support types ============")
|
||||
|
||||
self.support_types()
|
||||
|
||||
tdLog.printNoPrefix("==========step4: floor basic query ============")
|
||||
|
||||
self.basic_floor_function()
|
||||
|
||||
tdLog.printNoPrefix("==========step5: floor boundary query ============")
|
||||
|
||||
self.check_boundary_values()
|
||||
|
||||
tdLog.printNoPrefix("==========step6: floor filter query ============")
|
||||
|
||||
self.abs_func_filter()
|
||||
|
||||
|
|
|
@ -370,17 +370,17 @@ class TDTestCase:
|
|||
tdSql.query("select diff(st1+c1) from stb1 partition by tbname")
|
||||
tdSql.checkRows(190)
|
||||
|
||||
# # bug need fix
|
||||
# tdSql.query("select diff(st1+c1) from stb1 partition by tbname slimit 1 ")
|
||||
# tdSql.checkRows(19)
|
||||
# tdSql.error("select diff(st1+c1) from stb1 partition by tbname limit 1 ")
|
||||
|
||||
|
||||
# bug need fix
|
||||
tdSql.query("select diff(st1+c1) from stb1 partition by tbname")
|
||||
tdSql.checkRows(190)
|
||||
|
||||
# bug need fix
|
||||
# tdSql.query("select tbname , diff(c1) from stb1 partition by tbname")
|
||||
# tdSql.checkRows(199)
|
||||
# tdSql.query("select tbname , diff(st1) from stb1 partition by tbname")
|
||||
|
@ -388,7 +388,7 @@ class TDTestCase:
|
|||
# tdSql.query("select tbname , diff(st1) from stb1 partition by tbname slimit 1")
|
||||
# tdSql.checkRows(19)
|
||||
|
||||
# partition by tags
|
||||
# tdSql.query("select st1 , diff(c1) from stb1 partition by st1")
|
||||
# tdSql.checkRows(199)
|
||||
# tdSql.query("select diff(c1) from stb1 partition by st1")
|
||||
|
@ -488,4 +488,4 @@ class TDTestCase:
|
|||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
|
|
|
@ -11,7 +11,7 @@ from util.sql import *
|
|||
from util.cases import *
|
||||
|
||||
class TDTestCase:
|
||||
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
|
||||
|
||||
|
@ -388,11 +388,11 @@ class TDTestCase:
|
|||
tdSql.execute(
|
||||
f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
|
||||
tdSql.error(
|
||||
f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
|
||||
tdSql.query("select stateduration(c1,'GT',1,1s) from sub1_bound")
|
||||
tdSql.checkRows(5)
|
||||
|
||||
|
@ -400,29 +400,29 @@ class TDTestCase:
|
|||
tdSql.prepare()
|
||||
|
||||
tdLog.printNoPrefix("==========step1:create table ==============")
|
||||
|
||||
|
||||
self.prepare_datas()
|
||||
|
||||
tdLog.printNoPrefix("==========step2:test errors ==============")
|
||||
|
||||
self.test_errors()
|
||||
|
||||
tdLog.printNoPrefix("==========step3:support types ============")
|
||||
|
||||
self.support_types()
|
||||
|
||||
tdLog.printNoPrefix("==========step4:support opers ============")
|
||||
self.support_opers()
|
||||
|
||||
tdLog.printNoPrefix("==========step5: stateduration basic query ============")
|
||||
|
||||
self.basic_stateduration_function()
|
||||
|
||||
tdLog.printNoPrefix("==========step6: stateduration boundary query ============")
|
||||
|
||||
self.check_boundary_values()
|
||||
|
||||
tdLog.printNoPrefix("==========step6: stateduration unit time test ============")
|
||||
|
||||
self.check_unit_time()
|
||||
|
||||
|
|
|
@ -34,7 +34,7 @@ class TDTestCase:
|
|||
tag_sql += f"{k} {v},"
|
||||
create_stb_sql = f'create table {stbname} (ts timestamp,{column_sql[:-1]}) tags({tag_sql[:-1]})'
|
||||
return create_stb_sql
|
||||
|
||||
|
||||
def last_check_stb_tb_base(self):
|
||||
tdSql.prepare()
|
||||
stbname = tdCom.getLongName(5, "letters")
|
||||
|
@ -201,7 +201,7 @@ class TDTestCase:
|
|||
tdSql.execute(f'use {dbname}')
|
||||
|
||||
# build 20 child tables,every table insert 10 rows
|
||||
tdSql.execute(f'''create table {stbname}(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
|
||||
col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''')
|
||||
for i in range(self.tbnum):
|
||||
tdSql.execute(
|
||||
|
|
|
@ -159,7 +159,7 @@ class TDTestCase:
|
|||
return tdSql.error(self.mavg_query_form(
|
||||
sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
|
||||
table_expr=table_expr, condition=condition
|
||||
))
|
||||
|
||||
if all(["group" in condition.lower(), "tbname" not in condition.lower()]):
|
||||
print(f"case in {line}: ", end='')
|
||||
|
@ -295,7 +295,7 @@ class TDTestCase:
|
|||
pre_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
|
||||
if (platform.system().lower() == 'windows' and pre_result.dtype == 'int32'):
|
||||
pre_result = np.array(pre_result, dtype = 'int64')
|
||||
|
||||
|
||||
pre_mavg = np.convolve(pre_result, np.ones(k), "valid")[offset_val:]/k
|
||||
tdSql.query(self.mavg_query_form(
|
||||
sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
|
||||
|
@ -669,7 +669,7 @@ class TDTestCase:
|
|||
tdSql.checkData(0,0,1.000000000)
|
||||
tdSql.checkData(1,0,1.000000000)
|
||||
tdSql.checkData(5,0,1.000000000)
|
||||
|
||||
|
||||
tdSql.query("select mavg(abs(c1),1) from t1")
|
||||
tdSql.checkRows(4)
|
||||
|
||||
|
@ -688,17 +688,17 @@ class TDTestCase:
|
|||
tdSql.query("select mavg(st1+c1,3) from stb1 partition by tbname")
|
||||
tdSql.checkRows(20)
|
||||
|
||||
# # bug need fix
|
||||
# tdSql.query("select mavg(st1+c1,3) from stb1 partition by tbname slimit 1 ")
|
||||
# tdSql.checkRows(2)
|
||||
# tdSql.error("select mavg(st1+c1,3) from stb1 partition by tbname limit 1 ")
|
||||
|
||||
|
||||
# bug need fix
|
||||
tdSql.query("select mavg(st1+c1,3) from stb1 partition by tbname")
|
||||
tdSql.checkRows(20)
|
||||
|
||||
# bug need fix
|
||||
# tdSql.query("select tbname , mavg(c1,3) from stb1 partition by tbname")
|
||||
# tdSql.checkRows(38)
|
||||
# tdSql.query("select tbname , mavg(st1,3) from stb1 partition by tbname")
|
||||
|
@ -706,7 +706,7 @@ class TDTestCase:
|
|||
# tdSql.query("select tbname , mavg(st1,3) from stb1 partition by tbname slimit 1")
|
||||
# tdSql.checkRows(2)
|
||||
|
||||
# partition by tags
|
||||
# tdSql.query("select st1 , mavg(c1,3) from stb1 partition by st1")
|
||||
# tdSql.checkRows(38)
|
||||
# tdSql.query("select mavg(c1,3) from stb1 partition by st1")
|
||||
|
@ -743,4 +743,4 @@ class TDTestCase:
|
|||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
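Editor's note: the mavg cases above recompute the expected moving average on the client with np.convolve(pre_result, np.ones(k), "valid") / k. A short self-contained sketch (illustrative only, sample data is made up) of why that expression equals a k-point sliding mean:

```python
import numpy as np

data = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
k = 3

# Convolving with a window of k ones sums every k consecutive points;
# dividing by k turns each windowed sum into the window mean.
via_convolve = np.convolve(data, np.ones(k), "valid") / k

# The same result computed directly from the definition of a moving average.
via_loop = np.array([data[i:i + k].mean() for i in range(len(data) - k + 1)])

assert np.allclose(via_convolve, via_loop)
print(via_convolve)  # [2. 3. 4.]
```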
|
||||
|
|
|
@ -5,7 +5,7 @@ import numpy as np
|
|||
|
||||
|
||||
class TDTestCase:
|
||||
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143,
"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
|
||||
|
@ -19,15 +19,15 @@ class TDTestCase:
|
|||
self.nchar_str = '涛思数据'
|
||||
def max_check_stb_and_tb_base(self):
|
||||
tdSql.prepare()
|
||||
intData = []
|
||||
floatData = []
|
||||
tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
|
||||
col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''')
|
||||
tdSql.execute("create table stb_1 using stb tags('beijing')")
|
||||
for i in range(self.rowNum):
|
||||
tdSql.execute(f"insert into stb_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
|
||||
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
|
||||
intData.append(i + 1)
|
||||
floatData.append(i + 0.1)
|
||||
for i in ['ts','col11','col12','col13']:
|
||||
for j in ['db.stb','stb','db.stb_1','stb_1']:
|
||||
|
@ -45,17 +45,17 @@ class TDTestCase:
|
|||
tdSql.query("select max(col1) from stb where col2<=5")
|
||||
tdSql.checkData(0,0,5)
|
||||
tdSql.execute('drop database db')
|
||||
|
||||
|
||||
def max_check_ntb_base(self):
|
||||
tdSql.prepare()
|
||||
intData = []
|
||||
floatData = []
|
||||
tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
|
||||
col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20))''')
|
||||
for i in range(self.rowNum):
|
||||
tdSql.execute(f"insert into ntb values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
|
||||
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
|
||||
intData.append(i + 1)
|
||||
floatData.append(i + 0.1)
|
||||
for i in ['ts','col11','col12','col13']:
|
||||
for j in ['db.ntb','ntb']:
|
||||
|
@ -79,7 +79,7 @@ class TDTestCase:
|
|||
same_sql = f"select {col_name} from {tbname} order by {col_name} desc limit 1"
|
||||
|
||||
tdSql.query(max_sql)
|
||||
max_result = tdSql.queryResult
|
||||
|
||||
tdSql.query(same_sql)
|
||||
same_result = tdSql.queryResult
|
||||
|
@ -91,7 +91,7 @@ class TDTestCase:
|
|||
|
||||
|
||||
def support_distributed_aggregate(self):
|
||||
|
||||
|
||||
# prepare datas for 20 tables distributed at different vgroups
|
||||
tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
|
||||
tdSql.execute(" use testdb ")
|
||||
|
@ -161,17 +161,17 @@ class TDTestCase:
|
|||
vgroups = tdSql.queryResult
|
||||
|
||||
vnode_tables={}
|
||||
|
||||
|
||||
for vgroup_id in vgroups:
|
||||
vnode_tables[vgroup_id[0]]=[]
|
||||
|
||||
|
||||
|
||||
# check sub_table of per vnode ,make sure sub_table has been distributed
|
||||
tdSql.query("show tables like 'ct%'")
|
||||
table_names = tdSql.queryResult
|
||||
tablenames = []
|
||||
for table_name in table_names:
|
||||
vnode_tables[table_name[6]].append(table_name[0])
|
||||
|
||||
count = 0
|
||||
for k ,v in vnode_tables.items():
|
||||
|
@ -180,8 +180,8 @@ class TDTestCase:
|
|||
if count < 2:
|
||||
tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")
|
||||
|
||||
# check max function work status
|
||||
|
||||
tdSql.query("show tables like 'ct%'")
|
||||
table_names = tdSql.queryResult
|
||||
tablenames = []
|
||||
|
@ -190,23 +190,23 @@ class TDTestCase:
|
|||
|
||||
tdSql.query("desc stb1")
|
||||
col_names = tdSql.queryResult
|
||||
|
||||
|
||||
colnames = []
|
||||
for col_name in col_names:
|
||||
if col_name[1] in ["INT" ,"BIGINT" ,"SMALLINT" ,"TINYINT" , "FLOAT" ,"DOUBLE"]:
|
||||
colnames.append(col_name[0])
|
||||
|
||||
|
||||
for tablename in tablenames:
|
||||
for colname in colnames:
|
||||
self.check_max_functions(tablename,colname)
|
||||
|
||||
# max function with basic filter
|
||||
print(vnode_tables)
|
||||
|
||||
|
||||
def run(self):
|
||||
|
||||
# max verification
|
||||
self.max_check_stb_and_tb_base()
|
||||
self.max_check_ntb_base()
|
||||
|
||||
|
|
|
@ -0,0 +1,189 @@
|
|||
# author : wenzhouwww
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
self.row_nums = 10
|
||||
self.tb_nums = 10
|
||||
self.ts = 1537146000000
|
||||
|
||||
def prepare_datas(self, stb_name , tb_nums , row_nums ):
|
||||
tdSql.execute(" use db ")
|
||||
tdSql.execute(f" create stable {stb_name} (ts timestamp , c1 int , c2 bigint , c3 float , c4 double , c5 smallint , c6 tinyint , c7 bool , c8 binary(36) , c9 nchar(36) , uc1 int unsigned,\
|
||||
uc2 bigint unsigned ,uc3 smallint unsigned , uc4 tinyint unsigned ) tags(t1 timestamp , t2 int , t3 bigint , t4 float , t5 double , t6 smallint , t7 tinyint , t8 bool , t9 binary(36)\
|
||||
, t10 nchar(36) , t11 int unsigned , t12 bigint unsigned ,t13 smallint unsigned , t14 tinyint unsigned ) ")
|
||||
|
||||
for i in range(tb_nums):
|
||||
tbname = f"sub_{stb_name}_{i}"
|
||||
ts = self.ts + i*10000
|
||||
tdSql.execute(f"create table {tbname} using {stb_name} tags ({ts} , {i} , {i}*10 ,{i}*1.0,{i}*1.0 , 1 , 2, 'true', 'binary_{i}' ,'nchar_{i}',{i},{i},10,20 )")
|
||||
|
||||
for row in range(row_nums):
|
||||
ts = self.ts + row*1000
|
||||
tdSql.execute(f"insert into {tbname} values({ts} , {row} , {row} , {row} , {row} , 1 , 2 , 'true' , 'binary_{row}' , 'nchar_{row}' , {row} , {row} , 1 ,2 )")
|
||||
|
||||
for null in range(5):
|
||||
ts = self.ts + row_nums*1000 + null*1000
|
||||
tdSql.execute(f"insert into {tbname} values({ts} , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL )")
|
||||
|
||||
def basic_query(self):
|
||||
tdSql.query("select count(*) from stb")
|
||||
tdSql.checkData(0,0,(self.row_nums + 5 )*self.tb_nums)
|
||||
tdSql.query("select max(c1) from stb")
|
||||
tdSql.checkData(0,0,(self.row_nums -1))
|
||||
tdSql.query(" select tbname , max(c1) from stb partition by tbname ")
|
||||
tdSql.checkRows(self.tb_nums)
|
||||
tdSql.query(" select max(c1) from stb group by t1 order by t1 ")
|
||||
tdSql.checkRows(self.tb_nums)
|
||||
tdSql.query(" select max(c1) from stb group by c1 order by t1 ")
|
||||
tdSql.query(" select max(t2) from stb group by c1 order by t1 ")
|
||||
tdSql.query(" select max(c1) from stb group by tbname order by tbname ")
|
||||
tdSql.checkRows(self.tb_nums)
|
||||
# bug need fix
|
||||
# tdSql.query(" select max(t1) from stb group by t2 order by t2 ")
|
||||
# tdSql.checkRows(self.tb_nums)
|
||||
tdSql.query(" select max(c1) from stb group by c1 order by c1 ")
|
||||
tdSql.checkRows(self.row_nums+1)
|
||||
|
||||
tdSql.query(" select c1 , max(c1) from stb group by c1 order by c1 ")
|
||||
tdSql.checkRows(self.row_nums+1)
|
||||
|
||||
# support selective functions
|
||||
tdSql.query(" select c1 ,c2 ,c3 , max(c1) ,c4 ,c5 ,t11 from stb group by c1 order by c1 desc ")
|
||||
tdSql.checkRows(self.row_nums+1)
|
||||
|
||||
tdSql.query(" select c1, tbname , max(c1) ,c4 ,c5 ,t11 from stb group by c1 order by c1 desc ")
|
||||
tdSql.checkRows(self.row_nums+1)
|
||||
|
||||
# bug need fix
|
||||
# tdSql.query(" select tbname , max(c1) from sub_stb_1 where c1 is null group by c1 order by c1 desc ")
|
||||
# tdSql.checkRows(1)
|
||||
# tdSql.checkData(0,0,"sub_stb_1")
|
||||
|
||||
tdSql.query("select max(c1) ,c2 ,t2,tbname from stb group by abs(c1) order by abs(c1)")
|
||||
tdSql.checkRows(self.row_nums+1)
|
||||
tdSql.query("select abs(c1+c3), count(c1+c3) ,max(c1+t2) from stb group by abs(c1+c3) order by abs(c1+c3)")
|
||||
tdSql.checkRows(self.row_nums+1)
|
||||
tdSql.query("select max(c1+c3)+min(c2) ,abs(c1) from stb group by abs(c1) order by abs(c1)")
|
||||
tdSql.checkRows(self.row_nums+1)
|
||||
tdSql.error("select count(c1+c3)+max(c2) ,abs(c1) ,abs(t1) from stb group by abs(c1) order by abs(t1)+c2")
|
||||
tdSql.error("select count(c1+c3)+max(c2) ,abs(c1) from stb group by abs(c1) order by abs(c1)+c2")
|
||||
tdSql.query("select abs(c1+c3)+abs(c2) , count(c1+c3)+max(c2) from stb group by abs(c1+c3)+abs(c2) order by abs(c1+c3)+abs(c2)")
|
||||
tdSql.checkRows(self.row_nums+1)
|
||||
|
||||
tdSql.query(" select max(c1) , max(t2) from stb where abs(c1+t2)=1 partition by tbname ")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.query(" select max(c1) from stb where abs(c1+t2)=1 partition by tbname ")
|
||||
tdSql.checkRows(2)
|
||||
|
||||
tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname ")
|
||||
tdSql.checkRows(self.tb_nums)
|
||||
tdSql.checkData(0,1,self.row_nums-1)
|
||||
|
||||
tdSql.query("select tbname , max(c2) from stb partition by t1 order by t1")
|
||||
tdSql.query("select tbname , max(t2) from stb partition by t1 order by t1")
|
||||
tdSql.query("select tbname , max(t2) from stb partition by t2 order by t2")
|
||||
|
||||
# # bug need fix
|
||||
# tdSql.query("select t2 , max(t2) from stb partition by t2 order by t2")
|
||||
# tdSql.checkRows(self.tb_nums)
|
||||
|
||||
tdSql.query("select tbname , max(c1) from stb partition by tbname order by tbname")
|
||||
tdSql.checkRows(self.tb_nums)
|
||||
tdSql.checkData(0,1,self.row_nums-1)
|
||||
|
||||
|
||||
tdSql.query("select tbname , max(c1) from stb partition by t2 order by t2")
|
||||
|
||||
tdSql.query("select c2, max(c1) from stb partition by c2 order by c2 desc")
|
||||
tdSql.checkRows(self.tb_nums+1)
|
||||
tdSql.checkData(0,1,self.row_nums-1)
|
||||
|
||||
tdSql.query("select tbname , max(c1) from stb partition by c1 order by c2")
|
||||
|
||||
|
||||
tdSql.query("select tbname , abs(t2) from stb partition by c2 order by t2")
|
||||
tdSql.checkRows(self.tb_nums*(self.row_nums+5))
|
||||
|
||||
tdSql.query("select max(c1) , count(t2) from stb partition by c2 ")
|
||||
tdSql.checkRows(self.row_nums+1)
|
||||
tdSql.checkData(0,1,self.row_nums)
|
||||
|
||||
tdSql.query("select count(c1) , max(t2) ,c2 from stb partition by c2 order by c2")
|
||||
tdSql.checkRows(self.row_nums+1)
|
||||
|
||||
tdSql.query("select count(c1) , count(t1) ,max(c2) ,tbname from stb partition by tbname order by tbname")
|
||||
tdSql.checkRows(self.tb_nums)
|
||||
tdSql.checkCols(4)
|
||||
|
||||
tdSql.query("select count(c1) , max(t2) ,t1 from stb partition by t1 order by t1")
|
||||
tdSql.checkRows(self.tb_nums)
|
||||
tdSql.checkData(0,0,self.row_nums)
|
||||
|
||||
# bug need fix
|
||||
# tdSql.query("select count(c1) , max(t1) ,abs(c1) from stb partition by abs(c1) order by abs(c1)")
|
||||
# tdSql.checkRows(self.row_nums+1)
|
||||
|
||||
|
||||
tdSql.query("select max(ceil(c2)) , max(floor(t2)) ,max(floor(c2)) from stb partition by abs(c2) order by abs(c2)")
|
||||
tdSql.checkRows(self.row_nums+1)
|
||||
|
||||
|
||||
tdSql.query("select max(ceil(c1-2)) , max(floor(t2+1)) ,max(c2-c1) from stb partition by abs(floor(c1)) order by abs(floor(c1))")
|
||||
tdSql.checkRows(self.row_nums+1)
|
||||
|
||||
|
||||
# interval
|
||||
tdSql.query("select max(c1) from stb interval(2s) sliding(1s)")
|
||||
|
||||
# bug need fix
|
||||
|
||||
tdSql.query('select max(c1) from stb where ts>="2022-07-06 16:00:00.000 " and ts < "2022-07-06 17:00:00.000 " interval(50s) sliding(30s) fill(NULL)')
|
||||
|
||||
tdSql.query(" select tbname , count(c1) from stb partition by tbname interval(10s) slimit 5 soffset 1 ")
|
||||
|
||||
tdSql.query("select tbname , max(c1) from stb partition by tbname interval(10s)")
|
||||
tdSql.checkRows(self.row_nums*2)
|
||||
|
||||
tdSql.query("select tbname , count(c1) from sub_stb_1 partition by tbname interval(10s)")
|
||||
tdSql.checkData(0,0,'sub_stb_1')
|
||||
tdSql.checkData(0,1,self.row_nums)
|
||||
|
||||
# bug need fix
|
||||
# tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname slimit 5 soffset 0 ")
|
||||
# tdSql.checkRows(5)
|
||||
|
||||
# tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname slimit 5 soffset 1 ")
|
||||
# tdSql.checkRows(5)
|
||||
|
||||
tdSql.query(" select tbname , max(c1) from sub_stb_1 partition by tbname interval(10s) sliding(5s) ")
|
||||
|
||||
tdSql.query(f'select max(c1) from stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)')
|
||||
tdSql.query(f'select tbname , max(c1) from stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)')
|
||||
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
self.prepare_datas("stb",self.tb_nums,self.row_nums)
|
||||
self.basic_query()
|
||||
|
||||
# # coverage case for taosd crash about bug fix
|
||||
tdSql.query(" select sum(c1) from stb where t2+10 >1 ")
|
||||
tdSql.query(" select count(c1),count(t1) from stb where -t2<1 ")
|
||||
tdSql.query(" select tbname ,max(ceil(c1)) from stb group by tbname ")
|
||||
tdSql.query(" select avg(abs(c1)) , tbname from stb group by tbname ")
|
||||
tdSql.query(" select t1,c1 from stb where abs(t2+c1)=1 ")
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -12,30 +12,30 @@ class TDTestCase:
|
|||
|
||||
self.rowNum = 10
|
||||
self.ts = 1537146000000
|
||||
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
intData = []
|
||||
floatData = []
|
||||
|
||||
tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
|
||||
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
|
||||
tdSql.execute("create table stb_1 using stb tags('beijing')")
|
||||
tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
|
||||
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''')
|
||||
for i in range(self.rowNum):
|
||||
tdSql.execute("insert into ntb values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
|
||||
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
|
||||
intData.append(i + 1)
|
||||
floatData.append(i + 0.1)
|
||||
for i in range(self.rowNum):
|
||||
tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
|
||||
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
|
||||
intData.append(i + 1)
|
||||
floatData.append(i + 0.1)
|
||||
intData.append(i + 1)
|
||||
floatData.append(i + 0.1)
|
||||
|
||||
# max verifacation
|
||||
# max verifacation
|
||||
tdSql.error("select min(ts) from stb_1")
|
||||
tdSql.error("select min(ts) from db.stb_1")
|
||||
tdSql.error("select min(col7) from stb_1")
|
||||
|
@ -206,7 +206,7 @@ class TDTestCase:
|
|||
tdSql.query("select min(col1) from ntb where col2>=5")
|
||||
tdSql.checkData(0,0,5)
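        # Editor's note: rows are inserted with col1 == col2 == i + 1 for i in range(rowNum),
        # so the smallest row satisfying col2 >= 5 has col1 = 5, which is the value checked here.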

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)
File diff suppressed because it is too large
@ -134,31 +134,16 @@ class TDTestCase:
                    tdSql.query(f'select percentile({k}, {param}) from {self.stbname}_{i}')
                    tdSql.checkData(0, 0, np.percentile(floatData, param))

            #!bug TD-17119
            # for k,v in self.tag_dict.items():
            #     for param in self.param:
            #         if v.lower() in ['timestamp','bool'] or 'binary' in v.lower() or 'nchar' in v.lower():
            #             tdSql.error(f'select percentile({k},{param}) from {self.stbname}_{i}')
            #         elif v.lower() == 'tinyint':
            #             self.check_tags(k,param,i,self.tag_tinyint)
            #         elif v.lower() == 'smallint':
            #             self.check_tags(k,param,i,self.tag_smallint)
            #         elif v.lower() == 'int':
            #             self.check_tags(k,param,i,self.tag_int)
            #         elif v.lower() == 'bigint':
            #             self.check_tags(k,param,i,self.tag_bigint)
            #         elif v.lower() == 'tinyint unsigned':
            #             self.check_tags(k,param,i,self.tag_utint)
            #         elif v.lower() == 'smallint unsigned':
            #             self.check_tags(k,param,i,self.tag_usint)
            #         elif v.lower() == 'int unsigned':
            #             self.check_tags(k,param,i,self.tag_uint)
            #         elif v.lower() == 'bigint unsigned':
            #             self.check_tags(k,param,i,self.tag_ubint)
            #         elif v.lower() == 'float':
            #             self.check_tags(k,param,i,self.tag_float)
            #         elif v.lower() == 'double':
            #             self.check_tags(k,param,i,self.tag_double)
            for k,v in self.tag_dict.items():
                for param in self.param:
                    if v.lower() in ['timestamp','bool'] or 'binary' in v.lower() or 'nchar' in v.lower():
                        tdSql.error(f'select percentile({k},{param}) from {self.stbname}_{i}')
                    else:
                        tdSql.query(f'select {k} from {self.stbname}_{i}')
                        data_num = tdSql.queryResult[0][0]
                        tdSql.query(f'select percentile({k},{param}) from {self.stbname}_{i}')
                        tdSql.checkData(0,0,data_num)
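                        # Editor's note (assumption): each child table carries a single constant
                        # tag value, so any percentile of that tag should equal the raw value
                        # fetched just above; that is why checkData compares the percentile result
                        # against data_num rather than a separately computed percentile.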

    def run(self):
        self.function_check_ntb()
        self.function_check_ctb()
@ -8,14 +8,14 @@ from util.sql import *
from util.cases import *

class TDTestCase:
    updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
                     "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
                     "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "udfDebugFlag": 143}

    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor())

    def prepare_datas(self):
        tdSql.execute(
            '''create table stb1

@ -23,7 +23,7 @@ class TDTestCase:
            tags (t1 int)
            '''
        )

        tdSql.execute(
            '''
            create table t1

@ -65,14 +65,14 @@ class TDTestCase:
            ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            '''
        )

    def check_result_auto(self ,origin_query , round_query):
        round_result = tdSql.getResult(round_query)
        origin_result = tdSql.getResult(origin_query)

        auto_result = []

        for row in origin_result:
            row_check = []
            for elem in row:

@ -87,13 +87,13 @@ class TDTestCase:
        for row_index , row in enumerate(round_result):
            for col_index , elem in enumerate(row):
                if auto_result[row_index][col_index] != elem:
                    check_status = False
        if not check_status:
            tdLog.notice("round function value not as expected , sql is \"%s\" " % round_query)
            sys.exit(1)
        else:
            tdLog.info("round value check pass , it works as expected , sql is \"%s\" " % round_query)

    def test_errors(self):
        error_sql_lists = [
            "select round from t1",

@ -127,42 +127,42 @@ class TDTestCase:
        ]
        for error_sql in error_sql_lists:
            tdSql.error(error_sql)

    def support_types(self):
        type_error_sql_lists = [
            "select round(ts) from t1" ,
            "select round(c7) from t1",
            "select round(c8) from t1",
            "select round(c9) from t1",
            "select round(ts) from ct1" ,
            "select round(c7) from ct1",
            "select round(c8) from ct1",
            "select round(c9) from ct1",
            "select round(ts) from ct3" ,
            "select round(c7) from ct3",
            "select round(c8) from ct3",
            "select round(c9) from ct3",
            "select round(ts) from ct4" ,
            "select round(c7) from ct4",
            "select round(c8) from ct4",
            "select round(c9) from ct4",
            "select round(ts) from stb1" ,
            "select round(c7) from stb1",
            "select round(c8) from stb1",
            "select round(c9) from stb1" ,

            "select round(ts) from stbbb1" ,
            "select round(c7) from stbbb1",

            "select round(ts) from tbname",
            "select round(c9) from tbname"
        ]

        for type_sql in type_error_sql_lists:
            tdSql.error(type_sql)

        type_sql_lists = [
            "select round(c1) from t1",
            "select round(c2) from t1",

@ -192,16 +192,16 @@ class TDTestCase:
            "select round(c5) from stb1",
            "select round(c6) from stb1",

            "select round(c6) as alisb from stb1",
            "select round(c6) alisb from stb1",
        ]

        for type_sql in type_sql_lists:
            tdSql.query(type_sql)

    def basic_round_function(self):

        # basic query
        tdSql.query("select c1 from ct3")
        tdSql.checkRows(0)
        tdSql.query("select c1 from t1")

@ -221,7 +221,7 @@ class TDTestCase:
        tdSql.query("select round(c5) from ct3")
        tdSql.checkRows(0)
        tdSql.query("select round(c6) from ct3")

        # used for regular table
        tdSql.query("select round(c1) from t1")
        tdSql.checkData(0, 0, None)

@ -239,7 +239,7 @@ class TDTestCase:
        tdSql.checkData(5, 5, None)

        self.check_result_auto( "select c1, c2, c3 , c4, c5 from t1", "select (c1), round(c2) ,round(c3), round(c4), round(c5) from t1")

        # used for sub table
        tdSql.query("select round(c1) from ct1")
        tdSql.checkData(0, 0, 8)

@ -251,20 +251,20 @@ class TDTestCase:
        self.check_result_auto( "select c1, c2, c3 , c4, c5 from ct1", "select (c1), round(c2) ,round(c3), round(c4), round(c5) from ct1")
        self.check_result_auto("select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from ct1;","select c1 from ct1" )

        # used for stable table
        tdSql.query("select round(c1) from stb1")
        tdSql.checkRows(25)
        self.check_result_auto( "select c1, c2, c3 , c4, c5 from ct4 ", "select (c1), round(c2) ,round(c3), round(c4), round(c5) from ct4")
        self.check_result_auto("select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from ct4;" , "select c1 from ct4" )

        # used for non-existent tables
        tdSql.error("select round(c1) from stbbb1")
        tdSql.error("select round(c1) from tbname")
        tdSql.error("select round(c1) from ct5")

        # mix with common col
        tdSql.query("select c1, round(c1) from ct1")
        tdSql.checkData(0 , 0 ,8)
        tdSql.checkData(0 , 1 ,8)

@ -289,7 +289,7 @@ class TDTestCase:
        tdSql.checkData(0 , 1 ,None)
        tdSql.checkData(0 , 2 ,None)
        tdSql.checkData(0 , 3 ,None)

        tdSql.checkData(3 , 0 , 6)
        tdSql.checkData(3 , 1 , 6)
        tdSql.checkData(3 , 2 ,6.66000)

@ -315,7 +315,7 @@ class TDTestCase:
        tdSql.query("select max(c5), count(c5) from stb1")
        tdSql.query("select max(c5), count(c5) from ct1")

        # bug fix for count
        tdSql.query("select count(c1) from ct4 ")
        tdSql.checkData(0,0,9)

@ -326,7 +326,7 @@ class TDTestCase:
        tdSql.query("select count(*) from stb1 ")
        tdSql.checkData(0,0,25)

        # bug fix for compute
        tdSql.query("select c1, abs(c1) -0 ,round(c1)-0 from ct4 ")
        tdSql.checkData(0, 0, None)
        tdSql.checkData(0, 1, None)

@ -378,10 +378,10 @@ class TDTestCase:
        tdSql.checkData(0,4,7.900000000)
        tdSql.checkData(0,5,3.000000000)
        tdSql.checkData(0,6,7.500000000)

    def round_Arithmetic(self):
        pass

    def check_boundary_values(self):

        tdSql.execute("drop database if exists bound_test")

@ -410,14 +410,14 @@ class TDTestCase:
        tdSql.execute(
            f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
        )

        tdSql.error(
            f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
        )
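        # Editor's note: the second insert is expected to be rejected because its integer values
        # are one past their column bounds (2147483648 > INT max, 9223372036854775808 > BIGINT max,
        # 32768 > SMALLINT max, 128 > TINYINT max), while the first insert stays just inside them.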
        self.check_result_auto( "select c1, c2, c3 , c4, c5 ,c6 from sub1_bound ", "select round(c1), round(c2) ,round(c3), round(c4), round(c5) ,round(c6) from sub1_bound")
        self.check_result_auto( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select round(c1), round(c2) ,round(c3), round(c3), round(c2) ,round(c1) from sub1_bound")
        self.check_result_auto("select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from sub1_bound;" , "select round(c1) from sub1_bound" )

        # check basic elem for table per row
        tdSql.query("select round(c1+0.2) ,round(c2) , round(c3+0.3) , round(c4-0.3), round(c5/2), round(c6/2) from sub1_bound ")
        tdSql.checkData(0, 0, 2147483647.000000000)

@ -444,32 +444,32 @@ class TDTestCase:
        self.check_result_auto( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select round(t1) ,round(c5) from stb1 where c1 > 0 order by tbname" )
        self.check_result_auto( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select round(t1) , round(c5) from stb1 where c1 > 0 order by tbname" )
        pass

    def run(self):  # sourcery skip: extract-duplicate-method, remove-redundant-fstring
        tdSql.prepare()

        tdLog.printNoPrefix("==========step1:create table ==============")
        self.prepare_datas()

        tdLog.printNoPrefix("==========step2:test errors ==============")
        self.test_errors()

        tdLog.printNoPrefix("==========step3:support types ============")
        self.support_types()

        tdLog.printNoPrefix("==========step4: round basic query ============")
        self.basic_round_function()

        tdLog.printNoPrefix("==========step5: round boundary query ============")
        self.check_boundary_values()

        tdLog.printNoPrefix("==========step6: round filter query ============")
        self.abs_func_filter()
@ -9,13 +9,13 @@ from util.cases import *

class TDTestCase:
    updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
                     "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
                     "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "udfDebugFlag": 143}

    def init(self, conn, powSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor())

    def prepare_datas(self):
        tdSql.execute(
            '''create table stb1

@ -23,7 +23,7 @@ class TDTestCase:
            tags (t1 int)
            '''
        )

        tdSql.execute(
            '''
            create table t1

@ -65,14 +65,14 @@ class TDTestCase:
            ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            '''
        )

    def check_result_auto_sqrt(self ,origin_query , pow_query):
        pow_result = tdSql.getResult(pow_query)
        origin_result = tdSql.getResult(origin_query)

        auto_result = []

        for row in origin_result:
            row_check = []
            for elem in row:

@ -92,7 +92,7 @@ class TDTestCase:
                if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None):
                    check_status = False
                elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
                    check_status = False
                else:
                    pass
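                # Editor's note: this tolerance check is one-sided; it only flags cases where the
                # locally computed value exceeds the SQL result by more than 1e-8. A symmetric
                # check would use abs(auto_result[row_index][col_index] - elem) > 0.00000001.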
        if not check_status:

@ -100,7 +100,7 @@ class TDTestCase:
            sys.exit(1)
        else:
            tdLog.info("sqrt value check pass , it works as expected , sql is \"%s\" " % pow_query)

    def test_errors(self):
        error_sql_lists = [
            "select sqrt from t1",

@ -134,42 +134,42 @@ class TDTestCase:
        ]
        for error_sql in error_sql_lists:
            tdSql.error(error_sql)

    def support_types(self):
        type_error_sql_lists = [
            "select sqrt(ts) from t1" ,
            "select sqrt(c7) from t1",
            "select sqrt(c8) from t1",
            "select sqrt(c9) from t1",
            "select sqrt(ts) from ct1" ,
            "select sqrt(c7) from ct1",
            "select sqrt(c8) from ct1",
            "select sqrt(c9) from ct1",
            "select sqrt(ts) from ct3" ,
            "select sqrt(c7) from ct3",
            "select sqrt(c8) from ct3",
            "select sqrt(c9) from ct3",
            "select sqrt(ts) from ct4" ,
            "select sqrt(c7) from ct4",
            "select sqrt(c8) from ct4",
            "select sqrt(c9) from ct4",
            "select sqrt(ts) from stb1" ,
            "select sqrt(c7) from stb1",
            "select sqrt(c8) from stb1",
            "select sqrt(c9) from stb1" ,

            "select sqrt(ts) from stbbb1" ,
            "select sqrt(c7) from stbbb1",

            "select sqrt(ts) from tbname",
            "select sqrt(c9) from tbname"
        ]

        for type_sql in type_error_sql_lists:
            tdSql.error(type_sql)

        type_sql_lists = [
            "select sqrt(c1) from t1",
            "select sqrt(c2) from t1",

@ -199,16 +199,16 @@ class TDTestCase:
            "select sqrt(c5) from stb1",
            "select sqrt(c6) from stb1",

            "select sqrt(c6) as alisb from stb1",
            "select sqrt(c6) alisb from stb1",
        ]

        for type_sql in type_sql_lists:
            tdSql.query(type_sql)

    def basic_sqrt_function(self):

        # basic query
        tdSql.query("select c1 from ct3")
        tdSql.checkRows(0)
        tdSql.query("select c1 from t1")

@ -249,7 +249,7 @@ class TDTestCase:
        tdSql.checkData(5, 5, None)

        self.check_result_auto_sqrt( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from t1", "select sqrt(abs(c1)), sqrt(abs(c2)) ,sqrt(abs(c3)), sqrt(abs(c4)), sqrt(abs(c5)) from t1")

        # used for sub table
        tdSql.query("select c2 ,sqrt(c2) from ct1")
        tdSql.checkData(0, 1, 298.140906284)

@ -265,7 +265,7 @@ class TDTestCase:
        tdSql.checkData(5 , 2, None)

        self.check_result_auto_sqrt( "select c1, c2, c3 , c4, c5 from ct1", "select sqrt(c1), sqrt(c2) ,sqrt(c3), sqrt(c4), sqrt(c5) from ct1")

        # nest query for sqrt functions
        tdSql.query("select c4 , sqrt(c4) ,sqrt(sqrt(c4)) , sqrt(sqrt(sqrt(c4))) from ct1;")
        tdSql.checkData(0 , 0 , 88)
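        # Editor's note: nested sqrt is a repeated root, so for c4 = 88 the expected values are
        # sqrt(88) ≈ 9.380832, sqrt(sqrt(88)) ≈ 3.062814 (4th root), and
        # sqrt(sqrt(sqrt(88))) ≈ 1.750090 (8th root).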

@ -283,18 +283,18 @@ class TDTestCase:
        tdSql.checkData(11 , 2 , None)
        tdSql.checkData(11 , 3 , None)

        # used for stable table
        tdSql.query("select sqrt(c1) from stb1")
        tdSql.checkRows(25)

        # used for non-existent tables
        tdSql.error("select sqrt(c1) from stbbb1")
        tdSql.error("select sqrt(c1) from tbname")
        tdSql.error("select sqrt(c1) from ct5")

        # mix with common col
        tdSql.query("select c1, sqrt(c1) from ct1")
        tdSql.checkData(0 , 0 ,8)
        tdSql.checkData(0 , 1 ,2.828427125)

@ -314,7 +314,7 @@ class TDTestCase:
        tdSql.checkData(0 , 1 ,None)
        tdSql.checkData(0 , 2 ,None)
        tdSql.checkData(0 , 3 ,None)

        tdSql.checkData(3 , 0 , 6)
        tdSql.checkData(3 , 1 ,2.449489743)
        tdSql.checkData(3 , 2 ,2.449489743)

@ -335,7 +335,7 @@ class TDTestCase:
        tdSql.query("select max(c5), count(c5) from stb1")
        tdSql.query("select max(c5), count(c5) from ct1")

        # bug fix for count
        tdSql.query("select count(c1) from ct4 ")
        tdSql.checkData(0,0,9)

@ -346,7 +346,7 @@ class TDTestCase:
        tdSql.query("select count(*) from stb1 ")
        tdSql.checkData(0,0,25)

        # # bug fix for compute
        tdSql.query("select c1, sqrt(c1) -0 ,sqrt(c1-4)-0 from ct4 ")
        tdSql.checkData(0, 0, None)
        tdSql.checkData(0, 1, None)

@ -397,16 +397,16 @@ class TDTestCase:
        tdSql.checkRows(13)

        # # bug for compute in functions
        # tdSql.query("select c1, abs(1/0) from ct1")
        # tdSql.checkData(0, 0, 8)
        # tdSql.checkData(0, 1, 1)

        tdSql.query("select c1, sqrt(1) from ct1")
        tdSql.checkData(0, 1, 1.000000000)
        tdSql.checkRows(13)

        # two cols with sqrt
        tdSql.query("select c1,c2, sqrt(c2) from ct1")
        tdSql.checkData(0, 2, 298.140906284)
        tdSql.checkData(1, 2, 278.885281074)
        tdSql.checkData(4, 2, 0.000000000)

@ -445,10 +445,10 @@ class TDTestCase:
        tdSql.checkData(0,3,1.000000000)
        tdSql.checkData(0,4,0.900000000)
        tdSql.checkData(0,5,1.000000000)

    def pow_Arithmetic(self):
        pass

    def check_boundary_values(self):

        tdSql.execute("drop database if exists bound_test")

@ -475,11 +475,11 @@ class TDTestCase:
            f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
        )
        self.check_result_auto_sqrt( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from sub1_bound ", "select sqrt(abs(c1)), sqrt(abs(c2)) ,sqrt(abs(c3)), sqrt(abs(c4)), sqrt(abs(c5)) from sub1_bound")

        self.check_result_auto_sqrt( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select sqrt(c1), sqrt(c2) ,sqrt(c3), sqrt(c3), sqrt(c2) ,sqrt(c1) from sub1_bound")

        self.check_result_auto_sqrt("select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from sub1_bound" , "select sqrt(abs(c1)) from sub1_bound" )

        # check basic elem for table per row
        tdSql.query("select sqrt(abs(c1)) ,sqrt(abs(c2)) , sqrt(abs(c3)) , sqrt(abs(c4)), sqrt(abs(c5)), sqrt(abs(c6)) from sub1_bound ")
        tdSql.checkData(0,0,math.sqrt(2147483647))

@ -504,7 +504,7 @@ class TDTestCase:
        tdSql.checkData(0,1,math.sqrt(9223372036854775807))
        tdSql.checkData(0,2,math.sqrt(32767.000000000))
        tdSql.checkData(0,3,math.sqrt(63.500000000))

    def support_super_table_test(self):
        tdSql.execute(" use db ")
        self.check_result_auto_sqrt( " select c5 from stb1 order by ts " , "select sqrt(c5) from stb1 order by ts" )

@ -522,42 +522,42 @@ class TDTestCase:
        tdSql.prepare()

        tdLog.printNoPrefix("==========step1:create table ==============")
        self.prepare_datas()

        tdLog.printNoPrefix("==========step2:test errors ==============")
        self.test_errors()

        tdLog.printNoPrefix("==========step3:support types ============")
        self.support_types()

        tdLog.printNoPrefix("==========step4: sqrt basic query ============")
        self.basic_sqrt_function()

        tdLog.printNoPrefix("==========step5: big number sqrt query ============")
        self.test_big_number()

        tdLog.printNoPrefix("==========step6: base number for sqrt query ============")
        self.pow_base_test()

        tdLog.printNoPrefix("==========step7: sqrt boundary query ============")
        self.check_boundary_values()

        tdLog.printNoPrefix("==========step8: sqrt filter query ============")
        self.abs_func_filter()

        tdLog.printNoPrefix("==========step9: check sqrt result of stable query ============")
        self.support_super_table_test()

    def stop(self):
        tdSql.close()
@ -11,7 +11,7 @@ from util.sql import *
from util.cases import *

class TDTestCase:
    updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
                     "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
                     "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "udfDebugFlag": 143}

@ -476,7 +476,7 @@ class TDTestCase:
        self.check_unit_time()
        self.query_precision()

    def stop(self):
Some files were not shown because too many files have changed in this diff