Merge branch '3.0' of https://github.com/taosdata/TDengine into test/TD-19706

chenhaoran 2022-10-26 14:33:27 +08:00
commit d1ad4d0f2a
67 changed files with 1784 additions and 677 deletions

View File

@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
GIT_TAG cc43ef0
GIT_TAG a11131c
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

View File

@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG bc99376
GIT_TAG f9c1d32
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

View File

@ -161,6 +161,7 @@ typedef enum EStreamType {
STREAM_RETRIEVE,
STREAM_PULL_DATA,
STREAM_PULL_OVER,
STREAM_FILL_OVER,
} EStreamType;
typedef struct {

View File

@ -198,6 +198,9 @@ enum {
TD_DEF_MSG_TYPE(TDMT_VND_CONSUME, "vnode-consume", SMqPollReq, SMqDataBlkRsp)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TRIGGER, "vnode-stream-trigger", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_DISPATCH_WRITE, "vnode-stream-task-dispatch-write", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_RECOVER_STEP1, "vnode-stream-recover1", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_RECOVER_STEP2, "vnode-stream-recover2", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_RECOVER_FINISH, "vnode-stream-finish", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_CREATE_SMA, "vnode-create-sma", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_CANCEL_SMA, "vnode-cancel-sma", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_DROP_SMA, "vnode-drop-sma", NULL, NULL)

View File

@ -133,13 +133,6 @@ int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* table
int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds, bool* hasMore, SLocalFetch* pLocal);
int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pBlock, uint64_t* useconds);
/**
* kill the ongoing query and free the query handle and corresponding resources automatically
* @param tinfo qhandle
* @return
*/
int32_t qKillTask(qTaskInfo_t tinfo);
/**
* kill the ongoing query asynchronously
* @param tinfo qhandle
@ -176,6 +169,7 @@ int32_t qSerializeTaskStatus(qTaskInfo_t tinfo, char** pOutput, int32_t* len);
int32_t qDeserializeTaskStatus(qTaskInfo_t tinfo, const char* pInput, int32_t len);
STimeWindow getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int64_t key);
/**
* return the scan info, in the form of tuple of two items, including table uid and current timestamp
* @param tinfo
@ -207,9 +201,11 @@ int32_t qExtractStreamScanner(qTaskInfo_t tinfo, void** scanner);
int32_t qStreamInput(qTaskInfo_t tinfo, void* pItem);
int32_t qStreamPrepareRecover(qTaskInfo_t tinfo, int64_t startVer, int64_t endVer);
STimeWindow getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int64_t key);
int32_t qStreamSetParamForRecover(qTaskInfo_t tinfo);
int32_t qStreamSourceRecoverStep1(qTaskInfo_t tinfo, int64_t ver);
int32_t qStreamSourceRecoverStep2(qTaskInfo_t tinfo, int64_t ver);
int32_t qStreamRecoverFinish(qTaskInfo_t tinfo);
int32_t qStreamRestoreParam(qTaskInfo_t tinfo);
#ifdef __cplusplus
}
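
The five qStream* recovery hooks added above are the executor-side half of fill-history recovery. Below is an illustrative call-order sketch only, not code from this commit; the real driver sits in the stream/tq layer and splits these calls across message round-trips.

#include "executor.h"  // assumed include for the declarations in this hunk

// Illustrative only: the executor-side calls a fill-history source task is expected to make,
// mirroring the tq-layer flow later in this commit.
static int32_t recoverSourceExecSketch(qTaskInfo_t tinfo, int64_t ver) {
  if (qStreamSetParamForRecover(tinfo) < 0) return -1;       // save normal params, enter recovery mode
  if (qStreamSourceRecoverStep1(tinfo, ver) < 0) return -1;  // step 1: scan history up to ver
  if (qStreamSourceRecoverStep2(tinfo, ver) < 0) return -1;  // step 2: catch up to the latest version
  return qStreamRestoreParam(tinfo);                         // restore the saved params
}
// (qStreamRecoverFinish is presumably the downstream/agg-side counterpart, invoked once all children report finish.)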

View File

@ -47,7 +47,9 @@ enum {
TASK_STATUS__FAIL,
TASK_STATUS__STOP,
TASK_STATUS__RECOVER_DOWNSTREAM,
TASK_STATUS__RECOVER_SELF,
TASK_STATUS__RECOVER_PREPARE,
TASK_STATUS__RECOVER1,
TASK_STATUS__RECOVER2,
};
enum {
@ -329,6 +331,9 @@ typedef struct SStreamTask {
// state backend
SStreamState* pState;
// do not serialize
int32_t recoverWaitingChild;
} SStreamTask;
int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo);
@ -435,6 +440,20 @@ typedef struct {
int32_t rspToTaskId;
} SStreamRetrieveRsp;
typedef struct {
int64_t streamId;
int32_t taskId;
} SStreamRecoverStep1Req, SStreamRecoverStep2Req;
typedef struct {
int64_t streamId;
int32_t taskId;
int32_t childId;
} SStreamRecoverFinishReq;
int32_t tEncodeSStreamRecoverFinishReq(SEncoder* pEncoder, const SStreamRecoverFinishReq* pReq);
int32_t tDecodeSStreamRecoverFinishReq(SDecoder* pDecoder, SStreamRecoverFinishReq* pReq);
#if 0
typedef struct {
int64_t streamId;
@ -521,8 +540,29 @@ int32_t streamProcessRetrieveRsp(SStreamTask* pTask, SStreamRetrieveRsp* pRsp);
int32_t streamTryExec(SStreamTask* pTask);
int32_t streamSchedExec(SStreamTask* pTask);
typedef int32_t FTaskExpand(void* ahandle, SStreamTask* pTask);
int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz);
// recover and fill history
// common
int32_t streamSetParamForRecover(SStreamTask* pTask);
int32_t streamRestoreParam(SStreamTask* pTask);
int32_t streamSetStatusNormal(SStreamTask* pTask);
// source level
int32_t streamSourceRecoverPrepareStep1(SStreamTask* pTask, int64_t ver);
int32_t streamBuildSourceRecover1Req(SStreamTask* pTask, SStreamRecoverStep1Req* pReq);
int32_t streamSourceRecoverScanStep1(SStreamTask* pTask);
int32_t streamBuildSourceRecover2Req(SStreamTask* pTask, SStreamRecoverStep2Req* pReq);
int32_t streamSourceRecoverScanStep2(SStreamTask* pTask, int64_t ver);
int32_t streamDispatchRecoverFinishReq(SStreamTask* pTask);
// agg level
int32_t streamAggRecoverPrepare(SStreamTask* pTask);
// int32_t streamAggChildrenRecoverFinish(SStreamTask* pTask);
int32_t streamProcessRecoverFinishReq(SStreamTask* pTask, int32_t childId);
// expand and deploy
typedef int32_t FTaskExpand(void* ahandle, SStreamTask* pTask, int64_t ver);
// meta
typedef struct SStreamMeta {
char* path;
TDB* db;
@ -533,12 +573,13 @@ typedef struct SStreamMeta {
void* ahandle;
TXN txn;
FTaskExpand* expandFunc;
int32_t vgId;
} SStreamMeta;
SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc);
SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc, int32_t vgId);
void streamMetaClose(SStreamMeta* streamMeta);
// int32_t streamMetaAddTask(SStreamMeta* pMeta, SStreamTask* pTask);
int32_t streamMetaAddTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask);
int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t startVer, char* msg, int32_t msgLen);
int32_t streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId);
SStreamTask* streamMetaGetTask(SStreamMeta* pMeta, int32_t taskId);
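
tEncodeSStreamRecoverFinishReq / tDecodeSStreamRecoverFinishReq are only declared in this hunk. A minimal sketch of what the pair might look like, assuming the usual tencode.h helpers; the actual implementation lives in the stream library and may differ.

#include "tstream.h"  // assumed include; brings in SEncoder/SDecoder via tencode.h

int32_t tEncodeSStreamRecoverFinishReq(SEncoder* pEncoder, const SStreamRecoverFinishReq* pReq) {
  if (tStartEncode(pEncoder) < 0) return -1;
  if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;
  if (tEncodeI32(pEncoder, pReq->taskId) < 0) return -1;
  if (tEncodeI32(pEncoder, pReq->childId) < 0) return -1;
  tEndEncode(pEncoder);
  return 0;
}

int32_t tDecodeSStreamRecoverFinishReq(SDecoder* pDecoder, SStreamRecoverFinishReq* pReq) {
  if (tStartDecode(pDecoder) < 0) return -1;
  if (tDecodeI64(pDecoder, &pReq->streamId) < 0) return -1;
  if (tDecodeI32(pDecoder, &pReq->taskId) < 0) return -1;
  if (tDecodeI32(pDecoder, &pReq->childId) < 0) return -1;
  tEndDecode(pDecoder);
  return 0;
}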

View File

@ -33,16 +33,16 @@ extern "C" {
#define wTrace(...) { if (wDebugFlag & DEBUG_TRACE) { taosPrintLog("WAL ", DEBUG_TRACE, wDebugFlag, __VA_ARGS__); }}
// clang-format on
#define WAL_PROTO_VER 0
#define WAL_NOSUFFIX_LEN 20
#define WAL_SUFFIX_AT (WAL_NOSUFFIX_LEN + 1)
#define WAL_LOG_SUFFIX "log"
#define WAL_INDEX_SUFFIX "idx"
#define WAL_REFRESH_MS 1000
#define WAL_PATH_LEN (TSDB_FILENAME_LEN + 12)
#define WAL_FILE_LEN (WAL_PATH_LEN + 32)
#define WAL_MAGIC 0xFAFBFCFDF4F3F2F1ULL
#define WAL_SCAN_BUF_SIZE (1024 * 1024 * 3)
#define WAL_PROTO_VER 0
#define WAL_NOSUFFIX_LEN 20
#define WAL_SUFFIX_AT (WAL_NOSUFFIX_LEN + 1)
#define WAL_LOG_SUFFIX "log"
#define WAL_INDEX_SUFFIX "idx"
#define WAL_REFRESH_MS 1000
#define WAL_PATH_LEN (TSDB_FILENAME_LEN + 12)
#define WAL_FILE_LEN (WAL_PATH_LEN + 32)
#define WAL_MAGIC 0xFAFBFCFDF4F3F2F1ULL
#define WAL_SCAN_BUF_SIZE (1024 * 1024 * 3)
#define WAL_RECOV_SIZE_LIMIT (100 * WAL_SCAN_BUF_SIZE)
typedef enum {
@ -204,7 +204,6 @@ SWalRef *walRefCommittedVer(SWal *);
SWalRef *walOpenRef(SWal *);
void walCloseRef(SWal *pWal, int64_t refId);
int32_t walRefVer(SWalRef *, int64_t ver);
int32_t walPreRefVer(SWalRef *pRef, int64_t ver);
void walUnrefVer(SWalRef *);
// helper function for raft

View File

@ -38,7 +38,6 @@ extern "C" {
#define TARRAY_MIN_SIZE 8
#define TARRAY_GET_ELEM(array, index) ((void*)((char*)((array)->pData) + (index) * (array)->elemSize))
#define TARRAY_ELEM_IDX(array, ele) (POINTER_DISTANCE(ele, (array)->pData) / (array)->elemSize)
#define TARRAY_GET_START(array) ((array)->pData)
typedef struct SArray {
size_t size;
@ -71,14 +70,6 @@ int32_t taosArrayEnsureCap(SArray* pArray, size_t tsize);
*/
void* taosArrayAddBatch(SArray* pArray, const void* pData, int32_t nEles);
/**
*
* @param pArray
* @param pData position array list
* @param numOfElems the number of removed position
*/
void taosArrayRemoveBatch(SArray* pArray, const int32_t* pData, int32_t numOfElems);
/**
*
* @param pArray
@ -266,13 +257,6 @@ void* taosArraySearch(const SArray* pArray, const void* key, __compar_fn_t compa
*/
int32_t taosArraySearchIdx(const SArray* pArray, const void* key, __compar_fn_t comparFn, int32_t flags);
/**
* search the array
* @param pArray
* @param key
*/
char* taosArraySearchString(const SArray* pArray, const char* key, __compar_fn_t comparFn, int32_t flags);
/**
* sort the pointer data in the array
* @param pArray
@ -286,8 +270,6 @@ void taosArraySortPWithExt(SArray* pArray, __ext_compar_fn_t fn, const void* par
int32_t taosEncodeArray(void** buf, const SArray* pArray, FEncode encode);
void* taosDecodeArray(const void* buf, SArray** pArray, FDecode decode, int32_t dataSz);
char* taosShowStrArray(const SArray* pArray);
/**
* swap array
* @param a
@ -295,6 +277,7 @@ char* taosShowStrArray(const SArray* pArray);
* @return
*/
void taosArraySwap(SArray* a, SArray* b);
#ifdef __cplusplus
}
#endif

View File

@ -1732,6 +1732,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
// in no topic status, delayed task also need to be processed
if (atomic_load_8(&tmq->status) == TMQ_CONSUMER_STATUS__INIT) {
tscDebug("consumer:%" PRId64 ", poll return since consumer status is init", tmq->consumerId);
return NULL;
}

View File

@ -1707,8 +1707,6 @@ void* tDecodeDataBlock(const void* buf, SSDataBlock* pBlock) {
if (IS_VAR_DATA_TYPE(data.info.type)) {
buf = taosDecodeBinary(buf, (void**)&data.varmeta.offset, pBlock->info.rows * sizeof(int32_t));
data.varmeta.length = pBlock->info.rows * sizeof(int32_t);
data.varmeta.allocLen = data.varmeta.length;
} else {
buf = taosDecodeBinary(buf, (void**)&data.nullbitmap, BitmapLen(pBlock->info.rows));
}
@ -1716,6 +1714,10 @@ void* tDecodeDataBlock(const void* buf, SSDataBlock* pBlock) {
int32_t len = 0;
buf = taosDecodeFixedI32(buf, &len);
buf = taosDecodeBinary(buf, (void**)&data.pData, len);
if (IS_VAR_DATA_TYPE(data.info.type)) {
data.varmeta.length = len;
data.varmeta.allocLen = len;
}
taosArrayPush(pBlock->pDataBlock, &data);
}
return (void*)buf;

View File

@ -4853,6 +4853,7 @@ int32_t tSerializeSCMCreateStreamReq(void *buf, int32_t bufLen, const SCMCreateS
if (tEncodeCStr(&encoder, pReq->sourceDB) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->targetStbFullName) < 0) return -1;
if (tEncodeI8(&encoder, pReq->igExists) < 0) return -1;
if (tEncodeI8(&encoder, pReq->fillHistory) < 0) return -1;
if (tEncodeI32(&encoder, sqlLen) < 0) return -1;
if (tEncodeI32(&encoder, astLen) < 0) return -1;
if (tEncodeI8(&encoder, pReq->triggerType) < 0) return -1;
@ -4889,6 +4890,7 @@ int32_t tDeserializeSCMCreateStreamReq(void *buf, int32_t bufLen, SCMCreateStrea
if (tDecodeCStrTo(&decoder, pReq->sourceDB) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->targetStbFullName) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->igExists) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->fillHistory) < 0) return -1;
if (tDecodeI32(&decoder, &sqlLen) < 0) return -1;
if (tDecodeI32(&decoder, &astLen) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->triggerType) < 0) return -1;

View File

@ -167,7 +167,8 @@ void *tsdbGetIdx(SMeta *pMeta);
void *tsdbGetIvtIdx(SMeta *pMeta);
uint64_t getReaderMaxVersion(STsdbReader *pReader);
int32_t tsdbCacherowsReaderOpen(void *pVnode, int32_t type, SArray *pTableIdList, int32_t numOfCols, void **pReader);
int32_t tsdbCacherowsReaderOpen(void *pVnode, int32_t type, SArray *pTableIdList, int32_t numOfCols, uint64_t suid,
void **pReader);
int32_t tsdbRetrieveCacheRows(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, SArray *pTableUids);
void *tsdbCacherowsReaderClose(void *pReader);
int32_t tsdbGetTableSchema(SVnode *pVnode, int64_t uid, STSchema **pSchema, int64_t *suid);
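
The extra suid argument lets the cache-rows reader skip the per-table getTableSuidByUid lookup it previously performed on every open. A hedged caller sketch follows; every argument is a placeholder for values the caller (presumably the executor's cache/last-row scan operator) already holds, and the retrieve flags are the cache-scan flags used later in this commit.

// Hypothetical caller fragment; not code from this commit.
static int32_t cacheScanSketch(void* pVnode, SArray* pTableIdList, int32_t numOfCols, uint64_t suid,
                               SSDataBlock* pResBlock, const int32_t* slotIds, SArray* pTableUids) {
  void*   pCacheReader = NULL;
  int32_t code = tsdbCacherowsReaderOpen(pVnode, CACHESCAN_RETRIEVE_LAST_ROW | CACHESCAN_RETRIEVE_TYPE_ALL,
                                         pTableIdList, numOfCols, suid, &pCacheReader);
  if (code != TSDB_CODE_SUCCESS) return code;
  code = tsdbRetrieveCacheRows(pCacheReader, pResBlock, slotIds, pTableUids);
  pCacheReader = tsdbCacherowsReaderClose(pCacheReader);
  return code;
}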

View File

@ -181,15 +181,15 @@ int32_t tqOffsetDelete(STqOffsetStore* pStore, const char* subscribeKey)
int32_t tqOffsetCommitFile(STqOffsetStore* pStore);
// tqSink
void tqSinkToTableMerge(SStreamTask* pTask, void* vnode, int64_t ver, void* data);
// void tqSinkToTableMerge(SStreamTask* pTask, void* vnode, int64_t ver, void* data);
void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* data);
// tqOffset
char* tqOffsetBuildFName(const char* path, int32_t ver);
char* tqOffsetBuildFName(const char* path, int32_t fVer);
int32_t tqOffsetRestoreFromFile(STqOffsetStore* pStore, const char* fname);
// tqStream
int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask);
int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver);
#ifdef __cplusplus
}

View File

@ -298,29 +298,6 @@ int32_t tsdbMerge(STsdb *pTsdb);
#define TSDB_CACHE_LAST_ROW(c) (((c).cacheLast & 1) > 0)
#define TSDB_CACHE_LAST(c) (((c).cacheLast & 2) > 0)
// tsdbCache ==============================================================================================
typedef struct {
TSKEY ts;
SColVal colVal;
} SLastCol;
int32_t tsdbOpenCache(STsdb *pTsdb);
void tsdbCloseCache(STsdb *pTsdb);
int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, STSRow *row, STsdb *pTsdb);
int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, STSRow *row, bool dup);
int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUHandle **h);
int32_t tsdbCacheGetLastrowH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUHandle **h);
int32_t tsdbCacheRelease(SLRUCache *pCache, LRUHandle *h);
int32_t tsdbCacheDeleteLastrow(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey);
int32_t tsdbCacheDeleteLast(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey);
int32_t tsdbCacheDelete(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey);
void tsdbCacheSetCapacity(SVnode *pVnode, size_t capacity);
size_t tsdbCacheGetCapacity(SVnode *pVnode);
int32_t tsdbCacheLastArray2Row(SArray *pLastArray, STSRow **ppRow, STSchema *pSchema);
// tsdbDiskData ==============================================================================================
int32_t tDiskDataBuilderCreate(SDiskDataBuilder **ppBuilder);
void *tDiskDataBuilderDestroy(SDiskDataBuilder *pBuilder);
@ -729,6 +706,45 @@ void resetLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo);
void getLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo, int64_t *blocks, double *el);
void *destroyLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo);
// tsdbCache ==============================================================================================
typedef struct SCacheRowsReader {
SVnode *pVnode;
STSchema *pSchema;
uint64_t uid;
uint64_t suid;
char **transferBuf; // todo remove it soon
int32_t numOfCols;
int32_t type;
int32_t tableIndex; // currently returned result tables
SArray *pTableList; // table id list
SSttBlockLoadInfo *pLoadInfo;
STsdbReadSnap *pReadSnap;
SDataFReader *pDataFReader;
SDataFReader *pDataFReaderLast;
} SCacheRowsReader;
typedef struct {
TSKEY ts;
SColVal colVal;
} SLastCol;
int32_t tsdbOpenCache(STsdb *pTsdb);
void tsdbCloseCache(STsdb *pTsdb);
int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, STSRow *row, STsdb *pTsdb);
int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, STSRow *row, bool dup);
int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, SCacheRowsReader *pr, LRUHandle **h);
int32_t tsdbCacheGetLastrowH(SLRUCache *pCache, tb_uid_t uid, SCacheRowsReader *pr, LRUHandle **h);
int32_t tsdbCacheRelease(SLRUCache *pCache, LRUHandle *h);
int32_t tsdbCacheDeleteLastrow(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey);
int32_t tsdbCacheDeleteLast(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey);
int32_t tsdbCacheDelete(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey);
void tsdbCacheSetCapacity(SVnode *pVnode, size_t capacity);
size_t tsdbCacheGetCapacity(SVnode *pVnode);
int32_t tsdbCacheLastArray2Row(SArray *pLastArray, STSRow **ppRow, STSchema *pSchema);
// ========== inline functions ==========
static FORCE_INLINE int32_t tsdbKeyCmprFn(const void *p1, const void *p2) {
TSDBKEY *pKey1 = (TSDBKEY *)p1;

View File

@ -98,7 +98,7 @@ STQ* tqOpen(const char* path, SVnode* pVnode) {
ASSERT(0);
}
pTq->pStreamMeta = streamMetaOpen(path, pTq, (FTaskExpand*)tqExpandTask);
pTq->pStreamMeta = streamMetaOpen(path, pTq, (FTaskExpand*)tqExpandTask, pTq->pVnode->config.vgId);
if (pTq->pStreamMeta == NULL) {
ASSERT(0);
}
@ -872,7 +872,7 @@ int32_t tqProcessVgChangeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLe
return 0;
}
int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) {
int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
if (pTask->taskLevel == TASK_LEVEL__AGG) {
ASSERT(taosArrayGetSize(pTask->childEpInfo) != 0);
}
@ -891,6 +891,8 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) {
pTask->pMsgCb = &pTq->pVnode->msgCb;
pTask->startVer = ver;
// expand executor
if (pTask->taskLevel == TASK_LEVEL__SOURCE) {
pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask, false, -1, -1);
@ -906,6 +908,10 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) {
};
pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle);
ASSERT(pTask->exec.executor);
if (pTask->fillHistory) {
pTask->taskStatus = TASK_STATUS__RECOVER_PREPARE;
}
} else if (pTask->taskLevel == TASK_LEVEL__AGG) {
pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask, false, -1, -1);
if (pTask->pState == NULL) {
@ -945,8 +951,163 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) {
}
int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) {
//
return streamMetaAddSerializedTask(pTq->pStreamMeta, version, msg, msgLen);
int32_t code;
#if 0
code = streamMetaAddSerializedTask(pTq->pStreamMeta, version, msg, msgLen);
if (code < 0) return code;
#endif
// 1.deserialize msg and build task
SStreamTask* pTask = taosMemoryCalloc(1, sizeof(SStreamTask));
if (pTask == NULL) {
return -1;
}
SDecoder decoder;
tDecoderInit(&decoder, (uint8_t*)msg, msgLen);
code = tDecodeSStreamTask(&decoder, pTask);
if (code < 0) {
tDecoderClear(&decoder);
taosMemoryFree(pTask);
return -1;
}
tDecoderClear(&decoder);
// 2.save task
code = streamMetaAddTask(pTq->pStreamMeta, version, pTask);
if (code < 0) {
return -1;
}
// 3.go through recover steps to fill history
if (pTask->fillHistory) {
streamSetParamForRecover(pTask);
if (pTask->taskLevel == TASK_LEVEL__SOURCE) {
streamSourceRecoverPrepareStep1(pTask, version);
SStreamRecoverStep1Req req;
streamBuildSourceRecover1Req(pTask, &req);
void* serializedReq = (void*)&req;
int32_t len = sizeof(SStreamRecoverStep1Req);
SRpcMsg rpcMsg = {
.contLen = len,
.pCont = serializedReq,
.msgType = TDMT_VND_STREAM_RECOVER_STEP1,
};
tmsgPutToQueue(&pTq->pVnode->msgCb, STREAM_QUEUE, &rpcMsg);
} else if (pTask->taskLevel == TASK_LEVEL__AGG) {
streamAggRecoverPrepare(pTask);
} else if (pTask->taskLevel == TASK_LEVEL__SINK) {
// do nothing
}
}
return 0;
}
int32_t tqProcessTaskRecover1Req(STQ* pTq, char* msg, int32_t msgLen) {
int32_t code;
SStreamRecoverStep1Req* pReq = (SStreamRecoverStep1Req*)msg;
SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, pReq->taskId);
if (pTask == NULL) {
return -1;
}
// check param
int64_t fillVer1 = pTask->startVer;
if (fillVer1 <= 0) {
ASSERT(0);
return -1;
}
// do recovery step 1
streamSourceRecoverScanStep1(pTask);
// build msg to launch next step
SStreamRecoverStep2Req req;
code = streamBuildSourceRecover2Req(pTask, &req);
if (code < 0) {
return -1;
}
// serialize msg
int32_t len = sizeof(SStreamRecoverStep2Req);
void* serializedReq = (void*)&req;
// dispatch msg
SRpcMsg rpcMsg = {
.code = 0,
.contLen = len,
.msgType = TDMT_VND_STREAM_RECOVER_STEP2,
.pCont = (void*)serializedReq,
};
tmsgPutToQueue(&pTq->pVnode->msgCb, WRITE_QUEUE, &rpcMsg);
return 0;
}
int32_t tqProcessTaskRecover2Req(STQ* pTq, int64_t version, char* msg, int32_t msgLen) {
int32_t code;
SStreamRecoverStep2Req* pReq = (SStreamRecoverStep2Req*)msg;
SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, pReq->taskId);
if (pTask == NULL) {
return -1;
}
// do recovery step 2
code = streamSourceRecoverScanStep2(pTask, version);
if (code < 0) {
return -1;
}
// restore param
code = streamRestoreParam(pTask);
if (code < 0) {
return -1;
}
// set status normal
code = streamSetStatusNormal(pTask);
if (code < 0) {
return -1;
}
// dispatch recover finish req to all related downstream task
code = streamDispatchRecoverFinishReq(pTask);
if (code < 0) {
return -1;
}
return 0;
}
int32_t tqProcessTaskRecoverFinishReq(STQ* pTq, char* msg, int32_t msgLen) {
int32_t code;
// deserialize
int32_t len;
SStreamRecoverFinishReq req;
SDecoder decoder;
tDecoderInit(&decoder, msg, sizeof(SStreamRecoverFinishReq));
tDecodeSStreamRecoverFinishReq(&decoder, &req);
tDecoderClear(&decoder);
// find task
SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, req.taskId);
if (pTask == NULL) {
return -1;
}
// do process request
if (streamProcessRecoverFinishReq(pTask, req.childId) < 0) {
return -1;
}
return 0;
}
int32_t tqProcessDelReq(STQ* pTq, void* pReq, int32_t len, int64_t ver) {
@ -1081,6 +1242,7 @@ int32_t tqProcessSubmitReq(STQ* pTq, SSubmitReq* pReq, int64_t ver) {
if (pIter == NULL) break;
SStreamTask* pTask = *(SStreamTask**)pIter;
if (pTask->taskLevel != TASK_LEVEL__SOURCE) continue;
if (pTask->taskStatus == TASK_STATUS__RECOVER_PREPARE || pTask->taskStatus == TASK_STATUS__RECOVER1) continue;
qDebug("data submit enqueue stream task: %d, ver: %" PRId64, pTask->taskId, ver);

View File

@ -85,11 +85,11 @@ int32_t tqScanData(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffs
while (1) {
SSDataBlock* pDataBlock = NULL;
uint64_t ts = 0;
tqDebug("tmq task start to execute");
tqDebug("vgId:%d, tmq task start to execute", pTq->pVnode->config.vgId);
if (qExecTask(task, &pDataBlock, &ts) < 0) {
ASSERT(0);
}
tqDebug("tmq task executed, get %p", pDataBlock);
tqDebug("vgId:%d, tmq task executed, get %p", pTq->pVnode->config.vgId, pDataBlock);
if (pDataBlock == NULL) {
break;

View File

@ -22,10 +22,10 @@ struct STqOffsetStore {
SHashObj* pHash; // SHashObj<subscribeKey, offset>
};
char* tqOffsetBuildFName(const char* path, int32_t ver) {
char* tqOffsetBuildFName(const char* path, int32_t fVer) {
int32_t len = strlen(path);
char* fname = taosMemoryCalloc(1, len + 40);
snprintf(fname, len + 40, "%s/offset-ver%d", path, ver);
snprintf(fname, len + 40, "%s/offset-ver%d", path, fVer);
return fname;
}

View File

@ -525,7 +525,7 @@ int tqReaderSetTbUidList(STqReader* pReader, const SArray* tbUidList) {
if (pReader->tbIdHash) {
taosHashClear(pReader->tbIdHash);
} else {
pReader->tbIdHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
pReader->tbIdHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK);
}
if (pReader->tbIdHash == NULL) {
@ -543,7 +543,7 @@ int tqReaderSetTbUidList(STqReader* pReader, const SArray* tbUidList) {
int tqReaderAddTbUidList(STqReader* pReader, const SArray* tbUidList) {
if (pReader->tbIdHash == NULL) {
pReader->tbIdHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
pReader->tbIdHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK);
if (pReader->tbIdHash == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;

View File

@ -530,6 +530,7 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d
taosArrayDestroy(tagArray);
}
#if 0
void tqSinkToTableMerge(SStreamTask* pTask, void* vnode, int64_t ver, void* data) {
const SArray* pRes = (const SArray*)data;
SVnode* pVnode = (SVnode*)vnode;
@ -585,3 +586,4 @@ void tqSinkToTableMerge(SStreamTask* pTask, void* vnode, int64_t ver, void* data
tqDebug("failed to put into write-queue since %s", terrstr());
}
}
#endif

View File

@ -26,7 +26,7 @@ int32_t tsdbOpenCache(STsdb *pTsdb) {
goto _err;
}
taosLRUCacheSetStrictCapacity(pCache, true);
taosLRUCacheSetStrictCapacity(pCache, false);
taosThreadMutexInit(&pTsdb->lruMutex, NULL);
@ -230,7 +230,21 @@ int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, ST
break;
}
} else {
taosArraySet(pLast, iCol, &(SLastCol){.ts = keyTs, .colVal = colVal});
SLastCol lastCol = {.ts = keyTs, .colVal = colVal};
if (IS_VAR_DATA_TYPE(colVal.type) && colVal.value.nData > 0) {
SLastCol *pLastCol = (SLastCol *)taosArrayGet(pLast, iCol);
taosMemoryFree(pLastCol->colVal.value.pData);
lastCol.colVal.value.pData = taosMemoryMalloc(colVal.value.nData);
if (lastCol.colVal.value.pData == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
code = TSDB_CODE_OUT_OF_MEMORY;
goto _invalidate;
}
memcpy(lastCol.colVal.value.pData, colVal.value.pData, colVal.value.nData);
}
taosArraySet(pLast, iCol, &lastCol);
}
}
}
@ -342,7 +356,21 @@ int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, STSRow *row, STsdb
break;
}
} else {
taosArraySet(pLast, iCol, &(SLastCol){.ts = keyTs, .colVal = colVal});
SLastCol lastCol = {.ts = keyTs, .colVal = colVal};
if (IS_VAR_DATA_TYPE(colVal.type) && colVal.value.nData > 0) {
SLastCol *pLastCol = (SLastCol *)taosArrayGet(pLast, iCol);
taosMemoryFree(pLastCol->colVal.value.pData);
lastCol.colVal.value.pData = taosMemoryMalloc(colVal.value.nData);
if (lastCol.colVal.value.pData == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
code = TSDB_CODE_OUT_OF_MEMORY;
goto _invalidate;
}
memcpy(lastCol.colVal.value.pData, colVal.value.pData, colVal.value.nData);
}
taosArraySet(pLast, iCol, &lastCol);
}
}
}
@ -488,11 +516,12 @@ typedef struct {
int32_t nFileSet;
int32_t iFileSet;
SArray *aDFileSet;
SDataFReader *pDataFReader;
SDataFReader **pDataFReader;
TSDBROW row;
SMergeTree mergeTree;
SMergeTree *pMergeTree;
SMergeTree mergeTree;
SMergeTree *pMergeTree;
SSttBlockLoadInfo *pLoadInfo;
} SFSLastNextRowIter;
static int32_t getNextRowFromFSLast(void *iter, TSDBROW **ppRow) {
@ -519,18 +548,20 @@ static int32_t getNextRowFromFSLast(void *iter, TSDBROW **ppRow) {
return code;
}
if (state->pDataFReader != NULL) {
tsdbDataFReaderClose(&state->pDataFReader);
state->pDataFReader = NULL;
if (*state->pDataFReader == NULL || (*state->pDataFReader)->pSet->fid != pFileSet->fid) {
if (*state->pDataFReader != NULL) {
tsdbDataFReaderClose(state->pDataFReader);
resetLastBlockLoadInfo(state->pLoadInfo);
}
code = tsdbDataFReaderOpen(state->pDataFReader, state->pTsdb, pFileSet);
if (code) goto _err;
}
code = tsdbDataFReaderOpen(&state->pDataFReader, state->pTsdb, pFileSet);
if (code) goto _err;
SSttBlockLoadInfo *pLoadInfo = tCreateLastBlockLoadInfo(state->pTSchema, NULL, 0);
tMergeTreeOpen(&state->mergeTree, 1, state->pDataFReader, state->suid, state->uid,
tMergeTreeOpen(&state->mergeTree, 1, *state->pDataFReader, state->suid, state->uid,
&(STimeWindow){.skey = TSKEY_MIN, .ekey = TSKEY_MAX},
&(SVersionRange){.minVer = 0, .maxVer = UINT64_MAX}, pLoadInfo, true, NULL);
&(SVersionRange){.minVer = 0, .maxVer = UINT64_MAX}, state->pLoadInfo, false, NULL);
state->pMergeTree = &state->mergeTree;
bool hasVal = tMergeTreeNext(&state->mergeTree);
if (!hasVal) {
@ -554,10 +585,10 @@ static int32_t getNextRowFromFSLast(void *iter, TSDBROW **ppRow) {
}
_err:
if (state->pDataFReader) {
/*if (state->pDataFReader) {
tsdbDataFReaderClose(&state->pDataFReader);
state->pDataFReader = NULL;
}
}*/
if (state->pMergeTree != NULL) {
tMergeTreeClose(state->pMergeTree);
state->pMergeTree = NULL;
@ -575,12 +606,12 @@ int32_t clearNextRowFromFSLast(void *iter) {
if (!state) {
return code;
}
/*
if (state->pDataFReader) {
tsdbDataFReaderClose(&state->pDataFReader);
state->pDataFReader = NULL;
}
*/
if (state->pMergeTree != NULL) {
tMergeTreeClose(state->pMergeTree);
state->pMergeTree = NULL;
@ -597,27 +628,28 @@ typedef enum SFSNEXTROWSTATES {
} SFSNEXTROWSTATES;
typedef struct SFSNextRowIter {
SFSNEXTROWSTATES state; // [input]
STsdb *pTsdb; // [input]
SBlockIdx *pBlockIdxExp; // [input]
STSchema *pTSchema; // [input]
tb_uid_t suid;
tb_uid_t uid;
int32_t nFileSet;
int32_t iFileSet;
SArray *aDFileSet;
SDataFReader *pDataFReader;
SArray *aBlockIdx;
SBlockIdx *pBlockIdx;
SMapData blockMap;
int32_t nBlock;
int32_t iBlock;
SDataBlk block;
SBlockData blockData;
SBlockData *pBlockData;
int32_t nRow;
int32_t iRow;
TSDBROW row;
SFSNEXTROWSTATES state; // [input]
STsdb *pTsdb; // [input]
SBlockIdx *pBlockIdxExp; // [input]
STSchema *pTSchema; // [input]
tb_uid_t suid;
tb_uid_t uid;
int32_t nFileSet;
int32_t iFileSet;
SArray *aDFileSet;
SDataFReader **pDataFReader;
SArray *aBlockIdx;
SBlockIdx *pBlockIdx;
SMapData blockMap;
int32_t nBlock;
int32_t iBlock;
SDataBlk block;
SBlockData blockData;
SBlockData *pBlockData;
int32_t nRow;
int32_t iRow;
TSDBROW row;
SSttBlockLoadInfo *pLoadInfo;
} SFSNextRowIter;
static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
@ -648,8 +680,16 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
return code;
}
code = tsdbDataFReaderOpen(&state->pDataFReader, state->pTsdb, pFileSet);
if (code) goto _err;
if (*state->pDataFReader == NULL || (*state->pDataFReader)->pSet->fid != pFileSet->fid) {
if (*state->pDataFReader != NULL) {
tsdbDataFReaderClose(state->pDataFReader);
resetLastBlockLoadInfo(state->pLoadInfo);
}
code = tsdbDataFReaderOpen(state->pDataFReader, state->pTsdb, pFileSet);
if (code) goto _err;
}
// tMapDataReset(&state->blockIdxMap);
if (!state->aBlockIdx) {
@ -657,7 +697,7 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
} else {
taosArrayClear(state->aBlockIdx);
}
code = tsdbReadBlockIdx(state->pDataFReader, state->aBlockIdx);
code = tsdbReadBlockIdx(*state->pDataFReader, state->aBlockIdx);
if (code) goto _err;
/* if (state->pBlockIdx) { */
@ -666,17 +706,20 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
* &state->blockIdx);
*/
state->pBlockIdx = taosArraySearch(state->aBlockIdx, state->pBlockIdxExp, tCmprBlockIdx, TD_EQ);
if (!state->pBlockIdx) {
tsdbDataFReaderClose(&state->pDataFReader);
state->pDataFReader = NULL;
if (!state->pBlockIdx) { /*
tsdbDataFReaderClose(state->pDataFReader);
*state->pDataFReader = NULL;
resetLastBlockLoadInfo(state->pLoadInfo);*/
goto _next_fileset;
}
tMapDataReset(&state->blockMap);
/*
if (state->blockMap.pData != NULL) {
tMapDataClear(&state->blockMap);
}
code = tsdbReadDataBlk(state->pDataFReader, state->pBlockIdx, &state->blockMap);
*/
code = tsdbReadDataBlk(*state->pDataFReader, state->pBlockIdx, &state->blockMap);
if (code) goto _err;
state->nBlock = state->blockMap.nItem;
@ -703,7 +746,7 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
code = tBlockDataInit(state->pBlockData, &tid, state->pTSchema, NULL, 0);
if (code) goto _err;
code = tsdbReadDataBlock(state->pDataFReader, &block, state->pBlockData);
code = tsdbReadDataBlock(*state->pDataFReader, &block, state->pBlockData);
if (code) goto _err;
state->nRow = state->blockData.nRow;
@ -719,8 +762,9 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
if (--state->iRow < 0) {
state->state = SFSNEXTROW_BLOCKDATA;
if (--state->iBlock < 0) {
tsdbDataFReaderClose(&state->pDataFReader);
state->pDataFReader = NULL;
tsdbDataFReaderClose(state->pDataFReader);
*state->pDataFReader = NULL;
resetLastBlockLoadInfo(state->pLoadInfo);
if (state->aBlockIdx) {
taosArrayDestroy(state->aBlockIdx);
@ -739,16 +783,17 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
}
_err:
if (state->pDataFReader) {
tsdbDataFReaderClose(&state->pDataFReader);
state->pDataFReader = NULL;
}
/*
if (*state->pDataFReader) {
tsdbDataFReaderClose(state->pDataFReader);
*state->pDataFReader = NULL;
resetLastBlockLoadInfo(state->pLoadInfo);
}*/
if (state->aBlockIdx) {
taosArrayDestroy(state->aBlockIdx);
state->aBlockIdx = NULL;
}
if (state->pBlockData) {
// tBlockDataDestroy(&state->blockData, 1);
tBlockDataDestroy(state->pBlockData, 1);
state->pBlockData = NULL;
}
@ -765,11 +810,11 @@ int32_t clearNextRowFromFS(void *iter) {
if (!state) {
return code;
}
/*
if (state->pDataFReader) {
tsdbDataFReaderClose(&state->pDataFReader);
state->pDataFReader = NULL;
}
}*/
if (state->aBlockIdx) {
taosArrayDestroy(state->aBlockIdx);
state->aBlockIdx = NULL;
@ -930,25 +975,22 @@ typedef struct {
TSDBROW memRow, imemRow, fsLastRow, fsRow;
TsdbNextRowState input[4];
STsdbReadSnap *pReadSnap;
STsdb *pTsdb;
} CacheNextRowIter;
static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTsdb, STSchema *pTSchema) {
static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTsdb, STSchema *pTSchema, tb_uid_t suid,
SSttBlockLoadInfo *pLoadInfo, STsdbReadSnap *pReadSnap, SDataFReader **pDataFReader,
SDataFReader **pDataFReaderLast) {
int code = 0;
tb_uid_t suid = getTableSuidByUid(uid, pTsdb);
tsdbTakeReadSnap(pTsdb, &pIter->pReadSnap, NULL);
STbData *pMem = NULL;
if (pIter->pReadSnap->pMem) {
pMem = tsdbGetTbDataFromMemTable(pIter->pReadSnap->pMem, suid, uid);
if (pReadSnap->pMem) {
pMem = tsdbGetTbDataFromMemTable(pReadSnap->pMem, suid, uid);
}
STbData *pIMem = NULL;
if (pIter->pReadSnap->pIMem) {
pIMem = tsdbGetTbDataFromMemTable(pIter->pReadSnap->pIMem, suid, uid);
if (pReadSnap->pIMem) {
pIMem = tsdbGetTbDataFromMemTable(pReadSnap->pIMem, suid, uid);
}
pIter->pTsdb = pTsdb;
@ -957,7 +999,7 @@ static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTs
SDelIdx delIdx;
SDelFile *pDelFile = pIter->pReadSnap->fs.pDelFile;
SDelFile *pDelFile = pReadSnap->fs.pDelFile;
if (pDelFile) {
SDelFReader *pDelFReader;
@ -988,18 +1030,22 @@ static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTs
pIter->fsLastState.state = (SFSLASTNEXTROWSTATES)SFSNEXTROW_FS;
pIter->fsLastState.pTsdb = pTsdb;
pIter->fsLastState.aDFileSet = pIter->pReadSnap->fs.aDFileSet;
pIter->fsLastState.aDFileSet = pReadSnap->fs.aDFileSet;
pIter->fsLastState.pTSchema = pTSchema;
pIter->fsLastState.suid = suid;
pIter->fsLastState.uid = uid;
pIter->fsLastState.pLoadInfo = pLoadInfo;
pIter->fsLastState.pDataFReader = pDataFReaderLast;
pIter->fsState.state = SFSNEXTROW_FS;
pIter->fsState.pTsdb = pTsdb;
pIter->fsState.aDFileSet = pIter->pReadSnap->fs.aDFileSet;
pIter->fsState.aDFileSet = pReadSnap->fs.aDFileSet;
pIter->fsState.pBlockIdxExp = &pIter->idx;
pIter->fsState.pTSchema = pTSchema;
pIter->fsState.suid = suid;
pIter->fsState.uid = uid;
pIter->fsState.pLoadInfo = pLoadInfo;
pIter->fsState.pDataFReader = pDataFReader;
pIter->input[0] = (TsdbNextRowState){&pIter->memRow, true, false, &pIter->memState, getNextRowFromMem, NULL};
pIter->input[1] = (TsdbNextRowState){&pIter->imemRow, true, false, &pIter->imemState, getNextRowFromMem, NULL};
@ -1040,8 +1086,6 @@ static int32_t nextRowIterClose(CacheNextRowIter *pIter) {
taosArrayDestroy(pIter->pSkyline);
}
tsdbUntakeReadSnap(pIter->pTsdb, pIter->pReadSnap, NULL);
_err:
return code;
}
@ -1119,10 +1163,10 @@ _err:
return code;
}
static int32_t mergeLastRow(tb_uid_t uid, STsdb *pTsdb, bool *dup, SArray **ppColArray) {
static int32_t mergeLastRow(tb_uid_t uid, STsdb *pTsdb, bool *dup, SArray **ppColArray, SCacheRowsReader *pr) {
int32_t code = 0;
STSchema *pTSchema = metaGetTbTSchema(pTsdb->pVnode->pMeta, uid, -1, 1);
STSchema *pTSchema = pr->pSchema; // metaGetTbTSchema(pTsdb->pVnode->pMeta, uid, -1, 1);
int16_t nCol = pTSchema->numOfCols;
int16_t iCol = 0;
int16_t noneCol = 0;
@ -1133,7 +1177,8 @@ static int32_t mergeLastRow(tb_uid_t uid, STsdb *pTsdb, bool *dup, SArray **ppCo
TSKEY lastRowTs = TSKEY_MAX;
CacheNextRowIter iter = {0};
nextRowIterOpen(&iter, uid, pTsdb, pTSchema);
nextRowIterOpen(&iter, uid, pTsdb, pTSchema, pr->suid, pr->pLoadInfo, pr->pReadSnap, &pr->pDataFReader,
&pr->pDataFReaderLast);
do {
TSDBROW *pRow = NULL;
@ -1233,20 +1278,20 @@ static int32_t mergeLastRow(tb_uid_t uid, STsdb *pTsdb, bool *dup, SArray **ppCo
}
nextRowIterClose(&iter);
taosMemoryFreeClear(pTSchema);
// taosMemoryFreeClear(pTSchema);
return code;
_err:
nextRowIterClose(&iter);
taosArrayDestroy(pColArray);
taosMemoryFreeClear(pTSchema);
// taosMemoryFreeClear(pTSchema);
return code;
}
static int32_t mergeLast(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray) {
static int32_t mergeLast(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SCacheRowsReader *pr) {
int32_t code = 0;
STSchema *pTSchema = metaGetTbTSchema(pTsdb->pVnode->pMeta, uid, -1, 1);
STSchema *pTSchema = pr->pSchema; // metaGetTbTSchema(pTsdb->pVnode->pMeta, uid, -1, 1);
int16_t nCol = pTSchema->numOfCols;
int16_t iCol = 0;
int16_t noneCol = 0;
@ -1257,7 +1302,8 @@ static int32_t mergeLast(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray) {
TSKEY lastRowTs = TSKEY_MAX;
CacheNextRowIter iter = {0};
nextRowIterOpen(&iter, uid, pTsdb, pTSchema);
nextRowIterOpen(&iter, uid, pTsdb, pTSchema, pr->suid, pr->pLoadInfo, pr->pReadSnap, &pr->pDataFReader,
&pr->pDataFReaderLast);
do {
TSDBROW *pRow = NULL;
@ -1350,18 +1396,18 @@ static int32_t mergeLast(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray) {
}
nextRowIterClose(&iter);
taosMemoryFreeClear(pTSchema);
// taosMemoryFreeClear(pTSchema);
return code;
_err:
nextRowIterClose(&iter);
taosMemoryFreeClear(pTSchema);
// taosMemoryFreeClear(pTSchema);
*ppLastArray = NULL;
taosArrayDestroy(pColArray);
return code;
}
int32_t tsdbCacheGetLastrowH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUHandle **handle) {
int32_t tsdbCacheGetLastrowH(SLRUCache *pCache, tb_uid_t uid, SCacheRowsReader *pr, LRUHandle **handle) {
int32_t code = 0;
char key[32] = {0};
int keyLen = 0;
@ -1370,13 +1416,14 @@ int32_t tsdbCacheGetLastrowH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUH
getTableCacheKey(uid, 0, key, &keyLen);
LRUHandle *h = taosLRUCacheLookup(pCache, key, keyLen);
if (!h) {
STsdb *pTsdb = pr->pVnode->pTsdb;
taosThreadMutexLock(&pTsdb->lruMutex);
h = taosLRUCacheLookup(pCache, key, keyLen);
if (!h) {
SArray *pArray = NULL;
bool dup = false; // which is always false for now
code = mergeLastRow(uid, pTsdb, &dup, &pArray);
code = mergeLastRow(uid, pTsdb, &dup, &pArray, pr);
// if table's empty or error, return code of -1
if (code < 0 || pArray == NULL) {
if (!dup && pArray) {
@ -1392,17 +1439,17 @@ int32_t tsdbCacheGetLastrowH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUH
size_t charge = pArray->capacity * pArray->elemSize + sizeof(*pArray);
_taos_lru_deleter_t deleter = deleteTableCacheLast;
LRUStatus status = taosLRUCacheInsert(pCache, key, keyLen, pArray, charge, deleter, NULL, TAOS_LRU_PRIORITY_LOW);
LRUStatus status = taosLRUCacheInsert(pCache, key, keyLen, pArray, charge, deleter, &h, TAOS_LRU_PRIORITY_LOW);
if (status != TAOS_LRU_STATUS_OK) {
code = -1;
}
taosThreadMutexUnlock(&pTsdb->lruMutex);
// taosThreadMutexUnlock(&pTsdb->lruMutex);
h = taosLRUCacheLookup(pCache, key, keyLen);
} else {
taosThreadMutexUnlock(&pTsdb->lruMutex);
}
// h = taosLRUCacheLookup(pCache, key, keyLen);
} // else {
taosThreadMutexUnlock(&pTsdb->lruMutex);
//}
}
*handle = h;
@ -1434,7 +1481,7 @@ _err:
return code;
}
int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUHandle **handle) {
int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, SCacheRowsReader *pr, LRUHandle **handle) {
int32_t code = 0;
char key[32] = {0};
int keyLen = 0;
@ -1443,12 +1490,13 @@ int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUHand
getTableCacheKey(uid, 1, key, &keyLen);
LRUHandle *h = taosLRUCacheLookup(pCache, key, keyLen);
if (!h) {
STsdb *pTsdb = pr->pVnode->pTsdb;
taosThreadMutexLock(&pTsdb->lruMutex);
h = taosLRUCacheLookup(pCache, key, keyLen);
if (!h) {
SArray *pLastArray = NULL;
code = mergeLast(uid, pTsdb, &pLastArray);
code = mergeLast(uid, pTsdb, &pLastArray, pr);
// if table's empty or error, return code of -1
if (code < 0 || pLastArray == NULL) {
taosThreadMutexUnlock(&pTsdb->lruMutex);
@ -1460,17 +1508,17 @@ int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUHand
size_t charge = pLastArray->capacity * pLastArray->elemSize + sizeof(*pLastArray);
_taos_lru_deleter_t deleter = deleteTableCacheLast;
LRUStatus status =
taosLRUCacheInsert(pCache, key, keyLen, pLastArray, charge, deleter, NULL, TAOS_LRU_PRIORITY_LOW);
taosLRUCacheInsert(pCache, key, keyLen, pLastArray, charge, deleter, &h, TAOS_LRU_PRIORITY_LOW);
if (status != TAOS_LRU_STATUS_OK) {
code = -1;
}
taosThreadMutexUnlock(&pTsdb->lruMutex);
// taosThreadMutexUnlock(&pTsdb->lruMutex);
h = taosLRUCacheLookup(pCache, key, keyLen);
} else {
taosThreadMutexUnlock(&pTsdb->lruMutex);
}
// h = taosLRUCacheLookup(pCache, key, keyLen);
} // else {
taosThreadMutexUnlock(&pTsdb->lruMutex);
//}
}
*handle = h;
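
Both cache-miss paths above now pass &h to taosLRUCacheInsert so the handle of the freshly inserted entry comes back directly, instead of unlocking and issuing a second taosLRUCacheLookup. A condensed sketch of the resulting pattern, with the pre-lock fast-path lookup omitted; this is an illustration, not a function from the commit.

// Condensed sketch of the miss path shared by tsdbCacheGetLastH/tsdbCacheGetLastrowH after this change.
static int32_t cacheMissSketch(SLRUCache* pCache, STsdb* pTsdb, tb_uid_t uid, SCacheRowsReader* pr,
                               const char* key, int keyLen, LRUHandle** handle) {
  int32_t code = 0;
  taosThreadMutexLock(&pTsdb->lruMutex);
  LRUHandle* h = taosLRUCacheLookup(pCache, key, keyLen);
  if (!h) {
    SArray* pArray = NULL;
    code = mergeLast(uid, pTsdb, &pArray, pr);  // build the cached value while holding the mutex
    if (code < 0 || pArray == NULL) {
      taosThreadMutexUnlock(&pTsdb->lruMutex);
      *handle = NULL;
      return -1;
    }
    size_t charge = pArray->capacity * pArray->elemSize + sizeof(*pArray);
    // the inserted entry's handle is returned through &h, so no second lookup is needed
    LRUStatus status = taosLRUCacheInsert(pCache, key, keyLen, pArray, charge, deleteTableCacheLast, &h,
                                          TAOS_LRU_PRIORITY_LOW);
    if (status != TAOS_LRU_STATUS_OK) code = -1;
  }
  taosThreadMutexUnlock(&pTsdb->lruMutex);
  *handle = h;
  return code;
}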

View File

@ -18,18 +18,7 @@
#include "tcommon.h"
#include "tsdb.h"
typedef struct SCacheRowsReader {
SVnode* pVnode;
STSchema* pSchema;
uint64_t uid;
char** transferBuf; // todo remove it soon
int32_t numOfCols;
int32_t type;
int32_t tableIndex; // currently returned result tables
SArray* pTableList; // table id list
} SCacheRowsReader;
#define HASTYPE(_type, _t) (((_type) & (_t)) == (_t))
#define HASTYPE(_type, _t) (((_type) & (_t)) == (_t))
static void saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* pReader, const int32_t* slotIds,
void** pRes) {
@ -61,7 +50,7 @@ static void saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* pRea
if (IS_VAR_DATA_TYPE(pColVal->colVal.type)) {
varDataSetLen(p->buf, pColVal->colVal.value.nData);
memcpy(varDataVal(p->buf), pColVal->colVal.value.pData, pColVal->colVal.value.nData);
p->bytes = pColVal->colVal.value.nData + VARSTR_HEADER_SIZE; // binary needs to plus the header size
p->bytes = pColVal->colVal.value.nData + VARSTR_HEADER_SIZE; // binary needs to plus the header size
} else {
memcpy(p->buf, &pColVal->colVal.value, pReader->pSchema->columns[slotId].bytes);
p->bytes = pReader->pSchema->columns[slotId].bytes;
@ -75,7 +64,7 @@ static void saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* pRea
colDataAppend(pColInfoData, numOfRows, (const char*)pRes[i], false);
}
pBlock->info.rows += allNullRow? 0:1;
pBlock->info.rows += allNullRow ? 0 : 1;
} else {
ASSERT(HASTYPE(pReader->type, CACHESCAN_RETRIEVE_LAST_ROW));
@ -108,7 +97,8 @@ static void saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* pRea
}
}
int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList, int32_t numOfCols, void** pReader) {
int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList, int32_t numOfCols, uint64_t suid,
void** pReader) {
*pReader = NULL;
SCacheRowsReader* p = taosMemoryCalloc(1, sizeof(SCacheRowsReader));
@ -119,6 +109,7 @@ int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList
p->type = type;
p->pVnode = pVnode;
p->numOfCols = numOfCols;
p->suid = suid;
if (taosArrayGetSize(pTableIdList) == 0) {
*pReader = p;
@ -145,6 +136,12 @@ int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList
}
}
p->pLoadInfo = tCreateLastBlockLoadInfo(p->pSchema, NULL, 0);
if (p->pLoadInfo == NULL) {
tsdbCacherowsReaderClose(p);
return TSDB_CODE_OUT_OF_MEMORY;
}
*pReader = p;
return TSDB_CODE_SUCCESS;
}
@ -161,6 +158,8 @@ void* tsdbCacherowsReaderClose(void* pReader) {
taosMemoryFree(p->pSchema);
}
destroyLastBlockLoadInfo(p->pLoadInfo);
taosMemoryFree(pReader);
return NULL;
}
@ -171,9 +170,9 @@ static int32_t doExtractCacheRow(SCacheRowsReader* pr, SLRUCache* lruCache, uint
*pRow = NULL;
if (HASTYPE(pr->type, CACHESCAN_RETRIEVE_LAST_ROW)) {
code = tsdbCacheGetLastrowH(lruCache, uid, pr->pVnode->pTsdb, h);
code = tsdbCacheGetLastrowH(lruCache, uid, pr, h);
} else {
code = tsdbCacheGetLastH(lruCache, uid, pr->pVnode->pTsdb, h);
code = tsdbCacheGetLastH(lruCache, uid, pr, h);
}
if (code != TSDB_CODE_SUCCESS) {
@ -189,7 +188,7 @@ static int32_t doExtractCacheRow(SCacheRowsReader* pr, SLRUCache* lruCache, uint
}
static void freeItem(void* pItem) {
SLastCol* pCol = (SLastCol*) pItem;
SLastCol* pCol = (SLastCol*)pItem;
if (IS_VAR_DATA_TYPE(pCol->colVal.type)) {
taosMemoryFree(pCol->colVal.value.pData);
}
@ -230,7 +229,7 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
for (int32_t i = 0; i < pr->pSchema->numOfCols; ++i) {
struct STColumn* pCol = &pr->pSchema->columns[i];
SLastCol p = {.ts = INT64_MIN, .colVal.type = pCol->type};
SLastCol p = {.ts = INT64_MIN, .colVal.type = pCol->type};
if (IS_VAR_DATA_TYPE(pCol->type)) {
p.colVal.value.pData = taosMemoryCalloc(pCol->bytes, sizeof(char));
@ -238,6 +237,10 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
taosArrayPush(pLastCols, &p);
}
tsdbTakeReadSnap(pr->pVnode->pTsdb, &pr->pReadSnap, "cache-l");
pr->pDataFReader = NULL;
pr->pDataFReaderLast = NULL;
// retrieve the only one last row of all tables in the uid list.
if (HASTYPE(pr->type, CACHESCAN_RETRIEVE_TYPE_SINGLE)) {
for (int32_t i = 0; i < numOfTables; ++i) {
@ -306,7 +309,7 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
} else if (HASTYPE(pr->type, CACHESCAN_RETRIEVE_TYPE_ALL)) {
for (int32_t i = pr->tableIndex; i < numOfTables; ++i) {
STableKeyInfo* pKeyInfo = (STableKeyInfo*) taosArrayGet(pr->pTableList, i);
STableKeyInfo* pKeyInfo = (STableKeyInfo*)taosArrayGet(pr->pTableList, i);
code = doExtractCacheRow(pr, lruCache, pKeyInfo->uid, &pRow, &h);
if (code != TSDB_CODE_SUCCESS) {
return code;
@ -331,7 +334,12 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
code = TSDB_CODE_INVALID_PARA;
}
_end:
_end:
tsdbDataFReaderClose(&pr->pDataFReaderLast);
tsdbDataFReaderClose(&pr->pDataFReader);
tsdbUntakeReadSnap(pr->pVnode->pTsdb, pr->pReadSnap, "cache-l");
for (int32_t j = 0; j < pr->numOfCols; ++j) {
taosMemoryFree(pRes[j]);
}

View File

@ -1506,7 +1506,7 @@ static int32_t tsdbCommitTableData(SCommitter *pCommitter, TABLEID id) {
TSDB_CHECK_CODE(code, lino, _exit);
}
#else
if (pCommitter->dWriter.bData.nRow >= pCommitter->maxRow) {
if (pCommitter->dWriter.bDatal.nRow >= pCommitter->maxRow) {
code = tsdbWriteSttBlock(pCommitter->dWriter.pWriter, &pCommitter->dWriter.bDatal, pCommitter->dWriter.aSttBlk,
pCommitter->cmprAlg);
TSDB_CHECK_CODE(code, lino, _exit);

View File

@ -152,8 +152,8 @@ static SBlockData *loadLastBlock(SLDataIter *pIter, const char *idStr) {
pInfo->loadBlocks += 1;
tsdbDebug("read last block, total load:%d, trigger by uid:%" PRIu64
", last file index:%d, last block index:%d, entry:%d, %p, elapsed time:%.2f ms, %s",
pInfo->loadBlocks, pIter->uid, pIter->iStt, pIter->iSttBlk, pInfo->currentLoadBlockIndex, pBlock, el,
", last file index:%d, last block index:%d, entry:%d, rows:%d, %p, elapsed time:%.2f ms, %s",
pInfo->loadBlocks, pIter->uid, pIter->iStt, pIter->iSttBlk, pInfo->currentLoadBlockIndex, pBlock->nRow, pBlock, el,
idStr);
pInfo->blockIndex[pInfo->currentLoadBlockIndex] = pIter->iSttBlk;

View File

@ -2139,12 +2139,13 @@ static int64_t getCurrentKeyInLastBlock(SLastBlockReader* pLastBlockReader) {
}
static bool hasDataInLastBlock(SLastBlockReader* pLastBlockReader) { return pLastBlockReader->mergeTree.pIter != NULL; }
bool hasDataInFileBlock(const SBlockData* pBlockData, const SFileBlockDumpInfo* pDumpInfo) {
if (pBlockData->nRow > 0) {
ASSERT(pBlockData->nRow == pDumpInfo->totalRows);
bool hasDataInFileBlock(const SBlockData* pBlockData, const SFileBlockDumpInfo* pDumpInfo) {
if (pBlockData->nRow > 0) {
ASSERT(pBlockData->nRow == pDumpInfo->totalRows);
}
return pBlockData->nRow > 0 && (!pDumpInfo->allDumped);
return pBlockData->nRow > 0 && (!pDumpInfo->allDumped);
}
int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBlockScanInfo, int64_t key,
@ -2619,6 +2620,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
ASSERT(tsLast >= pBlock->maxKey.ts);
tBlockDataReset(&pReader->status.fileBlockData);
tsdbDebug("load data in last block firstly, due to desc scan data, %s", pReader->idStr);
code = buildComposedDataBlock(pReader);
} else { // whole block is required, return it directly
SDataBlockInfo* pInfo = &pReader->pResBlock->info;

View File

@ -685,7 +685,12 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_ON_CONDITIONS_FORMAT);
QRY_ERR_RET(
nodesNodeToSQL(pJoinNode->pOnConditions, tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
nodesNodeToSQL(pJoinNode->pMergeCondition, tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
if (pJoinNode->pOnConditions) {
EXPLAIN_ROW_APPEND(" AND ");
QRY_ERR_RET(
nodesNodeToSQL(pJoinNode->pOnConditions, tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
}
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
}

View File

@ -12,7 +12,6 @@
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
// clang-format off
#ifndef TDENGINE_EXECUTORIMPL_H
#define TDENGINE_EXECUTORIMPL_H
@ -47,7 +46,7 @@ extern "C" {
typedef int32_t (*__block_search_fn_t)(char* data, int32_t num, int64_t key, int32_t order);
#define Q_STATUS_EQUAL(p, s) (((p) & (s)) != 0u)
#define Q_STATUS_EQUAL(p, s) (((p) & (s)) != 0u)
#define IS_VALID_SESSION_WIN(winInfo) ((winInfo).sessionWin.win.skey > 0)
#define SET_SESSION_WIN_INVALID(winInfo) ((winInfo).sessionWin.win.skey = INT64_MIN)
#define IS_INVALID_SESSION_WIN_KEY(winKey) ((winKey).win.skey <= 0)
@ -135,18 +134,19 @@ typedef struct STaskIdInfo {
enum {
STREAM_RECOVER_STEP__NONE = 0,
STREAM_RECOVER_STEP__PREPARE,
STREAM_RECOVER_STEP__PREPARE1,
STREAM_RECOVER_STEP__PREPARE2,
STREAM_RECOVER_STEP__SCAN,
};
typedef struct {
// TODO remove prepareStatus
STqOffsetVal prepareStatus; // for tmq
STqOffsetVal lastStatus; // for tmq
SMqMetaRsp metaRsp; // for tmq fetching meta
int8_t returned;
int64_t snapshotVer;
const SSubmitReq* pReq;
STqOffsetVal prepareStatus; // for tmq
STqOffsetVal lastStatus; // for tmq
SMqMetaRsp metaRsp; // for tmq fetching meta
int8_t returned;
int64_t snapshotVer;
const SSubmitReq* pReq;
SSchemaWrapper* schema;
char tbName[TSDB_TABLE_NAME_LEN];
@ -159,7 +159,10 @@ typedef struct {
int64_t recoverEndVer;
int64_t fillHistoryVer1;
int64_t fillHistoryVer2;
SStreamState* pState;
int8_t triggerSaved;
int64_t deleteMarkSaved;
SStreamState* pState;
} SStreamTaskInfo;
typedef struct {
@ -187,7 +190,7 @@ typedef struct SExecTaskInfo {
EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model]
SSubplan* pSubplan;
struct SOperatorInfo* pRoot;
SLocalFetch localFetch;
SLocalFetch localFetch;
} SExecTaskInfo;
enum {
@ -235,7 +238,7 @@ typedef struct SOperatorInfo {
typedef enum {
EX_SOURCE_DATA_NOT_READY = 0x1,
EX_SOURCE_DATA_READY = 0x2,
EX_SOURCE_DATA_READY = 0x2,
EX_SOURCE_DATA_EXHAUSTED = 0x3,
} EX_SOURCE_STATUS;
@ -373,9 +376,9 @@ typedef struct STableMergeScanInfo {
int32_t dataBlockLoadFlag;
// if the upstream is an interval operator, the interval info is also kept here to get the time
// window to check if current data block needs to be loaded.
SInterval interval;
SSampleExecInfo sample; // sample execution info
SSortExecInfo sortExecInfo;
SInterval interval;
SSampleExecInfo sample; // sample execution info
SSortExecInfo sortExecInfo;
} STableMergeScanInfo;
typedef struct STagScanInfo {
@ -388,17 +391,17 @@ typedef struct STagScanInfo {
} STagScanInfo;
typedef struct SLastrowScanInfo {
SSDataBlock* pRes;
SReadHandle readHandle;
void* pLastrowReader;
SColMatchInfo matchInfo;
int32_t* pSlotIds;
SExprSupp pseudoExprSup;
int32_t retrieveType;
int32_t currentGroupIndex;
SSDataBlock* pBufferredRes;
SArray* pUidList;
int32_t indexOfBufferedRes;
SSDataBlock* pRes;
SReadHandle readHandle;
void* pLastrowReader;
SColMatchInfo matchInfo;
int32_t* pSlotIds;
SExprSupp pseudoExprSup;
int32_t retrieveType;
int32_t currentGroupIndex;
SSDataBlock* pBufferredRes;
SArray* pUidList;
int32_t indexOfBufferedRes;
} SLastrowScanInfo;
typedef enum EStreamScanMode {
@ -426,8 +429,8 @@ typedef struct SStreamAggSupporter {
int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
SSDataBlock* pScanBlock;
SStreamState* pState;
int64_t gap; // stream session window gap
SqlFunctionCtx* pDummyCtx; // for combine
int64_t gap; // stream session window gap
SqlFunctionCtx* pDummyCtx; // for combine
SSHashObj* pResultRows;
int32_t stateKeySize;
int16_t stateKeyType;
@ -465,28 +468,28 @@ typedef struct STimeWindowAggSupp {
} STimeWindowAggSupp;
typedef struct SStreamScanInfo {
uint64_t tableUid; // queried super table uid
SExprInfo* pPseudoExpr;
int32_t numOfPseudoExpr;
SExprSupp tbnameCalSup;
SExprSupp tagCalSup;
int32_t primaryTsIndex; // primary time stamp slot id
SReadHandle readHandle;
SInterval interval; // if the upstream is an interval operator, the interval info is also kept here.
SColMatchInfo matchInfo;
SNode* pCondition;
SArray* pBlockLists; // multiple SSDatablock.
SSDataBlock* pRes; // result SSDataBlock
SSDataBlock* pUpdateRes; // update SSDataBlock
int32_t updateResIndex;
int32_t blockType; // current block type
int32_t validBlockIndex; // Is current data has returned?
uint64_t numOfExec; // execution times
STqReader* tqReader;
uint64_t tableUid; // queried super table uid
SExprInfo* pPseudoExpr;
int32_t numOfPseudoExpr;
SExprSupp tbnameCalSup;
SExprSupp tagCalSup;
int32_t primaryTsIndex; // primary time stamp slot id
SReadHandle readHandle;
SInterval interval; // if the upstream is an interval operator, the interval info is also kept here.
SColMatchInfo matchInfo;
SNode* pCondition;
uint64_t groupId;
SUpdateInfo* pUpdateInfo;
SArray* pBlockLists; // multiple SSDatablock.
SSDataBlock* pRes; // result SSDataBlock
SSDataBlock* pUpdateRes; // update SSDataBlock
int32_t updateResIndex;
int32_t blockType; // current block type
int32_t validBlockIndex; // Is current data has returned?
uint64_t numOfExec; // execution times
STqReader* tqReader;
uint64_t groupId;
SUpdateInfo* pUpdateInfo;
EStreamScanMode scanMode;
SOperatorInfo* pStreamScanOp;
@ -524,9 +527,9 @@ typedef struct {
} SStreamRawScanInfo;
typedef struct SSysTableIndex {
int8_t init;
SArray *uids;
int32_t lastIdx;
int8_t init;
SArray* uids;
int32_t lastIdx;
} SSysTableIndex;
typedef struct SSysTableScanInfo {
@ -541,7 +544,7 @@ typedef struct SSysTableScanInfo {
bool showRewrite;
SNode* pCondition; // db_name filter condition, to discard data that are not in current database
SMTbCursor* pCur; // cursor for iterate the local table meta store.
SSysTableIndex* pIdx; // idx for local table meta
SSysTableIndex* pIdx; // idx for local table meta
SColMatchInfo matchInfo;
SName name;
SSDataBlock* pRes;
@ -585,7 +588,7 @@ typedef struct SIntervalAggOperatorInfo {
typedef struct SMergeAlignedIntervalAggOperatorInfo {
SIntervalAggOperatorInfo* intervalAggOperatorInfo;
// bool hasGroupId;
// bool hasGroupId;
uint64_t groupId; // current groupId
int64_t curTs; // current ts
SSDataBlock* prefetchedBlock;
@ -595,21 +598,21 @@ typedef struct SMergeAlignedIntervalAggOperatorInfo {
typedef struct SStreamIntervalOperatorInfo {
// SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
SOptrBasicInfo binfo; // basic info
SAggSupporter aggSup; // aggregate supporter
SExprSupp scalarSupp; // supporter for perform scalar function
SGroupResInfo groupResInfo; // multiple results build supporter
SInterval interval; // interval info
int32_t primaryTsIndex; // primary time stamp slot id from result of downstream operator.
SOptrBasicInfo binfo; // basic info
SAggSupporter aggSup; // aggregate supporter
SExprSupp scalarSupp; // supporter for perform scalar function
SGroupResInfo groupResInfo; // multiple results build supporter
SInterval interval; // interval info
int32_t primaryTsIndex; // primary time stamp slot id from result of downstream operator.
STimeWindowAggSupp twAggSup;
bool invertible;
bool ignoreExpiredData;
SArray* pDelWins; // SWinRes
SArray* pDelWins; // SWinRes
int32_t delIndex;
SSDataBlock* pDelRes;
SPhysiNode* pPhyNode; // create new child
SPhysiNode* pPhyNode; // create new child
SHashObj* pPullDataMap;
SArray* pPullWins; // SPullWindowInfo
SArray* pPullWins; // SPullWindowInfo
int32_t pullIndex;
SSDataBlock* pPullDataRes;
bool isFinal;
@ -677,9 +680,9 @@ typedef struct SGroupbyOperatorInfo {
SArray* pGroupCols; // group by columns, SArray<SColumn>
SArray* pGroupColVals; // current group column values, SArray<SGroupKeys>
SNode* pCondition;
bool isInit; // denote if current val is initialized or not
char* keyBuf; // group by keys for hash
int32_t groupKeyLen; // total group by column width
bool isInit; // denote if current val is initialized or not
char* keyBuf; // group by keys for hash
int32_t groupKeyLen; // total group by column width
SGroupResInfo groupResInfo;
SExprSupp scalarSup;
} SGroupbyOperatorInfo;
@ -730,9 +733,9 @@ typedef struct SSessionAggOperatorInfo {
} SSessionAggOperatorInfo;
typedef struct SResultWindowInfo {
void* pOutputBuf;
SSessionKey sessionWin;
bool isOutput;
void* pOutputBuf;
SSessionKey sessionWin;
bool isOutput;
} SResultWindowInfo;
typedef struct SStateWindowInfo {
@ -743,20 +746,20 @@ typedef struct SStateWindowInfo {
typedef struct SStreamSessionAggOperatorInfo {
SOptrBasicInfo binfo;
SStreamAggSupporter streamAggSup;
SExprSupp scalarSupp; // supporter for perform scalar function
SExprSupp scalarSupp; // supporter for perform scalar function
SGroupResInfo groupResInfo;
int32_t primaryTsIndex; // primary timestamp slot id
int32_t endTsIndex; // window end timestamp slot id
int32_t order; // current SSDataBlock scan order
STimeWindowAggSupp twAggSup;
SSDataBlock* pWinBlock; // window result
SSDataBlock* pDelRes; // delete result
SSDataBlock* pUpdateRes; // update window
SSDataBlock* pWinBlock; // window result
SSDataBlock* pDelRes; // delete result
SSDataBlock* pUpdateRes; // update window
bool returnUpdate;
SSHashObj* pStDeleted;
void* pDelIterator;
SArray* pChildren; // cache for children's result; final stream operator
SPhysiNode* pPhyNode; // create new child
SArray* pChildren; // cache for children's result; final stream operator
SPhysiNode* pPhyNode; // create new child
bool isFinal;
bool ignoreExpiredData;
SHashObj* pGroupIdTbNameMap;
@ -765,7 +768,7 @@ typedef struct SStreamSessionAggOperatorInfo {
typedef struct SStreamStateAggOperatorInfo {
SOptrBasicInfo binfo;
SStreamAggSupporter streamAggSup;
SExprSupp scalarSupp; // supporter for perform scalar function
SExprSupp scalarSupp; // supporter for perform scalar function
SGroupResInfo groupResInfo;
int32_t primaryTsIndex; // primary timestamp slot id
STimeWindowAggSupp twAggSup;
@ -773,7 +776,7 @@ typedef struct SStreamStateAggOperatorInfo {
SSDataBlock* pDelRes;
SSHashObj* pSeDeleted;
void* pDelIterator;
SArray* pChildren; // cache for children's result;
SArray* pChildren; // cache for children's result;
bool ignoreExpiredData;
SHashObj* pGroupIdTbNameMap;
} SStreamStateAggOperatorInfo;
@ -793,18 +796,18 @@ typedef struct SStreamPartitionOperatorInfo {
typedef struct SStreamFillOperatorInfo {
SStreamFillSupporter* pFillSup;
SSDataBlock* pRes;
SSDataBlock* pSrcBlock;
int32_t srcRowIndex;
SSDataBlock* pPrevSrcBlock;
SSDataBlock* pSrcDelBlock;
int32_t srcDelRowIndex;
SSDataBlock* pDelRes;
SNode* pCondition;
SColMatchInfo matchInfo;
int32_t primaryTsCol;
int32_t primarySrcSlotId;
SStreamFillInfo* pFillInfo;
SSDataBlock* pRes;
SSDataBlock* pSrcBlock;
int32_t srcRowIndex;
SSDataBlock* pPrevSrcBlock;
SSDataBlock* pSrcDelBlock;
int32_t srcDelRowIndex;
SSDataBlock* pDelRes;
SNode* pCondition;
SColMatchInfo matchInfo;
int32_t primaryTsCol;
int32_t primarySrcSlotId;
SStreamFillInfo* pFillInfo;
} SStreamFillOperatorInfo;
typedef struct STimeSliceOperatorInfo {
@ -837,7 +840,7 @@ typedef struct SStateWindowOperatorInfo {
SStateKeys stateKey;
int32_t tsSlotId; // primary timestamp column slot id
STimeWindowAggSupp twAggSup;
const SNode* pCondition;
const SNode* pCondition;
} SStateWindowOperatorInfo;
typedef struct SSortOperatorInfo {
@ -894,8 +897,8 @@ void initResultSizeInfo(SResultInfo* pResultInfo, int32_t numOfRows);
void doBuildStreamResBlock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo,
SDiskbasedBuf* pBuf);
void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo,
SDiskbasedBuf* pBuf);
void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo,
SDiskbasedBuf* pBuf);
int32_t handleLimitOffset(SOperatorInfo* pOperator, SLimitInfo* pLimitInfo, SSDataBlock* pBlock, bool holdDataInBuf);
bool hasLimitOffsetInfo(SLimitInfo* pLimitInfo);
@ -964,7 +967,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
SExecTaskInfo* pTaskInfo, int32_t numOfChild);
SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionWinodwPhysiNode* pSessionNode,
SExecTaskInfo* pTaskInfo);
SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SAggPhysiNode *pAggNode, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SAggPhysiNode* pAggNode, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createDataBlockInfoScanOperator(void* dataReader, SReadHandle* readHandle, uint64_t uid,
SBlockDistScanPhysiNode* pBlockScanNode, SExecTaskInfo* pTaskInfo);
@ -991,8 +994,8 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh
SExecTaskInfo* pTaskInfo);
SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
SExecTaskInfo* pTaskInfo, int32_t numOfChild);
SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream,
SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
SExecTaskInfo* pTaskInfo);
SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
SExecTaskInfo* pTaskInfo);
@ -1044,20 +1047,21 @@ STimeWindow getActiveTimeWindow(SDiskbasedBuf* pBuf, SResultRowInfo* pResultRowI
int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimaryColumn, int32_t startPos, TSKEY ekey,
__block_search_fn_t searchFn, STableQueryInfo* item, int32_t order);
int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order);
SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, int32_t interBufSize);
SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, int32_t interBufSize);
void getCurSessionWindow(SStreamAggSupporter* pAggSup, TSKEY startTs, TSKEY endTs, uint64_t groupId, SSessionKey* pKey);
bool isInTimeWindow(STimeWindow* pWin, TSKEY ts, int64_t gap);
bool functionNeedToExecute(SqlFunctionCtx* pCtx);
bool isOverdue(TSKEY ts, STimeWindowAggSupp* pSup);
bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pSup);
bool isDeletedWindow(STimeWindow* pWin, uint64_t groupId, SAggSupporter* pSup);
bool isDeletedStreamWindow(STimeWindow* pWin, uint64_t groupId, SStreamState* pState, STimeWindowAggSupp* pTwSup);
void appendOneRowToStreamSpecialBlock(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid, uint64_t* pGp, void* pTbName);
void printDataBlock(SSDataBlock* pBlock, const char* flag);
bool isInTimeWindow(STimeWindow* pWin, TSKEY ts, int64_t gap);
bool functionNeedToExecute(SqlFunctionCtx* pCtx);
bool isOverdue(TSKEY ts, STimeWindowAggSupp* pSup);
bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pSup);
bool isDeletedWindow(STimeWindow* pWin, uint64_t groupId, SAggSupporter* pSup);
bool isDeletedStreamWindow(STimeWindow* pWin, uint64_t groupId, SStreamState* pState, STimeWindowAggSupp* pTwSup);
void appendOneRowToStreamSpecialBlock(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid,
uint64_t* pGp, void* pTbName);
void printDataBlock(SSDataBlock* pBlock, const char* flag);
uint64_t calGroupIdByData(SPartitionBySupporter* pParSup, SExprSupp* pExprSup, SSDataBlock* pBlock, int32_t rowId);
int32_t finalizeResultRows(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition,
SExprSupp* pSup, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo);
int32_t finalizeResultRows(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition, SExprSupp* pSup,
SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo);
int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags, bool groupSort, SReadHandle* pHandle,
STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond,
@ -1078,8 +1082,8 @@ int32_t buildDataBlockFromGroupRes(SOperatorInfo* pOperator, SStreamState* pStat
int32_t saveSessionDiscBuf(SStreamState* pState, SSessionKey* key, void* buf, int32_t size);
int32_t buildSessionResultDataBlock(SOperatorInfo* pOperator, SStreamState* pState, SSDataBlock* pBlock,
SExprSupp* pSup, SGroupResInfo* pGroupResInfo);
int32_t setOutputBuf(SStreamState* pState, STimeWindow* win, SResultRow** pResult, int64_t tableGroupId, SqlFunctionCtx* pCtx,
int32_t numOfOutput, int32_t* rowEntryInfoOffset, SAggSupporter* pAggSup);
int32_t setOutputBuf(SStreamState* pState, STimeWindow* win, SResultRow** pResult, int64_t tableGroupId,
SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowEntryInfoOffset, SAggSupporter* pAggSup);
int32_t releaseOutputBuf(SStreamState* pState, SWinKey* pKey, SResultRow* pResult);
int32_t saveOutputBuf(SStreamState* pState, SWinKey* pKey, SResultRow* pResult, int32_t resSize);
void getNextIntervalWindow(SInterval* pInterval, STimeWindow* tw, int32_t order);

View File

@ -46,7 +46,8 @@ SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pScanNode, SRe
pInfo->pRes = createResDataBlock(pDescNode);
int32_t numOfCols = 0;
code = extractColMatchInfo(pScanNode->scan.pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID, &pInfo->matchInfo);
code =
extractColMatchInfo(pScanNode->scan.pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID, &pInfo->matchInfo);
removeRedundantTsCol(pScanNode, &pInfo->matchInfo);
code = extractCacheScanSlotId(pInfo->matchInfo.pList, pTaskInfo, &pInfo->pSlotIds);
@ -62,17 +63,19 @@ SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pScanNode, SRe
// partition by tbname
if (taosArrayGetSize(pTableList->pGroupList) == taosArrayGetSize(pTableList->pTableList)) {
pInfo->retrieveType = CACHESCAN_RETRIEVE_TYPE_ALL|(pScanNode->ignoreNull? CACHESCAN_RETRIEVE_LAST:CACHESCAN_RETRIEVE_LAST_ROW);
pInfo->retrieveType =
CACHESCAN_RETRIEVE_TYPE_ALL | (pScanNode->ignoreNull ? CACHESCAN_RETRIEVE_LAST : CACHESCAN_RETRIEVE_LAST_ROW);
code = tsdbCacherowsReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pTableList->pTableList,
taosArrayGetSize(pInfo->matchInfo.pList), &pInfo->pLastrowReader);
taosArrayGetSize(pInfo->matchInfo.pList), pTableList->suid, &pInfo->pLastrowReader);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
pInfo->pBufferredRes = createOneDataBlock(pInfo->pRes, false);
blockDataEnsureCapacity(pInfo->pBufferredRes, pOperator->resultInfo.capacity);
} else { // by tags
pInfo->retrieveType = CACHESCAN_RETRIEVE_TYPE_SINGLE|(pScanNode->ignoreNull? CACHESCAN_RETRIEVE_LAST:CACHESCAN_RETRIEVE_LAST_ROW);
} else { // by tags
pInfo->retrieveType = CACHESCAN_RETRIEVE_TYPE_SINGLE |
(pScanNode->ignoreNull ? CACHESCAN_RETRIEVE_LAST : CACHESCAN_RETRIEVE_LAST_ROW);
}
if (pScanNode->scan.pScanPseudoCols != NULL) {
@ -188,7 +191,7 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) {
SArray* pGroupTableList = taosArrayGetP(pTableList->pGroupList, pInfo->currentGroupIndex);
tsdbCacherowsReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pGroupTableList,
taosArrayGetSize(pInfo->matchInfo.pList), &pInfo->pLastrowReader);
taosArrayGetSize(pInfo->matchInfo.pList), pTableList->suid, &pInfo->pLastrowReader);
taosArrayClear(pInfo->pUidList);
int32_t code = tsdbRetrieveCacheRows(pInfo->pLastrowReader, pInfo->pRes, pInfo->pSlotIds, pInfo->pUidList);
@ -257,8 +260,7 @@ int32_t extractCacheScanSlotId(const SArray* pColMatchInfo, SExecTaskInfo* pTask
for (int32_t i = 0; i < numOfCols; ++i) {
SColMatchItem* pColMatch = taosArrayGet(pColMatchInfo, i);
for (int32_t j = 0; j < pWrapper->nCols; ++j) {
if (pColMatch->colId == pWrapper->pSchema[j].colId &&
pColMatch->colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
if (pColMatch->colId == pWrapper->pSchema[j].colId && pColMatch->colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
(*pSlotIds)[pColMatch->dstSlotId] = -1;
break;
}
@ -296,4 +298,4 @@ int32_t removeRedundantTsCol(SLastRowScanPhysiNode* pScanNode, SColMatchInfo* pC
taosArrayDestroy(pColMatchInfo->pList);
pColMatchInfo->pList = pMatchInfo;
return TSDB_CODE_SUCCESS;
}
}

View File

@ -233,7 +233,7 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo
}
if (pListInfo->map == NULL) {
pListInfo->map = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
pListInfo->map = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_ENTRY_LOCK);
}
// traverse to the stream scanner node to add this table id
@ -564,23 +564,6 @@ int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t* useconds) {
return pTaskInfo->code;
}
int32_t qKillTask(qTaskInfo_t qinfo) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)qinfo;
if (pTaskInfo == NULL) {
return TSDB_CODE_QRY_INVALID_QHANDLE;
}
qAsyncKillTask(qinfo);
// Wait for the query-executing thread to stop.
// Once the query is stopped, the owner of qHandle will be cleared immediately.
while (pTaskInfo->owner != 0) {
taosMsleep(100);
}
return TSDB_CODE_SUCCESS;
}
int32_t qAsyncKillTask(qTaskInfo_t qinfo) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)qinfo;
@ -660,12 +643,114 @@ int32_t qStreamInput(qTaskInfo_t tinfo, void* pItem) {
}
#endif
int32_t qStreamPrepareRecover(qTaskInfo_t tinfo, int64_t startVer, int64_t endVer) {
int32_t qStreamSourceRecoverStep1(qTaskInfo_t tinfo, int64_t ver) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM);
pTaskInfo->streamInfo.recoverStartVer = startVer;
pTaskInfo->streamInfo.recoverEndVer = endVer;
pTaskInfo->streamInfo.recoverStep = STREAM_RECOVER_STEP__PREPARE;
pTaskInfo->streamInfo.recoverStartVer = 0;
pTaskInfo->streamInfo.recoverEndVer = ver;
pTaskInfo->streamInfo.recoverStep = STREAM_RECOVER_STEP__PREPARE1;
return 0;
}
int32_t qStreamSourceRecoverStep2(qTaskInfo_t tinfo, int64_t ver) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM);
pTaskInfo->streamInfo.recoverStartVer = pTaskInfo->streamInfo.recoverEndVer;
pTaskInfo->streamInfo.recoverEndVer = ver;
pTaskInfo->streamInfo.recoverStep = STREAM_RECOVER_STEP__PREPARE2;
return 0;
}
int32_t qStreamRecoverFinish(qTaskInfo_t tinfo) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM);
pTaskInfo->streamInfo.recoverStep = STREAM_RECOVER_STEP__NONE;
return 0;
}
int32_t qStreamSetParamForRecover(qTaskInfo_t tinfo) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
SOperatorInfo* pOperator = pTaskInfo->pRoot;
while (1) {
if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL ||
pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL ||
pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL) {
SStreamIntervalOperatorInfo* pInfo = pOperator->info;
pTaskInfo->streamInfo.triggerSaved = pInfo->twAggSup.calTrigger;
pTaskInfo->streamInfo.deleteMarkSaved = pInfo->twAggSup.deleteMark;
pInfo->twAggSup.calTrigger = STREAM_TRIGGER_AT_ONCE;
pInfo->twAggSup.deleteMark = INT64_MAX;
} else if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION ||
pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION ||
pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION) {
SStreamSessionAggOperatorInfo* pInfo = pOperator->info;
pTaskInfo->streamInfo.triggerSaved = pInfo->twAggSup.calTrigger;
pTaskInfo->streamInfo.deleteMarkSaved = pInfo->twAggSup.deleteMark;
pInfo->twAggSup.calTrigger = STREAM_TRIGGER_AT_ONCE;
pInfo->twAggSup.deleteMark = INT64_MAX;
} else if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE) {
SStreamStateAggOperatorInfo* pInfo = pOperator->info;
pTaskInfo->streamInfo.triggerSaved = pInfo->twAggSup.calTrigger;
pTaskInfo->streamInfo.deleteMarkSaved = pInfo->twAggSup.deleteMark;
pInfo->twAggSup.calTrigger = STREAM_TRIGGER_AT_ONCE;
pInfo->twAggSup.deleteMark = INT64_MAX;
}
// iterate operator tree
if (pOperator->numOfDownstream != 1 || pOperator->pDownstream[0] == NULL) {
if (pOperator->numOfDownstream > 1) {
qError("unexpected stream, multiple downstream");
ASSERT(0);
return -1;
}
return 0;
} else {
pOperator = pOperator->pDownstream[0];
}
}
return 0;
}
int32_t qStreamRestoreParam(qTaskInfo_t tinfo) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
SOperatorInfo* pOperator = pTaskInfo->pRoot;
while (1) {
if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL ||
pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL ||
pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL) {
SStreamIntervalOperatorInfo* pInfo = pOperator->info;
pInfo->twAggSup.calTrigger = pTaskInfo->streamInfo.triggerSaved;
pInfo->twAggSup.deleteMark = pTaskInfo->streamInfo.deleteMarkSaved;
} else if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION ||
pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION ||
pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION) {
SStreamSessionAggOperatorInfo* pInfo = pOperator->info;
pInfo->twAggSup.calTrigger = pTaskInfo->streamInfo.triggerSaved;
pInfo->twAggSup.deleteMark = pTaskInfo->streamInfo.deleteMarkSaved;
} else if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE) {
SStreamStateAggOperatorInfo* pInfo = pOperator->info;
pInfo->twAggSup.calTrigger = pTaskInfo->streamInfo.triggerSaved;
pInfo->twAggSup.deleteMark = pTaskInfo->streamInfo.deleteMarkSaved;
}
// iterate operator tree
if (pOperator->numOfDownstream != 1 || pOperator->pDownstream[0] == NULL) {
if (pOperator->numOfDownstream > 1) {
qError("unexpected stream, multiple downstream");
ASSERT(0);
return -1;
}
return 0;
} else {
pOperator = pOperator->pDownstream[0];
}
}
return 0;
}
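These five entry points are intended to be driven in a fixed order by the stream task layer during fill-history recovery. A minimal sketch of that sequence, assuming the caller owns the qTaskInfo_t handle and already knows the two fill-history version boundaries (the helper name, the included header and ver1/ver2 are illustrative only, not part of this change):

#include "executor.h"  // assumed header exposing the qStream* recover entry points

static int32_t recoverStreamExecutorSketch(qTaskInfo_t tinfo, int64_t ver1, int64_t ver2) {
  if (qStreamSetParamForRecover(tinfo) < 0) return -1;        // save trigger/delete-mark, force AT_ONCE
  if (qStreamSourceRecoverStep1(tinfo, ver1) < 0) return -1;  // first pass: versions up to ver1
  // ... run the executor until the step-1 range is drained ...
  if (qStreamSourceRecoverStep2(tinfo, ver2) < 0) return -1;  // second pass: (ver1, ver2]
  // ... run the executor until the step-2 range is drained ...
  if (qStreamRestoreParam(tinfo) < 0) return -1;              // restore the saved trigger/delete-mark
  return qStreamRecoverFinish(tinfo);                         // back to STREAM_RECOVER_STEP__NONE
}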

View File

@ -2301,8 +2301,8 @@ SOperatorInfo* createExchangeOperatorInfo(void* pTransporter, SExchangePhysiNode
pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pDummyBlock->pDataBlock);
pOperator->pTaskInfo = pTaskInfo;
pOperator->fpSet = createOperatorFpSet(prepareLoadRemoteData, doLoadRemoteData, NULL, NULL,
destroyExchangeOperatorInfo, NULL);
pOperator->fpSet =
createOperatorFpSet(prepareLoadRemoteData, doLoadRemoteData, NULL, NULL, destroyExchangeOperatorInfo, NULL);
return pOperator;
_error:
@ -3026,8 +3026,8 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SAggPhysiN
pOperator->info = pInfo;
pOperator->pTaskInfo = pTaskInfo;
pOperator->fpSet = createOperatorFpSet(doOpenAggregateOptr, getAggregateResult, NULL, NULL, destroyAggOperatorInfo,
NULL);
pOperator->fpSet =
createOperatorFpSet(doOpenAggregateOptr, getAggregateResult, NULL, NULL, destroyAggOperatorInfo, NULL);
if (downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
STableScanInfo* pTableScanInfo = downstream->info;
@ -3253,8 +3253,7 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode*
pOperator->info = pInfo;
pOperator->pTaskInfo = pTaskInfo;
pOperator->fpSet =
createOperatorFpSet(operatorDummyOpenFn, doFill, NULL, NULL, destroyFillOperatorInfo, NULL);
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doFill, NULL, NULL, destroyFillOperatorInfo, NULL);
code = appendDownstream(pOperator, &downstream, 1);
return pOperator;
@ -3427,7 +3426,7 @@ static int32_t sortTableGroup(STableListInfo* pTableListInfo) {
bool groupbyTbname(SNodeList* pGroupList) {
bool bytbname = false;
if (LIST_LENGTH(pGroupList) > 0) {
if (LIST_LENGTH(pGroupList) == 1) {
SNode* p = nodesListGetNode(pGroupList, 0);
if (p->type == QUERY_NODE_FUNCTION) {
// partition by tbname/group by tbname
@ -3443,7 +3442,7 @@ int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle,
return TDB_CODE_SUCCESS;
}
pTableListInfo->map = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
pTableListInfo->map = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_ENTRY_LOCK);
if (pTableListInfo->map == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
@ -3640,9 +3639,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
return pOperator;
}
int32_t num = 0;
size_t size = LIST_LENGTH(pPhyNode->pChildren);
size_t size = LIST_LENGTH(pPhyNode->pChildren);
SOperatorInfo** ops = taosMemoryCalloc(size, POINTER_BYTES);
for (int32_t i = 0; i < size; ++i) {
SPhysiNode* pChildNode = (SPhysiNode*)nodesListGetNode(pPhyNode->pChildren, i);
@ -3950,8 +3947,11 @@ int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SRead
goto _complete;
}
if (pHandle && pHandle->pStateBackend) {
(*pTaskInfo)->streamInfo.pState = pHandle->pStateBackend;
if (pHandle) {
/*(*pTaskInfo)->streamInfo.fillHistoryVer1 = pHandle->fillHistoryVer1;*/
if (pHandle->pStateBackend) {
(*pTaskInfo)->streamInfo.pState = pHandle->pStateBackend;
}
}
(*pTaskInfo)->sql = sql;

View File

@ -988,8 +988,8 @@ SOperatorInfo* createDataBlockInfoScanOperator(void* dataReader, SReadHandle* re
pOperator->info = pInfo;
pOperator->pTaskInfo = pTaskInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doBlockInfoScan, NULL, NULL,
destroyBlockDistScanOperatorInfo, NULL);
pOperator->fpSet =
createOperatorFpSet(operatorDummyOpenFn, doBlockInfoScan, NULL, NULL, destroyBlockDistScanOperatorInfo, NULL);
return pOperator;
_error:
@ -1771,11 +1771,17 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
#endif
#if 1
if (pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__PREPARE) {
if (pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__PREPARE1 ||
pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__PREPARE2) {
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
memcpy(&pTSInfo->cond, &pTaskInfo->streamInfo.tableCond, sizeof(SQueryTableDataCond));
pTSInfo->cond.startVersion = -1;
pTSInfo->cond.endVersion = pTaskInfo->streamInfo.fillHistoryVer1;
if (pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__PREPARE1) {
pTSInfo->cond.startVersion = -1;
pTSInfo->cond.endVersion = pTaskInfo->streamInfo.fillHistoryVer1;
} else {
pTSInfo->cond.startVersion = pTaskInfo->streamInfo.fillHistoryVer1 + 1;
pTSInfo->cond.endVersion = pTaskInfo->streamInfo.fillHistoryVer2;
}
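// step 1 scans versions up to fillHistoryVer1, step 2 picks up at
// fillHistoryVer1 + 1 and stops at fillHistoryVer2, so together the two
// passes cover the fill-history range without overlap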
pTSInfo->scanTimes = 0;
pTSInfo->currentGroupId = -1;
pTaskInfo->streamInfo.recoverStep = STREAM_RECOVER_STEP__SCAN;
@ -2229,7 +2235,8 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
pInfo->pGroupTags = pTableScanNode->pGroupTags;
int32_t numOfCols = 0;
int32_t code = extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID, &pInfo->matchInfo);
int32_t code =
extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID, &pInfo->matchInfo);
int32_t numOfOutput = taosArrayGetSize(pInfo->matchInfo.pList);
SArray* pColIds = taosArrayInit(numOfOutput, sizeof(int16_t));
@ -2287,7 +2294,8 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
pTSInfo->scanMode = TABLE_SCAN__TABLE_ORDER;
pTSInfo->dataReader = NULL;
if (tsdbReaderOpen(pHandle->vnode, &pTSInfo->cond, tableList, &pTSInfo->dataReader, NULL) < 0) {
ASSERT(0);
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _error;
}
}
@ -2353,8 +2361,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
pOperator->pTaskInfo = pTaskInfo;
__optr_fn_t nextFn = pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM ? doStreamScan : doQueueScan;
pOperator->fpSet =
createOperatorFpSet(operatorDummyOpenFn, nextFn, NULL, NULL, destroyStreamScanOperatorInfo, NULL);
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, nextFn, NULL, NULL, destroyStreamScanOperatorInfo, NULL);
return pOperator;
@ -3922,8 +3929,7 @@ SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScan
pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pRes->pDataBlock);
pOperator->pTaskInfo = pTaskInfo;
pOperator->fpSet =
createOperatorFpSet(operatorDummyOpenFn, doSysTableScan, NULL, NULL, destroySysScanOperator, NULL);
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doSysTableScan, NULL, NULL, destroySysScanOperator, NULL);
return pOperator;
@ -4035,7 +4041,8 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi
int32_t num = 0;
int32_t numOfExprs = 0;
SExprInfo* pExprInfo = createExprInfo(pPhyNode->pScanPseudoCols, NULL, &numOfExprs);
int32_t code = extractColMatchInfo(pPhyNode->pScanPseudoCols, pDescNode, &num, COL_MATCH_FROM_COL_ID, &pInfo->matchInfo);
int32_t code =
extractColMatchInfo(pPhyNode->pScanPseudoCols, pDescNode, &num, COL_MATCH_FROM_COL_ID, &pInfo->matchInfo);
code = initExprSupp(&pOperator->exprSupp, pExprInfo, numOfExprs);
if (code != TSDB_CODE_SUCCESS) {
@ -4058,8 +4065,7 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi
initResultSizeInfo(&pOperator->resultInfo, 4096);
blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity);
pOperator->fpSet =
createOperatorFpSet(operatorDummyOpenFn, doTagScan, NULL, NULL, destroyTagScanOperatorInfo, NULL);
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doTagScan, NULL, NULL, destroyTagScanOperatorInfo, NULL);
return pOperator;
@ -4555,7 +4561,8 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN
SDataBlockDescNode* pDescNode = pTableScanNode->scan.node.pOutputDataBlockDesc;
int32_t numOfCols = 0;
int32_t code = extractColMatchInfo(pTableScanNode->scan.pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID, &pInfo->matchInfo);
int32_t code = extractColMatchInfo(pTableScanNode->scan.pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID,
&pInfo->matchInfo);
code = initQueryTableDataCond(&pInfo->cond, pTableScanNode);
if (code != TSDB_CODE_SUCCESS) {

View File

@ -1777,11 +1777,11 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SIntervalPh
}
SInterval interval = {.interval = pPhyNode->interval,
.sliding = pPhyNode->sliding,
.intervalUnit = pPhyNode->intervalUnit,
.slidingUnit = pPhyNode->slidingUnit,
.offset = pPhyNode->offset,
.precision = ((SColumnNode*)pPhyNode->window.pTspk)->node.resType.precision};
.sliding = pPhyNode->sliding,
.intervalUnit = pPhyNode->intervalUnit,
.slidingUnit = pPhyNode->slidingUnit,
.offset = pPhyNode->offset,
.precision = ((SColumnNode*)pPhyNode->window.pTspk)->node.resType.precision};
STimeWindowAggSupp as = {
.waterMark = pPhyNode->window.watermark,
@ -1832,7 +1832,8 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SIntervalPh
pOperator->status = OP_NOT_OPENED;
pOperator->info = pInfo;
pOperator->fpSet = createOperatorFpSet(doOpenIntervalAgg, doBuildIntervalResult, NULL, NULL, destroyIntervalOperatorInfo, NULL);
pOperator->fpSet =
createOperatorFpSet(doOpenIntervalAgg, doBuildIntervalResult, NULL, NULL, destroyIntervalOperatorInfo, NULL);
code = appendDownstream(pOperator, &downstream, 1);
if (code != TSDB_CODE_SUCCESS) {
@ -2638,7 +2639,8 @@ SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode
pOperator->info = pInfo;
pOperator->pTaskInfo = pTaskInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doTimeslice, NULL, NULL, destroyTimeSliceOperatorInfo, NULL);
pOperator->fpSet =
createOperatorFpSet(operatorDummyOpenFn, doTimeslice, NULL, NULL, destroyTimeSliceOperatorInfo, NULL);
blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity);
@ -2708,8 +2710,8 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SStateWi
pOperator->pTaskInfo = pTaskInfo;
pOperator->info = pInfo;
pOperator->fpSet = createOperatorFpSet(openStateWindowAggOptr, doStateWindowAgg, NULL, NULL,
destroyStateWindowOperatorInfo, NULL);
pOperator->fpSet =
createOperatorFpSet(openStateWindowAggOptr, doStateWindowAgg, NULL, NULL, destroyStateWindowOperatorInfo, NULL);
code = appendDownstream(pOperator, &downstream, 1);
if (code != TSDB_CODE_SUCCESS) {
@ -2782,8 +2784,8 @@ SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionW
pOperator->status = OP_NOT_OPENED;
pOperator->info = pInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doSessionWindowAgg, NULL, NULL,
destroySWindowOperatorInfo, NULL);
pOperator->fpSet =
createOperatorFpSet(operatorDummyOpenFn, doSessionWindowAgg, NULL, NULL, destroySWindowOperatorInfo, NULL);
pOperator->pTaskInfo = pTaskInfo;
code = appendDownstream(pOperator, &downstream, 1);
if (code != TSDB_CODE_SUCCESS) {
@ -4257,9 +4259,8 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
pOperator->info = pInfo;
pOperator->fpSet =
createOperatorFpSet(operatorDummyOpenFn, doStreamSessionAgg, NULL, NULL, destroyStreamSessionAggOperatorInfo,
NULL);
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamSessionAgg, NULL, NULL,
destroyStreamSessionAggOperatorInfo, NULL);
if (downstream) {
initDownStream(downstream, &pInfo->streamAggSup, pInfo->twAggSup.waterMark, pOperator->operatorType,
pInfo->primaryTsIndex);
@ -4404,9 +4405,8 @@ SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream
pInfo->pUpdateRes = createSpecialDataBlock(STREAM_CLEAR);
blockDataEnsureCapacity(pInfo->pUpdateRes, 128);
pOperator->name = "StreamSessionSemiAggOperator";
pOperator->fpSet =
createOperatorFpSet(operatorDummyOpenFn, doStreamSessionSemiAgg, NULL, NULL,
destroyStreamSessionAggOperatorInfo, NULL);
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamSessionSemiAgg, NULL, NULL,
destroyStreamSessionAggOperatorInfo, NULL);
}
pInfo->pGroupIdTbNameMap =
@ -4739,6 +4739,7 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys
.maxTs = INT64_MIN,
.minTs = INT64_MAX,
};
initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window);
SExprSupp* pSup = &pOperator->exprSupp;
@ -4774,8 +4775,8 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys
pOperator->status = OP_NOT_OPENED;
pOperator->pTaskInfo = pTaskInfo;
pOperator->info = pInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamStateAgg, NULL, NULL,
destroyStreamStateOperatorInfo, NULL);
pOperator->fpSet =
createOperatorFpSet(operatorDummyOpenFn, doStreamStateAgg, NULL, NULL, destroyStreamStateOperatorInfo, NULL);
initDownStream(downstream, &pInfo->streamAggSup, pInfo->twAggSup.waterMark, pOperator->operatorType,
pInfo->primaryTsIndex);
code = appendDownstream(pOperator, &downstream, 1);
@ -5051,8 +5052,8 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream,
pOperator->pTaskInfo = pTaskInfo;
pOperator->info = miaInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, mergeAlignedIntervalAgg, NULL, NULL,
destroyMAIOperatorInfo, NULL);
pOperator->fpSet =
createOperatorFpSet(operatorDummyOpenFn, mergeAlignedIntervalAgg, NULL, NULL, destroyMAIOperatorInfo, NULL);
code = appendDownstream(pOperator, &downstream, 1);
if (code != TSDB_CODE_SUCCESS) {
@ -5363,8 +5364,8 @@ SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SMerge
pOperator->pTaskInfo = pTaskInfo;
pOperator->info = pMergeIntervalInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doMergeIntervalAgg, NULL, NULL,
destroyMergeIntervalOperatorInfo, NULL);
pOperator->fpSet =
createOperatorFpSet(operatorDummyOpenFn, doMergeIntervalAgg, NULL, NULL, destroyMergeIntervalOperatorInfo, NULL);
code = appendDownstream(pOperator, &downstream, 1);
if (code != TSDB_CODE_SUCCESS) {
@ -5528,6 +5529,7 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhys
};
ASSERT(twAggSupp.calTrigger != STREAM_TRIGGER_MAX_DELAY);
pOperator->pTaskInfo = pTaskInfo;
pInfo->interval = interval;
pInfo->twAggSup = twAggSupp;
@ -5595,9 +5597,8 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhys
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
pOperator->info = pInfo;
pOperator->fpSet =
createOperatorFpSet(operatorDummyOpenFn, doStreamIntervalAgg, NULL, NULL, destroyStreamFinalIntervalOperatorInfo,
NULL);
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamIntervalAgg, NULL, NULL,
destroyStreamFinalIntervalOperatorInfo, NULL);
initIntervalDownStream(downstream, pPhyNode->type, &pInfo->aggSup, &pInfo->interval, &pInfo->twAggSup);
code = appendDownstream(pOperator, &downstream, 1);

View File

@ -49,8 +49,7 @@ extern "C" {
#define FUNC_MGT_MULTI_ROWS_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(20)
#define FUNC_MGT_KEEP_ORDER_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(21)
#define FUNC_MGT_CUMULATIVE_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(22)
#define FUNC_MGT_FORBID_STABLE_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(23)
#define FUNC_MGT_INTERP_PC_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(24)
#define FUNC_MGT_INTERP_PC_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(23)
#define FUNC_MGT_TEST_MASK(val, mask) (((val) & (mask)) != 0)

View File

@ -2381,7 +2381,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.name = "interp",
.type = FUNCTION_TYPE_INTERP,
.classification = FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_INTERVAL_INTERPO_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC |
FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_STABLE_FUNC,
FUNC_MGT_FORBID_STREAM_FUNC,
.translateFunc = translateInterp,
.getEnvFunc = getSelectivityFuncEnv,
.initFunc = functionSetup,

View File

@ -216,8 +216,6 @@ bool fmIsKeepOrderFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, F
bool fmIsCumulativeFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_CUMULATIVE_FUNC); }
bool fmIsForbidSuperTableFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_FORBID_STABLE_FUNC); }
bool fmIsInterpFunc(int32_t funcId) {
if (funcId < 0 || funcId >= funcMgtBuiltinsNum) {
return false;

View File

@ -888,10 +888,11 @@ static int32_t udfdUvInit() {
}
global.loop = loop;
uv_pipe_init(global.loop, &global.ctrlPipe, 1);
uv_pipe_open(&global.ctrlPipe, 0);
uv_read_start((uv_stream_t *)&global.ctrlPipe, udfdCtrlAllocBufCb, udfdCtrlReadCb);
if (tsStartUdfd) { // udfd is started by taosd, which shall exit when taosd exit
uv_pipe_init(global.loop, &global.ctrlPipe, 1);
uv_pipe_open(&global.ctrlPipe, 0);
uv_read_start((uv_stream_t *)&global.ctrlPipe, udfdCtrlAllocBufCb, udfdCtrlReadCb);
}
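// when udfd is launched standalone (tsStartUdfd is false) there is no control pipe from taosd, so the setup above is skipped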
getUdfdPipeName(global.listenPipeName, sizeof(global.listenPipeName));
removeListeningPipe();
@ -979,13 +980,13 @@ int32_t udfdDeinitResidentFuncs() {
char* funcName = taosArrayGet(global.residentFuncs, i);
SUdf** udfInHash = taosHashGet(global.udfsHash, funcName, strlen(funcName));
if (udfInHash) {
taosHashRemove(global.udfsHash, funcName, strlen(funcName));
SUdf* udf = *udfInHash;
if (udf->destroyFunc) {
(udf->destroyFunc)();
}
uv_dlclose(&udf->lib);
taosMemoryFree(udf);
taosHashRemove(global.udfsHash, funcName, strlen(funcName));
}
}
taosArrayDestroy(global.residentFuncs);

View File

@ -1537,25 +1537,6 @@ static int32_t translateRepeatScanFunc(STranslateContext* pCxt, SFunctionNode* p
return TSDB_CODE_SUCCESS;
}
static int32_t translateForbidSuperTableFunc(STranslateContext* pCxt, SFunctionNode* pFunc) {
if (!fmIsForbidSuperTableFunc(pFunc->funcId)) {
return TSDB_CODE_SUCCESS;
}
if (!isSelectStmt(pCxt->pCurrStmt)) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_SUPPORT_SINGLE_TABLE,
"%s is only supported in single table query", pFunc->functionName);
}
SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt;
SNode* pTable = pSelect->pFromTable;
if ((NULL != pTable && (QUERY_NODE_REAL_TABLE != nodeType(pTable) ||
(TSDB_CHILD_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType &&
TSDB_NORMAL_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType)))) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_SUPPORT_SINGLE_TABLE,
"%s is only supported in single table query", pFunc->functionName);
}
return TSDB_CODE_SUCCESS;
}
static bool isStar(SNode* pNode) {
return (QUERY_NODE_COLUMN == nodeType(pNode)) && ('\0' == ((SColumnNode*)pNode)->tableAlias[0]) &&
(0 == strcmp(((SColumnNode*)pNode)->colName, "*"));
@ -1717,9 +1698,6 @@ static int32_t rewriteSystemInfoFunc(STranslateContext* pCxt, SNode** pNode) {
static int32_t translateNoramlFunction(STranslateContext* pCxt, SFunctionNode* pFunc) {
int32_t code = translateAggFunc(pCxt, pFunc);
if (TSDB_CODE_SUCCESS == code) {
code = translateForbidSuperTableFunc(pCxt, pFunc);
}
if (TSDB_CODE_SUCCESS == code) {
code = translateScanPseudoColumnFunc(pCxt, pFunc);
}
@ -3058,12 +3036,14 @@ static EDealRes checkStateExpr(SNode* pNode, void* pContext) {
return DEAL_RES_CONTINUE;
}
static bool isPartitionByTbname(SNodeList* pPartitionByList) {
if (1 != LIST_LENGTH(pPartitionByList)) {
return false;
static bool hasPartitionByTbname(SNodeList* pPartitionByList) {
SNode* pPartKey = NULL;
FOREACH(pPartKey, pPartitionByList) {
if (QUERY_NODE_FUNCTION == nodeType(pPartKey) && FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pPartKey)->funcType) {
return true;
}
}
SNode* pPartKey = nodesListGetNode(pPartitionByList, 0);
return QUERY_NODE_FUNCTION == nodeType(pPartKey) && FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pPartKey)->funcType;
return false;
}
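// note: a stream over a super table now only needs tbname to appear somewhere in the partition list; it no longer has to be the sole partition key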
static int32_t checkStateWindowForStream(STranslateContext* pCxt, SSelectStmt* pSelect) {
@ -3071,7 +3051,7 @@ static int32_t checkStateWindowForStream(STranslateContext* pCxt, SSelectStmt* p
return TSDB_CODE_SUCCESS;
}
if (TSDB_SUPER_TABLE == ((SRealTableNode*)pSelect->pFromTable)->pMeta->tableType &&
!isPartitionByTbname(pSelect->pPartitionByList)) {
!hasPartitionByTbname(pSelect->pPartitionByList)) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Unsupported stream query");
}
return TSDB_CODE_SUCCESS;
@ -4847,6 +4827,11 @@ static int32_t checkAlterSuperTableBySchema(STranslateContext* pCxt, SAlterTable
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_ONE_JSON_TAG);
}
int32_t tagsLen = 0;
for (int32_t i = 0; i < pTableMeta->tableInfo.numOfTags; ++i) {
tagsLen += pTagsSchema[i].bytes;
}
if (TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES == pStmt->alterType ||
TSDB_ALTER_TABLE_UPDATE_TAG_BYTES == pStmt->alterType) {
if (TSDB_SUPER_TABLE != pTableMeta->tableType) {
@ -4860,7 +4845,38 @@ static int32_t checkAlterSuperTableBySchema(STranslateContext* pCxt, SAlterTable
pSchema->bytes >= calcTypeBytes(pStmt->dataType)) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_MODIFY_COL);
}
if (TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES == pStmt->alterType &&
pTableMeta->tableInfo.rowSize + calcTypeBytes(pStmt->dataType) - pSchema->bytes > TSDB_MAX_BYTES_PER_ROW) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ROW_LENGTH, TSDB_MAX_BYTES_PER_ROW);
}
if (TSDB_ALTER_TABLE_UPDATE_TAG_BYTES == pStmt->alterType &&
tagsLen + calcTypeBytes(pStmt->dataType) - pSchema->bytes > TSDB_MAX_TAGS_LEN) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TAGS_LENGTH, TSDB_MAX_TAGS_LEN);
}
}
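// e.g. widening a tag column from 8 to 64 bytes is accepted only if tagsLen + (64 - 8) still fits within TSDB_MAX_TAGS_LEN (byte values illustrative)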
if (TSDB_ALTER_TABLE_ADD_COLUMN == pStmt->alterType) {
if (TSDB_MAX_COLUMNS == pTableMeta->tableInfo.numOfColumns) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_TOO_MANY_COLUMNS);
}
if (pTableMeta->tableInfo.rowSize + calcTypeBytes(pStmt->dataType) > TSDB_MAX_BYTES_PER_ROW) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ROW_LENGTH, TSDB_MAX_BYTES_PER_ROW);
}
}
if (TSDB_ALTER_TABLE_ADD_TAG == pStmt->alterType) {
if (TSDB_MAX_TAGS == pTableMeta->tableInfo.numOfTags) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TAGS_NUM);
}
if (tagsLen + calcTypeBytes(pStmt->dataType) > TSDB_MAX_TAGS_LEN) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TAGS_LENGTH, TSDB_MAX_TAGS_LEN);
}
}
return TSDB_CODE_SUCCESS;
}
@ -5329,12 +5345,12 @@ static int32_t translateKillTransaction(STranslateContext* pCxt, SKillStmt* pStm
static bool crossTableWithoutAggOper(SSelectStmt* pSelect) {
return NULL == pSelect->pWindow && !pSelect->hasAggFuncs && !pSelect->hasIndefiniteRowsFunc &&
!pSelect->hasInterpFunc && TSDB_SUPER_TABLE == ((SRealTableNode*)pSelect->pFromTable)->pMeta->tableType &&
!isPartitionByTbname(pSelect->pPartitionByList);
!hasPartitionByTbname(pSelect->pPartitionByList);
}
static bool crossTableWithUdaf(SSelectStmt* pSelect) {
return pSelect->hasUdaf && TSDB_SUPER_TABLE == ((SRealTableNode*)pSelect->pFromTable)->pMeta->tableType &&
!isPartitionByTbname(pSelect->pPartitionByList);
!hasPartitionByTbname(pSelect->pPartitionByList);
}
static int32_t checkCreateStream(STranslateContext* pCxt, SCreateStreamStmt* pStmt) {
@ -7078,6 +7094,14 @@ static int32_t buildAddColReq(STranslateContext* pCxt, SAlterTableStmt* pStmt, S
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_DUPLICATED_COLUMN);
}
if (TSDB_MAX_COLUMNS == pTableMeta->tableInfo.numOfColumns) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_TOO_MANY_COLUMNS);
}
if (pTableMeta->tableInfo.rowSize + calcTypeBytes(pStmt->dataType) > TSDB_MAX_BYTES_PER_ROW) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ROW_LENGTH, TSDB_MAX_BYTES_PER_ROW);
}
pReq->colName = strdup(pStmt->colName);
if (NULL == pReq->colName) {
return TSDB_CODE_OUT_OF_MEMORY;
@ -7085,7 +7109,6 @@ static int32_t buildAddColReq(STranslateContext* pCxt, SAlterTableStmt* pStmt, S
pReq->type = pStmt->dataType.type;
pReq->flags = COL_SMA_ON;
// pReq->bytes = pStmt->dataType.bytes;
pReq->bytes = calcTypeBytes(pStmt->dataType);
return TSDB_CODE_SUCCESS;
}
@ -7123,6 +7146,10 @@ static int32_t buildUpdateColReq(STranslateContext* pCxt, SAlterTableStmt* pStmt
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_MODIFY_COL);
}
if (pTableMeta->tableInfo.rowSize + pReq->colModBytes - pSchema->bytes > TSDB_MAX_BYTES_PER_ROW) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ROW_LENGTH, TSDB_MAX_BYTES_PER_ROW);
}
pReq->colName = strdup(pStmt->colName);
if (NULL == pReq->colName) {
return TSDB_CODE_OUT_OF_MEMORY;

View File

@ -624,8 +624,6 @@ static int32_t createIndefRowsFuncLogicNode(SLogicPlanContext* pCxt, SSelectStmt
return code;
}
static bool isInterpFunc(int32_t funcId) { return fmIsInterpFunc(funcId) || fmIsInterpPseudoColumnFunc(funcId); }
static int32_t createInterpFuncLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect, SLogicNode** pLogicNode) {
if (!pSelect->hasInterpFunc) {
return TSDB_CODE_SUCCESS;
@ -640,7 +638,8 @@ static int32_t createInterpFuncLogicNode(SLogicPlanContext* pCxt, SSelectStmt* p
pInterpFunc->node.requireDataOrder = getRequireDataOrder(true, pSelect);
pInterpFunc->node.resultDataOrder = pInterpFunc->node.requireDataOrder;
int32_t code = nodesCollectFuncs(pSelect, SQL_CLAUSE_SELECT, isInterpFunc, &pInterpFunc->pFuncs);
// interp functions and _group_key functions
int32_t code = nodesCollectFuncs(pSelect, SQL_CLAUSE_SELECT, fmIsVectorFunc, &pInterpFunc->pFuncs);
if (TSDB_CODE_SUCCESS == code) {
code = rewriteExprsForSelect(pInterpFunc->pFuncs, pSelect, SQL_CLAUSE_SELECT);
}
@ -728,7 +727,7 @@ static int32_t createWindowLogicNodeByState(SLogicPlanContext* pCxt, SStateWindo
if (TSDB_CODE_SUCCESS == code) {
code = createWindowLogicNodeFinalize(pCxt, pSelect, pWindow, pLogicNode);
}
return code;
}

View File

@ -2242,6 +2242,26 @@ static EDealRes lastRowScanOptSetColDataType(SNode* pNode, void* pContext) {
return DEAL_RES_CONTINUE;
}
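// keep only the targets that also appear in pLastCols, rewriting their result type to the last-cache layout, and erase every other target from the list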
static void lastRowScanOptSetLastTargets(SNodeList* pTargets, SNodeList* pLastCols) {
SNode* pTarget = NULL;
WHERE_EACH(pTarget, pTargets) {
bool found = false;
SNode* pCol = NULL;
FOREACH(pCol, pLastCols) {
if (nodesEqualNode(pCol, pTarget)) {
getLastCacheDataType(&(((SColumnNode*)pTarget)->node.resType));
found = true;
break;
}
}
if (!found) {
ERASE_NODE(pTargets);
continue;
}
WHERE_NEXT;
}
}
static int32_t lastRowScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan) {
SAggLogicNode* pAgg = (SAggLogicNode*)optFindPossibleNode(pLogicSubplan->pNode, lastRowScanOptMayBeOptimized);
@ -2265,6 +2285,7 @@ static int32_t lastRowScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogic
}
if (FUNCTION_TYPE_LAST == funcType) {
nodesWalkExpr(nodesListGetNode(pFunc->pParameterList, 0), lastRowScanOptSetColDataType, &cxt);
nodesListErase(pFunc->pParameterList, nodesListGetCell(pFunc->pParameterList, 1));
}
}
}
@ -2274,9 +2295,9 @@ static int32_t lastRowScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogic
pScan->igLastNull = pAgg->hasLast ? true : false;
if (NULL != cxt.pLastCols) {
cxt.doAgg = false;
nodesWalkExprs(pScan->pScanCols, lastRowScanOptSetColDataType, &cxt);
lastRowScanOptSetLastTargets(pScan->pScanCols, cxt.pLastCols);
nodesWalkExprs(pScan->pScanPseudoCols, lastRowScanOptSetColDataType, &cxt);
nodesWalkExprs(pScan->node.pTargets, lastRowScanOptSetColDataType, &cxt);
lastRowScanOptSetLastTargets(pScan->node.pTargets, cxt.pLastCols);
nodesClearList(cxt.pLastCols);
}
pAgg->hasLastRow = false;

View File

@ -103,6 +103,9 @@ TEST_F(PlanBasicTest, interpFunc) {
run("SELECT INTERP(c1) FROM t1 RANGE('2017-7-14 18:00:00', '2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR)");
run("SELECT _IROWTS, INTERP(c1) FROM t1 RANGE('2017-7-14 18:00:00', '2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR)");
run("SELECT TBNAME, _IROWTS, INTERP(c1) FROM t1 PARTITION BY TBNAME "
"RANGE('2017-7-14 18:00:00', '2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR)");
}
TEST_F(PlanBasicTest, lastRowFuncWithoutCache) {

View File

@ -1085,7 +1085,8 @@ EDealRes sclRewriteNonConstOperator(SNode **pNode, SScalarCtx *ctx) {
EDealRes sclRewriteFunction(SNode **pNode, SScalarCtx *ctx) {
SFunctionNode *node = (SFunctionNode *)*pNode;
SNode *tnode = NULL;
if (!fmIsScalarFunc(node->funcId) && (!ctx->dual)) {
if ((!fmIsScalarFunc(node->funcId) && (!ctx->dual)) ||
fmIsUserDefinedFunc(node->funcId)) {
return DEAL_RES_CONTINUE;
}

View File

@ -32,7 +32,7 @@ typedef struct {
static SStreamGlobalEnv streamEnv;
int32_t streamPipelineExec(SStreamTask* pTask, int32_t batchNum, bool dispatch);
// int32_t streamPipelineExec(SStreamTask* pTask, int32_t batchNum, bool dispatch);
int32_t streamDispatch(SStreamTask* pTask);
int32_t streamDispatchReqToData(const SStreamDispatchReq* pReq, SStreamDataBlock* pData);

View File

@ -210,6 +210,46 @@ static int32_t streamAddBlockToDispatchMsg(const SSDataBlock* pBlock, SStreamDis
return 0;
}
int32_t streamDispatchOneRecoverFinishReq(SStreamTask* pTask, const SStreamRecoverFinishReq* pReq, int32_t vgId,
SEpSet* pEpSet) {
void* buf = NULL;
int32_t code = -1;
SRpcMsg msg = {0};
int32_t tlen;
tEncodeSize(tEncodeSStreamRecoverFinishReq, pReq, tlen, code);
if (code < 0) {
return -1;
}
buf = rpcMallocCont(sizeof(SMsgHead) + tlen);
if (buf == NULL) {
return -1;
}
((SMsgHead*)buf)->vgId = htonl(vgId);
void* abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
SEncoder encoder;
tEncoderInit(&encoder, abuf, tlen);
if ((code = tEncodeSStreamRecoverFinishReq(&encoder, pReq)) < 0) {
goto FAIL;
}
tEncoderClear(&encoder);
msg.contLen = tlen + sizeof(SMsgHead);
msg.pCont = buf;
msg.msgType = TDMT_VND_STREAM_RECOVER_FINISH;
tmsgSendReq(pEpSet, &msg);
code = 0;
return 0;
FAIL:
if (buf) rpcFreeCont(buf);
return code;
}
int32_t streamDispatchOneReq(SStreamTask* pTask, const SStreamDispatchReq* pReq, int32_t vgId, SEpSet* pEpSet) {
void* buf = NULL;
int32_t code = -1;
@ -244,9 +284,10 @@ int32_t streamDispatchOneReq(SStreamTask* pTask, const SStreamDispatchReq* pReq,
tmsgSendReq(pEpSet, &msg);
code = 0;
FAIL:
if (code < 0 && buf) rpcFreeCont(buf);
return 0;
FAIL:
if (buf) rpcFreeCont(buf);
return code;
}
int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, SSDataBlock* pDataBlock, int32_t vgSz,
@ -439,3 +480,4 @@ FREE:
taosFreeQitem(pBlock);
return code;
}

View File

@ -85,6 +85,54 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, const void* data, SArray*
return 0;
}
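// drain the executor of a source task during recovery: collect up to batchSz result blocks per round, wrap them as a STREAM_INPUT__DATA_BLOCK item and hand it to the task output, until the executor returns no more data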
int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) {
ASSERT(pTask->taskLevel == TASK_LEVEL__SOURCE);
void* exec = pTask->exec.executor;
while (1) {
SArray* pRes = taosArrayInit(0, sizeof(SSDataBlock));
if (pRes == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
int32_t batchCnt = 0;
while (1) {
SSDataBlock* output = NULL;
uint64_t ts = 0;
if (qExecTask(exec, &output, &ts) < 0) {
ASSERT(0);
}
if (output == NULL) break;
SSDataBlock block = {0};
assignOneDataBlock(&block, output);
block.info.childId = pTask->selfChildId;
taosArrayPush(pRes, &block);
if (++batchCnt >= batchSz) break;
}
if (taosArrayGetSize(pRes) == 0) {
taosArrayDestroy(pRes);
break;
}
SStreamDataBlock* qRes = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
if (qRes == NULL) {
taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
qRes->type = STREAM_INPUT__DATA_BLOCK;
qRes->blocks = pRes;
streamTaskOutput(pTask, qRes);
// TODO stream sched dispatch
}
return 0;
}
#if 0
int32_t streamPipelineExec(SStreamTask* pTask, int32_t batchNum, bool dispatch) {
ASSERT(pTask->taskLevel != TASK_LEVEL__SINK);
@ -144,6 +192,7 @@ int32_t streamPipelineExec(SStreamTask* pTask, int32_t batchNum, bool dispatch)
return 0;
}
#endif
int32_t streamExecForAll(SStreamTask* pTask) {
while (1) {

View File

@ -17,7 +17,7 @@
#include "streamInc.h"
#include "ttimer.h"
SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc) {
SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc, int32_t vgId) {
SStreamMeta* pMeta = taosMemoryCalloc(1, sizeof(SStreamMeta));
if (pMeta == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@ -86,7 +86,8 @@ void streamMetaClose(SStreamMeta* pMeta) {
taosMemoryFree(pMeta);
}
int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t startVer, char* msg, int32_t msgLen) {
#if 0
int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t ver, char* msg, int32_t msgLen) {
SStreamTask* pTask = taosMemoryCalloc(1, sizeof(SStreamTask));
if (pTask == NULL) {
return -1;
@ -99,7 +100,7 @@ int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t startVer, char*
}
tDecoderClear(&decoder);
if (pMeta->expandFunc(pMeta->ahandle, pTask) < 0) {
if (pMeta->expandFunc(pMeta->ahandle, pTask, ver) < 0) {
ASSERT(0);
goto FAIL;
}
@ -114,26 +115,20 @@ int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t startVer, char*
goto FAIL;
}
if (pTask->fillHistory) {
// pipeline exec
// if finished, dispatch a stream-prepare-finished msg to downstream task
// set status normal
}
return 0;
FAIL:
if (pTask) tFreeSStreamTask(pTask);
return -1;
}
#endif
#if 0
int32_t streamMetaAddTask(SStreamMeta* pMeta, SStreamTask* pTask) {
#if 1
int32_t streamMetaAddTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask) {
void* buf = NULL;
if (pMeta->expandFunc(pMeta->ahandle, pTask) < 0) {
if (pMeta->expandFunc(pMeta->ahandle, pTask, ver) < 0) {
return -1;
}
taosHashPut(pMeta->pTasks, &pTask->taskId, sizeof(int32_t), &pTask, sizeof(void*));
int32_t len;
int32_t code;
@ -141,20 +136,24 @@ int32_t streamMetaAddTask(SStreamMeta* pMeta, SStreamTask* pTask) {
if (code < 0) {
return -1;
}
buf = taosMemoryCalloc(1, sizeof(len));
buf = taosMemoryCalloc(1, len);
if (buf == NULL) {
return -1;
}
SEncoder encoder;
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, len);
tEncodeSStreamTask(&encoder, pTask);
tEncoderClear(&encoder);
if (tdbTbUpsert(pMeta->pTaskDb, &pTask->taskId, sizeof(int32_t), buf, len, &pMeta->txn) < 0) {
ASSERT(0);
return -1;
}
taosMemoryFree(buf);
taosHashPut(pMeta->pTasks, &pTask->taskId, sizeof(int32_t), &pTask, sizeof(void*));
return 0;
}
#endif
@ -269,7 +268,7 @@ int32_t streamLoadTasks(SStreamMeta* pMeta) {
tDecodeSStreamTask(&decoder, pTask);
tDecoderClear(&decoder);
if (pMeta->expandFunc(pMeta->ahandle, pTask) < 0) {
if (pMeta->expandFunc(pMeta->ahandle, pTask, -1) < 0) {
tdbFree(pKey);
tdbFree(pVal);
tdbTbcClose(pCur);

View File

@ -15,6 +15,111 @@
#include "streamInc.h"
// common
int32_t streamSetParamForRecover(SStreamTask* pTask) {
void* exec = pTask->exec.executor;
return qStreamSetParamForRecover(exec);
}
int32_t streamRestoreParam(SStreamTask* pTask) {
void* exec = pTask->exec.executor;
return qStreamRestoreParam(exec);
}
int32_t streamSetStatusNormal(SStreamTask* pTask) {
pTask->taskStatus = TASK_STATUS__NORMAL;
return 0;
}
// source
int32_t streamSourceRecoverPrepareStep1(SStreamTask* pTask, int64_t ver) {
void* exec = pTask->exec.executor;
return qStreamSourceRecoverStep1(exec, ver);
}
int32_t streamBuildSourceRecover1Req(SStreamTask* pTask, SStreamRecoverStep1Req* pReq) {
pReq->streamId = pTask->streamId;
pReq->taskId = pTask->taskId;
return 0;
}
int32_t streamSourceRecoverScanStep1(SStreamTask* pTask) {
// TODO next: dispatch msg to launch scan step2
return streamScanExec(pTask, 100);
}
int32_t streamBuildSourceRecover2Req(SStreamTask* pTask, SStreamRecoverStep2Req* pReq) {
pReq->streamId = pTask->streamId;
pReq->taskId = pTask->taskId;
return 0;
}
int32_t streamSourceRecoverScanStep2(SStreamTask* pTask, int64_t ver) {
void* exec = pTask->exec.executor;
if (qStreamSourceRecoverStep2(exec, ver) < 0) {
ASSERT(0);
}
return streamScanExec(pTask, 100);
}
int32_t streamDispatchRecoverFinishReq(SStreamTask* pTask) {
SStreamRecoverFinishReq req = {
.streamId = pTask->streamId,
.taskId = pTask->taskId,
.childId = pTask->selfChildId,
};
if (pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH) {
} else if (pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) {
}
return 0;
}
// agg
int32_t streamAggRecoverPrepare(SStreamTask* pTask) {
void* exec = pTask->exec.executor;
if (qStreamSetParamForRecover(exec) < 0) {
return -1;
}
pTask->recoverWaitingChild = taosArrayGetSize(pTask->childEpInfo);
return 0;
}
int32_t streamAggChildrenRecoverFinish(SStreamTask* pTask) {
void* exec = pTask->exec.executor;
if (qStreamRestoreParam(exec) < 0) {
return -1;
}
if (qStreamRecoverFinish(exec) < 0) {
return -1;
}
streamSetStatusNormal(pTask);
return 0;
}
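// called once for each recover-finish message received from a child task; the last arriving child triggers the restore of the saved trigger parameters and flips the task back to normal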
int32_t streamProcessRecoverFinishReq(SStreamTask* pTask, int32_t childId) {
int32_t left = atomic_sub_fetch_32(&pTask->recoverWaitingChild, 1);
ASSERT(left >= 0);
if (left == 0) {
streamAggChildrenRecoverFinish(pTask);
}
return 0;
}
int32_t tEncodeSStreamRecoverFinishReq(SEncoder* pEncoder, const SStreamRecoverFinishReq* pReq) {
if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;
if (tEncodeI32(pEncoder, pReq->taskId) < 0) return -1;
if (tEncodeI32(pEncoder, pReq->childId) < 0) return -1;
tEndEncode(pEncoder);
return pEncoder->pos;
}
int32_t tDecodeSStreamRecoverFinishReq(SDecoder* pDecoder, SStreamRecoverFinishReq* pReq) {
if (tStartDecode(pDecoder) < 0) return -1;
if (tDecodeI64(pDecoder, &pReq->streamId) < 0) return -1;
if (tDecodeI32(pDecoder, &pReq->taskId) < 0) return -1;
if (tDecodeI32(pDecoder, &pReq->childId) < 0) return -1;
tEndDecode(pDecoder);
return 0;
}
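Taken together, the task-level helpers above describe the recovery of a single source task. A hypothetical end-to-end sketch only, under the assumption that one caller drives the whole sequence; in reality the flow is split across vnode message handlers, and ver1/ver2 stand in for the two fill-history version boundaries:

#include "streamInc.h"

static int32_t recoverSourceTaskSketch(SStreamTask* pTask, int64_t ver1, int64_t ver2) {
  if (streamSetParamForRecover(pTask) < 0) return -1;              // switch operators to recovery parameters
  if (streamSourceRecoverPrepareStep1(pTask, ver1) < 0) return -1;
  if (streamSourceRecoverScanStep1(pTask) < 0) return -1;          // replay history up to ver1 in batches of 100
  if (streamSourceRecoverScanStep2(pTask, ver2) < 0) return -1;    // replay (ver1, ver2]
  if (streamDispatchRecoverFinishReq(pTask) < 0) return -1;        // tell downstream tasks recovery is done
  if (streamRestoreParam(pTask) < 0) return -1;
  return streamSetStatusNormal(pTask);
}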
#if 0
int32_t tEncodeStreamTaskRecoverReq(SEncoder* pEncoder, const SStreamTaskRecoverReq* pReq) {
if (tStartEncode(pEncoder) < 0) return -1;
@ -132,6 +237,7 @@ int32_t tDecodeSStreamMultiVgCheckpointInfo(SDecoder* pDecoder, SStreamMultiVgCh
return 0;
}
#if 0
int32_t tEncodeSStreamTaskRecoverReq(SEncoder* pEncoder, const SStreamRecoverDownstreamReq* pReq) {
if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;
if (tEncodeI32(pEncoder, pReq->downstreamTaskId) < 0) return -1;
@ -174,6 +280,7 @@ int32_t tDecodeSStreamTaskRecoverRsp(SDecoder* pDecoder, SStreamRecoverDownstrea
}
return 0;
}
#endif
int32_t streamSaveStateInfo(SStreamMeta* pMeta, SStreamTask* pTask) {
#if 0
@ -269,6 +376,7 @@ int32_t streamSaveAggLevel(SStreamMeta* pMeta, SStreamTask* pTask) {
return 0;
}
#if 0
int32_t streamFetchRecoverStatus(SStreamTask* pTask, const SVgroupInfo* pVgInfo) {
int32_t taskId = pVgInfo->taskId;
int32_t nodeId = pVgInfo->vgId;
@ -339,7 +447,9 @@ int32_t streamFetchDownstreamStatus(SStreamMeta* pMeta, SStreamTask* pTask) {
}
return 0;
}
#endif
#if 0
int32_t streamProcessFetchStatusRsp(SStreamMeta* pMeta, SStreamTask* pTask, SStreamRecoverDownstreamRsp* pRsp) {
// if failed, set timer and retry
// if successful
@ -430,3 +540,4 @@ int32_t streamRecoverTask(SStreamTask* pTask) {
//
return 0;
}
#endif

View File

@ -79,6 +79,12 @@ typedef struct SSyncTimer {
void* pData;
} SSyncTimer;
typedef struct SElectTimer {
uint64_t logicClock;
SSyncNode* pSyncNode;
void* pData;
} SElectTimer;
int32_t syncHbTimerInit(SSyncNode* pSyncNode, SSyncTimer* pSyncTimer, SRaftId destId);
int32_t syncHbTimerStart(SSyncNode* pSyncNode, SSyncTimer* pSyncTimer);
int32_t syncHbTimerStop(SSyncNode* pSyncNode, SSyncTimer* pSyncTimer);
@ -155,7 +161,6 @@ typedef struct SSyncNode {
tmr_h pElectTimer;
int32_t electTimerMS;
uint64_t electTimerLogicClock;
uint64_t electTimerLogicClockUser;
TAOS_TMR_CALLBACK FpElectTimerCB; // Timer Fp
uint64_t electTimerCounter;

View File

@ -1310,7 +1310,6 @@ SSyncNode* syncNodeOpen(SSyncInfo* pSyncInfo) {
pSyncNode->pElectTimer = NULL;
pSyncNode->electTimerMS = syncUtilElectRandomMS(pSyncNode->electBaseLine, 2 * pSyncNode->electBaseLine);
atomic_store_64(&pSyncNode->electTimerLogicClock, 0);
atomic_store_64(&pSyncNode->electTimerLogicClockUser, 0);
pSyncNode->FpElectTimerCB = syncNodeEqElectTimer;
pSyncNode->electTimerCounter = 0;
@ -1565,17 +1564,14 @@ int32_t syncNodeStartElectTimer(SSyncNode* pSyncNode, int32_t ms) {
int32_t ret = 0;
if (syncEnvIsStart()) {
pSyncNode->electTimerMS = ms;
taosTmrReset(pSyncNode->FpElectTimerCB, pSyncNode->electTimerMS, pSyncNode, gSyncEnv->pTimerManager,
&pSyncNode->pElectTimer);
atomic_store_64(&pSyncNode->electTimerLogicClock, pSyncNode->electTimerLogicClockUser);
/*
do {
char logBuf[128];
snprintf(logBuf, sizeof(logBuf), "elect timer reset, ms:%d", ms);
syncNodeEventLog(pSyncNode, logBuf);
} while (0);
*/
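// one heap-allocated param per firing: it carries the logic-clock snapshot and is freed by the timer callback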
SElectTimer* pElectTimer = taosMemoryMalloc(sizeof(SElectTimer));
pElectTimer->logicClock = pSyncNode->electTimerLogicClock;
pElectTimer->pSyncNode = pSyncNode;
pElectTimer->pData = NULL;
taosTmrReset(pSyncNode->FpElectTimerCB, pSyncNode->electTimerMS, pElectTimer, gSyncEnv->pTimerManager,
&pSyncNode->pElectTimer);
} else {
sError("vgId:%d, start elect timer error, sync env is stop", pSyncNode->vgId);
@ -1585,11 +1581,10 @@ int32_t syncNodeStartElectTimer(SSyncNode* pSyncNode, int32_t ms) {
int32_t syncNodeStopElectTimer(SSyncNode* pSyncNode) {
int32_t ret = 0;
atomic_add_fetch_64(&pSyncNode->electTimerLogicClockUser, 1);
atomic_add_fetch_64(&pSyncNode->electTimerLogicClock, 1);
taosTmrStop(pSyncNode->pElectTimer);
pSyncNode->pElectTimer = NULL;
// sTrace("vgId:%d, sync %s stop elect timer", pSyncNode->vgId, syncUtilState2String(pSyncNode->state));
return ret;
}
@ -1815,8 +1810,6 @@ cJSON* syncNode2Json(const SSyncNode* pSyncNode) {
cJSON_AddNumberToObject(pRoot, "electTimerMS", pSyncNode->electTimerMS);
snprintf(u64buf, sizeof(u64buf), "%" PRIu64, pSyncNode->electTimerLogicClock);
cJSON_AddStringToObject(pRoot, "electTimerLogicClock", u64buf);
snprintf(u64buf, sizeof(u64buf), "%" PRIu64, pSyncNode->electTimerLogicClockUser);
cJSON_AddStringToObject(pRoot, "electTimerLogicClockUser", u64buf);
snprintf(u64buf, sizeof(u64buf), "%p", pSyncNode->FpElectTimerCB);
cJSON_AddStringToObject(pRoot, "FpElectTimerCB", u64buf);
snprintf(u64buf, sizeof(u64buf), "%" PRIu64, pSyncNode->electTimerCounter);
@ -1922,7 +1915,7 @@ inline void syncNodeEventLog(const SSyncNode* pSyncNode, char* str) {
snapshot.lastApplyTerm, pSyncNode->pRaftCfg->isStandBy, pSyncNode->pRaftCfg->snapshotStrategy,
pSyncNode->pRaftCfg->batchSize, pSyncNode->replicaNum, pSyncNode->pRaftCfg->lastConfigIndex,
pSyncNode->changing, pSyncNode->restoreFinish, syncNodeDynamicQuorum(pSyncNode),
pSyncNode->electTimerLogicClockUser, pSyncNode->heartbeatTimerLogicClockUser, peerStateStr, printStr);
pSyncNode->electTimerLogicClock, pSyncNode->heartbeatTimerLogicClockUser, peerStateStr, printStr);
} else {
snprintf(logBuf, sizeof(logBuf), "%s", str);
}
@ -1946,7 +1939,7 @@ inline void syncNodeEventLog(const SSyncNode* pSyncNode, char* str) {
snapshot.lastApplyTerm, pSyncNode->pRaftCfg->isStandBy, pSyncNode->pRaftCfg->snapshotStrategy,
pSyncNode->pRaftCfg->batchSize, pSyncNode->replicaNum, pSyncNode->pRaftCfg->lastConfigIndex,
pSyncNode->changing, pSyncNode->restoreFinish, syncNodeDynamicQuorum(pSyncNode),
pSyncNode->electTimerLogicClockUser, pSyncNode->heartbeatTimerLogicClockUser, peerStateStr, printStr);
pSyncNode->electTimerLogicClock, pSyncNode->heartbeatTimerLogicClockUser, peerStateStr, printStr);
} else {
snprintf(s, len, "%s", str);
}
@ -2000,7 +1993,7 @@ inline void syncNodeErrorLog(const SSyncNode* pSyncNode, char* str) {
snapshot.lastApplyTerm, pSyncNode->pRaftCfg->isStandBy, pSyncNode->pRaftCfg->snapshotStrategy,
pSyncNode->pRaftCfg->batchSize, pSyncNode->replicaNum, pSyncNode->pRaftCfg->lastConfigIndex,
pSyncNode->changing, pSyncNode->restoreFinish, syncNodeDynamicQuorum(pSyncNode),
pSyncNode->electTimerLogicClockUser, pSyncNode->heartbeatTimerLogicClockUser, printStr);
pSyncNode->electTimerLogicClock, pSyncNode->heartbeatTimerLogicClockUser, printStr);
} else {
snprintf(logBuf, sizeof(logBuf), "%s", str);
}
@ -2022,7 +2015,7 @@ inline void syncNodeErrorLog(const SSyncNode* pSyncNode, char* str) {
snapshot.lastApplyTerm, pSyncNode->pRaftCfg->isStandBy, pSyncNode->pRaftCfg->snapshotStrategy,
pSyncNode->pRaftCfg->batchSize, pSyncNode->replicaNum, pSyncNode->pRaftCfg->lastConfigIndex,
pSyncNode->changing, pSyncNode->restoreFinish, syncNodeDynamicQuorum(pSyncNode),
pSyncNode->electTimerLogicClockUser, pSyncNode->heartbeatTimerLogicClockUser, printStr);
pSyncNode->electTimerLogicClock, pSyncNode->heartbeatTimerLogicClockUser, printStr);
} else {
snprintf(s, len, "%s", str);
}
@ -2789,38 +2782,46 @@ static void syncNodeEqPingTimer(void* param, void* tmrId) {
}
static void syncNodeEqElectTimer(void* param, void* tmrId) {
SSyncNode* pSyncNode = (SSyncNode*)param;
if (atomic_load_64(&pSyncNode->electTimerLogicClockUser) <= atomic_load_64(&pSyncNode->electTimerLogicClock)) {
SyncTimeout* pSyncMsg = syncTimeoutBuild2(SYNC_TIMEOUT_ELECTION, atomic_load_64(&pSyncNode->electTimerLogicClock),
pSyncNode->electTimerMS, pSyncNode->vgId, pSyncNode);
SRpcMsg rpcMsg;
syncTimeout2RpcMsg(pSyncMsg, &rpcMsg);
syncRpcMsgLog2((char*)"==syncNodeEqElectTimer==", &rpcMsg);
if (pSyncNode->FpEqMsg != NULL) {
int32_t code = pSyncNode->FpEqMsg(pSyncNode->msgcb, &rpcMsg);
if (code != 0) {
sError("vgId:%d, sync enqueue elect msg error, code:%d", pSyncNode->vgId, code);
rpcFreeCont(rpcMsg.pCont);
syncTimeoutDestroy(pSyncMsg);
return;
}
} else {
sTrace("syncNodeEqElectTimer FpEqMsg is NULL");
}
syncTimeoutDestroy(pSyncMsg);
SElectTimer* pElectTimer = (SElectTimer*)param;
SSyncNode* pSyncNode = pElectTimer->pSyncNode;
// reset timer ms
if (syncEnvIsStart() && pSyncNode->electBaseLine > 0) {
pSyncNode->electTimerMS = syncUtilElectRandomMS(pSyncNode->electBaseLine, 2 * pSyncNode->electBaseLine);
taosTmrReset(syncNodeEqElectTimer, pSyncNode->electTimerMS, pSyncNode, gSyncEnv->pTimerManager,
&pSyncNode->pElectTimer);
} else {
sError("sync env is stop, syncNodeEqElectTimer");
SyncTimeout* pSyncMsg = syncTimeoutBuild2(SYNC_TIMEOUT_ELECTION, pElectTimer->logicClock, pSyncNode->electTimerMS,
pSyncNode->vgId, pSyncNode);
SRpcMsg rpcMsg;
syncTimeout2RpcMsg(pSyncMsg, &rpcMsg);
if (pSyncNode->FpEqMsg != NULL) {
int32_t code = pSyncNode->FpEqMsg(pSyncNode->msgcb, &rpcMsg);
if (code != 0) {
sError("vgId:%d, sync enqueue elect msg error, code:%d", pSyncNode->vgId, code);
rpcFreeCont(rpcMsg.pCont);
syncTimeoutDestroy(pSyncMsg);
taosMemoryFree(pElectTimer);
return;
}
do {
char logBuf[128];
snprintf(logBuf, sizeof(logBuf), "eq elect timer lc:%" PRIu64, pSyncMsg->logicClock);
syncNodeEventLog(pSyncNode, logBuf);
} while (0);
} else {
sTrace("==syncNodeEqElectTimer== electTimerLogicClock:%" PRIu64 ", electTimerLogicClockUser:%" PRIu64,
pSyncNode->electTimerLogicClock, pSyncNode->electTimerLogicClockUser);
sTrace("syncNodeEqElectTimer FpEqMsg is NULL");
}
syncTimeoutDestroy(pSyncMsg);
taosMemoryFree(pElectTimer);
#if 0
// reset timer ms
if (syncEnvIsStart() && pSyncNode->electBaseLine > 0) {
pSyncNode->electTimerMS = syncUtilElectRandomMS(pSyncNode->electBaseLine, 2 * pSyncNode->electBaseLine);
taosTmrReset(syncNodeEqElectTimer, pSyncNode->electTimerMS, pSyncNode, gSyncEnv->pTimerManager,
&pSyncNode->pElectTimer);
} else {
sError("sync env is stop, syncNodeEqElectTimer");
}
#endif
}
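Taken together, the rewritten timer path replaces the electTimerLogicClock/electTimerLogicClockUser pair with a single clock plus a per-firing snapshot: stopping the timer bumps the clock, so a timeout message produced by a callback that was already in flight carries an older snapshot and is dropped when it is processed. A self-contained sketch of that guard, with hypothetical names (TimerParam, armTimer, cancelTimer, onTimerFired are illustrative and not part of the sync module):
#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct {
  uint64_t logicClock;  // snapshot taken when the timer was armed
  void    *owner;       // whoever armed the timer
} TimerParam;

static _Atomic uint64_t gLogicClock = 0;

// arm: snapshot the current clock into a fresh heap param (freed by the callback)
static TimerParam *armTimer(void *owner) {
  TimerParam *p = malloc(sizeof(*p));
  if (p == NULL) return NULL;
  p->logicClock = atomic_load(&gLogicClock);
  p->owner = owner;
  return p;
}

// cancel: bump the clock so any in-flight firing becomes stale
static void cancelTimer(void) { atomic_fetch_add(&gLogicClock, 1); }

// fire: deliver only while the snapshot is still current, then release the param
static void onTimerFired(TimerParam *p) {
  if (atomic_load(&gLogicClock) <= p->logicClock) {
    // still current: enqueue the timeout event here
  }
  free(p);  // the firing owns the param
}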
static void syncNodeEqHeartbeatTimer(void* param, void* tmrId) {
@ -3000,16 +3001,6 @@ static int32_t syncNodeAppendNoop(SSyncNode* ths) {
// on message ----
int32_t syncNodeOnPingCb(SSyncNode* ths, SyncPing* pMsg) {
// log state
char logBuf[1024] = {0};
snprintf(logBuf, sizeof(logBuf),
"==syncNodeOnPingCb== vgId:%d, state: %d, %s, term:%" PRIu64 " electTimerLogicClock:%" PRIu64
", "
"electTimerLogicClockUser:%" PRIu64 ", electTimerMS:%d",
ths->vgId, ths->state, syncUtilState2String(ths->state), ths->pRaftStore->currentTerm,
ths->electTimerLogicClock, ths->electTimerLogicClockUser, ths->electTimerMS);
int32_t ret = 0;
syncPingLog2(logBuf, pMsg);
SyncPingReply* pMsgReply = syncPingReplyBuild3(&ths->myRaftId, &pMsg->srcId, ths->vgId);
SRpcMsg rpcMsg;
syncPingReply2RpcMsg(pMsgReply, &rpcMsg);
@ -3024,7 +3015,7 @@ int32_t syncNodeOnPingCb(SSyncNode* ths, SyncPing* pMsg) {
syncNodeSendMsgById(&pMsgReply->destId, ths, &rpcMsg);
syncPingReplyDestroy(pMsgReply);
return ret;
return 0;
}
int32_t syncNodeOnPingReplyCb(SSyncNode* ths, SyncPingReply* pMsg) {

View File

@ -113,10 +113,8 @@ int32_t syncNodeOnTimer(SSyncNode* ths, SyncTimeout* pMsg) {
}
} else if (pMsg->timeoutType == SYNC_TIMEOUT_ELECTION) {
if (atomic_load_64(&ths->electTimerLogicClockUser) <= pMsg->logicClock) {
if (atomic_load_64(&ths->electTimerLogicClock) <= pMsg->logicClock) {
++(ths->electTimerCounter);
sTrace("vgId:%d, sync timer, type:election count:%" PRIu64 ", lc-user:%" PRIu64, ths->vgId,
ths->electTimerCounter, ths->electTimerLogicClockUser);
syncNodeElect(ths);
}

View File

@ -66,7 +66,12 @@ bool voteGrantedMajority(SVotesGranted *pVotesGranted) {
void voteGrantedVote(SVotesGranted *pVotesGranted, SyncRequestVoteReply *pMsg) {
ASSERT(pMsg->voteGranted == true);
ASSERT(pMsg->term == pVotesGranted->term);
if (pMsg->term != pVotesGranted->term) {
syncNodeEventLog(pVotesGranted->pSyncNode, "vote grant vnode error");
return;
}
ASSERT(syncUtilSameId(&pVotesGranted->pSyncNode->myRaftId, &pMsg->destId));
int j = -1;
@ -201,7 +206,11 @@ bool votesResponded(SVotesRespond *pVotesRespond, const SRaftId *pRaftId) {
}
void votesRespondAdd(SVotesRespond *pVotesRespond, const SyncRequestVoteReply *pMsg) {
ASSERT(pVotesRespond->term == pMsg->term);
if (pVotesRespond->term != pMsg->term) {
syncNodeEventLog(pVotesRespond->pSyncNode, "vote respond add error");
return;
}
for (int i = 0; i < pVotesRespond->replicaNum; ++i) {
if (syncUtilSameId(&((*(pVotesRespond->replicas))[i]), &pMsg->srcId)) {
// ASSERT(pVotesRespond->isRespond[i] == false);

View File

@ -741,9 +741,13 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx
tdbBtreeInitPage(pOldsCopy[i], &iarg, 0);
tdbPageCopy(pOlds[i], pOldsCopy[i], 0);
}
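// initialize every new page up front so cells can be redistributed into any of them below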
for (iNew = 0; iNew < nNews; ++iNew) {
tdbBtreeInitPage(pNews[iNew], &iarg, 0);
}
iNew = 0;
nNewCells = 0;
tdbBtreeInitPage(pNews[iNew], &iarg, 0);
for (int iOld = 0; iOld < nOlds; iOld++) {
SPage *pPage;

View File

@ -65,11 +65,6 @@ int32_t walRefVer(SWalRef *pRef, int64_t ver) {
return 0;
}
int32_t walPreRefVer(SWalRef *pRef, int64_t ver) {
pRef->refVer = ver;
return 0;
}
void walUnrefVer(SWalRef *pRef) {
pRef->refId = -1;
pRef->refFile = -1;

View File

@ -91,48 +91,6 @@ void* taosArrayAddBatch(SArray* pArray, const void* pData, int32_t nEles) {
return dst;
}
void taosArrayRemoveBatch(SArray* pArray, const int32_t* pData, int32_t numOfElems) {
assert(pArray != NULL && pData != NULL);
if (numOfElems <= 0) {
return;
}
size_t size = taosArrayGetSize(pArray);
if (numOfElems >= size) {
taosArrayClear(pArray);
return;
}
int32_t i = pData[0] + 1, j = 0;
while (i < size) {
if (j == numOfElems - 1) {
break;
}
char* p = TARRAY_GET_ELEM(pArray, i);
if (i > pData[j] && i < pData[j + 1]) {
char* dst = TARRAY_GET_ELEM(pArray, i - (j + 1));
memmove(dst, p, pArray->elemSize);
} else if (i == pData[j + 1]) {
j += 1;
}
i += 1;
}
assert(i == pData[numOfElems - 1] + 1 && i <= size);
int32_t srcIndex = pData[numOfElems - 1] + 1;
int32_t dstIndex = pData[numOfElems - 1] - numOfElems + 1;
if (pArray->size - srcIndex > 0) {
char* dst = TARRAY_GET_ELEM(pArray, dstIndex);
char* src = TARRAY_GET_ELEM(pArray, srcIndex);
memmove(dst, src, pArray->elemSize * (pArray->size - srcIndex));
}
pArray->size -= numOfElems;
}
void taosArrayRemoveDuplicate(SArray* pArray, __compar_fn_t comparFn, void (*fp)(void*)) {
assert(pArray);
@ -435,17 +393,6 @@ void taosArraySortString(SArray* pArray, __compar_fn_t comparFn) {
taosSort(pArray->pData, pArray->size, pArray->elemSize, comparFn);
}
char* taosArraySearchString(const SArray* pArray, const char* key, __compar_fn_t comparFn, int32_t flags) {
assert(pArray != NULL);
assert(key != NULL);
void* p = taosbsearch(&key, pArray->pData, pArray->size, pArray->elemSize, comparFn, flags);
if (p == NULL) {
return NULL;
}
return *(char**)p;
}
static int32_t taosArrayPartition(SArray* pArray, int32_t i, int32_t j, __ext_compar_fn_t fn, const void* userData) {
void* key = taosArrayGetP(pArray, i);
while (i < j) {
@ -543,26 +490,7 @@ void* taosDecodeArray(const void* buf, SArray** pArray, FDecode decode, int32_t
void taosArraySortPWithExt(SArray* pArray, __ext_compar_fn_t fn, const void* param) {
taosArrayGetSize(pArray) > 8 ? taosArrayQuickSort(pArray, fn, param) : taosArrayInsertSort(pArray, fn, param);
}
// TODO(yihaoDeng) add order array<type>
//
char* taosShowStrArray(const SArray* pArray) {
int32_t sz = pArray->size;
int32_t tlen = 0;
for (int32_t i = 0; i < sz; i++) {
tlen += strlen(taosArrayGetP(pArray, i)) + 1;
}
char* res = taosMemoryCalloc(1, tlen);
char* buf = res;
for (int32_t i = 0; i < sz; i++) {
char* str = taosArrayGetP(pArray, i);
int32_t len = strlen(str);
memcpy(buf, str, len);
buf += len;
if (i != sz - 1) *buf = ',';
}
return res;
}
void taosArraySwap(SArray* a, SArray* b) {
if (a == NULL || b == NULL) return;
size_t t = a->size;

View File

@ -247,15 +247,15 @@
./test.sh -f tsim/stream/ignoreExpiredData.sim
./test.sh -f tsim/stream/sliding.sim
./test.sh -f tsim/stream/partitionbyColumnInterval.sim
#./test.sh -f tsim/stream/partitionbyColumnSession.sim
#./test.sh -f tsim/stream/partitionbyColumnState.sim
#./test.sh -f tsim/stream/deleteInterval.sim
#./test.sh -f tsim/stream/deleteSession.sim
#./test.sh -f tsim/stream/deleteState.sim
#./test.sh -f tsim/stream/fillIntervalDelete0.sim
#./test.sh -f tsim/stream/fillIntervalDelete1.sim
./test.sh -f tsim/stream/partitionbyColumnSession.sim
./test.sh -f tsim/stream/partitionbyColumnState.sim
./test.sh -f tsim/stream/deleteInterval.sim
./test.sh -f tsim/stream/deleteSession.sim
./test.sh -f tsim/stream/deleteState.sim
./test.sh -f tsim/stream/fillIntervalDelete0.sim
./test.sh -f tsim/stream/fillIntervalDelete1.sim
./test.sh -f tsim/stream/fillIntervalLinear.sim
#./test.sh -f tsim/stream/fillIntervalPartitionBy.sim
./test.sh -f tsim/stream/fillIntervalPartitionBy.sim
./test.sh -f tsim/stream/fillIntervalPrevNext.sim
./test.sh -f tsim/stream/fillIntervalValue.sim

View File

@ -85,3 +85,4 @@ echo statusInterval 1 >> %TAOS_CFG%
echo asyncLog 0 >> %TAOS_CFG%
echo locale en_US.UTF-8 >> %TAOS_CFG%
echo telemetryReporting 0 >> %TAOS_CFG%
echo querySmaOptimize 1 >> %TAOS_CFG%

View File

@ -144,4 +144,5 @@ echo "numOfLogLines 20000000" >> $TAOS_CFG
echo "asyncLog 0" >> $TAOS_CFG
echo "locale en_US.UTF-8" >> $TAOS_CFG
echo "telemetryReporting 0" >> $TAOS_CFG
echo "querySmaOptimize 1" >> $TAOS_CFG
echo " " >> $TAOS_CFG

View File

@ -55,7 +55,7 @@ sql_error alter table tb modify column c3 nchar(10);
sql_error alter table tb modify column c3 nchar(0);
sql_error alter table tb modify column c3 nchar(-1);
sql_error alter table tb modify column c3 binary(80);
sql alter table tb modify column c3 nchar(17000);
sql_error alter table tb modify column c3 nchar(17000);
sql_error alter table tb modify column c3 nchar(100), c2 binary(30);
sql_error alter table tb modify column c1 nchar(100), c2 binary(30);
sql_error alter stable tb modify column c2 binary(30);

View File

@ -34,7 +34,7 @@ sql alter table tb1 set tag len = 379
# case TD-5594
sql create stable st5520(ts timestamp, f int) tags(t0 bool, t1 nchar(4093), t2 nchar(1))
sql alter stable st5520 modify tag t2 nchar(2);
sql_error alter stable st5520 modify tag t2 nchar(2);
# test end
sql drop database $db

View File

@ -47,6 +47,7 @@ sql explain verbose true select ts from tb1 where f1 > 0;
sql explain verbose true select * from st1 where f1 > 0 and ts > '2020-10-31 00:00:00' and ts < '2021-10-31 00:00:00';
sql explain verbose true select count(*) from st1 partition by tbname slimit 1 soffset 2 limit 2 offset 1;
sql explain verbose true select * from information_schema.ins_stables where db_name='db2';
sql explain verbose true select st1.f1 from st1 join st2 on st1.ts=st2.ts and st1.f1 > 0;
print ======== step4
sql explain analyze select ts from st1 where -2;
@ -75,6 +76,7 @@ sql explain analyze verbose true select f1 from st1 where f1 > 0 and ts > '2020-
sql explain analyze verbose true select * from information_schema.ins_stables where db_name='db2';
sql explain analyze verbose true select * from (select min(f1),count(*) a from st1 where f1 > 0) where a < 0;
sql explain analyze verbose true select count(f1) from st1 group by tbname;
sql explain analyze verbose true select st1.f1 from st1 join st2 on st1.ts=st2.ts and st1.f1 > 0;
#not pass case
#sql explain verbose true select count(*),sum(f1) as aa from tb1 where (f1 > 0 or f1 < -1) and ts > '2020-10-31 00:00:00' and ts < '2021-10-31 00:00:00' order by aa;
@ -95,4 +97,4 @@ sql explain analyze verbose true select count(f1) from st1 group by tbname;
#sql explain analyze verbose true select min(f1) from st1 interval(3m, 2a) sliding(1m);
system sh/exec.sh -n dnode1 -s stop -x SIGINT
#system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -30,12 +30,13 @@ sql insert into ct1 values('2022-10-19 09:55:46.682', 11, 2.1, 3.1)('2022-10-19
print =============== create sma index from super table
sql create sma index sma_index_name1 on stb function(max(c1),max(c2),min(c1)) interval(5m,10s) sliding(5m)
sql create sma index sma_index_name1 on stb function(max(c1),max(c2),min(c1)) interval(5m,10s) sliding(5m) watermark 1s max_delay 1s
print $data00 $data01 $data02 $data03
print =============== trigger stream to execute sma aggr task and insert sma data into sma store
sql insert into ct1 values('2022-10-19 09:55:50.682', 20, 20.0, 30.0)
#===================================================================
#==================== sleep 2s to wait for tsma result
sleep 2000
print =============== show streams ================================
sql show streams;

View File

@ -14,6 +14,7 @@ from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.cluster import *
class TDTestCase:
#updatecfgDict = {'clientCfg': {'serverPort': 7080, 'firstEp': 'trd02:7080', 'secondEp':'trd02:7080'},\
@ -106,12 +107,39 @@ class TDTestCase:
tdLog.printNoPrefix("%s"%taosdCmd)
os.system(f"{taosdCmd}")
def run(self):
tdSql.prepare()
# time.sleep(2)
tdSql.query("create user testpy pass 'testpy'")
def preData(self):
# database\stb\tb\chiild-tb\rows\topics
tdSql.execute("create user testpy pass 'testpy'")
tdSql.execute("drop database if exists db0;")
tdSql.execute("create database db0;")
tdSql.execute("use db0;")
tdSql.execute("create table if not exists db0.stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned);")
tdSql.execute("create table db0.ct1 using db0.stb tags(1000);")
tdSql.execute("create table db0.ct2 using db0.stb tags(2000);")
tdSql.execute("create table if not exists db0.ntb (ts timestamp, c1 int, c2 float, c3 double) ;")
tdSql.query("show db0.stables;")
tdSql.execute("insert into db0.ct1 values(now+0s, 10, 2.0, 3.0);")
tdSql.execute("insert into db0.ct1 values(now+1s, 11, 2.1, 3.1)(now+2s, 12, 2.2, 3.2)(now+3s, 13, 2.3, 3.3);")
tdSql.execute("insert into db0.ntb values(now+2s, 10, 2.0, 3.0);")
tdSql.execute("create sma index sma_index_name1 on db0.stb function(max(c1),max(c2),min(c1)) interval(6m,10s) sliding(6m);")
tdSql.execute("create topic tpc1 as select * from db0.ct2; ")
#hostname = socket.gethostname()
#stream
tdSql.execute("drop database if exists source_db;")
tdSql.query("create database source_db vgroups 3;")
tdSql.query("use source_db")
tdSql.query("create table if not exists source_db.stb (ts timestamp, k int) tags (a int);")
tdSql.query("create table source_db.ct1 using source_db.stb tags(1000);create table source_db.ct2 using source_db.stb tags(2000);create table source_db.ct3 using source_db.stb tags(3000);")
tdSql.query("create stream s1 into source_db.output_stb as select _wstart AS start, min(k), max(k), sum(k) from source_db.stb interval(10m);")
def run(self):
# tdSql.prepare()
# time.sleep(2)
self.preData()
#tdLog.info ("hostname: %s" % hostname)
buildPath = self.getBuildPath()
@ -128,7 +156,15 @@ class TDTestCase:
# keyDict['h'] = self.hostname
# keyDict['c'] = cfgPath
# keyDict['P'] = self.serverPort
tdDnodes.stop(1)
tdDnodes=cluster.dnodes
for i in range(5):
tdDnodes[i].stoptaosd()
startAction = " -s -c " + taosdCfgPath
tdLog.printNoPrefix("================================ parameter: %s"%startAction)
self.taosdCommandExe(startAction,taosdCmdRun)
startAction = " --help"
tdLog.printNoPrefix("================================ parameter: %s"%startAction)
@ -153,9 +189,7 @@ class TDTestCase:
tdLog.printNoPrefix("================================ parameter: %s"%startAction)
self.taosdCommandStop(startAction,taosdCmdRun)
startAction = " -s"
tdLog.printNoPrefix("================================ parameter: %s"%startAction)
self.taosdCommandExe(startAction,taosdCmdRun)
startAction = " -e TAOS_QUERY_POLICY=2 "
tdLog.printNoPrefix("================================ parameter: %s"%startAction)

View File

@ -1,4 +1,6 @@
import datetime
import random
import os
from util.log import *
from util.sql import *
@ -30,7 +32,11 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
self.testcasePath = os.path.split(__file__)[0]
self.testcaseFilename = os.path.split(__file__)[-1]
os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
def __query_condition(self,tbname):
query_condition = [f"{tbname}.{col}" for col in ALL_COL]
@ -132,6 +138,67 @@ class TDTestCase:
return
return f"explain select {select_clause} from {from_clause} {where_condition} {group_condition}"
def __single_sql_verbose_true(self, select_clause, from_clause, where_condition="", group_condition=""):
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]:
return
return f"explain verbose true select {select_clause} from {from_clause} {where_condition} {group_condition}"
def __single_sql_verbose_false(self, select_clause, from_clause, where_condition="", group_condition=""):
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]:
return
return f"explain verbose false select {select_clause} from {from_clause} {where_condition} {group_condition}"
def __single_sql_ratio(self, select_clause, from_clause, where_condition="", group_condition=""):
ratio = random.uniform(0.001,1)
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]:
return
return f"explain ratio {ratio} select {select_clause} from {from_clause} {where_condition} {group_condition}"
def __single_sql_ratio_verbose_true(self, select_clause, from_clause, where_condition="", group_condition=""):
ratio = random.uniform(0.001,1)
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]:
return
return f"explain ratio {ratio} verbose true select {select_clause} from {from_clause} {where_condition} {group_condition}"
def __single_sql_ratio_verbose_false(self, select_clause, from_clause, where_condition="", group_condition=""):
ratio = random.uniform(0.001,1)
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]:
return
return f"explain ratio {ratio} verbose false select {select_clause} from {from_clause} {where_condition} {group_condition}"
def __single_sql_analyze(self, select_clause, from_clause, where_condition="", group_condition=""):
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]:
return
return f"explain analyze select {select_clause} from {from_clause} {where_condition} {group_condition}"
def __single_sql_analyze_verbose_true(self, select_clause, from_clause, where_condition="", group_condition=""):
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]:
return
return f"explain analyze verbose true select {select_clause} from {from_clause} {where_condition} {group_condition}"
def __single_sql_analyze_verbose_false(self, select_clause, from_clause, where_condition="", group_condition=""):
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]:
return
return f"explain analyze verbose false select {select_clause} from {from_clause} {where_condition} {group_condition}"
def __single_sql_analyze_ratio(self, select_clause, from_clause, where_condition="", group_condition=""):
ratio = random.uniform(0.001,1)
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]:
return
return f"explain analyze ratio {ratio} select {select_clause} from {from_clause} {where_condition} {group_condition}"
def __single_sql_analyze_ratio_verbose_true(self, select_clause, from_clause, where_condition="", group_condition=""):
ratio = random.uniform(0.001,1)
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]:
return
return f"explain analyze ratio {ratio} verbose true select {select_clause} from {from_clause} {where_condition} {group_condition}"
def __single_sql_analyze_ratio_verbose_false(self, select_clause, from_clause, where_condition="", group_condition=""):
ratio = random.uniform(0.001,1)
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]:
return
return f"explain analyze ratio {ratio} verbose false select {select_clause} from {from_clause} {where_condition} {group_condition}"
@property
def __tb_list(self, dbname=DBNAME):
return [
@ -158,6 +225,53 @@ class TDTestCase:
self.__single_sql(select_claus, tb,),
self.__single_sql(select_claus, tb, where_condition=where_claus),
self.__single_sql(select_claus, tb, group_condition=group_claus),
self.__single_sql_verbose_true(select_claus, tb, where_claus, having_claus),
self.__single_sql_verbose_true(select_claus, tb,),
self.__single_sql_verbose_true(select_claus, tb, where_condition=where_claus),
self.__single_sql_verbose_true(select_claus, tb, group_condition=group_claus),
self.__single_sql_verbose_false(select_claus, tb, where_claus, having_claus),
self.__single_sql_verbose_false(select_claus, tb,),
self.__single_sql_verbose_false(select_claus, tb, where_condition=where_claus),
self.__single_sql_verbose_false(select_claus, tb, group_condition=group_claus),
self.__single_sql_ratio(select_claus, tb, where_claus, having_claus),
self.__single_sql_ratio(select_claus, tb,),
self.__single_sql_ratio(select_claus, tb, where_condition=where_claus),
self.__single_sql_ratio(select_claus, tb, group_condition=group_claus),
self.__single_sql_ratio_verbose_true(select_claus, tb, where_claus, having_claus),
self.__single_sql_ratio_verbose_true(select_claus, tb,),
self.__single_sql_ratio_verbose_true(select_claus, tb, where_condition=where_claus),
self.__single_sql_ratio_verbose_true(select_claus, tb, group_condition=group_claus),
self.__single_sql_ratio_verbose_false(select_claus, tb, where_claus, having_claus),
self.__single_sql_ratio_verbose_false(select_claus, tb,),
self.__single_sql_ratio_verbose_false(select_claus, tb, where_condition=where_claus),
self.__single_sql_ratio_verbose_false(select_claus, tb, group_condition=group_claus),
self.__single_sql_analyze(select_claus, tb, where_claus, having_claus),
self.__single_sql_analyze(select_claus, tb,),
self.__single_sql_analyze(select_claus, tb, where_condition=where_claus),
self.__single_sql_analyze(select_claus, tb, group_condition=group_claus),
self.__single_sql_analyze_verbose_true(select_claus, tb, where_claus, having_claus),
self.__single_sql_analyze_verbose_true(select_claus, tb,),
self.__single_sql_analyze_verbose_true(select_claus, tb, where_condition=where_claus),
self.__single_sql_analyze_verbose_true(select_claus, tb, group_condition=group_claus),
self.__single_sql_analyze_verbose_false(select_claus, tb, where_claus, having_claus),
self.__single_sql_analyze_verbose_false(select_claus, tb,),
self.__single_sql_analyze_verbose_false(select_claus, tb, where_condition=where_claus),
self.__single_sql_analyze_verbose_false(select_claus, tb, group_condition=group_claus),
self.__single_sql_analyze_ratio(select_claus, tb, where_claus, having_claus),
self.__single_sql_analyze_ratio(select_claus, tb,),
self.__single_sql_analyze_ratio(select_claus, tb, where_condition=where_claus),
self.__single_sql_analyze_ratio(select_claus, tb, group_condition=group_claus),
self.__single_sql_analyze_ratio_verbose_true(select_claus, tb, where_claus, having_claus),
self.__single_sql_analyze_ratio_verbose_true(select_claus, tb,),
self.__single_sql_analyze_ratio_verbose_true(select_claus, tb, where_condition=where_claus),
self.__single_sql_analyze_ratio_verbose_true(select_claus, tb, group_condition=group_claus),
self.__single_sql_analyze_ratio_verbose_false(select_claus, tb, where_claus, having_claus),
self.__single_sql_analyze_ratio_verbose_false(select_claus, tb,),
self.__single_sql_analyze_ratio_verbose_false(select_claus, tb, where_condition=where_claus),
self.__single_sql_analyze_ratio_verbose_false(select_claus, tb, group_condition=group_claus),
)
)
@ -172,6 +286,9 @@ class TDTestCase:
tdSql.query(sqls[i])
def __test_current(self, dbname=DBNAME):
ratio = random.uniform(0.001,1)
tdSql.query(f"explain select {INT_COL} from {dbname}.ct1")
tdSql.query(f"explain select 1 from {dbname}.ct2")
tdSql.query(f"explain select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}")
@ -180,8 +297,111 @@ class TDTestCase:
tdSql.query(f"explain select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ")
self.explain_check()
tdSql.query(f"explain verbose true select {INT_COL} from {dbname}.ct1")
tdSql.query(f"explain verbose true select 1 from {dbname}.ct2")
tdSql.query(f"explain verbose true select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}")
tdSql.query(f"explain verbose true select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0")
tdSql.query(f"explain verbose true select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts")
tdSql.query(f"explain verbose true select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ")
self.explain_check()
tdSql.query(f"explain verbose false select {INT_COL} from {dbname}.ct1")
tdSql.query(f"explain verbose false select 1 from {dbname}.ct2")
tdSql.query(f"explain verbose false select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}")
tdSql.query(f"explain verbose false select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0")
tdSql.query(f"explain verbose false select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts")
tdSql.query(f"explain verbose false select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ")
self.explain_check()
tdSql.query(f"explain ratio {ratio} select {INT_COL} from {dbname}.ct1")
tdSql.query(f"explain ratio {ratio} select 1 from {dbname}.ct2")
tdSql.query(f"explain ratio {ratio} select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}")
tdSql.query(f"explain ratio {ratio} select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0")
tdSql.query(f"explain ratio {ratio} select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts")
tdSql.query(f"explain ratio {ratio} select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ")
self.explain_check()
tdSql.query(f"explain ratio {ratio} verbose true select {INT_COL} from {dbname}.ct1")
tdSql.query(f"explain ratio {ratio} verbose true select 1 from {dbname}.ct2")
tdSql.query(f"explain ratio {ratio} verbose true select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}")
tdSql.query(f"explain ratio {ratio} verbose true select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0")
tdSql.query(f"explain ratio {ratio} verbose true select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts")
tdSql.query(f"explain ratio {ratio} verbose true select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ")
self.explain_check()
tdSql.query(f"explain ratio {ratio} verbose false select {INT_COL} from {dbname}.ct1")
tdSql.query(f"explain ratio {ratio} verbose false select 1 from {dbname}.ct2")
tdSql.query(f"explain ratio {ratio} verbose false select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}")
tdSql.query(f"explain ratio {ratio} verbose false select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0")
tdSql.query(f"explain ratio {ratio} verbose false select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts")
tdSql.query(f"explain ratio {ratio} verbose false select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ")
self.explain_check()
tdSql.query(f"explain analyze select {INT_COL} from {dbname}.ct1")
tdSql.query(f"explain analyze select 1 from {dbname}.ct2")
tdSql.query(f"explain analyze select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}")
tdSql.query(f"explain analyze select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0")
tdSql.query(f"explain analyze select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts")
tdSql.query(f"explain analyze select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ")
self.explain_check()
tdSql.query(f"explain analyze verbose true select {INT_COL} from {dbname}.ct1")
tdSql.query(f"explain analyze verbose true select 1 from {dbname}.ct2")
tdSql.query(f"explain analyze verbose true select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}")
tdSql.query(f"explain analyze verbose true select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0")
tdSql.query(f"explain analyze verbose true select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts")
tdSql.query(f"explain analyze verbose true select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ")
self.explain_check()
tdSql.query(f"explain analyze verbose false select {INT_COL} from {dbname}.ct1")
tdSql.query(f"explain analyze verbose false select 1 from {dbname}.ct2")
tdSql.query(f"explain analyze verbose false select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}")
tdSql.query(f"explain analyze verbose false select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0")
tdSql.query(f"explain analyze verbose false select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts")
tdSql.query(f"explain analyze verbose false select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ")
self.explain_check()
tdSql.query(f"explain analyze ratio {ratio} select {INT_COL} from {dbname}.ct1")
tdSql.query(f"explain analyze ratio {ratio} select 1 from {dbname}.ct2")
tdSql.query(f"explain analyze ratio {ratio} select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}")
tdSql.query(f"explain analyze ratio {ratio} select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0")
tdSql.query(f"explain analyze ratio {ratio} select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts")
tdSql.query(f"explain analyze ratio {ratio} select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ")
self.explain_check()
tdSql.query(f"explain analyze ratio {ratio} verbose true select {INT_COL} from {dbname}.ct1")
tdSql.query(f"explain analyze ratio {ratio} verbose true select 1 from {dbname}.ct2")
tdSql.query(f"explain analyze ratio {ratio} verbose true select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}")
tdSql.query(f"explain analyze ratio {ratio} verbose true select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0")
tdSql.query(f"explain analyze ratio {ratio} verbose true select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts")
tdSql.query(f"explain analyze ratio {ratio} verbose true select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ")
self.explain_check()
tdSql.query(f"explain analyze ratio {ratio} verbose false select {INT_COL} from {dbname}.ct1")
tdSql.query(f"explain analyze ratio {ratio} verbose false select 1 from {dbname}.ct2")
tdSql.query(f"explain analyze ratio {ratio} verbose false select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}")
tdSql.query(f"explain analyze ratio {ratio} verbose false select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0")
tdSql.query(f"explain analyze ratio {ratio} verbose false select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts")
tdSql.query(f"explain analyze ratio {ratio} verbose false select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ")
self.explain_check()
def __test_error(self, dbname=DBNAME):
ratio = random.uniform(0.001,1)
tdLog.printNoPrefix("===step 0: err case, must return err")
tdSql.error( f"explain select hyperloglog({INT_COL}) from {dbname}.ct8" )
@ -195,6 +415,143 @@ class TDTestCase:
where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null
group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']
having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' )
tdSql.error( f"explain verbose true select hyperloglog({INT_COL}) from {dbname}.ct8" )
tdSql.error( f"explain verbose true show databases " )
tdSql.error( f"explain verbose true show {dbname}.stables " )
tdSql.error( f"explain verbose true show {dbname}.tables " )
tdSql.error( f"explain verbose true show {dbname}.vgroups " )
tdSql.error( f"explain verbose true show dnodes " )
tdSql.error( f'''explain verbose true select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'])
from {dbname}.ct1
where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null
group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']
having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' )
tdSql.error( f"explain verbose false select hyperloglog({INT_COL}) from {dbname}.ct8" )
tdSql.error( f"explain verbose false show databases " )
tdSql.error( f"explain verbose false show {dbname}.stables " )
tdSql.error( f"explain verbose false show {dbname}.tables " )
tdSql.error( f"explain verbose false show {dbname}.vgroups " )
tdSql.error( f"explain verbose false show dnodes " )
tdSql.error( f'''explain verbose false select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'])
from {dbname}.ct1
where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null
group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']
having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' )
tdLog.printNoPrefix("===step 0: err case, must return err")
tdSql.error( f"explain ratio {ratio} select hyperloglog({INT_COL}) from {dbname}.ct8" )
tdSql.error( f"explain ratio {ratio} show databases " )
tdSql.error( f"explain ratio {ratio} show {dbname}.stables " )
tdSql.error( f"explain ratio {ratio} show {dbname}.tables " )
tdSql.error( f"explain ratio {ratio} show {dbname}.vgroups " )
tdSql.error( f"explain ratio {ratio} show dnodes " )
tdSql.error( f'''explain ratio {ratio} select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'])
from {dbname}.ct1
where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null
group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']
having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' )
tdSql.error( f"explain ratio {ratio} verbose true select hyperloglog({INT_COL}) from {dbname}.ct8" )
tdSql.error( f"explain ratio {ratio} verbose true show databases " )
tdSql.error( f"explain ratio {ratio} verbose true show {dbname}.stables " )
tdSql.error( f"explain ratio {ratio} verbose true show {dbname}.tables " )
tdSql.error( f"explain ratio {ratio} verbose true show {dbname}.vgroups " )
tdSql.error( f"explain ratio {ratio} verbose true show dnodes " )
tdSql.error( f'''explain ratio {ratio} verbose true select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'])
from {dbname}.ct1
where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null
group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']
having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' )
tdSql.error( f"explain ratio {ratio} verbose false select hyperloglog({INT_COL}) from {dbname}.ct8" )
tdSql.error( f"explain ratio {ratio} verbose false show databases " )
tdSql.error( f"explain ratio {ratio} verbose false show {dbname}.stables " )
tdSql.error( f"explain ratio {ratio} verbose false show {dbname}.tables " )
tdSql.error( f"explain ratio {ratio} verbose false show {dbname}.vgroups " )
tdSql.error( f"explain ratio {ratio} verbose false show dnodes " )
tdSql.error( f'''explain ratio {ratio} verbose false select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'])
from {dbname}.ct1
where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null
group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']
having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' )
tdLog.printNoPrefix("===step 0: err case, must return err")
tdSql.error( f"explain analyze select hyperloglog({INT_COL}) from {dbname}.ct8" )
tdSql.error( f"explain analyze show databases " )
tdSql.error( f"explain analyze show {dbname}.stables " )
tdSql.error( f"explain analyze show {dbname}.tables " )
tdSql.error( f"explain analyze show {dbname}.vgroups " )
tdSql.error( f"explain analyze show dnodes " )
tdSql.error( f'''explain analyze select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'])
from {dbname}.ct1
where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null
group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']
having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' )
tdSql.error( f"explain analyze verbose true select hyperloglog({INT_COL}) from {dbname}.ct8" )
tdSql.error( f"explain analyze verbose true show databases " )
tdSql.error( f"explain analyze verbose true show {dbname}.stables " )
tdSql.error( f"explain analyze verbose true show {dbname}.tables " )
tdSql.error( f"explain analyze verbose true show {dbname}.vgroups " )
tdSql.error( f"explain analyze verbose true show dnodes " )
tdSql.error( f'''explain analyze verbose true select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'])
from {dbname}.ct1
where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null
group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']
having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' )
tdSql.error( f"explain analyze verbose false select hyperloglog({INT_COL}) from {dbname}.ct8" )
tdSql.error( f"explain analyze verbose false show databases " )
tdSql.error( f"explain analyze verbose false show {dbname}.stables " )
tdSql.error( f"explain analyze verbose false show {dbname}.tables " )
tdSql.error( f"explain analyze verbose false show {dbname}.vgroups " )
tdSql.error( f"explain analyze verbose false show dnodes " )
tdSql.error( f'''explain analyze verbose false select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'])
from {dbname}.ct1
where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null
group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']
having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' )
tdLog.printNoPrefix("===step 0: err case, must return err")
tdSql.error( f"explain analyze ratio {ratio} select hyperloglog({INT_COL}) from {dbname}.ct8" )
tdSql.error( f"explain analyze ratio {ratio} show databases " )
tdSql.error( f"explain analyze ratio {ratio} show {dbname}.stables " )
tdSql.error( f"explain analyze ratio {ratio} show {dbname}.tables " )
tdSql.error( f"explain analyze ratio {ratio} show {dbname}.vgroups " )
tdSql.error( f"explain analyze ratio {ratio} show dnodes " )
tdSql.error( f'''explain analyze ratio {ratio} select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'])
from {dbname}.ct1
where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null
group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']
having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' )
tdSql.error( f"explain analyze ratio {ratio} verbose true select hyperloglog({INT_COL}) from {dbname}.ct8" )
tdSql.error( f"explain analyze ratio {ratio} verbose true show databases " )
tdSql.error( f"explain analyze ratio {ratio} verbose true show {dbname}.stables " )
tdSql.error( f"explain analyze ratio {ratio} verbose true show {dbname}.tables " )
tdSql.error( f"explain analyze ratio {ratio} verbose true show {dbname}.vgroups " )
tdSql.error( f"explain analyze ratio {ratio} verbose true show dnodes " )
tdSql.error( f'''explain analyze ratio {ratio} verbose true select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'])
from {dbname}.ct1
where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null
group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']
having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' )
tdSql.error( f"explain analyze ratio {ratio} verbose false select hyperloglog({INT_COL}) from {dbname}.ct8" )
tdSql.error( f"explain analyze ratio {ratio} verbose false show databases " )
tdSql.error( f"explain analyze ratio {ratio} verbose false show {dbname}.stables " )
tdSql.error( f"explain analyze ratio {ratio} verbose false show {dbname}.tables " )
tdSql.error( f"explain analyze ratio {ratio} verbose false show {dbname}.vgroups " )
tdSql.error( f"explain analyze ratio {ratio} verbose false show dnodes " )
tdSql.error( f'''explain analyze ratio {ratio} verbose false select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'])
from {dbname}.ct1
where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null
group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']
having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' )
def all_test(self):
self.__test_error()

View File

@ -28,6 +28,8 @@ ALL_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BOOL_C
DBNAME = "db"
class TDTestCase:
updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")

View File

@ -11,11 +11,15 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql) # output sql.txt file
def run(self):
dbname = "db"
tbname = "tb"
stbname = "stb"
ctbname1 = "ctb1"
ctbname2 = "ctb2"
tdSql.prepare()
@ -32,6 +36,8 @@ class TDTestCase:
tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-01 00:00:05', 5, 5, 5, 5, 5.0, 5.0, true, 'varchar', 'nchar')")
tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-01 00:00:10', 10, 10, 10, 10, 10.0, 10.0, true, 'varchar', 'nchar')")
tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-01 00:00:15', 15, 15, 15, 15, 15.0, 15.0, true, 'varchar', 'nchar')")
tdSql.execute(f"insert into {dbname}.{tbname} (ts) values (now)")
tdLog.printNoPrefix("==========step3:fill null")
@ -240,7 +246,7 @@ class TDTestCase:
## {. . .}
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(next)")
tdSql.checkRows(12)
tdSql.checkRows(13)
tdSql.checkData(0, 0, 5)
tdSql.checkData(1, 0, 5)
tdSql.checkData(2, 0, 10)
@ -290,21 +296,21 @@ class TDTestCase:
## ..{.}
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:13', '2020-02-01 00:00:17') every(1s) fill(next)")
tdSql.checkRows(3)
tdSql.checkRows(5)
tdSql.checkData(0, 0, 15)
tdSql.checkData(1, 0, 15)
tdSql.checkData(2, 0, 15)
## ... {}
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(next)")
tdSql.checkRows(0)
tdSql.checkRows(4)
tdLog.printNoPrefix("==========step7:fill linear")
## {. . .}
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(linear)")
tdSql.checkRows(11)
tdSql.checkRows(12)
tdSql.checkData(0, 0, 5)
tdSql.checkData(1, 0, 6)
tdSql.checkData(2, 0, 7)
@ -347,7 +353,7 @@ class TDTestCase:
## ..{.}
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:13', '2020-02-01 00:00:17') every(1s) fill(linear)")
tdSql.checkRows(3)
tdSql.checkRows(5)
tdSql.checkData(0, 0, 13)
tdSql.checkData(1, 0, 14)
tdSql.checkData(2, 0, 15)
@ -505,7 +511,7 @@ class TDTestCase:
tdSql.checkData(8, 0, '2020-02-01 00:00:12.000')
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(next)")
tdSql.checkRows(12)
tdSql.checkRows(13)
tdSql.checkCols(2)
tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
@ -548,7 +554,7 @@ class TDTestCase:
tdSql.checkData(8, 0, '2020-02-01 00:00:12.000')
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(linear)")
tdSql.checkRows(11)
tdSql.checkRows(12)
tdSql.checkCols(2)
tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
@ -576,7 +582,7 @@ class TDTestCase:
# multiple _irowts
tdSql.query(f"select interp(c0),_irowts from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(linear)")
tdSql.checkRows(11)
tdSql.checkRows(12)
tdSql.checkCols(2)
tdSql.checkData(0, 1, '2020-02-01 00:00:05.000')
@ -592,7 +598,7 @@ class TDTestCase:
tdSql.checkData(10, 1, '2020-02-01 00:00:15.000')
tdSql.query(f"select _irowts, interp(c0), interp(c0), _irowts from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(linear)")
tdSql.checkRows(11)
tdSql.checkRows(12)
tdSql.checkCols(4)
cols = (0, 3)
@ -621,6 +627,32 @@ class TDTestCase:
tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-01 00:00:05', 5, 5, 5, 5, 5.0, 5.0, true, 'varchar', 'nchar')")
tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-11 00:00:05', 15, 15, 15, 15, 15.0, 15.0, true, 'varchar', 'nchar')")
tdSql.execute(
f'''create stable if not exists {dbname}.{stbname}
(ts timestamp, c0 tinyint, c1 smallint, c2 int, c3 bigint, c4 double, c5 float, c6 bool, c7 varchar(10), c8 nchar(10)) tags(t1 int)
'''
)
tdSql.execute(
f'''create table if not exists {dbname}.{ctbname1} using {dbname}.{stbname} tags(1)
'''
)
tdSql.execute(
f'''create table if not exists {dbname}.{ctbname2} using {dbname}.{stbname} tags(1)
'''
)
tdSql.execute(f"insert into {dbname}.{ctbname1} values ('2020-02-01 00:00:05', 5, 5, 5, 5, 5.0, 5.0, true, 'varchar', 'nchar')")
tdSql.execute(f"insert into {dbname}.{ctbname1} values ('2020-02-01 00:00:10', 10, 10, 10, 10, 10.0, 10.0, true, 'varchar', 'nchar')")
tdSql.execute(f"insert into {dbname}.{ctbname1} values ('2020-02-01 00:00:15', 15, 15, 15, 15, 15.0, 15.0, true, 'varchar', 'nchar')")
tdSql.execute(f"insert into {dbname}.{ctbname2} values ('2020-02-02 00:00:05', 5, 5, 5, 5, 5.0, 5.0, true, 'varchar', 'nchar')")
tdSql.execute(f"insert into {dbname}.{ctbname2} values ('2020-02-02 00:00:10', 10, 10, 10, 10, 10.0, 10.0, true, 'varchar', 'nchar')")
tdSql.execute(f"insert into {dbname}.{ctbname2} values ('2020-02-02 00:00:15', 15, 15, 15, 15, 15.0, 15.0, true, 'varchar', 'nchar')")
tdSql.execute(f"flush database {dbname}");
# test fill null
@ -851,6 +883,10 @@ class TDTestCase:
tdSql.checkRows(3)
tdSql.checkCols(4)
tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3),interp(c4),interp(c5) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(linear)")
tdSql.checkRows(3)
tdSql.checkCols(6)
for i in range (tdSql.queryCols):
tdSql.checkData(0, i, 13)
@ -877,6 +913,21 @@ class TDTestCase:
tdSql.error(f"select interp('abcd') from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)")
tdSql.error(f"select interp('中文字符') from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)")
tdLog.printNoPrefix("==========step12:stable cases")
#tdSql.query(f"select interp(c0) from {dbname}.{stbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(null)")
#tdSql.checkRows(13)
#tdSql.query(f"select interp(c0) from {dbname}.{ctbname1} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(null)")
#tdSql.checkRows(13)
#tdSql.query(f"select interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:04', '2020-02-02 00:00:16') every(1s) fill(null)")
#tdSql.checkRows(13)
#tdSql.query(f"select _irowts,interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:04', '2020-02-02 00:00:16') every(1h) fill(prev)")
#tdSql.query(f"select tbname,_irowts,interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:04', '2020-02-02 00:00:16') every(1h) fill(prev)")
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")

View File

@ -24,7 +24,7 @@ class TDTestCase:
tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5")
tdSql.execute(
f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp,c11 int UNSIGNED, c12 bigint UNSIGNED, c13 smallint UNSIGNED, c14 tinyint UNSIGNED)
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
@ -35,12 +35,12 @@ class TDTestCase:
for j in range(self.row_nums):
ts+=j*self.time_step
tdSql.execute(
f"insert into {dbname}.ct{i+1} values({ts}, 1, 11111, 111, 1, 1.11, 11.11, 2, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
f"insert into {dbname}.ct{i+1} values({ts}, 1, 11111, 111, 1, 1.11, 11.11, 2, 'binary{j}', 'nchar{j}', now()+{1*j}a, 1, 11111, 111, 1 )"
)
tdSql.execute(f"insert into {dbname}.ct1 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(f"insert into {dbname}.ct1 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(f"insert into {dbname}.ct1 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(f"insert into {dbname}.ct1 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(f"insert into {dbname}.ct1 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(f"insert into {dbname}.ct1 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL, NULL, NULL ) ")
tdLog.info(" prepare data for distributed_aggregate done! ")
@ -48,7 +48,7 @@ class TDTestCase:
tdSql.query(f"desc {dbname}.stb1 ")
schema_list = tdSql.queryResult
for col_type in schema_list:
if col_type[1] in ["TINYINT" ,"SMALLINT","BIGINT" ,"INT","FLOAT","DOUBLE"]:
if col_type[1] in ["TINYINT" ,"SMALLINT","BIGINT" ,"INT","FLOAT","DOUBLE","TINYINT UNSIGNED" ,"SMALLINT UNSIGNED","BIGINT UNSIGNED" ,"INT UNSIGNED"]:
tdSql.query(f"select twa({col_type[0]}) from {dbname}.stb1 partition by tbname ")
else:
tdSql.error(f"select twa({col_type[0]}) from {dbname}.stb1 partition by tbname ")
@ -98,11 +98,57 @@ class TDTestCase:
tdSql.query(f"select twa(c1) from {dbname}.stb1 partition by t1")
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,0,1.000000000)
tdSql.query(f"select twa(c11) from {dbname}.ct1 ")
tdSql.checkData(0,0,1.000000000)
tdSql.query(f"select twa(c11) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,0,1.000000000)
tdSql.query(f"select twa(c12) from {dbname}.stb1 group by tbname ")
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,0,11111.000000000)
tdSql.query(f"select twa(c11+c12) from {dbname}.stb1 partition by tbname ")
tdSql.checkData(0,0,11112.000000000)
tdSql.query(f"select twa(c11) from {dbname}.stb1 partition by t1")
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,0,1.000000000)
tdSql.query(f"select twa(c13) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(self.tb_nums)
tdSql.query(f"select twa(c13) from {dbname}.stb1 group by tbname ")
tdSql.checkRows(self.tb_nums)
tdSql.query(f"select twa(c14) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(self.tb_nums)
tdSql.query(f"select twa(c14) from {dbname}.stb1 group by tbname ")
tdSql.checkRows(self.tb_nums)
# union all
tdSql.query(f"select twa(c1) from {dbname}.stb1 partition by tbname union all select twa(c1) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(40)
tdSql.checkData(0,0,1.000000000)
tdSql.query(f"select twa(c11) from {dbname}.stb1 partition by tbname union all select twa(c11) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(40)
tdSql.checkData(0,0,1.000000000)
tdSql.query(f"select twa(c2) from {dbname}.stb1 partition by tbname union all select twa(c2) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(40)
tdSql.query(f"select twa(c3) from {dbname}.stb1 partition by tbname union all select twa(c3) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(40)
tdSql.query(f"select twa(c4) from {dbname}.stb1 partition by tbname union all select twa(c4) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(40)
tdSql.query(f"select twa(c12) from {dbname}.stb1 partition by tbname union all select twa(c12) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(40)
tdSql.query(f"select twa(c13) from {dbname}.stb1 partition by tbname union all select twa(c13) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(40)
tdSql.query(f"select twa(c14) from {dbname}.stb1 partition by tbname union all select twa(c14) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(40)
# join
@ -122,6 +168,7 @@ class TDTestCase:
tdSql.checkRows(1)
tdSql.checkData(0,0,4.500000000)
tdSql.checkData(0,1,4.500000000)
# mixup with other functions
tdSql.query(f"select twa(c1),twa(c2),max(c1),elapsed(ts) from {dbname}.ct1 ")

View File

@ -240,12 +240,14 @@ class TDTestCase:
time.sleep(3)
tdLog.info("================= stop dnode, and remove data file, then start dnode ===========================")
tdDnodes.stop(1)
# time.sleep(5)
dataPath = buildPath + "/../sim/dnode1/data/*"
shellCmd = 'rm -rf ' + dataPath
tdLog.info(shellCmd)
os.system(shellCmd)
tdDnodes.start(1)
#tdDnodes.start(1)
tdDnodes.starttaosd(1)
time.sleep(2)
######### redo to consume

View File

@ -13,7 +13,7 @@ python3 ./test.py -f 0-others/udf_restart_taosd.py
python3 ./test.py -f 0-others/cachemodel.py
python3 ./test.py -f 0-others/udf_cfg1.py
python3 ./test.py -f 0-others/udf_cfg2.py
python3 ./test.py -f 0-others/taosdShell.py -N 5 -M 3 -Q 3
python3 ./test.py -f 0-others/sysinfo.py
python3 ./test.py -f 0-others/user_control.py
python3 ./test.py -f 0-others/fsync.py
@ -363,7 +363,7 @@ python3 ./test.py -f 2-query/concat.py -Q 2
python3 ./test.py -f 2-query/concat2.py -Q 2
python3 ./test.py -f 2-query/concat_ws.py -Q 2
python3 ./test.py -f 2-query/concat_ws2.py -Q 2
#python3 ./test.py -f 2-query/check_tsdb.py -Q 2
python3 ./test.py -f 2-query/check_tsdb.py -Q 2
python3 ./test.py -f 2-query/spread.py -Q 2
python3 ./test.py -f 2-query/hyperloglog.py -Q 2
python3 ./test.py -f 2-query/explain.py -Q 2
@ -402,13 +402,17 @@ python3 ./test.py -f 2-query/arctan.py -Q 2
python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 2
python3 ./test.py -f 2-query/interp.py -Q 2
# python3 ./test.py -f 2-query/nestedQuery.py -Q 2
# python3 ./test.py -f 2-query/nestedQuery_str.py -Q 2
python3 ./test.py -f 2-query/nestedQuery.py -Q 2
python3 ./test.py -f 2-query/nestedQuery_str.py -Q 2
python3 ./test.py -f 2-query/nestedQuery_math.py -Q 2
python3 ./test.py -f 2-query/nestedQuery_time.py -Q 2
python3 ./test.py -f 2-query/stablity.py -Q 2
python3 ./test.py -f 2-query/stablity_1.py -Q 2
python3 ./test.py -f 2-query/avg.py -Q 2
# python3 ./test.py -f 2-query/elapsed.py -Q 2
python3 ./test.py -f 2-query/elapsed.py -Q 2
python3 ./test.py -f 2-query/csum.py -Q 2
#python3 ./test.py -f 2-query/mavg.py -Q 2
python3 ./test.py -f 2-query/mavg.py -Q 2
python3 ./test.py -f 2-query/sample.py -Q 2
python3 ./test.py -f 2-query/function_diff.py -Q 2
python3 ./test.py -f 2-query/unique.py -Q 2
@ -432,8 +436,9 @@ python3 ./test.py -f 2-query/count_partition.py -Q 2
python3 ./test.py -f 2-query/max_partition.py -Q 2
python3 ./test.py -f 2-query/last_row.py -Q 2
python3 ./test.py -f 2-query/tsbsQuery.py -Q 2
#------------querPolicy 3-----------
python3 ./test.py -f 2-query/sml.py -Q 2
#------------querPolicy 3-----------
python3 ./test.py -f 2-query/between.py -Q 3
python3 ./test.py -f 2-query/distinct.py -Q 3
python3 ./test.py -f 2-query/varchar.py -Q 3
@ -453,7 +458,7 @@ python3 ./test.py -f 2-query/concat.py -Q 3
python3 ./test.py -f 2-query/concat2.py -Q 3
python3 ./test.py -f 2-query/concat_ws.py -Q 3
python3 ./test.py -f 2-query/concat_ws2.py -Q 3
#python3 ./test.py -f 2-query/check_tsdb.py -Q 3
python3 ./test.py -f 2-query/check_tsdb.py -Q 3
python3 ./test.py -f 2-query/spread.py -Q 3
python3 ./test.py -f 2-query/hyperloglog.py -Q 3
python3 ./test.py -f 2-query/explain.py -Q 3
@ -464,7 +469,7 @@ python3 ./test.py -f 2-query/Today.py -Q 3
python3 ./test.py -f 2-query/max.py -Q 3
python3 ./test.py -f 2-query/min.py -Q 3
python3 ./test.py -f 2-query/count.py -Q 3
#python3 ./test.py -f 2-query/last.py -Q 3
python3 ./test.py -f 2-query/last.py -Q 3
python3 ./test.py -f 2-query/first.py -Q 3
python3 ./test.py -f 2-query/To_iso8601.py -Q 3
python3 ./test.py -f 2-query/To_unixtimestamp.py -Q 3
@ -490,12 +495,18 @@ python3 ./test.py -f 2-query/arcsin.py -Q 3
python3 ./test.py -f 2-query/arccos.py -Q 3
python3 ./test.py -f 2-query/arctan.py -Q 3
python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 3
# python3 ./test.py -f 2-query/nestedQuery.py -Q 3
# python3 ./test.py -f 2-query/nestedQuery_str.py -Q 3
# python3 ./test.py -f 2-query/avg.py -Q 3
# python3 ./test.py -f 2-query/elapsed.py -Q 3
python3 ./test.py -f 2-query/nestedQuery.py -Q 3
python3 ./test.py -f 2-query/nestedQuery_str.py -Q 3
python3 ./test.py -f 2-query/nestedQuery_math.py -Q 3
python3 ./test.py -f 2-query/nestedQuery_time.py -Q 3
python3 ./test.py -f 2-query/stablity.py -Q 3
python3 ./test.py -f 2-query/stablity_1.py -Q 3
python3 ./test.py -f 2-query/avg.py -Q 3
python3 ./test.py -f 2-query/elapsed.py -Q 3
python3 ./test.py -f 2-query/csum.py -Q 3
#python3 ./test.py -f 2-query/mavg.py -Q 3
python3 ./test.py -f 2-query/mavg.py -Q 3
python3 ./test.py -f 2-query/sample.py -Q 3
python3 ./test.py -f 2-query/function_diff.py -Q 3
python3 ./test.py -f 2-query/unique.py -Q 3
@ -544,7 +555,7 @@ python3 ./test.py -f 2-query/concat.py -Q 4
python3 ./test.py -f 2-query/concat2.py -Q 4
python3 ./test.py -f 2-query/concat_ws.py -Q 4
python3 ./test.py -f 2-query/concat_ws2.py -Q 4
#python3 ./test.py -f 2-query/check_tsdb.py -Q 4
python3 ./test.py -f 2-query/check_tsdb.py -Q 4
python3 ./test.py -f 2-query/spread.py -Q 4
python3 ./test.py -f 2-query/hyperloglog.py -Q 4
python3 ./test.py -f 2-query/explain.py -Q 4
@ -555,7 +566,7 @@ python3 ./test.py -f 2-query/Today.py -Q 4
python3 ./test.py -f 2-query/max.py -Q 4
python3 ./test.py -f 2-query/min.py -Q 4
python3 ./test.py -f 2-query/count.py -Q 4
#python3 ./test.py -f 2-query/last.py -Q 4
python3 ./test.py -f 2-query/last.py -Q 4
python3 ./test.py -f 2-query/first.py -Q 4
python3 ./test.py -f 2-query/To_iso8601.py -Q 4
python3 ./test.py -f 2-query/To_unixtimestamp.py -Q 4
@ -581,12 +592,19 @@ python3 ./test.py -f 2-query/arcsin.py -Q 4
python3 ./test.py -f 2-query/arccos.py -Q 4
python3 ./test.py -f 2-query/arctan.py -Q 4
python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 4
# python3 ./test.py -f 2-query/nestedQuery.py -Q 4
# python3 ./test.py -f 2-query/nestedQuery_str.py -Q 4
# python3 ./test.py -f 2-query/avg.py -Q 4
# python3 ./test.py -f 2-query/elapsed.py -Q 4
python3 ./test.py -f 2-query/nestedQuery.py -Q 4
python3 ./test.py -f 2-query/nestedQuery_str.py -Q 4
python3 ./test.py -f 2-query/nestedQuery_math.py -Q 4
python3 ./test.py -f 2-query/nestedQuery_time.py -Q 4
python3 ./test.py -f 2-query/stablity.py -Q 4
python3 ./test.py -f 2-query/stablity_1.py -Q 4
python3 ./test.py -f 2-query/avg.py -Q 4
python3 ./test.py -f 2-query/elapsed.py -Q 4
python3 ./test.py -f 2-query/csum.py -Q 4
#python3 ./test.py -f 2-query/mavg.py -Q 4
python3 ./test.py -f 2-query/mavg.py -Q 4
python3 ./test.py -f 2-query/sample.py -Q 4
python3 ./test.py -f 2-query/function_diff.py -Q 4
python3 ./test.py -f 2-query/unique.py -Q 4
@ -610,5 +628,5 @@ python3 ./test.py -f 2-query/count_partition.py -Q 4
python3 ./test.py -f 2-query/max_partition.py -Q 4
python3 ./test.py -f 2-query/last_row.py -Q 4
python3 ./test.py -f 2-query/tsbsQuery.py -Q 4
#python3 ./test.py -f 2-query/sml.py -Q 4
python3 ./test.py -f 2-query/sml.py -Q 4
python3 ./test.py -f 2-query/interp.py -Q 4

View File

@ -321,7 +321,7 @@ if __name__ == "__main__":
for dnode in tdDnodes.dnodes:
tdDnodes.starttaosd(dnode.index)
tdCases.logSql(logSql)
if restful:
tAdapter.deploy(adapter_cfg_dict)
tAdapter.start()
@ -341,6 +341,26 @@ if __name__ == "__main__":
print("check dnode ready")
except Exception as r:
print(r)
if queryPolicy != 1:
queryPolicy=int(queryPolicy)
if restful:
conn = taosrest.connect(url=f"http://{host}:6041")
else:
conn = taos.connect(host,config=tdDnodes.getSimCfgPath())
cursor = conn.cursor()
cursor.execute("create qnode on dnode 1")
cursor.execute(f'alter local "queryPolicy" "{queryPolicy}"')
cursor.execute("show local variables")
res = cursor.fetchall()
for i in range(cursor.rowcount):
if res[i][0] == "queryPolicy" :
if int(res[i][1]) == int(queryPolicy):
tdLog.success(f'alter queryPolicy to {queryPolicy} successfully')
else:
tdLog.debug(res)
tdLog.exit(f"alter queryPolicy to {queryPolicy} failed")
if ucase is not None and hasattr(ucase, 'noConn') and ucase.noConn == True:
conn = None
else:
@ -455,6 +475,26 @@ if __name__ == "__main__":
except Exception as r:
print(r)
if queryPolicy != 1:
queryPolicy=int(queryPolicy)
if restful:
conn = taosrest.connect(url=f"http://{host}:6041")
else:
conn = taos.connect(host,config=tdDnodes.getSimCfgPath())
cursor = conn.cursor()
cursor.execute("create qnode on dnode 1")
cursor.execute(f'alter local "queryPolicy" "{queryPolicy}"')
cursor.execute("show local variables")
res = cursor.fetchall()
for i in range(cursor.rowcount):
if res[i][0] == "queryPolicy" :
if int(res[i][1]) == int(queryPolicy):
tdLog.success(f'alter queryPolicy to {queryPolicy} successfully')
else:
tdLog.debug(res)
tdLog.exit(f"alter queryPolicy to {queryPolicy} failed")
if testCluster:
tdLog.info("Procedures for testing cluster")