Merge branch '3.0' of github.com:taosdata/TDengine into szhou/feature/project-compaction

commit 474c84bcc9

@@ -236,7 +236,7 @@ void blockCompressEncode(const SSDataBlock* pBlock, char* data, int32_t*
int8_t needCompress);
const char* blockCompressDecode(SSDataBlock* pBlock, int32_t numOfCols, int32_t numOfRows, const char* pData);

void blockDebugShowData(const SArray* dataBlocks, const char* flag);
void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag);
// for debug
char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf);

@@ -24,6 +24,8 @@ extern "C" {
#include "querynodes.h"
#include "tname.h"

#define SLOT_NAME_LEN TSDB_TABLE_NAME_LEN + TSDB_COL_NAME_LEN

typedef struct SLogicNode {
ENodeType type;
SNodeList* pTargets; // SColumnNode

@@ -74,8 +76,8 @@ typedef struct SScanLogicNode {
int16_t tsColId;
double filesFactor;
SArray* pSmaIndexes;
SNodeList* pPartTags;
bool partSort;
SNodeList* pGroupTags;
bool groupSort;
} SScanLogicNode;

typedef struct SJoinLogicNode {

@@ -100,6 +102,7 @@ typedef struct SProjectLogicNode {
typedef struct SIndefRowsFuncLogicNode {
SLogicNode node;
SNodeList* pFuncs;
bool isTailFunc;
} SIndefRowsFuncLogicNode;

typedef struct SInterpFuncLogicNode {

@@ -138,6 +141,7 @@ typedef struct SMergeLogicNode {
SNodeList* pInputs;
int32_t numOfChannels;
int32_t srcGroupId;
bool groupSort;
} SMergeLogicNode;

typedef enum EWindowType { WINDOW_TYPE_INTERVAL = 1, WINDOW_TYPE_SESSION, WINDOW_TYPE_STATE } EWindowType;

@@ -184,6 +188,7 @@ typedef struct SFillLogicNode {
typedef struct SSortLogicNode {
SLogicNode node;
SNodeList* pSortKeys;
bool groupSort;
} SSortLogicNode;

typedef struct SPartitionLogicNode {

@@ -230,6 +235,7 @@ typedef struct SSlotDescNode {
bool reserve;
bool output;
bool tag;
char name[SLOT_NAME_LEN];
} SSlotDescNode;

typedef struct SDataBlockDescNode {

@@ -279,7 +285,8 @@ typedef struct STableScanPhysiNode {
double ratio;
int32_t dataRequired;
SNodeList* pDynamicScanFuncs;
SNodeList* pPartitionTags;
SNodeList* pGroupTags;
bool groupSort;
int64_t interval;
int64_t offset;
int64_t sliding;

@@ -353,6 +360,7 @@ typedef struct SMergePhysiNode {
SNodeList* pTargets;
int32_t numOfChannels;
int32_t srcGroupId;
bool groupSort;
} SMergePhysiNode;

typedef struct SWinodwPhysiNode {

@@ -259,6 +259,7 @@ typedef struct SSelectStmt {
bool hasTailFunc;
bool hasInterpFunc;
bool hasLastRowFunc;
bool groupSort;
} SSelectStmt;

typedef enum ESetOperatorType { SET_OP_TYPE_UNION_ALL = 1, SET_OP_TYPE_UNION } ESetOperatorType;

@@ -1605,7 +1605,7 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
return buf;
}

void blockDebugShowData(const SArray* dataBlocks, const char* flag) {
void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag) {
char pBuf[128] = {0};
int32_t sz = taosArrayGetSize(dataBlocks);
for (int32_t i = 0; i < sz; i++) {

@@ -1613,7 +1613,7 @@ void blockDebugShowData(const SArray* dataBlocks, const char* flag) {
size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock);

int32_t rows = pDataBlock->info.rows;
printf("%s |block type %d |child id %d|\n", flag, (int32_t)pDataBlock->info.type, pDataBlock->info.childId);
printf("%s |block type %d |child id %d|group id %zX\n", flag, (int32_t)pDataBlock->info.type, pDataBlock->info.childId, pDataBlock->info.groupId);
for (int32_t j = 0; j < rows; j++) {
printf("%s |", flag);
for (int32_t k = 0; k < numOfCols; k++) {

@@ -39,8 +39,10 @@ typedef struct {
int32_t id;
int32_t errCode;
int32_t acceptableCode;
ETrnStage stage;
int32_t retryCode;
ETrnAct actionType;
ETrnStage stage;
int8_t reserved;
int8_t rawWritten;
int8_t msgSent;
int8_t msgReceived;

@@ -234,7 +234,7 @@ static int32_t mndProcessRetrieveSysTableReq(SRpcMsg *pReq) {
if (retrieveReq.user[0] != 0) {
memcpy(pReq->info.conn.user, retrieveReq.user, TSDB_USER_LEN);
} else {
memcpy(pReq->info.conn.user, TSDB_DEFAULT_USER, TSDB_USER_LEN);
memcpy(pReq->info.conn.user, TSDB_DEFAULT_USER, strlen(TSDB_DEFAULT_USER) + 1);
}
if (mndCheckShowPrivilege(pMnode, pReq->info.conn.user, pShow->type, retrieveReq.db) != 0) {
return -1;

@@ -15,9 +15,9 @@

#define _DEFAULT_SOURCE
#include "mndTrans.h"
#include "mndPrivilege.h"
#include "mndConsumer.h"
#include "mndDb.h"
#include "mndPrivilege.h"
#include "mndShow.h"
#include "mndSync.h"
#include "mndUser.h"

@@ -55,7 +55,7 @@ static bool mndTransPerfromFinishedStage(SMnode *pMnode, STrans *pTrans);
static bool mndCannotExecuteTransAction(SMnode *pMnode) { return !pMnode->deploy && !mndIsMaster(pMnode); }

static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans);
static int32_t mndProcessTransReq(SRpcMsg *pReq);
static int32_t mndProcessTransTimer(SRpcMsg *pReq);
static int32_t mndProcessTtl(SRpcMsg *pReq);
static int32_t mndProcessKillTransReq(SRpcMsg *pReq);

@@ -73,7 +73,7 @@ int32_t mndInitTrans(SMnode *pMnode) {
.deleteFp = (SdbDeleteFp)mndTransActionDelete,
};

mndSetMsgHandle(pMnode, TDMT_MND_TRANS_TIMER, mndProcessTransReq);
mndSetMsgHandle(pMnode, TDMT_MND_TRANS_TIMER, mndProcessTransTimer);
mndSetMsgHandle(pMnode, TDMT_MND_KILL_TRANS, mndProcessKillTransReq);

mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_TRANS, mndRetrieveTrans);

@@ -139,8 +139,10 @@ static SSdbRaw *mndTransActionEncode(STrans *pTrans) {
SDB_SET_INT32(pRaw, dataPos, pAction->id, _OVER)
SDB_SET_INT32(pRaw, dataPos, pAction->errCode, _OVER)
SDB_SET_INT32(pRaw, dataPos, pAction->acceptableCode, _OVER)
SDB_SET_INT32(pRaw, dataPos, pAction->retryCode, _OVER)
SDB_SET_INT8(pRaw, dataPos, pAction->actionType, _OVER)
SDB_SET_INT8(pRaw, dataPos, pAction->stage, _OVER)
SDB_SET_INT8(pRaw, dataPos, pAction->reserved, _OVER)
if (pAction->actionType == TRANS_ACTION_RAW) {
int32_t len = sdbGetRawTotalSize(pAction->pRaw);
SDB_SET_INT8(pRaw, dataPos, pAction->rawWritten, _OVER)

@@ -163,8 +165,10 @@ static SSdbRaw *mndTransActionEncode(STrans *pTrans) {
SDB_SET_INT32(pRaw, dataPos, pAction->id, _OVER)
SDB_SET_INT32(pRaw, dataPos, pAction->errCode, _OVER)
SDB_SET_INT32(pRaw, dataPos, pAction->acceptableCode, _OVER)
SDB_SET_INT32(pRaw, dataPos, pAction->retryCode, _OVER)
SDB_SET_INT8(pRaw, dataPos, pAction->actionType, _OVER)
SDB_SET_INT8(pRaw, dataPos, pAction->stage, _OVER)
SDB_SET_INT8(pRaw, dataPos, pAction->reserved, _OVER)
if (pAction->actionType == TRANS_ACTION_RAW) {
int32_t len = sdbGetRawTotalSize(pAction->pRaw);
SDB_SET_INT8(pRaw, dataPos, pAction->rawWritten, _OVER)

@@ -187,8 +191,10 @@ static SSdbRaw *mndTransActionEncode(STrans *pTrans) {
SDB_SET_INT32(pRaw, dataPos, pAction->id, _OVER)
SDB_SET_INT32(pRaw, dataPos, pAction->errCode, _OVER)
SDB_SET_INT32(pRaw, dataPos, pAction->acceptableCode, _OVER)
SDB_SET_INT32(pRaw, dataPos, pAction->retryCode, _OVER)
SDB_SET_INT8(pRaw, dataPos, pAction->actionType, _OVER)
SDB_SET_INT8(pRaw, dataPos, pAction->stage, _OVER)
SDB_SET_INT8(pRaw, dataPos, pAction->reserved, _OVER)
if (pAction->actionType == TRANS_ACTION_RAW) {
int32_t len = sdbGetRawTotalSize(pAction->pRaw);
SDB_SET_INT8(pRaw, dataPos, pAction->rawWritten, _OVER)

@@ -291,10 +297,12 @@ static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) {
SDB_GET_INT32(pRaw, dataPos, &action.id, _OVER)
SDB_GET_INT32(pRaw, dataPos, &action.errCode, _OVER)
SDB_GET_INT32(pRaw, dataPos, &action.acceptableCode, _OVER)
SDB_GET_INT32(pRaw, dataPos, &action.retryCode, _OVER)
SDB_GET_INT8(pRaw, dataPos, &actionType, _OVER)
action.actionType = actionType;
SDB_GET_INT8(pRaw, dataPos, &stage, _OVER)
action.stage = stage;
SDB_GET_INT8(pRaw, dataPos, &action.reserved, _OVER)
if (action.actionType == TRANS_ACTION_RAW) {
SDB_GET_INT8(pRaw, dataPos, &action.rawWritten, _OVER)
SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER)

@@ -324,10 +332,12 @@ static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) {
SDB_GET_INT32(pRaw, dataPos, &action.id, _OVER)
SDB_GET_INT32(pRaw, dataPos, &action.errCode, _OVER)
SDB_GET_INT32(pRaw, dataPos, &action.acceptableCode, _OVER)
SDB_GET_INT32(pRaw, dataPos, &action.retryCode, _OVER)
SDB_GET_INT8(pRaw, dataPos, &actionType, _OVER)
action.actionType = actionType;
SDB_GET_INT8(pRaw, dataPos, &stage, _OVER)
action.stage = stage;
SDB_GET_INT8(pRaw, dataPos, &action.reserved, _OVER)
if (action.actionType == TRANS_ACTION_RAW) {
SDB_GET_INT8(pRaw, dataPos, &action.rawWritten, _OVER)
SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER)

@@ -357,10 +367,12 @@ static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) {
SDB_GET_INT32(pRaw, dataPos, &action.id, _OVER)
SDB_GET_INT32(pRaw, dataPos, &action.errCode, _OVER)
SDB_GET_INT32(pRaw, dataPos, &action.acceptableCode, _OVER)
SDB_GET_INT32(pRaw, dataPos, &action.retryCode, _OVER)
SDB_GET_INT8(pRaw, dataPos, &actionType, _OVER)
action.actionType = actionType;
SDB_GET_INT8(pRaw, dataPos, &stage, _OVER)
action.stage = stage;
SDB_GET_INT8(pRaw, dataPos, &action.reserved, _OVER)
if (action.actionType) {
SDB_GET_INT8(pRaw, dataPos, &action.rawWritten, _OVER)
SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER)

@@ -463,15 +475,25 @@ static int32_t mndTransActionInsert(SSdb *pSdb, STrans *pTrans) {
if (fp) {
(*fp)(pSdb->pMnode, pTrans->param, pTrans->paramLen);
}
pTrans->startFunc = 0;
}

return 0;
}

static void mndTransDropData(STrans *pTrans) {
if (pTrans->redoActions != NULL) {
mndTransDropActions(pTrans->redoActions);
pTrans->redoActions = NULL;
}
if (pTrans->undoActions != NULL) {
mndTransDropActions(pTrans->undoActions);
pTrans->undoActions = NULL;
}
if (pTrans->commitActions != NULL) {
mndTransDropActions(pTrans->commitActions);
pTrans->commitActions = NULL;
}
if (pTrans->rpcRsp != NULL) {
taosMemoryFree(pTrans->rpcRsp);
pTrans->rpcRsp = NULL;

@@ -492,6 +514,7 @@ static int32_t mndTransActionDelete(SSdb *pSdb, STrans *pTrans, bool callFunc) {
if (fp) {
(*fp)(pSdb->pMnode, pTrans->param, pTrans->paramLen);
}
pTrans->stopFunc = 0;
}

mndTransDropData(pTrans);

@@ -805,7 +828,7 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) {
sendRsp = true;
}
} else {
if (pTrans->stage == TRN_STAGE_REDO_ACTION && pTrans->failedTimes > 3) {
if (pTrans->stage == TRN_STAGE_REDO_ACTION && pTrans->failedTimes > 6) {
if (code == 0) code = TSDB_CODE_MND_TRANS_UNKNOW_ERROR;
sendRsp = true;
}

@@ -875,8 +898,8 @@ int32_t mndTransProcessRsp(SRpcMsg *pRsp) {
pAction->errCode = pRsp->code;
}

mDebug("trans:%d, %s:%d response is received, code:0x%x, accept:0x%x", transId, mndTransStr(pAction->stage), action,
pRsp->code, pAction->acceptableCode);
mDebug("trans:%d, %s:%d response is received, code:0x%x, accept:0x%x retry:0x%x", transId,
mndTransStr(pAction->stage), action, pRsp->code, pAction->acceptableCode, pAction->retryCode);
mndTransExecute(pMnode, pTrans);

_OVER:

@@ -884,6 +907,21 @@ _OVER:
return 0;
}

static void mndTransResetAction(SMnode *pMnode, STrans *pTrans, STransAction *pAction) {
pAction->rawWritten = 0;
pAction->msgSent = 0;
pAction->msgReceived = 0;
if (pAction->errCode == TSDB_CODE_RPC_REDIRECT || pAction->errCode == TSDB_CODE_SYN_NEW_CONFIG_ERROR ||
pAction->errCode == TSDB_CODE_SYN_INTERNAL_ERROR || pAction->errCode == TSDB_CODE_SYN_NOT_LEADER) {
pAction->epSet.inUse = (pAction->epSet.inUse + 1) % pAction->epSet.numOfEps;
mDebug("trans:%d, %s:%d execute status is reset and set epset inuse:%d", pTrans->id, mndTransStr(pAction->stage),
pAction->id, pAction->epSet.inUse);
} else {
mDebug("trans:%d, %s:%d execute status is reset", pTrans->id, mndTransStr(pAction->stage), pAction->id);
}
pAction->errCode = 0;
}

static void mndTransResetActions(SMnode *pMnode, STrans *pTrans, SArray *pArray) {
int32_t numOfActions = taosArrayGetSize(pArray);

@@ -894,18 +932,7 @@ static void mndTransResetActions(SMnode *pMnode, STrans *pTrans, SArray *pArray)
continue;
if (pAction->rawWritten && (pAction->errCode == 0 || pAction->errCode == pAction->acceptableCode)) continue;

pAction->rawWritten = 0;
pAction->msgSent = 0;
pAction->msgReceived = 0;
if (pAction->errCode == TSDB_CODE_RPC_REDIRECT || pAction->errCode == TSDB_CODE_SYN_NEW_CONFIG_ERROR ||
pAction->errCode == TSDB_CODE_SYN_INTERNAL_ERROR || pAction->errCode == TSDB_CODE_SYN_NOT_LEADER) {
pAction->epSet.inUse = (pAction->epSet.inUse + 1) % pAction->epSet.numOfEps;
mDebug("trans:%d, %s:%d execute status is reset and set epset inuse:%d", pTrans->id, mndTransStr(pAction->stage),
action, pAction->epSet.inUse);
} else {
mDebug("trans:%d, %s:%d execute status is reset", pTrans->id, mndTransStr(pAction->stage), action);
}
pAction->errCode = 0;
mndTransResetAction(pMnode, pTrans, pAction);
}
}

@@ -1112,9 +1139,9 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
if (pAction->msgReceived) {
if (pAction->errCode != 0 && pAction->errCode != pAction->acceptableCode) {
code = pAction->errCode;
pAction->msgSent = 0;
pAction->msgReceived = 0;
mDebug("trans:%d, %s:%d execute status is reset", pTrans->id, mndTransStr(pAction->stage), action);
mndTransResetAction(pMnode, pTrans, pAction);
} else {
mDebug("trans:%d, %s:%d execute successfully", pTrans->id, mndTransStr(pAction->stage), action);
}
} else {
code = TSDB_CODE_ACTION_IN_PROGRESS;

@@ -1123,6 +1150,8 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
if (pAction->rawWritten) {
if (pAction->errCode != 0 && pAction->errCode != pAction->acceptableCode) {
code = pAction->errCode;
} else {
mDebug("trans:%d, %s:%d write successfully", pTrans->id, mndTransStr(pAction->stage), action);
}
}
}

@@ -1156,9 +1185,15 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
} else if (code == TSDB_CODE_ACTION_IN_PROGRESS) {
mDebug("trans:%d, %s:%d is in progress and wait it finish", pTrans->id, mndTransStr(pAction->stage), pAction->id);
break;
} else if (code == pAction->retryCode) {
mDebug("trans:%d, %s:%d receive code:0x%x and retry", pTrans->id, mndTransStr(pAction->stage), pAction->id, code);
taosMsleep(300);
continue;
} else {
terrno = code;
pTrans->code = code;
mDebug("trans:%d, %s:%d receive code:0x%x and wait another schedule, failedTimes:%d", pTrans->id,
mndTransStr(pAction->stage), pAction->id, code, pTrans->failedTimes);
break;
}
}
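
The retryCode branch added above makes the serial redo loop re-run the same action after a 300 ms pause whenever it fails with exactly the code the action declared as retryable, instead of treating that as a failure. A minimal standalone sketch of that control flow follows; DemoAction, executeAction and demoSleepMs are invented names for illustration and are not part of this commit.

#include <stdint.h>

/* Hypothetical stand-ins for the mnode types and helpers used above. */
typedef struct {
  int32_t retryCode;  /* error code that triggers a retry instead of a failure */
} DemoAction;

extern int32_t executeAction(DemoAction *pAction);  /* 0 on success, otherwise an error code */
extern void    demoSleepMs(int32_t ms);

/* Run one action, retrying while it keeps returning its declared retryable code. */
static int32_t runActionWithRetry(DemoAction *pAction) {
  for (;;) {
    int32_t code = executeAction(pAction);
    if (code == 0) {
      return 0;                  /* success: the caller moves on to the next action */
    }
    if (code == pAction->retryCode) {
      demoSleepMs(300);          /* same 300 ms pause as taosMsleep(300) in the hunk above */
      continue;                  /* re-execute the same action */
    }
    return code;                 /* any other error is reported back for another schedule */
  }
}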

@@ -1343,7 +1378,7 @@ void mndTransExecute(SMnode *pMnode, STrans *pTrans) {
mndTransSendRpcRsp(pMnode, pTrans);
}

static int32_t mndProcessTransReq(SRpcMsg *pReq) {
static int32_t mndProcessTransTimer(SRpcMsg *pReq) {
mndTransPullup(pReq->info.node);
return 0;
}

@@ -15,10 +15,10 @@

#define _DEFAULT_SOURCE
#include "mndVgroup.h"
#include "mndPrivilege.h"
#include "mndDb.h"
#include "mndDnode.h"
#include "mndMnode.h"
#include "mndPrivilege.h"
#include "mndShow.h"
#include "mndTrans.h"
#include "mndUser.h"

@@ -896,6 +896,8 @@ int32_t mndAddAlterVnodeConfirmAction(SMnode *pMnode, STrans *pTrans, SDbObj *pD
action.pCont = pHead;
action.contLen = contLen;
action.msgType = TDMT_VND_ALTER_CONFIRM;
// incorrect redirect result will cause this erro
action.retryCode = TSDB_CODE_VND_INVALID_VGROUP_ID;

if (mndTransAppendRedoAction(pTrans, &action) != 0) {
taosMemoryFree(pHead);

@@ -942,6 +944,8 @@ static int32_t mndAddSetVnodeStandByAction(SMnode *pMnode, STrans *pTrans, SDbOb
action.contLen = contLen;
action.msgType = TDMT_SYNC_SET_VNODE_STANDBY;
action.acceptableCode = TSDB_CODE_NODE_NOT_DEPLOYED;
// Keep retrying until the target vnode is not the leader
action.retryCode = TSDB_CODE_SYN_IS_LEADER;

if (isRedo) {
if (mndTransAppendRedoAction(pTrans, &action) != 0) {

@@ -1229,7 +1233,8 @@ static int32_t mndProcessRedistributeVgroupMsg(SRpcMsg *pReq) {
}

if (req.dnodeId1 == pVgroup->vnodeGid[0].dnodeId) {
terrno = TSDB_CODE_MND_VGROUP_UN_CHANGED;
// terrno = TSDB_CODE_MND_VGROUP_UN_CHANGED;
code = 0;
goto _OVER;
}

@@ -1351,7 +1356,8 @@ static int32_t mndProcessRedistributeVgroupMsg(SRpcMsg *pReq) {
}

if (pNew1 == NULL && pOld1 == NULL && pNew2 == NULL && pOld2 == NULL && pNew3 == NULL && pOld3 == NULL) {
terrno = TSDB_CODE_MND_VGROUP_UN_CHANGED;
// terrno = TSDB_CODE_MND_VGROUP_UN_CHANGED;
code = 0;
goto _OVER;
}

@@ -487,7 +487,7 @@ static int32_t tdFetchAndSubmitRSmaResult(SRSmaInfoItem *pItem, int8_t blkType)
#if 1
char flag[10] = {0};
snprintf(flag, 10, "level %" PRIi8, pItem->level);
blockDebugShowData(pResult, flag);
blockDebugShowDataBlocks(pResult, flag);
#endif
STsdb *sinkTsdb = (pItem->level == TSDB_RETENTION_L1 ? pSma->pRSmaTsdb1 : pSma->pRSmaTsdb2);
SSubmitReq *pReq = NULL;

@@ -299,7 +299,7 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, int64_t version, SRpcMsg *pMsg, SRp
void smaHandleRes(void *pVnode, int64_t smaId, const SArray *data) {
// TODO

blockDebugShowData(data, __func__);
blockDebugShowDataBlocks(data, __func__);
tdProcessTSmaInsert(((SVnode *)pVnode)->pSma, smaId, (const char *)data);
}

@@ -136,6 +136,13 @@ bool tsortIsNullVal(STupleHandle* pVHandle, int32_t colId);
*/
void* tsortGetValue(STupleHandle* pVHandle, int32_t colId);

/**
*
* @param pVHandle
* @return
*/
uint64_t tsortGetGroupId(STupleHandle* pVHandle);

/**
*
* @param pSortHandle

@@ -2051,7 +2051,7 @@ int32_t createScanTableListInfo(STableScanPhysiNode* pTableScanNode, SReadHandle
qDebug("no table qualified for query, TID:0x%" PRIx64 ", QID:0x%" PRIx64, taskId, queryId);
return TSDB_CODE_SUCCESS;
}
code = generateGroupIdMap(pTableListInfo, pHandle, pTableScanNode->pPartitionTags);
code = generateGroupIdMap(pTableListInfo, pHandle, pTableScanNode->pGroupTags);
if (code != TSDB_CODE_SUCCESS) {
return code;
}

@@ -2455,7 +2455,7 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN
if (pInfo == NULL || pOperator == NULL) {
goto _error;
}
if (pTableScanNode->pPartitionTags) {
if (pTableScanNode->pGroupTags) {
taosArraySort(pTableListInfo->pTableList, compareTableKeyInfoByGid);
}

@@ -142,7 +142,8 @@ SSDataBlock* getSortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, i

SSDataBlock* loadNextDataBlock(void* param) {
SOperatorInfo* pOperator = (SOperatorInfo*)param;
return pOperator->fpSet.getNextFn(pOperator);
SSDataBlock *pBlock = pOperator->fpSet.getNextFn(pOperator);
return pBlock;
}

// todo refactor: merged with fetch fp

@@ -505,7 +506,9 @@ typedef struct SMultiwaySortMergeOperatorInfo {

SSDataBlock* pInputBlock;
int64_t startTs; // sort start time
bool hasGroupId;
uint64_t groupId;
STupleHandle *prefetchedTuple;
} SMultiwaySortMergeOperatorInfo;

int32_t doOpenMultiwaySortMergeOperator(SOperatorInfo* pOperator) {

@@ -560,12 +563,30 @@ SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pData
blockDataEnsureCapacity(p, capacity);

while (1) {
STupleHandle* pTupleHandle = tsortNextTuple(pHandle);
STupleHandle* pTupleHandle = NULL;
if (pInfo->prefetchedTuple == NULL) {
pTupleHandle = tsortNextTuple(pHandle);
} else {
pTupleHandle = pInfo->prefetchedTuple;
pInfo->groupId = tsortGetGroupId(pTupleHandle);
pInfo->prefetchedTuple = NULL;
}

if (pTupleHandle == NULL) {
break;
}

uint64_t tupleGroupId = tsortGetGroupId(pTupleHandle);
if (!pInfo->hasGroupId) {
pInfo->groupId = tupleGroupId;
pInfo->hasGroupId = true;
appendOneRowToDataBlock(p, pTupleHandle);
} else if (pInfo->groupId == tupleGroupId) {
appendOneRowToDataBlock(p, pTupleHandle);
} else {
pInfo->prefetchedTuple = pTupleHandle;
break;
}
if (p->info.rows >= capacity) {
break;
}
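
The rewritten loop above emits rows for one group at a time: when the next sorted tuple carries a different group id, it is parked in prefetchedTuple and the current block is cut there, so the parked tuple seeds the next call. A simplified sketch of that peek-ahead pattern, using invented names (DemoTuple, demoNextTuple, demoAppendRow) rather than the executor's real API:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative tuple tagged with a group id; the source yields them in sorted order, NULL when done. */
typedef struct {
  uint64_t groupId;
} DemoTuple;

typedef struct {
  DemoTuple *prefetched;   /* tuple read ahead that belongs to the next group */
  bool       hasGroupId;
  uint64_t   groupId;      /* group currently being emitted */
} DemoGroupReader;

extern DemoTuple *demoNextTuple(void *iter);
extern void       demoAppendRow(void *block, DemoTuple *t);

/* Fill one output block with rows from a single group (up to capacity rows). */
static int32_t demoReadOneGroup(DemoGroupReader *r, void *iter, void *block, int32_t capacity) {
  int32_t rows = 0;
  while (rows < capacity) {
    DemoTuple *t = r->prefetched ? r->prefetched : demoNextTuple(iter);
    r->prefetched = NULL;
    if (t == NULL) break;                   /* input exhausted */

    if (!r->hasGroupId) {                   /* very first tuple: start the first group */
      r->groupId = t->groupId;
      r->hasGroupId = true;
    } else if (t->groupId != r->groupId) {  /* group boundary: keep the tuple for the next call */
      r->prefetched = t;
      r->groupId = t->groupId;
      break;
    }
    demoAppendRow(block, t);
    ++rows;
  }
  return rows;
}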

@@ -608,7 +629,6 @@ SSDataBlock* doMultiwaySortMerge(SOperatorInfo* pOperator) {

SSDataBlock* pBlock = getMultiwaySortedBlockData(pInfo->pSortHandle, pInfo->binfo.pRes,
pOperator->resultInfo.capacity, pInfo->pColMatchInfo, pOperator);

if (pBlock != NULL) {
pOperator->resultInfo.totalRows += pBlock->info.rows;
} else {

@@ -752,6 +752,10 @@ void* tsortGetValue(STupleHandle* pVHandle, int32_t colIndex) {
}
}

uint64_t tsortGetGroupId(STupleHandle* pVHandle) {
return pVHandle->pBlock->info.groupId;
}

SSortExecInfo tsortGetSortExecInfo(SSortHandle* pHandle) {
SSortExecInfo info = {0};

@@ -351,7 +351,7 @@ static int32_t logicScanCopy(const SScanLogicNode* pSrc, SScanLogicNode* pDst) {
COPY_SCALAR_FIELD(watermark);
COPY_SCALAR_FIELD(tsColId);
COPY_SCALAR_FIELD(filesFactor);
CLONE_NODE_LIST_FIELD(pPartTags);
CLONE_NODE_LIST_FIELD(pGroupTags);
return TSDB_CODE_SUCCESS;
}

@@ -401,6 +401,7 @@ static int32_t logicMergeCopy(const SMergeLogicNode* pSrc, SMergeLogicNode* pDst
CLONE_NODE_LIST_FIELD(pInputs);
COPY_SCALAR_FIELD(numOfChannels);
COPY_SCALAR_FIELD(srcGroupId);
COPY_SCALAR_FIELD(groupSort);
return TSDB_CODE_SUCCESS;
}

@@ -436,6 +437,7 @@ static int32_t logicFillCopy(const SFillLogicNode* pSrc, SFillLogicNode* pDst) {
static int32_t logicSortCopy(const SSortLogicNode* pSrc, SSortLogicNode* pDst) {
COPY_BASE_OBJECT_FIELD(node, logicNodeCopy);
CLONE_NODE_LIST_FIELD(pSortKeys);
COPY_SCALAR_FIELD(groupSort);
return TSDB_CODE_SUCCESS;
}

@@ -500,7 +502,7 @@ static int32_t physiTableScanCopy(const STableScanPhysiNode* pSrc, STableScanPhy
COPY_SCALAR_FIELD(ratio);
COPY_SCALAR_FIELD(dataRequired);
CLONE_NODE_LIST_FIELD(pDynamicScanFuncs);
CLONE_NODE_LIST_FIELD(pPartitionTags);
CLONE_NODE_LIST_FIELD(pGroupTags);
COPY_SCALAR_FIELD(interval);
COPY_SCALAR_FIELD(offset);
COPY_SCALAR_FIELD(sliding);

@@ -234,6 +234,8 @@ const char* nodesNodeName(ENodeType type) {
return "PhysiMerge";
case QUERY_NODE_PHYSICAL_PLAN_SORT:
return "PhysiSort";
case QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT:
return "PhysiGroupSort";
case QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL:
return "PhysiHashInterval";
case QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL:

@@ -539,7 +541,7 @@ static const char* jkScanLogicPlanScanPseudoCols = "ScanPseudoCols";
static const char* jkScanLogicPlanTableId = "TableId";
static const char* jkScanLogicPlanTableType = "TableType";
static const char* jkScanLogicPlanTagCond = "TagCond";
static const char* jkScanLogicPlanPartTags = "PartTags";
static const char* jkScanLogicPlanGroupTags = "GroupTags";

static int32_t logicScanNodeToJson(const void* pObj, SJson* pJson) {
const SScanLogicNode* pNode = (const SScanLogicNode*)pObj;

@@ -561,7 +563,7 @@ static int32_t logicScanNodeToJson(const void* pObj, SJson* pJson) {
code = tjsonAddObject(pJson, jkScanLogicPlanTagCond, nodeToJson, pNode->pTagCond);
}
if (TSDB_CODE_SUCCESS == code) {
code = nodeListToJson(pJson, jkScanLogicPlanPartTags, pNode->pPartTags);
code = nodeListToJson(pJson, jkScanLogicPlanGroupTags, pNode->pGroupTags);
}

return code;

@@ -588,7 +590,7 @@ static int32_t jsonToLogicScanNode(const SJson* pJson, void* pObj) {
code = jsonToNodeObject(pJson, jkScanLogicPlanTagCond, &pNode->pTagCond);
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeList(pJson, jkScanLogicPlanPartTags, &pNode->pPartTags);
code = jsonToNodeList(pJson, jkScanLogicPlanGroupTags, &pNode->pGroupTags);
}

return code;

@@ -1430,7 +1432,8 @@ static const char* jkTableScanPhysiPlanTriggerType = "triggerType";
static const char* jkTableScanPhysiPlanWatermark = "watermark";
static const char* jkTableScanPhysiPlanTsColId = "tsColId";
static const char* jkTableScanPhysiPlanFilesFactor = "FilesFactor";
static const char* jkTableScanPhysiPlanPartitionTags = "PartitionTags";
static const char* jkTableScanPhysiPlanGroupTags = "GroupTags";
static const char* jkTableScanPhysiPlanGroupSort = "GroupSort";

static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) {
const STableScanPhysiNode* pNode = (const STableScanPhysiNode*)pObj;

@@ -1485,7 +1488,10 @@ static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) {
code = tjsonAddDoubleToObject(pJson, jkTableScanPhysiPlanFilesFactor, pNode->filesFactor);
}
if (TSDB_CODE_SUCCESS == code) {
code = nodeListToJson(pJson, jkTableScanPhysiPlanPartitionTags, pNode->pPartitionTags);
code = nodeListToJson(pJson, jkTableScanPhysiPlanGroupTags, pNode->pGroupTags);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkTableScanPhysiPlanGroupSort, pNode->groupSort);
}

return code;

@@ -1544,7 +1550,10 @@ static int32_t jsonToPhysiTableScanNode(const SJson* pJson, void* pObj) {
code = tjsonGetDoubleValue(pJson, jkTableScanPhysiPlanFilesFactor, &pNode->filesFactor);
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeList(pJson, jkTableScanPhysiPlanPartitionTags, &pNode->pPartitionTags);
code = jsonToNodeList(pJson, jkTableScanPhysiPlanGroupTags, &pNode->pGroupTags);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkTableScanPhysiPlanGroupSort, &pNode->groupSort);
}

return code;

@@ -1725,6 +1734,7 @@ static const char* jkMergePhysiPlanMergeKeys = "MergeKeys";
static const char* jkMergePhysiPlanTargets = "Targets";
static const char* jkMergePhysiPlanNumOfChannels = "NumOfChannels";
static const char* jkMergePhysiPlanSrcGroupId = "SrcGroupId";
static const char* jkMergePhysiPlanGroupSort = "GroupSort";

static int32_t physiMergeNodeToJson(const void* pObj, SJson* pJson) {
const SMergePhysiNode* pNode = (const SMergePhysiNode*)pObj;

@@ -1742,6 +1752,9 @@ static int32_t physiMergeNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkMergePhysiPlanSrcGroupId, pNode->srcGroupId);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkMergePhysiPlanGroupSort, pNode->groupSort);
}

return code;
}

@@ -1762,6 +1775,9 @@ static int32_t jsonToPhysiMergeNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetIntValue(pJson, jkMergePhysiPlanSrcGroupId, &pNode->srcGroupId);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkMergePhysiPlanGroupSort, &pNode->groupSort);
}

return code;
}

@@ -3369,6 +3385,7 @@ static const char* jkSlotDescSlotId = "SlotId";
static const char* jkSlotDescDataType = "DataType";
static const char* jkSlotDescReserve = "Reserve";
static const char* jkSlotDescOutput = "Output";
static const char* jkSlotDescName = "Name";

static int32_t slotDescNodeToJson(const void* pObj, SJson* pJson) {
const SSlotDescNode* pNode = (const SSlotDescNode*)pObj;

@@ -3383,6 +3400,9 @@ static int32_t slotDescNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkSlotDescOutput, pNode->output);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddStringToObject(pJson, jkSlotDescName, pNode->name);
}

return code;
}

@@ -3400,6 +3420,9 @@ static int32_t jsonToSlotDescNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkSlotDescOutput, &pNode->output);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetStringValue(pJson, jkSlotDescName, pNode->name);
}

return code;
}

@@ -4137,6 +4160,7 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
case QUERY_NODE_PHYSICAL_PLAN_MERGE:
return physiMergeNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_SORT:
case QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT:
return physiSortNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL:
case QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL:

@@ -4280,6 +4304,7 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
case QUERY_NODE_PHYSICAL_PLAN_MERGE:
return jsonToPhysiMergeNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_SORT:
case QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT:
return jsonToPhysiSortNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL:
case QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL:

@@ -500,7 +500,8 @@ static EDealRes dispatchPhysiPlan(SNode* pNode, ETraversalOrder order, FNodeWalk
}
break;
}
case QUERY_NODE_PHYSICAL_PLAN_SORT: {
case QUERY_NODE_PHYSICAL_PLAN_SORT:
case QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT: {
SSortPhysiNode* pSort = (SSortPhysiNode*)pNode;
res = walkPhysiNode((SPhysiNode*)pNode, order, walker, pContext);
if (DEAL_RES_ERROR != res && DEAL_RES_END != res) {

@@ -288,6 +288,8 @@ SNode* nodesMakeNode(ENodeType type) {
return makeNode(type, sizeof(SMergePhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_SORT:
return makeNode(type, sizeof(SSortPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT:
return makeNode(type, sizeof(SGroupSortPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL:
return makeNode(type, sizeof(SIntervalPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL:

@@ -709,7 +711,7 @@ void nodesDestroyNode(SNode* pNode) {
nodesDestroyNode(pLogicNode->pTagCond);
nodesDestroyNode(pLogicNode->pTagIndexCond);
taosArrayDestroy(pLogicNode->pSmaIndexes);
nodesDestroyList(pLogicNode->pPartTags);
nodesDestroyList(pLogicNode->pGroupTags);
break;
}
case QUERY_NODE_LOGIC_PLAN_JOIN: {

@@ -813,7 +815,7 @@ void nodesDestroyNode(SNode* pNode) {
STableScanPhysiNode* pPhyNode = (STableScanPhysiNode*)pNode;
destroyScanPhysiNode((SScanPhysiNode*)pNode);
nodesDestroyList(pPhyNode->pDynamicScanFuncs);
nodesDestroyList(pPhyNode->pPartitionTags);
nodesDestroyList(pPhyNode->pGroupTags);
break;
}
case QUERY_NODE_PHYSICAL_PLAN_PROJECT: {

@@ -850,7 +852,8 @@ void nodesDestroyNode(SNode* pNode) {
nodesDestroyList(pPhyNode->pTargets);
break;
}
case QUERY_NODE_PHYSICAL_PLAN_SORT: {
case QUERY_NODE_PHYSICAL_PLAN_SORT:
case QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT: {
SSortPhysiNode* pPhyNode = (SSortPhysiNode*)pNode;
destroyPhysiNode((SPhysiNode*)pPhyNode);
nodesDestroyList(pPhyNode->pExprs);

@@ -2456,90 +2456,6 @@ static int32_t rewriteUniqueStmt(STranslateContext* pCxt, SSelectStmt* pSelect)
return cxt.pTranslateCxt->errCode;
}

typedef struct SRwriteTailCxt {
STranslateContext* pTranslateCxt;
int64_t limit;
int64_t offset;
} SRwriteTailCxt;

static EDealRes rewriteTailFunc(SNode** pNode, void* pContext) {
SRwriteTailCxt* pCxt = pContext;
if (QUERY_NODE_FUNCTION == nodeType(*pNode)) {
SFunctionNode* pFunc = (SFunctionNode*)*pNode;
if (FUNCTION_TYPE_TAIL == pFunc->funcType) {
pCxt->limit = ((SValueNode*)nodesListGetNode(pFunc->pParameterList, 1))->datum.i;
if (3 == LIST_LENGTH(pFunc->pParameterList)) {
pCxt->offset = ((SValueNode*)nodesListGetNode(pFunc->pParameterList, 2))->datum.i;
}
SNode* pExpr = nodesListGetNode(pFunc->pParameterList, 0);
strcpy(((SExprNode*)pExpr)->aliasName, ((SExprNode*)*pNode)->aliasName);
NODES_CLEAR_LIST(pFunc->pParameterList);
nodesDestroyNode(*pNode);
*pNode = pExpr;
return DEAL_RES_IGNORE_CHILD;
}
}
return DEAL_RES_CONTINUE;
}

static int32_t createLimieNode(SRwriteTailCxt* pCxt, SLimitNode** pOutput) {
*pOutput = (SLimitNode*)nodesMakeNode(QUERY_NODE_LIMIT);
if (NULL == *pOutput) {
return TSDB_CODE_OUT_OF_MEMORY;
}
(*pOutput)->limit = pCxt->limit;
(*pOutput)->offset = pCxt->offset;
return TSDB_CODE_SUCCESS;
}

static SNode* createOrderByExpr(STranslateContext* pCxt) {
SOrderByExprNode* pOrder = (SOrderByExprNode*)nodesMakeNode(QUERY_NODE_ORDER_BY_EXPR);
if (NULL == pOrder) {
return NULL;
}
pCxt->errCode = createPrimaryKeyCol(pCxt, &pOrder->pExpr);
if (TSDB_CODE_SUCCESS != pCxt->errCode) {
nodesDestroyNode((SNode*)pOrder);
return NULL;
}
pOrder->order = ORDER_DESC;
pOrder->nullOrder = NULL_ORDER_FIRST;
return (SNode*)pOrder;
}

/* case 1:
* in: select tail(expr, k, f) from t where_clause
* out: select expr from t where_clause order by _rowts desc limit k offset f
*
* case 2:
* in: select tail(expr, k, f) from t where_clause partition_by_clause
* out: select expr from t where_clause partition_by_clause sort by _rowts desc limit k offset f
*
* case 3:
* in: select tail(expr, k, f) from t where_clause order_by_clause limit_clause
* out: select expr from (
* select expr from t where_clause order by _rowts desc limit k offset f
* ) order_by_clause limit_clause
*
* case 4:
* in: select tail(expr, k, f) from t where_clause partition_by_clause limit_clause
* out:
*/
static int32_t rewriteTailStmt(STranslateContext* pCxt, SSelectStmt* pSelect) {
if (!pSelect->hasTailFunc) {
return TSDB_CODE_SUCCESS;
}

SRwriteTailCxt cxt = {.pTranslateCxt = pCxt, .limit = -1, .offset = -1};
nodesRewriteExprs(pSelect->pProjectionList, rewriteTailFunc, &cxt);
int32_t code = nodesListMakeStrictAppend(&pSelect->pOrderByList, createOrderByExpr(pCxt));
if (TSDB_CODE_SUCCESS == code) {
code = createLimieNode(&cxt, &pSelect->pLimit);
}
pSelect->hasIndefiniteRowsFunc = false;
return code;
}

typedef struct SReplaceOrderByAliasCxt {
STranslateContext* pTranslateCxt;
SNodeList* pProjectionList;

@@ -2616,9 +2532,6 @@ static int32_t translateSelectFrom(STranslateContext* pCxt, SSelectStmt* pSelect
if (TSDB_CODE_SUCCESS == code) {
code = rewriteUniqueStmt(pCxt, pSelect);
}
// if (TSDB_CODE_SUCCESS == code) {
// code = rewriteTailStmt(pCxt, pSelect);
// }
if (TSDB_CODE_SUCCESS == code) {
code = rewriteTimelineFunc(pCxt, pSelect);
}

@@ -507,6 +507,8 @@ static int32_t createIndefRowsFuncLogicNode(SLogicPlanContext* pCxt, SSelectStmt
return TSDB_CODE_OUT_OF_MEMORY;
}

pIdfRowsFunc->isTailFunc = pSelect->hasTailFunc;

// indefinite rows functions and _select_values functions
int32_t code = nodesCollectFuncs(pSelect, SQL_CLAUSE_SELECT, fmIsVectorFunc, &pIdfRowsFunc->pFuncs);
if (TSDB_CODE_SUCCESS == code) {

@@ -733,6 +735,8 @@ static int32_t createSortLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
return TSDB_CODE_OUT_OF_MEMORY;
}

pSort->groupSort = pSelect->groupSort;

int32_t code = nodesCollectColumns(pSelect, SQL_CLAUSE_ORDER_BY, NULL, COLLECT_COL_TYPE_ALL, &pSort->node.pTargets);
if (TSDB_CODE_SUCCESS == code && NULL == pSort->node.pTargets) {
code = nodesListMakeStrictAppend(&pSort->node.pTargets,

File diff suppressed because it is too large

@@ -41,8 +41,12 @@ typedef struct SPhysiPlanContext {
static int32_t getSlotKey(SNode* pNode, const char* pStmtName, char* pKey) {
if (QUERY_NODE_COLUMN == nodeType(pNode)) {
SColumnNode* pCol = (SColumnNode*)pNode;
if (NULL != pStmtName && '\0' != pStmtName[0]) {
if (NULL != pStmtName) {
if ('\0' != pStmtName[0]) {
return sprintf(pKey, "%s.%s", pStmtName, pCol->node.aliasName);
} else {
return sprintf(pKey, "%s", pCol->node.aliasName);
}
}
if ('\0' == pCol->tableAlias[0]) {
return sprintf(pKey, "%s", pCol->colName);

@@ -56,11 +60,13 @@ static int32_t getSlotKey(SNode* pNode, const char* pStmtName, char* pKey) {
return sprintf(pKey, "%s", ((SExprNode*)pNode)->aliasName);
}

static SNode* createSlotDesc(SPhysiPlanContext* pCxt, const SNode* pNode, int16_t slotId, bool output, bool reserve) {
static SNode* createSlotDesc(SPhysiPlanContext* pCxt, const char* pName, const SNode* pNode, int16_t slotId,
bool output, bool reserve) {
SSlotDescNode* pSlot = (SSlotDescNode*)nodesMakeNode(QUERY_NODE_SLOT_DESC);
if (NULL == pSlot) {
return NULL;
}
strcpy(pSlot->name, pName);
pSlot->slotId = slotId;
pSlot->dataType = ((SExprNode*)pNode)->resType;
pSlot->reserve = reserve;

@@ -99,10 +105,8 @@ static int32_t putSlotToHashImpl(int16_t dataBlockId, int16_t slotId, const char
return taosHashPut(pHash, pName, len, &index, sizeof(SSlotIndex));
}

static int32_t putSlotToHash(int16_t dataBlockId, int16_t slotId, SNode* pNode, SHashObj* pHash) {
char name[TSDB_TABLE_NAME_LEN + TSDB_COL_NAME_LEN];
int32_t len = getSlotKey(pNode, NULL, name);
return putSlotToHashImpl(dataBlockId, slotId, name, len, pHash);
static int32_t putSlotToHash(const char* pName, int16_t dataBlockId, int16_t slotId, SNode* pNode, SHashObj* pHash) {
return putSlotToHashImpl(dataBlockId, slotId, pName, strlen(pName), pHash);
}

static int32_t createDataBlockDescHash(SPhysiPlanContext* pCxt, int32_t capacity, int16_t dataBlockId,

@@ -131,9 +135,11 @@ static int32_t buildDataBlockSlots(SPhysiPlanContext* pCxt, SNodeList* pList, SD
int16_t slotId = 0;
SNode* pNode = NULL;
FOREACH(pNode, pList) {
code = nodesListStrictAppend(pDataBlockDesc->pSlots, createSlotDesc(pCxt, pNode, slotId, true, false));
char name[TSDB_TABLE_NAME_LEN + TSDB_COL_NAME_LEN];
getSlotKey(pNode, NULL, name);
code = nodesListStrictAppend(pDataBlockDesc->pSlots, createSlotDesc(pCxt, name, pNode, slotId, true, false));
if (TSDB_CODE_SUCCESS == code) {
code = putSlotToHash(pDataBlockDesc->dataBlockId, slotId, pNode, pHash);
code = putSlotToHash(name, pDataBlockDesc->dataBlockId, slotId, pNode, pHash);
}
if (TSDB_CODE_SUCCESS == code) {
pDataBlockDesc->totalRowSize += ((SExprNode*)pNode)->resType.bytes;

@@ -196,7 +202,8 @@ static int32_t addDataBlockSlotsImpl(SPhysiPlanContext* pCxt, SNodeList* pList,
int32_t len = getSlotKey(pExpr, pStmtName, name);
SSlotIndex* pIndex = taosHashGet(pHash, name, len);
if (NULL == pIndex) {
code = nodesListStrictAppend(pDataBlockDesc->pSlots, createSlotDesc(pCxt, pExpr, nextSlotId, output, reserve));
code =
nodesListStrictAppend(pDataBlockDesc->pSlots, createSlotDesc(pCxt, name, pExpr, nextSlotId, output, reserve));
if (TSDB_CODE_SUCCESS == code) {
code = putSlotToHashImpl(pDataBlockDesc->dataBlockId, nextSlotId, name, len, pHash);
}

@@ -513,12 +520,13 @@ static int32_t createTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubp
tNameGetFullDbName(&pScanLogicNode->tableName, pSubplan->dbFName);
pTableScan->dataRequired = pScanLogicNode->dataRequired;
pTableScan->pDynamicScanFuncs = nodesCloneList(pScanLogicNode->pDynamicScanFuncs);
pTableScan->pPartitionTags = nodesCloneList(pScanLogicNode->pPartTags);
pTableScan->pGroupTags = nodesCloneList(pScanLogicNode->pGroupTags);
if ((NULL != pScanLogicNode->pDynamicScanFuncs && NULL == pTableScan->pDynamicScanFuncs) ||
(NULL != pScanLogicNode->pPartTags && NULL == pTableScan->pPartitionTags)) {
(NULL != pScanLogicNode->pGroupTags && NULL == pTableScan->pGroupTags)) {
nodesDestroyNode((SNode*)pTableScan);
return TSDB_CODE_OUT_OF_MEMORY;
}
pTableScan->groupSort = pScanLogicNode->groupSort;
pTableScan->interval = pScanLogicNode->interval;
pTableScan->offset = pScanLogicNode->offset;
pTableScan->sliding = pScanLogicNode->sliding;

@@ -1170,8 +1178,9 @@ static int32_t createWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildr

static int32_t createSortPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren, SSortLogicNode* pSortLogicNode,
SPhysiNode** pPhyNode) {
SSortPhysiNode* pSort =
(SSortPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pSortLogicNode, QUERY_NODE_PHYSICAL_PLAN_SORT);
SSortPhysiNode* pSort = (SSortPhysiNode*)makePhysiNode(
pCxt, (SLogicNode*)pSortLogicNode,
pSortLogicNode->groupSort ? QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT : QUERY_NODE_PHYSICAL_PLAN_SORT);
if (NULL == pSort) {
return TSDB_CODE_OUT_OF_MEMORY;
}

@@ -1185,7 +1194,7 @@ static int32_t createSortPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren
if (TSDB_CODE_SUCCESS == code && NULL != pPrecalcExprs) {
code = setListSlotId(pCxt, pChildTupe->dataBlockId, -1, pPrecalcExprs, &pSort->pExprs);
if (TSDB_CODE_SUCCESS == code) {
code = addDataBlockSlots(pCxt, pSort->pExprs, pChildTupe);
code = pushdownDataBlockSlots(pCxt, pSort->pExprs, pChildTupe);
}
}

@@ -1322,6 +1331,7 @@ static int32_t createMergePhysiNode(SPhysiPlanContext* pCxt, SMergeLogicNode* pM

pMerge->numOfChannels = pMergeLogicNode->numOfChannels;
pMerge->srcGroupId = pMergeLogicNode->srcGroupId;
pMerge->groupSort = pMergeLogicNode->groupSort;

int32_t code = addDataBlockSlots(pCxt, pMergeLogicNode->pInputs, pMerge->node.pOutputDataBlockDesc);

@@ -362,7 +362,7 @@ static int32_t stbSplGetNumOfVgroups(SLogicNode* pNode) {
}

static int32_t stbSplCreateMergeNode(SSplitContext* pCxt, SLogicSubplan* pSubplan, SLogicNode* pSplitNode,
SNodeList* pMergeKeys, SLogicNode* pPartChild) {
SNodeList* pMergeKeys, SLogicNode* pPartChild, bool groupSort) {
SMergeLogicNode* pMerge = (SMergeLogicNode*)nodesMakeNode(QUERY_NODE_LOGIC_PLAN_MERGE);
if (NULL == pMerge) {
return TSDB_CODE_OUT_OF_MEMORY;

@@ -371,6 +371,7 @@ static int32_t stbSplCreateMergeNode(SSplitContext* pCxt, SLogicSubplan* pSubpla
pMerge->srcGroupId = pCxt->groupId;
pMerge->node.precision = pPartChild->precision;
pMerge->pMergeKeys = pMergeKeys;
pMerge->groupSort = groupSort;

int32_t code = TSDB_CODE_SUCCESS;
pMerge->pInputs = nodesCloneList(pPartChild->pTargets);

@@ -430,7 +431,7 @@ static int32_t stbSplSplitIntervalForBatch(SSplitContext* pCxt, SStableSplitInfo
SNodeList* pMergeKeys = NULL;
code = stbSplCreateMergeKeysByPrimaryKey(((SWindowLogicNode*)pInfo->pSplitNode)->pTspk, &pMergeKeys);
if (TSDB_CODE_SUCCESS == code) {
code = stbSplCreateMergeNode(pCxt, NULL, pInfo->pSplitNode, pMergeKeys, pPartWindow);
code = stbSplCreateMergeNode(pCxt, NULL, pInfo->pSplitNode, pMergeKeys, pPartWindow, false);
}
if (TSDB_CODE_SUCCESS != code) {
nodesDestroyList(pMergeKeys);

@@ -497,12 +498,16 @@ static int32_t stbSplSplitSessionForStream(SSplitContext* pCxt, SStableSplitInfo
return code;
}

static void splSetTableScanType(SLogicNode* pNode, EScanType scanType) {
static void stbSplSetTableMergeScan(SLogicNode* pNode) {
if (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pNode)) {
((SScanLogicNode*)pNode)->scanType = scanType;
SScanLogicNode* pScan = (SScanLogicNode*)pNode;
pScan->scanType = SCAN_TYPE_TABLE_MERGE;
if (NULL != pScan->pGroupTags) {
pScan->groupSort = true;
}
} else {
if (1 == LIST_LENGTH(pNode->pChildren)) {
splSetTableScanType((SLogicNode*)nodesListGetNode(pNode->pChildren, 0), scanType);
stbSplSetTableMergeScan((SLogicNode*)nodesListGetNode(pNode->pChildren, 0));
}
}
}

@@ -515,7 +520,7 @@ static int32_t stbSplSplitSessionOrStateForBatch(SSplitContext* pCxt, SStableSpl
int32_t code = stbSplCreateMergeKeysByPrimaryKey(((SWindowLogicNode*)pWindow)->pTspk, &pMergeKeys);

if (TSDB_CODE_SUCCESS == code) {
code = stbSplCreateMergeNode(pCxt, pInfo->pSubplan, pChild, pMergeKeys, (SLogicNode*)pChild);
code = stbSplCreateMergeNode(pCxt, pInfo->pSubplan, pChild, pMergeKeys, (SLogicNode*)pChild, true);
}

if (TSDB_CODE_SUCCESS == code) {

@@ -524,13 +529,10 @@ static int32_t stbSplSplitSessionOrStateForBatch(SSplitContext* pCxt, SStableSpl
}

if (TSDB_CODE_SUCCESS == code) {
splSetTableScanType(pChild, SCAN_TYPE_TABLE_MERGE);
++(pCxt->groupId);
}

if (TSDB_CODE_SUCCESS == code) {
stbSplSetTableMergeScan(pChild);
pInfo->pSubplan->subplanType = SUBPLAN_TYPE_MERGE;
SPLIT_FLAG_SET_MASK(pInfo->pSubplan->splitFlag, SPLIT_FLAG_STABLE_SPLIT);
++(pCxt->groupId);
} else {
nodesDestroyList(pMergeKeys);
}

@@ -560,7 +562,7 @@ static int32_t stbSplSplitState(SSplitContext* pCxt, SStableSplitInfo* pInfo) {

static SNodeList* stbSplGetPartKeys(SLogicNode* pNode) {
if (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pNode)) {
return ((SScanLogicNode*)pNode)->pPartTags;
return ((SScanLogicNode*)pNode)->pGroupTags;
} else if (QUERY_NODE_LOGIC_PLAN_PARTITION == nodeType(pNode)) {
return ((SPartitionLogicNode*)pNode)->pPartitionKeys;
} else {

@@ -775,6 +777,7 @@ static int32_t stbSplCreatePartSortNode(SSortLogicNode* pSort, SLogicNode** pOut
pPartSort->node.pChildren = pChildren;
splSetParent((SLogicNode*)pPartSort);
pPartSort->pSortKeys = pSortKeys;
pPartSort->groupSort = pSort->groupSort;
code = stbSplCreateMergeKeys(pPartSort->pSortKeys, pPartSort->node.pTargets, &pMergeKeys);
}

@@ -789,12 +792,29 @@ static int32_t stbSplCreatePartSortNode(SSortLogicNode* pSort, SLogicNode** pOut
return code;
}

static void stbSplSetScanPartSort(SLogicNode* pNode) {
if (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pNode)) {
SScanLogicNode* pScan = (SScanLogicNode*)pNode;
if (NULL != pScan->pGroupTags) {
pScan->groupSort = true;
}
} else {
if (1 == LIST_LENGTH(pNode->pChildren)) {
stbSplSetScanPartSort((SLogicNode*)nodesListGetNode(pNode->pChildren, 0));
}
}
}

static int32_t stbSplSplitSortNode(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
SLogicNode* pPartSort = NULL;
SNodeList* pMergeKeys = NULL;
bool groupSort = ((SSortLogicNode*)pInfo->pSplitNode)->groupSort;
int32_t code = stbSplCreatePartSortNode((SSortLogicNode*)pInfo->pSplitNode, &pPartSort, &pMergeKeys);
if (TSDB_CODE_SUCCESS == code) {
code = stbSplCreateMergeNode(pCxt, pInfo->pSubplan, pInfo->pSplitNode, pMergeKeys, pPartSort);
code = stbSplCreateMergeNode(pCxt, pInfo->pSubplan, pInfo->pSplitNode, pMergeKeys, pPartSort, groupSort);
}
if (TSDB_CODE_SUCCESS == code && groupSort) {
stbSplSetScanPartSort(pPartSort);
}
if (TSDB_CODE_SUCCESS == code) {
code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren,

@@ -829,7 +849,7 @@ static int32_t stbSplSplitScanNodeForJoin(SSplitContext* pCxt, SLogicSubplan* pS
SNodeList* pMergeKeys = NULL;
int32_t code = stbSplCreateMergeKeysByPrimaryKey(stbSplFindPrimaryKeyFromScan(pScan), &pMergeKeys);
if (TSDB_CODE_SUCCESS == code) {
code = stbSplCreateMergeNode(pCxt, pSubplan, (SLogicNode*)pScan, pMergeKeys, (SLogicNode*)pScan);
code = stbSplCreateMergeNode(pCxt, pSubplan, (SLogicNode*)pScan, pMergeKeys, (SLogicNode*)pScan, false);
}
if (TSDB_CODE_SUCCESS == code) {
code = nodesListMakeStrictAppend(&pSubplan->pChildren,

@@ -67,6 +67,14 @@ TEST_F(PlanBasicTest, tailFunc) {
run("SELECT TAIL(c1, 10) FROM t1");

run("SELECT TAIL(c2 + 10, 10, 80) FROM t1 WHERE c1 > 10");

run("SELECT TAIL(c2 + 10, 10, 80) FROM t1 WHERE c1 > 10 PARTITION BY c1");

run("SELECT TAIL(c2 + 10, 10, 80) FROM t1 WHERE c1 > 10 ORDER BY 1");

run("SELECT TAIL(c2 + 10, 10, 80) FROM t1 WHERE c1 > 10 LIMIT 5");

run("SELECT TAIL(c2 + 10, 10, 80) FROM t1 WHERE c1 > 10 PARTITION BY c1 LIMIT 5");
}

TEST_F(PlanBasicTest, interpFunc) {

@@ -76,6 +76,7 @@ static void parseArg(int argc, char* argv[]) {
static struct option long_options[] = {
{"dump", optional_argument, NULL, 'd'},
{"skipSql", required_argument, NULL, 's'},
{"limitSql", required_argument, NULL, 'i'},
{"log", required_argument, NULL, 'l'},
{0, 0, 0, 0}
};

@@ -88,6 +89,9 @@ static void parseArg(int argc, char* argv[]) {
case 's':
setSkipSqlNum(optarg);
break;
case 'i':
setLimitSqlNum(optarg);
break;
case 'l':
setLogLevel(optarg);
break;

@@ -51,6 +51,7 @@ enum DumpModule {

DumpModule g_dumpModule = DUMP_MODULE_NOTHING;
int32_t g_skipSql = 0;
int32_t g_limitSql = 0;
int32_t g_logLevel = 131;

void setDumpModule(const char* pModule) {

@@ -76,28 +77,33 @@ void setDumpModule(const char* pModule) {
}

void setSkipSqlNum(const char* pNum) { g_skipSql = stoi(pNum); }

void setLimitSqlNum(const char* pNum) { g_limitSql = stoi(pNum); }
void setLogLevel(const char* pLogLevel) { g_logLevel = stoi(pLogLevel); }

int32_t getLogLevel() { return g_logLevel; }

class PlannerTestBaseImpl {
public:
PlannerTestBaseImpl() : sqlNo_(0) {}
PlannerTestBaseImpl() : sqlNo_(0), sqlNum_(0) {}

void useDb(const string& user, const string& db) {
caseEnv_.acctId_ = 0;
caseEnv_.user_ = user;
caseEnv_.db_ = db;
caseEnv_.nsql_ = g_skipSql;
caseEnv_.numOfSkipSql_ = g_skipSql;
caseEnv_.numOfLimitSql_ = g_limitSql;
}

void run(const string& sql) {
++sqlNo_;
if (caseEnv_.nsql_ > 0) {
--(caseEnv_.nsql_);
if (caseEnv_.numOfSkipSql_ > 0) {
--(caseEnv_.numOfSkipSql_);
return;
}
if (caseEnv_.numOfLimitSql_ > 0 && caseEnv_.numOfLimitSql_ == sqlNum_) {
return;
}
++sqlNum_;

reset();
try {

@@ -134,7 +140,7 @@ class PlannerTestBaseImpl {
}

void prepare(const string& sql) {
if (caseEnv_.nsql_ > 0) {
if (caseEnv_.numOfSkipSql_ > 0) {
return;
}

@@ -148,7 +154,7 @@ class PlannerTestBaseImpl {
}

void bindParams(TAOS_MULTI_BIND* pParams, int32_t colIdx) {
if (caseEnv_.nsql_ > 0) {
if (caseEnv_.numOfSkipSql_ > 0) {
return;
}

@@ -161,8 +167,8 @@ class PlannerTestBaseImpl {
}

void exec() {
if (caseEnv_.nsql_ > 0) {
--(caseEnv_.nsql_);
if (caseEnv_.numOfSkipSql_ > 0) {
--(caseEnv_.numOfSkipSql_);
return;
}

@@ -197,9 +203,10 @@ class PlannerTestBaseImpl {
int32_t acctId_;
string user_;
string db_;
int32_t nsql_;
int32_t numOfSkipSql_;
int32_t numOfLimitSql_;

caseEnv() : nsql_(0) {}
caseEnv() : numOfSkipSql_(0) {}
};

struct stmtEnv {

@@ -401,6 +408,7 @@ class PlannerTestBaseImpl {
stmtEnv stmtEnv_;
stmtRes res_;
int32_t sqlNo_;
int32_t sqlNum_;
};

PlannerTestBase::PlannerTestBase() : impl_(new PlannerTestBaseImpl()) {}

@@ -43,6 +43,7 @@ class PlannerTestBase : public testing::Test {

extern void setDumpModule(const char* pModule);
extern void setSkipSqlNum(const char* pNum);
extern void setLimitSqlNum(const char* pNum);
extern void setLogLevel(const char* pLogLevel);
extern int32_t getLogLevel();

@@ -56,6 +56,31 @@ void syncEntryPrint2(char* s, const SSyncRaftEntry* pObj);
void syncEntryLog(const SSyncRaftEntry* pObj);
void syncEntryLog2(char* s, const SSyncRaftEntry* pObj);

//-----------------------------------
typedef struct SRaftEntryCache {
SHashObj* pEntryHash;
int32_t maxCount;
int32_t currentCount;
TdThreadMutex mutex;
SSyncNode* pSyncNode;
} SRaftEntryCache;

SRaftEntryCache* raftCacheCreate(SSyncNode* pSyncNode, int32_t maxCount);
void raftCacheDestroy(SRaftEntryCache* pCache);
int32_t raftCachePutEntry(struct SRaftEntryCache* pCache, SSyncRaftEntry* pEntry);
int32_t raftCacheGetEntry(struct SRaftEntryCache* pCache, SyncIndex index, SSyncRaftEntry** ppEntry);
int32_t raftCacheGetEntryP(struct SRaftEntryCache* pCache, SyncIndex index, SSyncRaftEntry** ppEntry);
int32_t raftCacheDelEntry(struct SRaftEntryCache* pCache, SyncIndex index);
int32_t raftCacheGetAndDel(struct SRaftEntryCache* pCache, SyncIndex index, SSyncRaftEntry** ppEntry);
int32_t raftCacheClear(struct SRaftEntryCache* pCache);

cJSON* raftCache2Json(SRaftEntryCache* pObj);
char* raftCache2Str(SRaftEntryCache* pObj);
void raftCachePrint(SRaftEntryCache* pObj);
void raftCachePrint2(char* s, SRaftEntryCache* pObj);
void raftCacheLog(SRaftEntryCache* pObj);
void raftCacheLog2(char* s, SRaftEntryCache* pObj);

#ifdef __cplusplus
}
#endif
|
||||
|
|
|
@ -1355,11 +1355,16 @@ void syncNodeEventLog(const SSyncNode* pSyncNode, char* str) {
|
|||
int32_t userStrLen = strlen(str);
|
||||
|
||||
SSnapshot snapshot = {.data = NULL, .lastApplyIndex = -1, .lastApplyTerm = 0};
|
||||
if (pSyncNode->pFsm->FpGetSnapshotInfo != NULL) {
|
||||
if (pSyncNode->pFsm != NULL && pSyncNode->pFsm->FpGetSnapshotInfo != NULL) {
|
||||
pSyncNode->pFsm->FpGetSnapshotInfo(pSyncNode->pFsm, &snapshot);
|
||||
}
|
||||
SyncIndex logLastIndex = pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore);
|
||||
SyncIndex logBeginIndex = pSyncNode->pLogStore->syncLogBeginIndex(pSyncNode->pLogStore);
|
||||
|
||||
SyncIndex logLastIndex = SYNC_INDEX_INVALID;
|
||||
SyncIndex logBeginIndex = SYNC_INDEX_INVALID;
|
||||
if (pSyncNode->pLogStore != NULL) {
|
||||
logLastIndex = pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore);
|
||||
logBeginIndex = pSyncNode->pLogStore->syncLogBeginIndex(pSyncNode->pLogStore);
|
||||
}
|
||||
|
||||
char* pCfgStr = syncCfg2SimpleStr(&(pSyncNode->pRaftCfg->cfg));
|
||||
char* printStr = "";
|
||||
|
|
|
@ -180,3 +180,247 @@ void syncEntryLog2(char* s, const SSyncRaftEntry* pObj) {
|
|||
sTrace("syncEntryLog2 | len:%zu | %s | %s", strlen(serialized), s, serialized);
|
||||
taosMemoryFree(serialized);
|
||||
}
|
||||
|
||||
//-----------------------------------
|
||||
SRaftEntryCache* raftCacheCreate(SSyncNode* pSyncNode, int32_t maxCount) {
|
||||
SRaftEntryCache* pCache = taosMemoryMalloc(sizeof(SRaftEntryCache));
|
||||
if (pCache == NULL) {
|
||||
sError("vgId:%d raft cache create error", pSyncNode->vgId);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
pCache->pEntryHash =
|
||||
taosHashInit(sizeof(SyncIndex), taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
|
||||
if (pCache->pEntryHash == NULL) {
|
||||
sError("vgId:%d raft cache create hash error", pSyncNode->vgId);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
taosThreadMutexInit(&(pCache->mutex), NULL);
|
||||
pCache->maxCount = maxCount;
|
||||
pCache->currentCount = 0;
|
||||
pCache->pSyncNode = pSyncNode;
|
||||
|
||||
return pCache;
|
||||
}
|
||||
|
||||
void raftCacheDestroy(SRaftEntryCache* pCache) {
|
||||
if (pCache != NULL) {
|
||||
taosThreadMutexLock(&(pCache->mutex));
|
||||
taosHashCleanup(pCache->pEntryHash);
|
||||
taosThreadMutexUnlock(&(pCache->mutex));
|
||||
taosThreadMutexDestroy(&(pCache->mutex));
|
||||
taosMemoryFree(pCache);
|
||||
}
|
||||
}
|
||||
|
||||
// success, return 1
|
||||
// max count, return 0
|
||||
// error, return -1
|
||||
int32_t raftCachePutEntry(struct SRaftEntryCache* pCache, SSyncRaftEntry* pEntry) {
|
||||
taosThreadMutexLock(&(pCache->mutex));
|
||||
|
||||
if (pCache->currentCount >= pCache->maxCount) {
|
||||
taosThreadMutexUnlock(&(pCache->mutex));
|
||||
return 0;
|
||||
}
|
||||
|
||||
taosHashPut(pCache->pEntryHash, &(pEntry->index), sizeof(pEntry->index), pEntry, pEntry->bytes);
|
||||
++(pCache->currentCount);
|
||||
|
||||
do {
|
||||
char eventLog[128];
|
||||
snprintf(eventLog, sizeof(eventLog), "raft cache add, type:%s,%d, type2:%s,%d, index:%ld, bytes:%d",
|
||||
TMSG_INFO(pEntry->msgType), pEntry->msgType, TMSG_INFO(pEntry->originalRpcType), pEntry->originalRpcType,
|
||||
pEntry->index, pEntry->bytes);
|
||||
syncNodeEventLog(pCache->pSyncNode, eventLog);
|
||||
} while (0);
|
||||
|
||||
taosThreadMutexUnlock(&(pCache->mutex));
|
||||
return 1;
|
||||
}
|
||||
|
||||
// success, return 0
|
||||
// error, return -1
|
||||
// not exist, return -1, terrno = TSDB_CODE_WAL_LOG_NOT_EXIST
|
||||
int32_t raftCacheGetEntry(struct SRaftEntryCache* pCache, SyncIndex index, SSyncRaftEntry** ppEntry) {
|
||||
if (ppEntry == NULL) {
|
||||
return -1;
|
||||
}
|
||||
*ppEntry = NULL;
|
||||
|
||||
taosThreadMutexLock(&(pCache->mutex));
|
||||
void* pTmp = taosHashGet(pCache->pEntryHash, &index, sizeof(index));
|
||||
if (pTmp != NULL) {
|
||||
SSyncRaftEntry* pEntry = pTmp;
|
||||
*ppEntry = taosMemoryMalloc(pEntry->bytes);
|
||||
memcpy(*ppEntry, pTmp, pEntry->bytes);
|
||||
|
||||
do {
|
||||
char eventLog[128];
|
||||
snprintf(eventLog, sizeof(eventLog), "raft cache get, type:%s,%d, type2:%s,%d, index:%ld",
|
||||
TMSG_INFO((*ppEntry)->msgType), (*ppEntry)->msgType, TMSG_INFO((*ppEntry)->originalRpcType),
|
||||
(*ppEntry)->originalRpcType, (*ppEntry)->index);
|
||||
syncNodeEventLog(pCache->pSyncNode, eventLog);
|
||||
} while (0);
|
||||
|
||||
taosThreadMutexUnlock(&(pCache->mutex));
|
||||
return 0;
|
||||
}
|
||||
|
||||
taosThreadMutexUnlock(&(pCache->mutex));
|
||||
terrno = TSDB_CODE_WAL_LOG_NOT_EXIST;
|
||||
return -1;
|
||||
}
|
||||
|
||||
// success, return 0
|
||||
// error, return -1
|
||||
// not exist, return -1, terrno = TSDB_CODE_WAL_LOG_NOT_EXIST
|
||||
int32_t raftCacheGetEntryP(struct SRaftEntryCache* pCache, SyncIndex index, SSyncRaftEntry** ppEntry) {
|
||||
if (ppEntry == NULL) {
|
||||
return -1;
|
||||
}
|
||||
*ppEntry = NULL;
|
||||
|
||||
taosThreadMutexLock(&(pCache->mutex));
|
||||
void* pTmp = taosHashGet(pCache->pEntryHash, &index, sizeof(index));
|
||||
if (pTmp != NULL) {
|
||||
SSyncRaftEntry* pEntry = pTmp;
|
||||
*ppEntry = pEntry;
|
||||
|
||||
do {
|
||||
char eventLog[128];
|
||||
snprintf(eventLog, sizeof(eventLog), "raft cache get, type:%s,%d, type2:%s,%d, index:%ld",
|
||||
TMSG_INFO((*ppEntry)->msgType), (*ppEntry)->msgType, TMSG_INFO((*ppEntry)->originalRpcType),
|
||||
(*ppEntry)->originalRpcType, (*ppEntry)->index);
|
||||
syncNodeEventLog(pCache->pSyncNode, eventLog);
|
||||
} while (0);
|
||||
|
||||
taosThreadMutexUnlock(&(pCache->mutex));
|
||||
return 0;
|
||||
}
|
||||
|
||||
taosThreadMutexUnlock(&(pCache->mutex));
|
||||
terrno = TSDB_CODE_WAL_LOG_NOT_EXIST;
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t raftCacheDelEntry(struct SRaftEntryCache* pCache, SyncIndex index) {
|
||||
taosThreadMutexLock(&(pCache->mutex));
|
||||
taosHashRemove(pCache->pEntryHash, &index, sizeof(index));
|
||||
--(pCache->currentCount);
|
||||
taosThreadMutexUnlock(&(pCache->mutex));
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t raftCacheGetAndDel(struct SRaftEntryCache* pCache, SyncIndex index, SSyncRaftEntry** ppEntry) {
|
||||
if (ppEntry == NULL) {
|
||||
return -1;
|
||||
}
|
||||
*ppEntry = NULL;
|
||||
|
||||
taosThreadMutexLock(&(pCache->mutex));
|
||||
void* pTmp = taosHashGet(pCache->pEntryHash, &index, sizeof(index));
|
||||
if (pTmp != NULL) {
|
||||
SSyncRaftEntry* pEntry = pTmp;
|
||||
*ppEntry = taosMemoryMalloc(pEntry->bytes);
|
||||
memcpy(*ppEntry, pTmp, pEntry->bytes);
|
||||
|
||||
do {
|
||||
char eventLog[128];
|
||||
snprintf(eventLog, sizeof(eventLog), "raft cache get-and-del, type:%s,%d, type2:%s,%d, index:%ld",
|
||||
TMSG_INFO((*ppEntry)->msgType), (*ppEntry)->msgType, TMSG_INFO((*ppEntry)->originalRpcType),
|
||||
(*ppEntry)->originalRpcType, (*ppEntry)->index);
|
||||
syncNodeEventLog(pCache->pSyncNode, eventLog);
|
||||
} while (0);
|
||||
|
||||
taosHashRemove(pCache->pEntryHash, &index, sizeof(index));
|
||||
--(pCache->currentCount);
|
||||
|
||||
taosThreadMutexUnlock(&(pCache->mutex));
|
||||
return 0;
|
||||
}
|
||||
|
||||
taosThreadMutexUnlock(&(pCache->mutex));
|
||||
terrno = TSDB_CODE_WAL_LOG_NOT_EXIST;
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t raftCacheClear(struct SRaftEntryCache* pCache) {
|
||||
taosThreadMutexLock(&(pCache->mutex));
|
||||
taosHashClear(pCache->pEntryHash);
|
||||
pCache->currentCount = 0;
|
||||
taosThreadMutexUnlock(&(pCache->mutex));
|
||||
return 0;
|
||||
}
|
||||
|
||||
//-----------------------------------
|
||||
cJSON* raftCache2Json(SRaftEntryCache* pCache) {
|
||||
char u64buf[128] = {0};
|
||||
cJSON* pRoot = cJSON_CreateObject();
|
||||
|
||||
if (pCache != NULL) {
|
||||
taosThreadMutexLock(&(pCache->mutex));
|
||||
|
||||
snprintf(u64buf, sizeof(u64buf), "%p", pCache->pSyncNode);
|
||||
cJSON_AddStringToObject(pRoot, "pSyncNode", u64buf);
|
||||
cJSON_AddNumberToObject(pRoot, "currentCount", pCache->currentCount);
|
||||
cJSON_AddNumberToObject(pRoot, "maxCount", pCache->maxCount);
|
||||
cJSON* pEntries = cJSON_CreateArray();
|
||||
cJSON_AddItemToObject(pRoot, "entries", pEntries);
|
||||
|
||||
SSyncRaftEntry* pIter = (SSyncRaftEntry*)taosHashIterate(pCache->pEntryHash, NULL);
|
||||
if (pIter != NULL) {
|
||||
SSyncRaftEntry* pEntry = (SSyncRaftEntry*)pIter;
|
||||
cJSON_AddItemToArray(pEntries, syncEntry2Json(pEntry));
|
||||
}
|
||||
while (pIter) {
|
||||
pIter = taosHashIterate(pCache->pEntryHash, pIter);
|
||||
if (pIter != NULL) {
|
||||
SSyncRaftEntry* pEntry = (SSyncRaftEntry*)pIter;
|
||||
cJSON_AddItemToArray(pEntries, syncEntry2Json(pEntry));
|
||||
}
|
||||
}
|
||||
|
||||
taosThreadMutexUnlock(&(pCache->mutex));
|
||||
}
|
||||
|
||||
cJSON* pJson = cJSON_CreateObject();
|
||||
cJSON_AddItemToObject(pJson, "SRaftEntryCache", pRoot);
|
||||
return pJson;
|
||||
}
|
||||
|
||||
char* raftCache2Str(SRaftEntryCache* pCache) {
|
||||
cJSON* pJson = raftCache2Json(pCache);
|
||||
char* serialized = cJSON_Print(pJson);
|
||||
cJSON_Delete(pJson);
|
||||
return serialized;
|
||||
}
|
||||
|
||||
void raftCachePrint(SRaftEntryCache* pCache) {
|
||||
char* serialized = raftCache2Str(pCache);
|
||||
printf("raftCachePrint | len:%lu | %s \n", strlen(serialized), serialized);
|
||||
fflush(NULL);
|
||||
taosMemoryFree(serialized);
|
||||
}
|
||||
|
||||
void raftCachePrint2(char* s, SRaftEntryCache* pCache) {
|
||||
char* serialized = raftCache2Str(pCache);
|
||||
printf("raftCachePrint2 | len:%lu | %s | %s \n", strlen(serialized), s, serialized);
|
||||
fflush(NULL);
|
||||
taosMemoryFree(serialized);
|
||||
}
|
||||
|
||||
void raftCacheLog(SRaftEntryCache* pCache) {
|
||||
char* serialized = raftCache2Str(pCache);
|
||||
sTrace("raftCacheLog | len:%lu | %s", strlen(serialized), serialized);
|
||||
taosMemoryFree(serialized);
|
||||
}
|
||||
|
||||
void raftCacheLog2(char* s, SRaftEntryCache* pCache) {
|
||||
if (gRaftDetailLog) {
|
||||
char* serialized = raftCache2Str(pCache);
|
||||
sTraceLong("raftCacheLog2 | len:%lu | %s | %s", strlen(serialized), s, serialized);
|
||||
taosMemoryFree(serialized);
|
||||
}
|
||||
}
|
|
@ -32,12 +32,14 @@ SSyncRespMgr *syncRespMgrCreate(void *data, int64_t ttl) {
|
|||
}
|
||||
|
||||
void syncRespMgrDestroy(SSyncRespMgr *pObj) {
|
||||
if (pObj != NULL) {
|
||||
taosThreadMutexLock(&(pObj->mutex));
|
||||
taosHashCleanup(pObj->pRespHash);
|
||||
taosThreadMutexUnlock(&(pObj->mutex));
|
||||
taosThreadMutexDestroy(&(pObj->mutex));
|
||||
taosMemoryFree(pObj);
|
||||
}
|
||||
}
|
||||
|
||||
int64_t syncRespMgrAdd(SSyncRespMgr *pObj, SRespStub *pStub) {
|
||||
taosThreadMutexLock(&(pObj->mutex));
|
||||
|
|
|
@ -17,6 +17,7 @@ add_executable(syncVotesRespondTest "")
|
|||
add_executable(syncIndexMgrTest "")
|
||||
add_executable(syncLogStoreTest "")
|
||||
add_executable(syncEntryTest "")
|
||||
add_executable(syncEntryCacheTest "")
|
||||
add_executable(syncRequestVoteTest "")
|
||||
add_executable(syncRequestVoteReplyTest "")
|
||||
add_executable(syncAppendEntriesTest "")
|
||||
|
@ -129,6 +130,10 @@ target_sources(syncEntryTest
|
|||
PRIVATE
|
||||
"syncEntryTest.cpp"
|
||||
)
|
||||
target_sources(syncEntryCacheTest
|
||||
PRIVATE
|
||||
"syncEntryCacheTest.cpp"
|
||||
)
|
||||
target_sources(syncRequestVoteTest
|
||||
PRIVATE
|
||||
"syncRequestVoteTest.cpp"
|
||||
|
@ -362,6 +367,11 @@ target_include_directories(syncEntryTest
|
|||
"${TD_SOURCE_DIR}/include/libs/sync"
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
|
||||
)
|
||||
target_include_directories(syncEntryCacheTest
|
||||
PUBLIC
|
||||
"${TD_SOURCE_DIR}/include/libs/sync"
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
|
||||
)
|
||||
target_include_directories(syncRequestVoteTest
|
||||
PUBLIC
|
||||
"${TD_SOURCE_DIR}/include/libs/sync"
|
||||
|
@ -610,6 +620,10 @@ target_link_libraries(syncEntryTest
|
|||
sync
|
||||
gtest_main
|
||||
)
|
||||
target_link_libraries(syncEntryCacheTest
|
||||
sync
|
||||
gtest_main
|
||||
)
|
||||
target_link_libraries(syncRequestVoteTest
|
||||
sync
|
||||
gtest_main
|
||||
|
|
|
@ -0,0 +1,162 @@
|
|||
#include <stdio.h>
|
||||
#include "syncEnv.h"
|
||||
#include "syncIO.h"
|
||||
#include "syncInt.h"
|
||||
#include "syncRaftLog.h"
|
||||
#include "syncRaftStore.h"
|
||||
#include "syncUtil.h"
|
||||
|
||||
void logTest() {
|
||||
sTrace("--- sync log test: trace");
|
||||
sDebug("--- sync log test: debug");
|
||||
sInfo("--- sync log test: info");
|
||||
sWarn("--- sync log test: warn");
|
||||
sError("--- sync log test: error");
|
||||
sFatal("--- sync log test: fatal");
|
||||
}
|
||||
|
||||
SSyncRaftEntry* createEntry(int i) {
|
||||
int32_t dataLen = 20;
|
||||
SSyncRaftEntry* pEntry = syncEntryBuild(dataLen);
|
||||
assert(pEntry != NULL);
|
||||
pEntry->msgType = 88;
|
||||
pEntry->originalRpcType = 99;
|
||||
pEntry->seqNum = 3;
|
||||
pEntry->isWeak = true;
|
||||
pEntry->term = 100 + i;
|
||||
pEntry->index = i;
|
||||
snprintf(pEntry->data, dataLen, "value%d", i);
|
||||
|
||||
return pEntry;
|
||||
}
|
||||
|
||||
SSyncNode* createFakeNode() {
|
||||
SSyncNode* pSyncNode = (SSyncNode*)taosMemoryMalloc(sizeof(SSyncNode));
|
||||
ASSERT(pSyncNode != NULL);
|
||||
memset(pSyncNode, 0, sizeof(SSyncNode));
|
||||
|
||||
return pSyncNode;
|
||||
}
|
||||
|
||||
SRaftEntryCache* createCache(int maxCount) {
|
||||
SSyncNode* pSyncNode = createFakeNode();
|
||||
ASSERT(pSyncNode != NULL);
|
||||
|
||||
SRaftEntryCache* pCache = raftCacheCreate(pSyncNode, maxCount);
|
||||
ASSERT(pCache != NULL);
|
||||
|
||||
return pCache;
|
||||
}
|
||||
|
||||
void test1() {
|
||||
int32_t code = 0;
|
||||
SRaftEntryCache* pCache = createCache(5);
|
||||
for (int i = 0; i < 5; ++i) {
|
||||
SSyncRaftEntry* pEntry = createEntry(i);
|
||||
code = raftCachePutEntry(pCache, pEntry);
|
||||
ASSERT(code == 1);
|
||||
syncEntryDestory(pEntry);
|
||||
}
|
||||
raftCacheLog2((char*)"==test1 write 5 entries==", pCache);
|
||||
|
||||
SyncIndex index;
|
||||
index = 1;
|
||||
code = raftCacheDelEntry(pCache, index);
|
||||
ASSERT(code == 0);
|
||||
index = 3;
|
||||
code = raftCacheDelEntry(pCache, index);
|
||||
ASSERT(code == 0);
|
||||
raftCacheLog2((char*)"==test1 delete 1,3==", pCache);
|
||||
|
||||
code = raftCacheClear(pCache);
|
||||
ASSERT(code == 0);
|
||||
raftCacheLog2((char*)"==clear all==", pCache);
|
||||
}
|
||||
|
||||
void test2() {
|
||||
int32_t code = 0;
|
||||
SRaftEntryCache* pCache = createCache(5);
|
||||
for (int i = 0; i < 5; ++i) {
|
||||
SSyncRaftEntry* pEntry = createEntry(i);
|
||||
code = raftCachePutEntry(pCache, pEntry);
|
||||
ASSERT(code == 1);
|
||||
syncEntryDestory(pEntry);
|
||||
}
|
||||
raftCacheLog2((char*)"==test2 write 5 entries==", pCache);
|
||||
|
||||
SyncIndex index;
|
||||
index = 1;
|
||||
SSyncRaftEntry* pEntry;
|
||||
code = raftCacheGetEntry(pCache, index, &pEntry);
|
||||
ASSERT(code == 0);
|
||||
syncEntryDestory(pEntry);
|
||||
syncEntryLog2((char*)"==test2 get entry 1==", pEntry);
|
||||
|
||||
index = 2;
|
||||
code = raftCacheGetEntryP(pCache, index, &pEntry);
|
||||
ASSERT(code == 0);
|
||||
syncEntryLog2((char*)"==test2 get entry pointer 2==", pEntry);
|
||||
|
||||
// not found
|
||||
index = 8;
|
||||
code = raftCacheGetEntry(pCache, index, &pEntry);
|
||||
ASSERT(code == -1 && terrno == TSDB_CODE_WAL_LOG_NOT_EXIST);
|
||||
sTrace("==test2 get entry 8 not found==");
|
||||
|
||||
// not found
|
||||
index = 9;
|
||||
code = raftCacheGetEntryP(pCache, index, &pEntry);
|
||||
ASSERT(code == -1 && terrno == TSDB_CODE_WAL_LOG_NOT_EXIST);
|
||||
sTrace("==test2 get entry pointer 9 not found==");
|
||||
}
|
||||
|
||||
void test3() {
|
||||
int32_t code = 0;
|
||||
SRaftEntryCache* pCache = createCache(5);
|
||||
for (int i = 0; i < 5; ++i) {
|
||||
SSyncRaftEntry* pEntry = createEntry(i);
|
||||
code = raftCachePutEntry(pCache, pEntry);
|
||||
ASSERT(code == 1);
|
||||
syncEntryDestory(pEntry);
|
||||
}
|
||||
for (int i = 6; i < 10; ++i) {
|
||||
SSyncRaftEntry* pEntry = createEntry(i);
|
||||
code = raftCachePutEntry(pCache, pEntry);
|
||||
ASSERT(code == 0);
|
||||
syncEntryDestory(pEntry);
|
||||
}
|
||||
raftCacheLog2((char*)"==test3 write 10 entries, max count is 5==", pCache);
|
||||
}
|
||||
|
||||
void test4() {
|
||||
int32_t code = 0;
|
||||
SRaftEntryCache* pCache = createCache(5);
|
||||
for (int i = 0; i < 5; ++i) {
|
||||
SSyncRaftEntry* pEntry = createEntry(i);
|
||||
code = raftCachePutEntry(pCache, pEntry);
|
||||
ASSERT(code == 1);
|
||||
syncEntryDestory(pEntry);
|
||||
}
|
||||
raftCacheLog2((char*)"==test4 write 5 entries==", pCache);
|
||||
|
||||
SyncIndex index;
|
||||
index = 3;
|
||||
SSyncRaftEntry* pEntry;
|
||||
code = raftCacheGetAndDel(pCache, index, &pEntry);
|
||||
ASSERT(code == 0);
|
||||
syncEntryLog2((char*)"==test4 get-and-del entry 3==", pEntry);
|
||||
raftCacheLog2((char*)"==test4 after get-and-del entry 3==", pCache);
|
||||
}
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
gRaftDetailLog = true;
|
||||
tsAsyncLog = 0;
|
||||
sDebugFlag = DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE + DEBUG_DEBUG;
|
||||
|
||||
test1();
|
||||
test2();
|
||||
test3();
|
||||
test4();
|
||||
|
||||
return 0;
|
||||
}
|
|
@ -1,5 +1,76 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
# basic type
|
||||
TAOS_DATA_TYPE = [
|
||||
"INT", "BIGINT", "SMALLINT", "TINYINT", "INT UNSIGNED", "BIGINT UNSIGNED", "SMALLINT UNSIGNED", "TINYINT UNSIGNED",
|
||||
"FLOAT", "DOUBLE",
|
||||
"BOOL",
|
||||
"BINARY", "NCHAR", "VARCHAR",
|
||||
"TIMESTAMP",
|
||||
# "MEDIUMBLOB", "BLOB", # add in 3.x
|
||||
# "DECIMAL", "NUMERIC", # add in 3.x
|
||||
"JSON", # only for tag
|
||||
]
|
||||
|
||||
TAOS_NUM_TYPE = [
|
||||
"INT", "BIGINT", "SMALLINT", "TINYINT", "INT UNSIGNED", "BIGINT UNSIGNED", "SMALLINT UNSIGNED", "TINYINT UNSIGNED", "FLOAT", "DOUBLE",
|
||||
# "DECIMAL", "NUMERIC", # add in 3.x
|
||||
]
|
||||
TAOS_CHAR_TYPE = [
|
||||
"BINARY", "NCHAR", "VARCHAR",
|
||||
]
|
||||
TAOS_BOOL_TYPE = ["BOOL",]
|
||||
TAOS_TS_TYPE = ["TIMESTAMP",]
|
||||
TAOS_BIN_TYPE = [
|
||||
"MEDIUMBLOB", "BLOB", # add in 3.x
|
||||
]
|
||||
|
||||
TAOS_TIME_INIT = ["b", "u", "a", "s", "m", "h", "d", "w", "n", "y"]
|
||||
TAOS_PRECISION = ["ms", "us", "ns"]
|
||||
PRECISION_DEFAULT = "ms"
|
||||
PRECISION = PRECISION_DEFAULT
|
||||
|
||||
TAOS_KEYWORDS = [
|
||||
"ABORT", "CREATE", "IGNORE", "NULL", "STAR",
|
||||
"ACCOUNT", "CTIME", "IMMEDIATE", "OF", "STATE",
|
||||
"ACCOUNTS", "DATABASE", "IMPORT", "OFFSET", "STATEMENT",
|
||||
"ADD", "DATABASES", "IN", "OR", "STATE_WINDOW",
|
||||
"AFTER", "DAYS", "INITIALLY", "ORDER", "STORAGE",
|
||||
"ALL", "DBS", "INSERT", "PARTITIONS", "STREAM",
|
||||
"ALTER", "DEFERRED", "INSTEAD", "PASS", "STREAMS",
|
||||
"AND", "DELIMITERS", "INT", "PLUS", "STRING",
|
||||
"AS", "DESC", "INTEGER", "PPS", "SYNCDB",
|
||||
"ASC", "DESCRIBE", "INTERVAL", "PRECISION", "TABLE",
|
||||
"ATTACH", "DETACH", "INTO", "PREV", "TABLES",
|
||||
"BEFORE", "DISTINCT", "IS", "PRIVILEGE", "TAG",
|
||||
"BEGIN", "DIVIDE", "ISNULL", "QTIME", "TAGS",
|
||||
"BETWEEN", "DNODE", "JOIN", "QUERIES", "TBNAME",
|
||||
"BIGINT", "DNODES", "KEEP", "QUERY", "TIMES",
|
||||
"BINARY", "DOT", "KEY", "QUORUM", "TIMESTAMP",
|
||||
"BITAND", "DOUBLE", "KILL", "RAISE", "TINYINT",
|
||||
"BITNOT", "DROP", "LE", "REM", "TOPIC",
|
||||
"BITOR", "EACH", "LIKE", "REPLACE", "TOPICS",
|
||||
"BLOCKS", "END", "LIMIT", "REPLICA", "TRIGGER",
|
||||
"BOOL", "EQ", "LINEAR", "RESET", "TSERIES",
|
||||
"BY", "EXISTS", "LOCAL", "RESTRICT", "UMINUS",
|
||||
"CACHE", "EXPLAIN", "LP", "ROW", "UNION",
|
||||
"CACHELAST", "FAIL", "LSHIFT", "RP", "UNSIGNED",
|
||||
"CASCADE", "FILE", "LT", "RSHIFT", "UPDATE",
|
||||
"CHANGE", "FILL", "MATCH", "SCORES", "UPLUS",
|
||||
"CLUSTER", "FLOAT", "MAXROWS", "SELECT", "USE",
|
||||
"COLON", "FOR", "MINROWS", "SEMI", "USER",
|
||||
"COLUMN", "FROM", "MINUS", "SESSION", "USERS",
|
||||
"COMMA", "FSYNC", "MNODES", "SET", "USING",
|
||||
"COMP", "GE", "MODIFY", "SHOW", "VALUES",
|
||||
"COMPACT", "GLOB", "MODULES", "SLASH", "VARIABLE",
|
||||
"CONCAT", "GRANTS", "NCHAR", "SLIDING", "VARIABLES",
|
||||
"CONFLICT", "GROUP", "NE", "SLIMIT", "VGROUPS",
|
||||
"CONNECTION", "GT", "NONE", "SMALLINT", "VIEW",
|
||||
"CONNECTIONS", "HAVING", "NOT", "SOFFSET", "VNODES",
|
||||
"CONNS", "ID", "NOTNULL", "STABLE", "WAL",
|
||||
"COPY", "IF", "NOW", "STABLES", "WHERE",
|
||||
]
|
||||
|
||||
# basic data type boundary
|
||||
TINYINT_MAX = 127
|
||||
TINYINT_MIN = -128
|
||||
|
@ -11,7 +82,7 @@ SMALLINT_MAX = 32767
|
|||
SMALLINT_MIN = -32768
|
||||
|
||||
SMALLINT_UN_MAX = 65535
|
||||
MALLINT_UN_MIN = 0
|
||||
SMALLINT_UN_MIN = 0
|
||||
|
||||
INT_MAX = 2147483647
|
||||
INT_MIN = -2147483648
|
||||
|
@ -33,8 +104,8 @@ DOUBLE_MIN = -1.7E+308
|
|||
|
||||
# schema boundary
|
||||
BINARY_LENGTH_MAX = 16374
|
||||
NCAHR_LENGTH_MAX_ = 4093
|
||||
DBNAME_LENGTH_MAX_ = 64
|
||||
NCAHR_LENGTH_MAX = 4093
|
||||
DBNAME_LENGTH_MAX = 64
|
||||
|
||||
STBNAME_LENGTH_MAX = 192
|
||||
STBNAME_LENGTH_MIN = 1
|
||||
|
@ -67,3 +138,31 @@ MNODE_SHM_SIZE_DEFAULT = 6292480
|
|||
VNODE_SHM_SIZE_MAX = 2147483647
|
||||
VNODE_SHM_SIZE_MIN = 6292480
|
||||
VNODE_SHM_SIZE_DEFAULT = 31458304
|
||||
|
||||
# time_init
|
||||
TIME_MS = 1
|
||||
TIME_US = TIME_MS/1000
|
||||
TIME_NS = TIME_US/1000
|
||||
|
||||
TIME_S = 1000 * TIME_MS
|
||||
TIME_M = 60 * TIME_S
|
||||
TIME_H = 60 * TIME_M
|
||||
TIME_D = 24 * TIME_H
|
||||
TIME_W = 7 * TIME_D
|
||||
TIME_N = 30 * TIME_D
|
||||
TIME_Y = 365 * TIME_D
|
||||
|
||||
|
||||
# session parameters
|
||||
INTERVAL_MIN = 1 * TIME_MS if PRECISION == PRECISION_DEFAULT else 1 * TIME_US
|
||||
|
||||
|
||||
# streams and related agg-function
|
||||
SMA_INDEX_FUNCTIONS = ["MIN", "MAX"]
|
||||
ROLLUP_FUNCTIONS = ["AVG", "SUM", "MIN", "MAX", "LAST", "FIRST"]
|
||||
SMA_WATMARK_MAXDELAY_INIT = ['a', "s", "m"]
|
||||
WATERMARK_MAX = 900000
|
||||
WATERMARK_MIN = 0
|
||||
|
||||
MAX_DELAY_MAX = 900000
|
||||
MAX_DELAY_MIN = 1
|
|
@ -21,6 +21,7 @@ import psutil
|
|||
import shutil
|
||||
import pandas as pd
|
||||
from util.log import *
|
||||
from util.constant import *
|
||||
|
||||
def _parse_datetime(timestr):
|
||||
try:
|
||||
|
@ -117,8 +118,7 @@ class TDSql:
|
|||
col_name_list = []
|
||||
col_type_list = []
|
||||
self.cursor.execute(sql)
|
||||
self.queryCols = self.cursor.description
|
||||
for query_col in self.queryCols:
|
||||
for query_col in self.cursor.description:
|
||||
col_name_list.append(query_col[0])
|
||||
col_type_list.append(query_col[1])
|
||||
except Exception as e:
|
||||
|
@ -301,6 +301,41 @@ class TDSql:
|
|||
args = (caller.filename, caller.lineno, self.sql, elm, expect_elm)
|
||||
tdLog.exit("%s(%d) failed: sql:%s, elm:%s == expect_elm:%s" % args)
|
||||
|
||||
def get_times(self, time_str, precision="ms"):
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
if time_str[-1] not in TAOS_TIME_INIT:
|
||||
tdLog.exit(f"{caller.filename}({caller.lineno}) failed: {time_str} not a standard taos time init")
|
||||
if precision not in TAOS_PRECISION:
|
||||
tdLog.exit(f"{caller.filename}({caller.lineno}) failed: {precision} not a standard taos time precision")
|
||||
|
||||
if time_str[-1] == TAOS_TIME_INIT[0]:
|
||||
times = int(time_str[:-1]) * TIME_NS
|
||||
if time_str[-1] == TAOS_TIME_INIT[1]:
|
||||
times = int(time_str[:-1]) * TIME_US
|
||||
if time_str[-1] == TAOS_TIME_INIT[2]:
|
||||
times = int(time_str[:-1]) * TIME_MS
|
||||
if time_str[-1] == TAOS_TIME_INIT[3]:
|
||||
times = int(time_str[:-1]) * TIME_S
|
||||
if time_str[-1] == TAOS_TIME_INIT[4]:
|
||||
times = int(time_str[:-1]) * TIME_M
|
||||
if time_str[-1] == TAOS_TIME_INIT[5]:
|
||||
times = int(time_str[:-1]) * TIME_H
|
||||
if time_str[-1] == TAOS_TIME_INIT[6]:
|
||||
times = int(time_str[:-1]) * TIME_D
|
||||
if time_str[-1] == TAOS_TIME_INIT[7]:
|
||||
times = int(time_str[:-1]) * TIME_W
|
||||
if time_str[-1] == TAOS_TIME_INIT[8]:
|
||||
times = int(time_str[:-1]) * TIME_N
|
||||
if time_str[-1] == TAOS_TIME_INIT[9]:
|
||||
times = int(time_str[:-1]) * TIME_Y
|
||||
|
||||
if precision == "ms":
|
||||
return int(times)
|
||||
elif precision == "us":
|
||||
return int(times*1000)
|
||||
elif precision == "ns":
|
||||
return int(times*1000*1000)
|
||||
|
||||
def taosdStatus(self, state):
|
||||
tdLog.sleep(5)
|
||||
pstate = 0
|
||||
|
|
|
@ -29,7 +29,7 @@
|
|||
#./test.sh -f tsim/dnode/drop_dnode_has_vnode_replica3.sim
|
||||
#./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica1.sim
|
||||
#./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica3.sim
|
||||
#./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_leader.sim
|
||||
./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_leader.sim
|
||||
./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_follower.sim
|
||||
|
||||
# ---- insert
|
||||
|
|
|
@ -65,7 +65,7 @@ sql_error redistribute vgroup 3 dnode 6 dnode 3 dnode 4
|
|||
# vgroup not exist
|
||||
sql_error redistribute vgroup 3 dnode 5 dnode 3 dnode 4
|
||||
# un changed
|
||||
sql_error redistribute vgroup 2 dnode 2 dnode 3 dnode 4
|
||||
# sql_error redistribute vgroup 2 dnode 2 dnode 3 dnode 4
|
||||
# no enought vnodes
|
||||
sql_error redistribute vgroup 2 dnode 1 dnode 3 dnode 4
|
||||
# offline vnodes
|
||||
|
@ -176,8 +176,6 @@ if $rows != 1 then
|
|||
return -1
|
||||
endi
|
||||
|
||||
return
|
||||
|
||||
print =============== step32:
|
||||
print redistribute vgroup 2 dnode $leaderVnode dnode $follower1 dnode 5
|
||||
sql redistribute vgroup 2 dnode $leaderVnode dnode $follower1 dnode 5
|
||||
|
|
|
@ -21,9 +21,9 @@ SINT_UN_COL = "c_sint_un"
|
|||
BINT_UN_COL = "c_bint_un"
|
||||
INT_UN_COL = "c_int_un"
|
||||
|
||||
BINARY_COL = "c8"
|
||||
NCHAR_COL = "c9"
|
||||
TS_COL = "c10"
|
||||
BINARY_COL = "c_binary"
|
||||
NCHAR_COL = "c_nchar"
|
||||
TS_COL = "c_ts"
|
||||
|
||||
NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
|
||||
CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
|
||||
|
@ -51,12 +51,28 @@ class DataSet:
|
|||
binary_data : List[str] = None
|
||||
nchar_data : List[str] = None
|
||||
|
||||
def __post_init__(self):
|
||||
self.ts_data = []
|
||||
self.int_data = []
|
||||
self.bint_data = []
|
||||
self.sint_data = []
|
||||
self.tint_data = []
|
||||
self.int_un_data = []
|
||||
self.bint_un_data = []
|
||||
self.sint_un_data = []
|
||||
self.tint_un_data = []
|
||||
self.float_data = []
|
||||
self.double_data = []
|
||||
self.bool_data = []
|
||||
self.binary_data = []
|
||||
self.nchar_data = []
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
tdSql.init(conn.cursor(), True)
|
||||
tdSql.init(conn.cursor(), False)
|
||||
|
||||
@property
|
||||
def create_databases_sql_err(self):
|
||||
|
@ -87,28 +103,28 @@ class TDTestCase:
|
|||
@property
|
||||
def create_stable_sql_err(self):
|
||||
return [
|
||||
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(ceil) watermark 1s maxdelay 1m",
|
||||
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(ceil) watermark 1s max_delay 1m",
|
||||
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(count) watermark 1min",
|
||||
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) maxdelay -1s",
|
||||
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) max_delay -1s",
|
||||
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) watermark -1m",
|
||||
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) watermark 1m ",
|
||||
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) maxdelay 1m ",
|
||||
# f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) watermark 1m ",
|
||||
# f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) max_delay 1m ",
|
||||
f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int, {BINARY_COL} binary(16)) tags (tag1 int) rollup(avg) watermark 1s",
|
||||
f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int, {BINARY_COL} nchar(16)) tags (tag1 int) rollup(avg) maxdelay 1m",
|
||||
# f"create table ntb_1 ({PRIMARY_COL} timestamp, {INT_COL} int, {BINARY_COL} nchar(16)) rollup(avg) watermark 1s maxdelay 1s",
|
||||
f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int, {BINARY_COL} nchar(16)) tags (tag1 int) rollup(avg) max_delay 1m",
|
||||
# f"create table ntb_1 ({PRIMARY_COL} timestamp, {INT_COL} int, {BINARY_COL} nchar(16)) rollup(avg) watermark 1s max_delay 1s",
|
||||
# f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int, {BINARY_COL} nchar(16)) tags (tag1 int) " ,
|
||||
# f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) " ,
|
||||
# f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int) " ,
|
||||
# f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int, {BINARY_COL} nchar(16)) " ,
|
||||
|
||||
# watermark, maxdelay: [0, 900000], [ms, s, m, ?]
|
||||
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) maxdelay 1u",
|
||||
# watermark, max_delay: [0, 900000], [ms, s, m, ?]
|
||||
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) max_delay 1u",
|
||||
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) watermark 1b",
|
||||
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) watermark 900001ms",
|
||||
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) maxdelay 16m",
|
||||
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) maxdelay 901s",
|
||||
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) maxdelay 1h",
|
||||
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) maxdelay 0.2h",
|
||||
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) max_delay 16m",
|
||||
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) max_delay 901s",
|
||||
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) max_delay 1h",
|
||||
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) max_delay 0.2h",
|
||||
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) watermark 0.002d",
|
||||
|
||||
]
|
||||
|
@ -117,11 +133,11 @@ class TDTestCase:
|
|||
def create_stable_sql_current(self):
|
||||
return [
|
||||
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(avg)",
|
||||
f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) watermark 5s maxdelay 1m",
|
||||
f"create stable stb3 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(max) watermark 5s maxdelay 1m",
|
||||
f"create stable stb4 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(sum) watermark 5s maxdelay 1m",
|
||||
# f"create stable stb5 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(last) watermark 5s maxdelay 1m",
|
||||
# f"create stable stb6 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(first) watermark 5s maxdelay 1m",
|
||||
f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) watermark 5s max_delay 1m",
|
||||
f"create stable stb3 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(max) watermark 5s max_delay 1m",
|
||||
f"create stable stb4 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(sum) watermark 5s max_delay 1m",
|
||||
# f"create stable stb5 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(last) watermark 5s max_delay 1m",
|
||||
# f"create stable stb6 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(first) watermark 5s max_delay 1m",
|
||||
]
|
||||
|
||||
def test_create_stb(self):
|
||||
|
@ -135,7 +151,7 @@ class TDTestCase:
|
|||
tdSql.checkRows(len(self.create_stable_sql_current))
|
||||
|
||||
# tdSql.execute("use db") # because db is a noraml database, not a rollup database, should not be able to create a rollup database
|
||||
# tdSql.error(f"create stable nor_db_rollup_stb ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) file_factor 5.0")
|
||||
# tdSql.error(f"create stable nor_db_rollup_stb ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) watermark 5s max_delay 1m")
|
||||
|
||||
|
||||
def test_create_databases(self):
|
||||
|
@ -177,21 +193,6 @@ class TDTestCase:
|
|||
|
||||
def __data_set(self, rows):
|
||||
data_set = DataSet()
|
||||
# neg_data_set = DataSet()
|
||||
data_set.ts_data = []
|
||||
data_set.int_data = []
|
||||
data_set.bint_data = []
|
||||
data_set.sint_data = []
|
||||
data_set.tint_data = []
|
||||
data_set.int_un_data = []
|
||||
data_set.bint_un_data = []
|
||||
data_set.sint_un_data = []
|
||||
data_set.tint_un_data = []
|
||||
data_set.float_data = []
|
||||
data_set.double_data = []
|
||||
data_set.bool_data = []
|
||||
data_set.binary_data = []
|
||||
data_set.nchar_data = []
|
||||
|
||||
for i in range(rows):
|
||||
data_set.ts_data.append(NOW + 1 * (rows - i))
|
||||
|
@ -226,6 +227,7 @@ class TDTestCase:
|
|||
return data_set
|
||||
|
||||
def __insert_data(self):
|
||||
tdLog.printNoPrefix("==========step: start inser data into tables now.....")
|
||||
data = self.__data_set(rows=self.rows)
|
||||
|
||||
# now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
|
||||
|
@ -264,10 +266,10 @@ class TDTestCase:
|
|||
|
||||
def run(self):
|
||||
self.rows = 10
|
||||
|
||||
tdSql.prepare()
|
||||
|
||||
tdLog.printNoPrefix("==========step0:all check")
|
||||
# self.all_test()
|
||||
self.all_test()
|
||||
|
||||
tdLog.printNoPrefix("==========step1:create table in normal database")
|
||||
tdSql.prepare()
|
||||
|
|
|
@ -17,25 +17,34 @@ TINT_COL = "c_tint"
|
|||
FLOAT_COL = "c_float"
|
||||
DOUBLE_COL = "c_double"
|
||||
BOOL_COL = "c_bool"
|
||||
TINT_UN_COL = "c_tint_un"
|
||||
SINT_UN_COL = "c_sint_un"
|
||||
BINT_UN_COL = "c_bint_un"
|
||||
INT_UN_COL = "c_int_un"
|
||||
TINT_UN_COL = "c_utint"
|
||||
SINT_UN_COL = "c_usint"
|
||||
BINT_UN_COL = "c_ubint"
|
||||
INT_UN_COL = "c_uint"
|
||||
BINARY_COL = "c_binary"
|
||||
NCHAR_COL = "c_nchar"
|
||||
TS_COL = "c_ts"
|
||||
|
||||
|
||||
|
||||
NUM_COL = [INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
|
||||
CHAR_COL = [BINARY_COL, NCHAR_COL, ]
|
||||
BOOLEAN_COL = [BOOL_COL, ]
|
||||
TS_TYPE_COL = [TS_COL, ]
|
||||
|
||||
INT_TAG = "t_int"
|
||||
|
||||
ALL_COL = [PRIMARY_COL, INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BINARY_COL, NCHAR_COL, BOOL_COL, TS_COL]
|
||||
TAG_COL = [INT_TAG]
|
||||
|
||||
# insert data args:
|
||||
TIME_STEP = 10000
|
||||
NOW = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
|
||||
|
||||
# init db/table
|
||||
DBNAME = "db"
|
||||
STBNAME = "stb1"
|
||||
CTBNAME = "ct1"
|
||||
NTBNAME = "nt1"
|
||||
|
||||
|
||||
@dataclass
|
||||
class DataSet:
|
||||
|
@ -78,14 +87,20 @@ class SMAschema:
|
|||
index_flag : str = "SMA INDEX"
|
||||
operator : str = "ON"
|
||||
tbname : str = None
|
||||
watermark : str = None
|
||||
maxdelay : str = None
|
||||
watermark : str = "5s"
|
||||
max_delay : str = "6m"
|
||||
func : Tuple[str] = None
|
||||
interval : Tuple[str] = None
|
||||
sliding : str = None
|
||||
interval : Tuple[str] = ("6m", "10s")
|
||||
sliding : str = "6m"
|
||||
other : Any = None
|
||||
drop : str = "DROP"
|
||||
drop_flag : str = "INDEX"
|
||||
querySmaOptimize : int = 1
|
||||
show : str = "SHOW"
|
||||
show_msg : str = "INDEXES"
|
||||
show_oper : str = "FROM"
|
||||
dbname : str = None
|
||||
rollup_db : bool = False
|
||||
|
||||
def __post_init__(self):
|
||||
if isinstance(self.other, dict):
|
||||
|
@ -111,8 +126,8 @@ class SMAschema:
|
|||
self.watermark = v
|
||||
del self.other[k]
|
||||
|
||||
if k.lower() == "maxdelay" and isinstance(v, str) and not self.maxdelay:
|
||||
self.maxdelay = v
|
||||
if k.lower() == "max_delay" and isinstance(v, str) and not self.max_delay:
|
||||
self.max_delay = v
|
||||
del self.other[k]
|
||||
|
||||
if k.lower() == "functions" and isinstance(v, tuple) and not self.func:
|
||||
|
@ -131,12 +146,36 @@ class SMAschema:
|
|||
self.drop_flag = v
|
||||
del self.other[k]
|
||||
|
||||
if k.lower() == "show_msg" and isinstance(v, str) and not self.show_msg:
|
||||
self.show_msg = v
|
||||
del self.other[k]
|
||||
|
||||
if k.lower() == "dbname" and isinstance(v, str) and not self.dbname:
|
||||
self.dbname = v
|
||||
del self.other[k]
|
||||
|
||||
if k.lower() == "show_oper" and isinstance(v, str) and not self.show_oper:
|
||||
self.show_oper = v
|
||||
del self.other[k]
|
||||
|
||||
if k.lower() == "rollup_db" and isinstance(v, bool) and not self.rollup_db:
|
||||
self.rollup_db = v
|
||||
del self.other[k]
|
||||
|
||||
|
||||
|
||||
# from ...pytest.util.sql import *
|
||||
# from ...pytest.util.constant import *
|
||||
|
||||
class TDTestCase:
|
||||
updatecfgDict = {"querySmaOptimize": 1}
|
||||
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
tdSql.init(conn.cursor(), False)
|
||||
self.precision = "ms"
|
||||
self.sma_count = 0
|
||||
self.sma_created_index = []
|
||||
|
||||
"""
|
||||
create sma index :
|
||||
|
@ -155,13 +194,17 @@ class TDTestCase:
|
|||
if sma.func:
|
||||
sql += f" function({', '.join(sma.func)})"
|
||||
if sma.interval:
|
||||
sql += f" interval({', '.join(sma.interval)})"
|
||||
interval, offset = self.__get_interval_offset(sma.interval)
|
||||
if offset:
|
||||
sql += f" interval({interval}, {offset})"
|
||||
else:
|
||||
sql += f" interval({interval})"
|
||||
if sma.sliding:
|
||||
sql += f" sliding({sma.sliding})"
|
||||
if sma.watermark:
|
||||
sql += f" watermark {sma.watermark}"
|
||||
if sma.maxdelay:
|
||||
sql += f" maxdelay {sma.maxdelay}"
|
||||
if sma.max_delay:
|
||||
sql += f" max_delay {sma.max_delay}"
|
||||
if isinstance(sma.other, dict):
|
||||
for k,v in sma.other.items():
|
||||
if isinstance(v,tuple) or isinstance(v, list):
|
||||
|
@ -171,53 +214,305 @@ class TDTestCase:
|
|||
if isinstance(sma.other, tuple) or isinstance(sma.other, list):
|
||||
sql += " ".join(sma.other)
|
||||
if isinstance(sma.other, int) or isinstance(sma.other, float) or isinstance(sma.other, str):
|
||||
sql += sma.other
|
||||
sql += f" {sma.other}"
|
||||
|
||||
return sql
|
||||
|
||||
def sma_create_check(self, sma:SMAschema):
|
||||
def __get_sma_func_col(self, func):
|
||||
cols = []
|
||||
if isinstance(func, str):
|
||||
cols.append( func.split("(")[-1].split(")")[0] )
|
||||
elif isinstance(func, tuple) or isinstance(func, list):
|
||||
for func_col in func:
|
||||
cols.append(func_col.split("(")[-1].split(")")[0])
|
||||
else:
|
||||
cols = []
|
||||
return cols
|
||||
|
||||
def __check_sma_func(self, func:tuple):
|
||||
if not isinstance(func, str) and not isinstance(func, tuple) and not isinstance(func, list):
|
||||
return False
|
||||
if isinstance(func, str) :
|
||||
if "(" not in func or ")" not in func:
|
||||
return False
|
||||
if func.split("(")[0].upper() not in SMA_INDEX_FUNCTIONS:
|
||||
return False
|
||||
if func.split("(")[1].split(")")[0] not in ALL_COL and func.split("(")[1].split(")")[0] not in TAG_COL :
|
||||
return False
|
||||
if isinstance(func, tuple) or isinstance(func, list):
|
||||
for arg in func:
|
||||
if not isinstance(arg, str):
|
||||
return False
|
||||
if "(" not in arg or ")" not in arg:
|
||||
return False
|
||||
if arg.split("(")[0].upper() not in SMA_INDEX_FUNCTIONS:
|
||||
return False
|
||||
if arg.split("(")[1].split(")")[0] not in ALL_COL and arg.split("(")[1].split(")")[0] not in TAG_COL :
|
||||
return False
|
||||
return True
|
||||
|
||||
def __check_sma_watermark(self, arg):
|
||||
if not arg:
|
||||
return False
|
||||
if not isinstance(arg, str):
|
||||
return False
|
||||
if arg[-1] not in SMA_WATMARK_MAXDELAY_INIT:
|
||||
return False
|
||||
if len(arg) == 1:
|
||||
return False
|
||||
if not arg[:-1].isdecimal():
|
||||
return False
|
||||
if tdSql.get_times(arg) > WATERMARK_MAX:
|
||||
return False
|
||||
if tdSql.get_times(arg) < WATERMARK_MIN:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def __check_sma_max_delay(self, arg):
|
||||
if not self.__check_sma_watermark(arg):
|
||||
return False
|
||||
if tdSql.get_times(arg) < MAX_DELAY_MIN:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def __check_sma_sliding(self, arg):
|
||||
if not isinstance(arg, str):
|
||||
return False
|
||||
if arg[-1] not in TAOS_TIME_INIT:
|
||||
return False
|
||||
if len(arg) == 1:
|
||||
return False
|
||||
if not arg[:-1].isdecimal():
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def __get_interval_offset(self, args):
|
||||
if isinstance(args, str):
|
||||
interval, offset = args, None
|
||||
elif isinstance(args,tuple) or isinstance(args, list):
|
||||
if len(args) == 1:
|
||||
interval, offset = args[0], None
|
||||
elif len(args) == 2:
|
||||
interval, offset = args
|
||||
else:
|
||||
interval, offset = False, False
|
||||
else:
|
||||
interval, offset = False, False
|
||||
|
||||
return interval, offset
|
||||
|
||||
def __check_sma_interval(self, args):
|
||||
if not isinstance(args, tuple) and not isinstance(args,str):
|
||||
return False
|
||||
interval, offset = self.__get_interval_offset(args)
|
||||
if not interval:
|
||||
return False
|
||||
if not self.__check_sma_sliding(interval):
|
||||
return False
|
||||
if tdSql.get_times(interval) < INTERVAL_MIN:
|
||||
return False
|
||||
if offset:
|
||||
if not self.__check_sma_sliding(offset):
|
||||
return False
|
||||
if tdSql.get_times(interval) <= tdSql.get_times(offset) :
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def __sma_create_check(self, sma:SMAschema):
|
||||
if self.updatecfgDict["querySmaOptimize"] == 0:
|
||||
return False
|
||||
# # TODO: if database is a rollup-db, can not create sma index
|
||||
# tdSql.query("select database()")
|
||||
# if sma.rollup_db :
|
||||
# return False
|
||||
tdSql.query("show stables")
|
||||
if not sma.tbname:
|
||||
return False
|
||||
stb_in_list = False
|
||||
for row in tdSql.queryResult:
|
||||
if sma.tbname == row[0]:
|
||||
stb_in_list = True
|
||||
break
|
||||
if not stb_in_list:
|
||||
tdSql.error(self.__create_sma_index(sma))
|
||||
if not sma.creation:
|
||||
tdSql.error(self.__create_sma_index(sma))
|
||||
if not sma.index_flag:
|
||||
tdSql.error(self.__create_sma_index(sma))
|
||||
if not sma.index_name:
|
||||
tdSql.error(self.__create_sma_index(sma))
|
||||
if not sma.operator:
|
||||
tdSql.error(self.__create_sma_index(sma))
|
||||
if not sma.tbname:
|
||||
tdSql.error(self.__create_sma_index(sma))
|
||||
if not sma.func:
|
||||
tdSql.error(self.__create_sma_index(sma))
|
||||
if not sma.interval:
|
||||
tdSql.error(self.__create_sma_index(sma))
|
||||
if not sma.sliding:
|
||||
tdSql.error(self.__create_sma_index(sma))
|
||||
return False
|
||||
if not sma.creation or not isinstance(sma.creation, str) or sma.creation.upper() != "CREATE":
|
||||
return False
|
||||
if not sma.index_flag or not isinstance(sma.index_flag, str) or sma.index_flag.upper() != "SMA INDEX" :
|
||||
return False
|
||||
if not sma.index_name or not isinstance(sma.index_name, str) or sma.index_name.upper() in TAOS_KEYWORDS:
|
||||
return False
|
||||
if not sma.operator or not isinstance(sma.operator, str) or sma.operator.upper() != "ON":
|
||||
return False
|
||||
|
||||
if not sma.func or not self.__check_sma_func(sma.func):
|
||||
return False
|
||||
tdSql.query(f"desc {sma.tbname}")
|
||||
_col_list = []
|
||||
for col_row in tdSql.queryResult:
|
||||
_col_list.append(col_row[0])
|
||||
_sma_func_cols = self.__get_sma_func_col(sma.func)
|
||||
for _sma_func_col in _sma_func_cols:
|
||||
if _sma_func_col not in _col_list:
|
||||
return False
|
||||
|
||||
if sma.sliding and not self.__check_sma_sliding(sma.sliding):
|
||||
return False
|
||||
interval, _ = self.__get_interval_offset(sma.interval)
|
||||
if not sma.interval or not self.__check_sma_interval(sma.interval) :
|
||||
return False
|
||||
if sma.sliding and tdSql.get_times(interval) < tdSql.get_times(sma.sliding):
|
||||
return False
|
||||
if sma.watermark and not self.__check_sma_watermark(sma.watermark):
|
||||
return False
|
||||
if sma.max_delay and not self.__check_sma_max_delay(sma.max_delay):
|
||||
return False
|
||||
if sma.other:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def sma_create_check(self, sma:SMAschema):
|
||||
if self.__sma_create_check(sma):
|
||||
tdSql.query(self.__create_sma_index(sma))
|
||||
self.sma_count += 1
|
||||
self.sma_created_index.append(sma.index_name)
|
||||
tdSql.query("show streams")
|
||||
tdSql.checkRows(self.sma_count)
|
||||
|
||||
else:
|
||||
tdSql.error(self.__create_sma_index(sma))
|
||||
|
||||
def __drop_sma_index(self, sma:SMAschema):
|
||||
sql = f"{sma.drop} {sma.drop_flag} {sma.index_name}"
|
||||
return sql
|
||||
|
||||
def __sma_drop_check(self, sma:SMAschema):
|
||||
if not sma.drop:
|
||||
return False
|
||||
if not sma.drop_flag:
|
||||
return False
|
||||
if not sma.index_name:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def sma_drop_check(self, sma:SMAschema):
|
||||
if self.__sma_drop_check(sma):
|
||||
tdSql.query(self.__drop_sma_index(sma))
|
||||
print(self.__drop_sma_index(sma))
|
||||
self.sma_count -= 1
|
||||
self.sma_created_index = list(filter(lambda x: x != sma.index_name, self.sma_created_index))
|
||||
tdSql.query("show streams")
|
||||
tdSql.checkRows(self.sma_count)
|
||||
|
||||
else:
|
||||
tdSql.error(self.__drop_sma_index(sma))
|
||||
|
||||
def __show_sma_index(self, sma:SMAschema):
|
||||
sql = f"{sma.show} {sma.show_msg} {sma.show_oper} {sma.tbname}"
|
||||
return sql
|
||||
|
||||
def __sma_show_check(self, sma:SMAschema):
|
||||
if not sma.show:
|
||||
return False
|
||||
if not sma.show_msg:
|
||||
return False
|
||||
if not sma.show_oper:
|
||||
return False
|
||||
if not sma.tbname:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def sma_show_check(self, sma:SMAschema):
|
||||
if self.__sma_show_check(sma):
|
||||
tdSql.query(self.__show_sma_index(sma))
|
||||
tdSql.checkRows(self.sma_count)
|
||||
else:
|
||||
tdSql.error(self.__show_sma_index(sma))
|
||||
|
||||
@property
|
||||
def __create_sma_sql(self):
|
||||
err_sqls = []
|
||||
cur_sqls = []
|
||||
# err_set
|
||||
# # case 1: required fields check
|
||||
err_sqls.append( SMAschema(creation="", tbname=STBNAME, func=(f"min({INT_COL})",f"max({INT_COL})") ) )
|
||||
err_sqls.append( SMAschema(index_name="",tbname=STBNAME, func=(f"min({INT_COL})",f"max({INT_COL})") ) )
|
||||
err_sqls.append( SMAschema(index_flag="",tbname=STBNAME, func=(f"min({INT_COL})",f"max({INT_COL})") ) )
|
||||
err_sqls.append( SMAschema(operator="",tbname=STBNAME, func=(f"min({INT_COL})",f"max({INT_COL})") ) )
|
||||
err_sqls.append( SMAschema(tbname="", func=(f"min({INT_COL})",f"max({INT_COL})") ) )
|
||||
err_sqls.append( SMAschema(func=("",),tbname=STBNAME ) )
|
||||
err_sqls.append( SMAschema(interval=(""),tbname=STBNAME, func=(f"min({INT_COL})",f"max({INT_COL})") ) )
|
||||
|
||||
# # case 2: err fields
|
||||
err_sqls.append( SMAschema(creation="show",tbname=STBNAME, func=(f"min({INT_COL})",f"max({INT_COL})") ) )
|
||||
err_sqls.append( SMAschema(creation="alter",tbname=STBNAME, func=(f"min({INT_COL})",f"max({INT_COL})") ) )
|
||||
err_sqls.append( SMAschema(creation="select",tbname=STBNAME, func=(f"min({INT_COL})",f"max({INT_COL})") ) )
|
||||
|
||||
err_sqls.append( SMAschema(index_flag="SMA INDEXES", tbname=STBNAME, func=(f"min({INT_COL})",f"max({INT_COL})") ) )
|
||||
err_sqls.append( SMAschema(index_flag="SMA INDEX ,", tbname=STBNAME, func=(f"min({INT_COL})",f"max({INT_COL})") ) )
|
||||
err_sqls.append( SMAschema(index_name="tbname", tbname=STBNAME, func=(f"min({INT_COL})",f"max({INT_COL})") ) )
|
||||
|
||||
|
||||
# current_set
|
||||
|
||||
cur_sqls.append( SMAschema(max_delay="",tbname=STBNAME, func=(f"min({INT_COL})",f"max({INT_COL})") ) )
|
||||
cur_sqls.append( SMAschema(watermark="",index_name="sma_index_2",tbname=STBNAME, func=(f"min({INT_COL})",f"max({INT_COL})") ) )
|
||||
cur_sqls.append( SMAschema(sliding="",index_name='sma_index_3',tbname=STBNAME, func=(f"min({INT_COL})",f"max({INT_COL})") ) )
|
||||
|
||||
|
||||
return err_sqls, cur_sqls
|
||||
|
||||
def test_create_sma(self):
|
||||
err_sqls , cur_sqls = self.__create_sma_sql
|
||||
for err_sql in err_sqls:
|
||||
self.sma_create_check(err_sql)
|
||||
for cur_sql in cur_sqls:
|
||||
self.sma_create_check(cur_sql)
|
||||
|
||||
@property
|
||||
def __drop_sma_sql(self):
|
||||
err_sqls = []
|
||||
cur_sqls = []
|
||||
# err_set
|
||||
## case 1: required fields check
|
||||
err_sqls.append( SMAschema(drop="") )
|
||||
err_sqls.append( SMAschema(drop_flag="") )
|
||||
err_sqls.append( SMAschema(index_name="") )
|
||||
|
||||
for index in self.sma_created_index:
|
||||
cur_sqls.append(SMAschema(index_name=index))
|
||||
|
||||
return err_sqls, cur_sqls
|
||||
|
||||
def test_drop_sma(self):
|
||||
err_sqls , cur_sqls = self.__drop_sma_sql
|
||||
for err_sql in err_sqls:
|
||||
self.sma_drop_check(err_sql)
|
||||
# for cur_sql in cur_sqls:
|
||||
# self.sma_drop_check(cur_sql)
|
||||
|
||||
def all_test(self):
|
||||
self.test_create_sma()
|
||||
self.test_drop_sma()
|
||||
|
||||
pass
|
||||
|
||||
def __create_tb(self):
|
||||
tdLog.printNoPrefix("==========step: create table")
|
||||
create_stb_sql = f'''create table stb1(
|
||||
create_stb_sql = f'''create table {STBNAME}(
|
||||
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
|
||||
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
|
||||
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp,
|
||||
{TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned,
|
||||
{INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned
|
||||
) tags (tag1 int)
|
||||
) tags ({INT_TAG} int)
|
||||
'''
|
||||
create_ntb_sql = f'''create table t1(
|
||||
create_ntb_sql = f'''create table {NTBNAME}(
|
||||
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
|
||||
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
|
||||
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp,
|
||||
|
@ -253,6 +548,7 @@ class TDTestCase:
|
|||
return data_set
|
||||
|
||||
def __insert_data(self):
|
||||
tdLog.printNoPrefix("==========step: start inser data into tables now.....")
|
||||
data = self.__data_set(rows=self.rows)
|
||||
|
||||
# now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
|
||||
|
@ -278,7 +574,7 @@ class TDTestCase:
|
|||
tdSql.execute(
|
||||
f"insert into ct4 values ( {NOW - i * int(TIME_STEP * 0.8) }, {row_data} )")
|
||||
tdSql.execute(
|
||||
f"insert into t1 values ( {NOW - i * int(TIME_STEP * 1.2)}, {row_data} )")
|
||||
f"insert into {NTBNAME} values ( {NOW - i * int(TIME_STEP * 1.2)}, {row_data} )")
|
||||
|
||||
tdSql.execute(
|
||||
f"insert into ct2 values ( {NOW + int(TIME_STEP * 0.6)}, {null_data} )")
|
||||
|
@ -295,28 +591,31 @@ class TDTestCase:
|
|||
f"insert into ct4 values ( {NOW - self.rows * int(TIME_STEP * 0.39)}, {null_data} )")
|
||||
|
||||
tdSql.execute(
|
||||
f"insert into t1 values ( {NOW + int(TIME_STEP * 1.2)}, {null_data} )")
|
||||
f"insert into {NTBNAME} values ( {NOW + int(TIME_STEP * 1.2)}, {null_data} )")
|
||||
tdSql.execute(
|
||||
f"insert into t1 values ( {NOW - (self.rows + 1) * int(TIME_STEP * 1.2)}, {null_data} )")
|
||||
f"insert into {NTBNAME} values ( {NOW - (self.rows + 1) * int(TIME_STEP * 1.2)}, {null_data} )")
|
||||
tdSql.execute(
|
||||
f"insert into t1 values ( {NOW - self.rows * int(TIME_STEP * 0.59)}, {null_data} )")
|
||||
f"insert into {NTBNAME} values ( {NOW - self.rows * int(TIME_STEP * 0.59)}, {null_data} )")
|
||||
|
||||
def run(self):
|
||||
sma1 = SMAschema(func=("min(c1)","max(c2)"))
|
||||
sql1 = self.__create_sma_index(sma1)
|
||||
print("================")
|
||||
print(sql1)
|
||||
# a = DataSet()
|
||||
# return
|
||||
self.rows = 10
|
||||
|
||||
tdLog.printNoPrefix("==========step0:all check")
|
||||
# self.all_test()
|
||||
|
||||
tdLog.printNoPrefix("==========step1:create table in normal database")
|
||||
tdSql.prepare()
|
||||
self.__create_tb()
|
||||
self.__insert_data()
|
||||
# self.__insert_data()
|
||||
self.all_test()
|
||||
|
||||
# drop databases, create same name db、stb and sma index
|
||||
# tdSql.prepare()
|
||||
# self.__create_tb()
|
||||
# self.__insert_data()
|
||||
# self.all_test()
|
||||
|
||||
|
||||
|
||||
return
|
||||
|
||||
tdLog.printNoPrefix("==========step2:create table in rollup database")
|
||||
|
|
|
@ -28,7 +28,7 @@ class TDTestCase:
|
|||
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
tdSql.init(conn.cursor(), True)
|
||||
tdSql.init(conn.cursor(), False)
|
||||
|
||||
def __query_condition(self,tbname):
|
||||
query_condition = []
|
||||
|
|
|
@ -28,7 +28,7 @@ class TDTestCase:
|
|||
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
tdSql.init(conn.cursor(), True)
|
||||
tdSql.init(conn.cursor(), False)
|
||||
|
||||
def __query_condition(self,tbname):
|
||||
query_condition = []
|
||||
|
|
|
@ -31,7 +31,7 @@ class TDTestCase:
|
|||
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
tdSql.init(conn.cursor())
|
||||
tdSql.init(conn.cursor(),False)
|
||||
|
||||
def __substr_condition(self): # sourcery skip: extract-method
|
||||
substr_condition = []
|
||||
|
|
|
@ -188,8 +188,8 @@ class TDTestCase:
|
|||
|
||||
def check_tail_table(self , tbname , col_name , tail_rows , offset):
|
||||
tail_sql = f"select tail({col_name} , {tail_rows} , {offset}) from {tbname}"
|
||||
equal_sql = f"select {col_name} from (select ts , {col_name} from {tbname} order by ts desc limit {tail_rows} offset {offset}) order by ts"
|
||||
#equal_sql = f"select {col_name} from {tbname} order by ts desc limit {tail_rows} offset {offset}"
|
||||
#equal_sql = f"select {col_name} from (select ts , {col_name} from {tbname} order by ts desc limit {tail_rows} offset {offset}) order by ts"
|
||||
equal_sql = f"select {col_name} from {tbname} order by ts desc limit {tail_rows} offset {offset}"
|
||||
tdSql.query(tail_sql)
|
||||
tail_result = tdSql.queryResult
|
||||
|
||||
|
@ -404,7 +404,7 @@ class TDTestCase:
|
|||
f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.query("select tail(c2,2) from sub1_bound")
|
||||
tdSql.query("select tail(c2,2) from sub1_bound order by 1 desc")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0,0,9223372036854775803)
|
||||
|
||||
|
|
Loading…
Reference in New Issue