Merge branch '3.0' into fix/TD-17795
This commit is contained in commit 7c6fbd1a09.
@@ -152,7 +152,10 @@ void taosCfgDynamicOptions(const char *option, const char *value);
 void taosAddDataDir(int32_t index, char *v1, int32_t level, int32_t primary);
 
 struct SConfig *taosGetCfg();
-int32_t taosSetCfg(SConfig *pCfg, char* name);
+
+void    taosSetAllDebugFlag(int32_t flag);
+void    taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal);
+int32_t taosSetCfg(SConfig *pCfg, char *name);
 
 #ifdef __cplusplus
 }
@@ -104,6 +104,7 @@ typedef struct SJoinLogicNode {
   SNode*     pMergeCondition;
   SNode*     pOnConditions;
   bool       isSingleTableJoin;
+  EOrder     inputTsOrder;
 } SJoinLogicNode;
 
 typedef struct SAggLogicNode {
@@ -201,6 +202,7 @@ typedef struct SWindowLogicNode {
   int64_t          watermark;
   int8_t           igExpired;
   EWindowAlgorithm windowAlgo;
+  EOrder           inputTsOrder;
 } SWindowLogicNode;
 
 typedef struct SFillLogicNode {
@@ -356,15 +358,14 @@ typedef struct SInterpFuncPhysiNode {
   SNode*     pTimeSeries;  // SColumnNode
 } SInterpFuncPhysiNode;
 
-typedef struct SJoinPhysiNode {
+typedef struct SSortMergeJoinPhysiNode {
   SPhysiNode node;
   EJoinType  joinType;
   SNode*     pMergeCondition;
   SNode*     pOnConditions;
   SNodeList* pTargets;
-} SJoinPhysiNode;
-
-typedef SJoinPhysiNode SSortMergeJoinPhysiNode;
+  EOrder     inputTsOrder;
+} SSortMergeJoinPhysiNode;
 
 typedef struct SAggPhysiNode {
   SPhysiNode node;
@@ -255,6 +255,7 @@ typedef struct SSelectStmt {
   int32_t     selectFuncNum;
   bool        isEmptyResult;
   bool        isTimeLineResult;
+  bool        isSubquery;
   bool        hasAggFuncs;
   bool        hasRepeatScanFuncs;
   bool        hasIndefiniteRowsFunc;
@@ -67,7 +67,6 @@ extern int32_t idxDebugFlag;
 int32_t taosInitLog(const char *logName, int32_t maxFiles);
 void    taosCloseLog();
 void    taosResetLog();
-void    taosSetAllDebugFlag(int32_t flag);
 void    taosDumpData(uint8_t *msg, int32_t len);
 
 void    taosPrintLog(const char *flags, ELogLevel level, int32_t dflag, const char *format, ...)
@@ -1878,7 +1878,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks
     msgLen += sizeof(SSubmitBlk);
     int32_t dataLen = 0;
     for (int32_t j = 0; j < rows; ++j) {  // iterate by row
-      tdSRowResetBuf(&rb, POINTER_SHIFT(pDataBuf, msgLen));  // set row buf
+      tdSRowResetBuf(&rb, POINTER_SHIFT(pDataBuf, msgLen + dataLen));  // set row buf
       bool    isStartKey = false;
       int32_t offset = 0;
       for (int32_t k = 0; k < colNum; ++k) {  // iterate by column
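Illustrative sketch (not TDengine source; buffer sizes and names are hypothetical) of the offset fix in the hunk above: when several rows are packed into one buffer, each row must start at base + header + bytes already written (msgLen + dataLen), otherwise every row is written over the first slot.

#include <stdio.h>
#include <string.h>

int main(void) {
  char buf[64] = {0};
  int  msgLen = 8;  /* stand-in for sizeof(SSubmitBlk) header */
  int  dataLen = 0; /* bytes of row data appended so far */

  const char *rows[] = {"r0", "r1", "r2"};
  for (int j = 0; j < 3; ++j) {
    /* write each row at the running offset, then advance the offset */
    memcpy(buf + msgLen + dataLen, rows[j], 2);
    dataLen += 2;
  }
  printf("payload=%.6s dataLen=%d\n", buf + msgLen, dataLen); /* r0r1r2, 6 */
  return 0;
}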
@@ -1143,6 +1143,10 @@ void taosCfgDynamicOptions(const char *option, const char *value) {
     int32_t monitor = atoi(value);
     uInfo("monitor set from %d to %d", tsEnableMonitor, monitor);
     tsEnableMonitor = monitor;
+    SConfigItem *pItem = cfgGetItem(tsCfg, "monitor");
+    if (pItem != NULL) {
+      pItem->bval = tsEnableMonitor;
+    }
     return;
   }
 
@@ -1166,8 +1170,39 @@ void taosCfgDynamicOptions(const char *option, const char *value) {
       int32_t flag = atoi(value);
       uInfo("%s set from %d to %d", optName, *optionVars[d], flag);
-      *optionVars[d] = flag;
+      taosSetDebugFlag(optionVars[d], optName, flag);
       return;
     }
 
   uError("failed to cfg dynamic option:%s value:%s", option, value);
 }
+
+void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal) {
+  SConfigItem *pItem = cfgGetItem(tsCfg, flagName);
+  if (pItem != NULL) {
+    pItem->i32 = flagVal;
+  }
+  *pFlagPtr = flagVal;
+}
+
+void taosSetAllDebugFlag(int32_t flag) {
+  if (flag <= 0) return;
+
+  taosSetDebugFlag(&uDebugFlag, "uDebugFlag", flag);
+  taosSetDebugFlag(&rpcDebugFlag, "rpcDebugFlag", flag);
+  taosSetDebugFlag(&jniDebugFlag, "jniDebugFlag", flag);
+  taosSetDebugFlag(&qDebugFlag, "qDebugFlag", flag);
+  taosSetDebugFlag(&cDebugFlag, "cDebugFlag", flag);
+  taosSetDebugFlag(&dDebugFlag, "dDebugFlag", flag);
+  taosSetDebugFlag(&vDebugFlag, "vDebugFlag", flag);
+  taosSetDebugFlag(&mDebugFlag, "mDebugFlag", flag);
+  taosSetDebugFlag(&wDebugFlag, "wDebugFlag", flag);
+  taosSetDebugFlag(&sDebugFlag, "sDebugFlag", flag);
+  taosSetDebugFlag(&tsdbDebugFlag, "tsdbDebugFlag", flag);
+  taosSetDebugFlag(&tqDebugFlag, "tqDebugFlag", flag);
+  taosSetDebugFlag(&fsDebugFlag, "fsDebugFlag", flag);
+  taosSetDebugFlag(&udfDebugFlag, "udfDebugFlag", flag);
+  taosSetDebugFlag(&smaDebugFlag, "smaDebugFlag", flag);
+  taosSetDebugFlag(&idxDebugFlag, "idxDebugFlag", flag);
+  uInfo("all debug flag are set to %d", flag);
+}
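Illustrative sketch (not TDengine source; FlagItem and registry_find are hypothetical) of the write-through pattern introduced above: a single setter updates both the live global flag and its entry in the config registry, so the two views can never diverge.

#include <stdio.h>
#include <string.h>

typedef struct { const char *name; int value; } FlagItem;

static FlagItem registry[] = {{"uDebugFlag", 131}, {"rpcDebugFlag", 131}};
static int uDebugFlag = 131;
static int rpcDebugFlag = 131;

static FlagItem *registry_find(const char *name) {
  for (int i = 0; i < (int)(sizeof(registry) / sizeof(registry[0])); ++i) {
    if (strcmp(registry[i].name, name) == 0) return &registry[i];
  }
  return NULL;
}

/* single write path: update the registry entry (if present) and the global */
static void set_debug_flag(int *pFlag, const char *name, int value) {
  FlagItem *item = registry_find(name);
  if (item != NULL) item->value = value;
  *pFlag = value;
}

int main(void) {
  set_debug_flag(&uDebugFlag, "uDebugFlag", 143);
  set_debug_flag(&rpcDebugFlag, "rpcDebugFlag", 143);
  printf("uDebugFlag=%d registry=%d\n", uDebugFlag, registry_find("uDebugFlag")->value);
  return 0;
}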
@@ -874,7 +874,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
 }
 
 static int32_t mndProcessConfigDnodeRsp(SRpcMsg *pRsp) {
-  mInfo("config rsp from dnode, app:%p", pRsp->info.ahandle);
+  mInfo("config rsp from dnode");
   return 0;
 }
 
@@ -268,6 +268,7 @@ struct SVnode {
   tsem_t    canCommit;
   int64_t   sync;
   int32_t   blockCount;
+  bool      restored;
   tsem_t    syncSem;
   SQHandle* pQuery;
 };
@@ -1395,10 +1395,26 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
           break;
         case TSDB_DATA_TYPE_BOOL:
           break;
-        case TSDB_DATA_TYPE_TINYINT:
+        case TSDB_DATA_TYPE_TINYINT: {
+          pColAgg->sum += colVal.value.i8;
+          if (pColAgg->min > colVal.value.i8) {
+            pColAgg->min = colVal.value.i8;
+          }
+          if (pColAgg->max < colVal.value.i8) {
+            pColAgg->max = colVal.value.i8;
+          }
           break;
-        case TSDB_DATA_TYPE_SMALLINT:
+        }
+        case TSDB_DATA_TYPE_SMALLINT: {
+          pColAgg->sum += colVal.value.i16;
+          if (pColAgg->min > colVal.value.i16) {
+            pColAgg->min = colVal.value.i16;
+          }
+          if (pColAgg->max < colVal.value.i16) {
+            pColAgg->max = colVal.value.i16;
+          }
           break;
+        }
         case TSDB_DATA_TYPE_INT: {
           pColAgg->sum += colVal.value.i32;
           if (pColAgg->min > colVal.value.i32) {
@@ -1419,24 +1435,79 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
           }
           break;
         }
-        case TSDB_DATA_TYPE_FLOAT:
+        case TSDB_DATA_TYPE_FLOAT: {
+          pColAgg->sum += colVal.value.f;
+          if (pColAgg->min > colVal.value.f) {
+            pColAgg->min = colVal.value.f;
+          }
+          if (pColAgg->max < colVal.value.f) {
+            pColAgg->max = colVal.value.f;
+          }
           break;
-        case TSDB_DATA_TYPE_DOUBLE:
+        }
+        case TSDB_DATA_TYPE_DOUBLE: {
+          pColAgg->sum += colVal.value.d;
+          if (pColAgg->min > colVal.value.d) {
+            pColAgg->min = colVal.value.d;
+          }
+          if (pColAgg->max < colVal.value.d) {
+            pColAgg->max = colVal.value.d;
+          }
           break;
+        }
         case TSDB_DATA_TYPE_VARCHAR:
           break;
-        case TSDB_DATA_TYPE_TIMESTAMP:
+        case TSDB_DATA_TYPE_TIMESTAMP: {
+          if (pColAgg->min > colVal.value.i64) {
+            pColAgg->min = colVal.value.i64;
+          }
+          if (pColAgg->max < colVal.value.i64) {
+            pColAgg->max = colVal.value.i64;
+          }
           break;
+        }
         case TSDB_DATA_TYPE_NCHAR:
           break;
-        case TSDB_DATA_TYPE_UTINYINT:
+        case TSDB_DATA_TYPE_UTINYINT: {
+          pColAgg->sum += colVal.value.u8;
+          if (pColAgg->min > colVal.value.u8) {
+            pColAgg->min = colVal.value.u8;
+          }
+          if (pColAgg->max < colVal.value.u8) {
+            pColAgg->max = colVal.value.u8;
+          }
           break;
-        case TSDB_DATA_TYPE_USMALLINT:
+        }
+        case TSDB_DATA_TYPE_USMALLINT: {
+          pColAgg->sum += colVal.value.u16;
+          if (pColAgg->min > colVal.value.u16) {
+            pColAgg->min = colVal.value.u16;
+          }
+          if (pColAgg->max < colVal.value.u16) {
+            pColAgg->max = colVal.value.u16;
+          }
           break;
-        case TSDB_DATA_TYPE_UINT:
+        }
+        case TSDB_DATA_TYPE_UINT: {
+          pColAgg->sum += colVal.value.u32;
+          if (pColAgg->min > colVal.value.u32) {
+            pColAgg->min = colVal.value.u32;
+          }
+          if (pColAgg->max < colVal.value.u32) {
+            pColAgg->max = colVal.value.u32;
+          }
           break;
-        case TSDB_DATA_TYPE_UBIGINT:
+        }
+        case TSDB_DATA_TYPE_UBIGINT: {
+          pColAgg->sum += colVal.value.u64;
+          if (pColAgg->min > colVal.value.u64) {
+            pColAgg->min = colVal.value.u64;
+          }
+          if (pColAgg->max < colVal.value.u64) {
+            pColAgg->max = colVal.value.u64;
+          }
           break;
+        }
         case TSDB_DATA_TYPE_JSON:
           break;
         case TSDB_DATA_TYPE_VARBINARY:
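A minimal standalone reminder (illustrative only, not TDengine source) of the C switch semantics behind the braced-case pattern above: each case that does work must own a braced block ending in break, because without the break control falls straight into the next case and corrupts its aggregate.

#include <stdio.h>

int main(void) {
  long long sum_i8 = 0, sum_i16 = 0;
  int type = 0; /* 0 = "tinyint", 1 = "smallint" */
  int v = 5;

  switch (type) {
    case 0:
      sum_i8 += v;
      /* missing break: execution continues into case 1 */
    case 1:
      sum_i16 += v;
      break;
  }

  /* prints sum_i8=5 sum_i16=5 -- the smallint sum is corrupted */
  printf("sum_i8=%lld sum_i16=%lld\n", sum_i8, sum_i16);
  return 0;
}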
@@ -16,23 +16,28 @@
 #define _DEFAULT_SOURCE
 #include "vnd.h"
 
+#define BATCH_DISABLE 1
+
 static inline bool vnodeIsMsgBlock(tmsg_t type) {
   return (type == TDMT_VND_CREATE_TABLE) || (type == TDMT_VND_CREATE_TABLE) || (type == TDMT_VND_CREATE_TABLE) ||
-         (type == TDMT_VND_ALTER_TABLE) || (type == TDMT_VND_DROP_TABLE) || (type == TDMT_VND_UPDATE_TAG_VAL);
+         (type == TDMT_VND_ALTER_TABLE) || (type == TDMT_VND_DROP_TABLE) || (type == TDMT_VND_UPDATE_TAG_VAL) ||
+         (type == TDMT_VND_ALTER_REPLICA);
 }
 
 static inline bool vnodeIsMsgWeak(tmsg_t type) { return false; }
 
 static inline void vnodeWaitBlockMsg(SVnode *pVnode, const SRpcMsg *pMsg) {
   if (vnodeIsMsgBlock(pMsg->msgType)) {
-    vTrace("vgId:%d, msg:%p wait block, type:%s", pVnode->config.vgId, pMsg, TMSG_INFO(pMsg->msgType));
+    const STraceId *trace = &pMsg->info.traceId;
+    vGTrace("vgId:%d, msg:%p wait block, type:%s", pVnode->config.vgId, pMsg, TMSG_INFO(pMsg->msgType));
     tsem_wait(&pVnode->syncSem);
   }
 }
 
 static inline void vnodePostBlockMsg(SVnode *pVnode, const SRpcMsg *pMsg) {
   if (vnodeIsMsgBlock(pMsg->msgType)) {
-    vTrace("vgId:%d, msg:%p post block, type:%s", pVnode->config.vgId, pMsg, TMSG_INFO(pMsg->msgType));
+    const STraceId *trace = &pMsg->info.traceId;
+    vGTrace("vgId:%d, msg:%p post block, type:%s", pVnode->config.vgId, pMsg, TMSG_INFO(pMsg->msgType));
     tsem_post(&pVnode->syncSem);
   }
 }
@@ -124,60 +129,147 @@ void vnodeRedirectRpcMsg(SVnode *pVnode, SRpcMsg *pMsg) {
   tmsgSendRedirectRsp(&rsp, &newEpSet);
 }
 
+static void inline vnodeHandleWriteMsg(SVnode *pVnode, SRpcMsg *pMsg) {
+  SRpcMsg rsp = {.code = pMsg->code, .info = pMsg->info};
+  if (vnodeProcessWriteMsg(pVnode, pMsg, pMsg->info.conn.applyIndex, &rsp) < 0) {
+    rsp.code = terrno;
+    const STraceId *trace = &pMsg->info.traceId;
+    vGError("vgId:%d, msg:%p failed to apply right now since %s", pVnode->config.vgId, pMsg, terrstr());
+  }
+  if (rsp.info.handle != NULL) {
+    tmsgSendRsp(&rsp);
+  }
+}
+
+static void vnodeHandleProposeError(SVnode *pVnode, SRpcMsg *pMsg, int32_t code) {
+  if (code == TSDB_CODE_SYN_NOT_LEADER) {
+    vnodeRedirectRpcMsg(pVnode, pMsg);
+  } else {
+    const STraceId *trace = &pMsg->info.traceId;
+    vGError("vgId:%d, msg:%p failed to propose since %s, code:0x%x", pVnode->config.vgId, pMsg, tstrerror(code), code);
+    SRpcMsg rsp = {.code = code, .info = pMsg->info};
+    if (rsp.info.handle != NULL) {
+      tmsgSendRsp(&rsp);
+    }
+  }
+}
+
+static void vnodeHandleAlterReplicaReq(SVnode *pVnode, SRpcMsg *pMsg) {
+  int32_t code = vnodeProcessAlterReplicaReq(pVnode, pMsg);
+
+  if (code > 0) {
+    ASSERT(0);
+  } else if (code == 0) {
+    vnodeWaitBlockMsg(pVnode, pMsg);
+  } else {
+    if (terrno != 0) code = terrno;
+    vnodeHandleProposeError(pVnode, pMsg, code);
+  }
+
+  const STraceId *trace = &pMsg->info.traceId;
+  vGTrace("vgId:%d, msg:%p is freed, code:0x%x", pVnode->config.vgId, pMsg, code);
+  rpcFreeCont(pMsg->pCont);
+  taosFreeQitem(pMsg);
+}
+
+static void inline vnodeProposeBatchMsg(SVnode *pVnode, SRpcMsg **pMsgArr, bool *pIsWeakArr, int32_t *arrSize) {
+  if (*arrSize <= 0) return;
+
+#if BATCH_DISABLE
+  int32_t code = syncPropose(pVnode->sync, pMsgArr[0], pIsWeakArr[0]);
+#else
+  int32_t code = syncProposeBatch(pVnode->sync, pMsgArr, pIsWeakArr, *arrSize);
+#endif
+
+  if (code > 0) {
+    for (int32_t i = 0; i < *arrSize; ++i) {
+      vnodeHandleWriteMsg(pVnode, pMsgArr[i]);
+    }
+  } else if (code == 0) {
+    vnodeWaitBlockMsg(pVnode, pMsgArr[*arrSize - 1]);
+  } else {
+    if (terrno != 0) code = terrno;
+    for (int32_t i = 0; i < *arrSize; ++i) {
+      vnodeHandleProposeError(pVnode, pMsgArr[i], code);
+    }
+  }
+
+  for (int32_t i = 0; i < *arrSize; ++i) {
+    SRpcMsg        *pMsg = pMsgArr[i];
+    const STraceId *trace = &pMsg->info.traceId;
+    vGTrace("vgId:%d, msg:%p is freed, code:0x%x", pVnode->config.vgId, pMsg, code);
+    rpcFreeCont(pMsg->pCont);
+    taosFreeQitem(pMsg);
+  }
+
+  *arrSize = 0;
+}
+
 void vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
   SVnode  *pVnode = pInfo->ahandle;
   int32_t  vgId = pVnode->config.vgId;
   int32_t  code = 0;
   SRpcMsg *pMsg = NULL;
 
+  int32_t   arrayPos = 0;
+  SRpcMsg **pMsgArr = taosMemoryCalloc(numOfMsgs, sizeof(SRpcMsg*));
+  bool     *pIsWeakArr = taosMemoryCalloc(numOfMsgs, sizeof(bool));
   vTrace("vgId:%d, get %d msgs from vnode-write queue", vgId, numOfMsgs);
 
-  for (int32_t m = 0; m < numOfMsgs; m++) {
+  for (int32_t msg = 0; msg < numOfMsgs; msg++) {
     if (taosGetQitem(qall, (void **)&pMsg) == 0) continue;
+    bool isWeak = vnodeIsMsgWeak(pMsg->msgType);
+    bool isBlock = vnodeIsMsgBlock(pMsg->msgType);
+
     const STraceId *trace = &pMsg->info.traceId;
-    vGTrace("vgId:%d, msg:%p get from vnode-write queue handle:%p", vgId, pMsg, pMsg->info.handle);
+    vGTrace("vgId:%d, msg:%p get from vnode-write queue, weak:%d block:%d msg:%d:%d pos:%d, handle:%p", vgId, pMsg,
+            isWeak, isBlock, msg, numOfMsgs, arrayPos, pMsg->info.handle);
+
+    if (!pVnode->restored) {
+      vGError("vgId:%d, msg:%p failed to process since not leader", vgId, pMsg);
+      terrno = TSDB_CODE_APP_NOT_READY;
+      vnodeHandleProposeError(pVnode, pMsg, TSDB_CODE_APP_NOT_READY);
+      rpcFreeCont(pMsg->pCont);
+      taosFreeQitem(pMsg);
+      continue;
+    }
+
+    if (pMsgArr == NULL || pIsWeakArr == NULL) {
+      vGError("vgId:%d, msg:%p failed to process since out of memory", vgId, pMsg);
+      terrno = TSDB_CODE_OUT_OF_MEMORY;
+      vnodeHandleProposeError(pVnode, pMsg, terrno);
+      rpcFreeCont(pMsg->pCont);
+      taosFreeQitem(pMsg);
+      continue;
+    }
 
     code = vnodePreProcessWriteMsg(pVnode, pMsg);
     if (code != 0) {
-      vError("vgId:%d, msg:%p failed to pre-process since %s", vgId, pMsg, terrstr());
-    } else {
-      if (pMsg->msgType == TDMT_VND_ALTER_REPLICA) {
-        code = vnodeProcessAlterReplicaReq(pVnode, pMsg);
-      } else {
-        code = syncPropose(pVnode->sync, pMsg, vnodeIsMsgWeak(pMsg->msgType));
-        if (code > 0) {
-          SRpcMsg rsp = {.code = pMsg->code, .info = pMsg->info};
-          if (vnodeProcessWriteMsg(pVnode, pMsg, pMsg->info.conn.applyIndex, &rsp) < 0) {
-            rsp.code = terrno;
-            vError("vgId:%d, msg:%p failed to apply right now since %s", vgId, pMsg, terrstr());
-          }
-          if (rsp.info.handle != NULL) {
-            tmsgSendRsp(&rsp);
-          }
-        } else if (code == 0) {
-          vnodeWaitBlockMsg(pVnode, pMsg);
-        } else {
-        }
-      }
-    }
+      vGError("vgId:%d, msg:%p failed to pre-process since %s", vgId, pMsg, terrstr());
+      rpcFreeCont(pMsg->pCont);
+      taosFreeQitem(pMsg);
+      continue;
+    }
 
-    if (code < 0) {
-      if (terrno == TSDB_CODE_SYN_NOT_LEADER) {
-        vnodeRedirectRpcMsg(pVnode, pMsg);
-      } else {
-        if (terrno != 0) code = terrno;
-        vError("vgId:%d, msg:%p failed to propose since %s, code:0x%x", vgId, pMsg, tstrerror(code), code);
-        SRpcMsg rsp = {.code = code, .info = pMsg->info};
-        if (rsp.info.handle != NULL) {
-          tmsgSendRsp(&rsp);
-        }
-      }
-    }
+    if (pMsg->msgType == TDMT_VND_ALTER_REPLICA) {
+      vnodeHandleAlterReplicaReq(pVnode, pMsg);
+      continue;
+    }
 
-    vGTrace("vgId:%d, msg:%p is freed, code:0x%x", vgId, pMsg, code);
-    rpcFreeCont(pMsg->pCont);
-    taosFreeQitem(pMsg);
+    if (isBlock || BATCH_DISABLE) {
+      vnodeProposeBatchMsg(pVnode, pMsgArr, pIsWeakArr, &arrayPos);
+    }
+
+    pMsgArr[arrayPos] = pMsg;
+    pIsWeakArr[arrayPos] = isWeak;
+    arrayPos++;
+
+    if (isBlock || msg == numOfMsgs - 1 || BATCH_DISABLE) {
+      vnodeProposeBatchMsg(pVnode, pMsgArr, pIsWeakArr, &arrayPos);
+    }
   }
+
+  taosMemoryFree(pMsgArr);
+  taosMemoryFree(pIsWeakArr);
 }
 
 void vnodeApplyWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
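Illustrative sketch (not TDengine source; item/flag names are hypothetical) of the accumulate-then-flush shape introduced by vnodeProposeBatchMsg above: a blocking item never shares a batch with earlier items, so the batch is flushed both before and after it is queued, and the last item always flushes.

#include <stdbool.h>
#include <stdio.h>

#define BATCH_DISABLE 0

static void flush(int *batch, int *n) {
  if (*n <= 0) return;
  printf("flush %d item(s):", *n);
  for (int i = 0; i < *n; ++i) printf(" %d", batch[i]);
  printf("\n");
  *n = 0; /* reset for the next batch */
}

int main(void) {
  int  items[]    = {1, 2, 3, 4, 5};
  bool blocking[] = {false, false, true, false, false};
  int  numItems = 5;

  int batch[5];
  int n = 0;

  for (int i = 0; i < numItems; ++i) {
    /* a blocking item must not share a batch with earlier items */
    if (blocking[i] || BATCH_DISABLE) flush(batch, &n);

    batch[n++] = items[i];

    /* flush after a blocking item, at the end, or when batching is disabled */
    if (blocking[i] || i == numItems - 1 || BATCH_DISABLE) flush(batch, &n);
  }
  return 0;
}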
@@ -527,6 +619,12 @@ static void vnodeLeaderTransfer(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsm
   SVnode *pVnode = pFsm->data;
 }
 
+static void vnodeRestoreFinish(struct SSyncFSM *pFsm) {
+  SVnode *pVnode = pFsm->data;
+  pVnode->restored = true;
+  vDebug("vgId:%d, sync restore finished", pVnode->config.vgId);
+}
+
 static SSyncFSM *vnodeSyncMakeFsm(SVnode *pVnode) {
   SSyncFSM *pFsm = taosMemoryCalloc(1, sizeof(SSyncFSM));
   pFsm->data = pVnode;
@@ -534,7 +632,7 @@ static SSyncFSM *vnodeSyncMakeFsm(SVnode *pVnode) {
   pFsm->FpPreCommitCb = vnodeSyncPreCommitMsg;
   pFsm->FpRollBackCb = vnodeSyncRollBackMsg;
   pFsm->FpGetSnapshotInfo = vnodeSyncGetSnapshot;
-  pFsm->FpRestoreFinishCb = NULL;
+  pFsm->FpRestoreFinishCb = vnodeRestoreFinish;
   pFsm->FpLeaderTransferCb = vnodeLeaderTransfer;
   pFsm->FpReConfigCb = vnodeSyncReconfig;
   pFsm->FpSnapshotStartRead = vnodeSnapshotStartRead;
@@ -588,11 +686,10 @@ bool vnodeIsLeader(SVnode *pVnode) {
     return false;
   }
 
-  // todo
-  // if (!pVnode->restored) {
-  //   terrno = TSDB_CODE_APP_NOT_READY;
-  //   return false;
-  // }
+  if (!pVnode->restored) {
+    terrno = TSDB_CODE_APP_NOT_READY;
+    return false;
+  }
 
   return true;
 }
@@ -135,7 +135,7 @@ int32_t qExplainGenerateResChildren(SPhysiNode *pNode, SExplainGroup *group, SNo
       break;
     }
     case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN: {
-      SJoinPhysiNode *pJoinNode = (SJoinPhysiNode *)pNode;
+      SSortMergeJoinPhysiNode *pJoinNode = (SSortMergeJoinPhysiNode *)pNode;
       pPhysiChildren = pJoinNode->node.pChildren;
       break;
     }
@@ -434,7 +434,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
     case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN: {
       STableScanPhysiNode *pTblScanNode = (STableScanPhysiNode *)pNode;
       EXPLAIN_ROW_NEW(level,
-                      QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN == pNode->type ? EXPLAIN_TBL_MERGE_SCAN_FORMAT : EXPLAIN_TBL_SCAN_FORMAT,
+                      QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN == pNode->type ? EXPLAIN_TBL_MERGE_SCAN_FORMAT
+                                                                               : EXPLAIN_TBL_SCAN_FORMAT,
                       pTblScanNode->scan.tableName.tname);
       EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT);
       if (pResNode->pExecInfo) {
|
@ -551,7 +552,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
|||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
if (pSTblScanNode->scan.pScanPseudoCols) {
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_PSEUDO_COLUMNS_FORMAT, pSTblScanNode->scan.pScanPseudoCols->length);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
}
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pSTblScanNode->scan.node.pOutputDataBlockDesc->totalRowSize);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
|
@ -613,7 +614,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
|||
break;
|
||||
}
|
||||
case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN: {
|
||||
SJoinPhysiNode *pJoinNode = (SJoinPhysiNode *)pNode;
|
||||
SSortMergeJoinPhysiNode *pJoinNode = (SSortMergeJoinPhysiNode *)pNode;
|
||||
EXPLAIN_ROW_NEW(level, EXPLAIN_JOIN_FORMAT, EXPLAIN_JOIN_STRING(pJoinNode->joinType));
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT);
|
||||
if (pResNode->pExecInfo) {
|
||||
|
@ -1180,7 +1181,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
|||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
if (pDistScanNode->pScanPseudoCols) {
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_PSEUDO_COLUMNS_FORMAT, pDistScanNode->pScanPseudoCols->length);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
}
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pDistScanNode->node.pOutputDataBlockDesc->totalRowSize);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
|
@ -1367,7 +1368,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
|||
EXPLAIN_ROW_APPEND(EXPLAIN_FUNCTIONS_FORMAT, pInterpNode->pFuncs->length);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
}
|
||||
|
||||
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_MODE_FORMAT, nodesGetFillModeString(pInterpNode->fillMode));
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
|
||||
|
@@ -1419,7 +1420,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
         }
       }
       break;
-      }
+    }
     default:
       qError("not supported physical node type %d", pNode->type);
       return TSDB_CODE_QRY_APP_ERROR;
@@ -320,6 +320,47 @@ typedef struct STableScanInfo {
   int8_t  noTable;
 } STableScanInfo;
 
+typedef struct STableMergeScanInfo {
+  STableListInfo* tableListInfo;
+  int32_t         tableStartIndex;
+  int32_t         tableEndIndex;
+  bool            hasGroupId;
+  uint64_t        groupId;
+  SArray*         dataReaders;  // array of tsdbReaderT*
+  SReadHandle     readHandle;
+  int32_t         bufPageSize;
+  uint32_t        sortBufSize;  // max buffer size for in-memory sort
+  SArray*         pSortInfo;
+  SSortHandle*    pSortHandle;
+
+  SSDataBlock*    pSortInputBlock;
+  int64_t         startTs;  // sort start time
+  SArray*         sortSourceParams;
+
+  SFileBlockLoadRecorder readRecorder;
+  int64_t                numOfRows;
+  SScanInfo              scanInfo;
+  int32_t                scanTimes;
+  SNode*                 pFilterNode;  // filter info, which is push down by optimizer
+  SqlFunctionCtx*        pCtx;  // which belongs to the direct upstream operator operator query context
+  SResultRowInfo*        pResultRowInfo;
+  int32_t*               rowEntryInfoOffset;
+  SExprInfo*             pExpr;
+  SSDataBlock*           pResBlock;
+  SArray*                pColMatchInfo;
+  int32_t                numOfOutput;
+
+  SExprSupp       pseudoSup;
+
+  SQueryTableDataCond cond;
+  int32_t             scanFlag;  // table scan flag to denote if it is a repeat/reverse/main scan
+  int32_t             dataBlockLoadFlag;
+  // if the upstream is an interval operator, the interval info is also kept here to get the time
+  // window to check if current data block needs to be loaded.
+  SInterval       interval;
+  SSampleExecInfo sample;  // sample execution info
+} STableMergeScanInfo;
+
 typedef struct STagScanInfo {
   SColumnInfo *pCols;
   SSDataBlock *pRes;
@@ -886,7 +927,7 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
 
 SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pNode, SExecTaskInfo* pTaskInfo);
 
-SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SJoinPhysiNode* pJoinNode,
+SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SSortMergeJoinPhysiNode* pJoinNode,
                                            SExecTaskInfo* pTaskInfo);
 
 SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream,
|
@ -416,6 +416,7 @@ int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t* useconds) {
|
|||
}
|
||||
|
||||
if (isTaskKilled(pTaskInfo)) {
|
||||
atomic_store_64(&pTaskInfo->owner, 0);
|
||||
qDebug("%s already killed, abort", GET_TASKID(pTaskInfo));
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
|
|
@ -1325,7 +1325,7 @@ void doFilter(const SNode* pFilterNode, SSDataBlock* pBlock, const SArray* pColM
|
|||
extractQualifiedTupleByFilterResult(pBlock, rowRes, keep);
|
||||
|
||||
if (pColMatchInfo != NULL) {
|
||||
for(int32_t i = 0; i < taosArrayGetSize(pColMatchInfo); ++i) {
|
||||
for (int32_t i = 0; i < taosArrayGetSize(pColMatchInfo); ++i) {
|
||||
SColMatchInfo* pInfo = taosArrayGet(pColMatchInfo, i);
|
||||
if (pInfo->colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
|
||||
SColumnInfoData* pColData = taosArrayGet(pBlock->pDataBlock, pInfo->targetSlotId);
|
||||
|
@@ -1646,10 +1646,10 @@ void queryCostStatis(SExecTaskInfo* pTaskInfo) {
   SFileBlockLoadRecorder* pRecorder = pSummary->pRecoder;
   if (pSummary->pRecoder != NULL) {
     qDebug(
-        "%s :cost summary: elapsed time:%.2f ms, total blocks:%d, load block SMA:%d, load data block:%d, total rows:%"
-        PRId64 ", check rows:%" PRId64, GET_TASKID(pTaskInfo), pSummary->elapsedTime / 1000.0,
-        pRecorder->totalBlocks, pRecorder->loadBlockStatis, pRecorder->loadBlocks, pRecorder->totalRows,
-        pRecorder->totalCheckedRows);
+        "%s :cost summary: elapsed time:%.2f ms, total blocks:%d, load block SMA:%d, load data block:%d, total "
+        "rows:%" PRId64 ", check rows:%" PRId64,
+        GET_TASKID(pTaskInfo), pSummary->elapsedTime / 1000.0, pRecorder->totalBlocks, pRecorder->loadBlockStatis,
+        pRecorder->loadBlocks, pRecorder->totalRows, pRecorder->totalCheckedRows);
   }
 
   // qDebug("QInfo:0x%"PRIx64" :cost summary: winResPool size:%.2f Kb, numOfWin:%"PRId64", tableInfoSize:%.2f Kb,
@@ -2783,11 +2783,16 @@ int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scan
     *order = TSDB_ORDER_ASC;
     *scanFlag = MAIN_SCAN;
     return TSDB_CODE_SUCCESS;
-  } else if (type == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN || type == QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN) {
+  } else if (type == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
     STableScanInfo* pTableScanInfo = pOperator->info;
     *order = pTableScanInfo->cond.order;
     *scanFlag = pTableScanInfo->scanFlag;
     return TSDB_CODE_SUCCESS;
+  } else if (type == QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN) {
+    STableMergeScanInfo* pTableScanInfo = pOperator->info;
+    *order = pTableScanInfo->cond.order;
+    *scanFlag = pTableScanInfo->scanFlag;
+    return TSDB_CODE_SUCCESS;
   } else {
     if (pOperator->pDownstream == NULL || pOperator->pDownstream[0] == NULL) {
       return TSDB_CODE_INVALID_PARA;
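Illustrative sketch (not TDengine source; types and fields are hypothetical) of why the hunk above splits TABLE_SCAN from TABLE_MERGE_SCAN: pOperator->info is a void* whose layout depends on the operator type, so each type must be cast to its own struct before reading fields.

#include <stdio.h>

typedef enum { OP_TABLE_SCAN, OP_TABLE_MERGE_SCAN } OpType;

typedef struct { int order; int scanFlag; } TableScanInfo;
typedef struct { int sortBufSize; int order; int scanFlag; } TableMergeScanInfo; /* different layout */

typedef struct { OpType type; void *info; } Operator;

static int get_order(const Operator *op) {
  /* casting to the wrong struct would read the wrong field */
  switch (op->type) {
    case OP_TABLE_SCAN:       return ((const TableScanInfo *)op->info)->order;
    case OP_TABLE_MERGE_SCAN: return ((const TableMergeScanInfo *)op->info)->order;
  }
  return -1;
}

int main(void) {
  TableMergeScanInfo m = {.sortBufSize = 1024, .order = 1, .scanFlag = 0};
  Operator op = {OP_TABLE_MERGE_SCAN, &m};
  printf("order=%d\n", get_order(&op)); /* 1; a TableScanInfo cast would read 1024 */
  return 0;
}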
@@ -3728,7 +3733,7 @@ SSchemaWrapper* extractQueriedColumnSchema(SScanPhysiNode* pScanNode) {
   }
 
   // this the tags and pseudo function columns, we only keep the tag columns
-  for(int32_t i = 0; i < numOfTags; ++i) {
+  for (int32_t i = 0; i < numOfTags; ++i) {
     STargetNode* pNode = (STargetNode*)nodesListGetNode(pScanNode->pScanPseudoCols, i);
 
     int32_t type = nodeType(pNode->pExpr);
@@ -3844,7 +3849,7 @@ int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle,
   int32_t groupNum = 0;
   for (int32_t i = 0; i < taosArrayGetSize(pTableListInfo->pTableList); i++) {
     STableKeyInfo* info = taosArrayGet(pTableListInfo->pTableList, i);
-    int32_t code = getGroupIdFromTagsVal(pHandle->meta, info->uid, group, keyBuf, &info->groupId);
+    int32_t        code = getGroupIdFromTagsVal(pHandle->meta, info->uid, group, keyBuf, &info->groupId);
     if (code != TSDB_CODE_SUCCESS) {
       return code;
     }
@@ -4165,7 +4170,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
   } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE == type) {
     pOptr = createStreamStateAggOperatorInfo(ops[0], pPhyNode, pTaskInfo);
   } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN == type) {
-    pOptr = createMergeJoinOperatorInfo(ops, size, (SJoinPhysiNode*)pPhyNode, pTaskInfo);
+    pOptr = createMergeJoinOperatorInfo(ops, size, (SSortMergeJoinPhysiNode*)pPhyNode, pTaskInfo);
   } else if (QUERY_NODE_PHYSICAL_PLAN_FILL == type) {
     pOptr = createFillOperatorInfo(ops[0], (SFillPhysiNode*)pPhyNode, pTaskInfo);
   } else if (QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC == type) {
@@ -28,30 +28,30 @@ static SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator);
 static void         destroyMergeJoinOperator(void* param, int32_t numOfOutput);
 static void         extractTimeCondition(SJoinOperatorInfo* Info, SLogicConditionNode* pLogicConditionNode);
 
-SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SJoinPhysiNode* pJoinNode,
-                                           SExecTaskInfo* pTaskInfo) {
+SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream,
+                                           SSortMergeJoinPhysiNode* pJoinNode, SExecTaskInfo* pTaskInfo) {
   SJoinOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SJoinOperatorInfo));
   SOperatorInfo*     pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
   if (pOperator == NULL || pInfo == NULL) {
     goto _error;
   }
 
   SSDataBlock* pResBlock = createResDataBlock(pJoinNode->node.pOutputDataBlockDesc);
 
   int32_t    numOfCols = 0;
   SExprInfo* pExprInfo = createExprInfo(pJoinNode->pTargets, NULL, &numOfCols);
 
   initResultSizeInfo(&pOperator->resultInfo, 4096);
 
   pInfo->pRes = pResBlock;
   pOperator->name = "MergeJoinOperator";
   pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN;
   pOperator->blocking = false;
   pOperator->status = OP_NOT_OPENED;
   pOperator->exprSupp.pExprInfo = pExprInfo;
   pOperator->exprSupp.numOfExprs = numOfCols;
   pOperator->info = pInfo;
   pOperator->pTaskInfo = pTaskInfo;
 
   SNode* pMergeCondition = pJoinNode->pMergeCondition;
   if (nodeType(pMergeCondition) == QUERY_NODE_OPERATOR) {
@@ -104,7 +104,7 @@ void setJoinColumnInfo(SColumnInfo* pColumn, const SColumnNode* pColumnNode) {
 void destroyMergeJoinOperator(void* param, int32_t numOfOutput) {
   SJoinOperatorInfo* pJoinOperator = (SJoinOperatorInfo*)param;
-  nodesDestroyNode(pJoinOperator->pCondAfterMerge);
+
 
   taosMemoryFreeClear(param);
 }
 
@@ -69,7 +69,7 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhys
 
   // todo remove it soon
   if (pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM) {
-    pInfo->mergeDataBlocks = true;
+    pInfo->mergeDataBlocks = false;
   }
 
   int32_t numOfRows = 4096;
@@ -274,7 +274,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca
     qDebug("%s data block filter out, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo),
            pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
   } else {
-    qDebug("%s data block filter out, elapsed time:%"PRId64, GET_TASKID(pTaskInfo), (et - st));
+    qDebug("%s data block filter out, elapsed time:%" PRId64, GET_TASKID(pTaskInfo), (et - st));
   }
 
   return TSDB_CODE_SUCCESS;
@@ -1838,11 +1838,14 @@ static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) {
       int8_t tagType = smr.me.stbEntry.schemaTag.pSchema[i].type;
       pColInfoData = taosArrayGet(p->pDataBlock, 4);
       char tagTypeStr[VARSTR_HEADER_SIZE + 32];
-      int tagTypeLen = sprintf(varDataVal(tagTypeStr), "%s", tDataTypes[tagType].name);
+      int  tagTypeLen = sprintf(varDataVal(tagTypeStr), "%s", tDataTypes[tagType].name);
       if (tagType == TSDB_DATA_TYPE_VARCHAR) {
-        tagTypeLen += sprintf(varDataVal(tagTypeStr) + tagTypeLen, "(%d)", (int32_t)(smr.me.stbEntry.schemaTag.pSchema[i].bytes - VARSTR_HEADER_SIZE));
+        tagTypeLen += sprintf(varDataVal(tagTypeStr) + tagTypeLen, "(%d)",
+                              (int32_t)(smr.me.stbEntry.schemaTag.pSchema[i].bytes - VARSTR_HEADER_SIZE));
       } else if (tagType == TSDB_DATA_TYPE_NCHAR) {
-        tagTypeLen += sprintf(varDataVal(tagTypeStr) + tagTypeLen, "(%d)", (int32_t)((smr.me.stbEntry.schemaTag.pSchema[i].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE));
+        tagTypeLen +=
+            sprintf(varDataVal(tagTypeStr) + tagTypeLen, "(%d)",
+                    (int32_t)((smr.me.stbEntry.schemaTag.pSchema[i].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE));
       }
       varDataSetLen(tagTypeStr, tagTypeLen);
       colDataAppend(pColInfoData, numOfRows, (char*)tagTypeStr, false);
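Illustrative sketch (not TDengine source; buffer sizes are arbitrary) of the sprintf-append idiom the reformatted code above uses to build a type string such as "VARCHAR(16)": sprintf returns the number of characters written, so the next write starts at buf + len.

#include <stdio.h>

int main(void) {
  char buf[32];
  int  len = sprintf(buf, "%s", "VARCHAR"); /* returns chars written */
  len += sprintf(buf + len, "(%d)", 16);    /* append at the current end */
  printf("%s (len=%d)\n", buf, len);        /* VARCHAR(16) (len=11) */
  return 0;
}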
@@ -2527,49 +2530,6 @@ _error:
   return NULL;
 }
 
-typedef struct STableMergeScanInfo {
-  STableListInfo* tableListInfo;
-  int32_t         tableStartIndex;
-  int32_t         tableEndIndex;
-  bool            hasGroupId;
-  uint64_t        groupId;
-  SArray*         dataReaders;  // array of tsdbReaderT*
-  SReadHandle     readHandle;
-  int32_t         bufPageSize;
-  uint32_t        sortBufSize;  // max buffer size for in-memory sort
-  SArray*         pSortInfo;
-  SSortHandle*    pSortHandle;
-
-  SSDataBlock*    pSortInputBlock;
-  int64_t         startTs;  // sort start time
-  SArray*         sortSourceParams;
-
-  SFileBlockLoadRecorder readRecorder;
-  int64_t                numOfRows;
-  SScanInfo              scanInfo;
-  int32_t                scanTimes;
-  SNode*                 pFilterNode;  // filter info, which is push down by optimizer
-  SqlFunctionCtx*        pCtx;  // which belongs to the direct upstream operator operator query context
-  SResultRowInfo*        pResultRowInfo;
-  int32_t*               rowEntryInfoOffset;
-  SExprInfo*             pExpr;
-  SSDataBlock*           pResBlock;
-  SArray*                pColMatchInfo;
-  int32_t                numOfOutput;
-
-  SExprInfo*      pPseudoExpr;
-  int32_t         numOfPseudoExpr;
-  SqlFunctionCtx* pPseudoCtx;
-
-  SQueryTableDataCond cond;
-  int32_t             scanFlag;  // table scan flag to denote if it is a repeat/reverse/main scan
-  int32_t             dataBlockLoadFlag;
-  // if the upstream is an interval operator, the interval info is also kept here to get the time
-  // window to check if current data block needs to be loaded.
-  SInterval       interval;
-  SSampleExecInfo sample;  // sample execution info
-} STableMergeScanInfo;
-
 int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags, bool groupSort, SReadHandle* pHandle,
                                 STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond,
                                 const char* idStr) {
@@ -2700,9 +2660,9 @@ static int32_t loadDataBlockFromOneTable(SOperatorInfo* pOperator, STableMergeSc
   relocateColumnData(pBlock, pTableScanInfo->pColMatchInfo, pCols, true);
 
   // currently only the tbname pseudo column
-  if (pTableScanInfo->numOfPseudoExpr > 0) {
-    int32_t code = addTagPseudoColumnData(&pTableScanInfo->readHandle, pTableScanInfo->pPseudoExpr,
-                                          pTableScanInfo->numOfPseudoExpr, pBlock, GET_TASKID(pTaskInfo));
+  if (pTableScanInfo->pseudoSup.numOfExprs > 0) {
+    int32_t code = addTagPseudoColumnData(&pTableScanInfo->readHandle, pTableScanInfo->pseudoSup.pExprInfo,
+                                          pTableScanInfo->pseudoSup.numOfExprs, pBlock, GET_TASKID(pTaskInfo));
     if (code != TSDB_CODE_SUCCESS) {
       longjmp(pTaskInfo->env, code);
     }
@@ -2869,29 +2829,31 @@ int32_t stopGroupTableMergeScan(SOperatorInfo* pOperator) {
   STableMergeScanInfo* pInfo = pOperator->info;
   SExecTaskInfo*       pTaskInfo = pOperator->pTaskInfo;
 
-  tsortDestroySortHandle(pInfo->pSortHandle);
+  size_t numReaders = taosArrayGetSize(pInfo->dataReaders);
 
   for (int32_t i = 0; i < numReaders; ++i) {
     STableMergeScanSortSourceParam* param = taosArrayGet(pInfo->sortSourceParams, i);
     blockDataDestroy(param->inputBlock);
   }
   taosArrayClear(pInfo->sortSourceParams);
 
-  for (int32_t i = 0; i < taosArrayGetSize(pInfo->dataReaders); ++i) {
+  tsortDestroySortHandle(pInfo->pSortHandle);
+
+  for (int32_t i = 0; i < numReaders; ++i) {
     STsdbReader* reader = taosArrayGetP(pInfo->dataReaders, i);
     tsdbReaderClose(reader);
   }
 
   taosArrayDestroy(pInfo->dataReaders);
+  pInfo->dataReaders = NULL;
   return TSDB_CODE_SUCCESS;
 }
 
-SSDataBlock* getSortedTableMergeScanBlockData(SSortHandle* pHandle, int32_t capacity, SOperatorInfo* pOperator) {
+SSDataBlock* getSortedTableMergeScanBlockData(SSortHandle* pHandle, SSDataBlock* pResBlock, int32_t capacity, SOperatorInfo* pOperator) {
   STableMergeScanInfo* pInfo = pOperator->info;
   SExecTaskInfo*       pTaskInfo = pOperator->pTaskInfo;
 
-  SSDataBlock* p = tsortGetSortedDataBlock(pHandle);
-  if (p == NULL) {
-    return NULL;
-  }
-
-  blockDataEnsureCapacity(p, capacity);
+  blockDataCleanup(pResBlock);
+  blockDataEnsureCapacity(pResBlock, capacity);
 
   while (1) {
     STupleHandle* pTupleHandle = tsortNextTuple(pHandle);
@@ -2899,14 +2861,15 @@ SSDataBlock* getSortedTableMergeScanBlockData(SSortHandle* pHandle, int32_t capa
       break;
     }
 
-    appendOneRowToDataBlock(p, pTupleHandle);
-    if (p->info.rows >= capacity) {
+    appendOneRowToDataBlock(pResBlock, pTupleHandle);
+    if (pResBlock->info.rows >= capacity) {
       break;
     }
   }
 
-  qDebug("%s get sorted row blocks, rows:%d", GET_TASKID(pTaskInfo), p->info.rows);
-  return (p->info.rows > 0) ? p : NULL;
+
+  qDebug("%s get sorted row blocks, rows:%d", GET_TASKID(pTaskInfo), pResBlock->info.rows);
+  return (pResBlock->info.rows > 0) ? pResBlock : NULL;
 }
 
 SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) {
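Illustrative sketch (not TDengine source; Block and fill_sorted are hypothetical) of the pResBlock change above: instead of allocating a fresh result block on every call, the caller owns one buffer that is cleared and re-filled, so repeated calls reuse the same storage.

#include <stdio.h>
#include <stdlib.h>

typedef struct { int *rows; int nRows; int capacity; } Block;

static void block_cleanup(Block *b) { b->nRows = 0; } /* keep storage, drop rows */

static Block *fill_sorted(Block *out, int capacity, int start) {
  block_cleanup(out);
  if (out->capacity < capacity) { /* grow once, then reuse */
    out->rows = realloc(out->rows, capacity * sizeof(int));
    out->capacity = capacity;
  }
  for (int i = 0; i < capacity; ++i) out->rows[out->nRows++] = start + i;
  return out->nRows > 0 ? out : NULL;
}

int main(void) {
  Block res = {0};
  for (int call = 0; call < 3; ++call) { /* same buffer across calls */
    Block *b = fill_sorted(&res, 4, call * 10);
    if (b) printf("call %d: first=%d rows=%d\n", call, b->rows[0], b->nRows);
  }
  free(res.rows);
  return 0;
}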
@@ -2935,7 +2898,7 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) {
   }
   SSDataBlock* pBlock = NULL;
   while (pInfo->tableStartIndex < tableListSize) {
-    pBlock = getSortedTableMergeScanBlockData(pInfo->pSortHandle, pOperator->resultInfo.capacity, pOperator);
+    pBlock = getSortedTableMergeScanBlockData(pInfo->pSortHandle, pInfo->pResBlock, pOperator->resultInfo.capacity, pOperator);
     if (pBlock != NULL) {
       pBlock->info.groupId = pInfo->groupId;
       pOperator->resultInfo.totalRows += pBlock->info.rows;
@@ -2959,6 +2922,7 @@ void destroyTableMergeScanOperatorInfo(void* param, int32_t numOfOutput) {
   STableMergeScanInfo* pTableScanInfo = (STableMergeScanInfo*)param;
   cleanupQueryTableDataCond(&pTableScanInfo->cond);
+  taosArrayDestroy(pTableScanInfo->sortSourceParams);
 
   for (int32_t i = 0; i < taosArrayGetSize(pTableScanInfo->dataReaders); ++i) {
     STsdbReader* reader = taosArrayGetP(pTableScanInfo->dataReaders, i);
@@ -2974,7 +2938,9 @@ void destroyTableMergeScanOperatorInfo(void* param, int32_t numOfOutput) {
   pTableScanInfo->pSortInputBlock = blockDataDestroy(pTableScanInfo->pSortInputBlock);
 
   taosArrayDestroy(pTableScanInfo->pSortInfo);
+  cleanupExprSupp(&pTableScanInfo->pseudoSup);
+
   taosMemoryFreeClear(pTableScanInfo->rowEntryInfoOffset);
   taosMemoryFreeClear(param);
 }
 
@@ -3031,8 +2997,9 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN
   }
 
   if (pTableScanNode->scan.pScanPseudoCols != NULL) {
-    pInfo->pPseudoExpr = createExprInfo(pTableScanNode->scan.pScanPseudoCols, NULL, &pInfo->numOfPseudoExpr);
-    pInfo->pPseudoCtx = createSqlFunctionCtx(pInfo->pPseudoExpr, pInfo->numOfPseudoExpr, &pInfo->rowEntryInfoOffset);
+    SExprSupp* pSup = &pInfo->pseudoSup;
+    pSup->pExprInfo = createExprInfo(pTableScanNode->scan.pScanPseudoCols, NULL, &pSup->numOfExprs);
+    pSup->pCtx = createSqlFunctionCtx(pSup->pExprInfo, pSup->numOfExprs, &pSup->rowEntryInfoOffset);
   }
 
   pInfo->scanInfo = (SScanInfo){.numOfAsc = pTableScanNode->scanSeq[0], .numOfDesc = pTableScanNode->scanSeq[1]};
|
@ -1542,7 +1542,7 @@ static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
|
|||
}
|
||||
|
||||
uint8_t resType;
|
||||
if (IS_SIGNED_NUMERIC_TYPE(colType) || TSDB_DATA_TYPE_BOOL == colType) {
|
||||
if (IS_SIGNED_NUMERIC_TYPE(colType) || TSDB_DATA_TYPE_BOOL == colType || TSDB_DATA_TYPE_TIMESTAMP == colType) {
|
||||
resType = TSDB_DATA_TYPE_BIGINT;
|
||||
} else {
|
||||
resType = TSDB_DATA_TYPE_DOUBLE;
|
||||
|
|
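Illustrative sketch (not TDengine source) of the translateDiff change above: the difference of two epoch-millisecond timestamps is an exact signed integer, so TIMESTAMP input maps to a BIGINT result rather than a DOUBLE, which could lose precision for large epochs.

#include <inttypes.h>
#include <stdio.h>

int main(void) {
  int64_t ts1 = 1660000000000; /* epoch ms */
  int64_t ts2 = 1660000000500;
  int64_t diff = ts2 - ts1;    /* exact integer difference */
  printf("diff=%" PRId64 " ms\n", diff);
  return 0;
}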
|
@ -375,6 +375,7 @@ static int32_t logicJoinCopy(const SJoinLogicNode* pSrc, SJoinLogicNode* pDst) {
|
|||
CLONE_NODE_FIELD(pMergeCondition);
|
||||
CLONE_NODE_FIELD(pOnConditions);
|
||||
COPY_SCALAR_FIELD(isSingleTableJoin);
|
||||
COPY_SCALAR_FIELD(inputTsOrder);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@@ -440,6 +441,7 @@ static int32_t logicWindowCopy(const SWindowLogicNode* pSrc, SWindowLogicNode* p
   COPY_SCALAR_FIELD(watermark);
   COPY_SCALAR_FIELD(igExpired);
   COPY_SCALAR_FIELD(windowAlgo);
+  COPY_SCALAR_FIELD(inputTsOrder);
   return TSDB_CODE_SUCCESS;
 }
 
@@ -1717,7 +1717,7 @@ static const char* jkJoinPhysiPlanOnConditions = "OnConditions";
 static const char* jkJoinPhysiPlanTargets = "Targets";
 
 static int32_t physiJoinNodeToJson(const void* pObj, SJson* pJson) {
-  const SJoinPhysiNode* pNode = (const SJoinPhysiNode*)pObj;
+  const SSortMergeJoinPhysiNode* pNode = (const SSortMergeJoinPhysiNode*)pObj;
 
   int32_t code = physicPlanNodeToJson(pObj, pJson);
   if (TSDB_CODE_SUCCESS == code) {
@@ -1737,7 +1737,7 @@ static int32_t physiJoinNodeToJson(const void* pObj, SJson* pJson) {
 }
 
 static int32_t jsonToPhysiJoinNode(const SJson* pJson, void* pObj) {
-  SJoinPhysiNode* pNode = (SJoinPhysiNode*)pObj;
+  SSortMergeJoinPhysiNode* pNode = (SSortMergeJoinPhysiNode*)pObj;
 
   int32_t code = jsonToPhysicPlanNode(pJson, pObj);
   if (TSDB_CODE_SUCCESS == code) {
@@ -468,7 +468,7 @@ static EDealRes dispatchPhysiPlan(SNode* pNode, ETraversalOrder order, FNodeWalk
       break;
     }
     case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN: {
-      SJoinPhysiNode* pJoin = (SJoinPhysiNode*)pNode;
+      SSortMergeJoinPhysiNode* pJoin = (SSortMergeJoinPhysiNode*)pNode;
       res = walkPhysiNode((SPhysiNode*)pNode, order, walker, pContext);
       if (DEAL_RES_ERROR != res && DEAL_RES_END != res) {
         res = walkPhysiPlan(pJoin->pMergeCondition, order, walker, pContext);
@@ -287,7 +287,7 @@ SNode* nodesMakeNode(ENodeType type) {
     case QUERY_NODE_PHYSICAL_PLAN_PROJECT:
       return makeNode(type, sizeof(SProjectPhysiNode));
     case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN:
-      return makeNode(type, sizeof(SJoinPhysiNode));
+      return makeNode(type, sizeof(SSortMergeJoinPhysiNode));
     case QUERY_NODE_PHYSICAL_PLAN_HASH_AGG:
       return makeNode(type, sizeof(SAggPhysiNode));
     case QUERY_NODE_PHYSICAL_PLAN_EXCHANGE:
@@ -883,7 +883,7 @@ void nodesDestroyNode(SNode* pNode) {
       break;
     }
     case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN: {
-      SJoinPhysiNode* pPhyNode = (SJoinPhysiNode*)pNode;
+      SSortMergeJoinPhysiNode* pPhyNode = (SSortMergeJoinPhysiNode*)pNode;
       destroyPhysiNode((SPhysiNode*)pPhyNode);
       nodesDestroyNode(pPhyNode->pMergeCondition);
       nodesDestroyNode(pPhyNode->pOnConditions);
@@ -90,7 +90,7 @@ SNode* createValueNode(SAstCreateContext* pCxt, int32_t dataType, const SToken*
 SNode* createDurationValueNode(SAstCreateContext* pCxt, const SToken* pLiteral);
 SNode* createDefaultDatabaseCondValue(SAstCreateContext* pCxt);
 SNode* createPlaceholderValueNode(SAstCreateContext* pCxt, const SToken* pLiteral);
-SNode* setProjectionAlias(SAstCreateContext* pCxt, SNode* pNode, const SToken* pAlias);
+SNode* setProjectionAlias(SAstCreateContext* pCxt, SNode* pNode, SToken* pAlias);
 SNode* createLogicConditionNode(SAstCreateContext* pCxt, ELogicConditionType type, SNode* pParam1, SNode* pParam2);
 SNode* createOperatorNode(SAstCreateContext* pCxt, EOperatorType type, SNode* pLeft, SNode* pRight);
 SNode* createBetweenAnd(SAstCreateContext* pCxt, SNode* pExpr, SNode* pLeft, SNode* pRight);
@@ -527,6 +527,7 @@ SNode* createTempTableNode(SAstCreateContext* pCxt, SNode* pSubquery, const STok
   }
   if (QUERY_NODE_SELECT_STMT == nodeType(pSubquery)) {
     strcpy(((SSelectStmt*)pSubquery)->stmtName, tempTable->table.tableAlias);
+    ((SSelectStmt*)pSubquery)->isSubquery = true;
   } else if (QUERY_NODE_SET_OPERATOR == nodeType(pSubquery)) {
     strcpy(((SSetOperator*)pSubquery)->stmtName, tempTable->table.tableAlias);
   }
@@ -637,8 +638,9 @@ SNode* createInterpTimeRange(SAstCreateContext* pCxt, SNode* pStart, SNode* pEnd
   return createBetweenAnd(pCxt, createPrimaryKeyCol(pCxt), pStart, pEnd);
 }
 
-SNode* setProjectionAlias(SAstCreateContext* pCxt, SNode* pNode, const SToken* pAlias) {
+SNode* setProjectionAlias(SAstCreateContext* pCxt, SNode* pNode, SToken* pAlias) {
   CHECK_PARSER_STATUS(pCxt);
+  trimEscape(pAlias);
   int32_t len = TMIN(sizeof(((SExprNode*)pNode)->aliasName) - 1, pAlias->n);
   strncpy(((SExprNode*)pNode)->aliasName, pAlias->z, len);
   ((SExprNode*)pNode)->aliasName[len] = '\0';
@@ -739,12 +739,13 @@ static int32_t parseBoundColumns(SInsertParseContext* pCxt, SParsedDataColInfo*
   return TSDB_CODE_SUCCESS;
 }
 
-static void buildCreateTbReq(SVCreateTbReq* pTbReq, const char* tname, STag* pTag, int64_t suid, const char* sname, SArray* tagName, uint8_t tagNum) {
+static void buildCreateTbReq(SVCreateTbReq* pTbReq, const char* tname, STag* pTag, int64_t suid, const char* sname,
+                             SArray* tagName, uint8_t tagNum) {
   pTbReq->type = TD_CHILD_TABLE;
   pTbReq->name = strdup(tname);
   pTbReq->ctb.suid = suid;
   pTbReq->ctb.tagNum = tagNum;
-  if(sname) pTbReq->ctb.name = strdup(sname);
+  if (sname) pTbReq->ctb.name = strdup(sname);
   pTbReq->ctb.pTag = (uint8_t*)pTag;
   pTbReq->ctb.tagName = taosArrayDup(tagName);
   pTbReq->commentLen = -1;
@@ -969,7 +970,7 @@ static int32_t parseTagsClause(SInsertParseContext* pCxt, SSchema* pSchema, uint
     }
 
     SSchema* pTagSchema = &pSchema[pCxt->tags.boundColumns[i]];
-    char tmpTokenBuf[TSDB_MAX_BYTES_PER_ROW] = {0};  // todo this can be optimize with parse column
+    char     tmpTokenBuf[TSDB_MAX_BYTES_PER_ROW] = {0};  // todo this can be optimize with parse column
     code = checkAndTrimValue(&sToken, tmpTokenBuf, &pCxt->msg);
     if (code != TSDB_CODE_SUCCESS) {
       goto end;
@@ -1012,7 +1013,8 @@ static int32_t parseTagsClause(SInsertParseContext* pCxt, SSchema* pSchema, uint
     goto end;
   }
 
-  buildCreateTbReq(&pCxt->createTblReq, tName, pTag, pCxt->pTableMeta->suid, pCxt->sTableName, tagName, pCxt->pTableMeta->tableInfo.numOfTags);
+  buildCreateTbReq(&pCxt->createTblReq, tName, pTag, pCxt->pTableMeta->suid, pCxt->sTableName, tagName,
+                   pCxt->pTableMeta->tableInfo.numOfTags);
 
 end:
   for (int i = 0; i < taosArrayGetSize(pTagVals); ++i) {
@@ -1650,7 +1652,6 @@ static int32_t skipUsingClause(SInsertParseSyntaxCxt* pCxt) {
 static int32_t collectTableMetaKey(SInsertParseSyntaxCxt* pCxt, SToken* pTbToken) {
   SName name;
   CHECK_CODE(createSName(&name, pTbToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg));
-  CHECK_CODE(reserveDbCfgInCache(pCxt->pComCxt->acctId, name.dbname, pCxt->pMetaCache));
   CHECK_CODE(reserveUserAuthInCacheExt(pCxt->pComCxt->pUser, &name, AUTH_TYPE_WRITE, pCxt->pMetaCache));
   CHECK_CODE(reserveTableMetaInCacheExt(&name, pCxt->pMetaCache));
   CHECK_CODE(reserveTableVgroupInCacheExt(&name, pCxt->pMetaCache));
@@ -2332,7 +2333,8 @@ int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, SArray* cols
     return ret;
   }
 
-  buildCreateTbReq(&smlHandle->tableExecHandle.createTblReq, tableName, pTag, pTableMeta->suid, NULL, tagName, pTableMeta->tableInfo.numOfTags);
+  buildCreateTbReq(&smlHandle->tableExecHandle.createTblReq, tableName, pTag, pTableMeta->suid, NULL, tagName,
+                   pTableMeta->tableInfo.numOfTags);
   taosArrayDestroy(tagName);
 
   smlHandle->tableExecHandle.createTblReq.ctb.name = taosMemoryMalloc(sTableNameLen + 1);
@@ -92,7 +92,7 @@ static char* getSyntaxErrFormat(int32_t errCode) {
     case TSDB_CODE_PAR_INTER_SLIDING_TOO_BIG:
       return "sliding value no larger than the interval value";
     case TSDB_CODE_PAR_INTER_SLIDING_TOO_SMALL:
-      return "sliding value can not less than 1% of interval value";
+      return "sliding value can not less than 1%% of interval value";
     case TSDB_CODE_PAR_ONLY_ONE_JSON_TAG:
       return "Only one tag if there is a json tag";
     case TSDB_CODE_PAR_INCORRECT_NUM_OF_COL:
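Illustrative sketch (not TDengine source) of the message fix above: these strings are later consumed as printf-style format strings, so a literal percent sign must be escaped as %% or it is parsed as the start of a conversion specifier.

#include <stdio.h>

int main(void) {
  /* printf("... 1% of ...") would treat "% o" as a malformed conversion */
  printf("sliding value can not less than 1%% of interval value\n");
  return 0;
}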
@@ -339,6 +339,7 @@ static int32_t createJoinLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
 
   pJoin->joinType = pJoinTable->joinType;
   pJoin->isSingleTableJoin = pJoinTable->table.singleTable;
+  pJoin->inputTsOrder = ORDER_ASC;
   pJoin->node.groupAction = GROUP_ACTION_CLEAR;
   pJoin->node.requireDataOrder = DATA_ORDER_LEVEL_GLOBAL;
@@ -625,14 +626,14 @@ static int32_t createInterpFuncLogicNode(SLogicPlanContext* pCxt, SSelectStmt* p
 
 static int32_t createWindowLogicNodeFinalize(SLogicPlanContext* pCxt, SSelectStmt* pSelect, SWindowLogicNode* pWindow,
                                              SLogicNode** pLogicNode) {
-  int32_t code = nodesCollectFuncs(pSelect, SQL_CLAUSE_WINDOW, fmIsWindowClauseFunc, &pWindow->pFuncs);
-
   if (pCxt->pPlanCxt->streamQuery) {
     pWindow->triggerType = pCxt->pPlanCxt->triggerType;
     pWindow->watermark = pCxt->pPlanCxt->watermark;
+    pWindow->igExpired = pCxt->pPlanCxt->igExpired;
   }
+  pWindow->inputTsOrder = ORDER_ASC;
 
+  int32_t code = nodesCollectFuncs(pSelect, SQL_CLAUSE_WINDOW, fmIsWindowClauseFunc, &pWindow->pFuncs);
   if (TSDB_CODE_SUCCESS == code) {
     code = rewriteExprsForSelect(pWindow->pFuncs, pSelect, SQL_CLAUSE_WINDOW);
   }
@@ -861,7 +862,8 @@ static int32_t createProjectLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSel
 
   TSWAP(pProject->node.pLimit, pSelect->pLimit);
   TSWAP(pProject->node.pSlimit, pSelect->pSlimit);
-  pProject->node.groupAction = GROUP_ACTION_CLEAR;
+  pProject->node.groupAction =
+      (!pSelect->isSubquery && pCxt->pPlanCxt->streamQuery) ? GROUP_ACTION_KEEP : GROUP_ACTION_CLEAR;
   pProject->node.requireDataOrder = DATA_ORDER_LEVEL_NONE;
   pProject->node.resultDataOrder = DATA_ORDER_LEVEL_NONE;
 
@@ -993,25 +993,28 @@ static bool sortPriKeyOptMayBeOptimized(SLogicNode* pNode) {
 }
 
 static int32_t sortPriKeyOptGetScanNodesImpl(SLogicNode* pNode, bool* pNotOptimize, SNodeList** pScanNodes) {
-  int32_t code = TSDB_CODE_SUCCESS;
-
   switch (nodeType(pNode)) {
-    case QUERY_NODE_LOGIC_PLAN_SCAN:
-      if (TSDB_SUPER_TABLE != ((SScanLogicNode*)pNode)->tableType) {
-        return nodesListMakeAppend(pScanNodes, (SNode*)pNode);
+    case QUERY_NODE_LOGIC_PLAN_SCAN: {
+      SScanLogicNode* pScan = (SScanLogicNode*)pNode;
+      if (NULL != pScan->pGroupTags) {
+        *pNotOptimize = true;
+        return TSDB_CODE_SUCCESS;
       }
-      break;
-    case QUERY_NODE_LOGIC_PLAN_JOIN:
-      code =
+      return nodesListMakeAppend(pScanNodes, (SNode*)pNode);
+    }
+    case QUERY_NODE_LOGIC_PLAN_JOIN: {
+      int32_t code =
           sortPriKeyOptGetScanNodesImpl((SLogicNode*)nodesListGetNode(pNode->pChildren, 0), pNotOptimize, pScanNodes);
       if (TSDB_CODE_SUCCESS == code) {
        code =
             sortPriKeyOptGetScanNodesImpl((SLogicNode*)nodesListGetNode(pNode->pChildren, 1), pNotOptimize, pScanNodes);
       }
       return code;
+    }
     case QUERY_NODE_LOGIC_PLAN_AGG:
     case QUERY_NODE_LOGIC_PLAN_PARTITION:
       *pNotOptimize = true;
-      return code;
+      return TSDB_CODE_SUCCESS;
    default:
       break;
   }
@@ -1037,17 +1040,33 @@ static EOrder sortPriKeyOptGetPriKeyOrder(SSortLogicNode* pSort) {
   return ((SOrderByExprNode*)nodesListGetNode(pSort->pSortKeys, 0))->order;
 }
 
+static void sortPriKeyOptSetParentOrder(SLogicNode* pNode, EOrder order) {
+  if (NULL == pNode) {
+    return;
+  }
+  if (QUERY_NODE_LOGIC_PLAN_WINDOW == nodeType(pNode)) {
+    ((SWindowLogicNode*)pNode)->inputTsOrder = order;
+  } else if (QUERY_NODE_LOGIC_PLAN_JOIN == nodeType(pNode)) {
+    ((SJoinLogicNode*)pNode)->inputTsOrder = order;
+  }
+  sortPriKeyOptSetParentOrder(pNode->pParent, order);
+}
+
 static int32_t sortPriKeyOptApply(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan, SSortLogicNode* pSort,
                                   SNodeList* pScanNodes) {
   EOrder order = sortPriKeyOptGetPriKeyOrder(pSort);
-  if (ORDER_DESC == order) {
-    SNode* pScanNode = NULL;
-    FOREACH(pScanNode, pScanNodes) {
-      SScanLogicNode* pScan = (SScanLogicNode*)pScanNode;
-      if (pScan->scanSeq[0] > 0) {
-        TSWAP(pScan->scanSeq[0], pScan->scanSeq[1]);
-      }
+  SNode* pScanNode = NULL;
+  FOREACH(pScanNode, pScanNodes) {
+    SScanLogicNode* pScan = (SScanLogicNode*)pScanNode;
+    if (ORDER_DESC == order && pScan->scanSeq[0] > 0) {
+      TSWAP(pScan->scanSeq[0], pScan->scanSeq[1]);
+    }
+    if (TSDB_SUPER_TABLE == pScan->tableType) {
+      pScan->scanType = SCAN_TYPE_TABLE_MERGE;
+      pScan->node.resultDataOrder = DATA_ORDER_LEVEL_GLOBAL;
+      pScan->node.requireDataOrder = DATA_ORDER_LEVEL_GLOBAL;
+    }
+    sortPriKeyOptSetParentOrder(pScan->node.pParent, order);
   }
 
   SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pSort->node.pChildren, 0);
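Illustrative sketch (not TDengine source; node kinds and fields are hypothetical) of the recursive shape of sortPriKeyOptSetParentOrder above: starting from a scan node, walk up through parents and record the timestamp order on every window or join node along the path.

#include <stdio.h>

typedef enum { NODE_SCAN, NODE_JOIN, NODE_WINDOW, NODE_PROJECT } Kind;

typedef struct Node {
  Kind kind;
  int  inputTsOrder; /* only meaningful for join/window here */
  struct Node *parent;
} Node;

static void set_parent_order(Node *n, int order) {
  if (n == NULL) return;
  if (n->kind == NODE_WINDOW || n->kind == NODE_JOIN) n->inputTsOrder = order;
  set_parent_order(n->parent, order); /* continue to the root */
}

int main(void) {
  Node project = {NODE_PROJECT, 0, NULL};
  Node window  = {NODE_WINDOW, 0, &project};
  Node join    = {NODE_JOIN, 0, &window};
  Node scan    = {NODE_SCAN, 0, &join};

  set_parent_order(scan.parent, /*ORDER_DESC=*/2);
  printf("join=%d window=%d\n", join.inputTsOrder, window.inputTsOrder); /* 2 2 */
  return 0;
}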
@@ -1613,10 +1632,10 @@ static void alignProjectionWithTarget(SLogicNode* pNode) {
   }
 
   SProjectLogicNode* pProjectNode = (SProjectLogicNode*)pNode;
-  SNode* pProjection = NULL;
+  SNode*             pProjection = NULL;
   FOREACH(pProjection, pProjectNode->pProjections) {
     SNode* pTarget = NULL;
-    bool keep = false;
+    bool   keep = false;
     FOREACH(pTarget, pNode->pTargets) {
       if (0 == strcmp(((SColumnNode*)pProjection)->node.aliasName, ((SColumnNode*)pTarget)->colName)) {
         keep = true;
@@ -2214,7 +2233,7 @@ static bool tagScanMayBeOptimized(SLogicNode* pNode) {
       !planOptNodeListHasTbname(pAgg->pGroupKeys)) {
     return false;
   }
-
+
   SNode* pGroupKey = NULL;
   FOREACH(pGroupKey, pAgg->pGroupKeys) {
     SNode* pGroup = NULL;
@@ -415,7 +415,6 @@ static int32_t createScanPhysiNodeFinalize(SPhysiPlanContext* pCxt, SSubplan* pS
                                            SScanPhysiNode* pScanPhysiNode, SPhysiNode** pPhyNode) {
   int32_t code = createScanCols(pCxt, pScanPhysiNode, pScanLogicNode->pScanCols);
   if (TSDB_CODE_SUCCESS == code) {
-    // Data block describe also needs to be set without scanning column, such as SELECT COUNT(*) FROM t
     code = addDataBlockSlots(pCxt, pScanPhysiNode->pScanCols, pScanPhysiNode->node.pOutputDataBlockDesc);
   }
 
@@ -622,8 +621,8 @@ static int32_t createScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan,

 static int32_t createJoinPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren, SJoinLogicNode* pJoinLogicNode,
                                    SPhysiNode** pPhyNode) {
-  SJoinPhysiNode* pJoin =
-      (SJoinPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pJoinLogicNode, QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN);
+  SSortMergeJoinPhysiNode* pJoin =
+      (SSortMergeJoinPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pJoinLogicNode, QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN);
   if (NULL == pJoin) {
     return TSDB_CODE_OUT_OF_MEMORY;
   }
@@ -975,6 +974,9 @@ static int32_t createInterpFuncPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pCh
 }

 static bool projectCanMergeDataBlock(SProjectLogicNode* pProject) {
+  if (GROUP_ACTION_KEEP == pProject->node.groupAction) {
+    return false;
+  }
   if (DATA_ORDER_LEVEL_NONE == pProject->node.resultDataOrder) {
     return true;
   }

@@ -469,7 +469,7 @@ static int32_t stbSplCreateExchangeNode(SSplitContext* pCxt, SLogicNode* pParent
   return code;
 }

-static int32_t stbSplCreateMergeKeysByPrimaryKey(SNode* pPrimaryKey, SNodeList** pMergeKeys) {
+static int32_t stbSplCreateMergeKeysByPrimaryKey(SNode* pPrimaryKey, EOrder order, SNodeList** pMergeKeys) {
   SOrderByExprNode* pMergeKey = (SOrderByExprNode*)nodesMakeNode(QUERY_NODE_ORDER_BY_EXPR);
   if (NULL == pMergeKey) {
     return TSDB_CODE_OUT_OF_MEMORY;

@@ -479,7 +479,7 @@ static int32_t stbSplCreateMergeKeysByPrimaryKey(SNode* pPrimaryKey, SNodeList**
     nodesDestroyNode((SNode*)pMergeKey);
     return TSDB_CODE_OUT_OF_MEMORY;
   }
-  pMergeKey->order = ORDER_ASC;
+  pMergeKey->order = order;
   pMergeKey->nullOrder = NULL_ORDER_FIRST;
   return nodesListMakeStrictAppend(pMergeKeys, (SNode*)pMergeKey);
 }
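
The order parameter threaded through stbSplCreateMergeKeysByPrimaryKey above matters because a multiway merge is only correct when the comparator matches the direction of its sorted inputs. A toy two-way merge (illustrative only, not TDengine code) showing the failure mode a hard-coded ORDER_ASC would cause on descending streams:

    #include <stdio.h>

    static void merge2(const int *a, int na, const int *b, int nb, int desc) {
      int i = 0, j = 0;
      while (i < na || j < nb) {
        int takeA = (j >= nb) || (i < na && (desc ? a[i] >= b[j] : a[i] <= b[j]));
        printf("%d ", takeA ? a[i++] : b[j++]);
      }
      printf("\n");
    }

    int main() {
      int x[] = {9, 5, 1}, y[] = {8, 4, 2};  // two DESC-sorted streams
      merge2(x, 3, y, 3, 1);  // matching comparator: 9 8 5 4 2 1
      merge2(x, 3, y, 3, 0);  // ascending comparator on DESC inputs: 8 4 2 9 5 1, not sorted
      return 0;
    }
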
@@ -491,7 +491,8 @@ static int32_t stbSplSplitIntervalForBatch(SSplitContext* pCxt, SStableSplitInfo
   ((SWindowLogicNode*)pPartWindow)->windowAlgo = INTERVAL_ALGO_HASH;
   ((SWindowLogicNode*)pInfo->pSplitNode)->windowAlgo = INTERVAL_ALGO_MERGE;
   SNodeList* pMergeKeys = NULL;
-  code = stbSplCreateMergeKeysByPrimaryKey(((SWindowLogicNode*)pInfo->pSplitNode)->pTspk, &pMergeKeys);
+  code = stbSplCreateMergeKeysByPrimaryKey(((SWindowLogicNode*)pInfo->pSplitNode)->pTspk,
+                                           ((SWindowLogicNode*)pInfo->pSplitNode)->inputTsOrder, &pMergeKeys);
   if (TSDB_CODE_SUCCESS == code) {
     code = stbSplCreateMergeNode(pCxt, NULL, pInfo->pSplitNode, pMergeKeys, pPartWindow, true);
   }
@@ -579,7 +580,8 @@ static int32_t stbSplSplitSessionOrStateForBatch(SSplitContext* pCxt, SStableSpl
   SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pWindow->pChildren, 0);

   SNodeList* pMergeKeys = NULL;
-  int32_t code = stbSplCreateMergeKeysByPrimaryKey(((SWindowLogicNode*)pWindow)->pTspk, &pMergeKeys);
+  int32_t code = stbSplCreateMergeKeysByPrimaryKey(((SWindowLogicNode*)pWindow)->pTspk,
+                                                   ((SWindowLogicNode*)pWindow)->inputTsOrder, &pMergeKeys);

   if (TSDB_CODE_SUCCESS == code) {
     code = stbSplCreateMergeNode(pCxt, pInfo->pSubplan, pChild, pMergeKeys, (SLogicNode*)pChild, true);
@@ -913,27 +915,70 @@ static int32_t stbSplSplitScanNodeWithPartTags(SSplitContext* pCxt, SStableSplit
 }

 static SNode* stbSplFindPrimaryKeyFromScan(SScanLogicNode* pScan) {
   bool find = false;
   SNode* pCol = NULL;
   FOREACH(pCol, pScan->pScanCols) {
     if (PRIMARYKEY_TIMESTAMP_COL_ID == ((SColumnNode*)pCol)->colId) {
       find = true;
       break;
     }
   }
   if (!find) {
     return NULL;
   }
   SNode* pTarget = NULL;
   FOREACH(pTarget, pScan->node.pTargets) {
     if (nodesEqualNode(pTarget, pCol)) {
       return pCol;
     }
   }
-  return NULL;
+  nodesListStrictAppend(pScan->node.pTargets, nodesCloneNode(pCol));
+  return pCol;
 }

+static int32_t stbSplCreateMergeScanNode(SScanLogicNode* pScan, SLogicNode** pOutputMergeScan,
+                                         SNodeList** pOutputMergeKeys) {
+  SNodeList* pChildren = pScan->node.pChildren;
+  pScan->node.pChildren = NULL;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  SScanLogicNode* pMergeScan = (SScanLogicNode*)nodesCloneNode((SNode*)pScan);
+  if (NULL == pMergeScan) {
+    code = TSDB_CODE_OUT_OF_MEMORY;
+  }
+
+  SNodeList* pMergeKeys = NULL;
+  if (TSDB_CODE_SUCCESS == code) {
+    pMergeScan->scanType = SCAN_TYPE_TABLE_MERGE;
+    pMergeScan->node.pChildren = pChildren;
+    splSetParent((SLogicNode*)pMergeScan);
+    code = stbSplCreateMergeKeysByPrimaryKey(stbSplFindPrimaryKeyFromScan(pMergeScan),
+                                             pMergeScan->scanSeq[0] > 0 ? ORDER_ASC : ORDER_DESC, &pMergeKeys);
+  }
+
+  if (TSDB_CODE_SUCCESS == code) {
+    *pOutputMergeScan = (SLogicNode*)pMergeScan;
+    *pOutputMergeKeys = pMergeKeys;
+  } else {
+    nodesDestroyNode((SNode*)pMergeScan);
+    nodesDestroyList(pMergeKeys);
+  }
+
+  return code;
+}
+
 static int32_t stbSplSplitMergeScanNode(SSplitContext* pCxt, SLogicSubplan* pSubplan, SScanLogicNode* pScan,
                                         bool groupSort) {
-  SNodeList* pMergeKeys = NULL;
-  int32_t code = stbSplCreateMergeKeysByPrimaryKey(stbSplFindPrimaryKeyFromScan(pScan), &pMergeKeys);
+  SLogicNode* pMergeScan = NULL;
+  SNodeList* pMergeKeys = NULL;
+  int32_t code = stbSplCreateMergeScanNode(pScan, &pMergeScan, &pMergeKeys);
   if (TSDB_CODE_SUCCESS == code) {
-    code = stbSplCreateMergeNode(pCxt, pSubplan, (SLogicNode*)pScan, pMergeKeys, (SLogicNode*)pScan, groupSort);
+    code = stbSplCreateMergeNode(pCxt, pSubplan, (SLogicNode*)pScan, pMergeKeys, pMergeScan, groupSort);
   }
   if (TSDB_CODE_SUCCESS == code) {
     code = nodesListMakeStrictAppend(&pSubplan->pChildren,
-                                     (SNode*)splCreateScanSubplan(pCxt, (SLogicNode*)pScan, SPLIT_FLAG_STABLE_SPLIT));
+                                     (SNode*)splCreateScanSubplan(pCxt, pMergeScan, SPLIT_FLAG_STABLE_SPLIT));
   }
-  pScan->scanType = SCAN_TYPE_TABLE_MERGE;
   ++(pCxt->groupId);
   return code;
 }
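
stbSplCreateMergeScanNode above detaches the scan's children before cloning so the clone stays shallow and the subtree ends up under the merge scan exactly once. A toy model of that detach-clone-reattach pattern (stand-in types, not the planner's):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct Node {
      struct Node *pChildren;  // single-child toy model of the node tree
      int scanType;
    } Node;

    static Node *cloneNode(const Node *src) {  // shallow copy stands in for nodesCloneNode
      Node *dst = malloc(sizeof(Node));
      if (dst != NULL) memcpy(dst, src, sizeof(Node));
      return dst;
    }

    static Node *makeMergeScan(Node *pScan) {
      Node *pChildren = pScan->pChildren;  // detach so the clone does not copy the subtree
      pScan->pChildren = NULL;
      Node *pMerge = cloneNode(pScan);
      if (pMerge != NULL) {
        pMerge->pChildren = pChildren;     // reattach the subtree under the clone
      } else {
        pScan->pChildren = pChildren;      // restore on failure
      }
      return pMerge;
    }

    int main() {
      Node child = {NULL, 0};
      Node scan = {&child, 0};
      Node *merge = makeMergeScan(&scan);
      if (merge != NULL) {
        printf("scan has children: %s, merge has children: %s\n",
               scan.pChildren ? "yes" : "no", merge->pChildren ? "yes" : "no");
        free(merge);
      }
      return 0;
    }
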
@@ -978,14 +1023,14 @@ static int32_t stbSplSplitJoinNode(SSplitContext* pCxt, SStableSplitInfo* pInfo)
 }

 static int32_t stbSplCreateMergeKeysForPartitionNode(SLogicNode* pPart, SNodeList** pMergeKeys) {
-  SNode* pPrimaryKey =
-      nodesCloneNode(stbSplFindPrimaryKeyFromScan((SScanLogicNode*)nodesListGetNode(pPart->pChildren, 0)));
+  SScanLogicNode* pScan = (SScanLogicNode*)nodesListGetNode(pPart->pChildren, 0);
+  SNode* pPrimaryKey = nodesCloneNode(stbSplFindPrimaryKeyFromScan(pScan));
   if (NULL == pPrimaryKey) {
     return TSDB_CODE_OUT_OF_MEMORY;
   }
   int32_t code = nodesListAppend(pPart->pTargets, pPrimaryKey);
   if (TSDB_CODE_SUCCESS == code) {
-    code = stbSplCreateMergeKeysByPrimaryKey(pPrimaryKey, pMergeKeys);
+    code = stbSplCreateMergeKeysByPrimaryKey(pPrimaryKey, pScan->scanSeq[0] > 0 ? ORDER_ASC : ORDER_DESC, pMergeKeys);
   }
   return code;
 }

@@ -124,7 +124,8 @@ int32_t replaceLogicNode(SLogicSubplan* pSubplan, SLogicNode* pOld, SLogicNode*
 }

 static int32_t adjustScanDataRequirement(SScanLogicNode* pScan, EDataOrderLevel requirement) {
-  if (SCAN_TYPE_TABLE != pScan->scanType && SCAN_TYPE_TABLE_MERGE != pScan->scanType) {
+  if ((SCAN_TYPE_TABLE != pScan->scanType && SCAN_TYPE_TABLE_MERGE != pScan->scanType) ||
+      DATA_ORDER_LEVEL_GLOBAL == pScan->node.requireDataOrder) {
     return TSDB_CODE_SUCCESS;
   }
   // The lowest sort level of scan output data is DATA_ORDER_LEVEL_IN_BLOCK

@@ -24,9 +24,10 @@ TEST_F(PlanBasicTest, selectClause) {
   useDb("root", "test");

   run("SELECT * FROM t1");
   run("SELECT 1 FROM t1");
   run("SELECT * FROM st1");
   run("SELECT 1 FROM st1");

   run("SELECT MAX(c1) c2, c2 FROM t1");

   run("SELECT MAX(c1) c2, c2 FROM st1");
 }

 TEST_F(PlanBasicTest, whereClause) {

@@ -53,6 +53,8 @@ TEST_F(PlanOptimizeTest, sortPrimaryKey) {

   run("SELECT c1 FROM t1 ORDER BY ts");

   run("SELECT c1 FROM st1 ORDER BY ts");

+  run("SELECT c1 FROM t1 ORDER BY ts DESC");
+
   run("SELECT COUNT(*) FROM t1 INTERVAL(10S) ORDER BY _WSTART DESC");

@@ -26,6 +26,7 @@
 #include "syncInt.h"
 #include "syncMessage.h"
 #include "taosdef.h"
+#include "tref.h"
 #include "tskiplist.h"

 typedef struct SSyncRaftEntry {

@@ -89,6 +90,7 @@ typedef struct SRaftEntryCache {
   SSkipList* pSkipList;
   int32_t maxCount;
   int32_t currentCount;
+  int32_t refMgr;
   TdThreadMutex mutex;
   SSyncNode* pSyncNode;
 } SRaftEntryCache;

@@ -242,9 +242,9 @@ static int32_t syncIOStopInternal(SSyncIO *io) {
 }

 static void *syncIOConsumerFunc(void *param) {
-  SSyncIO *io = param;
+  SSyncIO * io = param;
   STaosQall *qall = taosAllocateQall();
-  SRpcMsg *pRpcMsg, rpcMsg;
+  SRpcMsg * pRpcMsg, rpcMsg;
   SQueueInfo qinfo = {0};

   while (1) {

@@ -125,7 +125,7 @@ cJSON *syncIndexMgr2Json(SSyncIndexMgr *pSyncIndexMgr) {

 char *syncIndexMgr2Str(SSyncIndexMgr *pSyncIndexMgr) {
   cJSON *pJson = syncIndexMgr2Json(pSyncIndexMgr);
-  char *serialized = cJSON_Print(pJson);
+  char * serialized = cJSON_Print(pJson);
   cJSON_Delete(pJson);
   return serialized;
 }

@@ -101,7 +101,7 @@ cJSON *syncCfg2Json(SSyncCfg *pSyncCfg) {

 char *syncCfg2Str(SSyncCfg *pSyncCfg) {
   cJSON *pJson = syncCfg2Json(pSyncCfg);
-  char *serialized = cJSON_Print(pJson);
+  char * serialized = cJSON_Print(pJson);
   cJSON_Delete(pJson);
   return serialized;
 }
@@ -109,7 +109,7 @@ char *syncCfg2Str(SSyncCfg *pSyncCfg) {
 char *syncCfg2SimpleStr(SSyncCfg *pSyncCfg) {
   if (pSyncCfg != NULL) {
     int32_t len = 512;
-    char *s = taosMemoryMalloc(len);
+    char * s = taosMemoryMalloc(len);
     memset(s, 0, len);

     snprintf(s, len, "{r-num:%d, my:%d, ", pSyncCfg->replicaNum, pSyncCfg->myIndex);
@@ -206,7 +206,7 @@ cJSON *raftCfg2Json(SRaftCfg *pRaftCfg) {

 char *raftCfg2Str(SRaftCfg *pRaftCfg) {
   cJSON *pJson = raftCfg2Json(pRaftCfg);
-  char *serialized = cJSON_Print(pJson);
+  char * serialized = cJSON_Print(pJson);
   cJSON_Delete(pJson);
   return serialized;
 }
@@ -285,7 +285,7 @@ int32_t raftCfgFromJson(const cJSON *pRoot, SRaftCfg *pRaftCfg) {
     (pRaftCfg->configIndexArr)[i] = atoll(pIndex->valuestring);
   }

-  cJSON *pJsonSyncCfg = cJSON_GetObjectItem(pJson, "SSyncCfg");
+  cJSON * pJsonSyncCfg = cJSON_GetObjectItem(pJson, "SSyncCfg");
   int32_t code = syncCfgFromJson(pJsonSyncCfg, &(pRaftCfg->cfg));
   ASSERT(code == 0);

@@ -23,6 +23,7 @@ SSyncRaftEntry* syncEntryBuild(uint32_t dataLen) {
   memset(pEntry, 0, bytes);
   pEntry->bytes = bytes;
   pEntry->dataLen = dataLen;
+  pEntry->rid = -1;
   return pEntry;
 }

@@ -451,6 +452,11 @@ static char* keyFn(const void* pData) {

 static int cmpFn(const void* p1, const void* p2) { return memcmp(p1, p2, sizeof(SyncIndex)); }

+static void freeRaftEntry(void* param) {
+  SSyncRaftEntry* pEntry = (SSyncRaftEntry*)param;
+  syncEntryDestory(pEntry);
+}
+
 SRaftEntryCache* raftEntryCacheCreate(SSyncNode* pSyncNode, int32_t maxCount) {
   SRaftEntryCache* pCache = taosMemoryMalloc(sizeof(SRaftEntryCache));
   if (pCache == NULL) {
@@ -466,6 +472,7 @@ SRaftEntryCache* raftEntryCacheCreate(SSyncNode* pSyncNode, int32_t maxCount) {
   }

   taosThreadMutexInit(&(pCache->mutex), NULL);
+  pCache->refMgr = taosOpenRef(10, freeRaftEntry);
   pCache->maxCount = maxCount;
   pCache->currentCount = 0;
   pCache->pSyncNode = pSyncNode;
@@ -477,6 +484,10 @@ void raftEntryCacheDestroy(SRaftEntryCache* pCache) {
   if (pCache != NULL) {
     taosThreadMutexLock(&(pCache->mutex));
     tSkipListDestroy(pCache->pSkipList);
+    if (pCache->refMgr != -1) {
+      taosCloseRef(pCache->refMgr);
+      pCache->refMgr = -1;
+    }
     taosThreadMutexUnlock(&(pCache->mutex));
     taosThreadMutexDestroy(&(pCache->mutex));
     taosMemoryFree(pCache);
@@ -498,6 +509,9 @@ int32_t raftEntryCachePutEntry(struct SRaftEntryCache* pCache, SSyncRaftEntry* p
   ASSERT(pSkipListNode != NULL);
   ++(pCache->currentCount);

+  pEntry->rid = taosAddRef(pCache->refMgr, pEntry);
+  ASSERT(pEntry->rid >= 0);
+
   do {
     char eventLog[128];
     snprintf(eventLog, sizeof(eventLog), "raft cache add, type:%s,%d, type2:%s,%d, index:%" PRId64 ", bytes:%d",
@@ -520,6 +534,7 @@ int32_t raftEntryCacheGetEntry(struct SRaftEntryCache* pCache, SyncIndex index,
   if (code == 1) {
     *ppEntry = taosMemoryMalloc(pEntry->bytes);
     memcpy(*ppEntry, pEntry, pEntry->bytes);
+    (*ppEntry)->rid = -1;
   } else {
     *ppEntry = NULL;
   }
@@ -541,6 +556,7 @@ int32_t raftEntryCacheGetEntryP(struct SRaftEntryCache* pCache, SyncIndex index,
     SSkipListNode** ppNode = (SSkipListNode**)taosArrayGet(entryPArray, 0);
     ASSERT(*ppNode != NULL);
     *ppEntry = (SSyncRaftEntry*)SL_GET_NODE_DATA(*ppNode);
+    taosAcquireRef(pCache->refMgr, (*ppEntry)->rid);
     code = 1;

   } else if (arraySize == 0) {
@@ -600,7 +616,9 @@ int32_t raftEntryCacheClear(struct SRaftEntryCache* pCache, int32_t count) {
     taosArrayPush(delNodeArray, &pNode);
     ++returnCnt;
     SSyncRaftEntry* pEntry = (SSyncRaftEntry*)SL_GET_NODE_DATA(pNode);
-    syncEntryDestory(pEntry);
+
+    // syncEntryDestory(pEntry);
+    taosRemoveRef(pCache->refMgr, pEntry->rid);
   }
   tSkipListDestroyIter(pIter);

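
The cache hunks above replace direct syncEntryDestory calls with reference counting from tref.h, so an entry handed out by raftEntryCacheGetEntryP stays alive until its last holder releases it. A hedged sketch of that lifecycle, using the tref API the patch relies on (DemoEntry and the exact call order are illustrative):

    #include <stdlib.h>
    #include "tref.h"  // taosOpenRef/taosAddRef/taosAcquireRef/taosReleaseRef/taosRemoveRef

    typedef struct { int value; } DemoEntry;  // stand-in for SSyncRaftEntry

    static void freeDemoEntry(void *param) { free(param); }

    void demoRefLifecycle() {
      int32_t rsetId = taosOpenRef(10, freeDemoEntry);  // destructor for the ref set
      DemoEntry *pEntry = calloc(1, sizeof(DemoEntry));

      int64_t rid = taosAddRef(rsetId, pEntry);            // creation reference
      DemoEntry *pAcquired = taosAcquireRef(rsetId, rid);  // pin for a reader
      if (pAcquired != NULL) {
        // ... read pAcquired safely even if another thread removes it now ...
        taosReleaseRef(rsetId, rid);  // unpin the reader
      }
      taosRemoveRef(rsetId, rid);  // drop the creation reference; freeDemoEntry
                                   // runs once no reader still holds the entry
      taosCloseRef(rsetId);
    }
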
@@ -216,7 +216,7 @@ cJSON *raftStore2Json(SRaftStore *pRaftStore) {

 char *raftStore2Str(SRaftStore *pRaftStore) {
   cJSON *pJson = raftStore2Json(pRaftStore);
-  char *serialized = cJSON_Print(pJson);
+  char * serialized = cJSON_Print(pJson);
   cJSON_Delete(pJson);
   return serialized;
 }

@@ -129,7 +129,7 @@ void syncRespCleanByTTL(SSyncRespMgr *pObj, int64_t ttl) {

   while (pStub) {
     size_t len;
-    void *key = taosHashGetKey(pStub, &len);
+    void * key = taosHashGetKey(pStub, &len);
     uint64_t *pSeqNum = (uint64_t *)key;
     sum++;

@@ -374,14 +374,14 @@ cJSON *snapshotSender2Json(SSyncSnapshotSender *pSender) {

 char *snapshotSender2Str(SSyncSnapshotSender *pSender) {
   cJSON *pJson = snapshotSender2Json(pSender);
-  char *serialized = cJSON_Print(pJson);
+  char * serialized = cJSON_Print(pJson);
   cJSON_Delete(pJson);
   return serialized;
 }

 char *snapshotSender2SimpleStr(SSyncSnapshotSender *pSender, char *event) {
   int32_t len = 256;
-  char *s = taosMemoryMalloc(len);
+  char * s = taosMemoryMalloc(len);

   SRaftId destId = pSender->pSyncNode->replicasId[pSender->replicaIndex];
   char host[64];
@@ -653,7 +653,7 @@ cJSON *snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver) {
     cJSON_AddStringToObject(pFromId, "addr", u64buf);
     {
       uint64_t u64 = pReceiver->fromId.addr;
-      cJSON *pTmp = pFromId;
+      cJSON * pTmp = pFromId;
       char host[128] = {0};
       uint16_t port;
       syncUtilU642Addr(u64, host, sizeof(host), &port);
@@ -686,14 +686,14 @@ cJSON *snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver) {

 char *snapshotReceiver2Str(SSyncSnapshotReceiver *pReceiver) {
   cJSON *pJson = snapshotReceiver2Json(pReceiver);
-  char *serialized = cJSON_Print(pJson);
+  char * serialized = cJSON_Print(pJson);
   cJSON_Delete(pJson);
   return serialized;
 }

 char *snapshotReceiver2SimpleStr(SSyncSnapshotReceiver *pReceiver, char *event) {
   int32_t len = 256;
-  char *s = taosMemoryMalloc(len);
+  char * s = taosMemoryMalloc(len);

   SRaftId fromId = pReceiver->fromId;
   char host[128];

@@ -125,7 +125,7 @@ int32_t SnapshotStartWrite(struct SSyncFSM* pFsm, void* pParam, void** ppWriter)
   return 0;
 }

-int32_t SnapshotStopWrite(struct SSyncFSM* pFsm, void* pWriter, bool isApply, SSnapshot *pSnapshot) {
+int32_t SnapshotStopWrite(struct SSyncFSM* pFsm, void* pWriter, bool isApply, SSnapshot* pSnapshot) {
   char logBuf[256] = {0};
   snprintf(logBuf, sizeof(logBuf), "==callback== ==SnapshotStopWrite== pFsm:%p, pWriter:%p, isApply:%d", pFsm, pWriter,
            isApply);

@@ -5,8 +5,8 @@
 #include "syncRaftLog.h"
 #include "syncRaftStore.h"
 #include "syncUtil.h"
-#include "tskiplist.h"
+#include "tref.h"
+#include "tskiplist.h"

 void logTest() {
   sTrace("--- sync log test: trace");
@@ -51,7 +51,7 @@ SRaftEntryCache* createCache(int maxCount) {
 }

 void test1() {
   int32_t code = 0;
   SRaftEntryCache* pCache = createCache(5);
   for (int i = 0; i < 10; ++i) {
     SSyncRaftEntry* pEntry = createEntry(i);
@@ -68,7 +68,7 @@ void test1() {
 }

 void test2() {
   int32_t code = 0;
   SRaftEntryCache* pCache = createCache(5);
   for (int i = 0; i < 10; ++i) {
     SSyncRaftEntry* pEntry = createEntry(i);
@@ -77,7 +77,7 @@ void test2() {
   }
   raftEntryCacheLog2((char*)"==test1 write 5 entries==", pCache);

   SyncIndex index = 2;
   SSyncRaftEntry* pEntry = NULL;

   code = raftEntryCacheGetEntryP(pCache, index, &pEntry);
@@ -107,7 +107,7 @@ void test2() {
 }

 void test3() {
   int32_t code = 0;
   SRaftEntryCache* pCache = createCache(20);
   for (int i = 0; i <= 4; ++i) {
     SSyncRaftEntry* pEntry = createEntry(i);
@@ -122,8 +122,6 @@ void test3() {
   raftEntryCacheLog2((char*)"==test3 write 10 entries==", pCache);
 }

-
-
 static void freeObj(void* param) {
   SSyncRaftEntry* pEntry = (SSyncRaftEntry*)param;
   syncEntryLog2((char*)"freeObj: ", pEntry);
@@ -138,19 +136,41 @@ void test4() {

   int64_t rid = taosAddRef(testRefId, pEntry);
   sTrace("rid: %ld", rid);

   do {
     SSyncRaftEntry* pAcquireEntry = (SSyncRaftEntry*)taosAcquireRef(testRefId, rid);
     syncEntryLog2((char*)"acquire: ", pAcquireEntry);

+    taosAcquireRef(testRefId, rid);
+    taosAcquireRef(testRefId, rid);
+    taosAcquireRef(testRefId, rid);
+
-    taosReleaseRef(testRefId, rid);
-    //taosReleaseRef(testRefId, rid);
+    // taosReleaseRef(testRefId, rid);
+    // taosReleaseRef(testRefId, rid);
   } while (0);

   taosRemoveRef(testRefId, rid);
+
+  for (int i = 0; i < 10; ++i) {
+    sTrace("taosReleaseRef, %d", i);
+    taosReleaseRef(testRefId, rid);
+  }
+}
+
+void test5() {
+  int32_t testRefId = taosOpenRef(5, freeObj);
+  for (int i = 0; i < 100; i++) {
+    SSyncRaftEntry* pEntry = createEntry(i);
+    ASSERT(pEntry != NULL);
+
+    int64_t rid = taosAddRef(testRefId, pEntry);
+    sTrace("rid: %ld", rid);
+  }
+
+  for (int64_t rid = 2; rid < 101; rid++) {
+    SSyncRaftEntry* pAcquireEntry = (SSyncRaftEntry*)taosAcquireRef(testRefId, rid);
+    syncEntryLog2((char*)"taosAcquireRef: ", pAcquireEntry);
+  }
+}

 int main(int argc, char** argv) {
@@ -158,11 +178,13 @@ int main(int argc, char** argv) {
   tsAsyncLog = 0;
   sDebugFlag = DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE + DEBUG_DEBUG;

-  test1();
-  test2();
-  test3();
-
-  //test4();
+  /*
+  test1();
+  test2();
+  test3();
+  */
+  test4();
+  // test5();

   return 0;
 }

@@ -30,7 +30,7 @@ int32_t SnapshotStopRead(struct SSyncFSM* pFsm, void* pReader) { return 0; }
 int32_t SnapshotDoRead(struct SSyncFSM* pFsm, void* pReader, void** ppBuf, int32_t* len) { return 0; }

 int32_t SnapshotStartWrite(struct SSyncFSM* pFsm, void* pParam, void** ppWriter) { return 0; }
-int32_t SnapshotStopWrite(struct SSyncFSM* pFsm, void* pWriter, bool isApply, SSnapshot *pSnapshot) { return 0; }
+int32_t SnapshotStopWrite(struct SSyncFSM* pFsm, void* pWriter, bool isApply, SSnapshot* pSnapshot) { return 0; }
 int32_t SnapshotDoWrite(struct SSyncFSM* pFsm, void* pWriter, void* pBuf, int32_t len) { return 0; }

 SSyncSnapshotReceiver* createReceiver() {

@@ -126,7 +126,7 @@ int32_t SnapshotStartWrite(struct SSyncFSM* pFsm, void* pParam, void** ppWriter)
   return 0;
 }

-int32_t SnapshotStopWrite(struct SSyncFSM* pFsm, void* pWriter, bool isApply, SSnapshot *pSnapshot) {
+int32_t SnapshotStopWrite(struct SSyncFSM* pFsm, void* pWriter, bool isApply, SSnapshot* pSnapshot) {
   if (isApply) {
     gSnapshotLastApplyIndex = gFinishLastApplyIndex;
     gSnapshotLastApplyTerm = gFinishLastApplyTerm;

@@ -335,6 +335,7 @@ int32_t cfgSetItem(SConfig *pCfg, const char *name, const char *value, ECfgSrcTy
 }

 SConfigItem *cfgGetItem(SConfig *pCfg, const char *name) {
+  if (pCfg == NULL) return NULL;
   int32_t size = taosArrayGetSize(pCfg->array);
   for (int32_t i = 0; i < size; ++i) {
     SConfigItem *pItem = taosArrayGet(pCfg->array, i);
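
With the NULL guard added above, a missing config set and a missing item collapse into the same "not found" path for callers. A hypothetical helper built on the guarded cfgGetItem (assuming the SConfig/SConfigItem types from tconfig.h; not part of this patch):

    #include <stdbool.h>
    #include "tconfig.h"  // SConfig, SConfigItem, cfgGetItem

    static bool cfgSetBoolIfPresent(SConfig *pCfg, const char *name, bool v) {
      SConfigItem *pItem = cfgGetItem(pCfg, name);  // safe even when pCfg == NULL
      if (pItem == NULL) return false;
      pItem->bval = v;  // update the cached item only when it exists
      return true;
    }
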
@@ -512,7 +512,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INTER_OFFSET_UNIT, "Cannot use 'year' as
 TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INTER_OFFSET_TOO_BIG, "Interval offset should be shorter than interval")
 TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INTER_SLIDING_UNIT, "Does not support sliding when interval is natural month/year")
 TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INTER_SLIDING_TOO_BIG, "sliding value no larger than the interval value")
-TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INTER_SLIDING_TOO_SMALL, "sliding value can not less than 1% of interval value")
+TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INTER_SLIDING_TOO_SMALL, "sliding value can not less than 1%% of interval value")
 TAOS_DEFINE_ERROR(TSDB_CODE_PAR_ONLY_ONE_JSON_TAG, "Only one tag if there is a json tag")
 TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INCORRECT_NUM_OF_COL, "Query block has incorrect number of result columns")
 TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INCORRECT_TIMESTAMP_VAL, "Incorrect TIMESTAMP value")
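
The 1% -> 1%% change above matters because these error strings end up in printf-style formatting downstream (my inference from the escape): a bare % would be consumed as a conversion specifier instead of printed. A minimal demonstration:

    #include <stdio.h>

    int main() {
      // when the message itself is the format string, a literal percent
      // sign must be written as %% to survive formatting
      printf("sliding value can not less than 1%% of interval value\n");
      return 0;
    }
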
@@ -17,6 +17,7 @@
 #include "tlog.h"
 #include "os.h"
 #include "tutil.h"
+#include "tconfig.h"

 #define LOG_MAX_LINE_SIZE (1024)
 #define LOG_MAX_LINE_BUFFER_SIZE (LOG_MAX_LINE_SIZE + 3)

@@ -62,6 +63,7 @@ typedef struct {
   TdThreadMutex logMutex;
 } SLogObj;

+extern SConfig *tsCfg;
 static int8_t tsLogInited = 0;
 static SLogObj tsLogObj = {.fileNum = 1};
 static int64_t tsAsyncLogLostLines = 0;
@@ -741,25 +743,3 @@ cmp_end:

   return ret;
 }
-
-void taosSetAllDebugFlag(int32_t flag) {
-  if (flag <= 0) return;
-
-  uDebugFlag = flag;
-  rpcDebugFlag = flag;
-  jniDebugFlag = flag;
-  qDebugFlag = flag;
-  cDebugFlag = flag;
-  dDebugFlag = flag;
-  vDebugFlag = flag;
-  mDebugFlag = flag;
-  wDebugFlag = flag;
-  sDebugFlag = flag;
-  tsdbDebugFlag = flag;
-  tqDebugFlag = flag;
-  fsDebugFlag = flag;
-  udfDebugFlag = flag;
-  smaDebugFlag = flag;
-  idxDebugFlag = flag;
-  uInfo("all debug flag are set to %d", flag);
-}

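
taosSetAllDebugFlag leaves tlog.c in this hunk. A hedged sketch of what a per-flag replacement that validates and logs each change might look like (illustrative only, not the shipped implementation):

    #include <stdint.h>
    #include <stdio.h>

    // hypothetical per-module setter in the spirit of this patch: validate
    // once, assign, and report which flag changed
    static void setDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal) {
      if (pFlagPtr == NULL || flagVal <= 0) return;  // same guard style as the removed code
      *pFlagPtr = flagVal;
      printf("%s set to %d\n", flagName, flagVal);
    }

    int main() {
      int32_t sDebugFlag = 0;
      setDebugFlag(&sDebugFlag, "sDebugFlag", 135);
      return 0;
    }
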
@@ -65,14 +65,14 @@ class TDTestCase:
                is_leader=True

        if count==1 and is_leader:
-            tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
+            tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
        else:
            tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")

        for k ,v in self.dnode_list.items():
            if k == mnode_name:
                if v[3]==0:
-                    tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
+                    tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
                else:
                    tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
            else:

@@ -115,7 +115,7 @@ class TDTestCase:

        for k , v in vgroups_infos.items():
            if len(v) ==1 and v[0]=="leader":
-                tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
+                tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
            else:
                tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))

@@ -71,14 +71,14 @@ class TDTestCase:
                is_leader=True

        if count==1 and is_leader:
-            tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
+            tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
        else:
            tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")

        for k ,v in self.dnode_list.items():
            if k == mnode_name:
                if v[3]==0:
-                    tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
+                    tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
                else:
                    tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
            else:

@@ -121,7 +121,7 @@ class TDTestCase:

        for k , v in vgroups_infos.items():
            if len(v) ==1 and v[0]=="leader":
-                tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
+                tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
            else:
                tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))

@@ -129,7 +129,7 @@ class TDTestCase:
        drop_db_sql = "drop database if exists {}".format(dbname)
        create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums)

-        tdLog.info(" ==== create database {} and insert rows begin =====".format(dbname))
+        tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname))
        tdSql.execute(drop_db_sql)
        tdSql.execute(create_db_sql)
        tdSql.execute("use {}".format(dbname))

@@ -155,7 +155,7 @@ class TDTestCase:
            ts = self.ts + 1000*row_num
            tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")

-        tdLog.info(" ==== create database {} and insert rows execute end =====".format(dbname))
+        tdLog.notice(" ==== create database {} and insert rows execute end =====".format(dbname))

    def check_insert_status(self, dbname, tb_nums , row_nums):
        tdSql.execute("use {}".format(dbname))

@@ -71,14 +71,14 @@ class TDTestCase:
                is_leader=True

        if count==1 and is_leader:
-            tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
+            tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
        else:
            tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")

        for k ,v in self.dnode_list.items():
            if k == mnode_name:
                if v[3]==0:
-                    tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
+                    tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
                else:
                    tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
            else:

@@ -121,7 +121,7 @@ class TDTestCase:

        for k , v in vgroups_infos.items():
            if len(v) ==1 and v[0]=="leader":
-                tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
+                tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
            else:
                tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))

@@ -129,7 +129,7 @@ class TDTestCase:
        drop_db_sql = "drop database if exists {}".format(dbname)
        create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums)

-        tdLog.info(" ==== create database {} and insert rows begin =====".format(dbname))
+        tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname))
        tdSql.execute(drop_db_sql)
        tdSql.execute(create_db_sql)
        tdSql.execute("use {}".format(dbname))

@@ -155,7 +155,7 @@ class TDTestCase:
            ts = self.ts + 1000*row_num
            tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")

-        tdLog.info(" ==== create database {} and insert rows execute end =====".format(dbname))
+        tdLog.notice(" ==== create database {} and insert rows execute end =====".format(dbname))

    def check_insert_status(self, dbname, tb_nums , row_nums):
        tdSql.execute("use {}".format(dbname))

@@ -80,14 +80,14 @@ class TDTestCase:
                is_leader=True

        if count==1 and is_leader:
-            tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
+            tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
        else:
            tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")

        for k ,v in self.dnode_list.items():
            if k == mnode_name:
                if v[3]==0:
-                    tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
+                    tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
                else:
                    tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
            else:

@@ -130,7 +130,7 @@ class TDTestCase:

        for k , v in vgroups_infos.items():
            if len(v) ==1 and v[0]=="leader":
-                tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
+                tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
            else:
                tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))

@@ -138,7 +138,7 @@ class TDTestCase:
        drop_db_sql = "drop database if exists {}".format(dbname)
        create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums)

-        tdLog.info(" ==== create database {} and insert rows begin =====".format(dbname))
+        tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname))
        tdSql.execute(drop_db_sql)
        tdSql.execute(create_db_sql)
        tdSql.execute("use {}".format(dbname))

@@ -161,7 +161,7 @@ class TDTestCase:
            ts = self.ts + self.ts_step*row_num
            tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")

-        tdLog.info(" ==== stable {} insert rows execute end =====".format(stablename))
+        tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))

    def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):

@@ -170,7 +170,7 @@ class TDTestCase:
        for row_num in range(append_nums):
            tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
            # print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
-        tdLog.info(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename))
+        tdLog.notice(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename))
        os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename))

    def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):

@@ -197,7 +197,7 @@ class TDTestCase:
                time.sleep(0.1)
                tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
                status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
-                tdLog.info(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
+                tdLog.debug(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
                count += 1

@@ -218,7 +218,7 @@ class TDTestCase:
                time.sleep(0.1)
                tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
                status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
-                tdLog.info(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname))
+                tdLog.debug(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname))
                count += 1
    def _get_stop_dnode_id(self,dbname):
        tdSql.query("show {}.vgroups".format(dbname))

@@ -255,8 +255,8 @@ class TDTestCase:
        while status !="offline":
            time.sleep(0.1)
            status = _get_status()
-            # tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
-        tdLog.info("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
+            # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
+        tdLog.notice("==== stop_dnode has stopped , id is {} ====".format(self.stop_dnode_id))

    def wait_start_dnode_OK(self):

@@ -277,8 +277,8 @@ class TDTestCase:
        while status !="ready":
            time.sleep(0.1)
            status = _get_status()
-            # tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
-        tdLog.info("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
+            # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
+        tdLog.notice("==== stop_dnode has restart , id is {} ====".format(self.stop_dnode_id))

    def _parse_datetime(self,timestr):
        try:

@@ -342,9 +342,9 @@ class TDTestCase:
        elif isinstance(data, str):
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                       (sql, row, col, tdSql.queryResult[row][col], data))
-        elif isinstance(data, datetime.date):
-            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
-                       (sql, row, col, tdSql.queryResult[row][col], data))
+        # elif isinstance(data, datetime.date):
+        #     tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
+        #                (sql, row, col, tdSql.queryResult[row][col], data))
        elif isinstance(data, float):
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                       (sql, row, col, tdSql.queryResult[row][col], data))

@@ -389,15 +389,15 @@ class TDTestCase:
        # append rows of stablename when dnode stop

        tbname = "sub_{}_{}".format(stablename , 0)
-        tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
+        tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
        self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
-        tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
+        tdLog.notice(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
        self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100)

        # create new stables
-        tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
+        tdLog.notice(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
        self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 )
-        tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
+        tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
        self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)

        # begin start dnode

@@ -409,9 +409,9 @@ class TDTestCase:
            tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))

        # create new stables again
-        tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
+        tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
        self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
-        tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
+        tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
        self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)

    def unsync_run_case(self):

@@ -447,7 +447,7 @@ class TDTestCase:
        self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1)
        self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 )

-        tdLog.info(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
+        tdLog.notice(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))

        # create sync threading and start it
        self.current_thread = _create_threading(db_name)

@@ -457,21 +457,21 @@ class TDTestCase:
        self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0)

        tbname = "sub_{}_{}".format(stablename , 0)
-        tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
+        tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
        self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
-        tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
+        tdLog.notice(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
        self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100)

        # create new stables
-        tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
+        tdLog.notice(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
        self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 )
-        tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
+        tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
        self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)

        # create new stables again
-        tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
+        tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
        self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
-        tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
+        tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
        self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)

        self.current_thread.join()

@@ -80,14 +80,14 @@ class TDTestCase:
                is_leader=True

        if count==1 and is_leader:
-            tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
+            tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
        else:
            tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")

        for k ,v in self.dnode_list.items():
            if k == mnode_name:
                if v[3]==0:
-                    tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
+                    tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
                else:
                    tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
            else:

@@ -130,7 +130,7 @@ class TDTestCase:

        for k , v in vgroups_infos.items():
            if len(v) ==1 and v[0]=="leader":
-                tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
+                tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
            else:
                tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))

@@ -138,7 +138,7 @@ class TDTestCase:
        drop_db_sql = "drop database if exists {}".format(dbname)
        create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums)

-        tdLog.info(" ==== create database {} and insert rows begin =====".format(dbname))
+        tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname))
        tdSql.execute(drop_db_sql)
        tdSql.execute(create_db_sql)
        tdSql.execute("use {}".format(dbname))

@@ -161,7 +161,7 @@ class TDTestCase:
            ts = self.ts + self.ts_step*row_num
            tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")

-        tdLog.info(" ==== stable {} insert rows execute end =====".format(stablename))
+        tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))

    def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):

@@ -170,7 +170,7 @@ class TDTestCase:
        for row_num in range(append_nums):
            tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
            # print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
-        tdLog.info(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename))
+        tdLog.notice(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename))
        os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename))

    def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):

@@ -197,7 +197,7 @@ class TDTestCase:
                time.sleep(0.1)
                tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
                status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
-                tdLog.info(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
+                tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
                count += 1

@@ -218,7 +218,7 @@ class TDTestCase:
                time.sleep(0.1)
                tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
                status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
-                tdLog.info(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname))
+                tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname))
                count += 1

    def _get_stop_dnode_id(self,dbname):

@@ -256,8 +256,8 @@ class TDTestCase:
        while status !="offline":
            time.sleep(0.1)
            status = _get_status()
-            # tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
-        tdLog.info("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
+            # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
+        tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))

    def wait_start_dnode_OK(self):

@@ -278,8 +278,8 @@ class TDTestCase:
        while status !="ready":
            time.sleep(0.1)
            status = _get_status()
-            # tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
-        tdLog.info("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
+            # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
+        tdLog.notice("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))

    def _parse_datetime(self,timestr):
        try:

@@ -343,9 +343,9 @@ class TDTestCase:
        elif isinstance(data, str):
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                       (sql, row, col, tdSql.queryResult[row][col], data))
-        elif isinstance(data, datetime.date):
-            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
-                       (sql, row, col, tdSql.queryResult[row][col], data))
+        # elif isinstance(data, datetime.date):
+        #     tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
+        #                (sql, row, col, tdSql.queryResult[row][col], data))
        elif isinstance(data, float):
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                       (sql, row, col, tdSql.queryResult[row][col], data))

@@ -390,15 +390,15 @@ class TDTestCase:
        # append rows of stablename when dnode stop

        tbname = "sub_{}_{}".format(stablename , 0)
-        tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
+        tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
        self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
-        tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
+        tdLog.notice(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
        self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100)

        # create new stables
-        tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
+        tdLog.notice(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
        self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 )
-        tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
+        tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
        self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)

        # begin start dnode

@@ -410,9 +410,9 @@ class TDTestCase:
            tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))

        # create new stables again
-        tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
+        tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
        self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
-        tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
+        tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
        self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)

    def unsync_run_case(self):

@@ -448,7 +448,7 @@ class TDTestCase:
        self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1)
        self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 )

-        tdLog.info(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
+        tdLog.notice(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))

        # create sync threading and start it
        self.current_thread = _create_threading(db_name)

@@ -458,21 +458,21 @@ class TDTestCase:
        self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0)

        tbname = "sub_{}_{}".format(stablename , 0)
-        tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
+        tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
        self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
-        tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
+        tdLog.notice(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
        self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100)

        # create new stables
-        tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
+        tdLog.notice(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
        self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 )
-        tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
+        tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
        self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)

        # create new stables again
-        tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
+        tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
        self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
-        tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
+        tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
        self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)

        self.current_thread.join()

@@ -80,14 +80,14 @@ class TDTestCase:
                is_leader=True

        if count==1 and is_leader:
-            tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
+            tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
        else:
            tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")

        for k ,v in self.dnode_list.items():
            if k == mnode_name:
                if v[3]==0:
-                    tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
+                    tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
                else:
                    tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
            else:

@@ -130,7 +130,7 @@ class TDTestCase:

        for k , v in vgroups_infos.items():
            if len(v) ==1 and v[0]=="leader":
-                tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
+                tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
            else:
                tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))

@@ -138,7 +138,7 @@ class TDTestCase:
        drop_db_sql = "drop database if exists {}".format(dbname)
        create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums)

-        tdLog.info(" ==== create database {} and insert rows begin =====".format(dbname))
+        tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname))
        tdSql.execute(drop_db_sql)
        tdSql.execute(create_db_sql)
        tdSql.execute("use {}".format(dbname))

@@ -161,7 +161,7 @@ class TDTestCase:
            ts = self.ts + self.ts_step*row_num
            tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")

-        tdLog.info(" ==== stable {} insert rows execute end =====".format(stablename))
+        tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))

    def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):

@@ -170,7 +170,7 @@ class TDTestCase:
        for row_num in range(append_nums):
            tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
            # print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
-        tdLog.info(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename))
+        tdLog.notice(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename))
        os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename))

    def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):

@@ -197,7 +197,7 @@ class TDTestCase:
                time.sleep(0.1)
                tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
                status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
-                tdLog.info(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
+                tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
                count += 1

@@ -218,7 +218,7 @@ class TDTestCase:
                time.sleep(0.1)
                tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
                status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
-                tdLog.info(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname))
+                tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname))
                count += 1

    def _get_stop_dnode_id(self,dbname):

@@ -256,8 +256,8 @@ class TDTestCase:
        while status !="offline":
            time.sleep(0.1)
            status = _get_status()
-            # tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
-        tdLog.info("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
+            # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
+        tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))

    def wait_start_dnode_OK(self):

@@ -278,8 +278,8 @@ class TDTestCase:
        while status !="ready":
            time.sleep(0.1)
            status = _get_status()
-            # tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
-        tdLog.info("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
+            # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
+        tdLog.notice("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))

    def _parse_datetime(self,timestr):
        try:

@@ -343,9 +343,9 @@ class TDTestCase:
        elif isinstance(data, str):
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                       (sql, row, col, tdSql.queryResult[row][col], data))
-        elif isinstance(data, datetime.date):
-            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
-                       (sql, row, col, tdSql.queryResult[row][col], data))
+        # elif isinstance(data, datetime.date):
+        #     tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
+        #                (sql, row, col, tdSql.queryResult[row][col], data))
        elif isinstance(data, float):
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                       (sql, row, col, tdSql.queryResult[row][col], data))

@@ -390,15 +390,15 @@ class TDTestCase:
        # append rows of stablename when dnode stop

        tbname = "sub_{}_{}".format(stablename , 0)
|
||||
tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
|
||||
tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
|
||||
self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
|
||||
tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
|
||||
tdLog.notice(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
|
||||
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100)
|
||||
|
||||
# create new stables
|
||||
tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
|
||||
tdLog.notice(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
|
||||
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 )
|
||||
tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
|
||||
tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
|
||||
self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
|
||||
|
||||
# begin start dnode
|
||||
|
@ -410,9 +410,9 @@ class TDTestCase:
|
|||
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
|
||||
|
||||
# create new stables again
|
||||
tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
|
||||
tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
|
||||
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
|
||||
tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
|
||||
tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
|
||||
self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
|
||||
|
||||
def unsync_run_case(self):
|
||||
|
@ -453,7 +453,7 @@ class TDTestCase:
|
|||
self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1)
|
||||
self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 )
|
||||
|
||||
tdLog.info(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
|
||||
tdLog.notice(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
|
||||
|
||||
# create sync threading and start it
|
||||
self.current_thread = _create_threading(db_name)
|
||||
|
@ -463,21 +463,21 @@ class TDTestCase:
|
|||
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0)
|
||||
|
||||
tbname = "sub_{}_{}".format(stablename , 0)
|
||||
tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
|
||||
tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
|
||||
self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
|
||||
tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
|
||||
tdLog.notice(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
|
||||
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100)
|
||||
|
||||
# create new stables
|
||||
tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
|
||||
tdLog.notice(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
|
||||
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 )
|
||||
tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
|
||||
tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
|
||||
self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
|
||||
|
||||
# create new stables again
|
||||
tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
|
||||
tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
|
||||
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
|
||||
tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
|
||||
tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
|
||||
self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
|
||||
|
||||
self.current_thread.join()
|
||||
|
@ -493,7 +493,7 @@ class TDTestCase:
|
|||
else:
|
||||
continue
|
||||
if port:
|
||||
tdLog.info(" ==== dnode {} will be force stop by kill -9 ====".format(dnode_id))
|
||||
tdLog.notice(" ==== dnode {} will be force stop by kill -9 ====".format(dnode_id))
|
||||
psCmd = '''netstat -anp|grep -w LISTEN|grep -w %s |grep -o "LISTEN.*"|awk '{print $2}'|cut -d/ -f1|head -n1''' %(port)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
|
|
|
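The check_insert_rows hunks above all follow the same retry shape: re-run a count query until it matches, capped by self.try_check_times, sleeping briefly between attempts. A minimal standalone sketch of that pattern, with hypothetical names (retry_until, tries, delay) that are not part of the test framework:

import time

def retry_until(check, tries=10, delay=0.1):
    # re-run `check` until it returns True or `tries` attempts are used;
    # returns True on success, False if every attempt failed
    for _ in range(tries):
        if check():
            return True
        time.sleep(delay)  # brief pause between attempts, as the tests do
    return False

# usage sketch: poll until a simulated row count reaches the expected value
expected = 100
counter = {"rows": 0}

def _rows_arrived():
    counter["rows"] += 20  # stand-in for "select count(*) from db.stb"
    return counter["rows"] >= expected

assert retry_until(_rows_arrived, tries=10, delay=0.01)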
@@ -114,9 +114,9 @@ class TDTestCase:
        elif isinstance(data, str):
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                       (sql, row, col, tdSql.queryResult[row][col], data))
        elif isinstance(data, datetime.date):
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                       (sql, row, col, tdSql.queryResult[row][col], data))
        # elif isinstance(data, datetime.date):
        #     tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
        #                (sql, row, col, tdSql.queryResult[row][col], data))
        elif isinstance(data, float):
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                       (sql, row, col, tdSql.queryResult[row][col], data))

@@ -163,146 +163,20 @@ class TDTestCase:
                is_leader=True

        if count==1 and is_leader:
            tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
            tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
        else:
            tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")

        for k ,v in self.dnode_list.items():
            if k == mnode_name:
                if v[3]==0:
                    tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
                    tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
                else:
                    tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
            else:
                continue

    def create_db_check_vgroups(self):

        tdSql.execute("drop database if exists test")
        tdSql.execute("create database if not exists test replica 1 duration 300")
        tdSql.execute("use test")
        tdSql.execute(
        '''create table stb1
        (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
        tags (t1 int)
        '''
        )
        tdSql.execute(
        '''
        create table t1
        (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
        '''
        )

        for i in range(5):
            tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
        tdSql.query("show stables")
        tdSql.checkRows(1)
        tdSql.query("show tables")
        tdSql.checkRows(6)

        tdSql.query("show test.vgroups;")
        vgroups_infos = {}  # key is id: value is info list
        for vgroup_info in tdSql.queryResult:
            vgroup_id = vgroup_info[0]
            tmp_list = []
            for role in vgroup_info[3:-4]:
                if role in ['leader','follower']:
                    tmp_list.append(role)
            vgroups_infos[vgroup_id]=tmp_list

        for k , v in vgroups_infos.items():
            if len(v) ==1 and v[0]=="leader":
                tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
            else:
                tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))

    def create_database(self, dbname, replica_num ,vgroup_nums ):
        drop_db_sql = "drop database if exists {}".format(dbname)
        create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums)

        tdLog.info(" ==== create database {} and insert rows begin =====".format(dbname))
        tdSql.execute(drop_db_sql)
        tdSql.execute(create_db_sql)
        tdSql.execute("use {}".format(dbname))

    def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums):
        tdSql.execute("use {}".format(dbname))
        tdSql.execute(
        '''create table {}
        (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp)
        tags (t1 int)
        '''.format(stablename)
        )

        for i in range(tb_nums):
            sub_tbname = "sub_{}_{}".format(stablename,i)
            tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i))
            # insert datas about new database

            for row_num in range(row_nums):
                ts = self.ts + self.ts_step*row_num
                tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")

        tdLog.info(" ==== stable {} insert rows execute end =====".format(stablename))

    def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):

        tdSql.execute("use {}".format(dbname))

        for row_num in range(append_nums):
            tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
            # print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
        tdLog.info(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename))
        os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename))

    def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):

        tdSql.execute("use {}".format(dbname))

        tdSql.query("select count(*) from {}.{}".format(dbname,stablename))

        while not tdSql.queryResult:
            time.sleep(0.1)
            tdSql.query("select count(*) from {}.{}".format(dbname,stablename))

        status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)

        count = 0
        while not status_OK :
            if count > self.try_check_times:
                os.system("taos -s ' show {}.vgroups; '".format(dbname))
                tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname))
                break
            time.sleep(0.1)
            tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
            while not tdSql.queryResult:
                time.sleep(0.1)
                tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
            status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
            tdLog.info(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
            count += 1

        tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
        while not tdSql.queryResult:
            time.sleep(0.1)
            tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
        status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
        count = 0
        while not status_OK :
            if count > self.try_check_times:
                os.system("taos -s ' show {}.vgroups;'".format(dbname))
                tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname))
                break
            time.sleep(0.1)
            tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
            while not tdSql.queryResult:
                time.sleep(0.1)
                tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
            status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
            tdLog.info(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname))
            count += 1

    def _get_stop_dnode_id(self,dbname):
        newTdSql=tdCom.newTdSql()
        newTdSql.query("show {}.vgroups".format(dbname))
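The reworked _get_stop_dnode_id above opens its own connection via tdCom.newTdSql() instead of reusing the shared tdSql handle, which matters once the restart logic runs on a background thread: a cursor is not safely shareable across threads. A hedged sketch of the same one-connection-per-thread idea using the public taos Python connector (the host value and worker count here are assumptions, not values from the test):

import threading
import taos  # TDengine Python connector

def query_vgroups(dbname, host="localhost"):
    # each worker opens and closes its own connection
    conn = taos.connect(host=host)
    try:
        cur = conn.cursor()
        cur.execute("show {}.vgroups".format(dbname))
        return cur.fetchall()
    finally:
        conn.close()

threads = [threading.Thread(target=query_vgroups, args=("testdb",)) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()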
@@ -339,8 +213,8 @@ class TDTestCase:
        while status !="offline":
            time.sleep(0.1)
            status = _get_status()
            # tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
        tdLog.info("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
            # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
        tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))

    def wait_start_dnode_OK(self):

@@ -361,8 +235,8 @@ class TDTestCase:
        while status !="ready":
            time.sleep(0.1)
            status = _get_status()
            # tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
        tdLog.info("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
            # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
        tdLog.notice("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))

    def get_leader_infos(self ,dbname):

@@ -389,166 +263,96 @@ class TDTestCase:
            if role==self.stop_dnode_id:

                if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info:
                    tdLog.info(" === revote leader ok , leader is {} now ====".format(vgroup_info[list(vgroup_info).index("leader")-1]))
                    tdLog.notice(" === revote leader ok , leader is {} now ====".format(vgroup_info[list(vgroup_info).index("leader")-1]))
                    check_status = True
                elif vgroup_info[ind+1] !="offline":
                    tdLog.info(" === dnode {} should be offline ".format(self.stop_dnode_id))
                    tdLog.notice(" === dnode {} should be offline ".format(self.stop_dnode_id))
                else:
                    continue
                break
        return check_status

    def sync_run_case(self):
    def start_benchmark_inserts(self,dbname , json_file):
        benchmark_build_path = self.getBuildPath() + '/build/bin/taosBenchmark'
        tdLog.notice("==== start taosBenchmark insert datas of database {} ==== ".format(dbname))
        os.system(" {} -f {} >>/dev/null 2>&1 ".format(benchmark_build_path , json_file))

    def stop_leader_when_Benchmark_inserts(self,dbname , total_rows , json_file ):
        # stop follower and insert datas , update tables and create new stables
        tdDnodes=cluster.dnodes
        for loop in range(self.loop_restart_times):
            db_name = "sync_db_{}".format(loop)
            stablename = 'stable_{}'.format(loop)
            self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1)
            self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 )
            self.stop_dnode_id = self._get_stop_dnode_id(db_name)

            # check rows of datas

            self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0)
        tdSql.execute(" drop database if exists {} ".format(dbname))
        tdSql.execute(" create database {} replica {} vgroups {}".format(dbname , self.replica , self.vgroups))

        # start insert datas using taosBenchmark ,expect insert 10000 rows

        self.current_thread = threading.Thread(target=self.start_benchmark_inserts, args=(dbname,json_file))
        self.current_thread.start()
        tdSql.query(" show databases ")

        # make sure create database ok
        while (tdSql.queryRows!=3):
            time.sleep(0.5)
            tdSql.query(" show databases ")

            # get leader info before stop
            before_leader_infos = self.get_leader_infos(db_name)
        # # make sure create stable ok
        tdSql.query(" show {}.stables ".format(dbname))
        while (tdSql.queryRows!=1):
            time.sleep(0.5)
            tdSql.query(" show {}.stables ".format(dbname))

            # begin stop dnode

            tdDnodes[self.stop_dnode_id-1].stoptaosd()
        # stop leader of database when insert 10% rows
        # os.system("taos -s 'show databases';")
        tdSql.query(" select count(*) from {}.{} ".format(dbname,"stb1"))

            self.wait_stop_dnode_OK()
        while not tdSql.queryResult:
            tdSql.query(" select count(*) from {}.{} ".format(dbname,"stb1"))
        tdLog.debug(" === current insert {} rows in database {} === ".format(tdSql.queryResult[0][0] , dbname))

            # vote leaders check
        while (tdSql.queryResult[0][0] < total_rows/10):
            if tdSql.queryResult:
                tdLog.debug(" === current insert {} rows in database {} === ".format(tdSql.queryResult[0][0] , dbname))
            time.sleep(0.01)
            tdSql.query(" select count(*) from {}.{} ".format(dbname,"stb1"))

        tdLog.debug(" === database {} has write {} rows at least ====".format(dbname,total_rows/10))

            # get leader info after stop
            after_leader_infos = self.get_leader_infos(db_name)

            revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos)
        self.stop_dnode_id = self._get_stop_dnode_id(dbname)

            # append rows of stablename when dnode stop make sure revote leaders
        # prepare stop leader of database
        before_leader_infos = self.get_leader_infos(dbname)

        tdDnodes[self.stop_dnode_id-1].stoptaosd()
        # self.current_thread.join()
        after_leader_infos = self.get_leader_infos(dbname)

            while not revote_status:
                after_leader_infos = self.get_leader_infos(db_name)
                revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos)
        start = time.time()
        revote_status = self.check_revote_leader_success(dbname ,before_leader_infos , after_leader_infos)
        while not revote_status:
            after_leader_infos = self.get_leader_infos(dbname)
            revote_status = self.check_revote_leader_success(dbname ,before_leader_infos , after_leader_infos)
        end = time.time()
        time_cost = end - start
        tdLog.debug(" ==== revote leader of database {} cost time {} ====".format(dbname , time_cost))

        self.current_thread.join()

            if revote_status:
                tbname = "sub_{}_{}".format(stablename , 0)
                tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
                self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
                tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
                self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100)
        tdDnodes[self.stop_dnode_id-1].starttaosd()
        self.wait_start_dnode_OK()

                # create new stables
                tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
                self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 )
                tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
                self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
            else:
                tdLog.info("===== leader of database {} is not ok , append rows fail =====".format(db_name))
        tdSql.query(" select count(*) from {}.{} ".format(dbname,"stb1"))
        tdLog.debug(" ==== expected insert {} rows of database {} , really is {}".format(total_rows, dbname , tdSql.queryResult[0][0]))

            # begin start dnode
            start = time.time()
            tdDnodes[self.stop_dnode_id-1].starttaosd()
            self.wait_start_dnode_OK()
            end = time.time()
            time_cost = int(end -start)
            if time_cost > self.max_restart_time:
                tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))

            # create new stables again
            tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
            self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
            tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
            self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)

    def unsync_run_case(self):

        def _restart_dnode_of_db_unsync(dbname):

            tdDnodes=cluster.dnodes
            self.stop_dnode_id = self._get_stop_dnode_id(dbname)
            # begin restart dnode

            tdDnodes[self.stop_dnode_id-1].stoptaosd()

            tbname = "sub_{}_{}".format(stablename , 0)
            tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
            self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
            tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
            self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100)

            # create new stables
            tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
            self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 )
            tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
            self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)

            # create new stables again
            tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
            self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
            tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
            self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)

            # # get leader info before stop
            # before_leader_infos = self.get_leader_infos(db_name)
            # self.wait_stop_dnode_OK()

            # check revote leader when restart servers
            # # get leader info after stop
            # after_leader_infos = self.get_leader_infos(db_name)
            # revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos)
            # # append rows of stablename when dnode stop make sure revote leaders
            # while not revote_status:
            #     after_leader_infos = self.get_leader_infos(db_name)
            #     revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos)
            tdDnodes[self.stop_dnode_id-1].starttaosd()
            start = time.time()
            self.wait_start_dnode_OK()
            end = time.time()
            time_cost = int(end-start)

            if time_cost > self.max_restart_time:
                tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))

        def _create_threading(dbname):
            self.current_thread = threading.Thread(target=_restart_dnode_of_db_unsync, args=(dbname,))
            return self.current_thread

        '''
        in this mode , it will be extra threading control start or stop dnode , insert will always going with not care follower online or alive
        '''
        for loop in range(self.loop_restart_times):
            db_name = "unsync_db_{}".format(loop)
            stablename = 'stable_{}'.format(loop)
            self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1)
            self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 )

            tdLog.info(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))

            # create sync threading and start it
            self.current_thread = _create_threading(db_name)
            self.current_thread.start()

            # check rows of datas
            self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0)

            self.current_thread.join()

    def run(self):

        # basic insert and check of cluster
        self.check_setup_cluster_status()
        self.create_db_check_vgroups()
        self.sync_run_case()
        # self.unsync_run_case()
        # self.check_setup_cluster_status()
        json = os.path.dirname(__file__) + '/insert_10W_rows.json'
        self.stop_leader_when_Benchmark_inserts('db_1' , 100000 ,json)
        # tdLog.notice( " ===== start insert 100W rows ==== ")
        # json = os.path.dirname(__file__) + '/insert_100W_rows.json'
        # self.stop_leader_when_Benchmark_inserts('db_2' , 1000000 ,json)

    def stop(self):
        tdSql.close()
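The new stop_leader_when_Benchmark_inserts above runs taosBenchmark on a worker thread, polls until roughly 10% of the expected rows have landed, then stops the leader and times the revote. A minimal sketch of that orchestration shape with a stubbed insert job standing in for taosBenchmark (all names here are illustrative, not the framework's):

import threading
import time

rows_written = 0

def insert_job(total_rows):
    # stand-in for "taosBenchmark -f insert_10W_rows.json"
    global rows_written
    while rows_written < total_rows:
        rows_written += 1000
        time.sleep(0.001)

def run_scenario(total_rows=100000):
    worker = threading.Thread(target=insert_job, args=(total_rows,))
    worker.start()

    # wait until at least 10% of the data has been written ...
    while rows_written < total_rows / 10:
        time.sleep(0.01)

    start = time.time()
    # ... here the real test calls stoptaosd() on the leader and polls
    # check_revote_leader_success() until a new leader appears
    time_cost = time.time() - start
    print("revote window opened after {} rows, check took {:.3f}s".format(rows_written, time_cost))

    worker.join()

run_scenario()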
@@ -114,9 +114,9 @@ class TDTestCase:
        elif isinstance(data, str):
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                       (sql, row, col, tdSql.queryResult[row][col], data))
        elif isinstance(data, datetime.date):
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                       (sql, row, col, tdSql.queryResult[row][col], data))
        # elif isinstance(data, datetime.date):
        #     tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
        #                (sql, row, col, tdSql.queryResult[row][col], data))
        elif isinstance(data, float):
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                       (sql, row, col, tdSql.queryResult[row][col], data))

@@ -163,14 +163,14 @@ class TDTestCase:
                is_leader=True

        if count==1 and is_leader:
            tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
            tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
        else:
            tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")

        for k ,v in self.dnode_list.items():
            if k == mnode_name:
                if v[3]==0:
                    tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
                    tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
                else:
                    tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
            else:

@@ -213,7 +213,7 @@ class TDTestCase:

        for k , v in vgroups_infos.items():
            if len(v) ==1 and v[0]=="leader":
                tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
                tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
            else:
                tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))

@@ -221,7 +221,7 @@ class TDTestCase:
        drop_db_sql = "drop database if exists {}".format(dbname)
        create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums)

        tdLog.info(" ==== create database {} and insert rows begin =====".format(dbname))
        tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname))
        tdSql.execute(drop_db_sql)
        tdSql.execute(create_db_sql)
        tdSql.execute("use {}".format(dbname))

@@ -244,7 +244,7 @@ class TDTestCase:
            ts = self.ts + self.ts_step*row_num
            tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")

        tdLog.info(" ==== stable {} insert rows execute end =====".format(stablename))
        tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))

    def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):

@@ -253,7 +253,7 @@ class TDTestCase:
        for row_num in range(append_nums):
            tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
            # print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
        tdLog.info(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename))
        tdLog.notice(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename))
        os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename))

    def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):

@@ -280,7 +280,7 @@ class TDTestCase:
                time.sleep(0.1)
                tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
            status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
            tdLog.info(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
            tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {} ====".format(count , dbname))
            count += 1

@@ -301,7 +301,7 @@ class TDTestCase:
                time.sleep(0.1)
                tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
            status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
            tdLog.info(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname))
            tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname))
            count += 1

    def _get_stop_dnode_id(self,dbname):

@@ -340,8 +340,8 @@ class TDTestCase:
        while status !="offline":
            time.sleep(0.1)
            status = _get_status()
            # tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
        tdLog.info("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
            # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
        tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))

    def wait_start_dnode_OK(self):

@@ -362,8 +362,8 @@ class TDTestCase:
        while status !="ready":
            time.sleep(0.1)
            status = _get_status()
            # tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
        tdLog.info("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
            # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
        tdLog.notice("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))

    def get_leader_infos(self ,dbname):

@@ -390,10 +390,10 @@ class TDTestCase:
            if role==self.stop_dnode_id:

                if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info:
                    tdLog.info(" === revote leader ok , leader is {} now ====".format(vgroup_info[list(vgroup_info).index("leader")-1]))
                    tdLog.notice(" === revote leader ok , leader is {} now ====".format(vgroup_info[list(vgroup_info).index("leader")-1]))
                    check_status = True
                elif vgroup_info[ind+1] !="offline":
                    tdLog.info(" === dnode {} should be offline ".format(self.stop_dnode_id))
                    tdLog.notice(" === dnode {} should be offline ".format(self.stop_dnode_id))
                else:
                    continue
                break

@@ -410,7 +410,7 @@ class TDTestCase:
        else:
            continue
        if port:
            tdLog.info(" ==== dnode {} will be force stop by kill -9 ====".format(dnode_id))
            tdLog.notice(" ==== dnode {} will be force stop by kill -9 ====".format(dnode_id))
            psCmd = '''netstat -anp|grep -w LISTEN|grep -w %s |grep -o "LISTEN.*"|awk '{print $2}'|cut -d/ -f1|head -n1''' %(port)
            processID = subprocess.check_output(
                psCmd, shell=True).decode("utf-8")

@@ -457,18 +457,18 @@ class TDTestCase:

        if revote_status:
            tbname = "sub_{}_{}".format(stablename , 0)
            tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
            tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
            self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
            tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
            tdLog.notice(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
            self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100)

            # create new stables
            tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
            tdLog.notice(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
            self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 )
            tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
            tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
            self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
        else:
            tdLog.info("===== leader of database {} is not ok , append rows fail =====".format(db_name))
            tdLog.notice("===== leader of database {} is not ok , append rows fail =====".format(db_name))

        # begin start dnode
        start = time.time()

@@ -480,9 +480,9 @@ class TDTestCase:
            tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))

        # create new stables again
        tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
        tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
        self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
        tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
        tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
        self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)

    def unsync_run_case(self):

@@ -509,21 +509,21 @@ class TDTestCase:
            revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos)

            tbname = "sub_{}_{}".format(stablename , 0)
            tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
            tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
            self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
            tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
            tdLog.notice(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
            self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100)

            # create new stables
            tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
            tdLog.notice(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
            self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 )
            tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
            tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
            self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)

            # create new stables again
            tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
            tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
            self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
            tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
            tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
            self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)

@@ -551,7 +551,7 @@ class TDTestCase:
            self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1)
            self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 )

            tdLog.info(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
            tdLog.notice(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))

            # create sync threading and start it
            self.current_thread = _create_threading(db_name)
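Several hunks above compare leader snapshots taken before and after a dnode stops (check_revote_leader_success). The essence is a per-vgroup comparison of who holds the leader role; a hedged sketch, with the snapshot layout assumed to be {vgroup_id: leader_dnode_id} rather than the raw "show vgroups" rows the tests actually parse:

def leader_moved(before, after, stopped_dnode_id):
    # True only if every vgroup led by the stopped dnode elected a new leader
    for vgroup_id, old_leader in before.items():
        if old_leader != stopped_dnode_id:
            continue  # this vgroup is unaffected by the stop
        new_leader = after.get(vgroup_id)
        if new_leader is None or new_leader == stopped_dnode_id:
            return False  # no revote yet for this vgroup
    return True

# usage sketch: dnode 1 led vgroup 2 before the stop, dnode 3 leads it after
before = {2: 1, 3: 2}
after = {2: 3, 3: 2}
assert leader_moved(before, after, stopped_dnode_id=1)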
@@ -0,0 +1,416 @@
# author : wenzhouwww
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *

import datetime
import inspect
import time
import socket
import subprocess
import threading
sys.path.append(os.path.dirname(__file__))

class TDTestCase:
    def init(self,conn ,logSql):
        tdLog.debug(f"start to excute {__file__}")
        tdSql.init(conn.cursor())
        self.host = socket.gethostname()
        self.mnode_list = {}
        self.dnode_list = {}
        self.ts = 1483200000000
        self.ts_step =1000
        self.db_name ='testdb'
        self.replica = 3
        self.vgroups = 1
        self.tb_nums = 10
        self.row_nums = 100
        self.stop_dnode_id = None
        self.loop_restart_times = 5
        self.thread_list = []
        self.max_restart_time = 10
        self.try_check_times = 10
        self.query_times = 100

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))
        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def check_setup_cluster_status(self):
        tdSql.query("show mnodes")
        for mnode in tdSql.queryResult:
            name = mnode[1]
            info = mnode
            self.mnode_list[name] = info

        tdSql.query("show dnodes")
        for dnode in tdSql.queryResult:
            name = dnode[1]
            info = dnode
            self.dnode_list[name] = info

        count = 0
        is_leader = False
        mnode_name = ''
        for k,v in self.mnode_list.items():
            count +=1
            # only for 1 mnode
            mnode_name = k

            if v[2] =='leader':
                is_leader=True

        if count==1 and is_leader:
            tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
        else:
            tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")

        for k ,v in self.dnode_list.items():
            if k == mnode_name:
                if v[3]==0:
                    tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
                else:
                    tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
            else:
                continue

    def create_database(self, dbname, replica_num ,vgroup_nums ):
        drop_db_sql = "drop database if exists {}".format(dbname)
        create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums)

        tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname))
        tdSql.execute(drop_db_sql)
        tdSql.execute(create_db_sql)
        tdSql.execute("use {}".format(dbname))

    def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums):
        tdSql.execute("use {}".format(dbname))
        tdSql.execute(
        '''create table {}
        (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp)
        tags (t1 int)
        '''.format(stablename)
        )

        for i in range(tb_nums):
            sub_tbname = "sub_{}_{}".format(stablename,i)
            tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i))
            # insert datas about new database

            for row_num in range(row_nums):
                ts = self.ts + self.ts_step*row_num
                tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")

        tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))

    def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):

        tdSql.execute("use {}".format(dbname))

        for row_num in range(append_nums):
            tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
            # print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
        tdLog.notice(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename))
        os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename))

    def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):

        tdSql.execute("use {}".format(dbname))

        tdSql.query("select count(*) from {}.{}".format(dbname,stablename))

        while not tdSql.queryResult:
            time.sleep(0.1)
            tdSql.query("select count(*) from {}.{}".format(dbname,stablename))

        status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)

        count = 0
        while not status_OK :
            if count > self.try_check_times:
                os.system("taos -s ' show {}.vgroups; '".format(dbname))
                tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname))
                break
            time.sleep(0.1)
            tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
            while not tdSql.queryResult:
                time.sleep(0.1)
                tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
            status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
            tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
            count += 1

        tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
        while not tdSql.queryResult:
            time.sleep(0.1)
            tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
        status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
        count = 0
        while not status_OK :
            if count > self.try_check_times:
                os.system("taos -s ' show {}.vgroups;'".format(dbname))
                tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname))
                break
            time.sleep(0.1)
            tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
            while not tdSql.queryResult:
                time.sleep(0.1)
                tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
            status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
            tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname))
            count += 1

    def _get_stop_dnode_id(self,dbname):
        tdSql.query("show {}.vgroups".format(dbname))
        vgroup_infos = tdSql.queryResult
        for vgroup_info in vgroup_infos:
            leader_infos = vgroup_info[3:-4]
            # print(vgroup_info)
            for ind ,role in enumerate(leader_infos):
                if role =='follower':
                    # print(ind,leader_infos)
                    self.stop_dnode_id = leader_infos[ind-1]
                    break

        return self.stop_dnode_id

    def wait_stop_dnode_OK(self):

        def _get_status():
            newTdSql=tdCom.newTdSql()

            status = ""
            newTdSql.query("show dnodes")
            dnode_infos = newTdSql.queryResult
            for dnode_info in dnode_infos:
                id = dnode_info[0]
                dnode_status = dnode_info[4]
                if id == self.stop_dnode_id:
                    status = dnode_status
                    break
            return status

        status = _get_status()
        while status !="offline":
            time.sleep(0.1)
            status = _get_status()
            # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
        tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))

    def wait_start_dnode_OK(self):

        def _get_status():
            newTdSql=tdCom.newTdSql()
            status = ""
            newTdSql.query("show dnodes")
            dnode_infos = newTdSql.queryResult
            for dnode_info in dnode_infos:
                id = dnode_info[0]
                dnode_status = dnode_info[4]
                if id == self.stop_dnode_id:
                    status = dnode_status
                    break
            return status

        status = _get_status()
        while status !="ready":
            time.sleep(0.1)
            status = _get_status()
            # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
        tdLog.notice("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
def _parse_datetime(self,timestr):
|
||||
try:
|
||||
return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S.%f')
|
||||
except ValueError:
|
||||
pass
|
||||
try:
|
||||
return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S')
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
def mycheckRowCol(self, sql, row, col):
|
||||
caller = inspect.getframeinfo(inspect.stack()[2][0])
|
||||
if row < 0:
|
||||
args = (caller.filename, caller.lineno, sql, row)
|
||||
tdLog.exit("%s(%d) failed: sql:%s, row:%d is smaller than zero" % args)
|
||||
if col < 0:
|
||||
args = (caller.filename, caller.lineno, sql, row)
|
||||
tdLog.exit("%s(%d) failed: sql:%s, col:%d is smaller than zero" % args)
|
||||
if row > tdSql.queryRows:
|
||||
args = (caller.filename, caller.lineno, sql, row, tdSql.queryRows)
|
||||
tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args)
|
||||
if col > tdSql.queryCols:
|
||||
args = (caller.filename, caller.lineno, sql, col, tdSql.queryCols)
|
||||
tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args)
|
||||
|
||||
def mycheckData(self, sql ,row, col, data):
|
||||
check_status = True
|
||||
self.mycheckRowCol(sql ,row, col)
|
||||
if tdSql.queryResult[row][col] != data:
|
||||
if tdSql.cursor.istype(col, "TIMESTAMP"):
|
||||
# suppose user want to check nanosecond timestamp if a longer data passed
|
||||
if (len(data) >= 28):
|
||||
if pd.to_datetime(tdSql.queryResult[row][col]) == pd.to_datetime(data):
|
||||
tdLog.info("sql:%s, row:%d col:%d data:%d == expect:%s" %
|
||||
(sql, row, col, tdSql.queryResult[row][col], data))
|
||||
else:
|
||||
if tdSql.queryResult[row][col] == self._parse_datetime(data):
|
||||
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
|
||||
(sql, row, col, tdSql.queryResult[row][col], data))
|
||||
return
|
||||
|
||||
if str(tdSql.queryResult[row][col]) == str(data):
|
||||
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
|
||||
(sql, row, col, tdSql.queryResult[row][col], data))
|
||||
return
|
||||
elif isinstance(data, float) and abs(tdSql.queryResult[row][col] - data) <= 0.000001:
|
||||
tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
|
||||
(sql, row, col, tdSql.queryResult[row][col], data))
|
||||
return
|
||||
else:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, sql, row, col, tdSql.queryResult[row][col], data)
|
||||
tdLog.info("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
|
||||
|
||||
check_status = False
|
||||
|
||||
if data is None:
|
||||
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
|
||||
(sql, row, col, tdSql.queryResult[row][col], data))
|
||||
elif isinstance(data, str):
|
||||
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
|
||||
(sql, row, col, tdSql.queryResult[row][col], data))
|
||||
# elif isinstance(data, datetime.date):
|
||||
# tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
|
||||
# (sql, row, col, tdSql.queryResult[row][col], data))
|
||||
elif isinstance(data, float):
|
||||
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
|
||||
(sql, row, col, tdSql.queryResult[row][col], data))
|
||||
else:
|
||||
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%d" %
|
||||
(sql, row, col, tdSql.queryResult[row][col], data))
|
||||
|
||||
return check_status
|
||||
|
||||
def mycheckRows(self, sql, expectRows):
|
||||
check_status = True
|
||||
if len(tdSql.queryResult) == expectRows:
|
||||
tdLog.info("sql:%s, queryRows:%d == expect:%d" % (sql, len(tdSql.queryResult), expectRows))
|
||||
return True
|
||||
else:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, sql, len(tdSql.queryResult), expectRows)
|
||||
tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
|
||||
check_status = False
|
||||
return check_status
|
||||
|
||||
|
||||
    def force_stop_dnode(self, dnode_id):
        tdSql.query("show dnodes")
        port = None
        for dnode_info in tdSql.queryResult:
            if dnode_id == dnode_info[0]:
                port = dnode_info[1].split(":")[-1]
                break
        if port:
            tdLog.notice(" ==== dnode {} will be force stopped by kill -9 ====".format(dnode_id))
            psCmd = '''netstat -anp|grep -w LISTEN|grep -w %s |grep -o "LISTEN.*"|awk '{print $2}'|cut -d/ -f1|head -n1''' % (port)
            processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
            ps_kill_taosd = ''' kill -9 {} '''.format(processID)
            os.system(ps_kill_taosd)

    def basic_query_task(self, dbname, stablename):
        sql = "select * from {}.{} ;".format(dbname, stablename)
        count = 0
        while count < self.query_times:
            os.system(''' taos -s '{}' >>/dev/null '''.format(sql))
            count += 1

    def multi_thread_query_task(self, thread_nums, dbname, stablename):
        for i in range(thread_nums):
            task = threading.Thread(target=self.basic_query_task, args=(dbname, stablename))
            self.thread_list.append(task)
        for thread in self.thread_list:
            thread.start()
        return self.thread_list
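
    # Scenario: keep 10 query threads running against stb1 while one replica
    # dnode is restarted in a loop; each restart must finish within
    # self.max_restart_time seconds or the case fails.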
    def stop_follower_when_query_going(self):
        tdDnodes = cluster.dnodes
        self.create_database(dbname=self.db_name, replica_num=self.replica, vgroup_nums=1)
        self.create_stable_insert_datas(dbname=self.db_name, stablename="stb1", tb_nums=self.tb_nums, row_nums=self.row_nums)

        # let the query task start
        self.thread_list = self.multi_thread_query_task(10, self.db_name, 'stb1')

        # stop a follower in a loop
        for loop in range(self.loop_restart_times):
            tdLog.debug(" ==== this is the {}_th restart of a follower of database {} ==== ".format(loop, self.db_name))
            self.stop_dnode_id = self._get_stop_dnode_id(self.db_name)
            tdDnodes[self.stop_dnode_id - 1].stoptaosd()
            self.wait_stop_dnode_OK()

            start = time.time()
            tdDnodes[self.stop_dnode_id - 1].starttaosd()
            self.wait_start_dnode_OK()
            end = time.time()
            time_cost = int(end - start)

            if time_cost > self.max_restart_time:
                tdLog.exit(" ==== restarting dnode {} cost too much time , please check ====".format(self.stop_dnode_id))

        for thread in self.thread_list:
            thread.join()

    def run(self):
        # basic check of cluster
        self.check_setup_cluster_status()
        self.stop_follower_when_query_going()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

@@ -0,0 +1,416 @@
# author : wenzhouwww
import taos
import sys
import time
import os

from util.log import *
from util.sql import *
from util.cases import *
from util.common import *  # assumed source of tdCom, used below to open extra connections
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *

import datetime
import inspect
import socket
import subprocess
import threading
import pandas as pd  # needed by mycheckData for timestamp comparison
sys.path.append(os.path.dirname(__file__))


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor())
        self.host = socket.gethostname()
        self.mnode_list = {}
        self.dnode_list = {}
        self.ts = 1483200000000
        self.ts_step = 1000
        self.db_name = 'testdb'
        self.replica = 3
        self.vgroups = 1
        self.tb_nums = 10
        self.row_nums = 100
        self.stop_dnode_id = None
        self.loop_restart_times = 5
        self.thread_list = []
        self.max_restart_time = 10
        self.try_check_times = 10
        self.query_times = 100

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))
        if "community" in selfPath:
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        buildPath = ""
        for root, dirs, files in os.walk(projPath):
            if "taosd" in files:
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if "packaging" not in rootRealPath:
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def check_setup_cluster_status(self):
        tdSql.query("show mnodes")
        for mnode in tdSql.queryResult:
            name = mnode[1]
            info = mnode
            self.mnode_list[name] = info

        tdSql.query("show dnodes")
        for dnode in tdSql.queryResult:
            name = dnode[1]
            info = dnode
            self.dnode_list[name] = info

        count = 0
        is_leader = False
        mnode_name = ''
        for k, v in self.mnode_list.items():
            count += 1
            # only for 1 mnode
            mnode_name = k
            if v[2] == 'leader':
                is_leader = True

        if count == 1 and is_leader:
            tdLog.notice("===== deploy cluster success with 1 mnode as leader =====")
        else:
            tdLog.exit("===== deploy cluster fail with 1 mnode as leader =====")

        for k, v in self.dnode_list.items():
            if k == mnode_name:
                if v[3] == 0:
                    tdLog.notice("===== deploy cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name, v[3]))
                else:
                    tdLog.exit("===== deploy cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name, v[3]))
            else:
                continue

    def create_database(self, dbname, replica_num, vgroup_nums):
        drop_db_sql = "drop database if exists {}".format(dbname)
        create_db_sql = "create database {} replica {} vgroups {}".format(dbname, replica_num, vgroup_nums)

        tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname))
        tdSql.execute(drop_db_sql)
        tdSql.execute(create_db_sql)
        tdSql.execute("use {}".format(dbname))

    def create_stable_insert_datas(self, dbname, stablename, tb_nums, row_nums):
        tdSql.execute("use {}".format(dbname))
        tdSql.execute(
            '''create table {}
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32), c9 nchar(32), c10 timestamp)
            tags (t1 int)
            '''.format(stablename)
        )

        for i in range(tb_nums):
            sub_tbname = "sub_{}_{}".format(stablename, i)
            tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename, i))
            # insert rows into the new database
            for row_num in range(row_nums):
                ts = self.ts + self.ts_step * row_num
                tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")

        tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))

    def append_rows_of_exists_tables(self, dbname, stablename, tbname, append_nums):
        tdSql.execute("use {}".format(dbname))
        for row_num in range(append_nums):
            tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
        tdLog.notice(" ==== append new rows of table {} belonging to stable {} execute end =====".format(tbname, stablename))
        os.system("taos -s 'select count(*) from {}.{}';".format(dbname, stablename))

    def check_insert_rows(self, dbname, stablename, tb_nums, row_nums, append_rows):
        tdSql.execute("use {}".format(dbname))

        tdSql.query("select count(*) from {}.{}".format(dbname, stablename))
        while not tdSql.queryResult:
            time.sleep(0.1)
            tdSql.query("select count(*) from {}.{}".format(dbname, stablename))

        status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname, stablename), 0, 0, tb_nums * row_nums + append_rows)

        count = 0
        while not status_OK:
            if count > self.try_check_times:
                os.system("taos -s ' show {}.vgroups; '".format(dbname))
                tdLog.exit(" ==== check insert rows still failed after {} retries (max {}) for database {}".format(count, self.try_check_times, dbname))
                break
            time.sleep(0.1)
            tdSql.query("select count(*) from {}.{}".format(dbname, stablename))
            while not tdSql.queryResult:
                time.sleep(0.1)
                tdSql.query("select count(*) from {}.{}".format(dbname, stablename))
            status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname, stablename), 0, 0, tb_nums * row_nums + append_rows)
            tdLog.notice(" ==== first check of insert rows failed , this is the {}_th retry for database {}".format(count, dbname))
            count += 1

        tdSql.query("select distinct tbname from {}.{}".format(dbname, stablename))
        while not tdSql.queryResult:
            time.sleep(0.1)
            tdSql.query("select distinct tbname from {}.{}".format(dbname, stablename))
        status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname, stablename), tb_nums)
        count = 0
        while not status_OK:
            if count > self.try_check_times:
                os.system("taos -s ' show {}.vgroups;'".format(dbname))
                tdLog.exit(" ==== check insert rows still failed after {} retries (max {}) for database {}".format(count, self.try_check_times, dbname))
                break
            time.sleep(0.1)
            tdSql.query("select distinct tbname from {}.{}".format(dbname, stablename))
            while not tdSql.queryResult:
                time.sleep(0.1)
                tdSql.query("select distinct tbname from {}.{}".format(dbname, stablename))
            status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname, stablename), tb_nums)
            tdLog.notice(" ==== first check of insert tbnames failed , this is the {}_th retry for database {}".format(count, dbname))
            count += 1
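
    # `show <db>.vgroups` rows are assumed to carry (dnode_id, role) pairs in
    # the columns sliced by [3:-4]; the dnode preceding a 'follower' role is
    # picked as the one to stop, so the vgroup keeps its leader alive.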
    def _get_stop_dnode_id(self, dbname):
        tdSql.query("show {}.vgroups".format(dbname))
        vgroup_infos = tdSql.queryResult
        for vgroup_info in vgroup_infos:
            leader_infos = vgroup_info[3:-4]
            for ind, role in enumerate(leader_infos):
                if role == 'follower':
                    self.stop_dnode_id = leader_infos[ind - 1]
                    break
        return self.stop_dnode_id
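
    # Polling uses a fresh connection from tdCom.newTdSql() on every probe so
    # a dead connection to the stopped dnode cannot stall the status check.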
    def wait_stop_dnode_OK(self):

        def _get_status():
            newTdSql = tdCom.newTdSql()
            status = ""
            newTdSql.query("show dnodes")
            dnode_infos = newTdSql.queryResult
            for dnode_info in dnode_infos:
                id = dnode_info[0]
                dnode_status = dnode_info[4]
                if id == self.stop_dnode_id:
                    status = dnode_status
                    break
            return status

        status = _get_status()
        while status != "offline":
            time.sleep(0.1)
            status = _get_status()
        tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))

    def wait_start_dnode_OK(self):

        def _get_status():
            newTdSql = tdCom.newTdSql()
            status = ""
            newTdSql.query("show dnodes")
            dnode_infos = newTdSql.queryResult
            for dnode_info in dnode_infos:
                id = dnode_info[0]
                dnode_status = dnode_info[4]
                if id == self.stop_dnode_id:
                    status = dnode_status
                    break
            return status

        status = _get_status()
        while status != "ready":
            time.sleep(0.1)
            status = _get_status()
        tdLog.notice("==== stop_dnode has restarted , id is {}".format(self.stop_dnode_id))

    def _parse_datetime(self, timestr):
        try:
            return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S.%f')
        except ValueError:
            pass
        try:
            return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S')
        except ValueError:
            pass

    def mycheckRowCol(self, sql, row, col):
        caller = inspect.getframeinfo(inspect.stack()[2][0])
        if row < 0:
            args = (caller.filename, caller.lineno, sql, row)
            tdLog.exit("%s(%d) failed: sql:%s, row:%d is smaller than zero" % args)
        if col < 0:
            args = (caller.filename, caller.lineno, sql, col)
            tdLog.exit("%s(%d) failed: sql:%s, col:%d is smaller than zero" % args)
        if row > tdSql.queryRows:
            args = (caller.filename, caller.lineno, sql, row, tdSql.queryRows)
            tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args)
        if col > tdSql.queryCols:
            args = (caller.filename, caller.lineno, sql, col, tdSql.queryCols)
            tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args)

    def mycheckData(self, sql, row, col, data):
        check_status = True
        self.mycheckRowCol(sql, row, col)
        if tdSql.queryResult[row][col] != data:
            if tdSql.cursor.istype(col, "TIMESTAMP"):
                # assume the user wants to check a nanosecond timestamp when a longer string is passed
                if len(data) >= 28:
                    if pd.to_datetime(tdSql.queryResult[row][col]) == pd.to_datetime(data):
                        tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                                   (sql, row, col, tdSql.queryResult[row][col], data))
                else:
                    if tdSql.queryResult[row][col] == self._parse_datetime(data):
                        tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                                   (sql, row, col, tdSql.queryResult[row][col], data))
                return

            if str(tdSql.queryResult[row][col]) == str(data):
                tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                           (sql, row, col, tdSql.queryResult[row][col], data))
                return
            elif isinstance(data, float) and abs(tdSql.queryResult[row][col] - data) <= 0.000001:
                tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
                           (sql, row, col, tdSql.queryResult[row][col], data))
                return
            else:
                caller = inspect.getframeinfo(inspect.stack()[1][0])
                args = (caller.filename, caller.lineno, sql, row, col, tdSql.queryResult[row][col], data)
                tdLog.info("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
                check_status = False

        if data is None:
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                       (sql, row, col, tdSql.queryResult[row][col], data))
        elif isinstance(data, str):
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                       (sql, row, col, tdSql.queryResult[row][col], data))
        # elif isinstance(data, datetime.date):
        #     tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
        #                (sql, row, col, tdSql.queryResult[row][col], data))
        elif isinstance(data, float):
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                       (sql, row, col, tdSql.queryResult[row][col], data))
        else:
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%d" %
                       (sql, row, col, tdSql.queryResult[row][col], data))

        return check_status

    def mycheckRows(self, sql, expectRows):
        check_status = True
        if len(tdSql.queryResult) == expectRows:
            tdLog.info("sql:%s, queryRows:%d == expect:%d" % (sql, len(tdSql.queryResult), expectRows))
            return True
        else:
            caller = inspect.getframeinfo(inspect.stack()[1][0])
            args = (caller.filename, caller.lineno, sql, len(tdSql.queryResult), expectRows)
            tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
            check_status = False
        return check_status

    def force_stop_dnode(self, dnode_id):
        tdSql.query("show dnodes")
        port = None
        for dnode_info in tdSql.queryResult:
            if dnode_id == dnode_info[0]:
                port = dnode_info[1].split(":")[-1]
                break
        if port:
            tdLog.notice(" ==== dnode {} will be force stopped by kill -9 ====".format(dnode_id))
            psCmd = '''netstat -anp|grep -w LISTEN|grep -w %s |grep -o "LISTEN.*"|awk '{print $2}'|cut -d/ -f1|head -n1''' % (port)
            processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
            ps_kill_taosd = ''' kill -9 {} '''.format(processID)
            os.system(ps_kill_taosd)

    def basic_query_task(self, dbname, stablename):
        sql = "select * from {}.{} ;".format(dbname, stablename)
        count = 0
        while count < self.query_times:
            os.system(''' taos -s '{}' >>/dev/null '''.format(sql))
            count += 1

    def multi_thread_query_task(self, thread_nums, dbname, stablename):
        for i in range(thread_nums):
            task = threading.Thread(target=self.basic_query_task, args=(dbname, stablename))
            self.thread_list.append(task)
        for thread in self.thread_list:
            thread.start()
        return self.thread_list
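
    # Same scenario as the stoptaosd variant above, but the follower is
    # killed with kill -9 to simulate a crash instead of a graceful shutdown.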
    def stop_follower_when_query_going(self):
        tdDnodes = cluster.dnodes
        self.create_database(dbname=self.db_name, replica_num=self.replica, vgroup_nums=1)
        self.create_stable_insert_datas(dbname=self.db_name, stablename="stb1", tb_nums=self.tb_nums, row_nums=self.row_nums)

        # let the query task start
        self.thread_list = self.multi_thread_query_task(10, self.db_name, 'stb1')

        # force stop a follower in a loop
        for loop in range(self.loop_restart_times):
            tdLog.debug(" ==== this is the {}_th restart of a follower of database {} ==== ".format(loop, self.db_name))
            self.stop_dnode_id = self._get_stop_dnode_id(self.db_name)
            self.force_stop_dnode(self.stop_dnode_id)
            self.wait_stop_dnode_OK()

            start = time.time()
            tdDnodes[self.stop_dnode_id - 1].starttaosd()
            self.wait_start_dnode_OK()
            end = time.time()
            time_cost = int(end - start)

            if time_cost > self.max_restart_time:
                tdLog.exit(" ==== restarting dnode {} cost too much time , please check ====".format(self.stop_dnode_id))

        for thread in self.thread_list:
            thread.join()

    def run(self):
        # basic check of cluster
        self.check_setup_cluster_status()
        self.stop_follower_when_query_going()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

@@ -0,0 +1,470 @@
# author : wenzhouwww
import taos
import sys
import time
import os

from util.log import *
from util.sql import *
from util.cases import *
from util.common import *  # assumed source of tdCom, used below to open extra connections
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *

import datetime
import inspect
import socket
import subprocess
import threading
import pandas as pd  # needed by mycheckData for timestamp comparison
sys.path.append(os.path.dirname(__file__))


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor())
        self.host = socket.gethostname()
        self.mnode_list = {}
        self.dnode_list = {}
        self.ts = 1483200000000
        self.ts_step = 1000
        self.db_name = 'testdb'
        self.replica = 3
        self.vgroups = 1
        self.tb_nums = 10
        self.row_nums = 100
        self.stop_dnode_id = None
        self.loop_restart_times = 5
        self.thread_list = []
        self.max_restart_time = 10
        self.try_check_times = 10
        self.query_times = 100

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))
        if "community" in selfPath:
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        buildPath = ""
        for root, dirs, files in os.walk(projPath):
            if "taosd" in files:
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if "packaging" not in rootRealPath:
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def check_setup_cluster_status(self):
        tdSql.query("show mnodes")
        for mnode in tdSql.queryResult:
            name = mnode[1]
            info = mnode
            self.mnode_list[name] = info

        tdSql.query("show dnodes")
        for dnode in tdSql.queryResult:
            name = dnode[1]
            info = dnode
            self.dnode_list[name] = info

        count = 0
        is_leader = False
        mnode_name = ''
        for k, v in self.mnode_list.items():
            count += 1
            # only for 1 mnode
            mnode_name = k
            if v[2] == 'leader':
                is_leader = True

        if count == 1 and is_leader:
            tdLog.notice("===== deploy cluster success with 1 mnode as leader =====")
        else:
            tdLog.exit("===== deploy cluster fail with 1 mnode as leader =====")

        for k, v in self.dnode_list.items():
            if k == mnode_name:
                if v[3] == 0:
                    tdLog.notice("===== deploy cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name, v[3]))
                else:
                    tdLog.exit("===== deploy cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name, v[3]))
            else:
                continue

    def create_database(self, dbname, replica_num, vgroup_nums):
        drop_db_sql = "drop database if exists {}".format(dbname)
        create_db_sql = "create database {} replica {} vgroups {}".format(dbname, replica_num, vgroup_nums)

        tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname))
        tdSql.execute(drop_db_sql)
        tdSql.execute(create_db_sql)
        tdSql.execute("use {}".format(dbname))

    def create_stable_insert_datas(self, dbname, stablename, tb_nums, row_nums):
        tdSql.execute("use {}".format(dbname))
        tdSql.execute(
            '''create table {}
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32), c9 nchar(32), c10 timestamp)
            tags (t1 int)
            '''.format(stablename)
        )

        for i in range(tb_nums):
            sub_tbname = "sub_{}_{}".format(stablename, i)
            tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename, i))
            # insert rows into the new database
            for row_num in range(row_nums):
                ts = self.ts + self.ts_step * row_num
                tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")

        tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))

    def append_rows_of_exists_tables(self, dbname, stablename, tbname, append_nums):
        tdSql.execute("use {}".format(dbname))
        for row_num in range(append_nums):
            tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
        tdLog.notice(" ==== append new rows of table {} belonging to stable {} execute end =====".format(tbname, stablename))
        os.system("taos -s 'select count(*) from {}.{}';".format(dbname, stablename))

    def check_insert_rows(self, dbname, stablename, tb_nums, row_nums, append_rows):
        tdSql.execute("use {}".format(dbname))

        tdSql.query("select count(*) from {}.{}".format(dbname, stablename))
        while not tdSql.queryResult:
            time.sleep(0.1)
            tdSql.query("select count(*) from {}.{}".format(dbname, stablename))

        status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname, stablename), 0, 0, tb_nums * row_nums + append_rows)

        count = 0
        while not status_OK:
            if count > self.try_check_times:
                os.system("taos -s ' show {}.vgroups; '".format(dbname))
                tdLog.exit(" ==== check insert rows still failed after {} retries (max {}) for database {}".format(count, self.try_check_times, dbname))
                break
            time.sleep(0.1)
            tdSql.query("select count(*) from {}.{}".format(dbname, stablename))
            while not tdSql.queryResult:
                time.sleep(0.1)
                tdSql.query("select count(*) from {}.{}".format(dbname, stablename))
            status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname, stablename), 0, 0, tb_nums * row_nums + append_rows)
            tdLog.notice(" ==== first check of insert rows failed , this is the {}_th retry for database {}".format(count, dbname))
            count += 1

        tdSql.query("select distinct tbname from {}.{}".format(dbname, stablename))
        while not tdSql.queryResult:
            time.sleep(0.1)
            tdSql.query("select distinct tbname from {}.{}".format(dbname, stablename))
        status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname, stablename), tb_nums)
        count = 0
        while not status_OK:
            if count > self.try_check_times:
                os.system("taos -s ' show {}.vgroups;'".format(dbname))
                tdLog.exit(" ==== check insert rows still failed after {} retries (max {}) for database {}".format(count, self.try_check_times, dbname))
                break
            time.sleep(0.1)
            tdSql.query("select distinct tbname from {}.{}".format(dbname, stablename))
            while not tdSql.queryResult:
                time.sleep(0.1)
                tdSql.query("select distinct tbname from {}.{}".format(dbname, stablename))
            status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname, stablename), tb_nums)
            tdLog.notice(" ==== first check of insert tbnames failed , this is the {}_th retry for database {}".format(count, dbname))
            count += 1
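
    # Unlike the follower cases, this test stops the dnode that currently
    # holds the 'leader' role, which is what forces a leader re-election.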
    def _get_stop_dnode_id(self, dbname):
        tdSql.query("show {}.vgroups".format(dbname))
        vgroup_infos = tdSql.queryResult
        for vgroup_info in vgroup_infos:
            leader_infos = vgroup_info[3:-4]
            for ind, role in enumerate(leader_infos):
                if role == 'leader':
                    self.stop_dnode_id = leader_infos[ind - 1]
                    break
        return self.stop_dnode_id

    def wait_stop_dnode_OK(self):

        def _get_status():
            newTdSql = tdCom.newTdSql()
            status = ""
            newTdSql.query("show dnodes")
            dnode_infos = newTdSql.queryResult
            for dnode_info in dnode_infos:
                id = dnode_info[0]
                dnode_status = dnode_info[4]
                if id == self.stop_dnode_id:
                    status = dnode_status
                    break
            return status

        status = _get_status()
        while status != "offline":
            time.sleep(0.1)
            status = _get_status()
        tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
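
    # Re-election is detected by set difference: any vgroup tuple present in
    # the snapshot taken after the stop but not before it must have changed
    # roles, and the stopped dnode should now show as offline.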
    def check_revote_leader_success(self, dbname, before_leader_infos, after_leader_infos):
        check_status = False
        vote_act = set(after_leader_infos) - set(before_leader_infos)
        if not vote_act:
            print("=======before_revote_leader_infos ======\n", before_leader_infos)
            print("=======after_revote_leader_infos ======\n", after_leader_infos)
            tdLog.info(" === maybe the revote has not occurred , there is no dnode offline ====")
        else:
            for vgroup_info in vote_act:
                for ind, role in enumerate(vgroup_info):
                    if role == self.stop_dnode_id:
                        if vgroup_info[ind + 1] == "offline" and "leader" in vgroup_info:
                            tdLog.notice(" === revote leader ok , leader is {} now ====".format(vgroup_info[list(vgroup_info).index("leader") - 1]))
                            check_status = True
                        elif vgroup_info[ind + 1] != "offline":
                            tdLog.notice(" === dnode {} should be offline ".format(self.stop_dnode_id))
                        else:
                            continue
                        break
        return check_status

    def wait_start_dnode_OK(self):

        def _get_status():
            newTdSql = tdCom.newTdSql()
            status = ""
            newTdSql.query("show dnodes")
            dnode_infos = newTdSql.queryResult
            for dnode_info in dnode_infos:
                id = dnode_info[0]
                dnode_status = dnode_info[4]
                if id == self.stop_dnode_id:
                    status = dnode_status
                    break
            return status

        status = _get_status()
        while status != "ready":
            time.sleep(0.1)
            status = _get_status()
        tdLog.notice("==== stop_dnode has restarted , id is {}".format(self.stop_dnode_id))

    def _parse_datetime(self, timestr):
        try:
            return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S.%f')
        except ValueError:
            pass
        try:
            return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S')
        except ValueError:
            pass

    def mycheckRowCol(self, sql, row, col):
        caller = inspect.getframeinfo(inspect.stack()[2][0])
        if row < 0:
            args = (caller.filename, caller.lineno, sql, row)
            tdLog.exit("%s(%d) failed: sql:%s, row:%d is smaller than zero" % args)
        if col < 0:
            args = (caller.filename, caller.lineno, sql, col)
            tdLog.exit("%s(%d) failed: sql:%s, col:%d is smaller than zero" % args)
        if row > tdSql.queryRows:
            args = (caller.filename, caller.lineno, sql, row, tdSql.queryRows)
            tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args)
        if col > tdSql.queryCols:
            args = (caller.filename, caller.lineno, sql, col, tdSql.queryCols)
            tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args)

    def mycheckData(self, sql, row, col, data):
        check_status = True
        self.mycheckRowCol(sql, row, col)
        if tdSql.queryResult[row][col] != data:
            if tdSql.cursor.istype(col, "TIMESTAMP"):
                # assume the user wants to check a nanosecond timestamp when a longer string is passed
                if len(data) >= 28:
                    if pd.to_datetime(tdSql.queryResult[row][col]) == pd.to_datetime(data):
                        tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                                   (sql, row, col, tdSql.queryResult[row][col], data))
                else:
                    if tdSql.queryResult[row][col] == self._parse_datetime(data):
                        tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                                   (sql, row, col, tdSql.queryResult[row][col], data))
                return

            if str(tdSql.queryResult[row][col]) == str(data):
                tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                           (sql, row, col, tdSql.queryResult[row][col], data))
                return
            elif isinstance(data, float) and abs(tdSql.queryResult[row][col] - data) <= 0.000001:
                tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
                           (sql, row, col, tdSql.queryResult[row][col], data))
                return
            else:
                caller = inspect.getframeinfo(inspect.stack()[1][0])
                args = (caller.filename, caller.lineno, sql, row, col, tdSql.queryResult[row][col], data)
                tdLog.info("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
                check_status = False

        if data is None:
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                       (sql, row, col, tdSql.queryResult[row][col], data))
        elif isinstance(data, str):
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                       (sql, row, col, tdSql.queryResult[row][col], data))
        # elif isinstance(data, datetime.date):
        #     tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
        #                (sql, row, col, tdSql.queryResult[row][col], data))
        elif isinstance(data, float):
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                       (sql, row, col, tdSql.queryResult[row][col], data))
        else:
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%d" %
                       (sql, row, col, tdSql.queryResult[row][col], data))

        return check_status

    def mycheckRows(self, sql, expectRows):
        check_status = True
        if len(tdSql.queryResult) == expectRows:
            tdLog.info("sql:%s, queryRows:%d == expect:%d" % (sql, len(tdSql.queryResult), expectRows))
            return True
        else:
            caller = inspect.getframeinfo(inspect.stack()[1][0])
            args = (caller.filename, caller.lineno, sql, len(tdSql.queryResult), expectRows)
            tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
            check_status = False
        return check_status
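
    # Snapshot helper: returns the (dnode_id, role, ...) slice of every
    # vgroup as a set of tuples so two snapshots can be diffed cheaply.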
    def get_leader_infos(self, dbname):
        newTdSql = tdCom.newTdSql()
        newTdSql.query("show {}.vgroups".format(dbname))
        vgroup_infos = newTdSql.queryResult

        leader_infos = set()
        for vgroup_info in vgroup_infos:
            leader_infos.add(vgroup_info[3:-4])

        return leader_infos

    def force_stop_dnode(self, dnode_id):
        tdSql.query("show dnodes")
        port = None
        for dnode_info in tdSql.queryResult:
            if dnode_id == dnode_info[0]:
                port = dnode_info[1].split(":")[-1]
                break
        if port:
            tdLog.notice(" ==== dnode {} will be force stopped by kill -9 ====".format(dnode_id))
            psCmd = '''netstat -anp|grep -w LISTEN|grep -w %s |grep -o "LISTEN.*"|awk '{print $2}'|cut -d/ -f1|head -n1''' % (port)
            processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
            ps_kill_taosd = ''' kill -9 {} '''.format(processID)
            os.system(ps_kill_taosd)

    def basic_query_task(self, dbname, stablename):
        sql = "select * from {}.{} ;".format(dbname, stablename)
        count = 0
        while count < self.query_times:
            os.system(''' taos -s '{}' >>/dev/null '''.format(sql))
            count += 1

    def multi_thread_query_task(self, thread_nums, dbname, stablename):
        for i in range(thread_nums):
            task = threading.Thread(target=self.basic_query_task, args=(dbname, stablename))
            self.thread_list.append(task)
        for thread in self.thread_list:
            thread.start()
        return self.thread_list

    def stop_follower_when_query_going(self):
        tdDnodes = cluster.dnodes
        self.create_database(dbname=self.db_name, replica_num=self.replica, vgroup_nums=1)
        self.create_stable_insert_datas(dbname=self.db_name, stablename="stb1", tb_nums=self.tb_nums, row_nums=self.row_nums)

        # let the query task start
        self.thread_list = self.multi_thread_query_task(10, self.db_name, 'stb1')

        # stop the leader and check the revote in a loop
        for loop in range(self.loop_restart_times):
            tdLog.debug(" ==== this is the {}_th restart of the leader of database {} ==== ".format(loop, self.db_name))

            # get leader info before stop
            before_leader_infos = self.get_leader_infos(self.db_name)

            self.stop_dnode_id = self._get_stop_dnode_id(self.db_name)
            tdDnodes[self.stop_dnode_id - 1].stoptaosd()

            start = time.time()
            # get leader info after stop
            after_leader_infos = self.get_leader_infos(self.db_name)
            revote_status = self.check_revote_leader_success(self.db_name, before_leader_infos, after_leader_infos)

            while not revote_status:
                after_leader_infos = self.get_leader_infos(self.db_name)
                revote_status = self.check_revote_leader_success(self.db_name, before_leader_infos, after_leader_infos)

            end = time.time()
            time_cost = end - start
            tdLog.debug(" ==== revote leader of database {} cost time {} ====".format(self.db_name, time_cost))

            self.wait_stop_dnode_OK()

            start = time.time()
            tdDnodes[self.stop_dnode_id - 1].starttaosd()
            self.wait_start_dnode_OK()
            end = time.time()
            time_cost = int(end - start)

            if time_cost > self.max_restart_time:
                tdLog.exit(" ==== restarting dnode {} cost too much time , please check ====".format(self.stop_dnode_id))

        for thread in self.thread_list:
            thread.join()

    def run(self):
        # basic check of cluster
        self.check_setup_cluster_status()
        self.stop_follower_when_query_going()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

@@ -0,0 +1,470 @@
# author : wenzhouwww
import taos
import sys
import time
import os

from util.log import *
from util.sql import *
from util.cases import *
from util.common import *  # assumed source of tdCom, used below to open extra connections
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *

import datetime
import inspect
import socket
import subprocess
import threading
import pandas as pd  # needed by mycheckData for timestamp comparison
sys.path.append(os.path.dirname(__file__))


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor())
        self.host = socket.gethostname()
        self.mnode_list = {}
        self.dnode_list = {}
        self.ts = 1483200000000
        self.ts_step = 1000
        self.db_name = 'testdb'
        self.replica = 3
        self.vgroups = 1
        self.tb_nums = 10
        self.row_nums = 100
        self.stop_dnode_id = None
        self.loop_restart_times = 5
        self.thread_list = []
        self.max_restart_time = 10
        self.try_check_times = 10
        self.query_times = 100

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))
        if "community" in selfPath:
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        buildPath = ""
        for root, dirs, files in os.walk(projPath):
            if "taosd" in files:
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if "packaging" not in rootRealPath:
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def check_setup_cluster_status(self):
        tdSql.query("show mnodes")
        for mnode in tdSql.queryResult:
            name = mnode[1]
            info = mnode
            self.mnode_list[name] = info

        tdSql.query("show dnodes")
        for dnode in tdSql.queryResult:
            name = dnode[1]
            info = dnode
            self.dnode_list[name] = info

        count = 0
        is_leader = False
        mnode_name = ''
        for k, v in self.mnode_list.items():
            count += 1
            # only for 1 mnode
            mnode_name = k
            if v[2] == 'leader':
                is_leader = True

        if count == 1 and is_leader:
            tdLog.notice("===== deploy cluster success with 1 mnode as leader =====")
        else:
            tdLog.exit("===== deploy cluster fail with 1 mnode as leader =====")

        for k, v in self.dnode_list.items():
            if k == mnode_name:
                if v[3] == 0:
                    tdLog.notice("===== deploy cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name, v[3]))
                else:
                    tdLog.exit("===== deploy cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name, v[3]))
            else:
                continue

    def create_database(self, dbname, replica_num, vgroup_nums):
        drop_db_sql = "drop database if exists {}".format(dbname)
        create_db_sql = "create database {} replica {} vgroups {}".format(dbname, replica_num, vgroup_nums)

        tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname))
        tdSql.execute(drop_db_sql)
        tdSql.execute(create_db_sql)
        tdSql.execute("use {}".format(dbname))

    def create_stable_insert_datas(self, dbname, stablename, tb_nums, row_nums):
        tdSql.execute("use {}".format(dbname))
        tdSql.execute(
            '''create table {}
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32), c9 nchar(32), c10 timestamp)
            tags (t1 int)
            '''.format(stablename)
        )

        for i in range(tb_nums):
            sub_tbname = "sub_{}_{}".format(stablename, i)
            tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename, i))
            # insert rows into the new database
            for row_num in range(row_nums):
                ts = self.ts + self.ts_step * row_num
                tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")

        tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))

    def append_rows_of_exists_tables(self, dbname, stablename, tbname, append_nums):
        tdSql.execute("use {}".format(dbname))
        for row_num in range(append_nums):
            tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
        tdLog.notice(" ==== append new rows of table {} belonging to stable {} execute end =====".format(tbname, stablename))
        os.system("taos -s 'select count(*) from {}.{}';".format(dbname, stablename))

    def check_insert_rows(self, dbname, stablename, tb_nums, row_nums, append_rows):
        tdSql.execute("use {}".format(dbname))

        tdSql.query("select count(*) from {}.{}".format(dbname, stablename))
        while not tdSql.queryResult:
            time.sleep(0.1)
            tdSql.query("select count(*) from {}.{}".format(dbname, stablename))

        status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname, stablename), 0, 0, tb_nums * row_nums + append_rows)

        count = 0
        while not status_OK:
            if count > self.try_check_times:
                os.system("taos -s ' show {}.vgroups; '".format(dbname))
                tdLog.exit(" ==== check insert rows still failed after {} retries (max {}) for database {}".format(count, self.try_check_times, dbname))
                break
            time.sleep(0.1)
            tdSql.query("select count(*) from {}.{}".format(dbname, stablename))
            while not tdSql.queryResult:
                time.sleep(0.1)
                tdSql.query("select count(*) from {}.{}".format(dbname, stablename))
            status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname, stablename), 0, 0, tb_nums * row_nums + append_rows)
            tdLog.notice(" ==== first check of insert rows failed , this is the {}_th retry for database {}".format(count, dbname))
            count += 1

        tdSql.query("select distinct tbname from {}.{}".format(dbname, stablename))
        while not tdSql.queryResult:
            time.sleep(0.1)
            tdSql.query("select distinct tbname from {}.{}".format(dbname, stablename))
        status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname, stablename), tb_nums)
        count = 0
        while not status_OK:
            if count > self.try_check_times:
                os.system("taos -s ' show {}.vgroups;'".format(dbname))
                tdLog.exit(" ==== check insert rows still failed after {} retries (max {}) for database {}".format(count, self.try_check_times, dbname))
                break
            time.sleep(0.1)
            tdSql.query("select distinct tbname from {}.{}".format(dbname, stablename))
            while not tdSql.queryResult:
                time.sleep(0.1)
                tdSql.query("select distinct tbname from {}.{}".format(dbname, stablename))
            status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname, stablename), tb_nums)
            tdLog.notice(" ==== first check of insert tbnames failed , this is the {}_th retry for database {}".format(count, dbname))
            count += 1
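
    # As in the previous leader-restart case, the dnode paired with the
    # 'leader' role is selected so that killing it triggers a re-vote.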
    def _get_stop_dnode_id(self, dbname):
        tdSql.query("show {}.vgroups".format(dbname))
        vgroup_infos = tdSql.queryResult
        for vgroup_info in vgroup_infos:
            leader_infos = vgroup_info[3:-4]
            for ind, role in enumerate(leader_infos):
                if role == 'leader':
                    self.stop_dnode_id = leader_infos[ind - 1]
                    break
        return self.stop_dnode_id

    def wait_stop_dnode_OK(self):

        def _get_status():
            newTdSql = tdCom.newTdSql()
            status = ""
            newTdSql.query("show dnodes")
            dnode_infos = newTdSql.queryResult
            for dnode_info in dnode_infos:
                id = dnode_info[0]
                dnode_status = dnode_info[4]
                if id == self.stop_dnode_id:
                    status = dnode_status
                    break
            return status

        status = _get_status()
        while status != "offline":
            time.sleep(0.1)
            status = _get_status()
        tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))

    def check_revote_leader_success(self, dbname, before_leader_infos, after_leader_infos):
        check_status = False
        vote_act = set(after_leader_infos) - set(before_leader_infos)
        if not vote_act:
            print("=======before_revote_leader_infos ======\n", before_leader_infos)
            print("=======after_revote_leader_infos ======\n", after_leader_infos)
            tdLog.info(" === maybe the revote has not occurred , there is no dnode offline ====")
        else:
            for vgroup_info in vote_act:
                for ind, role in enumerate(vgroup_info):
                    if role == self.stop_dnode_id:
                        if vgroup_info[ind + 1] == "offline" and "leader" in vgroup_info:
                            tdLog.notice(" === revote leader ok , leader is {} now ====".format(vgroup_info[list(vgroup_info).index("leader") - 1]))
                            check_status = True
                        elif vgroup_info[ind + 1] != "offline":
                            tdLog.notice(" === dnode {} should be offline ".format(self.stop_dnode_id))
                        else:
                            continue
                        break
        return check_status

    def wait_start_dnode_OK(self):

        def _get_status():
            newTdSql = tdCom.newTdSql()
            status = ""
            newTdSql.query("show dnodes")
            dnode_infos = newTdSql.queryResult
            for dnode_info in dnode_infos:
                id = dnode_info[0]
                dnode_status = dnode_info[4]
                if id == self.stop_dnode_id:
                    status = dnode_status
                    break
            return status

        status = _get_status()
        while status != "ready":
            time.sleep(0.1)
            status = _get_status()
        tdLog.notice("==== stop_dnode has restarted , id is {}".format(self.stop_dnode_id))

    def _parse_datetime(self, timestr):
        try:
            return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S.%f')
        except ValueError:
            pass
        try:
            return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S')
        except ValueError:
            pass

    def mycheckRowCol(self, sql, row, col):
        caller = inspect.getframeinfo(inspect.stack()[2][0])
        if row < 0:
            args = (caller.filename, caller.lineno, sql, row)
            tdLog.exit("%s(%d) failed: sql:%s, row:%d is smaller than zero" % args)
        if col < 0:
            args = (caller.filename, caller.lineno, sql, col)
            tdLog.exit("%s(%d) failed: sql:%s, col:%d is smaller than zero" % args)
        if row > tdSql.queryRows:
            args = (caller.filename, caller.lineno, sql, row, tdSql.queryRows)
            tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args)
        if col > tdSql.queryCols:
            args = (caller.filename, caller.lineno, sql, col, tdSql.queryCols)
            tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args)

    def mycheckData(self, sql, row, col, data):
        check_status = True
        self.mycheckRowCol(sql, row, col)
        if tdSql.queryResult[row][col] != data:
            if tdSql.cursor.istype(col, "TIMESTAMP"):
                # assume the user wants to check a nanosecond timestamp when a longer string is passed
                if len(data) >= 28:
                    if pd.to_datetime(tdSql.queryResult[row][col]) == pd.to_datetime(data):
                        tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                                   (sql, row, col, tdSql.queryResult[row][col], data))
                else:
                    if tdSql.queryResult[row][col] == self._parse_datetime(data):
                        tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                                   (sql, row, col, tdSql.queryResult[row][col], data))
                return

            if str(tdSql.queryResult[row][col]) == str(data):
                tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                           (sql, row, col, tdSql.queryResult[row][col], data))
                return
            elif isinstance(data, float) and abs(tdSql.queryResult[row][col] - data) <= 0.000001:
                tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
                           (sql, row, col, tdSql.queryResult[row][col], data))
                return
            else:
                caller = inspect.getframeinfo(inspect.stack()[1][0])
                args = (caller.filename, caller.lineno, sql, row, col, tdSql.queryResult[row][col], data)
                tdLog.info("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
                check_status = False

        if data is None:
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                       (sql, row, col, tdSql.queryResult[row][col], data))
        elif isinstance(data, str):
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                       (sql, row, col, tdSql.queryResult[row][col], data))
        # elif isinstance(data, datetime.date):
        #     tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
        #                (sql, row, col, tdSql.queryResult[row][col], data))
        elif isinstance(data, float):
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                       (sql, row, col, tdSql.queryResult[row][col], data))
        else:
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%d" %
                       (sql, row, col, tdSql.queryResult[row][col], data))

        return check_status

    def mycheckRows(self, sql, expectRows):
        check_status = True
        if len(tdSql.queryResult) == expectRows:
            tdLog.info("sql:%s, queryRows:%d == expect:%d" % (sql, len(tdSql.queryResult), expectRows))
            return True
        else:
            caller = inspect.getframeinfo(inspect.stack()[1][0])
            args = (caller.filename, caller.lineno, sql, len(tdSql.queryResult), expectRows)
            tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
            check_status = False
        return check_status

    def get_leader_infos(self, dbname):
        newTdSql = tdCom.newTdSql()
        newTdSql.query("show {}.vgroups".format(dbname))
        vgroup_infos = newTdSql.queryResult

        leader_infos = set()
        for vgroup_info in vgroup_infos:
            leader_infos.add(vgroup_info[3:-4])

        return leader_infos

    def force_stop_dnode(self, dnode_id):
        tdSql.query("show dnodes")
        port = None
        for dnode_info in tdSql.queryResult:
            if dnode_id == dnode_info[0]:
                port = dnode_info[1].split(":")[-1]
                break
        if port:
            tdLog.notice(" ==== dnode {} will be force stopped by kill -9 ====".format(dnode_id))
            psCmd = '''netstat -anp|grep -w LISTEN|grep -w %s |grep -o "LISTEN.*"|awk '{print $2}'|cut -d/ -f1|head -n1''' % (port)
            processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
            ps_kill_taosd = ''' kill -9 {} '''.format(processID)
            os.system(ps_kill_taosd)

    def basic_query_task(self, dbname, stablename):
        sql = "select * from {}.{} ;".format(dbname, stablename)
        count = 0
        while count < self.query_times:
            os.system(''' taos -s '{}' >>/dev/null '''.format(sql))
            count += 1

    def multi_thread_query_task(self, thread_nums, dbname, stablename):
        for i in range(thread_nums):
            task = threading.Thread(target=self.basic_query_task, args=(dbname, stablename))
            self.thread_list.append(task)
        for thread in self.thread_list:
            thread.start()
        return self.thread_list
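
    # Scenario: queries keep running while the leader dnode is killed with
    # kill -9; the test then measures how long the re-vote and the dnode
    # restart take, bounded by self.max_restart_time.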
    def stop_follower_when_query_going(self):
        tdDnodes = cluster.dnodes
        self.create_database(dbname=self.db_name, replica_num=self.replica, vgroup_nums=1)
        self.create_stable_insert_datas(dbname=self.db_name, stablename="stb1", tb_nums=self.tb_nums, row_nums=self.row_nums)

        # let the query task start
        self.thread_list = self.multi_thread_query_task(10, self.db_name, 'stb1')

        # force stop the leader and check the revote in a loop
        for loop in range(self.loop_restart_times):
            tdLog.debug(" ==== this is the {}_th restart of the leader of database {} ==== ".format(loop, self.db_name))

            # get leader info before stop
            before_leader_infos = self.get_leader_infos(self.db_name)

            self.stop_dnode_id = self._get_stop_dnode_id(self.db_name)
            self.force_stop_dnode(self.stop_dnode_id)

            start = time.time()
            # get leader info after stop
            after_leader_infos = self.get_leader_infos(self.db_name)
            revote_status = self.check_revote_leader_success(self.db_name, before_leader_infos, after_leader_infos)

            while not revote_status:
                after_leader_infos = self.get_leader_infos(self.db_name)
                revote_status = self.check_revote_leader_success(self.db_name, before_leader_infos, after_leader_infos)

            end = time.time()
            time_cost = end - start
            tdLog.debug(" ==== revote leader of database {} cost time {} ====".format(self.db_name, time_cost))

            self.wait_stop_dnode_OK()

            start = time.time()
            tdDnodes[self.stop_dnode_id - 1].starttaosd()
            self.wait_start_dnode_OK()
            end = time.time()
            time_cost = int(end - start)

            if time_cost > self.max_restart_time:
                tdLog.exit(" ==== restarting dnode {} cost too much time , please check ====".format(self.stop_dnode_id))

        for thread in self.thread_list:
            thread.join()

    def run(self):
        # basic check of cluster
        self.check_setup_cluster_status()
        self.stop_follower_when_query_going()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

@@ -71,14 +71,14 @@ class TDTestCase:
                is_leader = True

        if count == 1 and is_leader:
            tdLog.info("===== deploy cluster success with 1 mnode as leader =====")
            tdLog.notice("===== deploy cluster success with 1 mnode as leader =====")
        else:
            tdLog.exit("===== deploy cluster fail with 1 mnode as leader =====")

        for k, v in self.dnode_list.items():
            if k == mnode_name:
                if v[3] == 0:
                    tdLog.info("===== deploy cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name, v[3]))
                    tdLog.notice("===== deploy cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name, v[3]))
                else:
                    tdLog.exit("===== deploy cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name, v[3]))
            else:

@@ -121,7 +121,7 @@ class TDTestCase:
        for k, v in vgroups_infos.items():
            if len(v) == 1 and v[0] == "leader":
                tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
                tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
            else:
                tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))
@ -152,10 +152,10 @@ class TDTestCase:
|
|||
time.sleep(0.1)
|
||||
status = self.check_vgroups_init_done(dbname)
|
||||
|
||||
# tdLog.info("=== database {} show vgroups vote the leader is in progress ===".format(dbname))
|
||||
# tdLog.notice("=== database {} show vgroups vote the leader is in progress ===".format(dbname))
|
||||
end = time.time()
|
||||
cost_time = end - start
|
||||
tdLog.info(" ==== database %s vote the leaders success , cost time is %.3f second ====="%(dbname,cost_time) )
|
||||
tdLog.notice(" ==== database %s vote the leaders success , cost time is %.3f second ====="%(dbname,cost_time) )
|
||||
# os.system("taos -s 'show {}.vgroups;'".format(dbname))
|
||||
if cost_time >= self.max_vote_time_cost:
|
||||
tdLog.exit(" ==== database %s vote the leaders cost too large time , cost time is %.3f second ===="%(dbname,cost_time) )
|
||||
|
@ -165,28 +165,28 @@ class TDTestCase:
|
|||
|
||||
def test_init_vgroups_time_costs(self):
|
||||
|
||||
tdLog.info(" ====start check time cost about vgroups vote leaders ==== ")
|
||||
tdLog.info(" ==== current max time cost is set value : {} =======".format(self.max_vote_time_cost))
|
||||
tdLog.notice(" ====start check time cost about vgroups vote leaders ==== ")
|
||||
tdLog.notice(" ==== current max time cost is set value : {} =======".format(self.max_vote_time_cost))
|
||||
|
||||
# create database replica 3 vgroups 1
|
||||
|
||||
db1 = 'db_1'
|
||||
create_db_replica_3_vgroups_1 = "create database {} replica 3 vgroups 1".format(db1)
|
||||
tdLog.info('=======database {} replica 3 vgroups 1 ======'.format(db1))
|
||||
tdLog.notice('=======database {} replica 3 vgroups 1 ======'.format(db1))
|
||||
tdSql.execute(create_db_replica_3_vgroups_1)
|
||||
self.vote_leader_time_costs(db1)
|
||||
|
||||
# create database replica 3 vgroups 10
|
||||
db2 = 'db_2'
|
||||
create_db_replica_3_vgroups_10 = "create database {} replica 3 vgroups 10".format(db2)
|
||||
tdLog.info('=======database {} replica 3 vgroups 10 ======'.format(db2))
|
||||
tdLog.notice('=======database {} replica 3 vgroups 10 ======'.format(db2))
|
||||
tdSql.execute(create_db_replica_3_vgroups_10)
|
||||
self.vote_leader_time_costs(db2)
|
||||
|
||||
# create database replica 3 vgroups 100
|
||||
db3 = 'db_3'
|
||||
create_db_replica_3_vgroups_100 = "create database {} replica 3 vgroups 100".format(db3)
|
||||
tdLog.info('=======database {} replica 3 vgroups 100 ======'.format(db3))
|
||||
tdLog.notice('=======database {} replica 3 vgroups 100 ======'.format(db3))
|
||||
tdSql.execute(create_db_replica_3_vgroups_100)
|
||||
self.vote_leader_time_costs(db3)
|
||||
|
||||
|
|
|
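
The three per-database blocks in test_init_vgroups_time_costs differ only in the vgroup count, so the hunk above could collapse into one loop. A sketch under that observation (the loop form is editorial; the names mirror the test):

    def test_init_vgroups_time_costs(self):
        tdLog.notice(" ==== start check time cost about vgroups vote leaders ==== ")
        tdLog.notice(" ==== current max time cost is set value : {} =======".format(self.max_vote_time_cost))
        # one database per vgroup count, replica fixed at 3
        for i, vgroups in enumerate([1, 10, 100], start=1):
            dbname = 'db_{}'.format(i)
            tdLog.notice('======= database {} replica 3 vgroups {} ======'.format(dbname, vgroups))
            tdSql.execute("create database {} replica 3 vgroups {}".format(dbname, vgroups))
            self.vote_leader_time_costs(dbname)
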
@@ -74,14 +74,14 @@ class TDTestCase:
             is_leader=True

         if count==1 and is_leader:
-            tdLog.info("===== deploy cluster success with 1 mnode as leader =====")
+            tdLog.notice("===== deploy cluster success with 1 mnode as leader =====")
         else:
             tdLog.exit("===== deploy cluster fail with 1 mnode as leader =====")

         for k ,v in self.dnode_list.items():
             if k == mnode_name:
                 if v[3]==0:
-                    tdLog.info("===== deploy cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
+                    tdLog.notice("===== deploy cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
                 else:
                     tdLog.exit("===== deploy cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
             else:

@@ -124,7 +124,7 @@ class TDTestCase:

         for k , v in vgroups_infos.items():
             if len(v) ==1 and v[0]=="leader":
-                tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
+                tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
             else:
                 tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))

@@ -148,7 +148,7 @@ class TDTestCase:

         if ind%2==0:
             if role == stop_dnode_id and vgroups_leader_follower[ind+1]=="offline":
-                tdLog.info("====== dnode {} has gone offline , endpoint is {}".format(stop_dnode_id , self.stop_dnode))
+                tdLog.notice("====== dnode {} has gone offline , endpoint is {}".format(stop_dnode_id , self.stop_dnode))
             elif role == stop_dnode_id :
                 tdLog.exit("====== dnode {} has not gone offline , endpoint is {}".format(stop_dnode_id , self.stop_dnode))
             else:

@@ -180,8 +180,8 @@ class TDTestCase:
         while status !="offline":
             time.sleep(0.1)
             status = _get_status()
-            # tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
-        tdLog.info("==== stop_dnode has stopped , endpoint is {}".format(self.stop_dnode))
+            # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
+        tdLog.notice("==== stop_dnode has stopped , endpoint is {}".format(self.stop_dnode))

     def wait_start_dnode_OK(self):

@@ -202,15 +202,15 @@ class TDTestCase:
         while status !="ready":
             time.sleep(0.1)
             status = _get_status()
-            # tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
-        tdLog.info("==== stop_dnode has restarted , endpoint is {}".format(self.stop_dnode))
+            # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
+        tdLog.notice("==== stop_dnode has restarted , endpoint is {}".format(self.stop_dnode))


     def random_stop_One_dnode(self):
         self.stop_dnode = self._get_stop_dnode()
         stop_dnode_id = self.dnode_list[self.stop_dnode][0]
-        tdLog.info(" ==== dnode {} will go offline , endpoints is {} ====".format(stop_dnode_id , self.stop_dnode))
+        tdLog.notice(" ==== dnode {} will go offline , endpoints is {} ====".format(stop_dnode_id , self.stop_dnode))
         tdDnodes=cluster.dnodes
         tdDnodes[stop_dnode_id-1].stoptaosd()
         self.wait_stop_dnode_OK()

@@ -250,10 +250,10 @@ class TDTestCase:
             time.sleep(0.1)
             status = self.check_vgroups_init_done(dbname)

-            # tdLog.info("=== database {} show vgroups vote the leader is in progress ===".format(dbname))
+            # tdLog.notice("=== database {} show vgroups vote the leader is in progress ===".format(dbname))
         end = time.time()
         cost_time = end - start
-        tdLog.info(" ==== database %s vote the leaders success , cost time is %.3f second ====="%(dbname,cost_time) )
+        tdLog.notice(" ==== database %s vote the leaders success , cost time is %.3f second ====="%(dbname,cost_time) )
         # os.system("taos -s 'show {}.vgroups;'".format(dbname))
         if cost_time >= self.max_vote_time_cost:
             tdLog.exit(" ==== database %s vote the leaders cost too large time , cost time is %.3f second ===="%(dbname,cost_time) )

@@ -269,10 +269,10 @@ class TDTestCase:
             time.sleep(0.1)
             status = self.check_vgroups_revote_leader(dbname)

-            # tdLog.info("=== database {} show vgroups vote the leader is in progress ===".format(dbname))
+            # tdLog.notice("=== database {} show vgroups vote the leader is in progress ===".format(dbname))
         end = time.time()
         cost_time = end - start
-        tdLog.info(" ==== database %s revote the leaders success , cost time is %.3f second ====="%(dbname,cost_time) )
+        tdLog.notice(" ==== database %s revote the leaders success , cost time is %.3f second ====="%(dbname,cost_time) )
         # os.system("taos -s 'show {}.vgroups;'".format(dbname))
         if cost_time >= self.max_vote_time_cost:
             tdLog.exit(" ==== database %s revote the leaders cost too large time , cost time is %.3f second ===="%(dbname,cost_time) )

@@ -306,7 +306,7 @@ class TDTestCase:
         if role==self.dnode_list[self.stop_dnode][0]:

             if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info:
-                tdLog.info(" === revote leader ok , leader is {} now ====".format(list(vgroup_info).index("leader")-1))
+                tdLog.notice(" === revote leader ok , leader is {} now ====".format(list(vgroup_info).index("leader")-1))
             elif vgroup_info[ind+1] !="offline":
                 tdLog.exit(" === dnode {} should be offline ".format(self.stop_dnode))
             else:

@@ -319,14 +319,14 @@ class TDTestCase:
         self.Restart_stop_dnode()
     def test_init_vgroups_time_costs(self):

-        tdLog.info(" ==== start check time cost about vgroups vote leaders ==== ")
-        tdLog.info(" ==== current max time cost is set value : {} =======".format(self.max_vote_time_cost))
+        tdLog.notice(" ==== start check time cost about vgroups vote leaders ==== ")
+        tdLog.notice(" ==== current max time cost is set value : {} =======".format(self.max_vote_time_cost))

         # create database replica 3 vgroups 1

         db1 = 'db_1'
         create_db_replica_3_vgroups_1 = "create database {} replica 3 vgroups 1".format(db1)
-        tdLog.info('=======database {} replica 3 vgroups 1 ======'.format(db1))
+        tdLog.notice('=======database {} replica 3 vgroups 1 ======'.format(db1))
         tdSql.execute(create_db_replica_3_vgroups_1)
         self.vote_leader_time_costs(db1)
         self.exec_revote_action(db1)

@@ -334,7 +334,7 @@ class TDTestCase:
         # create database replica 3 vgroups 10
         db2 = 'db_2'
         create_db_replica_3_vgroups_10 = "create database {} replica 3 vgroups 10".format(db2)
-        tdLog.info('=======database {} replica 3 vgroups 10 ======'.format(db2))
+        tdLog.notice('=======database {} replica 3 vgroups 10 ======'.format(db2))
         tdSql.execute(create_db_replica_3_vgroups_10)
         self.vote_leader_time_costs(db2)
         self.exec_revote_action(db2)

@@ -342,7 +342,7 @@ class TDTestCase:
         # create database replica 3 vgroups 100
         db3 = 'db_3'
         create_db_replica_3_vgroups_100 = "create database {} replica 3 vgroups 100".format(db3)
-        tdLog.info('=======database {} replica 3 vgroups 100 ======'.format(db3))
+        tdLog.notice('=======database {} replica 3 vgroups 100 ======'.format(db3))
         tdSql.execute(create_db_replica_3_vgroups_100)
         self.vote_leader_time_costs(db3)
         self.exec_revote_action(db3)
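
Every wait loop in the hunks above has the same poll-sleep-recheck shape (wait_stop_dnode_OK, wait_start_dnode_OK, the vote and revote cost checks). A generic helper, shown as a sketch rather than as framework code:

import time

def poll_until(predicate, interval=0.1, timeout=60.0):
    # Call predicate() every `interval` seconds until it returns True;
    # return the elapsed seconds, or raise TimeoutError on expiry.
    start = time.time()
    while time.time() - start < timeout:
        if predicate():
            return time.time() - start
        time.sleep(interval)
    raise TimeoutError("condition not met within {}s".format(timeout))

# e.g. cost = poll_until(lambda: self.check_vgroups_init_done(dbname))
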
@@ -0,0 +1,118 @@
{
    "filetype": "insert",
    "cfgdir": "/etc/taos/",
    "host": "localhost",
    "port": 6030,
    "user": "root",
    "password": "taosdata",
    "thread_count": 10,
    "create_table_thread_count": 10,
    "confirm_parameter_prompt": "no",
    "insert_interval": 0,
    "interlace_rows": 1000,
    "num_of_records_per_req": 1000,
    "databases": [
        {
            "dbinfo": {
                "name": "db_2",
                "drop": "no",
                "vgroups": 1,
                "replica": 3
            },
            "super_tables": [
                {
                    "name": "stb1",
                    "childtable_count": 10,
                    "childtable_prefix": "sub_",
                    "auto_create_table": "yes",
                    "batch_create_tbl_num": 5000,
                    "data_source": "rand",
                    "insert_mode": "taosc",
                    "insert_rows": 100000,
                    "interlace_rows": 0,
                    "insert_interval": 0,
                    "max_sql_len": 1000000,
                    "disorder_ratio": 0,
                    "disorder_range": 1000,
                    "timestamp_step": 10,
                    "start_timestamp": "2015-05-01 00:00:00.000",
                    "sample_format": "csv",
                    "use_sample_ts": "no",
                    "tags_file": "",
                    "columns": [
                        {"type": "INT", "count": 1},
                        {"type": "TINYINT", "count": 1},
                        {"type": "SMALLINT", "count": 1},
                        {"type": "BIGINT", "count": 1},
                        {"type": "UINT", "count": 1},
                        {"type": "UTINYINT", "count": 1},
                        {"type": "USMALLINT", "count": 1},
                        {"type": "UBIGINT", "count": 1},
                        {"type": "DOUBLE", "count": 1},
                        {"type": "FLOAT", "count": 1},
                        {"type": "BINARY", "len": 40, "count": 1},
                        {"type": "VARCHAR", "len": 200, "count": 1},
                        {"type": "nchar", "len": 200, "count": 1}
                    ],
                    "tags": [
                        {"type": "INT", "count": 1},
                        {"type": "BINARY", "len": 100, "count": 1},
                        {"type": "BOOL", "count": 1}
                    ]
                }
            ]
        }
    ]
}
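
A job description like the one above is consumed by taosBenchmark rather than by the test framework directly. A sketch of launching it from Python (the -f flag follows taosBenchmark's documented usage for JSON configs; the file name and the binary being on PATH are assumptions):

import subprocess

def run_benchmark(cfg_path="insert_replica3_db2.json"):
    # Run taosBenchmark with the JSON job above; check=True raises
    # CalledProcessError if the benchmark exits non-zero.
    subprocess.run(["taosBenchmark", "-f", cfg_path], check=True)
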
@@ -0,0 +1,118 @@
{
    "filetype": "insert",
    "cfgdir": "/etc/taos/",
    "host": "localhost",
    "port": 6030,
    "user": "root",
    "password": "taosdata",
    "thread_count": 1,
    "create_table_thread_count": 1,
    "confirm_parameter_prompt": "no",
    "insert_interval": 0,
    "interlace_rows": 1000,
    "num_of_records_per_req": 1000,
    "databases": [
        {
            "dbinfo": {
                "name": "db_1",
                "drop": "no",
                "vgroups": 1,
                "replica": 3
            },
            "super_tables": [
                {
                    "name": "stb1",
                    "childtable_count": 10,
                    "childtable_prefix": "sub_",
                    "auto_create_table": "yes",
                    "batch_create_tbl_num": 5000,
                    "data_source": "rand",
                    "insert_mode": "taosc",
                    "insert_rows": 10000,
                    "interlace_rows": 0,
                    "insert_interval": 0,
                    "max_sql_len": 1000000,
                    "disorder_ratio": 0,
                    "disorder_range": 1000,
                    "timestamp_step": 10,
                    "start_timestamp": "2015-05-01 00:00:00.000",
                    "sample_format": "csv",
                    "use_sample_ts": "no",
                    "tags_file": "",
                    "columns": [
                        {"type": "INT", "count": 1},
                        {"type": "TINYINT", "count": 1},
                        {"type": "SMALLINT", "count": 1},
                        {"type": "BIGINT", "count": 1},
                        {"type": "UINT", "count": 1},
                        {"type": "UTINYINT", "count": 1},
                        {"type": "USMALLINT", "count": 1},
                        {"type": "UBIGINT", "count": 1},
                        {"type": "DOUBLE", "count": 1},
                        {"type": "FLOAT", "count": 1},
                        {"type": "BINARY", "len": 40, "count": 1},
                        {"type": "VARCHAR", "len": 200, "count": 1},
                        {"type": "nchar", "len": 200, "count": 1}
                    ],
                    "tags": [
                        {"type": "INT", "count": 1},
                        {"type": "BINARY", "len": 100, "count": 1},
                        {"type": "BOOL", "count": 1}
                    ]
                }
            ]
        }
    ]
}
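
This second config differs from the previous one only in the database name, the two thread counts, and insert_rows. A sketch that derives one from the other instead of maintaining two 118-line files (both file names are illustrative):

import json

with open("insert_replica3_db2.json") as f:
    cfg = json.load(f)
# derive the db_1 variant: single-threaded, 10k rows per child table
cfg["thread_count"] = 1
cfg["create_table_thread_count"] = 1
cfg["databases"][0]["dbinfo"]["name"] = "db_1"
cfg["databases"][0]["super_tables"][0]["insert_rows"] = 10000
with open("insert_replica3_db1.json", "w") as f:
    json.dump(cfg, f, indent=4)
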
@@ -0,0 +1,198 @@
from distutils.log import error
import taos
import sys
import time
import socket
import os
import threading
import subprocess
import platform

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *


class TDTestCase:
    def __init__(self):
        self.snapshot = 0
        self.replica = 3
        self.vgroups = 3
        self.ctbNum = 2
        self.rowsPerTbl = 2

    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor())
        # tdSql.init(conn.cursor(), logSql)  # output sql.txt file

    def checkFileContent(self, consumerId, queryString):
        buildPath = tdCom.getBuildPath()
        cfgPath = tdCom.getClientCfgPath()
        dstFile = '%s/../log/dstrows_%d.txt'%(cfgPath, consumerId)
        cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile)
        tdLog.info(cmdStr)
        os.system(cmdStr)

        consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId)
        tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile))

        consumeFile = open(consumeRowsFile, mode='r')
        queryFile = open(dstFile, mode='r')

        # skip the first line since it is the schema
        queryFile.readline()

        while True:
            dst = queryFile.readline()
            src = consumeFile.readline()

            if dst:
                if dst != src:
                    tdLog.exit("consumerId %d consumed rows do not match the rows from the direct query"%consumerId)
            else:
                break
        return

    def prepareTestEnv(self):
        tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
        paraDict = {'dbName': 'dbt',
                    'dropFlag': 1,
                    'event': '',
                    'vgroups': 4,
                    'stbName': 'stb',
                    'colPrefix': 'c',
                    'tagPrefix': 't',
                    'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1}],
                    'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1}],
                    'ctbPrefix': 'ctb',
                    'ctbStartIdx': 0,
                    'ctbNum': 2,
                    'rowsPerTbl': 1000,
                    'batchNum': 10,
                    'startTs': 1640966400000,  # 2022-01-01 00:00:00.000
                    'pollDelay': 3,
                    'showMsg': 1,
                    'showRow': 1,
                    'snapshot': 0}

        paraDict['vgroups'] = self.vgroups
        paraDict['ctbNum'] = self.ctbNum
        paraDict['rowsPerTbl'] = self.rowsPerTbl

        tmqCom.initConsumerTable()
        tdCom.create_database(tdSql, paraDict["dbName"], paraDict["dropFlag"], vgroups=paraDict["vgroups"], replica=self.replica)
        tdLog.info("create stb")
        tmqCom.create_stable(tdSql, dbName=paraDict["dbName"], stbName=paraDict["stbName"])
        tdLog.info("create ctb")
        tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"], stbName=paraDict["stbName"], ctbPrefix=paraDict['ctbPrefix'],
                             ctbNum=paraDict["ctbNum"], ctbStartIdx=paraDict['ctbStartIdx'])
        tdLog.info("insert data")
        tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql, dbName=paraDict["dbName"], ctbPrefix=paraDict["ctbPrefix"],
                                               ctbNum=paraDict["ctbNum"], rowsPerTbl=paraDict["rowsPerTbl"], batchNum=paraDict["batchNum"],
                                               startTs=paraDict["startTs"], ctbStartIdx=paraDict['ctbStartIdx'])
        # tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
        #                                       ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
        #                                       startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
        # tmqCom.asyncInsertDataByInterlace(paraDict)
        tdLog.printNoPrefix("11111111111111111111111")
        tmqCom.create_ntable(tdSql, dbname=paraDict["dbName"], tbname_prefix="ntb", tbname_index_start_num=1, column_elm_list=paraDict["colSchema"], colPrefix='c', tblNum=1)
        tdLog.printNoPrefix("222222222222222")
        tmqCom.insert_rows_into_ntbl(tdSql, dbname=paraDict["dbName"], tbname_prefix="ntb", tbname_index_start_num=1, column_ele_list=paraDict["colSchema"], startTs=paraDict["startTs"], tblNum=1, rows=2)
        # tdLog.info("restart taosd to ensure that the data falls into the disk")

        tdLog.printNoPrefix("333333333333333333333")
        tdSql.query("drop database %s"%paraDict["dbName"])
        tdLog.printNoPrefix("44444444444444444")
        return

    def tmqCase1(self):
        tdLog.printNoPrefix("======== test case 1: ")

        # create and start thread
        paraDict = {'dbName': 'dbt',
                    'dropFlag': 1,
                    'event': '',
                    'vgroups': 4,
                    'stbName': 'stb',
                    'colPrefix': 'c',
                    'tagPrefix': 't',
                    'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
                    'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
                    'ctbPrefix': 'ctb',
                    'ctbStartIdx': 0,
                    'ctbNum': 100,
                    'rowsPerTbl': 1000,
                    'batchNum': 100,
                    'startTs': 1640966400000,  # 2022-01-01 00:00:00.000
                    'pollDelay': 3,
                    'showMsg': 1,
                    'showRow': 1,
                    'snapshot': 1}

        paraDict['vgroups'] = self.vgroups
        paraDict['ctbNum'] = self.ctbNum
        paraDict['rowsPerTbl'] = self.rowsPerTbl

        tdLog.info("create topics from stb1")
        topicFromStb1 = 'topic_stb1'
        queryString = "select ts, c1, c2 from %s.%s where t4 == 'beijing' or t4 == 'changsha' "%(paraDict['dbName'], paraDict['stbName'])
        sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
        tdLog.info("create topic sql: %s"%sqlString)
        tdSql.execute(sqlString)

        consumerId = 0
        expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
        topicList = topicFromStb1
        ifcheckdata = 0
        ifManualCommit = 0
        keyList = 'group.id:cgrp1,\
                   enable.auto.commit:false,\
                   auto.commit.interval.ms:6000,\
                   auto.offset.reset:earliest'
        tmqCom.insertConsumerInfo(consumerId, expectrowcnt, topicList, keyList, ifcheckdata, ifManualCommit)

        tdLog.info("start consume processor")
        pollDelay = 100
        showMsg = 1
        showRow = 1
        tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'], dbName=paraDict["dbName"], showMsg=paraDict['showMsg'], showRow=paraDict['showRow'], snapshot=paraDict['snapshot'])

        tdLog.info("start to check consume result")
        expectRows = 1
        resultList = tmqCom.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        for i in range(expectRows):
            totalConsumeRows += resultList[i]

        tdSql.query(queryString)
        totalRowsInserted = tdSql.getRows()

        tdLog.info("act consume rows: %d, act insert rows: %d, expect consume rows: %d"%(totalConsumeRows, totalRowsInserted, expectrowcnt))

        if totalConsumeRows != expectrowcnt:
            tdLog.exit("tmq consume rows error!")

        # tmqCom.checkFileContent(consumerId, queryString)

        tmqCom.waitSubscriptionExit(tdSql, topicFromStb1)
        tdSql.query("drop topic %s"%topicFromStb1)

        tdLog.printNoPrefix("======== test case 1 end ...... ")

    def run(self):
        self.prepareTestEnv()
        # self.tmqCase1()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


event = threading.Event()

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
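
checkFileContent above never closes its two file handles and reports no position on a mismatch. A tightened sketch of the same comparison using context managers (the line counter is an editorial addition):

    def checkFileContent(self, consumerId, queryString):
        cfgPath = tdCom.getClientCfgPath()
        dstFile = '%s/../log/dstrows_%d.txt' % (cfgPath, consumerId)
        consumeRowsFile = '%s/../log/consumerid_%d.txt' % (cfgPath, consumerId)
        with open(consumeRowsFile) as consumeFile, open(dstFile) as queryFile:
            queryFile.readline()  # skip the schema header line
            for lineno, (src, dst) in enumerate(zip(consumeFile, queryFile), start=1):
                if dst != src:
                    tdLog.exit("consumerId %d row %d does not match the direct query" % (consumerId, lineno))
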
@@ -0,0 +1,302 @@
from distutils.log import error
import taos
import sys
import time
import socket
import os
import threading
import subprocess
import platform

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *

class TDTestCase:
    def __init__(self):
        self.snapshot = 0
        self.replica = 3
        self.vgroups = 4
        self.ctbNum = 1000
        self.rowsPerTbl = 100

    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor())
        # tdSql.init(conn.cursor(), logSql)  # output sql.txt file

    def checkFileContent(self, consumerId, queryString):
        buildPath = tdCom.getBuildPath()
        cfgPath = tdCom.getClientCfgPath()
        dstFile = '%s/../log/dstrows_%d.txt'%(cfgPath, consumerId)
        cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile)
        tdLog.info(cmdStr)
        os.system(cmdStr)

        consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId)
        tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile))

        consumeFile = open(consumeRowsFile, mode='r')
        queryFile = open(dstFile, mode='r')

        # skip the first line since it is the schema
        queryFile.readline()

        while True:
            dst = queryFile.readline()
            src = consumeFile.readline()

            if dst:
                if dst != src:
                    tdLog.exit("consumerId %d consumed rows do not match the rows from the direct query"%consumerId)
            else:
                break
        return

    def prepareTestEnv(self):
        tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
        paraDict = {'dbName': 'dbt',
                    'dropFlag': 1,
                    'event': '',
                    'vgroups': 4,
                    'stbName': 'stb',
                    'colPrefix': 'c',
                    'tagPrefix': 't',
                    'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
                    'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
                    'ctbPrefix': 'ctb',
                    'ctbStartIdx': 0,
                    'ctbNum': 100,
                    'rowsPerTbl': 1000,
                    'batchNum': 100,
                    'startTs': 1640966400000,  # 2022-01-01 00:00:00.000
                    'pollDelay': 3,
                    'showMsg': 1,
                    'showRow': 1,
                    'snapshot': 0}

        paraDict['vgroups'] = self.vgroups
        paraDict['ctbNum'] = self.ctbNum
        paraDict['rowsPerTbl'] = self.rowsPerTbl

        tmqCom.initConsumerTable()
        tdCom.create_database(tdSql, paraDict["dbName"], paraDict["dropFlag"], vgroups=paraDict["vgroups"], replica=self.replica)
        tdLog.info("create stb")
        tmqCom.create_stable(tdSql, dbName=paraDict["dbName"], stbName=paraDict["stbName"])
        tdLog.info("create ctb")
        tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"], stbName=paraDict["stbName"], ctbPrefix=paraDict['ctbPrefix'],
                             ctbNum=paraDict["ctbNum"], ctbStartIdx=paraDict['ctbStartIdx'])
        tdLog.info("insert data")
        # tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
        #                                        ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
        #                                        startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
        # tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
        #                                       ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
        #                                       startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
        tmqCom.asyncInsertDataByInterlace(paraDict)

        tdLog.info("wait until some data has been inserted")
        exitFlag = 1
        while exitFlag:
            queryString = "select count(*) from %s.%s"%(paraDict["dbName"], paraDict["stbName"])
            tdSql.query(queryString)
            if tdSql.getRows() > 0:
                rowsInserted = tdSql.getData(0,0)
                if (rowsInserted > ((self.ctbNum * self.rowsPerTbl)/5)):
                    exitFlag = 0
            time.sleep(0.1)

        tdLog.info("inserted rows: %d"%tdSql.getData(0,0))
        # tdDnodes=cluster.dnodes
        tdLog.info("================= restart dnode 2===========================")
        cluster.dnodes[1].stoptaosd()
        cluster.dnodes[1].starttaosd()
        tdLog.info("================= restart dnode 3===========================")
        cluster.dnodes[2].stoptaosd()
        cluster.dnodes[2].starttaosd()
        tdLog.info("================= restart dnode 4===========================")
        cluster.dnodes[3].stoptaosd()
        cluster.dnodes[3].starttaosd()
        tdLog.info("================= restart dnode 5===========================")
        cluster.dnodes[4].stoptaosd()
        cluster.dnodes[4].starttaosd()

        # tdLog.info("restart taosd to ensure that the data falls into the disk")
        # tdSql.query("flush database %s"%(paraDict['dbName']))
        return

    def tmqCase1(self):
        tdLog.printNoPrefix("======== test case 1: ")

        # create and start thread
        paraDict = {'dbName': 'dbt',
                    'dropFlag': 1,
                    'event': '',
                    'vgroups': 4,
                    'stbName': 'stb',
                    'colPrefix': 'c',
                    'tagPrefix': 't',
                    'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
                    'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
                    'ctbPrefix': 'ctb',
                    'ctbStartIdx': 0,
                    'ctbNum': 100,
                    'rowsPerTbl': 1000,
                    'batchNum': 100,
                    'startTs': 1640966400000,  # 2022-01-01 00:00:00.000
                    'pollDelay': 15,
                    'showMsg': 1,
                    'showRow': 1,
                    'snapshot': 0}

        paraDict['vgroups'] = self.vgroups
        paraDict['ctbNum'] = self.ctbNum
        paraDict['rowsPerTbl'] = self.rowsPerTbl

        tdLog.info("create topics from stb1")
        topicFromStb1 = 'topic_stb1'
        queryString = "select ts, c1, c2 from %s.%s where t4 == 'beijing' or t4 == 'changsha' "%(paraDict['dbName'], paraDict['stbName'])
        sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
        tdLog.info("create topic sql: %s"%sqlString)
        tdSql.execute(sqlString)

        consumerId = 0
        expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
        topicList = topicFromStb1
        ifcheckdata = 0
        ifManualCommit = 0
        keyList = 'group.id:cgrp1,\
                   enable.auto.commit:false,\
                   auto.commit.interval.ms:6000,\
                   auto.offset.reset:earliest'
        tmqCom.insertConsumerInfo(consumerId, expectrowcnt, topicList, keyList, ifcheckdata, ifManualCommit)

        tdLog.info("start consume processor")
        tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'], dbName=paraDict["dbName"], showMsg=paraDict['showMsg'], showRow=paraDict['showRow'], snapshot=paraDict['snapshot'])

        tdLog.info("start to check consume result")
        expectRows = 1
        resultList = tmqCom.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        for i in range(expectRows):
            totalConsumeRows += resultList[i]

        tdSql.query(queryString)
        totalRowsFromQuery = tdSql.getRows()

        tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d"%(totalConsumeRows, totalRowsFromQuery, expectrowcnt))

        if totalConsumeRows != totalRowsFromQuery:
            tdLog.exit("tmq consume rows error!")

        # tmqCom.checkFileContent(consumerId, queryString)

        tmqCom.waitSubscriptionExit(tdSql, topicFromStb1)
        tdSql.query("drop topic %s"%topicFromStb1)

        tdLog.printNoPrefix("======== test case 1 end ...... ")

    def tmqCase2(self):
        tdLog.printNoPrefix("======== test case 2: ")

        # create and start thread
        paraDict = {'dbName': 'dbt',
                    'dropFlag': 1,
                    'event': '',
                    'vgroups': 4,
                    'stbName': 'stb',
                    'colPrefix': 'c',
                    'tagPrefix': 't',
                    'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
                    'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
                    'ctbPrefix': 'ctb',
                    'ctbStartIdx': 0,
                    'ctbNum': 100,
                    'rowsPerTbl': 1000,
                    'batchNum': 100,
                    'startTs': 1640966400000,  # 2022-01-01 00:00:00.000
                    'pollDelay': 15,
                    'showMsg': 1,
                    'showRow': 1,
                    'snapshot': 1}

        paraDict['vgroups'] = self.vgroups
        paraDict['ctbNum'] = self.ctbNum
        paraDict['rowsPerTbl'] = self.rowsPerTbl

        tdLog.info("create topics from stb1")
        topicFromStb1 = 'topic_stb1'
        queryString = "select ts, c1, c2 from %s.%s where t4 == 'beijing' or t4 == 'changsha' "%(paraDict['dbName'], paraDict['stbName'])
        sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
        tdLog.info("create topic sql: %s"%sqlString)
        tdSql.execute(sqlString)

        consumerId = 0
        expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
        topicList = topicFromStb1
        ifcheckdata = 0
        ifManualCommit = 0
        keyList = 'group.id:cgrp1,\
                   enable.auto.commit:false,\
                   auto.commit.interval.ms:6000,\
                   auto.offset.reset:earliest'
        tmqCom.insertConsumerInfo(consumerId, expectrowcnt, topicList, keyList, ifcheckdata, ifManualCommit)

        tdLog.info("start consume processor")
        tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'], dbName=paraDict["dbName"], showMsg=paraDict['showMsg'], showRow=paraDict['showRow'], snapshot=paraDict['snapshot'])

        tdLog.info("================= restart dnode 2===========================")
        cluster.dnodes[1].stoptaosd()
        cluster.dnodes[1].starttaosd()
        tdLog.info("================= restart dnode 3===========================")
        cluster.dnodes[2].stoptaosd()
        cluster.dnodes[2].starttaosd()
        tdLog.info("================= restart dnode 4===========================")
        cluster.dnodes[3].stoptaosd()
        cluster.dnodes[3].starttaosd()
        tdLog.info("================= restart dnode 5===========================")
        cluster.dnodes[4].stoptaosd()
        cluster.dnodes[4].starttaosd()

        tdLog.info("start to check consume result")
        expectRows = 1
        resultList = tmqCom.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        for i in range(expectRows):
            totalConsumeRows += resultList[i]

        tdSql.query(queryString)
        totalRowsFromQuery = tdSql.getRows()

        tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d"%(totalConsumeRows, totalRowsFromQuery, expectrowcnt))

        if totalConsumeRows != totalRowsFromQuery:
            tdLog.exit("tmq consume rows error!")

        # tmqCom.checkFileContent(consumerId, queryString)

        tmqCom.waitSubscriptionExit(tdSql, topicFromStb1)
        tdSql.query("drop topic %s"%topicFromStb1)

        tdLog.printNoPrefix("======== test case 2 end ...... ")

    def run(self):
        self.prepareTestEnv()
        self.tmqCase1()
        self.tmqCase2()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


event = threading.Event()

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
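
prepareTestEnv and tmqCase2 above repeat the same four stop/start blocks. A sketch that folds them into one helper (the helper is editorial; the ids are the 1-based dnode ids used by the test, mapped onto the 0-based cluster.dnodes list):

def restart_dnodes(dnode_ids):
    # restart each dnode in turn, logging in the test's banner style
    for dnode_id in dnode_ids:
        tdLog.info("================= restart dnode %d ===========================" % dnode_id)
        cluster.dnodes[dnode_id - 1].stoptaosd()
        cluster.dnodes[dnode_id - 1].starttaosd()

# e.g. restart_dnodes([2, 3, 4, 5])
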
@@ -1 +1 @@
-Subproject commit 0b8a3373bb7548f8106d13e7d3b0a988d3c4d48a
+Subproject commit 817cb6ac431ed8ae4c843872cdfc8c201c1e1894