Merge branch '3.0' of https://github.com/taosdata/TDengine into fix/TD-31212

54liuyao 2024-08-06 10:41:12 +08:00
commit 143745d023
14 changed files with 360 additions and 73 deletions

View File

@@ -1620,6 +1620,7 @@ void blockDataFreeRes(SSDataBlock* pBlock) {
if (pBlock == NULL){
return;
}
int32_t numOfOutput = taosArrayGetSize(pBlock->pDataBlock);
for (int32_t i = 0; i < numOfOutput; ++i) {
SColumnInfoData* pColInfoData = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, i);

View File

@@ -26,7 +26,7 @@
#define T_LONG_JMP(_obj, _c) \
do { \
ASSERT((_c) != 1); \
ASSERT((_c) != -1); \
longjmp((_obj), (_c)); \
} while (0)
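For context, T_LONG_JMP is a thin wrapper over longjmp that throws an error code back to a setjmp site; the hunk tightens the assert on which jump values are allowed. A minimal standalone sketch of that control flow (illustrative names, not TDengine code):

```c
#include <setjmp.h>
#include <stdio.h>

static jmp_buf env;

static void failingStep(void) {
  longjmp(env, 2);  /* unwind back to the setjmp site, carrying error code 2 */
}

int main(void) {
  int code = setjmp(env);  /* 0 on the first pass, the thrown code after a jump */
  if (code != 0) {
    printf("caught error code:%d\n", code);
    return 1;
  }
  failingStep();  /* does not return */
  return 0;
}
```

Note that longjmp(env, 0) would make setjmp return 1, which is one reason wrappers like this assert on reserved jump values.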

View File

@@ -1486,6 +1486,7 @@ int32_t qGetTableList(int64_t suid, void* pVnode, void* node, SArray** tableList
pNode.suid = suid;
pNode.uid = suid;
pNode.tableType = TSDB_SUPER_TABLE;
STableListInfo* pTableListInfo = tableListCreate();
QUERY_CHECK_NULL(pTableListInfo, code, lino, _end, terrno);
uint8_t digest[17] = {0};
@@ -1759,8 +1760,11 @@ int32_t createExprFromOneNode(SExprInfo* pExp, SNode* pNode, int16_t slotId) {
SDataType* pType = &pColNode->node.resType;
pExp->base.resSchema =
createResSchema(pType->type, pType->bytes, slotId, pType->scale, pType->precision, pColNode->colName);
pExp->base.pParam[0].pCol =
createColumn(pColNode->dataBlockId, pColNode->slotId, pColNode->colId, pType, pColNode->colType);
QUERY_CHECK_NULL(pExp->base.pParam[0].pCol, code, lino, _end, terrno);
pExp->base.pParam[0].type = FUNC_PARAM_TYPE_COLUMN;
} else if (type == QUERY_NODE_VALUE) {
pExp->pExpr->nodeType = QUERY_NODE_VALUE;
@@ -1829,6 +1833,7 @@ int32_t createExprFromOneNode(SExprInfo* pExp, SNode* pNode, int16_t slotId) {
pExp->base.pParam[j].type = FUNC_PARAM_TYPE_COLUMN;
pExp->base.pParam[j].pCol =
createColumn(pcn->dataBlockId, pcn->slotId, pcn->colId, &pcn->node.resType, pcn->colType);
QUERY_CHECK_NULL(pExp->base.pParam[j].pCol, code, lino, _end, terrno);
} else if (p1->type == QUERY_NODE_VALUE) {
SValueNode* pvn = (SValueNode*)p1;
pExp->base.pParam[j].type = FUNC_PARAM_TYPE_VALUE;
@@ -2415,7 +2420,9 @@ void tableListGetSourceTableInfo(const STableListInfo* pTableList, uint64_t* psu
uint64_t tableListGetTableGroupId(const STableListInfo* pTableList, uint64_t tableUid) {
int32_t* slot = taosHashGet(pTableList->map, &tableUid, sizeof(tableUid));
ASSERT(pTableList->map != NULL && slot != NULL);
if (slot == NULL) {
return -1;
}
STableKeyInfo* pKeyInfo = taosArrayGet(pTableList->pTableList, *slot);
ASSERT(pKeyInfo->uid == tableUid);
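Since tableListGetTableGroupId returns uint64_t, the new `return -1` reaches callers as UINT64_MAX, so the sentinel must be compared as unsigned. A tiny standalone demo (hypothetical names):

```c
#include <stdint.h>
#include <stdio.h>

/* Returning -1 from a uint64_t-returning function wraps to UINT64_MAX. */
static uint64_t lookupGroupId(int found) {
  return found ? 42 : (uint64_t)-1;
}

int main(void) {
  uint64_t gid = lookupGroupId(0);
  printf("missing maps to UINT64_MAX? %d\n", gid == UINT64_MAX);  /* prints 1 */
  return 0;
}
```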
@@ -2498,11 +2505,10 @@ bool oneTableForEachGroup(const STableListInfo* pTableList) { return pTableList-
STableListInfo* tableListCreate() {
STableListInfo* pListInfo = taosMemoryCalloc(1, sizeof(STableListInfo));
if (pListInfo == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
pListInfo->remainGroups = NULL;
pListInfo->pTableList = taosArrayInit(4, sizeof(STableKeyInfo));
if (pListInfo->pTableList == NULL) {
goto _error;
@@ -2518,7 +2524,6 @@ STableListInfo* tableListCreate() {
_error:
tableListDestroy(pListInfo);
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
@@ -2528,7 +2533,6 @@ void tableListDestroy(STableListInfo* pTableListInfo) {
}
taosArrayDestroy(pTableListInfo->pTableList);
pTableListInfo->pTableList = NULL;
taosMemoryFreeClear(pTableListInfo->groupOffset);
taosHashCleanup(pTableListInfo->map);
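The two hunks above lean on a single goto-style cleanup path: the redundant terrno assignments go away because the failing allocator already recorded the error, and every failure funnels into one _error label. A minimal sketch of that shape with simplified stand-in types (not the real STableListInfo):

```c
#include <stdlib.h>

typedef struct {
  void* pTableList;  /* stand-in members */
  void* map;
} TListInfo;

TListInfo* listCreate(void) {
  TListInfo* p = calloc(1, sizeof(TListInfo));
  if (p == NULL) {
    return NULL;            /* the failed allocator already set the error code */
  }
  p->pTableList = malloc(64);
  if (p->pTableList == NULL) {
    goto _error;            /* every failure funnels into one cleanup path */
  }
  return p;

_error:
  free(p->pTableList);      /* free(NULL) is a no-op, so partial init is safe */
  free(p);
  return NULL;
}
```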

View File

@@ -1156,8 +1156,8 @@ int32_t createPartitionOperatorInfo(SOperatorInfo* downstream, SPartitionPhysiNo
int32_t numOfCols = 0;
SExprInfo* pExprInfo = NULL;
code = createExprInfo(pPartNode->pTargets, NULL, &pExprInfo, &numOfCols);
QUERY_CHECK_CODE(code, lino, _error);
pInfo->pGroupCols = makeColumnArrayFromList(pPartNode->pPartitionKeys);

View File

@@ -623,6 +623,7 @@ int32_t createMultiwayMergeOperatorInfo(SOperatorInfo** downStreams, size_t numS
SNonSortMergeInfo* pNonSortMerge = &pInfo->nsortMergeInfo;
pInfo->binfo.pRes = createDataBlockFromDescNode(pDescNode);
TSDB_CHECK_NULL(pInfo->binfo.pRes, code, lino, _error, terrno);
initResultSizeInfo(&pOperator->resultInfo, 1024);
code = blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity);
TSDB_CHECK_CODE(code, lino, _error);
@@ -633,6 +634,7 @@ int32_t createMultiwayMergeOperatorInfo(SOperatorInfo** downStreams, size_t numS
SColsMergeInfo* pColsMerge = &pInfo->colsMergeInfo;
pInfo->binfo.pRes = createDataBlockFromDescNode(pDescNode);
TSDB_CHECK_NULL(pInfo->binfo.pRes, code, lino, _error, terrno);
initResultSizeInfo(&pOperator->resultInfo, 1);
code = blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity);
TSDB_CHECK_CODE(code, lino, _error);

View File

@@ -64,6 +64,10 @@ int32_t doCreateTask(uint64_t queryId, uint64_t taskId, int32_t vgId, EOPTR_EXEC
p->id.queryId = queryId;
p->id.taskId = taskId;
p->id.str = taosMemoryMalloc(64);
if (p->id.str == NULL) {
return terrno;
}
buildTaskId(taskId, queryId, p->id.str);
p->schemaInfos = taosArrayInit(1, sizeof(SSchemaInfo));
if (p->id.str == NULL || p->schemaInfos == NULL) {
@@ -174,9 +178,16 @@ int32_t initQueriedTableSchemaInfo(SReadHandle* pHandle, SScanPhysiNode* pScanNo
schemaInfo.sw = tCloneSSchemaWrapper(&mr.me.ntbEntry.schemaRow);
}
if (schemaInfo.sw == NULL) {
pAPI->metaReaderFn.clearReader(&mr);  // release the meta reader before bailing out
return terrno;
}
pAPI->metaReaderFn.clearReader(&mr);
schemaInfo.qsw = extractQueriedColumnSchema(pScanNode);
if (schemaInfo.qsw == NULL) {
return terrno;
}
void* p = taosArrayPush(pTaskInfo->schemaInfos, &schemaInfo);
return (p != NULL) ? TSDB_CODE_SUCCESS : TSDB_CODE_OUT_OF_MEMORY;
}
@@ -186,7 +197,14 @@ SSchemaWrapper* extractQueriedColumnSchema(SScanPhysiNode* pScanNode) {
int32_t numOfTags = LIST_LENGTH(pScanNode->pScanPseudoCols);
SSchemaWrapper* pqSw = taosMemoryCalloc(1, sizeof(SSchemaWrapper));
if (pqSw == NULL) {
return NULL;
}
pqSw->pSchema = taosMemoryCalloc(numOfCols + numOfTags, sizeof(SSchema));
if (pqSw->pSchema == NULL) {
taosMemoryFree(pqSw);  // avoid leaking the wrapper when the schema array allocation fails
return NULL;
}
for (int32_t i = 0; i < numOfCols; ++i) {
STargetNode* pNode = (STargetNode*)nodesListGetNode(pScanNode->pScanCols, i);

View File

@@ -182,8 +182,8 @@ static void* tupleGetField(char* t, uint32_t colIdx, uint32_t colNum) {
}
int32_t tsortGetSortedDataBlock(const SSortHandle* pSortHandle, SSDataBlock** pBlock) {
*pBlock = NULL;
if (pSortHandle->pDataBlock == NULL) {
*pBlock = NULL;
return TSDB_CODE_SUCCESS;
}
return createOneDataBlock(pSortHandle->pDataBlock, false, pBlock);
@@ -2452,7 +2452,7 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
return code;
}
static bool tsortOpenForBufMergeSort(SSortHandle* pHandle) {
static int32_t tsortOpenForBufMergeSort(SSortHandle* pHandle) {
int32_t code = createInitialSources(pHandle);
if (code != TSDB_CODE_SUCCESS) {
return code;
@@ -2478,7 +2478,8 @@ static bool tsortOpenForBufMergeSort(SSortHandle* pHandle) {
return code;
}
return tMergeTreeCreate(&pHandle->pMergeTree, pHandle->cmpParam.numOfSources, &pHandle->cmpParam, pHandle->comparFn);
code = tMergeTreeCreate(&pHandle->pMergeTree, pHandle->cmpParam.numOfSources, &pHandle->cmpParam, pHandle->comparFn);
return code;
}
void tsortClose(SSortHandle* pHandle) {
@@ -2808,19 +2809,24 @@ static int32_t tsortSingleTableMergeNextTuple(SSortHandle* pHandle, STupleHandle
}
int32_t tsortOpen(SSortHandle* pHandle) {
int32_t code = 0;
if (pHandle->opened) {
return 0;
return code;
}
if (pHandle->fetchfp == NULL || pHandle->comparFn == NULL) {
return TSDB_CODE_INVALID_PARA;
if (pHandle == NULL || pHandle->fetchfp == NULL || pHandle->comparFn == NULL) {
code = TSDB_CODE_INVALID_PARA;
return code;
}
pHandle->opened = true;
if (tsortIsPQSortApplicable(pHandle))
return tsortOpenForPQSort(pHandle);
else
return tsortOpenForBufMergeSort(pHandle);
if (tsortIsPQSortApplicable(pHandle)) {
code = tsortOpenForPQSort(pHandle);
} else {
code = tsortOpenForBufMergeSort(pHandle);
}
return code;
}
int32_t tsortNextTuple(SSortHandle* pHandle, STupleHandle** pTupleHandle) {
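The change from bool to int32_t above means tsortOpen and its helpers now report which error occurred instead of a bare success flag. A hedged sketch of the propagation shape (the CODE_* defines stand in for the real TSDB_CODE_* macros; the rest is illustrative):

```c
#include <stdio.h>

#define CODE_SUCCESS      0        /* stand-in for TSDB_CODE_SUCCESS */
#define CODE_INVALID_PARA (-2001)  /* stand-in for TSDB_CODE_INVALID_PARA */

typedef struct { int opened; } Handle;

static int prepareSources(Handle* h) { (void)h; return CODE_SUCCESS; }

static int sortOpen(Handle* h) {
  if (h == NULL) {
    return CODE_INVALID_PARA;   /* callers learn why, not just that, it failed */
  }
  if (h->opened) {
    return CODE_SUCCESS;        /* reopening is a no-op */
  }
  h->opened = 1;
  int code = prepareSources(h);
  if (code != CODE_SUCCESS) {
    return code;                /* propagate instead of collapsing to true/false */
  }
  return CODE_SUCCESS;
}

int main(void) {
  Handle h = {0};
  printf("open rc:%d\n", sortOpen(&h));
  return 0;
}
```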

View File

@@ -74,13 +74,6 @@ int32_t streamTaskCheckStatus(SStreamTask* pTask, int32_t upstreamTaskId, int32_
}
if (pInfo->stage != stage) {
streamMutexLock(&pTask->lock);
ETaskStatus status = streamTaskGetStatus(pTask).state;
if (status == TASK_STATUS__CK) {
streamTaskSetFailedCheckpointId(pTask);
}
streamMutexUnlock(&pTask->lock);
return TASK_UPSTREAM_NEW_STAGE;
} else if (pTask->status.downstreamReady != 1) {
stDebug("s-task:%s vgId:%d leader:%d, downstream not ready", id, vgId, (pTask->pMeta->role == NODE_ROLE_LEADER));

View File

@@ -453,8 +453,9 @@ int32_t streamTaskProcessCheckpointReadyRsp(SStreamTask* pTask, int32_t upstream
for (int32_t i = 0; i < taosArrayGetSize(pInfo->pReadyMsgList); ++i) {
STaskCheckpointReadyInfo* pReadyInfo = taosArrayGet(pInfo->pReadyMsgList, i);
if (pReadyInfo == NULL) {
streamMutexUnlock(&pInfo->lock);
return TSDB_CODE_INVALID_PARA;
stError("s-task:%s invalid index during iterate the checkpoint-ready msg list, index:%d, ignore and continue",
pTask->id.idStr, i);
continue;
}
if (pReadyInfo->upstreamTaskId == upstreamTaskId && pReadyInfo->checkpointId == checkpointId) {
@@ -468,8 +469,9 @@ int32_t streamTaskProcessCheckpointReadyRsp(SStreamTask* pTask, int32_t upstream
for (int32_t i = 0; i < taosArrayGetSize(pInfo->pReadyMsgList); ++i) {
STaskCheckpointReadyInfo* pReadyInfo = taosArrayGet(pInfo->pReadyMsgList, i);
if (pReadyInfo == NULL) {
streamMutexUnlock(&pInfo->lock);
return TSDB_CODE_INVALID_PARA;
stError("s-task:%s invalid index during iterate the checkpoint-ready msg list, index:%d, ignore and continue",
pTask->id.idStr, i);
continue;
}
if (pReadyInfo->sendCompleted == 1) {
@@ -601,9 +603,15 @@ int32_t streamTaskUpdateTaskCheckpointInfo(SStreamTask* pTask, bool restored, SV
}
void streamTaskSetFailedCheckpointId(SStreamTask* pTask) {
pTask->chkInfo.pActiveInfo->failedId = pTask->chkInfo.pActiveInfo->activeId;
stDebug("s-task:%s mark the checkpointId:%" PRId64 " (transId:%d) failed", pTask->id.idStr,
pTask->chkInfo.pActiveInfo->activeId, pTask->chkInfo.pActiveInfo->transId);
struct SActiveCheckpointInfo* pInfo = pTask->chkInfo.pActiveInfo;
if (pInfo->activeId <= 0) {
stWarn("s-task:%s checkpoint-info is cleared now, not set the failed checkpoint info", pTask->id.idStr);
} else {
pInfo->failedId = pInfo->activeId;
stDebug("s-task:%s mark the checkpointId:%" PRId64 " (transId:%d) failed", pTask->id.idStr, pInfo->activeId,
pInfo->transId);
}
}
static int32_t getCheckpointDataMeta(const char* id, const char* path, SArray* list) {
@@ -960,6 +968,7 @@ bool streamTaskAlreadySendTrigger(SStreamTask* pTask, int32_t downstreamNodeId)
const char* id = pTask->id.idStr;
SActiveCheckpointInfo* pInfo = pTask->chkInfo.pActiveInfo;
SStreamTaskState pStatus = streamTaskGetStatus(pTask);
bool alreadySend = false;
if (pStatus.state != TASK_STATUS__CK) {
return false;
@@ -971,11 +980,12 @@ bool streamTaskAlreadySendTrigger(SStreamTask* pTask, int32_t downstreamNodeId)
return false;
}
for (int32_t i = 0; i < taosArrayGetSize(pInfo->pDispatchTriggerList); ++i) {
int32_t num = taosArrayGetSize(pInfo->pDispatchTriggerList);
for (int32_t i = 0; i < num; ++i) {
STaskTriggerSendInfo* pSendInfo = taosArrayGet(pInfo->pDispatchTriggerList, i);
if (pSendInfo == NULL) {
streamMutexUnlock(&pInfo->lock);
return TSDB_CODE_INVALID_PARA;
stError("s-task:%s invalid index in dispatch-trigger list, index:%d, size:%d, ignore and continue", id, i, num);
continue;
}
if (pSendInfo->nodeId != downstreamNodeId) {

View File

@@ -95,11 +95,12 @@ int32_t taosArrayEnsureCap(SArray* pArray, size_t newCap) {
tsize = (newSize == tsize) ? (tsize + 2) : newSize;
}
pArray->pData = taosMemoryRealloc(pArray->pData, tsize * pArray->elemSize);
if (pArray->pData == NULL) {
char* p = taosMemoryRealloc(pArray->pData, tsize * pArray->elemSize);
if (p == NULL) {
return terrno;
}
pArray->pData = p;
pArray->capacity = tsize;
}
return 0;
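This is the classic realloc pitfall: writing the result straight back to the only pointer loses (and leaks) the old block when realloc returns NULL. The fixed shape in isolation (illustrative names):

```c
#include <stdlib.h>

int ensureCap(void** ppData, size_t newBytes) {
  void* p = realloc(*ppData, newBytes);
  if (p == NULL) {
    return -1;      /* *ppData is untouched; the old block is still owned and freeable */
  }
  *ppData = p;      /* overwrite the pointer only after realloc succeeded */
  return 0;
}
```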

View File

@@ -39,7 +39,7 @@ int32_t tMergeTreeCreate(SMultiwayMergeTreeInfo** pTree, uint32_t numOfSources,
(SMultiwayMergeTreeInfo*)taosMemoryCalloc(1, sizeof(SMultiwayMergeTreeInfo) + sizeof(STreeNode) * totalEntries);
if (pTreeInfo == NULL) {
uError("allocate memory for loser-tree failed. reason:%s", strerror(errno));
return TAOS_SYSTEM_ERROR(errno);
return terrno;
}
pTreeInfo->pNode = (STreeNode*)(((char*)pTreeInfo) + sizeof(SMultiwayMergeTreeInfo));

View File

@@ -352,9 +352,8 @@ static SPageInfo* getPageInfoFromPayload(void* page) {
int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMemBufSize, const char* id,
const char* dir) {
*pBuf = taosMemoryCalloc(1, sizeof(SDiskbasedBuf));
SDiskbasedBuf* pPBuf = *pBuf;
*pBuf = NULL;
SDiskbasedBuf* pPBuf = taosMemoryCalloc(1, sizeof(SDiskbasedBuf));
if (pPBuf == NULL) {
goto _error;
}
@@ -394,11 +393,16 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem
pPBuf->prefix = (char*)dir;
pPBuf->emptyDummyIdList = taosArrayInit(1, sizeof(int32_t));
if (pPBuf->emptyDummyIdList == NULL) {
goto _error;
}
// qDebug("QInfo:0x%"PRIx64" create resBuf for output, page size:%d, inmem buf pages:%d, file:%s", qId,
// pPBuf->pageSize, pPBuf->inMemPages, pPBuf->path);
*pBuf = pPBuf;
return TSDB_CODE_SUCCESS;
_error:
destroyDiskbasedBuf(pPBuf);
*pBuf = NULL;
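The reshuffle above adopts a publish-on-success discipline for the out-parameter: the buffer is built in a local, and *pBuf is assigned only after initialization fully succeeds, so no caller can ever observe a half-built object. A minimal sketch with stand-in types:

```c
#include <stdint.h>
#include <stdlib.h>

typedef struct { void* idList; } DiskBuf;  /* stand-in for SDiskbasedBuf */

int32_t createBuf(DiskBuf** pBuf) {
  *pBuf = NULL;                    /* caller sees NULL unless we fully succeed */
  DiskBuf* p = calloc(1, sizeof(DiskBuf));
  if (p == NULL) goto _error;

  p->idList = malloc(16);
  if (p->idList == NULL) goto _error;

  *pBuf = p;                       /* publish only after the last step succeeded */
  return 0;

_error:
  if (p != NULL) free(p->idList);
  free(p);
  return -1;
}
```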

View File

@@ -15,7 +15,7 @@ from frame import *
from frame.autogen import *
# from frame.server.dnodes import *
# from frame.server.cluster import *
from frame.clusterCommonCheck import *
class TDTestCase(TBase):
updatecfgDict = {
@@ -39,29 +39,12 @@ class TDTestCase(TBase):
autoGen.insert_data(1000)
tdSql.execute(f"flush database {self.db}")
sc.dnodeStop(3)
# clusterDnodes.stoptaosd(1)
# clusterDnodes.starttaosd(3)
# time.sleep(5)
# clusterDnodes.stoptaosd(2)
# clusterDnodes.starttaosd(1)
# time.sleep(5)
autoGen.insert_data(5000, True)
self.flushDb(True)
# wait flush operation over
time.sleep(5)
# sql = 'show vnodes;'
# while True:
# bFinish = True
# param_list = tdSql.query(sql, row_tag=True)
# for param in param_list:
# if param[3] == 'leading' or param[3] == 'following':
# bFinish = False
# break
# if bFinish:
# break
self.snapshotAgg()
time.sleep(10)
self.snapshotAgg()
sc.dnodeStopAll()
for i in range(1, 4):
path = clusterDnodes.getDnodeDir(i)
@@ -75,19 +58,7 @@ class TDTestCase(TBase):
sc.dnodeStart(2)
sc.dnodeStart(3)
sql = "show vnodes;"
time.sleep(10)
while True:
bFinish = True
param_list = tdSql.query(sql, row_tag=True)
for param in param_list:
if param[3] == 'offline':
tdLog.exit(
"dnode synchronous fail dnode id: %d, vgroup id:%d status offline" % (param[0], param[1]))
if param[3] == 'leading' or param[3] == 'following':
bFinish = False
break
if bFinish:
break
clusterComCheck.check_vgroups_status(vgroup_numbers=2,db_replica=3,db_name=f"{self.db}",count_number=60)
self.timestamp_step = 1000
self.insert_rows = 6000

View File

@@ -0,0 +1,277 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
from collections import defaultdict
import random
import string
import threading
import requests
import time
# import socket
import taos
from frame.log import *
from frame.sql import *
from frame.cases import *
from frame.server.dnodes import *
from frame.common import *
# class actionType(Enum):
# CREATE_DATABASE = 0
# CREATE_STABLE = 1
# CREATE_CTABLE = 2
# INSERT_DATA = 3
class ClusterComCheck:
def init(self, conn, logSql=False):
tdSql.init(conn.cursor())
# tdSql.init(conn.cursor(), logSql) # output sql.txt file
def checkDnodes(self,dnodeNumbers, timeout=100):
count=0
# print(tdSql)
while count < timeout:
tdSql.query("select * from information_schema.ins_dnodes")
# tdLog.debug(tdSql.res)
status=0
for i in range(dnodeNumbers):
if tdSql.res[i][4] == "ready":
status+=1
# tdLog.info(status)
if status == dnodeNumbers:
tdLog.success("it find cluster with %d dnodes and check that all cluster dnodes are ready within %ds! " % (dnodeNumbers, count+1))
return True
time.sleep(1)
count+=1
else:
tdSql.query("select * from information_schema.ins_dnodes")
tdLog.debug(tdSql.res)
tdLog.exit("it find cluster with %d dnodes but check that there dnodes are not ready within %ds ! "% (dnodeNumbers, timeout))
def checkDbRows(self,dbNumbers):
dbNumbers=int(dbNumbers)
count=0
while count < 5:
tdSql.query("select * from information_schema.ins_databases where name!='collectd' ;")
count+=1
if tdSql.checkRows(dbNumbers+2):
tdLog.success("we find %d databases and expect %d in clusters! " %(tdSql.queryRows,dbNumbers+2))
return True
else:
continue
else :
tdLog.debug(tdSql.res)
tdLog.exit("we find %d databases but expect %d in clusters! " %(tdSql.queryRows,dbNumbers))
def checkDb(self,dbNumbers,restartNumber,dbNameIndex, timeout=100):
count=0
alldbNumbers=(dbNumbers*restartNumber)+2
while count < timeout:
query_status=0
for j in range(dbNumbers):
for i in range(alldbNumbers):
tdSql.query("select * from information_schema.ins_databases;")
if "%s_%d"%(dbNameIndex,j) == tdSql.res[i][0] :
if tdSql.res[i][15] == "ready":
query_status+=1
tdLog.debug("check %s_%d that status is ready "%(dbNameIndex,j))
else:
time.sleep(1)
continue
# print(query_status)
if query_status == dbNumbers:
tdLog.success(" check %d database and all databases are ready within %ds! " %(dbNumbers,count+1))
return True
count+=1
else:
tdLog.debug(tdSql.res)
tdLog.debug("query status is %d"%query_status)
tdLog.exit("database is not ready within %ds"%(timeout+1))
def checkData(self, dbname, stbname, stableCount, CtableCount, rowsPerSTable):
tdSql.execute("use %s"%dbname)
tdSql.query("show %s.stables"%dbname)
tdSql.checkRows(stableCount)
tdSql.query("show %s.tables"%dbname)
tdSql.checkRows(CtableCount)
for i in range(stableCount):
tdSql.query("select count(*) from %s%d"%(stbname,i))
tdSql.checkData(0,0,rowsPerSTable)
return
def checkMnodeStatus(self,mnodeNums):
self.mnodeNums=int(mnodeNums)
# self.leaderDnode=int(leaderDnode)
tdLog.debug("start to check status of mnodes")
count=0
while count < 10:
time.sleep(1)
tdSql.query("select * from information_schema.ins_mnodes;")
if tdSql.checkRows(self.mnodeNums) :
tdLog.success("cluster has %d mnodes" %self.mnodeNums )
if self.mnodeNums == 1:
if tdSql.res[0][2]== 'leader' and tdSql.res[0][3]== 'ready' :
tdLog.success("%d mnodes is ready in 10s"%self.mnodeNums)
return True
count+=1
elif self.mnodeNums == 3 :
if tdSql.res[0][2]=='leader' and tdSql.res[0][3]== 'ready' :
if tdSql.res[1][2]=='follower' and tdSql.res[1][3]== 'ready' :
if tdSql.res[2][2]=='follower' and tdSql.res[2][3]== 'ready' :
tdLog.success("%d mnodes is ready in 10s"%self.mnodeNums)
return True
elif tdSql.res[1][2]=='leader' and tdSql.res[1][3]== 'ready' :
if tdSql.res[0][2]=='follower' and tdSql.res[0][3]== 'ready' :
if tdSql.res[2][2]=='follower' and tdSql.res[2][3]== 'ready' :
tdLog.success("%d mnodes is ready in 10s"%self.mnodeNums)
return True
elif tdSql.res[2][2]=='leader' and tdSql.res[2][3]== 'ready' :
if tdSql.res[0][2]=='follower' and tdSql.res[0][3]== 'ready' :
if tdSql.res[1][2]=='follower' and tdSql.res[1][3]== 'ready' :
tdLog.success("%d mnodes is ready in 10s"%self.mnodeNums)
return True
count+=1
elif self.mnodeNums == 2 :
if tdSql.res[0][2]=='leader' and tdSql.res[0][3]== 'ready' :
if tdSql.res[1][2]=='follower' and tdSql.res[1][3]== 'ready' :
tdLog.success("%d mnodes is ready in 10s"%self.mnodeNums)
return True
elif tdSql.res[1][2]=='leader' and tdSql.res[1][3]== 'ready' :
if tdSql.res[0][2]=='follower' and tdSql.res[0][3]== 'ready' :
tdLog.success("%d mnodes is ready in 10s"%self.mnodeNums)
return True
count+=1
else:
tdLog.debug(tdSql.res)
tdLog.exit("cluster of %d mnodes is not ready in 10s " %self.mnodeNums)
def check3mnodeoff(self,offlineDnodeNo,mnodeNums=3):
count=0
while count < 10:
time.sleep(1)
tdSql.query("select * from information_schema.ins_mnodes;")
if tdSql.checkRows(mnodeNums) :
tdLog.success("cluster has %d mnodes" %self.mnodeNums )
else:
tdLog.exit("mnode number is correct")
if offlineDnodeNo == 1:
if tdSql.res[0][2]=='offline' :
if tdSql.res[1][2]=='leader':
if tdSql.res[2][2]=='follower':
tdLog.success("stop mnodes on dnode %d successfully in 10s" %offlineDnodeNo)
return True
elif tdSql.res[1][2]=='follower':
if tdSql.res[2][2]=='leader':
tdLog.debug("stop mnodes on dnode %d successfully in 10s" %offlineDnodeNo)
return True
count+=1
elif offlineDnodeNo == 2:
if tdSql.res[1][2]=='offline' :
if tdSql.res[0][2]=='leader':
if tdSql.res[2][2]=='follower':
tdLog.debug("stop mnodes on dnode %d successfully in 10s" %offlineDnodeNo)
return True
elif tdSql.res[0][2]=='follower':
if tdSql.res[2][2]=='leader':
tdLog.debug("stop mnodes on dnode %d successfully in 10s" %offlineDnodeNo)
return True
count+=1
elif offlineDnodeNo == 3:
if tdSql.res[2][2]=='offline' :
if tdSql.res[0][2]=='leader':
if tdSql.res[1][2]=='follower':
tdLog.debug("stop mnodes on dnode %d successfully in 10s" %offlineDnodeNo)
return True
elif tdSql.res[0][2]=='follower':
if tdSql.res[1][2]=='leader':
tdLog.debug("stop mnodes on dnode %d successfully in 10s" %offlineDnodeNo)
return True
count+=1
else:
tdLog.debug(tdSql.res)
tdLog.exit(f"stop mnodes on dnode {offlineDnodeNo} failed in 10s ")
def check3mnode2off(self,mnodeNums=3):
count=0
while count < 10:
time.sleep(1)
tdSql.query("select * from information_schema.ins_mnodes;")
if tdSql.checkRows(mnodeNums) :
tdLog.success("cluster has %d mnodes" %self.mnodeNums )
else:
tdLog.exit("mnode number is correct")
if tdSql.res[0][2]=='leader' :
if tdSql.res[1][2]=='offline':
if tdSql.res[2][2]=='offline':
tdLog.success("stop mnodes of follower on dnode successfully in 10s")
return True
count+=1
else:
tdLog.debug(tdSql.res)
tdLog.exit("stop mnodes on dnode 2 or 3 failed in 10s")
def check_vgroups_status(self,vgroup_numbers=2,db_replica=3,count_number=10,db_name="db"):
""" check vgroups status in 10s after db vgroups status is changed """
vgroup_numbers = int(vgroup_numbers)
self.db_replica = int(db_replica)
tdLog.debug("start to check status of vgroups")
count=0
last_number=vgroup_numbers-1
while count < count_number:
time.sleep(1)
count+=1
print("check vgroup count :", count)
tdSql.query(f"show {db_name}.vgroups;")
if tdSql.getRows() != vgroup_numbers :
continue
if self.db_replica == 1 :
if tdSql.res[0][4] == 'leader' and tdSql.res[last_number][4] == 'leader':
tdSql.query(f"select `replica` from information_schema.ins_databases where `name`='{db_name}';")
print("db replica :",tdSql.res[0][0])
if tdSql.res[0][0] == db_replica:
tdLog.success(f"all vgroups with replica {self.db_replica} of {db_name} are leaders in {count} s")
return True
elif self.db_replica == 3 :
vgroup_status_first=[tdSql.res[0][4],tdSql.res[0][6],tdSql.res[0][8]]
vgroup_status_last=[tdSql.res[last_number][4],tdSql.res[last_number][6],tdSql.res[last_number][8]]
if vgroup_status_first.count('leader') == 1 and vgroup_status_first.count('follower') == 2:
if vgroup_status_last.count('leader') == 1 and vgroup_status_last.count('follower') == 2:
tdSql.query(f"select `replica` from information_schema.ins_databases where `name`='{db_name}';")
print("db replica :",tdSql.res[0][0])
if tdSql.res[0][0] == db_replica:
tdLog.success(f"elections of {db_name}.vgroups with replica {self.db_replica} are ready in {count} s")
return True
else:
tdLog.debug(tdSql.res)
tdLog.notice(f"elections of {db_name} all vgroups with replica {self.db_replica} are failed in {count} s ")
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno)
tdLog.exit("%s(%d) failed " % args)
def close(self):
self.cursor.close()
clusterComCheck = ClusterComCheck()