commit 787f638781

@@ -298,7 +298,7 @@ static bool dnodeReadMnodeInfos() {
     tsMnodeInfos.nodeInfos[i].syncPort = (uint16_t)syncPort->valueint;

     cJSON *nodeName = cJSON_GetObjectItem(nodeInfo, "nodeName");
-    if (!nodeIp || nodeName->type != cJSON_String || nodeName->valuestring == NULL) {
+    if (!nodeName || nodeName->type != cJSON_String || nodeName->valuestring == NULL) {
       dError("failed to read mnode mgmtIpList.json, nodeName not found");
       goto PARSE_OVER;
     }
@@ -310,7 +310,7 @@ static bool dnodeReadMnodeInfos() {
   dPrint("read mnode iplist successed, numOfIps:%d inUse:%d", tsMnodeInfos.nodeNum, tsMnodeInfos.inUse);
   for (int32_t i = 0; i < tsMnodeInfos.nodeNum; i++) {
     dPrint("mnode:%d, ip:%s:%u name:%s", tsMnodeInfos.nodeInfos[i].nodeId,
-           taosIpStr(tsMnodeInfos.nodeInfos[i].nodeId), tsMnodeInfos.nodeInfos[i].nodePort,
+           taosIpStr(tsMnodeInfos.nodeInfos[i].nodeIp), tsMnodeInfos.nodeInfos[i].nodePort,
           tsMnodeInfos.nodeInfos[i].nodeName);
   }

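[reviewer note, not part of the commit] Both hunks above fix copy-paste slips in dnodeReadMnodeInfos(): the first validated the previously parsed nodeIp instead of the freshly fetched nodeName, and the second passed nodeId where taosIpStr() expects nodeIp. The cJSON discipline being enforced (validate exactly the object you just fetched before dereferencing it) looks like this, as a minimal sketch reusing the hunk's names, not code from the commit:

    cJSON *nodeName = cJSON_GetObjectItem(nodeInfo, "nodeName");
    if (nodeName == NULL || nodeName->type != cJSON_String || nodeName->valuestring == NULL) {
      goto PARSE_OVER;  // reject the whole file instead of reading a bad pointer
    }
    strncpy(tsMnodeInfos.nodeInfos[i].nodeName, nodeName->valuestring, TSDB_NODE_NAME_LEN);
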
@@ -33,7 +33,6 @@ static int32_t dnodeOpenVnodes();
 static void    dnodeCloseVnodes();
 static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *pMsg);
 static int32_t dnodeProcessDropVnodeMsg(SRpcMsg *pMsg);
-static int32_t dnodeProcessAlterVnodeMsg(SRpcMsg *pMsg);
 static int32_t dnodeProcessAlterStreamMsg(SRpcMsg *pMsg);
 static int32_t dnodeProcessConfigDnodeMsg(SRpcMsg *pMsg);
 static int32_t (*dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *pMsg);
@@ -41,7 +40,6 @@ static int32_t (*dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *pMsg);
 int32_t dnodeInitMgmt() {
   dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = dnodeProcessCreateVnodeMsg;
   dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_DROP_VNODE]   = dnodeProcessDropVnodeMsg;
-  dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_ALTER_VNODE]  = dnodeProcessAlterVnodeMsg;
   dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = dnodeProcessAlterStreamMsg;
   dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CONFIG_DNODE] = dnodeProcessConfigDnodeMsg;

@@ -129,25 +127,31 @@ static void dnodeCloseVnodes() {

 static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *rpcMsg) {
   SMDCreateVnodeMsg *pCreate = rpcMsg->pCont;
-  pCreate->cfg.vgId                         = htonl(pCreate->cfg.vgId);
-  pCreate->cfg.maxSessions                  = htonl(pCreate->cfg.maxSessions);
-  pCreate->cfg.cacheBlockSize               = htonl(pCreate->cfg.cacheBlockSize);
-  pCreate->cfg.daysPerFile                  = htonl(pCreate->cfg.daysPerFile);
-  pCreate->cfg.daysToKeep1                  = htonl(pCreate->cfg.daysToKeep1);
-  pCreate->cfg.daysToKeep2                  = htonl(pCreate->cfg.daysToKeep2);
-  pCreate->cfg.daysToKeep                   = htonl(pCreate->cfg.daysToKeep);
-  pCreate->cfg.commitTime                   = htonl(pCreate->cfg.commitTime);
-  pCreate->cfg.rowsInFileBlock              = htonl(pCreate->cfg.rowsInFileBlock);
-  pCreate->cfg.blocksPerTable               = htons(pCreate->cfg.blocksPerTable);
-  pCreate->cfg.cacheNumOfBlocks.totalBlocks = htonl(pCreate->cfg.cacheNumOfBlocks.totalBlocks);
+  pCreate->cfg.vgId                = htonl(pCreate->cfg.vgId);
+  pCreate->cfg.maxTables           = htonl(pCreate->cfg.maxTables);
+  pCreate->cfg.maxCacheSize        = htobe64(pCreate->cfg.maxCacheSize);
+  pCreate->cfg.minRowsPerFileBlock = htonl(pCreate->cfg.minRowsPerFileBlock);
+  pCreate->cfg.maxRowsPerFileBlock = htonl(pCreate->cfg.maxRowsPerFileBlock);
+  pCreate->cfg.daysPerFile         = htonl(pCreate->cfg.daysPerFile);
+  pCreate->cfg.daysToKeep1         = htonl(pCreate->cfg.daysToKeep1);
+  pCreate->cfg.daysToKeep2         = htonl(pCreate->cfg.daysToKeep2);
+  pCreate->cfg.daysToKeep          = htonl(pCreate->cfg.daysToKeep);
+  pCreate->cfg.commitTime          = htonl(pCreate->cfg.commitTime);
+  pCreate->cfg.arbitratorIp        = htonl(pCreate->cfg.arbitratorIp);

   for (int32_t j = 0; j < pCreate->cfg.replications; ++j) {
-    pCreate->vpeerDesc[j].vgId    = htonl(pCreate->vpeerDesc[j].vgId);
-    pCreate->vpeerDesc[j].dnodeId = htonl(pCreate->vpeerDesc[j].dnodeId);
-    pCreate->vpeerDesc[j].ip      = htonl(pCreate->vpeerDesc[j].ip);
+    pCreate->nodes[j].nodeId = htonl(pCreate->nodes[j].nodeId);
+    pCreate->nodes[j].nodeIp = htonl(pCreate->nodes[j].nodeIp);
   }

-  return vnodeCreate(pCreate);
+  void *pVnode = vnodeAccquireVnode(pCreate->cfg.vgId);
+  if (pVnode != NULL) {
+    int32_t code = vnodeAlter(pVnode, pCreate);
+    vnodeRelease(pVnode);
+    return code;
+  } else {
+    return vnodeCreate(pCreate);
+  }
 }

 static int32_t dnodeProcessDropVnodeMsg(SRpcMsg *rpcMsg) {
@@ -157,15 +161,6 @@ static int32_t dnodeProcessDropVnodeMsg(SRpcMsg *rpcMsg) {
   return vnodeDrop(pDrop->vgId);
 }

-static int32_t dnodeProcessAlterVnodeMsg(SRpcMsg *rpcMsg) {
-  SMDCreateVnodeMsg *pCreate = rpcMsg->pCont;
-  pCreate->cfg.vgId        = htonl(pCreate->cfg.vgId);
-  pCreate->cfg.maxSessions = htonl(pCreate->cfg.maxSessions);
-  pCreate->cfg.daysPerFile = htonl(pCreate->cfg.daysPerFile);
-
-  return 0;
-}
-
 static int32_t dnodeProcessAlterStreamMsg(SRpcMsg *pMsg) {
 //  SMDAlterStreamMsg *pStream = pCont;
 //  pStream->uid = htobe64(pStream->uid);

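[reviewer note, not part of the commit] With TSDB_MSG_TYPE_MD_ALTER_VNODE gone, a create request for a vgId that is already open is treated as an alter, and the marshalling rule stays: every multi-byte field of the incoming SMDCreateVnodeMsg is byte-swapped exactly once, at the RPC boundary, before the struct reaches vnodeCreate()/vnodeAlter(). Because htonl()/htobe64() are their own inverses (a byte swap applied twice is the identity, and on big-endian hosts they are no-ops), the same calls serve both the encode side in mgmtBuildCreateVnodeMsg() and the decode side here. A minimal sketch of the convention, illustrative only:

    /* sender (mnode): host to network order */
    pCfg->vgId         = htonl(vgId);
    pCfg->maxCacheSize = htobe64(cacheSize);   /* the one 64-bit field */

    /* receiver (dnode): network to host order, same calls */
    pCreate->cfg.vgId         = htonl(pCreate->cfg.vgId);
    pCreate->cfg.maxCacheSize = htobe64(pCreate->cfg.maxCacheSize);
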
@@ -33,7 +33,6 @@ int32_t dnodeInitMnode() {
   dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_DROP_STABLE]  = dnodeWrite;
   dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = dnodeMgmt;
   dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_DROP_VNODE]   = dnodeMgmt;
-  dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_ALTER_VNODE]  = dnodeMgmt;
   dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = dnodeMgmt;
   dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CONFIG_DNODE] = dnodeMgmt;

@@ -48,14 +48,12 @@ extern "C" {
 #define TSDB_MSG_TYPE_MD_CREATE_VNODE_RSP 16
 #define TSDB_MSG_TYPE_MD_DROP_VNODE       17
 #define TSDB_MSG_TYPE_MD_DROP_VNODE_RSP   18
-#define TSDB_MSG_TYPE_MD_ALTER_VNODE      19
-#define TSDB_MSG_TYPE_MD_ALTER_VNODE_RSP  20
-#define TSDB_MSG_TYPE_MD_DROP_STABLE      21
-#define TSDB_MSG_TYPE_MD_DROP_STABLE_RSP  22
-#define TSDB_MSG_TYPE_MD_ALTER_STREAM     23
-#define TSDB_MSG_TYPE_MD_ALTER_STREAM_RSP 24
-#define TSDB_MSG_TYPE_MD_CONFIG_DNODE     25
-#define TSDB_MSG_TYPE_MD_CONFIG_DNODE_RSP 26
+#define TSDB_MSG_TYPE_MD_DROP_STABLE      19
+#define TSDB_MSG_TYPE_MD_DROP_STABLE_RSP  20
+#define TSDB_MSG_TYPE_MD_ALTER_STREAM     21
+#define TSDB_MSG_TYPE_MD_ALTER_STREAM_RSP 22
+#define TSDB_MSG_TYPE_MD_CONFIG_DNODE     23
+#define TSDB_MSG_TYPE_MD_CONFIG_DNODE_RSP 24

 // message from client to mnode
 #define TSDB_MSG_TYPE_CM_CONNECT          31
@@ -245,12 +243,6 @@ typedef struct SSchema {
   int16_t bytes;
 } SSchema;

-typedef struct {
-  int32_t  vgId;
-  int32_t  dnodeId;
-  uint32_t ip;
-} SVnodeDesc;
-
 typedef struct {
   int32_t contLen;
   int32_t vgId;
@@ -518,12 +510,10 @@ typedef struct {
   uint8_t status;
   uint8_t role;
   uint8_t accessState;
+  uint8_t replica;
   uint8_t reserved[5];
 } SVnodeLoad;

-/*
- * NOTE: sizeof(SVnodeCfg) < TSDB_FILE_HEADER_LEN / 4
- */
 typedef struct {
   char acct[TSDB_USER_LEN + 1];
   char db[TSDB_DB_NAME_LEN + 1];
@@ -548,7 +538,7 @@ typedef struct {
   int8_t  loadLatest;  // load into mem or not
   uint8_t precision;   // time resolution
   int8_t  reserved[16];
-} SVnodeCfg, SDbCfg, SCMCreateDbMsg, SCMAlterDbMsg;
+} SDbCfg, SCMCreateDbMsg, SCMAlterDbMsg;

 typedef struct {
   char db[TSDB_TABLE_ID_LEN + 1];
@@ -614,8 +604,35 @@ typedef struct {
 } SDMStatusRsp;

 typedef struct {
-  SVnodeCfg  cfg;
-  SVnodeDesc vpeerDesc[TSDB_MAX_MPEERS];
+  uint32_t vgId;
+  int32_t  maxTables;
+  int64_t  maxCacheSize;
+  int32_t  minRowsPerFileBlock;
+  int32_t  maxRowsPerFileBlock;
+  int32_t  daysPerFile;
+  int32_t  daysToKeep;
+  int32_t  daysToKeep1;
+  int32_t  daysToKeep2;
+  int32_t  commitTime;
+  uint8_t  precision;   // time resolution
+  int8_t   compression;
+  int8_t   wals;
+  int8_t   commitLog;
+  int8_t   replications;
+  int8_t   quorum;
+  uint32_t arbitratorIp;
+  int8_t   reserved[16];
+} SMDVnodeCfg;
+
+typedef struct {
+  int32_t  nodeId;
+  uint32_t nodeIp;
+  char     nodeName[TSDB_NODE_NAME_LEN + 1];
+} SMDVnodeDesc;
+
+typedef struct {
+  SMDVnodeCfg  cfg;
+  SMDVnodeDesc nodes[TSDB_MAX_MPEERS];
 } SMDCreateVnodeMsg;

 typedef struct {
@@ -673,9 +690,16 @@ typedef struct {
   int32_t metaElem[TSDB_MAX_JOIN_TABLE_NUM];
 } SSuperTableMetaMsg;

+typedef struct {
+  int32_t  nodeId;
+  uint32_t nodeIp;
+  uint16_t nodePort;
+} SVnodeDesc;
+
 typedef struct {
   SVnodeDesc vpeerDesc[TSDB_REPLICA_MAX_NUM];
   int16_t    index;  // used locally
   int32_t    vgId;
   int32_t    numOfSids;
   int32_t    pSidExtInfoList[];  // offset value of STableIdInfo
 } SVnodeSidList;

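[reviewer note, not part of the commit] Removing ALTER_VNODE from the middle of the MD range forces a renumbering of every following message type, and the header keeps the convention that a response code is its request code plus one (DROP_STABLE 19 / DROP_STABLE_RSP 20, and so on). Code relying on that pairing can stay generic; a sketch assuming only the +1 convention visible in the defines:

    static int32_t msgRspOf(int32_t msgType) {
      return msgType + 1;  /* TSDB_MSG_TYPE_x -> TSDB_MSG_TYPE_x_RSP */
    }

Since these values go on the wire, mnodes and dnodes must be upgraded together across this commit.
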
@@ -38,6 +38,7 @@ typedef struct {
 int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg);
 int32_t vnodeDrop(int32_t vgId);
 int32_t vnodeOpen(int32_t vgId, char *rootDir);
+int32_t vnodeAlter(void *pVnode, SMDCreateVnodeMsg *pVnodeCfg);
 int32_t vnodeClose(int32_t vgId);

 void    vnodeRelease(void *pVnode);

@@ -51,7 +51,6 @@ typedef struct SDnodeObj {
   int8_t     reserved[15];
   int8_t     updateEnd[1];
   int32_t    refCount;
-  SVnodeLoad vload[TSDB_MAX_VNODES];
   uint32_t   moduleStatus;
   uint32_t   lastReboot;  // time stamp for last reboot
   float      score;       // calc in balance function
@@ -72,13 +71,6 @@ typedef struct SMnodeObj {
   SDnodeObj *pDnode;
 } SMnodeObj;


-typedef struct {
-  int32_t  dnodeId;
-  uint32_t privateIp;
-  uint32_t publicIp;
-} SVnodeGid;
-
 typedef struct {
   char   tableId[TSDB_TABLE_ID_LEN + 1];
   int8_t type;
@@ -120,24 +112,34 @@ typedef struct {
   SSuperTableObj *superTable;
 } SChildTableObj;

+typedef struct {
+  int32_t    dnodeId;
+  int8_t     role;
+  int8_t     reserved[3];
+  SDnodeObj* pDnode;
+} SVnodeGid;
+
 typedef struct SVgObj {
-  uint32_t       vgId;
-  char           dbName[TSDB_DB_NAME_LEN + 1];
-  int64_t        createdTime;
-  SVnodeGid      vnodeGid[TSDB_VNODES_SUPPORT];
-  int32_t        numOfVnodes;
-  int32_t        lbDnodeId;
-  int32_t        lbTime;
-  int8_t         status;
-  int8_t         inUse;
-  int8_t         reserved[13];
-  int8_t         updateEnd[1];
-  int32_t        refCount;
+  uint32_t        vgId;
+  char            dbName[TSDB_DB_NAME_LEN + 1];
+  int64_t         createdTime;
+  SVnodeGid       vnodeGid[TSDB_VNODES_SUPPORT];
+  int32_t         numOfVnodes;
+  int32_t         lbDnodeId;
+  int32_t         lbTime;
+  int8_t          status;
+  int8_t          inUse;
+  int8_t          reserved[13];
+  int8_t          updateEnd[1];
+  int32_t         refCount;
   struct SVgObj  *prev, *next;
   struct SDbObj  *pDb;
-  int32_t        numOfTables;
-  void *         idPool;
-  SChildTableObj ** tableList;
+  int32_t         numOfTables;
+  int64_t         totalStorage;
+  int64_t         compStorage;
+  int64_t         pointsWritten;
+  void *          idPool;
+  SChildTableObj **tableList;
 } SVgObj;

 typedef struct SDbObj {

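[reviewer note, not part of the commit] SVnodeGid now holds a live SDnodeObj *pDnode instead of copied privateIp/publicIp values, so vgroups always read the dnode's current addresses, and SVgObj gains per-vgroup totalStorage/compStorage/pointsWritten counters in place of the per-dnode vload[] array that was just removed from SDnodeObj. One consequence, handled by mgmtVgroupActionEncode() later in this diff, is that raw pointers must be scrubbed before a row is persisted; a sketch of that idea:

    SVgObj *pTmp = (SVgObj *)pOper->rowData;            /* copy being serialized */
    for (int32_t i = 0; i < TSDB_VNODES_SUPPORT; ++i) {
      pTmp->vnodeGid[i].pDnode = NULL;                  /* runtime-only pointer  */
      pTmp->vnodeGid[i].role   = 0;                     /* runtime-only state    */
    }
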
@@ -35,12 +35,15 @@ void mgmtMonitorDnodeModule();

 int32_t mgmtGetDnodesNum();
 void *  mgmtGetNextDnode(void *pNode, SDnodeObj **pDnode);
-void    mgmtReleaseDnode(SDnodeObj *pDnode);
+void    mgmtIncDnodeRef(SDnodeObj *pDnode);
+void    mgmtDecDnodeRef(SDnodeObj *pDnode);
 void *  mgmtGetDnode(int32_t dnodeId);
 void *  mgmtGetDnodeByIp(uint32_t ip);
 void    mgmtUpdateDnode(SDnodeObj *pDnode);
 int32_t mgmtDropDnode(SDnodeObj *pDnode);

+extern int32_t tsAccessSquence;
+
 #ifdef __cplusplus
 }
 #endif

@@ -24,7 +24,8 @@ extern "C" {
 int32_t   mgmtInitUsers();
 void      mgmtCleanUpUsers();
 SUserObj *mgmtGetUser(char *name);
-void      mgmtReleaseUser(SUserObj *pUser);
+void      mgmtIncUserRef(SUserObj *pUser);
+void      mgmtDecUserRef(SUserObj *pUser);
 SUserObj *mgmtGetUserFromConn(void *pConn, bool *usePublicIp);
 int32_t   mgmtCreateUser(SAcctObj *pAcct, char *name, char *pass);
 void      mgmtDropAllUsers(SAcctObj *pAcct);

@@ -30,12 +30,13 @@ enum _TSDB_VG_STATUS {
 int32_t mgmtInitVgroups();
 void    mgmtCleanUpVgroups();
 SVgObj *mgmtGetVgroup(int32_t vgId);
-void    mgmtReleaseVgroup(SVgObj *pVgroup);
+void    mgmtIncVgroupRef(SVgObj *pVgroup);
+void    mgmtDecVgroupRef(SVgObj *pVgroup);
 void    mgmtDropAllVgroups(SDbObj *pDropDb);

 void *  mgmtGetNextVgroup(void *pNode, SVgObj **pVgroup);
 void    mgmtUpdateVgroup(SVgObj *pVgroup);
-void    mgmtUpdateVgroupStatus(SVgObj *pVgroup, int32_t dnodeId, SVnodeLoad *pVload);
+void    mgmtUpdateVgroupStatus(SVgObj *pVgroup, SDnodeObj *dnodeId, SVnodeLoad *pVload);

 void    mgmtCreateVgroup(SQueuedMsg *pMsg, SDbObj *pDb);
 void    mgmtDropVgroup(SVgObj *pVgroup, void *ahandle);
@@ -46,6 +47,7 @@ void mgmtAddTableIntoVgroup(SVgObj *pVgroup, SChildTableObj *pTable);
 void mgmtRemoveTableFromVgroup(SVgObj *pVgroup, SChildTableObj *pTable);
 void mgmtSendCreateVnodeMsg(SVgObj *pVgroup, SRpcIpSet *ipSet, void *ahandle);
 void mgmtSendDropVnodeMsg(int32_t vgId, SRpcIpSet *ipSet, void *ahandle);
+void mgmtSendCreateVgroupMsg(SVgObj *pVgroup, void *ahandle);

 SRpcIpSet mgmtGetIpSetFromVgroup(SVgObj *pVgroup);
 SRpcIpSet mgmtGetIpSetFromIp(uint32_t ip);

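[reviewer note, not part of the commit] The three headers above replace the single mgmtReleaseXxx() entry point with explicit mgmtIncXxxRef()/mgmtDecXxxRef() pairs for dnodes, users and vgroups, so call sites state whether they are taking or dropping a reference. The pattern used throughout the rest of this commit is:

    SVgObj *pVgroup = mgmtGetVgroup(vgId);  /* get: returns with one reference held */
    if (pVgroup != NULL) {
      /* ... use pVgroup ... */
      mgmtDecVgroupRef(pVgroup);            /* drop the reference when done */
    }
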
@@ -47,7 +47,7 @@ int32_t balanceAllocVnodes(SVgObj *pVgroup) {
       vnodeUsage = usage;
     }
   }
-  mgmtReleaseDnode(pDnode);
+  mgmtDecDnodeRef(pDnode);
 }

 if (pSelDnode == NULL) {
@@ -56,8 +56,7 @@ int32_t balanceAllocVnodes(SVgObj *pVgroup) {
 }

 pVgroup->vnodeGid[0].dnodeId   = pSelDnode->dnodeId;
-pVgroup->vnodeGid[0].privateIp = pSelDnode->privateIp;
-pVgroup->vnodeGid[0].publicIp  = pSelDnode->publicIp;
+pVgroup->vnodeGid[0].pDnode    = pSelDnode;

 mTrace("dnode:%d, alloc one vnode to vgroup, openVnodes:%d", pSelDnode->dnodeId, pSelDnode->openVnodes);
 return TSDB_CODE_SUCCESS;

@@ -527,7 +527,7 @@ static int32_t mgmtGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn)
   pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
   pShow->numOfRows = pUser->pAcct->acctInfo.numOfDbs;

-  mgmtReleaseUser(pUser);
+  mgmtDecUserRef(pUser);
   return 0;
 }

@@ -647,7 +647,7 @@ static int32_t mgmtRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void *
   }

   pShow->numOfReads += numOfRows;
-  mgmtReleaseUser(pUser);
+  mgmtDecUserRef(pUser);
   return numOfRows;
 }

@@ -23,6 +23,7 @@
 #include "tutil.h"
 #include "tsocket.h"
 #include "tbalance.h"
+#include "tsync.h"
 #include "dnode.h"
 #include "mgmtDef.h"
 #include "mgmtLog.h"
@@ -37,6 +38,8 @@

 void *  tsDnodeSdb = NULL;
 int32_t tsDnodeUpdateSize = 0;
+int32_t tsAccessSquence = 0;
+extern void *  tsMnodeSdb;
 extern void *  tsVgroupSdb;

 static int32_t mgmtCreateDnode(uint32_t ip);
@@ -99,7 +102,13 @@ static int32_t mgmtDnodeActionDelete(SSdbOper *pOper) {
     }
   }

-  mgmtDropMnode(pDnode->dnodeId);
+  SMnodeObj *pMnode = mgmtGetMnode(pDnode->dnodeId);
+  if (pMnode != NULL) {
+    SSdbOper oper = {.type = SDB_OPER_LOCAL, .table = tsMnodeSdb, .pObj = pMnode};
+    sdbDeleteRow(&oper);
+    mgmtReleaseMnode(pMnode);
+  }
+
   balanceNotify();

   mTrace("dnode:%d, all vgroups:%d is dropped from sdb", pDnode->dnodeId, numOfVgroups);
@@ -139,7 +148,7 @@ static int32_t mgmtDnodeActionRestored() {
     mgmtCreateDnode(ip);
     SDnodeObj *pDnode = mgmtGetDnodeByIp(ip);
     mgmtAddMnode(pDnode->dnodeId);
-    mgmtReleaseDnode(pDnode);
+    mgmtDecDnodeRef(pDnode);
   }

   return TSDB_CODE_SUCCESS;
@@ -215,13 +224,17 @@ void *mgmtGetDnodeByIp(uint32_t ip) {
     if (ip == pDnode->privateIp) {
       return pDnode;
     }
-    mgmtReleaseDnode(pDnode);
+    mgmtDecDnodeRef(pDnode);
   }

   return NULL;
 }

-void mgmtReleaseDnode(SDnodeObj *pDnode) {
+void mgmtIncDnodeRef(SDnodeObj *pDnode) {
+  sdbIncRef(tsDnodeSdb, pDnode);
+}
+
+void mgmtDecDnodeRef(SDnodeObj *pDnode) {
   sdbDecRef(tsDnodeSdb, pDnode);
 }

@@ -318,27 +331,27 @@ void mgmtProcessDnodeStatusMsg(SRpcMsg *rpcMsg) {
   pDnode->alternativeRole = pStatus->alternativeRole;
   pDnode->totalVnodes     = pStatus->numOfTotalVnodes;
   pDnode->moduleStatus    = pStatus->moduleStatus;
+  pDnode->lastAccess      = tsAccessSquence;

   if (pStatus->dnodeId == 0) {
     mTrace("dnode:%d, first access, privateIp:%s, name:%s", pDnode->dnodeId, taosIpStr(pDnode->privateIp), pDnode->dnodeName);
   } else {
     mTrace("dnode:%d, status received, access times %d", pDnode->dnodeId, pDnode->lastAccess);
   }

   int32_t openVnodes = htons(pStatus->openVnodes);
   for (int32_t j = 0; j < openVnodes; ++j) {
     SVnodeLoad *pVload = &pStatus->load[j];
-    pDnode->vload[j].vgId          = htonl(pVload->vgId);
-    pDnode->vload[j].totalStorage  = htobe64(pVload->totalStorage);
-    pDnode->vload[j].compStorage   = htobe64(pVload->compStorage);
-    pDnode->vload[j].pointsWritten = htobe64(pVload->pointsWritten);
+    pVload->vgId = htonl(pVload->vgId);

-    SVgObj *pVgroup = mgmtGetVgroup(pDnode->vload[j].vgId);
+    SVgObj *pVgroup = mgmtGetVgroup(pVload->vgId);
     if (pVgroup == NULL) {
       SRpcIpSet ipSet = mgmtGetIpSetFromIp(pDnode->privateIp);
-      mPrint("dnode:%d, vgroup:%d not exist in mnode, drop it", pDnode->dnodeId, pDnode->vload[j].vgId);
-      mgmtSendDropVnodeMsg(pDnode->vload[j].vgId, &ipSet, NULL);
+      mPrint("dnode:%d, vgroup:%d not exist in mnode, drop it", pDnode->dnodeId, pVload->vgId);
+      mgmtSendDropVnodeMsg(pVload->vgId, &ipSet, NULL);
     } else {
-      mgmtUpdateVgroupStatus(pVgroup, pDnode->dnodeId, pVload);
-      mgmtReleaseVgroup(pVgroup);
+      mgmtUpdateVgroupStatus(pVgroup, pDnode, pVload);
+      mgmtDecVgroupRef(pVgroup);
     }
   }

@@ -348,7 +361,7 @@ void mgmtProcessDnodeStatusMsg(SRpcMsg *rpcMsg) {
     balanceNotify();
   }

-  mgmtReleaseDnode(pDnode);
+  mgmtDecDnodeRef(pDnode);

   int32_t contLen = sizeof(SDMStatusRsp) + TSDB_MAX_VNODES * sizeof(SDMVgroupAccess);
   SDMStatusRsp *pRsp = rpcMallocCont(contLen);
@@ -444,7 +457,7 @@ static int32_t mgmtDropDnodeByIp(uint32_t ip) {
     return TSDB_CODE_NO_REMOVE_MASTER;
   }

-#ifndef _VPEER
+#ifndef _SYNC
   return mgmtDropDnode(pDnode);
 #else
   return balanceDropDnode(pDnode);
@@ -554,7 +567,7 @@ static int32_t mgmtGetDnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pCo
   pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
   pShow->pNode = NULL;

-  mgmtReleaseUser(pUser);
+  mgmtDecUserRef(pUser);

   return 0;
 }
@@ -604,7 +617,7 @@ static int32_t mgmtRetrieveDnodes(SShowObj *pShow, char *data, int32_t rows, voi


     numOfRows++;
-    mgmtReleaseDnode(pDnode);
+    mgmtDecDnodeRef(pDnode);
   }

   pShow->numOfReads += numOfRows;
@@ -661,7 +674,7 @@ static int32_t mgmtGetModuleMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC
   pShow->numOfRows = mgmtGetDnodesNum() * TSDB_MOD_MAX;
   pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
   pShow->pNode = NULL;
-  mgmtReleaseUser(pUser);
+  mgmtDecUserRef(pUser);

   return 0;
 }
@@ -712,7 +725,7 @@ int32_t mgmtRetrieveModules(SShowObj *pShow, char *data, int32_t rows, void *pCo
       numOfRows++;
     }

-    mgmtReleaseDnode(pDnode);
+    mgmtDecDnodeRef(pDnode);
   }

   pShow->numOfReads += numOfRows;
@@ -762,7 +775,7 @@ static int32_t mgmtGetConfigMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC

   pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
   pShow->pNode = NULL;
-  mgmtReleaseUser(pUser);
+  mgmtDecUserRef(pUser);

   return 0;
 }
@@ -840,35 +853,18 @@ static int32_t mgmtGetVnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pCo
   if (pShow->payloadLen > 0 ) {
     uint32_t ip = ip2uint(pShow->payload);
     pDnode = mgmtGetDnodeByIp(ip);
     if (NULL == pDnode) {
       return TSDB_CODE_NODE_OFFLINE;
     }
-
-    SVnodeLoad* pVnode;
-    pShow->numOfRows = 0;
-    for (int32_t i = 0 ; i < TSDB_MAX_VNODES; i++) {
-      pVnode = &pDnode->vload[i];
-      if (0 != pVnode->vgId) {
-        pShow->numOfRows++;
-      }
-    }
-
-    pShow->pNode = pDnode;
   } else {
-    while (true) {
-      pShow->pNode = mgmtGetNextDnode(pShow->pNode, (SDnodeObj **)&pDnode);
-      if (pDnode == NULL) break;
-      pShow->numOfRows += pDnode->openVnodes;
-    }
-
-    if (0 == pShow->numOfRows) return TSDB_CODE_NODE_OFFLINE;
+    mgmtGetNextDnode(NULL, (SDnodeObj **)&pDnode);
   }

-  pShow->pNode = NULL;
+  if (pDnode != NULL) {
+    pShow->numOfRows += pDnode->openVnodes;
+    mgmtDecDnodeRef(pDnode);
+  }

   pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
-  mgmtReleaseDnode(pDnode);
-  mgmtReleaseUser(pUser);
+  pShow->pNode = pDnode;
+  mgmtDecUserRef(pUser);

   return 0;
 }
@@ -881,32 +877,32 @@ static int32_t mgmtRetrieveVnodes(SShowObj *pShow, char *data, int32_t rows, voi

   if (0 == rows) return 0;

   if (pShow->payloadLen) {
     // output the vnodes info of the designated dnode. And output all vnodes of this dnode, instead of rows (max 100)
-    pDnode = (SDnodeObj *)(pShow->pNode);
-    if (pDnode != NULL) {
-      SVnodeLoad* pVnode;
-      for (int32_t i = 0 ; i < TSDB_MAX_VNODES; i++) {
-        pVnode = &pDnode->vload[i];
-        if (0 == pVnode->vgId) {
-          continue;
-        }
-
-        cols = 0;
-
-        pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
-        *(uint32_t *)pWrite = pVnode->vgId;
-        cols++;
-
-        pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
-        strcpy(pWrite, pVnode->status ? "ready" : "offline");
-        cols++;
-
-        numOfRows++;
-      }
+    pDnode = (SDnodeObj *)(pShow->pNode);
+    if (pDnode != NULL) {
+      void *  pNode = NULL;
+      SVgObj *pVgroup;
+      while (1) {
+        pNode = mgmtGetNextVgroup(pNode, &pVgroup);
+        if (pVgroup == NULL) break;
+
+        for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
+          SVnodeGid *pVgid = &pVgroup->vnodeGid[i];
+          if (pVgid->pDnode == pDnode) {
+            cols = 0;
+
+            pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
+            *(uint32_t *)pWrite = pVgroup->vgId;
+            cols++;
+
+            pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
+            strcpy(pWrite, mgmtGetMnodeRoleStr(pVgid->role));
+            cols++;
+
+            numOfRows++;
+          }
+        }
+
+        mgmtDecVgroupRef(pVgroup);
+      }
     } else {
       // TODO: output all vnodes of all dnodes
       numOfRows = 0;
     }

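[reviewer note, not part of the commit] mgmtProcessDnodeStatusMsg() no longer copies each SVnodeLoad into pDnode->vload[] (that array was dropped from SDnodeObj); it byte-swaps pVload->vgId in place and hands the still-network-order load counters to mgmtUpdateVgroupStatus(), which does its own htobe64() on totalStorage/compStorage/pointsWritten. A sketch of the ownership split, illustrative only:

    pVload->vgId = htonl(pVload->vgId);                 /* swapped here, used for lookup  */
    SVgObj *pVgroup = mgmtGetVgroup(pVload->vgId);
    if (pVgroup != NULL) {
      mgmtUpdateVgroupStatus(pVgroup, pDnode, pVload);  /* swaps the 64-bit counters */
      mgmtDecVgroupRef(pVgroup);
    }
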
@@ -149,12 +149,12 @@ void mgmtCleanUpSystem() {
   mgmtCleanUpShell();
   mgmtCleanupDClient();
   mgmtCleanupDServer();
-  mgmtCleanUpAccts();
   mgmtCleanUpTables();
   mgmtCleanUpVgroups();
   mgmtCleanUpDbs();
   mgmtCleanupDnodes();
   mgmtCleanUpUsers();
+  mgmtCleanUpAccts();
   sdbCleanUp();
   taosTmrCleanUp(tsMgmtTmr);
   tsMgmtIsRunning = false;

@@ -30,7 +30,7 @@
 #include "mgmtShell.h"
 #include "mgmtUser.h"

-static void *  tsMnodeSdb = NULL;
+void *  tsMnodeSdb = NULL;
 static int32_t tsMnodeUpdateSize = 0;
 static int32_t mgmtGetMnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn);
 static int32_t mgmtRetrieveMnodes(SShowObj *pShow, char *data, int32_t rows, void *pConn);
@@ -47,7 +47,7 @@ static int32_t mgmtMnodeActionInsert(SSdbOper *pOper) {

   pMnode->pDnode = pDnode;
   pDnode->isMgmt = true;
-  mgmtReleaseDnode(pDnode);
+  mgmtDecDnodeRef(pDnode);

   return TSDB_CODE_SUCCESS;
 }
@@ -58,7 +58,7 @@ static int32_t mgmtMnodeActionDelete(SSdbOper *pOper) {
   SDnodeObj *pDnode = mgmtGetDnode(pMnode->mnodeId);
   if (pDnode == NULL) return TSDB_CODE_DNODE_NOT_EXIST;
   pDnode->isMgmt = false;
-  mgmtReleaseDnode(pDnode);
+  mgmtDecDnodeRef(pDnode);

   mTrace("mnode:%d, is dropped from sdb", pMnode->mnodeId);
   return TSDB_CODE_SUCCESS;
@@ -314,7 +314,7 @@ static int32_t mgmtGetMnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pCo
   pShow->numOfRows = mgmtGetMnodesNum();
   pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
   pShow->pNode = NULL;
-  mgmtReleaseUser(pUser);
+  mgmtDecUserRef(pUser);

   return 0;
 }

@@ -790,12 +790,12 @@ void *mgmtMallocQueuedMsg(SRpcMsg *rpcMsg) {
 void mgmtFreeQueuedMsg(SQueuedMsg *pMsg) {
   if (pMsg != NULL) {
     rpcFreeCont(pMsg->pCont);
-    if (pMsg->pUser) mgmtReleaseUser(pMsg->pUser);
+    if (pMsg->pUser) mgmtDecUserRef(pMsg->pUser);
     if (pMsg->pDb) mgmtDecDbRef(pMsg->pDb);
-    if (pMsg->pVgroup) mgmtReleaseVgroup(pMsg->pVgroup);
+    if (pMsg->pVgroup) mgmtDecVgroupRef(pMsg->pVgroup);
     if (pMsg->pTable) mgmtDecTableRef(pMsg->pTable);
     if (pMsg->pAcct) mgmtDecAcctRef(pMsg->pAcct);
-    if (pMsg->pDnode) mgmtReleaseDnode(pMsg->pDnode);
+    if (pMsg->pDnode) mgmtDecDnodeRef(pMsg->pDnode);
     free(pMsg);
   }
 }

@@ -371,11 +371,11 @@ static int mgmtShellRetriveAuth(char *user, char *spi, char *encrypt, char *secr
   SUserObj *pUser = mgmtGetUser(user);
   if (pUser == NULL) {
     *secret = 0;
-    mgmtReleaseUser(pUser);
+    mgmtDecUserRef(pUser);
     return TSDB_CODE_INVALID_USER;
   } else {
     memcpy(secret, pUser->pass, TSDB_KEY_LEN);
-    mgmtReleaseUser(pUser);
+    mgmtDecUserRef(pUser);
     return TSDB_CODE_SUCCESS;
   }
 }

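[reviewer note, not part of the commit] In mgmtShellRetriveAuth() the pUser == NULL branch still passes the NULL pointer to mgmtDecUserRef(), exactly as the old mgmtReleaseUser() call did. That is only safe if sdbDecRef() tolerates a NULL object; if it does not, the guard would belong at this call site, along these lines (hypothetical sketch, not code from the commit):

    if (pUser == NULL) {
      *secret = 0;
      return TSDB_CODE_INVALID_USER;   /* nothing to release */
    }
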
@@ -97,7 +97,7 @@ static int32_t mgmtChildTableActionInsert(SSdbOper *pOper) {
     mError("ctable:%s, not in vgroup:%d", pTable->info.tableId, pTable->vgId);
     return TSDB_CODE_INVALID_VGROUP_ID;
   }
-  mgmtReleaseVgroup(pVgroup);
+  mgmtDecVgroupRef(pVgroup);

   SDbObj *pDb = mgmtGetDb(pVgroup->dbName);
   if (pDb == NULL) {
@@ -139,7 +139,7 @@ static int32_t mgmtChildTableActionDelete(SSdbOper *pOper) {
   if (pVgroup == NULL) {
     return TSDB_CODE_INVALID_VGROUP_ID;
   }
-  mgmtReleaseVgroup(pVgroup);
+  mgmtDecVgroupRef(pVgroup);

   SDbObj *pDb = mgmtGetDb(pVgroup->dbName);
   if (pDb == NULL) {
@@ -275,7 +275,7 @@ static int32_t mgmtChildTableActionRestored() {
       pNode = pLastNode;
       continue;
     }
-    mgmtReleaseVgroup(pVgroup);
+    mgmtDecVgroupRef(pVgroup);

     if (strcmp(pVgroup->dbName, pDb->name) != 0) {
       mError("ctable:%s, db:%s not match with vgroup:%d db:%s sid:%d, discard it",
@@ -1194,17 +1194,15 @@ static void mgmtProcessSuperTableVgroupMsg(SQueuedMsg *pMsg) {

     pRsp->vgroups[vg].vgId = htonl(vgId);
     for (int32_t vn = 0; vn < pVgroup->numOfVnodes; ++vn) {
-      SDnodeObj *pDnode = mgmtGetDnode(pVgroup->vnodeGid[vn].dnodeId);
+      SDnodeObj *pDnode = pVgroup->vnodeGid[vn].pDnode;
       if (pDnode == NULL) break;

       pRsp->vgroups[vg].ipAddr[vn].ip = htonl(pDnode->privateIp);
       pRsp->vgroups[vg].ipAddr[vn].port = htons(tsDnodeShellPort);
       pRsp->vgroups[vg].numOfIps++;
-
-      mgmtReleaseDnode(pDnode);
     }

-    mgmtReleaseVgroup(pVgroup);
+    mgmtDecVgroupRef(pVgroup);
   }
   pRsp->numOfVgroups = htonl(vg);

@@ -1613,7 +1611,7 @@ static int32_t mgmtDoGetChildTableMeta(SQueuedMsg *pMsg, STableMetaMsg *pMeta) {
       pMeta->vgroup.ipAddr[i].port = htonl(tsDnodeShellPort);
     }
     pMeta->vgroup.numOfIps++;
-    mgmtReleaseDnode(pDnode);
+    mgmtDecDnodeRef(pDnode);
   }
   pMeta->vgroup.vgId = htonl(pVgroup->vgId);

@@ -1742,7 +1740,7 @@ static SChildTableObj* mgmtGetTableByPos(uint32_t dnodeId, int32_t vnode, int32_

   SChildTableObj *pTable = pVgroup->tableList[sid];
   mgmtIncTableRef((STableObj *)pTable);
-  mgmtReleaseVgroup(pVgroup);
+  mgmtDecVgroupRef(pVgroup);
   return pTable;
 }

@@ -150,7 +150,11 @@ SUserObj *mgmtGetUser(char *name) {
   return (SUserObj *)sdbGetRow(tsUserSdb, name);
 }

-void mgmtReleaseUser(SUserObj *pUser) {
+void mgmtIncUserRef(SUserObj *pUser) {
+  return sdbIncRef(tsUserSdb, pUser);
+}
+
+void mgmtDecUserRef(SUserObj *pUser) {
   return sdbDecRef(tsUserSdb, pUser);
 }

@@ -183,7 +187,7 @@ int32_t mgmtCreateUser(SAcctObj *pAcct, char *name, char *pass) {
   SUserObj *pUser = mgmtGetUser(name);
   if (pUser != NULL) {
     mTrace("user:%s is already there", name);
-    mgmtReleaseUser(pUser);
+    mgmtDecUserRef(pUser);
     return TSDB_CODE_USER_ALREADY_EXIST;
   }

@@ -273,7 +277,7 @@ static int32_t mgmtGetUserMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pCon
   pShow->numOfRows = pUser->pAcct->acctInfo.numOfUsers;
   pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];

-  mgmtReleaseUser(pUser);
+  mgmtDecUserRef(pUser);
   return 0;
 }

@@ -308,7 +312,7 @@ static int32_t mgmtRetrieveUsers(SShowObj *pShow, char *data, int32_t rows, void
     cols++;

     numOfRows++;
-    mgmtReleaseUser(pUser);
+    mgmtDecUserRef(pUser);
   }
   pShow->numOfReads += numOfRows;
   return numOfRows;
@@ -356,7 +360,7 @@ static void mgmtProcessAlterUserMsg(SQueuedMsg *pMsg) {

   if (strcmp(pUser->user, "monitor") == 0 || (strcmp(pUser->user + 1, pUser->acct) == 0 && pUser->user[0] == '_')) {
     mgmtSendSimpleResp(pMsg->thandle, TSDB_CODE_NO_RIGHTS);
-    mgmtReleaseUser(pUser);
+    mgmtDecUserRef(pUser);
     return;
   }

@@ -432,7 +436,7 @@ static void mgmtProcessAlterUserMsg(SQueuedMsg *pMsg) {
     mgmtSendSimpleResp(pMsg->thandle, TSDB_CODE_NO_RIGHTS);
   }

-  mgmtReleaseUser(pUser);
+  mgmtDecUserRef(pUser);
 }

 static void mgmtProcessDropUserMsg(SQueuedMsg *pMsg) {
@@ -449,7 +453,7 @@ static void mgmtProcessDropUserMsg(SQueuedMsg *pMsg) {
   if (strcmp(pUser->user, "monitor") == 0 || strcmp(pUser->user, pUser->acct) == 0 ||
     (strcmp(pUser->user + 1, pUser->acct) == 0 && pUser->user[0] == '_')) {
     mgmtSendSimpleResp(pMsg->thandle, TSDB_CODE_NO_RIGHTS);
-    mgmtReleaseUser(pUser);
+    mgmtDecUserRef(pUser);
     return ;
   }

@@ -478,7 +482,7 @@ static void mgmtProcessDropUserMsg(SQueuedMsg *pMsg) {
   }

   mgmtSendSimpleResp(pMsg->thandle, code);
-  mgmtReleaseUser(pUser);
+  mgmtDecUserRef(pUser);
 }

 void mgmtDropAllUsers(SAcctObj *pAcct) {
@@ -504,7 +508,7 @@ void mgmtDropAllUsers(SAcctObj *pAcct) {
     numOfUsers++;
   }

-  mgmtReleaseUser(pUser);
+  mgmtDecUserRef(pUser);
 }

 mTrace("acct:%s, all users:%d is dropped from sdb", pAcct->user, numOfUsers);

@@ -44,9 +44,7 @@ static int32_t mgmtRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, vo
 static void mgmtProcessCreateVnodeRsp(SRpcMsg *rpcMsg);
 static void mgmtProcessDropVnodeRsp(SRpcMsg *rpcMsg);
 static void mgmtProcessVnodeCfgMsg(SRpcMsg *rpcMsg) ;
-
-static void mgmtSendDropVgroupMsg(SVgObj *pVgroup, void *ahandle);
-static void mgmtSendCreateVgroupMsg(SVgObj *pVgroup, void *ahandle);
+static void mgmtSendDropVgroupMsg(SVgObj *pVgroup, void *ahandle);

 static int32_t mgmtVgroupActionDestroy(SSdbOper *pOper) {
   SVgObj *pVgroup = pOper->pObj;
@@ -68,7 +66,6 @@ static int32_t mgmtVgroupActionInsert(SSdbOper *pOper) {
   if (pDb == NULL) {
     return TSDB_CODE_INVALID_DB;
   }
-  mgmtDecDbRef(pDb);

   pVgroup->pDb = pDb;
   pVgroup->prev = NULL;
@@ -91,15 +88,13 @@
   for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
     SDnodeObj *pDnode = mgmtGetDnode(pVgroup->vnodeGid[i].dnodeId);
     if (pDnode != NULL) {
-      pVgroup->vnodeGid[i].privateIp = pDnode->privateIp;
-      pVgroup->vnodeGid[i].publicIp = pDnode->publicIp;
+      pVgroup->vnodeGid[i].pDnode = pDnode;
       atomic_add_fetch_32(&pDnode->openVnodes, 1);
-      mgmtReleaseDnode(pDnode);
+      mgmtDecDnodeRef(pDnode);
     }
   }

   mgmtAddVgroupIntoDb(pVgroup);
+  mgmtIncDbRef(pVgroup->pDb);

   return TSDB_CODE_SUCCESS;
 }
@@ -115,10 +110,10 @@ static int32_t mgmtVgroupActionDelete(SSdbOper *pOper) {

   for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
     SDnodeObj *pDnode = mgmtGetDnode(pVgroup->vnodeGid[i].dnodeId);
-    if (pDnode) {
+    if (pDnode != NULL) {
       atomic_sub_fetch_32(&pDnode->openVnodes, 1);
     }
-    mgmtReleaseDnode(pDnode);
+    mgmtDecDnodeRef(pDnode);
   }

   return TSDB_CODE_SUCCESS;
@@ -127,9 +122,25 @@
 static int32_t mgmtVgroupActionUpdate(SSdbOper *pOper) {
   SVgObj *pNew = pOper->pObj;
   SVgObj *pVgroup = mgmtGetVgroup(pNew->vgId);

+  if (pVgroup != pNew) {
+    for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
+      SDnodeObj *pDnode = pVgroup->vnodeGid[i].pDnode;
+      if (pDnode != NULL) {
+        atomic_sub_fetch_32(&pDnode->openVnodes, 1);
+      }
+    }
+
     memcpy(pVgroup, pNew, pOper->rowSize);
     free(pNew);
+
+    for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
+      SDnodeObj *pDnode = mgmtGetDnode(pVgroup->vnodeGid[i].dnodeId);
+      pVgroup->vnodeGid[i].pDnode = pDnode;
+      if (pDnode != NULL) {
+        atomic_add_fetch_32(&pDnode->openVnodes, 1);
+      }
+    }
+  }

   int32_t oldTables = taosIdPoolMaxSize(pVgroup->idPool);
@@ -150,6 +161,12 @@
 static int32_t mgmtVgroupActionEncode(SSdbOper *pOper) {
   SVgObj *pVgroup = pOper->pObj;
   memcpy(pOper->rowData, pVgroup, tsVgUpdateSize);
+  SVgObj *pTmpVgroup = pOper->rowData;
+  for (int32_t i = 0; i < TSDB_VNODES_SUPPORT; ++i) {
+    pTmpVgroup->vnodeGid[i].pDnode = NULL;
+    pTmpVgroup->vnodeGid[i].role = 0;
+  }
+
   pOper->rowSize = tsVgUpdateSize;
   return TSDB_CODE_SUCCESS;
 }
@@ -204,7 +221,11 @@ int32_t mgmtInitVgroups() {
   return 0;
 }

-void mgmtReleaseVgroup(SVgObj *pVgroup) {
+void mgmtIncVgroupRef(SVgObj *pVgroup) {
+  return sdbIncRef(tsVgroupSdb, pVgroup);
+}
+
+void mgmtDecVgroupRef(SVgObj *pVgroup) {
   return sdbDecRef(tsVgroupSdb, pVgroup);
 }

@@ -224,16 +245,38 @@ void mgmtUpdateVgroup(SVgObj *pVgroup) {
   mgmtSendCreateVgroupMsg(pVgroup, NULL);
 }

-void mgmtUpdateVgroupStatus(SVgObj *pVgroup, int32_t dnodeId, SVnodeLoad *pVload) {
-  if (pVload->role == TAOS_SYNC_ROLE_MASTER) {
-    for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
-      SVnodeGid *pVgid = &pVgroup->vnodeGid[i];
-      if (pVgid->dnodeId == dnodeId) {
+void mgmtUpdateVgroupStatus(SVgObj *pVgroup, SDnodeObj *pDnode, SVnodeLoad *pVload) {
+  bool dnodeExist = false;
+  for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
+    SVnodeGid *pVgid = &pVgroup->vnodeGid[i];
+    if (pVgid->pDnode == pDnode) {
+      pVgid->role = pVload->role;
+      if (pVload->role == TAOS_SYNC_ROLE_MASTER) {
         pVgroup->inUse = i;
-        break;
       }
+      dnodeExist = true;
+      break;
     }
   }
-}

+  if (!dnodeExist) {
+    SRpcIpSet ipSet = mgmtGetIpSetFromIp(pDnode->privateIp);
+    mError("vgroup:%d, dnode:%d not exist in mnode, drop it", pVload->vgId, pDnode->dnodeId);
+    mgmtSendDropVnodeMsg(pVload->vgId, &ipSet, NULL);
+    return;
+  }
+
+  if (pVload->role == TAOS_SYNC_ROLE_MASTER) {
+    pVgroup->totalStorage = htobe64(pVload->totalStorage);
+    pVgroup->compStorage = htobe64(pVload->compStorage);
+    pVgroup->pointsWritten = htobe64(pVload->pointsWritten);
+  }
+
+  if (pVload->replica != pVgroup->numOfVnodes) {
+    mError("dnode:%d, vgroup:%d replica:%d not match with mgmt:%d", pDnode->dnodeId, pVload->vgId, pVload->replica,
+           pVgroup->numOfVnodes);
+    mgmtSendCreateVgroupMsg(pVgroup, NULL);
+  }
+}

 SVgObj *mgmtGetAvailableVgroup(SDbObj *pDb) {
@@ -340,7 +383,7 @@ int32_t mgmtGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
     mgmtDecTableRef(pTable);
     pVgroup = mgmtGetVgroup(((SChildTableObj*)pTable)->vgId);
     if (NULL == pVgroup) return TSDB_CODE_INVALID_TABLE_ID;
-    mgmtReleaseVgroup(pVgroup);
+    mgmtDecVgroupRef(pVgroup);
     maxReplica = pVgroup->numOfVnodes > maxReplica ? pVgroup->numOfVnodes : maxReplica;
   } else {
     SVgObj *pVgroup = pDb->pHead;
@@ -391,27 +434,6 @@ int32_t mgmtGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
   return 0;
 }

-char *mgmtGetVnodeStatus(SVgObj *pVgroup, SVnodeGid *pVnode) {
-  SDnodeObj *pDnode = mgmtGetDnode(pVnode->dnodeId);
-  if (pDnode == NULL) {
-    mError("vgroup:%d, not exist in dnode:%d", pVgroup->vgId, pDnode->dnodeId);
-    return "null";
-  }
-  mgmtReleaseDnode(pDnode);
-
-  if (pDnode->status == TAOS_DN_STATUS_OFFLINE) {
-    return "offline";
-  }
-
-  for (int i = 0; i < pDnode->openVnodes; ++i) {
-    if (pDnode->vload[i].vgId == pVgroup->vgId) {
-      return pDnode->vload[i].status ? "ready" : "offline";
-    }
-  }
-
-  return "null";
-}
-
 int32_t mgmtRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pConn) {
   int32_t numOfRows = 0;
   SVgObj *pVgroup = NULL;
@@ -453,19 +475,24 @@ int32_t mgmtRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pCo
       *(int16_t *) pWrite = pVgroup->vnodeGid[i].dnodeId;
       cols++;

-      tinet_ntoa(ipstr, pVgroup->vnodeGid[i].privateIp);
-      pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
-      strcpy(pWrite, ipstr);
-      cols++;
+      SDnodeObj *pDnode = pVgroup->vnodeGid[i].pDnode;

-      pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
-      if (pVgroup->vnodeGid[i].dnodeId != 0) {
-        char *vnodeStatus = mgmtGetVnodeStatus(pVgroup, pVgroup->vnodeGid + i);
-        strcpy(pWrite, vnodeStatus);
+      if (pDnode != NULL) {
+        tinet_ntoa(ipstr, pDnode->privateIp);
+        pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
+        strcpy(pWrite, ipstr);
+        cols++;
+        pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
+        strcpy(pWrite, mgmtGetMnodeRoleStr(pVgroup->vnodeGid[i].role));
+        cols++;
+      } else {
+        pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
+        strcpy(pWrite, "null");
+        cols++;
+        pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
+        strcpy(pWrite, "null");
+        cols++;
       }
-      cols++;
     }

     numOfRows++;
@@ -506,27 +533,38 @@ SMDCreateVnodeMsg *mgmtBuildCreateVnodeMsg(SVgObj *pVgroup) {
   SMDCreateVnodeMsg *pVnode = rpcMallocCont(sizeof(SMDCreateVnodeMsg));
   if (pVnode == NULL) return NULL;

-  pVnode->cfg = pDb->cfg;
-
-  SVnodeCfg *pCfg = &pVnode->cfg;
-  pCfg->vgId                         = htonl(pVgroup->vgId);
-  pCfg->maxSessions                  = htonl(pCfg->maxSessions);
-  pCfg->cacheBlockSize               = htonl(pCfg->cacheBlockSize);
-  pCfg->cacheNumOfBlocks.totalBlocks = htonl(pCfg->cacheNumOfBlocks.totalBlocks);
-  pCfg->maxCacheSize                 = htobe64((int64_t)pDb->cfg.cacheBlockSize * pDb->cfg.cacheNumOfBlocks.totalBlocks);
-  pCfg->daysPerFile                  = htonl(pCfg->daysPerFile);
-  pCfg->daysToKeep1                  = htonl(pCfg->daysToKeep1);
-  pCfg->daysToKeep2                  = htonl(pCfg->daysToKeep2);
-  pCfg->daysToKeep                   = htonl(pDb->cfg.daysToKeep);
-  pCfg->commitTime                   = htonl(pCfg->commitTime);
-  pCfg->rowsInFileBlock              = htonl(pCfg->rowsInFileBlock);
-  pCfg->blocksPerTable               = htons(pCfg->blocksPerTable);
-  pCfg->compression                  = pDb->cfg.compression;
-  pCfg->replications                 = (int8_t) pVgroup->numOfVnodes;
+  SMDVnodeCfg *pCfg = &pVnode->cfg;
+  pCfg->vgId                = htonl(pVgroup->vgId);
+  pCfg->maxTables           = htonl(pDb->cfg.maxSessions);
+  pCfg->maxCacheSize        = htobe64(-1);
+  pCfg->minRowsPerFileBlock = htonl(-1);
+  pCfg->maxRowsPerFileBlock = htonl(-1);
+  pCfg->daysPerFile         = htonl(pDb->cfg.daysPerFile);
+  pCfg->daysToKeep1         = htonl(pDb->cfg.daysToKeep1);
+  pCfg->daysToKeep2         = htonl(pDb->cfg.daysToKeep2);
+  pCfg->daysToKeep          = htonl(-1);
+  pCfg->commitTime          = htonl(pDb->cfg.commitTime);
+  pCfg->precision           = pDb->cfg.precision;
+  pCfg->compression         = -1;
+  pCfg->wals                = 3;
+  pCfg->commitLog           = pDb->cfg.commitLog;
+  pCfg->replications        = (int8_t) pVgroup->numOfVnodes;
+  pCfg->quorum              = 1;

-  SVnodeDesc *vpeerDesc = pVnode->vpeerDesc;
+  SMDVnodeDesc *pNodes = pVnode->nodes;
   for (int32_t j = 0; j < pVgroup->numOfVnodes; ++j) {
-    vpeerDesc[j].vgId    = htonl(pVgroup->vgId);
-    vpeerDesc[j].dnodeId = htonl(pVgroup->vnodeGid[j].dnodeId);
-    vpeerDesc[j].ip      = htonl(pVgroup->vnodeGid[j].privateIp);
+    SDnodeObj *pDnode = pVgroup->vnodeGid[j].pDnode;
+    if (pDnode != NULL) {
+      pNodes[j].nodeId = htonl(pDnode->dnodeId);
+      pNodes[j].nodeIp = htonl(pDnode->privateIp);
+      strcpy(pNodes[j].nodeName, pDnode->dnodeName);
+      if (j == 0) {
+        pCfg->arbitratorIp = htonl(pDnode->privateIp);
+      }
+    }
   }

   return pVnode;
@@ -539,7 +577,7 @@ SRpcIpSet mgmtGetIpSetFromVgroup(SVgObj *pVgroup) {
     .port = tsDnodeMnodePort
   };
   for (int i = 0; i < pVgroup->numOfVnodes; ++i) {
-    ipSet.ip[i] = pVgroup->vnodeGid[i].privateIp;
+    ipSet.ip[i] = pVgroup->vnodeGid[i].pDnode->privateIp;
   }
   return ipSet;
 }
@@ -570,7 +608,7 @@ void mgmtSendCreateVnodeMsg(SVgObj *pVgroup, SRpcIpSet *ipSet, void *ahandle) {
 void mgmtSendCreateVgroupMsg(SVgObj *pVgroup, void *ahandle) {
   mTrace("vgroup:%d, send create all vnodes msg, ahandle:%p", pVgroup->vgId, ahandle);
   for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
-    SRpcIpSet ipSet = mgmtGetIpSetFromIp(pVgroup->vnodeGid[i].privateIp);
+    SRpcIpSet ipSet = mgmtGetIpSetFromIp(pVgroup->vnodeGid[i].pDnode->privateIp);
     mgmtSendCreateVnodeMsg(pVgroup, &ipSet, ahandle);
   }
 }
@@ -636,7 +674,7 @@ void mgmtSendDropVnodeMsg(int32_t vgId, SRpcIpSet *ipSet, void *ahandle) {
 static void mgmtSendDropVgroupMsg(SVgObj *pVgroup, void *ahandle) {
   mTrace("vgroup:%d, send drop all vnodes msg, ahandle:%p", pVgroup->vgId, ahandle);
   for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
-    SRpcIpSet ipSet = mgmtGetIpSetFromIp(pVgroup->vnodeGid[i].privateIp);
+    SRpcIpSet ipSet = mgmtGetIpSetFromIp(pVgroup->vnodeGid[i].pDnode->privateIp);
     mgmtSendDropVnodeMsg(pVgroup->vgId, &ipSet, ahandle);
   }
 }
@@ -687,7 +725,7 @@ static void mgmtProcessVnodeCfgMsg(SRpcMsg *rpcMsg) {
     mgmtSendSimpleResp(rpcMsg->handle, TSDB_CODE_NOT_ACTIVE_VNODE);
     return;
   }
-  mgmtReleaseDnode(pDnode);
+  mgmtDecDnodeRef(pDnode);

   SVgObj *pVgroup = mgmtGetVgroup(pCfg->vgId);
   if (pVgroup == NULL) {
@@ -695,7 +733,7 @@ static void mgmtProcessVnodeCfgMsg(SRpcMsg *rpcMsg) {
     mgmtSendSimpleResp(rpcMsg->handle, TSDB_CODE_NOT_ACTIVE_VNODE);
     return;
   }
-  mgmtReleaseVgroup(pVgroup);
+  mgmtDecVgroupRef(pVgroup);

   mgmtSendSimpleResp(rpcMsg->handle, TSDB_CODE_SUCCESS);

@@ -711,7 +749,7 @@ void mgmtDropAllVgroups(SDbObj *pDropDb) {
   SVgObj *pVgroup = NULL;

   while (1) {
-    mgmtReleaseVgroup(pVgroup);
+    mgmtDecVgroupRef(pVgroup);
     pNode = sdbFetchRow(tsVgroupSdb, pNode, (void **)&pVgroup);
     if (pVgroup == NULL) break;

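[reviewer note, not part of the commit] Beyond the rename churn, mgmtUpdateVgroupStatus() gains two self-healing behaviours: a dnode reporting a vnode it should not own is told to drop it, and a replica-count mismatch re-sends the create-vgroup message so the dnode converges on the mnode's view. One hazard worth flagging: mgmtGetIpSetFromVgroup() and the two SendXxxVgroupMsg() loops now dereference vnodeGid[i].pDnode unconditionally, while mgmtRetrieveVgroups() and mgmtBuildCreateVnodeMsg() guard it against NULL. A defensive variant would look like this (sketch, not in the commit; field names as in the hunks above):

    for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
      SDnodeObj *pDnode = pVgroup->vnodeGid[i].pDnode;
      if (pDnode != NULL) {
        ipSet.ip[i] = pDnode->privateIp;   /* skip replicas not yet resolved */
      }
    }
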
@@ -397,6 +397,7 @@ void monitorSaveAcctLog(char *acctId, int64_t currentPointsPerSecond, int64_t ma
                         int64_t totalOutbound, int64_t maxOutbound, int64_t totalDbs, int64_t maxDbs,
                         int64_t totalUsers, int64_t maxUsers, int64_t totalStreams, int64_t maxStreams,
                         int64_t totalConns, int64_t maxConns, int8_t accessState) {
+  if (monitor == NULL) return;
   if (monitor->state != MONITOR_STATE_INITIALIZED) return;

   char sql[1024] = {0};

@@ -11,7 +11,7 @@
 #define IS_VALID_COMPRESSION(compression) (((compression) >= NO_COMPRESSION) && ((compression) <= TWO_STAGE_COMP))
 #define TSDB_MIN_ID 0
 #define TSDB_MAX_ID INT_MAX
-#define TSDB_MIN_TABLES 10
+#define TSDB_MIN_TABLES 4
 #define TSDB_MAX_TABLES 100000
 #define TSDB_DEFAULT_TABLES 1000
 #define TSDB_DEFAULT_DAYS_PER_FILE 10

@@ -170,6 +170,8 @@ char *taosIpStr(uint32_t ipInt);

 uint32_t ip2uint(const char *const ip_addr);

+void taosRemoveDir(char *rootDir);
+
 #define TAOS_ALLOC_MODE_DEFAULT 0
 #define TAOS_ALLOC_MODE_RANDOM_FAIL 1
 #define TAOS_ALLOC_MODE_DETECT_LEAK 2

@@ -663,3 +663,27 @@ void tzfree(void *ptr) {
     free((void *)((char *)ptr - sizeof(size_t)));
   }
 }
+
+void taosRemoveDir(char *rootDir) {
+  DIR *dir = opendir(rootDir);
+  if (dir == NULL) return;
+
+  struct dirent *de = NULL;
+  while ((de = readdir(dir)) != NULL) {
+    if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0) continue;
+
+    char filename[1024];
+    snprintf(filename, 1023, "%s/%s", rootDir, de->d_name);
+    if (de->d_type & DT_DIR) {
+      taosRemoveDir(filename);
+    } else {
+      remove(filename);
+      uPrint("file:%s is removed", filename);
+    }
+  }
+
+  closedir(dir);
+  rmdir(rootDir);
+
+  uPrint("dir:%s is removed", rootDir);
+}

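[reviewer note, not part of the commit] taosRemoveDir() relies on dirent.d_type, which some filesystems report only as DT_UNKNOWN; a portable variant falls back to stat() (hedged sketch, not in the commit):

    struct stat st;
    if (stat(filename, &st) == 0 && S_ISDIR(st.st_mode)) {
      taosRemoveDir(filename);   /* recurse into subdirectory */
    } else {
      remove(filename);
    }
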
@@ -9,6 +9,7 @@ IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM))
   INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/tsdb/inc)
   INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/common/inc)
   INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/dnode/inc)
+  INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
   INCLUDE_DIRECTORIES(${TD_ENTERPRISE_DIR}/src/inc)
   INCLUDE_DIRECTORIES(inc)
   AUX_SOURCE_DIRECTORY(src SRC)

@ -18,10 +18,12 @@
|
|||
#include "ihash.h"
|
||||
#include "taoserror.h"
|
||||
#include "taosmsg.h"
|
||||
#include "tutil.h"
|
||||
#include "trpc.h"
|
||||
#include "tsdb.h"
|
||||
#include "ttime.h"
|
||||
#include "ttimer.h"
|
||||
#include "cJSON.h"
|
||||
#include "twal.h"
|
||||
#include "tglobal.h"
|
||||
#include "dnode.h"
|
||||
|
@ -36,6 +38,8 @@ static void vnodeBuildVloadMsg(char *pNode, void * param);
|
|||
static int vnodeWalCallback(void *arg);
|
||||
static int32_t vnodeSaveCfg(SMDCreateVnodeMsg *pVnodeCfg);
|
||||
static int32_t vnodeReadCfg(SVnodeObj *pVnode);
|
||||
static int32_t vnodeSaveVersion(SVnodeObj *pVnode);
|
||||
static int32_t vnodeReadVersion(SVnodeObj *pVnode);
|
||||
static int vnodeWalCallback(void *arg);
|
||||
static uint32_t vnodeGetFileInfo(void *ahandle, char *name, uint32_t *index, int32_t *size);
|
||||
static int vnodeGetWalInfo(void *ahandle, char *name, uint32_t *index);
|
||||
|
@ -93,21 +97,21 @@ int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) {
|
|||
|
||||
STsdbCfg tsdbCfg = {0};
|
||||
tsdbCfg.precision = pVnodeCfg->cfg.precision;
|
||||
tsdbCfg.compression = -1;
|
||||
tsdbCfg.compression = pVnodeCfg->cfg.compression;;
|
||||
tsdbCfg.tsdbId = pVnodeCfg->cfg.vgId;
|
||||
tsdbCfg.maxTables = pVnodeCfg->cfg.maxSessions;
|
||||
tsdbCfg.maxTables = pVnodeCfg->cfg.maxTables;
|
||||
tsdbCfg.daysPerFile = pVnodeCfg->cfg.daysPerFile;
|
||||
tsdbCfg.minRowsPerFileBlock = -1;
|
||||
tsdbCfg.maxRowsPerFileBlock = -1;
|
||||
tsdbCfg.keep = -1;
|
||||
tsdbCfg.maxCacheSize = -1;
|
||||
tsdbCfg.minRowsPerFileBlock = pVnodeCfg->cfg.minRowsPerFileBlock;
|
||||
tsdbCfg.maxRowsPerFileBlock = pVnodeCfg->cfg.maxRowsPerFileBlock;
|
||||
tsdbCfg.keep = pVnodeCfg->cfg.daysToKeep;
|
||||
tsdbCfg.maxCacheSize = pVnodeCfg->cfg.maxCacheSize;
|
||||
|
||||
char tsdbDir[TSDB_FILENAME_LEN] = {0};
|
||||
sprintf(tsdbDir, "%s/vnode%d/tsdb", tsVnodeDir, pVnodeCfg->cfg.vgId);
|
||||
code = tsdbCreateRepo(tsdbDir, &tsdbCfg, NULL);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
dError("vgId:%d, failed to create tsdb in vnode, reason:%s", pVnodeCfg->cfg.vgId, tstrerror(terrno));
|
||||
return terrno;
|
||||
dError("vgId:%d, failed to create tsdb in vnode, reason:%s", pVnodeCfg->cfg.vgId, tstrerror(code));
|
||||
return TSDB_CODE_VG_INIT_FAILED;
|
||||
}
|
||||
|
||||
dPrint("vgId:%d, vnode is created, clog:%d", pVnodeCfg->cfg.vgId, pVnodeCfg->cfg.commitLog);
|
||||
|
@ -131,6 +135,39 @@ int32_t vnodeDrop(int32_t vgId) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t vnodeAlter(void *param, SMDCreateVnodeMsg *pVnodeCfg) {
|
||||
SVnodeObj *pVnode = param;
|
||||
int32_t code = vnodeSaveCfg(pVnodeCfg);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
dError("vgId:%d, failed to save vnode cfg, reason:%s", pVnodeCfg->cfg.vgId, tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
code = vnodeReadCfg(pVnode);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
dError("pVnode:%p vgId:%d, failed to read cfg file", pVnode, pVnode->vgId);
|
||||
taosDeleteIntHash(tsDnodeVnodesHash, pVnode->vgId);
|
||||
return code;
|
||||
}
|
||||
|
||||
code = syncReconfig(pVnode->sync, &pVnode->syncCfg);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
dTrace("pVnode:%p vgId:%d, failed to alter vnode, canot reconfig sync, result:%s", pVnode, pVnode->vgId,
|
||||
tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
code = tsdbConfigRepo(pVnode->tsdb, &pVnode->tsdbCfg);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
dTrace("pVnode:%p vgId:%d, failed to alter vnode, canot reconfig tsdb, result:%s", pVnode, pVnode->vgId,
|
||||
tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
dTrace("pVnode:%p vgId:%d, vnode is altered", pVnode, pVnode->vgId);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t vnodeOpen(int32_t vnode, char *rootDir) {
|
||||
char temp[TSDB_FILENAME_LEN];
|
||||
pthread_once(&vnodeModuleInit, vnodeInit);
|
||||
|
@ -149,11 +186,13 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) {
|
|||
return code;
|
||||
}
|
||||
|
||||
vnodeReadVersion(pVnode);
|
||||
|
||||
pVnode->wqueue = dnodeAllocateWqueue(pVnode);
|
||||
pVnode->rqueue = dnodeAllocateRqueue(pVnode);
|
||||
|
||||
sprintf(temp, "%s/wal", rootDir);
|
||||
pVnode->wal = walOpen(temp, &pVnode->walCfg);
|
||||
pVnode->wal = walOpen(temp, &pVnode->walCfg);
|
||||
|
||||
SSyncInfo syncInfo;
|
||||
syncInfo.vgId = pVnode->vgId;
|
||||
|
@ -166,10 +205,10 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) {
|
|||
syncInfo.writeToCache = vnodeWriteToQueue;
|
||||
syncInfo.confirmForward = dnodeSendRpcWriteRsp;
|
||||
syncInfo.notifyRole = vnodeNotifyRole;
|
||||
pVnode->sync = syncStart(&syncInfo);
|
||||
pVnode->sync = syncStart(&syncInfo);
|
||||
|
||||
pVnode->events = NULL;
|
||||
pVnode->cq = NULL;
|
||||
pVnode->events = NULL;
|
||||
pVnode->cq = NULL;
|
||||
|
||||
STsdbAppH appH = {0};
|
||||
appH.appH = (void *)pVnode;
|
||||
|
@ -227,7 +266,9 @@ void vnodeRelease(void *pVnodeRaw) {
|
|||
pVnode->wqueue = NULL;
|
||||
|
||||
if (pVnode->status == TAOS_VN_STATUS_DELETING) {
|
||||
// remove the whole directory
|
||||
char rootDir[TSDB_FILENAME_LEN] = {0};
|
||||
sprintf(rootDir, "%s/vnode%d", tsVnodeDir, vgId);
|
||||
taosRemoveDir(rootDir);
|
||||
}
|
||||
|
||||
free(pVnode);
|
||||
|
@ -246,7 +287,8 @@ void *vnodeGetVnode(int32_t vgId) {
|
|||
SVnodeObj **ppVnode = (SVnodeObj **)taosGetIntHashData(tsDnodeVnodesHash, vgId);
|
||||
if (ppVnode == NULL || *ppVnode == NULL) {
|
||||
terrno = TSDB_CODE_INVALID_VGROUP_ID;
|
||||
assert(false);
|
||||
dError("vgId:%d not exist");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return *ppVnode;
|
||||
|
@ -292,6 +334,7 @@ static void vnodeBuildVloadMsg(char *pNode, void * param) {
|
|||
pLoad->vgId = htonl(pVnode->vgId);
|
||||
pLoad->status = pVnode->status;
|
||||
pLoad->role = pVnode->role;
|
||||
pLoad->replica = pVnode->syncCfg.replica;
|
||||
}
|
||||
|
||||
static void vnodeCleanUp(SVnodeObj *pVnode) {
|
||||
|
@ -301,6 +344,7 @@ static void vnodeCleanUp(SVnodeObj *pVnode) {
|
|||
//syncStop(pVnode->sync);
|
||||
tsdbCloseRepo(pVnode->tsdb);
|
||||
walClose(pVnode->wal);
|
||||
vnodeSaveVersion(pVnode);
|
||||
|
||||
vnodeRelease(pVnode);
|
||||
}
|
||||
|
@ -328,88 +372,306 @@ static void vnodeNotifyRole(void *ahandle, int8_t role) {
}

static int32_t vnodeSaveCfg(SMDCreateVnodeMsg *pVnodeCfg) {
  char cfgFile[TSDB_FILENAME_LEN * 2] = {0};
  sprintf(cfgFile, "%s/vnode%d/config", tsVnodeDir, pVnodeCfg->cfg.vgId);

  char cfgFile[TSDB_FILENAME_LEN + 30] = {0};
  sprintf(cfgFile, "%s/vnode%d/config.json", tsVnodeDir, pVnodeCfg->cfg.vgId);
  FILE *fp = fopen(cfgFile, "w");
  if (!fp) return errno;

  fprintf(fp, "commitLog %d\n", pVnodeCfg->cfg.commitLog);
  fprintf(fp, "wals %d\n", 3);
  fprintf(fp, "arbitratorIp %d\n", pVnodeCfg->vpeerDesc[0].ip);
  fprintf(fp, "quorum %d\n", 1);
  fprintf(fp, "replica %d\n", pVnodeCfg->cfg.replications);
  for (int32_t i = 0; i < pVnodeCfg->cfg.replications; i++) {
    fprintf(fp, "index%d nodeId %d nodeIp %u name n%d\n", i, pVnodeCfg->vpeerDesc[i].dnodeId, pVnodeCfg->vpeerDesc[i].ip, pVnodeCfg->vpeerDesc[i].dnodeId);
  if (!fp) {
    dError("vgId:%d, failed to open vnode cfg file for write, error:%s", pVnodeCfg->cfg.vgId, strerror(errno));
    return errno;
  }

  fclose(fp);
  dTrace("vgId:%d, save vnode cfg successed", pVnodeCfg->cfg.vgId);
  char ipStr[20];
  int32_t len = 0;
  int32_t maxLen = 1000;
  char *  content = calloc(1, maxLen + 1);

  return TSDB_CODE_SUCCESS;
  len += snprintf(content + len, maxLen - len, "{\n");

  len += snprintf(content + len, maxLen - len, " \"precision\": %d,\n", pVnodeCfg->cfg.precision);
  len += snprintf(content + len, maxLen - len, " \"compression\": %d,\n", pVnodeCfg->cfg.compression);
  len += snprintf(content + len, maxLen - len, " \"maxTables\": %d,\n", pVnodeCfg->cfg.maxTables);
  len += snprintf(content + len, maxLen - len, " \"daysPerFile\": %d,\n", pVnodeCfg->cfg.daysPerFile);
  len += snprintf(content + len, maxLen - len, " \"minRowsPerFileBlock\": %d,\n", pVnodeCfg->cfg.minRowsPerFileBlock);
  len += snprintf(content + len, maxLen - len, " \"maxRowsPerFileBlock\": %d,\n", pVnodeCfg->cfg.maxRowsPerFileBlock);
  len += snprintf(content + len, maxLen - len, " \"daysToKeep\": %d,\n", pVnodeCfg->cfg.daysToKeep);

  len += snprintf(content + len, maxLen - len, " \"maxCacheSize\": %" PRId64 ",\n", pVnodeCfg->cfg.maxCacheSize);

  len += snprintf(content + len, maxLen - len, " \"commitLog\": %d,\n", pVnodeCfg->cfg.commitLog);
  len += snprintf(content + len, maxLen - len, " \"wals\": %d,\n", pVnodeCfg->cfg.wals);

  uint32_t ipInt = pVnodeCfg->cfg.arbitratorIp;
  sprintf(ipStr, "%u.%u.%u.%u", ipInt & 0xFF, (ipInt >> 8) & 0xFF, (ipInt >> 16) & 0xFF, (uint8_t)(ipInt >> 24));
  len += snprintf(content + len, maxLen - len, " \"arbitratorIp\": \"%s\",\n", ipStr);

  len += snprintf(content + len, maxLen - len, " \"quorum\": %d,\n", pVnodeCfg->cfg.quorum);
  len += snprintf(content + len, maxLen - len, " \"replica\": %d,\n", pVnodeCfg->cfg.replications);

  len += snprintf(content + len, maxLen - len, " \"nodeInfos\": [{\n");
  for (int32_t i = 0; i < pVnodeCfg->cfg.replications; i++) {
    len += snprintf(content + len, maxLen - len, " \"nodeId\": %d,\n", pVnodeCfg->nodes[i].nodeId);

    uint32_t ipInt = pVnodeCfg->nodes[i].nodeIp;
    sprintf(ipStr, "%u.%u.%u.%u", ipInt & 0xFF, (ipInt >> 8) & 0xFF, (ipInt >> 16) & 0xFF, (uint8_t)(ipInt >> 24));
    len += snprintf(content + len, maxLen - len, " \"nodeIp\": \"%s\",\n", ipStr);

    len += snprintf(content + len, maxLen - len, " \"nodeName\": \"%s\"\n", pVnodeCfg->nodes[i].nodeName);

    if (i < pVnodeCfg->cfg.replications - 1) {
      len += snprintf(content + len, maxLen - len, " },{\n");
    } else {
      len += snprintf(content + len, maxLen - len, " }]\n");
    }
  }
  len += snprintf(content + len, maxLen - len, "}\n");

  fwrite(content, 1, len, fp);
  fclose(fp);
  free(content);

  dPrint("vgId:%d, save vnode cfg successed", pVnodeCfg->cfg.vgId);

  return 0;
}

// TODO: this is a simple implement
static int32_t vnodeReadCfg(SVnodeObj *pVnode) {
  char option[5][16] = {0};
  char cfgFile[TSDB_FILENAME_LEN * 2] = {0};
  sprintf(cfgFile, "%s/vnode%d/config", tsVnodeDir, pVnode->vgId);

  char cfgFile[TSDB_FILENAME_LEN + 30] = {0};
  sprintf(cfgFile, "%s/vnode%d/config.json", tsVnodeDir, pVnode->vgId);
  FILE *fp = fopen(cfgFile, "r");
  if (!fp) return errno;
  if (!fp) {
    dError("pVnode:%p vgId:%d, failed to open vnode cfg file for read, error:%s", pVnode, pVnode->vgId, strerror(errno));
    return errno;
  }

  int32_t commitLog = -1;
  int32_t num = fscanf(fp, "%s %d", option[0], &commitLog);
  if (num != 2) return TSDB_CODE_INVALID_FILE_FORMAT;
  if (strcmp(option[0], "commitLog") != 0) return TSDB_CODE_INVALID_FILE_FORMAT;
  if (commitLog == -1) return TSDB_CODE_INVALID_FILE_FORMAT;
  pVnode->walCfg.commitLog = (int8_t)commitLog;
  int   ret = TSDB_CODE_OTHERS;
  int   maxLen = 1000;
  char *content = calloc(1, maxLen + 1);
  int   len = fread(content, 1, maxLen, fp);
  if (len <= 0) {
    free(content);
    fclose(fp);
    dError("pVnode:%p vgId:%d, failed to read vnode cfg, content is null", pVnode, pVnode->vgId);
    return false;
  }

  int32_t wals = -1;
  num = fscanf(fp, "%s %d", option[0], &wals);
  if (num != 2) return TSDB_CODE_INVALID_FILE_FORMAT;
  if (strcmp(option[0], "wals") != 0) return TSDB_CODE_INVALID_FILE_FORMAT;
  if (wals == -1) return TSDB_CODE_INVALID_FILE_FORMAT;
  pVnode->walCfg.wals = (int8_t)wals;
  cJSON *root = cJSON_Parse(content);
  if (root == NULL) {
    dError("pVnode:%p vgId:%d, failed to read vnode cfg, invalid json format", pVnode, pVnode->vgId);
    goto PARSE_OVER;
  }

  cJSON *precision = cJSON_GetObjectItem(root, "precision");
  if (!precision || precision->type != cJSON_Number) {
    dError("pVnode:%p vgId:%d, failed to read vnode cfg, precision not found", pVnode, pVnode->vgId);
    goto PARSE_OVER;
  }
  pVnode->tsdbCfg.precision = (int8_t)precision->valueint;

  cJSON *compression = cJSON_GetObjectItem(root, "compression");
  if (!compression || compression->type != cJSON_Number) {
    dError("pVnode:%p vgId:%d, failed to read vnode cfg, compression not found", pVnode, pVnode->vgId);
    goto PARSE_OVER;
  }
  pVnode->tsdbCfg.compression = (int8_t)compression->valueint;

  cJSON *maxTables = cJSON_GetObjectItem(root, "maxTables");
  if (!maxTables || maxTables->type != cJSON_Number) {
    dError("pVnode:%p vgId:%d, failed to read vnode cfg, maxTables not found", pVnode, pVnode->vgId);
    goto PARSE_OVER;
  }
  pVnode->tsdbCfg.maxTables = maxTables->valueint;

  cJSON *daysPerFile = cJSON_GetObjectItem(root, "daysPerFile");
  if (!daysPerFile || daysPerFile->type != cJSON_Number) {
    dError("pVnode:%p vgId:%d, failed to read vnode cfg, daysPerFile not found", pVnode, pVnode->vgId);
    goto PARSE_OVER;
  }
  pVnode->tsdbCfg.daysPerFile = daysPerFile->valueint;

  cJSON *minRowsPerFileBlock = cJSON_GetObjectItem(root, "minRowsPerFileBlock");
  if (!minRowsPerFileBlock || minRowsPerFileBlock->type != cJSON_Number) {
    dError("pVnode:%p vgId:%d, failed to read vnode cfg, minRowsPerFileBlock not found", pVnode, pVnode->vgId);
    goto PARSE_OVER;
  }
  pVnode->tsdbCfg.minRowsPerFileBlock = minRowsPerFileBlock->valueint;

  cJSON *maxRowsPerFileBlock = cJSON_GetObjectItem(root, "maxRowsPerFileBlock");
  if (!maxRowsPerFileBlock || maxRowsPerFileBlock->type != cJSON_Number) {
    dError("pVnode:%p vgId:%d, failed to read vnode cfg, maxRowsPerFileBlock not found", pVnode, pVnode->vgId);
    goto PARSE_OVER;
  }
  pVnode->tsdbCfg.maxRowsPerFileBlock = maxRowsPerFileBlock->valueint;

  cJSON *daysToKeep = cJSON_GetObjectItem(root, "daysToKeep");
  if (!daysToKeep || daysToKeep->type != cJSON_Number) {
    dError("pVnode:%p vgId:%d, failed to read vnode cfg, daysToKeep not found", pVnode, pVnode->vgId);
    goto PARSE_OVER;
  }
  pVnode->tsdbCfg.keep = daysToKeep->valueint;

  cJSON *maxCacheSize = cJSON_GetObjectItem(root, "maxCacheSize");
  if (!maxCacheSize || maxCacheSize->type != cJSON_Number) {
    dError("pVnode:%p vgId:%d, failed to read vnode cfg, maxCacheSize not found", pVnode, pVnode->vgId);
    goto PARSE_OVER;
  }
  pVnode->tsdbCfg.maxCacheSize = maxCacheSize->valueint;

  cJSON *commitLog = cJSON_GetObjectItem(root, "commitLog");
  if (!commitLog || commitLog->type != cJSON_Number) {
    dError("pVnode:%p vgId:%d, failed to read vnode cfg, commitLog not found", pVnode, pVnode->vgId);
    goto PARSE_OVER;
  }
  pVnode->walCfg.commitLog = (int8_t)commitLog->valueint;

  cJSON *wals = cJSON_GetObjectItem(root, "wals");
  if (!wals || wals->type != cJSON_Number) {
    dError("pVnode:%p vgId:%d, failed to read vnode cfg, wals not found", pVnode, pVnode->vgId);
    goto PARSE_OVER;
  }
  pVnode->walCfg.wals = (int8_t)wals->valueint;
  pVnode->walCfg.keep = 0;

  int32_t arbitratorIp = -1;
  num = fscanf(fp, "%s %u", option[0], &arbitratorIp);
  if (num != 2) return TSDB_CODE_INVALID_FILE_FORMAT;
  if (strcmp(option[0], "arbitratorIp") != 0) return TSDB_CODE_INVALID_FILE_FORMAT;
  if (arbitratorIp == -1) return TSDB_CODE_INVALID_FILE_FORMAT;
  pVnode->syncCfg.arbitratorIp = arbitratorIp;
  cJSON *arbitratorIp = cJSON_GetObjectItem(root, "arbitratorIp");
  if (!arbitratorIp || arbitratorIp->type != cJSON_String || arbitratorIp->valuestring == NULL) {
    dError("pVnode:%p vgId:%d, failed to read vnode cfg, arbitratorIp not found", pVnode, pVnode->vgId);
    goto PARSE_OVER;
  }
  pVnode->syncCfg.arbitratorIp = inet_addr(arbitratorIp->valuestring);

  int32_t quorum = -1;
  num = fscanf(fp, "%s %d", option[0], &quorum);
  if (num != 2) return TSDB_CODE_INVALID_FILE_FORMAT;
  if (strcmp(option[0], "quorum") != 0) return TSDB_CODE_INVALID_FILE_FORMAT;
  if (quorum == -1) return TSDB_CODE_INVALID_FILE_FORMAT;
  pVnode->syncCfg.quorum = (int8_t)quorum;
  cJSON *quorum = cJSON_GetObjectItem(root, "quorum");
  if (!quorum || quorum->type != cJSON_Number) {
    dError("pVnode:%p vgId:%d, failed to read vnode cfg, quorum not found", pVnode, pVnode->vgId);
    goto PARSE_OVER;
  }
  pVnode->syncCfg.quorum = (int8_t)quorum->valueint;

  int32_t replica = -1;
  num = fscanf(fp, "%s %d", option[0], &replica);
  if (num != 2) return TSDB_CODE_INVALID_FILE_FORMAT;
  if (strcmp(option[0], "replica") != 0) return TSDB_CODE_INVALID_FILE_FORMAT;
  if (replica == -1) return TSDB_CODE_INVALID_FILE_FORMAT;
  pVnode->syncCfg.replica = (int8_t)replica;
  cJSON *replica = cJSON_GetObjectItem(root, "replica");
  if (!replica || replica->type != cJSON_Number) {
    dError("pVnode:%p vgId:%d, failed to read vnode cfg, replica not found", pVnode, pVnode->vgId);
    goto PARSE_OVER;
  }
  pVnode->syncCfg.replica = (int8_t)replica->valueint;

  for (int32_t i = 0; i < replica; ++i) {
    int32_t  dnodeId = -1;
    uint32_t dnodeIp = -1;
    num = fscanf(fp, "%s %s %d %s %u %s %s", option[0], option[1], &dnodeId, option[2], &dnodeIp, option[3], pVnode->syncCfg.nodeInfo[i].name);
    if (num != 7) return TSDB_CODE_INVALID_FILE_FORMAT;
    if (strcmp(option[1], "nodeId") != 0) return TSDB_CODE_INVALID_FILE_FORMAT;
    if (strcmp(option[2], "nodeIp") != 0) return TSDB_CODE_INVALID_FILE_FORMAT;
    if (strcmp(option[3], "name") != 0) return TSDB_CODE_INVALID_FILE_FORMAT;
    if (dnodeId == -1) return TSDB_CODE_INVALID_FILE_FORMAT;
    if (dnodeIp == -1) return TSDB_CODE_INVALID_FILE_FORMAT;
    pVnode->syncCfg.nodeInfo[i].nodeId = dnodeId;
    pVnode->syncCfg.nodeInfo[i].nodeIp = dnodeIp;
  cJSON *nodeInfos = cJSON_GetObjectItem(root, "nodeInfos");
  if (!nodeInfos || nodeInfos->type != cJSON_Array) {
    dError("pVnode:%p vgId:%d, failed to read vnode cfg, nodeInfos not found", pVnode, pVnode->vgId);
    goto PARSE_OVER;
  }

  fclose(fp);
  dTrace("pVnode:%p vgId:%d, read vnode cfg successed", pVnode, pVnode->vgId);
  int size = cJSON_GetArraySize(nodeInfos);
  if (size != pVnode->syncCfg.replica) {
    dError("pVnode:%p vgId:%d, failed to read vnode cfg, nodeInfos size not matched", pVnode, pVnode->vgId);
    goto PARSE_OVER;
  }

  return TSDB_CODE_SUCCESS;
  for (int i = 0; i < size; ++i) {
    cJSON *nodeInfo = cJSON_GetArrayItem(nodeInfos, i);
    if (nodeInfo == NULL) continue;

    cJSON *nodeId = cJSON_GetObjectItem(nodeInfo, "nodeId");
    if (!nodeId || nodeId->type != cJSON_Number) {
      dError("pVnode:%p vgId:%d, failed to read vnode cfg, nodeId not found", pVnode, pVnode->vgId);
      goto PARSE_OVER;
    }
    pVnode->syncCfg.nodeInfo[i].nodeId = nodeId->valueint;

    cJSON *nodeIp = cJSON_GetObjectItem(nodeInfo, "nodeIp");
    if (!nodeIp || nodeIp->type != cJSON_String || nodeIp->valuestring == NULL) {
      dError("pVnode:%p vgId:%d, failed to read vnode cfg, nodeIp not found", pVnode, pVnode->vgId);
      goto PARSE_OVER;
    }
    pVnode->syncCfg.nodeInfo[i].nodeIp = inet_addr(nodeIp->valuestring);

    cJSON *nodeName = cJSON_GetObjectItem(nodeInfo, "nodeName");
    if (!nodeName || nodeName->type != cJSON_String || nodeName->valuestring == NULL) {
      dError("pVnode:%p vgId:%d, failed to read vnode cfg, nodeName not found", pVnode, pVnode->vgId);
      goto PARSE_OVER;
    }
    strncpy(pVnode->syncCfg.nodeInfo[i].name, nodeName->valuestring, TSDB_NODE_NAME_LEN);
  }

  ret = 0;

  dPrint("pVnode:%p vgId:%d, read vnode cfg successed, replica:%d", pVnode, pVnode->vgId, pVnode->syncCfg.replica);
  for (int32_t i = 0; i < pVnode->syncCfg.replica; i++) {
    dPrint("pVnode:%p vgId:%d, dnode:%d, ip:%s name:%s", pVnode, pVnode->vgId, pVnode->syncCfg.nodeInfo[i].nodeId,
           taosIpStr(pVnode->syncCfg.nodeInfo[i].nodeIp), pVnode->syncCfg.nodeInfo[i].name);
  }

PARSE_OVER:
  free(content);
  cJSON_Delete(root);
  fclose(fp);
  return ret;
}

static int32_t vnodeSaveVersion(SVnodeObj *pVnode) {
  char versionFile[TSDB_FILENAME_LEN + 30] = {0};
  sprintf(versionFile, "%s/vnode%d/version.json", tsVnodeDir, pVnode->vgId);
  FILE *fp = fopen(versionFile, "w");
  if (!fp) {
    dError("pVnode:%p vgId:%d, failed to open vnode version file for write, error:%s", pVnode, pVnode->vgId, strerror(errno));
    return errno;
  }

  int32_t len = 0;
  int32_t maxLen = 30;
  char *  content = calloc(1, maxLen + 1);

  len += snprintf(content + len, maxLen - len, "{\n");
  len += snprintf(content + len, maxLen - len, "  \"version\": %" PRId64 "\n", pVnode->version);
  len += snprintf(content + len, maxLen - len, "}\n");

  fwrite(content, 1, len, fp);
  fclose(fp);
  free(content);

  dPrint("pVnode:%p vgId:%d, save vnode version successed", pVnode, pVnode->vgId);

  return 0;
}

static int32_t vnodeReadVersion(SVnodeObj *pVnode) {
  char versionFile[TSDB_FILENAME_LEN + 30] = {0};
  sprintf(versionFile, "%s/vnode%d/version.json", tsVnodeDir, pVnode->vgId);
  FILE *fp = fopen(versionFile, "r");
  if (!fp) {
    dError("pVnode:%p vgId:%d, failed to open vnode version file for read, error:%s", pVnode, pVnode->vgId, strerror(errno));
    return errno;
  }

  int   ret = TSDB_CODE_OTHERS;
  int   maxLen = 100;
  char *content = calloc(1, maxLen + 1);
  int   len = fread(content, 1, maxLen, fp);
  if (len <= 0) {
    free(content);
    fclose(fp);
    dError("pVnode:%p vgId:%d, failed to read vnode version, content is null", pVnode, pVnode->vgId);
    return false;
  }

  cJSON *root = cJSON_Parse(content);
  if (root == NULL) {
    dError("pVnode:%p vgId:%d, failed to read vnode version, invalid json format", pVnode, pVnode->vgId);
    goto PARSE_OVER;
  }

  cJSON *version = cJSON_GetObjectItem(root, "version");
  if (!version || version->type != cJSON_Number) {
    dError("pVnode:%p vgId:%d, failed to read vnode version, version not found", pVnode, pVnode->vgId);
    goto PARSE_OVER;
  }
  pVnode->version = version->valueint;

  ret = 0;

  dPrint("pVnode:%p vgId:%d, read vnode version successed, version:%" PRId64, pVnode, pVnode->vgId, pVnode->version);

PARSE_OVER:
  free(content);
  cJSON_Delete(root);
  fclose(fp);
  return ret;
}
@ -105,7 +105,7 @@ echo "numOfThreadsPerCore 2.0" >> $TAOS_CFG
echo "defaultPass taosdata" >> $TAOS_CFG
echo "numOfLogLines 100000000" >> $TAOS_CFG
echo "mgmtEqualVnodeNum 0" >> $TAOS_CFG
echo "clog 0" >> $TAOS_CFG
echo "clog 2" >> $TAOS_CFG
echo "statusInterval 1" >> $TAOS_CFG
echo "numOfTotalVnodes 4" >> $TAOS_CFG
echo "asyncLog 0" >> $TAOS_CFG
@ -34,6 +34,8 @@ cd .
sh/ip.sh -i 1 -s up > /dev/null 2>&1 &
sh/ip.sh -i 2 -s up > /dev/null 2>&1 &
sh/ip.sh -i 3 -s up > /dev/null 2>&1 &
sh/ip.sh -i 4 -s up > /dev/null 2>&1 &
sh/ip.sh -i 5 -s up > /dev/null 2>&1 &

# Get responsible directories
CODE_DIR=`dirname $0`
@ -25,18 +25,18 @@ system sh/cfg.sh -n dnode2 -c mgmtEqualVnodeNum -v 4
system sh/cfg.sh -n dnode3 -c mgmtEqualVnodeNum -v 4
system sh/cfg.sh -n dnode4 -c mgmtEqualVnodeNum -v 4

system sh/cfg.sh -n dnode1 -c clog -v 1
system sh/cfg.sh -n dnode2 -c clog -v 1
system sh/cfg.sh -n dnode3 -c clog -v 1
system sh/cfg.sh -n dnode4 -c clog -v 1
system sh/cfg.sh -n dnode1 -c clog -v 2
system sh/cfg.sh -n dnode2 -c clog -v 2
system sh/cfg.sh -n dnode3 -c clog -v 2
system sh/cfg.sh -n dnode4 -c clog -v 2

system sh/cfg.sh -n dnode1 -c clog -v 1
system sh/cfg.sh -n dnode2 -c clog -v 1
system sh/cfg.sh -n dnode3 -c clog -v 1
system sh/cfg.sh -n dnode4 -c clog -v 1
system sh/cfg.sh -n dnode1 -c clog -v 2
system sh/cfg.sh -n dnode2 -c clog -v 2
system sh/cfg.sh -n dnode3 -c clog -v 2
system sh/cfg.sh -n dnode4 -c clog -v 2

print ========== step1
system sh/exec.sh -n dnode1 -s start
system sh/exec_up.sh -n dnode1 -s start
sql connect
sleep 3000
@ -49,15 +49,15 @@ sql insert into d1.t1 values(now+4s, 12)
sql insert into d1.t1 values(now+5s, 11)

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
if $data3_192.168.0.1 != 3 then
print 192.168.0.1 openVnodes $data3_1
if $data3_1 != 1 then
return -1
endi

print ========== step2
sleep 2000
sql create dnode 192.168.0.2
system sh/exec.sh -n dnode2 -s start
system sh/exec_up.sh -n dnode2 -s start

$x = 0
show2:
@ -68,12 +68,12 @@ show2:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 0 then
goto show2
endi
if $data3_192.168.0.2 != 3 then
if $data3_2 != 1 then
goto show2
endi

@ -87,12 +87,12 @@ sql insert into d2.t2 values(now+4s, 22)
sql insert into d2.t2 values(now+5s, 21)

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 0 then
return -1
endi
if $data3_192.168.0.2 != 2 then
if $data3_2 != 2 then
return -1
endi
@ -108,23 +108,23 @@ show4:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
if $data3_192.168.0.1 != 2 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 2 then
goto show4
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
goto show4
endi
if $rows != 1 then
goto show4
endi

system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec_up.sh -n dnode2 -s stop -x SIGINT

print ========== step5
sql create dnode 192.168.0.3
system sh/exec.sh -n dnode3 -s start
system sh/exec_up.sh -n dnode3 -s start

$x = 0
show5:

@ -135,16 +135,16 @@ show5:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
if $data3_1 != 0 then
goto show5
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
goto show5
endi
if $data3_192.168.0.3 != 2 then
if $data3_3 != 2 then
goto show5
endi
@ -158,23 +158,23 @@ sql insert into d3.t3 values(now+4s, 32)
sql insert into d3.t3 values(now+5s, 31)

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3

if $data3_192.168.0.1 != 4 then
if $data3_1 != 0 then
return -1
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
return -1
endi
if $data3_192.168.0.3 != 1 then
if $data3_3 != 3 then
return -1
endi

print ========== step7
sql create dnode 192.168.0.4
system sh/exec.sh -n dnode4 -s start
system sh/exec_up.sh -n dnode4 -s start

$x = 0
show7:

@ -185,20 +185,20 @@ show7:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.4 freeVnodes $data3_192.168.0.4
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
if $data3_1 != 0 then
goto show7
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
goto show7
endi
if $data3_192.168.0.3 != 2 then
if $data3_3 != 2 then
goto show7
endi
if $data3_192.168.0.4 != 3 then
if $data3_4 != 1 then
goto show7
endi
@ -212,21 +212,21 @@ sql insert into d4.t4 values(now+4s, 42)
sql insert into d4.t4 values(now+5s, 41)

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.4 freeVnodes $data3_192.168.0.4
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4

if $data3_192.168.0.1 != 4 then
if $data3_1 != 0 then
return -1
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
return -1
endi
if $data3_192.168.0.3 != 2 then
if $data3_3 != 2 then
return -1
endi
if $data3_192.168.0.4 != 2 then
if $data3_4 != 2 then
return -1
endi

@ -242,25 +242,25 @@ show9:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.4 freeVnodes $data3_192.168.0.4
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4

if $data3_192.168.0.1 != 4 then
if $data3_1 != 0 then
goto show9
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
goto show9
endi
if $data3_192.168.0.3 != null then
if $data3_3 != null then
goto show9
endi
if $data3_192.168.0.4 != 0 then
if $data3_4 != 4 then
goto show9
endi

system sh/exec.sh -n dnode3 -s stop -x SIGINT
system sh/exec_up.sh -n dnode3 -s stop -x SIGINT

print ========== step10
sql select * from d1.t1 order by t desc
@ -37,13 +37,13 @@ system sh/cfg.sh -n dnode4 -c clog -v 1
system sh/cfg.sh -n dnode5 -c clog -v 1

print ========== step1
system sh/exec.sh -n dnode1 -s start
system sh/exec_up.sh -n dnode1 -s start
sql connect

sql create dnode 192.168.0.2
sql create dnode 192.168.0.3
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
system sh/exec_up.sh -n dnode2 -s start
system sh/exec_up.sh -n dnode3 -s start
sleep 3000

sql create database d1 replica 2 tables 4

@ -63,16 +63,16 @@ sql insert into d2.t2 values(now+4s, 22)
sql insert into d2.t2 values(now+5s, 21)

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
if $data3_1 != 4 then
return -1
endi
if $data3_192.168.0.2 != 2 then
if $data3_2 != 2 then
return -1
endi
if $data3_192.168.0.3 != 2 then
if $data3_3 != 2 then
return -1
endi
@ -88,24 +88,24 @@ show2:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
if $data3_192.168.0.1 != 2 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
if $data3_1 != 2 then
goto show2
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
goto show2
endi
if $data3_192.168.0.3 != 2 then
if $data3_3 != 2 then
goto show2
endi

system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec_up.sh -n dnode2 -s stop -x SIGINT

print ========== step3
sql create dnode 192.168.0.4
system sh/exec.sh -n dnode4 -s start
system sh/exec_up.sh -n dnode4 -s start

$x = 0
show3:
@ -116,20 +116,20 @@ show3:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.4 freeVnodes $data3_192.168.0.4
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
if $data3_1 != 4 then
goto show3
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
goto show3
endi
if $data3_192.168.0.3 != 2 then
if $data3_3 != 2 then
goto show3
endi
if $data3_192.168.0.4 != 2 then
if $data3_4 != 2 then
goto show3
endi

@ -143,26 +143,26 @@ sql insert into d3.t3 values(now+4s, 32)
sql insert into d3.t3 values(now+5s, 31)

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.4 freeVnodes $data3_192.168.0.4
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
if $data3_1 != 4 then
return -1
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
return -1
endi
if $data3_192.168.0.3 != 1 then
if $data3_3 != 1 then
return -1
endi
if $data3_192.168.0.4 != 1 then
if $data3_4 != 1 then
return -1
endi

print ========== step5
sql create dnode 192.168.0.5
system sh/exec.sh -n dnode5 -s start
system sh/exec_up.sh -n dnode5 -s start

$x = 0
show5:
@ -173,24 +173,24 @@ show5:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.4 freeVnodes $data3_192.168.0.4
print 192.168.0.5 freeVnodes $data3_192.168.0.5
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
print 192.168.0.5 openVnodes $data3_5
if $data3_1 != 4 then
goto show5
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
goto show5
endi
if $data3_192.168.0.3 != 2 then
if $data3_3 != 2 then
goto show5
endi
if $data3_192.168.0.4 != 2 then
if $data3_4 != 2 then
goto show5
endi
if $data3_192.168.0.5 != 2 then
if $data3_5 != 2 then
goto show5
endi

@ -206,28 +206,28 @@ show6:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.4 freeVnodes $data3_192.168.0.4
print 192.168.0.5 freeVnodes $data3_192.168.0.5
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
print 192.168.0.5 openVnodes $data3_5
if $data3_1 != 4 then
goto show6
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
goto show6
endi
if $data3_192.168.0.3 != null then
if $data3_3 != null then
goto show6
endi
if $data3_192.168.0.4 != 1 then
if $data3_4 != 1 then
goto show6
endi
if $data3_192.168.0.5 != 1 then
if $data3_5 != 1 then
goto show6
endi

system sh/exec.sh -n dnode3 -s stop -x SIGINT
system sh/exec_up.sh -n dnode3 -s stop -x SIGINT

print ========== step7
sql select * from d1.t1 order by t desc
@ -43,15 +43,15 @@ system sh/cfg.sh -n dnode5 -c clog -v 1
system sh/cfg.sh -n dnode6 -c clog -v 1

print ========== step1
system sh/exec.sh -n dnode1 -s start
system sh/exec_up.sh -n dnode1 -s start
sql connect

sql create dnode 192.168.0.2
sql create dnode 192.168.0.3
sql create dnode 192.168.0.4
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
system sh/exec.sh -n dnode4 -s start
system sh/exec_up.sh -n dnode2 -s start
system sh/exec_up.sh -n dnode3 -s start
system sh/exec_up.sh -n dnode4 -s start
sleep 3000

sql create database d1 replica 3 tables 4

@ -71,21 +71,21 @@ sql insert into d2.t2 values(now+4s, 22)
sql insert into d2.t2 values(now+5s, 21)

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.4 freeVnodes $data3_192.168.0.4
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4

if $data3_192.168.0.1 != 4 then
if $data3_1 != 4 then
return -1
endi
if $data3_192.168.0.2 != 2 then
if $data3_2 != 2 then
return -1
endi
if $data3_192.168.0.3 != 2 then
if $data3_3 != 2 then
return -1
endi
if $data3_192.168.0.4 != 2 then
if $data3_4 != 2 then
return -1
endi
@ -101,29 +101,29 @@ show2:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.4 freeVnodes $data3_192.168.0.4
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4

if $data3_192.168.0.1 != 2 then
if $data3_1 != 2 then
goto show2
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
goto show2
endi
if $data3_192.168.0.3 != 2 then
if $data3_3 != 2 then
goto show2
endi
if $data3_192.168.0.4 != 2 then
if $data3_4 != 2 then
goto show2
endi

system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec_up.sh -n dnode2 -s stop -x SIGINT

print ========== step
sql create dnode 192.168.0.5
system sh/exec.sh -n dnode5 -s start
system sh/exec_up.sh -n dnode5 -s start

$x = 0
show3:
@ -134,25 +134,25 @@ show3:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.4 freeVnodes $data3_192.168.0.4
print 192.168.0.5 freeVnodes $data3_192.168.0.5
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
print 192.168.0.5 openVnodes $data3_5

if $data3_192.168.0.1 != 4 then
if $data3_1 != 4 then
goto show3
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
goto show3
endi
if $data3_192.168.0.3 != 2 then
if $data3_3 != 2 then
goto show3
endi
if $data3_192.168.0.4 != 2 then
if $data3_4 != 2 then
goto show3
endi
if $data3_192.168.0.5 != 2 then
if $data3_5 != 2 then
goto show3
endi

@ -174,31 +174,31 @@ show4:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.4 freeVnodes $data3_192.168.0.4
print 192.168.0.5 freeVnodes $data3_192.168.0.5
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
print 192.168.0.5 openVnodes $data3_5

if $data3_192.168.0.1 != 4 then
if $data3_1 != 4 then
goto show4
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
goto show4
endi
if $data3_192.168.0.3 != 1 then
if $data3_3 != 1 then
goto show4
endi
if $data3_192.168.0.4 != 1 then
if $data3_4 != 1 then
goto show4
endi
if $data3_192.168.0.5 != 1 then
if $data3_5 != 1 then
goto show4
endi

print ========== step5
sql create dnode 192.168.0.2
system sh/exec.sh -n dnode2 -s start
system sh/exec_up.sh -n dnode2 -s start

$x = 0
show5:
@ -209,16 +209,16 @@ show5:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.4 freeVnodes $data3_192.168.0.4
print 192.168.0.5 freeVnodes $data3_192.168.0.5
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
print 192.168.0.5 openVnodes $data3_5

if $data3_192.168.0.1 != 4 then
if $data3_1 != 4 then
goto show5
endi
if $data3_192.168.0.2 != 2 then
if $data3_2 != 2 then
goto show5
endi

@ -236,29 +236,29 @@ show6:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.4 freeVnodes $data3_192.168.0.4
print 192.168.0.5 freeVnodes $data3_192.168.0.5
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
print 192.168.0.5 openVnodes $data3_5

if $data3_192.168.0.1 != 4 then
if $data3_1 != 4 then
goto show6
endi
if $data3_192.168.0.2 != 1 then
if $data3_2 != 1 then
goto show6
endi
if $data3_192.168.0.3 != null then
if $data3_3 != null then
goto show6
endi
if $data3_192.168.0.4 != 1 then
if $data3_4 != 1 then
goto show6
endi
if $data3_192.168.0.5 != 1 then
if $data3_5 != 1 then
goto show6
endi

system sh/exec.sh -n dnode3 -s stop -x SIGINT
system sh/exec_up.sh -n dnode3 -s stop -x SIGINT

print ========== step7
sql select * from d1.t1 order by t desc
@ -31,7 +31,7 @@ system sh/cfg.sh -n dnode3 -c clog -v 1
system sh/cfg.sh -n dnode4 -c clog -v 1

print ========== step1
system sh/exec.sh -n dnode1 -s start
system sh/exec_up.sh -n dnode1 -s start
sql connect
sleep 3000

@ -52,14 +52,14 @@ sql insert into d2.t2 values(now+4s, 22)
sql insert into d2.t2 values(now+5s, 21)

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
if $data3_192.168.0.1 != 2 then
print 192.168.0.1 openVnodes $data3_1
if $data3_1 != 2 then
return -1
endi

print ========== step2
sql create dnode 192.168.0.2
system sh/exec.sh -n dnode2 -s start
system sh/exec_up.sh -n dnode2 -s start

$x = 0
show2:
@ -70,12 +70,12 @@ show2:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 4 then
goto show2
endi
if $data3_192.168.0.2 != 2 then
if $data3_2 != 2 then
goto show2
endi

@ -96,18 +96,18 @@ show3:
return -1
endi
sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
if $data3_192.168.0.1 != 3 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 3 then
goto show3
endi
if $data3_192.168.0.2 != 1 then
if $data3_2 != 1 then
goto show3
endi

print ========== step3
sql create dnode 192.168.0.3
system sh/exec.sh -n dnode3 -s start
system sh/exec_up.sh -n dnode3 -s start

$x = 0
show4:
@ -117,16 +117,16 @@ show4:
return -1
endi
sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
if $data3_1 != 4 then
goto show4
endi
if $data3_192.168.0.2 != 2 then
if $data3_2 != 2 then
goto show4
endi
if $data3_192.168.0.3 != 2 then
if $data3_3 != 2 then
goto show4
endi

@ -141,20 +141,20 @@ show5:
return -1
endi
sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
if $data3_192.168.0.1 != 3 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
if $data3_1 != 3 then
goto show5
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
goto show5
endi
if $data3_192.168.0.3 != 1 then
if $data3_3 != 1 then
goto show5
endi

system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec_up.sh -n dnode2 -s stop -x SIGINT

print ========== step6
sql select * from d1.t1 order by t desc
@ -22,19 +22,19 @@ system sh/cfg.sh -n dnode1 -c monitor -v 1
system sh/cfg.sh -n dnode2 -c monitor -v 0

print ========== step1
system sh/exec.sh -n dnode1 -s start
system sh/exec_up.sh -n dnode1 -s start
sql connect
sleep 5000

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
if $data3_192.168.0.1 != 3 then
print 192.168.0.1 openVnodes $data3_1
if $data3_1 != 3 then
return -1
endi

print ========== step2
sql create dnode 192.168.0.2
system sh/exec.sh -n dnode2 -s start
system sh/exec_up.sh -n dnode2 -s start

$x = 0
show2:

@ -45,12 +45,12 @@ show2:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 4 then
goto show2
endi
if $data3_192.168.0.2 != 3 then
if $data3_2 != 3 then
goto show2
endi
@ -29,10 +29,10 @@ system sh/cfg.sh -n dnode2 -c clog -v 1
system sh/cfg.sh -n dnode3 -c clog -v 1

print ========== step1
system sh/exec.sh -n dnode1 -s start
system sh/exec_up.sh -n dnode1 -s start
sql connect
sql create dnode 192.168.0.2
system sh/exec.sh -n dnode2 -s start
system sh/exec_up.sh -n dnode2 -s start
sleep 3000

sql show dnodes

@ -44,7 +44,7 @@ if $data4_192.168.0.2 != ready then
endi

print ========== step2
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec_up.sh -n dnode2 -s stop -x SIGINT
sleep 6000

sql show dnodes
@ -29,10 +29,10 @@ system sh/cfg.sh -n dnode2 -c clog -v 1
system sh/cfg.sh -n dnode3 -c clog -v 1

print ========== step1
system sh/exec.sh -n dnode1 -s start
system sh/exec_up.sh -n dnode1 -s start
sql connect
sql create dnode 192.168.0.2
system sh/exec.sh -n dnode2 -s start
system sh/exec_up.sh -n dnode2 -s start
sleep 3000

sql create database d1 replica 2 tables 4

@ -48,7 +48,7 @@ if $data4_192.168.0.2 != ready then
endi

print ========== step2
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec_up.sh -n dnode2 -s stop -x SIGINT
sleep 5000

sql show dnodes

@ -72,7 +72,7 @@ endi

print ========== step4
sql create dnode 192.168.0.3
system sh/exec.sh -n dnode3 -s start
system sh/exec_up.sh -n dnode3 -s start
sql drop dnode 192.168.0.2

sleep 5000
@ -31,7 +31,7 @@ system sh/cfg.sh -n dnode3 -c clog -v 1
system sh/cfg.sh -n dnode4 -c clog -v 1

print ========== step1
system sh/exec.sh -n dnode1 -s start
system sh/exec_up.sh -n dnode1 -s start
sql connect
sleep 3000

@ -52,14 +52,14 @@ sql insert into d2.t2 values(now+4s, 22)
sql insert into d2.t2 values(now+5s, 21)

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
if $data3_192.168.0.1 != 2 then
print 192.168.0.1 openVnodes $data3_1
if $data3_1 != 2 then
return -1
endi

print ========== step2
sql create dnode 192.168.0.2
system sh/exec.sh -n dnode2 -s start
system sh/exec_up.sh -n dnode2 -s start
sleep 9000

sql create database d3 replica 2 tables 4

@ -79,12 +79,12 @@ show2:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
if $data3_192.168.0.1 != 3 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 3 then
goto show2
endi
if $data3_192.168.0.2 != 1 then
if $data3_2 != 1 then
goto show2
endi
@ -101,12 +101,12 @@ show3:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2 $data5_192.168.0.2
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2 $data5_192.168.0.2

print ========== step4
sql create dnode 192.168.0.3
system sh/exec.sh -n dnode3 -s start
system sh/exec_up.sh -n dnode3 -s start

$x = 0
show4:

@ -117,18 +117,18 @@ show4:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
if $data3_192.168.0.2 != null then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
if $data3_2 != null then
goto show4
endi

system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec_up.sh -n dnode2 -s stop -x SIGINT

print ========== step5
sql create dnode 192.168.0.4
system sh/exec.sh -n dnode4 -s start
system sh/exec_up.sh -n dnode4 -s start

$x = 0
show5:

@ -138,20 +138,20 @@ show5:
return -1
endi
sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.4 freeVnodes $data3_192.168.0.4
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
if $data3_1 != 4 then
goto show5
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
goto show5
endi
if $data3_192.168.0.3 != 2 then
if $data3_3 != 2 then
goto show5
endi
if $data3_192.168.0.4 != 2 then
if $data3_4 != 2 then
goto show5
endi
@ -31,7 +31,7 @@ system sh/cfg.sh -n dnode3 -c clog -v 1
system sh/cfg.sh -n dnode4 -c clog -v 1

print ========== step1
system sh/exec.sh -n dnode1 -s start
system sh/exec_up.sh -n dnode1 -s start
sql connect
sleep 3000

@ -52,14 +52,14 @@ sql insert into d2.t2 values(now+4s, 22)
sql insert into d2.t2 values(now+5s, 21)

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
if $data3_192.168.0.1 != 2 then
print 192.168.0.1 openVnodes $data3_1
if $data3_1 != 2 then
return -1
endi

print ========== step2
sql create dnode 192.168.0.2
system sh/exec.sh -n dnode2 -s start
system sh/exec_up.sh -n dnode2 -s start
sleep 9000

sql create database d3 replica 2 tables 4

@ -79,17 +79,17 @@ show2:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
if $data3_192.168.0.1 != 3 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 3 then
goto show2
endi
if $data3_192.168.0.2 != 1 then
if $data3_2 != 1 then
goto show2
endi

print ========== step3
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec_up.sh -n dnode2 -s stop -x SIGINT
sql drop dnode 192.168.0.2
sleep 7001

@ -102,12 +102,12 @@ show3:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2 $data5_192.168.0.2
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2 $data5_192.168.0.2

print ========== step4
sql create dnode 192.168.0.3
system sh/exec.sh -n dnode3 -s start
system sh/exec_up.sh -n dnode3 -s start

$x = 0
show4:

@ -118,16 +118,16 @@ show4:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
if $data3_192.168.0.2 != null then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
if $data3_2 != null then
goto show4
endi
if $data3_192.168.0.1 != 3 then
if $data3_1 != 3 then
goto show4
endi
if $data3_192.168.0.3 != 1 then
if $data3_3 != 1 then
goto show4
endi
@ -31,7 +31,7 @@ system sh/cfg.sh -n dnode3 -c clog -v 1
system sh/cfg.sh -n dnode4 -c clog -v 1

print ========== step1
system sh/exec.sh -n dnode1 -s start
system sh/exec_up.sh -n dnode1 -s start
sql connect

sql create database d1 tables 4

@ -43,14 +43,14 @@ sql insert into d1.t1 values(now+4s, 12)
sql insert into d1.t1 values(now+5s, 11)

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
if $data3_192.168.0.1 != 3 then
print 192.168.0.1 openVnodes $data3_1
if $data3_1 != 3 then
return -1
endi

print ========== step2
sql create dnode 192.168.0.2
system sh/exec.sh -n dnode2 -s start
system sh/exec_up.sh -n dnode2 -s start

$x = 0
show2:

@ -60,12 +60,12 @@ show2:
return -1
endi
sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 4 then
goto show2
endi
if $data3_192.168.0.2 != 3 then
if $data3_2 != 3 then
goto show2
endi

@ -81,12 +81,12 @@ sql insert into d2.t2 values(now+5s, 21)

$x = 0
sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 4 then
return -1
endi
if $data3_192.168.0.2 != 2 then
if $data3_2 != 2 then
return -1
endi

@ -101,19 +101,19 @@ show4:
return -1
endi
sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
if $data3_192.168.0.1 != 2 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 2 then
goto show4
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
goto show4
endi
if $rows != 1 then
goto show4
endi

system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec_up.sh -n dnode2 -s stop -x SIGINT

print ========== step5
sleep 2000

@ -125,7 +125,7 @@ system sh/cfg.sh -n dnode2 -c balanceMonitorInterval -v 1
system sh/cfg.sh -n dnode2 -c balanceStartInterval -v 10
system sh/cfg.sh -n dnode2 -c mgmtEqualVnodeNum -v 4
system sh/cfg.sh -n dnode2 -c clog -v 1
system sh/exec.sh -n dnode2 -s start
system sh/exec_up.sh -n dnode2 -s start

$x = 0
show5:
@ -135,12 +135,12 @@ show5:
return -1
endi
sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 4 then
goto show5
endi
if $data3_192.168.0.2 != 2 then
if $data3_2 != 2 then
goto show5
endi

@ -154,18 +154,18 @@ sql insert into d3.t3 values(now+4s, 32)
sql insert into d3.t3 values(now+5s, 31)

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 4 then
return -1
endi
if $data3_192.168.0.2 != 1 then
if $data3_2 != 1 then
return -1
endi

print ========== step7
sql create dnode 192.168.0.3
system sh/exec.sh -n dnode3 -s start
system sh/exec_up.sh -n dnode3 -s start

$x = 0
show7:

@ -176,16 +176,16 @@ show7:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
if $data3_1 != 4 then
goto show7
endi
if $data3_192.168.0.2 != 2 then
if $data3_2 != 2 then
goto show7
endi
if $data3_192.168.0.3 != 3 then
if $data3_3 != 3 then
goto show7
endi

@ -206,16 +206,16 @@ show8:
return -1
endi
sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
if $data3_1 != 4 then
goto show8
endi
if $data3_192.168.0.2 != 2 then
if $data3_2 != 2 then
goto show8
endi
if $data3_192.168.0.3 != 2 then
if $data3_3 != 2 then
goto show8
endi

@ -231,20 +231,20 @@ show9:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
if $data3_1 != 4 then
goto show9
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
goto show9
endi
if $data3_192.168.0.3 != 0 then
if $data3_3 != 0 then
goto show9
endi

system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec_up.sh -n dnode2 -s stop -x SIGINT

print ========== step10
sql select * from d1.t1 order by t desc