Merge branch 'develop' into feature/python-test-no-sudo
commit 52ceb2a9e7

.travis.yml | 47
@@ -24,10 +24,11 @@ matrix:
           - python-setuptools
           - python3-pip
           - python3-setuptools
+          - valgrind

       before_install:
         - sudo apt update -y -qq
-        - sudo apt install -y net-tools python-pip python-setuptools python3-pip python3-setuptools
+        - sudo apt install -y net-tools python-pip python-setuptools python3-pip python3-setuptools valgrind

       before_script:
         - cd ${TRAVIS_BUILD_DIR}

@@ -43,16 +44,32 @@ matrix:
           case $TRAVIS_OS_NAME in
             linux)
               cd ${TRAVIS_BUILD_DIR}/debug
-              sudo make install || exit $?
+              sudo make install || travis_terminate $?

               pip install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python2/
               pip3 install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python3/

               cd ${TRAVIS_BUILD_DIR}/tests
-              bash ./test-all.sh
+              ./test-all.sh || travis_terminate $?

-              if [ "$?" -ne "0" ]; then
-                exit $?
+              cd ${TRAVIS_BUILD_DIR}/tests/pytest
+              ./simpletest.sh -g 2>&1 | tee mem-error-out.txt
+              sleep 1
+
+              # Color setting
+              RED='\033[0;31m'
+              GREEN='\033[1;32m'
+              GREEN_DARK='\033[0;32m'
+              GREEN_UNDERLINE='\033[4;32m'
+              NC='\033[0m'
+
+              memError=`grep -m 1 'ERROR SUMMARY' mem-error-out.txt | awk '{print $4}'`
+
+              if [ -n "$memError" ]; then
+                if [ "$memError" -gt 23 ]; then
+                  echo -e "${RED} ## Memory errors number valgrind reports is $memError. More than our threshold! ## ${NC} "
+                  travis_terminate $memError
+                fi
               fi

               ;;
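Note on the valgrind gate added above: the error count is pulled out of valgrind's one-line summary, and field 4 of that whitespace-split line is the count. A minimal sketch with an illustrative summary line (not real run output):

    # Hypothetical summary line; the whitespace-split fields are
    # "==12345==", "ERROR", "SUMMARY:", "24", ... so awk prints $4.
    echo "==12345== ERROR SUMMARY: 24 errors from 8 contexts (suppressed: 0 from 0)" > mem-error-out.txt
    memError=`grep -m 1 'ERROR SUMMARY' mem-error-out.txt | awk '{print $4}'`
    echo $memError    # 24, which exceeds the threshold of 23 and would terminate the build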
@@ -74,20 +91,20 @@ matrix:
       # GitHub project metadata
       # ** specific to your project **
       project:
-        name: sangshuduo/TDengine
+        name: TDengine
         version: 2.x
-        description: sangshuduo/TDengine
+        description: taosdata/TDengine

       # Where email notification of build analysis results will be sent
-      notification_email: sangshuduo@gmail.com
+      notification_email: sdsang@taosdata.com

       # Commands to prepare for build_command
       # ** likely specific to your build **
-      build_command_prepend: cmake ..
+      build_command_prepend: cmake .

       # The command that will be added as an argument to "cov-build" to compile your project for analysis,
       # ** likely specific to your build **
-      build_command: cmake --build .
+      build_command: make

       # Pattern to match selecting branches that will run analysis. We recommend leaving this set to 'coverity_scan'.
       # Take care in resource usage, and consider the build frequency allowances per
@@ -132,17 +149,17 @@ matrix:
           case $TRAVIS_OS_NAME in
             linux)
               cd ${TRAVIS_BUILD_DIR}/debug
-              sudo make install || exit $?
+              sudo make install || travis_terminate $?

               pip install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python2/
               pip3 install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python3/

               cd ${TRAVIS_BUILD_DIR}/tests

-              bash ./test-all.sh
+              ./test-all.sh

               if [ "$?" -ne "0" ]; then
-                exit $?
+                travis_terminate $?
               fi

               sudo pkill taosd
@@ -150,7 +167,7 @@ matrix:

               cd ${TRAVIS_BUILD_DIR}
               lcov -d . --capture --rc lcov_branch_coverage=1 -o coverage.info
-              lcov -l --rc lcov_branch_coverage=1 coverage.info || exit $?
+              lcov -l --rc lcov_branch_coverage=1 coverage.info || travis_terminate $?

               gem install coveralls-lcov

@@ -166,7 +183,6 @@ matrix:
                 echo -e "${GREEN} ## Uploaded to Coveralls.io! ## ${NC}"
               else
                 echo -e "${RED} ## Coveralls.io not collect coverage report! ## ${NC} "
-                exit $?
               fi

               bash <(curl -s https://codecov.io/bash) -y .codecov.yml -f coverage.info
@@ -174,7 +190,6 @@ matrix:
                 echo -e "${GREEN} ## Uploaded to Codecov! ## ${NC} "
               else
                 echo -e "${RED} ## Codecov did not collect coverage report! ## ${NC} "
-                exit $?
               fi

               ;;

@@ -465,7 +465,7 @@ extern void * tscQhandle;
 extern int tscKeepConn[];
 extern int tsInsertHeadSize;
 extern int tscNumOfThreads;
-extern SRpcIpSet tscMgmtIpList;
+extern SRpcIpSet tscMgmtIpSet;

 typedef void (*__async_cb_func_t)(void *param, TAOS_RES *tres, int numOfRows);

@@ -30,7 +30,7 @@

 #define TSC_MGMT_VNODE 999

-SRpcIpSet tscMgmtIpList;
+SRpcIpSet tscMgmtIpSet;
 SRpcIpSet tscDnodeIpSet;

 int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo) = {0};
@@ -58,35 +58,40 @@ static void tscSetDnodeIpList(SSqlObj* pSql, STableMeta* pTableMeta) {
 }

 void tscPrintMgmtIp() {
-  if (tscMgmtIpList.numOfIps <= 0) {
-    tscError("invalid mgmt IP list:%d", tscMgmtIpList.numOfIps);
+  if (tscMgmtIpSet.numOfIps <= 0) {
+    tscError("invalid mgmt IP list:%d", tscMgmtIpSet.numOfIps);
   } else {
-    for (int i = 0; i < tscMgmtIpList.numOfIps; ++i) {
-      tscTrace("mgmt index:%d ip:%d", i, tscMgmtIpList.ip[i]);
+    for (int i = 0; i < tscMgmtIpSet.numOfIps; ++i) {
+      tscTrace("mgmt index:%d ip:%d", i, tscMgmtIpSet.ip[i]);
     }
   }
 }

 void tscSetMgmtIpListFromCluster(SRpcIpSet *pIpList) {
-  tscMgmtIpList.numOfIps = htons(pIpList->numOfIps);
-  tscMgmtIpList.inUse = htons(pIpList->inUse);
-  tscMgmtIpList.port = htons(pIpList->port);
-  for (int32_t i = 0; i <tscMgmtIpList.numOfIps; ++i) {
-    tscMgmtIpList.ip[i] = pIpList->ip[i];
+  tscMgmtIpSet.numOfIps = pIpList->numOfIps;
+  tscMgmtIpSet.inUse = pIpList->inUse;
+  tscMgmtIpSet.port = htons(pIpList->port);
+  for (int32_t i = 0; i < tscMgmtIpSet.numOfIps; ++i) {
+    tscMgmtIpSet.ip[i] = htonl(pIpList->ip[i]);
   }
 }

 void tscSetMgmtIpListFromEdge() {
-  if (tscMgmtIpList.numOfIps != 1) {
-    tscMgmtIpList.numOfIps = 1;
-    tscMgmtIpList.inUse = 0;
-    tscMgmtIpList.port = tsMnodeShellPort;
-    tscMgmtIpList.ip[0] = inet_addr(tsMasterIp);
+  if (tscMgmtIpSet.numOfIps != 1) {
+    tscMgmtIpSet.numOfIps = 1;
+    tscMgmtIpSet.inUse = 0;
+    tscMgmtIpSet.port = tsMnodeShellPort;
+    tscMgmtIpSet.ip[0] = inet_addr(tsMasterIp);
     tscTrace("edge mgmt IP list:");
     tscPrintMgmtIp();
   }
 }

+void tscUpdateIpSet(void *ahandle, SRpcIpSet *pIpSet) {
+  tscTrace("mgmt IP list is changed for ufp is called");
+  tscMgmtIpSet = *pIpSet;
+}
+
 void tscSetMgmtIpList(SRpcIpSet *pIpList) {
   /*
    * The iplist returned by the cluster edition is the current management nodes
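Note on tscSetMgmtIpListFromCluster above: the new code stops byte-swapping numOfIps and inUse (presumably single-byte or already host-order fields) and instead swaps each 32-bit address with htonl. A hedged, standalone illustration of why the width of the conversion matters — this is not TDengine code:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      /* On a little-endian host htonl/htons reverse the bytes; on a
         big-endian host they are no-ops. Swapping a field that was never
         in network order, or with the wrong width, corrupts it. */
      uint32_t ip = 0x0A000001;  /* 10.0.0.1 in host order */
      printf("ip   %08x -> %08x\n", ip, htonl(ip));
      printf("port %u -> %u\n", 6030, htons(6030));
      return 0;
    }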
@@ -109,7 +114,7 @@ void tscSetMgmtIpList(SRpcIpSet *pIpList) {
 UNUSED_FUNC
 static int32_t tscGetMgmtConnMaxRetryTimes() {
   int32_t factor = 2;
-  return tscMgmtIpList.numOfIps * factor;
+  return tscMgmtIpSet.numOfIps * factor;
 }

 void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) {
@@ -204,7 +209,7 @@ int tscSendMsgToServer(SSqlObj *pSql) {
     };
     rpcSendRequest(pVnodeConn, &pSql->ipList, &rpcMsg);
   } else {
-    pSql->ipList = tscMgmtIpList;
+    pSql->ipList = tscMgmtIpSet;
     pSql->ipList.port = tsMnodeShellPort;

     tscTrace("%p msg:%s is sent to server %d", pSql, taosMsg[pSql->cmd.msgType], pSql->ipList.port);
@@ -425,7 +430,7 @@ int tscProcessSql(SSqlObj *pSql) {
       return pSql->res.code;
     }
   } else if (pSql->cmd.command < TSDB_SQL_LOCAL) {
-    pSql->ipList = tscMgmtIpList;
+    pSql->ipList = tscMgmtIpSet;
   } else { // local handler
     return (*tscProcessMsgRsp[pCmd->command])(pSql);
   }
@@ -2224,10 +2229,7 @@ int tscProcessConnectRsp(SSqlObj *pSql) {
   assert(len <= tListLen(pObj->db));
   strncpy(pObj->db, temp, tListLen(pObj->db));

-  // SIpList * pIpList;
-  // char *rsp = pRes->pRsp + sizeof(SCMConnectRsp);
-  // pIpList = (SIpList *)rsp;
-  // tscSetMgmtIpList(pIpList);
+  tscSetMgmtIpList(&pConnect->ipList);

   strcpy(pObj->sversion, pConnect->serverVersion);
   pObj->writeAuth = pConnect->writeAuth;

@@ -72,23 +72,23 @@ STscObj *taosConnectImpl(const char *ip, const char *user, const char *pass, con
   }

   if (ip && ip[0]) {
-    tscMgmtIpList.inUse = 0;
-    tscMgmtIpList.port = tsMnodeShellPort;
-    tscMgmtIpList.numOfIps = 1;
-    tscMgmtIpList.ip[0] = inet_addr(ip);
+    tscMgmtIpSet.inUse = 0;
+    tscMgmtIpSet.port = tsMnodeShellPort;
+    tscMgmtIpSet.numOfIps = 1;
+    tscMgmtIpSet.ip[0] = inet_addr(ip);

     if (tsMasterIp[0] && strcmp(ip, tsMasterIp) != 0) {
-      tscMgmtIpList.numOfIps = 2;
-      tscMgmtIpList.ip[1] = inet_addr(tsMasterIp);
+      tscMgmtIpSet.numOfIps = 2;
+      tscMgmtIpSet.ip[1] = inet_addr(tsMasterIp);
     }

     if (tsSecondIp[0] && strcmp(tsSecondIp, tsMasterIp) != 0) {
-      tscMgmtIpList.numOfIps = 3;
-      tscMgmtIpList.ip[2] = inet_addr(tsSecondIp);
+      tscMgmtIpSet.numOfIps = 3;
+      tscMgmtIpSet.ip[2] = inet_addr(tsSecondIp);
     }
   }

-  tscMgmtIpList.port = port ? port : tsMnodeShellPort;
+  tscMgmtIpSet.port = port ? port : tsMnodeShellPort;

   STscObj *pObj = (STscObj *)calloc(1, sizeof(STscObj));
   if (NULL == pObj) {

@@ -42,11 +42,13 @@ void * tscTmr;
 void * tscQhandle;
 void * tscCheckDiskUsageTmr;
 int    tsInsertHeadSize;
+char   tsLastUser[TSDB_USER_LEN + 1];

 int tscNumOfThreads;

 static pthread_once_t tscinit = PTHREAD_ONCE_INIT;
 void taosInitNote(int numOfNoteLines, int maxNotes, char* lable);
+void tscUpdateIpSet(void *ahandle, SRpcIpSet *pIpSet);

 void tscCheckDiskUsage(void *para, void *unused) {
   taosGetDisk();
@@ -65,6 +67,7 @@ int32_t tscInitRpc(const char *user, const char *secret) {
     rpcInit.label = "TSC-vnode";
     rpcInit.numOfThreads = tscNumOfThreads;
     rpcInit.cfp = tscProcessMsgFromServer;
+    rpcInit.ufp = tscUpdateIpSet;
     rpcInit.sessions = tsMaxVnodeConnections;
     rpcInit.connType = TAOS_CONN_CLIENT;
     rpcInit.user = (char*)user;
@@ -79,6 +82,13 @@ int32_t tscInitRpc(const char *user, const char *secret) {
     }
   }

+  // not stop service, switch users
+  if (strcmp(tsLastUser, user) != 0 && pTscMgmtConn != NULL) {
+    tscTrace("switch user from %s to %s", user, tsLastUser);
+    rpcClose(pTscMgmtConn);
+    pTscMgmtConn = NULL;
+  }
+
   if (pTscMgmtConn == NULL) {
     memset(&rpcInit, 0, sizeof(rpcInit));
     rpcInit.localIp = tsLocalIp;
@@ -92,6 +102,7 @@ int32_t tscInitRpc(const char *user, const char *secret) {
     rpcInit.user = (char*)user;
     rpcInit.ckey = "key";
     rpcInit.secret = secretEncrypt;
+    strcpy(tsLastUser, user);

     pTscMgmtConn = rpcOpen(&rpcInit);
     if (pTscMgmtConn == NULL) {
@@ -145,14 +156,14 @@ void taos_init_imp() {
     taosInitNote(tsNumOfLogLines / 10, 1, (char*)"tsc_note");
   }

-  tscMgmtIpList.inUse = 0;
-  tscMgmtIpList.port = tsMnodeShellPort;
-  tscMgmtIpList.numOfIps = 1;
-  tscMgmtIpList.ip[0] = inet_addr(tsMasterIp);
+  tscMgmtIpSet.inUse = 0;
+  tscMgmtIpSet.port = tsMnodeShellPort;
+  tscMgmtIpSet.numOfIps = 1;
+  tscMgmtIpSet.ip[0] = inet_addr(tsMasterIp);

   if (tsSecondIp[0] && strcmp(tsSecondIp, tsMasterIp) != 0) {
-    tscMgmtIpList.numOfIps = 2;
-    tscMgmtIpList.ip[1] = inet_addr(tsSecondIp);
+    tscMgmtIpSet.numOfIps = 2;
+    tscMgmtIpSet.ip[1] = inet_addr(tsSecondIp);
   }

   tscInitMsgsFp();

@@ -54,6 +54,11 @@ static SRpcIpSet tsMnodeIpSet = {0};
 static SDMMnodeInfos tsMnodeInfos = {0};
 static SDMDnodeCfg tsDnodeCfg = {0};

+void dnodeUpdateIpSet(void *ahandle, SRpcIpSet *pIpSet) {
+  dTrace("mgmt IP list is changed for ufp is called");
+  tsMnodeIpSet = *pIpSet;
+}
+
 int32_t dnodeInitMClient() {
   dnodeReadDnodeCfg();
   tsRebootTime = taosGetTimestampSec();
@@ -90,6 +95,7 @@ int32_t dnodeInitMClient() {
   rpcInit.label = "DND-MC";
   rpcInit.numOfThreads = 1;
   rpcInit.cfp = dnodeProcessRspFromMnode;
+  rpcInit.ufp = dnodeUpdateIpSet;
   rpcInit.sessions = 100;
   rpcInit.connType = TAOS_CONN_CLIENT;
   rpcInit.idleTime = tsShellActivityTimer * 2000;
@@ -292,7 +298,7 @@ static bool dnodeReadMnodeInfos() {
     tsMnodeInfos.nodeInfos[i].syncPort = (uint16_t)syncPort->valueint;

     cJSON *nodeName = cJSON_GetObjectItem(nodeInfo, "nodeName");
-    if (!nodeIp || nodeName->type != cJSON_String || nodeName->valuestring == NULL) {
+    if (!nodeName || nodeName->type != cJSON_String || nodeName->valuestring == NULL) {
       dError("failed to read mnode mgmtIpList.json, nodeName not found");
       goto PARSE_OVER;
     }
@@ -304,7 +310,7 @@ static bool dnodeReadMnodeInfos() {
   dPrint("read mnode iplist successed, numOfIps:%d inUse:%d", tsMnodeInfos.nodeNum, tsMnodeInfos.inUse);
   for (int32_t i = 0; i < tsMnodeInfos.nodeNum; i++) {
     dPrint("mnode:%d, ip:%s:%u name:%s", tsMnodeInfos.nodeInfos[i].nodeId,
-           taosIpStr(tsMnodeInfos.nodeInfos[i].nodeId), tsMnodeInfos.nodeInfos[i].nodePort,
+           taosIpStr(tsMnodeInfos.nodeInfos[i].nodeIp), tsMnodeInfos.nodeInfos[i].nodePort,
            tsMnodeInfos.nodeInfos[i].nodeName);
   }

|
@ -33,7 +33,6 @@ static int32_t dnodeOpenVnodes();
|
|||
static void dnodeCloseVnodes();
|
||||
static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *pMsg);
|
||||
static int32_t dnodeProcessDropVnodeMsg(SRpcMsg *pMsg);
|
||||
static int32_t dnodeProcessAlterVnodeMsg(SRpcMsg *pMsg);
|
||||
static int32_t dnodeProcessAlterStreamMsg(SRpcMsg *pMsg);
|
||||
static int32_t dnodeProcessConfigDnodeMsg(SRpcMsg *pMsg);
|
||||
static int32_t (*dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *pMsg);
|
||||
|
@ -41,7 +40,6 @@ static int32_t (*dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *pMsg);
|
|||
int32_t dnodeInitMgmt() {
|
||||
dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = dnodeProcessCreateVnodeMsg;
|
||||
dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_DROP_VNODE] = dnodeProcessDropVnodeMsg;
|
||||
dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_ALTER_VNODE] = dnodeProcessAlterVnodeMsg;
|
||||
dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = dnodeProcessAlterStreamMsg;
|
||||
dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CONFIG_DNODE] = dnodeProcessConfigDnodeMsg;
|
||||
|
||||
|
@ -129,25 +127,31 @@ static void dnodeCloseVnodes() {
|
|||
|
||||
static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *rpcMsg) {
|
||||
SMDCreateVnodeMsg *pCreate = rpcMsg->pCont;
|
||||
pCreate->cfg.vgId = htonl(pCreate->cfg.vgId);
|
||||
pCreate->cfg.maxSessions = htonl(pCreate->cfg.maxSessions);
|
||||
pCreate->cfg.cacheBlockSize = htonl(pCreate->cfg.cacheBlockSize);
|
||||
pCreate->cfg.daysPerFile = htonl(pCreate->cfg.daysPerFile);
|
||||
pCreate->cfg.daysToKeep1 = htonl(pCreate->cfg.daysToKeep1);
|
||||
pCreate->cfg.daysToKeep2 = htonl(pCreate->cfg.daysToKeep2);
|
||||
pCreate->cfg.daysToKeep = htonl(pCreate->cfg.daysToKeep);
|
||||
pCreate->cfg.commitTime = htonl(pCreate->cfg.commitTime);
|
||||
pCreate->cfg.rowsInFileBlock = htonl(pCreate->cfg.rowsInFileBlock);
|
||||
pCreate->cfg.blocksPerTable = htons(pCreate->cfg.blocksPerTable);
|
||||
pCreate->cfg.cacheNumOfBlocks.totalBlocks = htonl(pCreate->cfg.cacheNumOfBlocks.totalBlocks);
|
||||
|
||||
pCreate->cfg.vgId = htonl(pCreate->cfg.vgId);
|
||||
pCreate->cfg.maxTables = htonl(pCreate->cfg.maxTables);
|
||||
pCreate->cfg.maxCacheSize = htobe64(pCreate->cfg.maxCacheSize);
|
||||
pCreate->cfg.minRowsPerFileBlock = htonl(pCreate->cfg.minRowsPerFileBlock);
|
||||
pCreate->cfg.maxRowsPerFileBlock = htonl(pCreate->cfg.maxRowsPerFileBlock);
|
||||
pCreate->cfg.daysPerFile = htonl(pCreate->cfg.daysPerFile);
|
||||
pCreate->cfg.daysToKeep1 = htonl(pCreate->cfg.daysToKeep1);
|
||||
pCreate->cfg.daysToKeep2 = htonl(pCreate->cfg.daysToKeep2);
|
||||
pCreate->cfg.daysToKeep = htonl(pCreate->cfg.daysToKeep);
|
||||
pCreate->cfg.commitTime = htonl(pCreate->cfg.commitTime);
|
||||
pCreate->cfg.arbitratorIp = htonl(pCreate->cfg.arbitratorIp);
|
||||
|
||||
for (int32_t j = 0; j < pCreate->cfg.replications; ++j) {
|
||||
pCreate->vpeerDesc[j].vgId = htonl(pCreate->vpeerDesc[j].vgId);
|
||||
pCreate->vpeerDesc[j].dnodeId = htonl(pCreate->vpeerDesc[j].dnodeId);
|
||||
pCreate->vpeerDesc[j].ip = htonl(pCreate->vpeerDesc[j].ip);
|
||||
pCreate->nodes[j].nodeId = htonl(pCreate->nodes[j].nodeId);
|
||||
pCreate->nodes[j].nodeIp = htonl(pCreate->nodes[j].nodeIp);
|
||||
}
|
||||
|
||||
return vnodeCreate(pCreate);
|
||||
void *pVnode = vnodeAccquireVnode(pCreate->cfg.vgId);
|
||||
if (pVnode != NULL) {
|
||||
int32_t code = vnodeAlter(pVnode, pCreate);
|
||||
vnodeRelease(pVnode);
|
||||
return code;
|
||||
} else {
|
||||
return vnodeCreate(pCreate);
|
||||
}
|
||||
}
|
||||
|
||||
static int32_t dnodeProcessDropVnodeMsg(SRpcMsg *rpcMsg) {
|
||||
|
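Note on the field conversions above: every multi-byte field of an incoming message is normalized from network to host order in place, exactly once, with the conversion width matching the field width. A hedged, standalone sketch of that rule — DemoCfg stands in for SMDVnodeCfg and is not the real struct:

    #include <arpa/inet.h>   /* htonl */
    #include <endian.h>      /* htobe64 on glibc; other platforms differ */
    #include <stdint.h>

    typedef struct {
      int32_t vgId;          /* 32-bit wire field */
      int64_t maxCacheSize;  /* 64-bit wire field */
    } DemoCfg;

    static void demoNormalize(DemoCfg *cfg) {
      cfg->vgId         = htonl(cfg->vgId);           /* 32-bit swap */
      cfg->maxCacheSize = htobe64(cfg->maxCacheSize); /* 64-bit swap; using
                                                         htonl here would
                                                         corrupt the value */
    }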
@@ -157,15 +161,6 @@ static int32_t dnodeProcessDropVnodeMsg(SRpcMsg *rpcMsg) {
   return vnodeDrop(pDrop->vgId);
 }

-static int32_t dnodeProcessAlterVnodeMsg(SRpcMsg *rpcMsg) {
-  SMDCreateVnodeMsg *pCreate = rpcMsg->pCont;
-  pCreate->cfg.vgId        = htonl(pCreate->cfg.vgId);
-  pCreate->cfg.maxSessions = htonl(pCreate->cfg.maxSessions);
-  pCreate->cfg.daysPerFile = htonl(pCreate->cfg.daysPerFile);
-
-  return 0;
-}
-
 static int32_t dnodeProcessAlterStreamMsg(SRpcMsg *pMsg) {
 //  SMDAlterStreamMsg *pStream = pCont;
 //  pStream->uid = htobe64(pStream->uid);

@@ -33,7 +33,6 @@ int32_t dnodeInitMnode() {
   dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_DROP_STABLE]  = dnodeWrite;
   dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = dnodeMgmt;
   dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_DROP_VNODE]   = dnodeMgmt;
-  dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_ALTER_VNODE]  = dnodeMgmt;
   dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = dnodeMgmt;
   dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CONFIG_DNODE] = dnodeMgmt;

@@ -75,14 +75,19 @@ TAOS_DEFINE_ERROR(TSDB_CODE_NOT_CONFIGURED, 0, 27, "not configured")
 TAOS_DEFINE_ERROR(TSDB_CODE_NODE_OFFLINE, 0, 28, "node offline")
 TAOS_DEFINE_ERROR(TSDB_CODE_NETWORK_UNAVAIL, 0, 29, "network unavailable")

-// db & user
+// db
 TAOS_DEFINE_ERROR(TSDB_CODE_DB_NOT_SELECTED, 0, 100, "db not selected")
 TAOS_DEFINE_ERROR(TSDB_CODE_DB_ALREADY_EXIST, 0, 101, "database aleady exist")
 TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_DB, 0, 102, "invalid database")
 TAOS_DEFINE_ERROR(TSDB_CODE_MONITOR_DB_FORBIDDEN, 0, 103, "monitor db forbidden")
-TAOS_DEFINE_ERROR(TSDB_CODE_USER_ALREADY_EXIST, 0, 104, "user already exist")
-TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_USER, 0, 105, "invalid user")
-TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_PASS, 0, 106, "invalid password")
+
+// user
+TAOS_DEFINE_ERROR(TSDB_CODE_USER_ALREADY_EXIST, 0, 150, "user already exist")
+TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_USER, 0, 151, "invalid user")
+TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_PASS, 0, 152, "invalid password")
+TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_USER_FORMAT, 0, 153, "invalid user format")
+TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_PASS_FORMAT, 0, 154, "invalid password format")
+TAOS_DEFINE_ERROR(TSDB_CODE_NO_USER_FROM_CONN, 0, 155, "can not get user from conn")

 // table
 TAOS_DEFINE_ERROR(TSDB_CODE_TABLE_ALREADY_EXIST, 0, 200, "table already exist")
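Side note on the renumbering above: user codes move out of the db range (100+) into their own range (150+), so the new format-validation codes can be added without colliding. Headers like this are usually consumed through an X-macro; the sketch below shows that idiom with a stand-in table, since the real TAOS_DEFINE_ERROR definition is outside this diff:

    #include <stdio.h>

    /* Stand-in error table; names and codes mirror two entries above. */
    #define DEMO_ERRORS(X)                                     \
      X(DEMO_INVALID_USER_FORMAT, 153, "invalid user format")  \
      X(DEMO_INVALID_PASS_FORMAT, 154, "invalid password format")

    #define AS_ENUM(name, code, msg) name = code,
    enum { DEMO_ERRORS(AS_ENUM) };

    int main(void) {
      printf("%d\n", DEMO_INVALID_USER_FORMAT);  /* prints 153 */
      return 0;
    }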
@@ -48,14 +48,12 @@ extern "C" {
 #define TSDB_MSG_TYPE_MD_CREATE_VNODE_RSP 16
 #define TSDB_MSG_TYPE_MD_DROP_VNODE 17
 #define TSDB_MSG_TYPE_MD_DROP_VNODE_RSP 18
-#define TSDB_MSG_TYPE_MD_ALTER_VNODE 19
-#define TSDB_MSG_TYPE_MD_ALTER_VNODE_RSP 20
-#define TSDB_MSG_TYPE_MD_DROP_STABLE 21
-#define TSDB_MSG_TYPE_MD_DROP_STABLE_RSP 22
-#define TSDB_MSG_TYPE_MD_ALTER_STREAM 23
-#define TSDB_MSG_TYPE_MD_ALTER_STREAM_RSP 24
-#define TSDB_MSG_TYPE_MD_CONFIG_DNODE 25
-#define TSDB_MSG_TYPE_MD_CONFIG_DNODE_RSP 26
+#define TSDB_MSG_TYPE_MD_DROP_STABLE 19
+#define TSDB_MSG_TYPE_MD_DROP_STABLE_RSP 20
+#define TSDB_MSG_TYPE_MD_ALTER_STREAM 21
+#define TSDB_MSG_TYPE_MD_ALTER_STREAM_RSP 22
+#define TSDB_MSG_TYPE_MD_CONFIG_DNODE 23
+#define TSDB_MSG_TYPE_MD_CONFIG_DNODE_RSP 24

 // message from client to mnode
 #define TSDB_MSG_TYPE_CM_CONNECT 31
@@ -245,12 +243,6 @@ typedef struct SSchema {
   int16_t bytes;
 } SSchema;

-typedef struct {
-  int32_t  vgId;
-  int32_t  dnodeId;
-  uint32_t ip;
-} SVnodeDesc;
-
 typedef struct {
   int32_t contLen;
   int32_t vgId;
@@ -518,12 +510,10 @@ typedef struct {
   uint8_t status;
   uint8_t role;
   uint8_t accessState;
+  uint8_t replica;
   uint8_t reserved[5];
 } SVnodeLoad;

-/*
- * NOTE: sizeof(SVnodeCfg) < TSDB_FILE_HEADER_LEN / 4
- */
 typedef struct {
   char acct[TSDB_USER_LEN + 1];
   char db[TSDB_DB_NAME_LEN + 1];
@@ -548,7 +538,7 @@ typedef struct {
   int8_t  loadLatest; // load into mem or not
   uint8_t precision;  // time resolution
   int8_t  reserved[16];
-} SVnodeCfg, SDbCfg, SCMCreateDbMsg, SCMAlterDbMsg;
+} SDbCfg, SCMCreateDbMsg, SCMAlterDbMsg;

 typedef struct {
   char db[TSDB_TABLE_ID_LEN + 1];
@@ -614,8 +604,35 @@ typedef struct {
 } SDMStatusRsp;

 typedef struct {
-  SVnodeCfg  cfg;
-  SVnodeDesc vpeerDesc[TSDB_MAX_MPEERS];
+  uint32_t vgId;
+  int32_t  maxTables;
+  int64_t  maxCacheSize;
+  int32_t  minRowsPerFileBlock;
+  int32_t  maxRowsPerFileBlock;
+  int32_t  daysPerFile;
+  int32_t  daysToKeep;
+  int32_t  daysToKeep1;
+  int32_t  daysToKeep2;
+  int32_t  commitTime;
+  uint8_t  precision;   // time resolution
+  int8_t   compression;
+  int8_t   wals;
+  int8_t   commitLog;
+  int8_t   replications;
+  int8_t   quorum;
+  uint32_t arbitratorIp;
+  int8_t   reserved[16];
+} SMDVnodeCfg;
+
+typedef struct {
+  int32_t  nodeId;
+  uint32_t nodeIp;
+  char     nodeName[TSDB_NODE_NAME_LEN + 1];
+} SMDVnodeDesc;
+
+typedef struct {
+  SMDVnodeCfg  cfg;
+  SMDVnodeDesc nodes[TSDB_MAX_MPEERS];
 } SMDCreateVnodeMsg;

 typedef struct {
@@ -673,9 +690,16 @@ typedef struct {
   int32_t metaElem[TSDB_MAX_JOIN_TABLE_NUM];
 } SSuperTableMetaMsg;

+typedef struct {
+  int32_t  nodeId;
+  uint32_t nodeIp;
+  uint16_t nodePort;
+} SVnodeDesc;
+
 typedef struct {
   SVnodeDesc vpeerDesc[TSDB_REPLICA_MAX_NUM];
   int16_t    index; // used locally
   int32_t    vgId;
   int32_t    numOfSids;
   int32_t    pSidExtInfoList[]; // offset value of STableIdInfo
 } SVnodeSidList;

@@ -38,6 +38,7 @@ typedef struct {
 int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg);
 int32_t vnodeDrop(int32_t vgId);
 int32_t vnodeOpen(int32_t vgId, char *rootDir);
+int32_t vnodeAlter(void *pVnode, SMDCreateVnodeMsg *pVnodeCfg);
 int32_t vnodeClose(int32_t vgId);

 void    vnodeRelease(void *pVnode);

@@ -24,10 +24,10 @@ extern "C" {

 int32_t mgmtInitAccts();
 void    mgmtCleanUpAccts();
-void *mgmtGetAcct(char *acctName);
+void *  mgmtGetAcct(char *acctName);
+void *  mgmtGetNextAcct(void *pNode, SAcctObj **pAcct);
 void    mgmtIncAcctRef(SAcctObj *pAcct);
 void    mgmtDecAcctRef(SAcctObj *pAcct);

 void mgmtAddDbToAcct(SAcctObj *pAcct, SDbObj *pDb);
 void mgmtDropDbFromAcct(SAcctObj *pAcct, SDbObj *pDb);
 void mgmtAddUserToAcct(SAcctObj *pAcct, SUserObj *pUser);

@@ -51,7 +51,6 @@ typedef struct SDnodeObj {
   int8_t     reserved[15];
   int8_t     updateEnd[1];
   int32_t    refCount;
-  SVnodeLoad vload[TSDB_MAX_VNODES];
   uint32_t   moduleStatus;
   uint32_t   lastReboot;      // time stamp for last reboot
   float      score;           // calc in balance function
@@ -72,13 +71,6 @@ typedef struct SMnodeObj {
   SDnodeObj *pDnode;
 } SMnodeObj;

-
-typedef struct {
-  int32_t  dnodeId;
-  uint32_t privateIp;
-  uint32_t publicIp;
-} SVnodeGid;
-
 typedef struct {
   char    tableId[TSDB_TABLE_ID_LEN + 1];
   int8_t  type;
@@ -120,24 +112,34 @@ typedef struct {
   SSuperTableObj *superTable;
 } SChildTableObj;

+typedef struct {
+  int32_t    dnodeId;
+  int8_t     role;
+  int8_t     reserved[3];
+  SDnodeObj *pDnode;
+} SVnodeGid;
+
 typedef struct SVgObj {
-  uint32_t       vgId;
-  char           dbName[TSDB_DB_NAME_LEN + 1];
-  int64_t        createdTime;
-  SVnodeGid      vnodeGid[TSDB_VNODES_SUPPORT];
-  int32_t        numOfVnodes;
-  int32_t        lbDnodeId;
-  int32_t        lbTime;
-  int8_t         status;
-  int8_t         inUse;
-  int8_t         reserved[13];
-  int8_t         updateEnd[1];
-  int32_t        refCount;
+  uint32_t  vgId;
+  char      dbName[TSDB_DB_NAME_LEN + 1];
+  int64_t   createdTime;
+  SVnodeGid vnodeGid[TSDB_VNODES_SUPPORT];
+  int32_t   numOfVnodes;
+  int32_t   lbDnodeId;
+  int32_t   lbTime;
+  int8_t    status;
+  int8_t    inUse;
+  int8_t    reserved[13];
+  int8_t    updateEnd[1];
+  int32_t   refCount;
   struct SVgObj *prev, *next;
   struct SDbObj *pDb;
-  int32_t        numOfTables;
-  void *         idPool;
-  SChildTableObj ** tableList;
+  int32_t   numOfTables;
+  int64_t   totalStorage;
+  int64_t   compStorage;
+  int64_t   pointsWritten;
+  void *    idPool;
+  SChildTableObj **tableList;
 } SVgObj;

 typedef struct SDbObj {

@@ -35,12 +35,15 @@ void mgmtMonitorDnodeModule();

 int32_t mgmtGetDnodesNum();
 void *  mgmtGetNextDnode(void *pNode, SDnodeObj **pDnode);
-void    mgmtReleaseDnode(SDnodeObj *pDnode);
+void    mgmtIncDnodeRef(SDnodeObj *pDnode);
+void    mgmtDecDnodeRef(SDnodeObj *pDnode);
 void *  mgmtGetDnode(int32_t dnodeId);
 void *  mgmtGetDnodeByIp(uint32_t ip);
 void    mgmtUpdateDnode(SDnodeObj *pDnode);
 int32_t mgmtDropDnode(SDnodeObj *pDnode);

+extern int32_t tsAccessSquence;
+
 #ifdef __cplusplus
 }
 #endif

@@ -40,7 +40,7 @@ void * mgmtGetNextMnode(void *pNode, struct SMnodeObj **pMnode);
 void    mgmtReleaseMnode(struct SMnodeObj *pMnode);

 char *  mgmtGetMnodeRoleStr();
-void    mgmtGetMnodeIpList(SRpcIpSet *ipSet, bool usePublicIp);
+void    mgmtGetMnodeIpSet(SRpcIpSet *ipSet, bool usePublicIp);
 void    mgmtGetMnodeInfos(void *mnodes);

 #ifdef __cplusplus

@@ -24,7 +24,9 @@ extern "C" {
 int32_t   mgmtInitUsers();
 void      mgmtCleanUpUsers();
 SUserObj *mgmtGetUser(char *name);
-void      mgmtReleaseUser(SUserObj *pUser);
+void *    mgmtGetNextUser(void *pNode, SUserObj **pUser);
+void      mgmtIncUserRef(SUserObj *pUser);
+void      mgmtDecUserRef(SUserObj *pUser);
 SUserObj *mgmtGetUserFromConn(void *pConn, bool *usePublicIp);
 int32_t   mgmtCreateUser(SAcctObj *pAcct, char *name, char *pass);
 void      mgmtDropAllUsers(SAcctObj *pAcct);

@@ -30,12 +30,13 @@ enum _TSDB_VG_STATUS {
 int32_t mgmtInitVgroups();
 void    mgmtCleanUpVgroups();
 SVgObj *mgmtGetVgroup(int32_t vgId);
-void    mgmtReleaseVgroup(SVgObj *pVgroup);
+void    mgmtIncVgroupRef(SVgObj *pVgroup);
+void    mgmtDecVgroupRef(SVgObj *pVgroup);
 void    mgmtDropAllVgroups(SDbObj *pDropDb);

 void *  mgmtGetNextVgroup(void *pNode, SVgObj **pVgroup);
 void    mgmtUpdateVgroup(SVgObj *pVgroup);
-void    mgmtUpdateVgroupStatus(SVgObj *pVgroup, int32_t dnodeId, SVnodeLoad *pVload);
+void    mgmtUpdateVgroupStatus(SVgObj *pVgroup, SDnodeObj *dnodeId, SVnodeLoad *pVload);

 void    mgmtCreateVgroup(SQueuedMsg *pMsg, SDbObj *pDb);
 void    mgmtDropVgroup(SVgObj *pVgroup, void *ahandle);
@@ -46,6 +47,7 @@ void mgmtAddTableIntoVgroup(SVgObj *pVgroup, SChildTableObj *pTable);
 void mgmtRemoveTableFromVgroup(SVgObj *pVgroup, SChildTableObj *pTable);
 void mgmtSendCreateVnodeMsg(SVgObj *pVgroup, SRpcIpSet *ipSet, void *ahandle);
 void mgmtSendDropVnodeMsg(int32_t vgId, SRpcIpSet *ipSet, void *ahandle);
 void mgmtSendCreateVgroupMsg(SVgObj *pVgroup, void *ahandle);

 SRpcIpSet mgmtGetIpSetFromVgroup(SVgObj *pVgroup);
+SRpcIpSet mgmtGetIpSetFromIp(uint32_t ip);

@@ -58,6 +58,7 @@ static int32_t mgmtActionAcctUpdate(SSdbOper *pOper) {
     memcpy(pSaved, pAcct, tsAcctUpdateSize);
     free(pAcct);
   }
+  mgmtDecAcctRef(pSaved);
   return TSDB_CODE_SUCCESS;
 }

@@ -106,11 +107,11 @@ int32_t mgmtInitAccts() {

   tsAcctSdb = sdbOpenTable(&tableDesc);
   if (tsAcctSdb == NULL) {
-    mError("failed to init acct data");
+    mError("table:%s, failed to create hash", tableDesc.tableName);
     return -1;
   }

-  mTrace("table:accounts table is created");
+  mTrace("table:%s, hash is created", tableDesc.tableName);
   return acctInit();
 }

@@ -123,6 +124,10 @@ void *mgmtGetAcct(char *name) {
   return sdbGetRow(tsAcctSdb, name);
 }

+void *mgmtGetNextAcct(void *pNode, SAcctObj **pAcct) {
+  return sdbFetchRow(tsAcctSdb, pNode, (void **)pAcct);
+}
+
 void mgmtIncAcctRef(SAcctObj *pAcct) {
   sdbIncRef(tsAcctSdb, pAcct);
 }

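The new mgmtGetNextAcct pairs with the sdb fetch loop used elsewhere in this commit (see mgmtRetrieveVnodes below). A sketch of that iteration idiom; the loop body is illustrative:

    void *pNode = NULL;
    SAcctObj *pAcct = NULL;
    while (1) {
      pNode = mgmtGetNextAcct(pNode, &pAcct);
      if (pAcct == NULL) break;
      /* ... inspect pAcct ... */
      mgmtDecAcctRef(pAcct);   /* each fetched row holds a reference */
    }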
@@ -47,7 +47,7 @@ int32_t balanceAllocVnodes(SVgObj *pVgroup) {
       vnodeUsage = usage;
     }
   }
-    mgmtReleaseDnode(pDnode);
+    mgmtDecDnodeRef(pDnode);
   }

   if (pSelDnode == NULL) {
@@ -56,8 +56,7 @@ int32_t balanceAllocVnodes(SVgObj *pVgroup) {
   }

   pVgroup->vnodeGid[0].dnodeId = pSelDnode->dnodeId;
-  pVgroup->vnodeGid[0].privateIp = pSelDnode->privateIp;
-  pVgroup->vnodeGid[0].publicIp = pSelDnode->publicIp;
+  pVgroup->vnodeGid[0].pDnode = pSelDnode;

   mTrace("dnode:%d, alloc one vnode to vgroup, openVnodes:%d", pSelDnode->dnodeId, pSelDnode->openVnodes);
   return TSDB_CODE_SUCCESS;

@@ -28,8 +28,10 @@
 #include "mgmtLog.h"
 #include "mgmtDb.h"
 #include "mgmtDServer.h"
+#include "mgmtMnode.h"
 #include "mgmtProfile.h"
 #include "mgmtShell.h"
+#include "mgmtSdb.h"
 #include "mgmtTable.h"
 #include "mgmtVgroup.h"

@@ -99,6 +101,18 @@ static void mgmtProcessMsgFromDnode(SRpcMsg *rpcMsg) {
     mgmtSendSimpleResp(rpcMsg->handle, TSDB_CODE_INVALID_MSG_LEN);
     return;
   }

+  if (!sdbIsMaster()) {
+    SRpcConnInfo connInfo;
+    rpcGetConnInfo(rpcMsg->handle, &connInfo);
+    bool usePublicIp = false;
+
+    SRpcIpSet ipSet = {0};
+    mgmtGetMnodeIpSet(&ipSet, usePublicIp);
+    mTrace("conn from dnode ip:%s redirect msg", taosIpStr(connInfo.clientIp));
+    rpcSendRedirectRsp(rpcMsg->handle, &ipSet);
+    return;
+  }
+
   if (mgmtProcessDnodeMsgFp[rpcMsg->msgType]) {
     SRpcMsg *pMsg = malloc(sizeof(SRpcMsg));

@@ -63,6 +63,7 @@ static int32_t mgmtDbActionInsert(SSdbOper *pOper) {

   if (pAcct != NULL) {
     mgmtAddDbToAcct(pAcct, pDb);
+    mgmtDecAcctRef(pAcct);
   }
   else {
     mError("db:%s, acct:%s info not exist in sdb", pDb->name, pDb->cfg.acct);
@@ -80,6 +81,7 @@ static int32_t mgmtDbActionDelete(SSdbOper *pOper) {
   mgmtDropAllChildTables(pDb);
   mgmtDropAllSuperTables(pDb);
   mgmtDropAllVgroups(pDb);
+  mgmtDecAcctRef(pAcct);

   return TSDB_CODE_SUCCESS;
 }
@@ -527,7 +529,7 @@ static int32_t mgmtGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn)
   pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
   pShow->numOfRows = pUser->pAcct->acctInfo.numOfDbs;

-  mgmtReleaseUser(pUser);
+  mgmtDecUserRef(pUser);
   return 0;
 }

@@ -647,7 +649,7 @@ static int32_t mgmtRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void *
   }

   pShow->numOfReads += numOfRows;
-  mgmtReleaseUser(pUser);
+  mgmtDecUserRef(pUser);
   return numOfRows;
 }

@@ -23,6 +23,7 @@
 #include "tutil.h"
 #include "tsocket.h"
 #include "tbalance.h"
+#include "tsync.h"
 #include "dnode.h"
 #include "mgmtDef.h"
 #include "mgmtLog.h"
@@ -37,6 +38,8 @@

 void *  tsDnodeSdb = NULL;
 int32_t tsDnodeUpdateSize = 0;
+int32_t tsAccessSquence = 0;
+extern void *  tsMnodeSdb;
 extern void *  tsVgroupSdb;

 static int32_t mgmtCreateDnode(uint32_t ip);
@@ -99,7 +102,13 @@ static int32_t mgmtDnodeActionDelete(SSdbOper *pOper) {
     }
   }

-  mgmtDropMnode(pDnode->dnodeId);
+  SMnodeObj *pMnode = mgmtGetMnode(pDnode->dnodeId);
+  if (pMnode != NULL) {
+    SSdbOper oper = {.type = SDB_OPER_LOCAL, .table = tsMnodeSdb, .pObj = pMnode};
+    sdbDeleteRow(&oper);
+    mgmtReleaseMnode(pMnode);
+  }
+
   balanceNotify();

   mTrace("dnode:%d, all vgroups:%d is dropped from sdb", pDnode->dnodeId, numOfVgroups);
@@ -139,7 +148,7 @@ static int32_t mgmtDnodeActionRestored() {
     mgmtCreateDnode(ip);
     SDnodeObj *pDnode = mgmtGetDnodeByIp(ip);
     mgmtAddMnode(pDnode->dnodeId);
-    mgmtReleaseDnode(pDnode);
+    mgmtDecDnodeRef(pDnode);
   }

   return TSDB_CODE_SUCCESS;
@@ -215,13 +224,17 @@ void *mgmtGetDnodeByIp(uint32_t ip) {
     if (ip == pDnode->privateIp) {
       return pDnode;
     }
-    mgmtReleaseDnode(pDnode);
+    mgmtDecDnodeRef(pDnode);
   }

   return NULL;
 }

-void mgmtReleaseDnode(SDnodeObj *pDnode) {
+void mgmtIncDnodeRef(SDnodeObj *pDnode) {
+  sdbIncRef(tsDnodeSdb, pDnode);
+}
+
+void mgmtDecDnodeRef(SDnodeObj *pDnode) {
   sdbDecRef(tsDnodeSdb, pDnode);
 }

@@ -318,27 +331,27 @@ void mgmtProcessDnodeStatusMsg(SRpcMsg *rpcMsg) {
   pDnode->alternativeRole = pStatus->alternativeRole;
   pDnode->totalVnodes     = pStatus->numOfTotalVnodes;
   pDnode->moduleStatus    = pStatus->moduleStatus;
+  pDnode->lastAccess      = tsAccessSquence;

   if (pStatus->dnodeId == 0) {
     mTrace("dnode:%d, first access, privateIp:%s, name:%s", pDnode->dnodeId, taosIpStr(pDnode->privateIp), pDnode->dnodeName);
   } else {
     //mTrace("dnode:%d, status received, access times %d", pDnode->dnodeId, pDnode->lastAccess);
   }

   int32_t openVnodes = htons(pStatus->openVnodes);
   for (int32_t j = 0; j < openVnodes; ++j) {
     SVnodeLoad *pVload = &pStatus->load[j];
-    pDnode->vload[j].vgId          = htonl(pVload->vgId);
-    pDnode->vload[j].totalStorage  = htobe64(pVload->totalStorage);
-    pDnode->vload[j].compStorage   = htobe64(pVload->compStorage);
-    pDnode->vload[j].pointsWritten = htobe64(pVload->pointsWritten);
-
-    SVgObj *pVgroup = mgmtGetVgroup(pDnode->vload[j].vgId);
+    pVload->vgId = htonl(pVload->vgId);
+
+    SVgObj *pVgroup = mgmtGetVgroup(pVload->vgId);
     if (pVgroup == NULL) {
       SRpcIpSet ipSet = mgmtGetIpSetFromIp(pDnode->privateIp);
-      mPrint("dnode:%d, vgroup:%d not exist in mnode, drop it", pDnode->dnodeId, pDnode->vload[j].vgId);
-      mgmtSendDropVnodeMsg(pDnode->vload[j].vgId, &ipSet, NULL);
+      mPrint("dnode:%d, vgroup:%d not exist in mnode, drop it", pDnode->dnodeId, pVload->vgId);
+      mgmtSendDropVnodeMsg(pVload->vgId, &ipSet, NULL);
     } else {
-      mgmtUpdateVgroupStatus(pVgroup, pDnode->dnodeId, pVload);
-      mgmtReleaseVgroup(pVgroup);
+      mgmtUpdateVgroupStatus(pVgroup, pDnode, pVload);
+      mgmtDecVgroupRef(pVgroup);
     }
   }

@@ -348,7 +361,7 @@ void mgmtProcessDnodeStatusMsg(SRpcMsg *rpcMsg) {
     balanceNotify();
   }

-  mgmtReleaseDnode(pDnode);
+  mgmtDecDnodeRef(pDnode);

   int32_t contLen = sizeof(SDMStatusRsp) + TSDB_MAX_VNODES * sizeof(SDMVgroupAccess);
   SDMStatusRsp *pRsp = rpcMallocCont(contLen);
@@ -444,7 +457,7 @@ static int32_t mgmtDropDnodeByIp(uint32_t ip) {
     return TSDB_CODE_NO_REMOVE_MASTER;
   }

-#ifndef _VPEER
+#ifndef _SYNC
   return mgmtDropDnode(pDnode);
 #else
   return balanceDropDnode(pDnode);
@@ -495,7 +508,10 @@ static int32_t mgmtGetDnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pCo
   SUserObj *pUser = mgmtGetUserFromConn(pConn, NULL);
   if (pUser == NULL) return 0;

-  if (strcmp(pUser->pAcct->user, "root") != 0) return TSDB_CODE_NO_RIGHTS;
+  if (strcmp(pUser->pAcct->user, "root") != 0) {
+    mgmtDecUserRef(pUser);
+    return TSDB_CODE_NO_RIGHTS;
+  }

   int32_t  cols = 0;
   SSchema *pSchema = pMeta->schema;
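This is the recurring leak fix in this commit: a row fetched through the connection holds a reference, and every early return has to drop it. A sketch of the corrected shape (illustrative, condensed from the hunk above):

    SUserObj *pUser = mgmtGetUserFromConn(pConn, NULL);
    if (pUser == NULL) return 0;

    if (strcmp(pUser->pAcct->user, "root") != 0) {
      mgmtDecUserRef(pUser);        /* previously leaked on this path */
      return TSDB_CODE_NO_RIGHTS;
    }
    /* ... normal path continues and drops the reference at the end ... */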
@@ -554,7 +570,7 @@ static int32_t mgmtGetDnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pCo
   pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
   pShow->pNode = NULL;

-  mgmtReleaseUser(pUser);
+  mgmtDecUserRef(pUser);

   return 0;
 }
@@ -604,7 +620,7 @@ static int32_t mgmtRetrieveDnodes(SShowObj *pShow, char *data, int32_t rows, voi


     numOfRows++;
-    mgmtReleaseDnode(pDnode);
+    mgmtDecDnodeRef(pDnode);
   }

   pShow->numOfReads += numOfRows;
@@ -622,7 +638,10 @@ static int32_t mgmtGetModuleMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC
   SUserObj *pUser = mgmtGetUserFromConn(pConn, NULL);
   if (pUser == NULL) return 0;

-  if (strcmp(pUser->user, "root") != 0) return TSDB_CODE_NO_RIGHTS;
+  if (strcmp(pUser->user, "root") != 0) {
+    mgmtDecUserRef(pUser);
+    return TSDB_CODE_NO_RIGHTS;
+  }

   SSchema *pSchema = pMeta->schema;

@@ -661,7 +680,7 @@ static int32_t mgmtGetModuleMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC
   pShow->numOfRows = mgmtGetDnodesNum() * TSDB_MOD_MAX;
   pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
   pShow->pNode = NULL;
-  mgmtReleaseUser(pUser);
+  mgmtDecUserRef(pUser);

   return 0;
 }
@@ -712,7 +731,7 @@ int32_t mgmtRetrieveModules(SShowObj *pShow, char *data, int32_t rows, void *pCo
     numOfRows++;
   }

-    mgmtReleaseDnode(pDnode);
+    mgmtDecDnodeRef(pDnode);
   }

   pShow->numOfReads += numOfRows;
@@ -731,7 +750,10 @@ static int32_t mgmtGetConfigMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC
   SUserObj *pUser = mgmtGetUserFromConn(pConn, NULL);
   if (pUser == NULL) return 0;

-  if (strcmp(pUser->user, "root") != 0) return TSDB_CODE_NO_RIGHTS;
+  if (strcmp(pUser->user, "root") != 0) {
+    mgmtDecUserRef(pUser);
+    return TSDB_CODE_NO_RIGHTS;
+  }

   SSchema *pSchema = pMeta->schema;

@@ -762,7 +784,7 @@ static int32_t mgmtGetConfigMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC

   pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
   pShow->pNode = NULL;
-  mgmtReleaseUser(pUser);
+  mgmtDecUserRef(pUser);

   return 0;
 }
@@ -814,7 +836,11 @@ static int32_t mgmtGetVnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pCo
   int32_t cols = 0;
   SUserObj *pUser = mgmtGetUserFromConn(pConn, NULL);
   if (pUser == NULL) return 0;
-  if (strcmp(pUser->user, "root") != 0) return TSDB_CODE_NO_RIGHTS;
+
+  if (strcmp(pUser->user, "root") != 0) {
+    mgmtDecUserRef(pUser);
+    return TSDB_CODE_NO_RIGHTS;
+  }

   SSchema *pSchema = pMeta->schema;

@@ -840,35 +866,18 @@ static int32_t mgmtGetVnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pCo
   if (pShow->payloadLen > 0 ) {
     uint32_t ip = ip2uint(pShow->payload);
     pDnode = mgmtGetDnodeByIp(ip);
-    if (NULL == pDnode) {
-      return TSDB_CODE_NODE_OFFLINE;
-    }
-
-    SVnodeLoad* pVnode;
-    pShow->numOfRows = 0;
-    for (int32_t i = 0 ; i < TSDB_MAX_VNODES; i++) {
-      pVnode = &pDnode->vload[i];
-      if (0 != pVnode->vgId) {
-        pShow->numOfRows++;
-      }
-    }
-
-    pShow->pNode = pDnode;
   } else {
-    while (true) {
-      pShow->pNode = mgmtGetNextDnode(pShow->pNode, (SDnodeObj **)&pDnode);
-      if (pDnode == NULL) break;
-      pShow->numOfRows += pDnode->openVnodes;
-    }
-
-    if (0 == pShow->numOfRows) return TSDB_CODE_NODE_OFFLINE;
+    mgmtGetNextDnode(NULL, (SDnodeObj **)&pDnode);
   }

-  pShow->pNode = NULL;
+  if (pDnode != NULL) {
+    pShow->numOfRows += pDnode->openVnodes;
+    mgmtDecDnodeRef(pDnode);
+  }

   pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
-  mgmtReleaseDnode(pDnode);
-  mgmtReleaseUser(pUser);
+  pShow->pNode = pDnode;
+  mgmtDecUserRef(pUser);

   return 0;
 }
@@ -881,35 +890,35 @@ static int32_t mgmtRetrieveVnodes(SShowObj *pShow, char *data, int32_t rows, voi

   if (0 == rows) return 0;

-  if (pShow->payloadLen) {
-    // output the vnodes info of the designated dnode. And output all vnodes of this dnode, instead of rows (max 100)
-    pDnode = (SDnodeObj *)(pShow->pNode);
-    if (pDnode != NULL) {
-      SVnodeLoad* pVnode;
-      for (int32_t i = 0 ; i < TSDB_MAX_VNODES; i++) {
-        pVnode = &pDnode->vload[i];
-        if (0 == pVnode->vgId) {
-          continue;
-        }
+  pDnode = (SDnodeObj *)(pShow->pNode);
+  if (pDnode != NULL) {
+    void *pNode = NULL;
+    SVgObj *pVgroup;
+    while (1) {
+      pNode = mgmtGetNextVgroup(pNode, &pVgroup);
+      if (pVgroup == NULL) break;
+
+      for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
+        SVnodeGid *pVgid = &pVgroup->vnodeGid[i];
+        if (pVgid->pDnode == pDnode) {
           cols = 0;

           pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
-          *(uint32_t *)pWrite = pVnode->vgId;
+          *(uint32_t *)pWrite = pVgroup->vgId;
           cols++;

           pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
-          strcpy(pWrite, pVnode->status ? "ready" : "offline");
+          strcpy(pWrite, mgmtGetMnodeRoleStr(pVgid->role));
           cols++;

           numOfRows++;
+        }
       }
+
+      mgmtDecVgroupRef(pVgroup);
     }
-  } else {
-    // TODO: output all vnodes of all dnodes
-    numOfRows = 0;
   }

   pShow->numOfReads += numOfRows;
   return numOfRows;
 }

@@ -149,12 +149,12 @@ void mgmtCleanUpSystem() {
   mgmtCleanUpShell();
   mgmtCleanupDClient();
   mgmtCleanupDServer();
-  mgmtCleanUpAccts();
   mgmtCleanUpTables();
   mgmtCleanUpVgroups();
   mgmtCleanUpDbs();
   mgmtCleanupDnodes();
   mgmtCleanUpUsers();
+  mgmtCleanUpAccts();
   sdbCleanUp();
   taosTmrCleanUp(tsMgmtTmr);
   tsMgmtIsRunning = false;

@@ -30,7 +30,7 @@
 #include "mgmtShell.h"
 #include "mgmtUser.h"

-static void *  tsMnodeSdb = NULL;
+void *  tsMnodeSdb = NULL;
 static int32_t tsMnodeUpdateSize = 0;
 static int32_t mgmtGetMnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn);
 static int32_t mgmtRetrieveMnodes(SShowObj *pShow, char *data, int32_t rows, void *pConn);
@@ -47,7 +47,7 @@ static int32_t mgmtMnodeActionInsert(SSdbOper *pOper) {

   pMnode->pDnode = pDnode;
   pDnode->isMgmt = true;
-  mgmtReleaseDnode(pDnode);
+  mgmtDecDnodeRef(pDnode);

   return TSDB_CODE_SUCCESS;
 }
@@ -58,7 +58,7 @@ static int32_t mgmtMnodeActionDelete(SSdbOper *pOper) {
   SDnodeObj *pDnode = mgmtGetDnode(pMnode->mnodeId);
   if (pDnode == NULL) return TSDB_CODE_DNODE_NOT_EXIST;
   pDnode->isMgmt = false;
-  mgmtReleaseDnode(pDnode);
+  mgmtDecDnodeRef(pDnode);

   mTrace("mnode:%d, is dropped from sdb", pMnode->mnodeId);
   return TSDB_CODE_SUCCESS;
@@ -171,7 +171,7 @@ char *mgmtGetMnodeRoleStr(int32_t role) {
   }
 }

-void mgmtGetMnodeIpList(SRpcIpSet *ipSet, bool usePublicIp) {
+void mgmtGetMnodeIpSet(SRpcIpSet *ipSet, bool usePublicIp) {
   void *pNode = NULL;
   while (1) {
     SMnodeObj *pMnode = NULL;
@@ -268,7 +268,10 @@ static int32_t mgmtGetMnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pCo
   SUserObj *pUser = mgmtGetUserFromConn(pConn, NULL);
   if (pUser == NULL) return 0;

-  if (strcmp(pUser->pAcct->user, "root") != 0) return TSDB_CODE_NO_RIGHTS;
+  if (strcmp(pUser->pAcct->user, "root") != 0) {
+    mgmtDecUserRef(pUser);
+    return TSDB_CODE_NO_RIGHTS;
+  }

   int32_t  cols = 0;
   SSchema *pSchema = pMeta->schema;
@@ -314,7 +317,7 @@ static int32_t mgmtGetMnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pCo
   pShow->numOfRows = mgmtGetMnodesNum();
   pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
   pShow->pNode = NULL;
-  mgmtReleaseUser(pUser);
+  mgmtDecUserRef(pUser);

   return 0;
 }

@@ -704,6 +704,7 @@ void mgmtProcessKillQueryMsg(SQueuedMsg *pMsg) {

   rpcRsp.code = code;
   rpcSendResponse(&rpcRsp);
+  mgmtDecUserRef(pUser);
 }

 void mgmtProcessKillStreamMsg(SQueuedMsg *pMsg) {
@@ -727,6 +728,7 @@ void mgmtProcessKillStreamMsg(SQueuedMsg *pMsg) {

   rpcRsp.code = code;
   rpcSendResponse(&rpcRsp);
+  mgmtDecUserRef(pUser);
 }

 void mgmtProcessKillConnectionMsg(SQueuedMsg *pMsg) {
@@ -750,6 +752,7 @@ void mgmtProcessKillConnectionMsg(SQueuedMsg *pMsg) {

   rpcRsp.code = code;
   rpcSendResponse(&rpcRsp);
+  mgmtDecUserRef(pUser);
 }

 int32_t mgmtInitProfile() {
@@ -790,12 +793,12 @@ void *mgmtMallocQueuedMsg(SRpcMsg *rpcMsg) {
 void mgmtFreeQueuedMsg(SQueuedMsg *pMsg) {
   if (pMsg != NULL) {
     rpcFreeCont(pMsg->pCont);
-    if (pMsg->pUser) mgmtReleaseUser(pMsg->pUser);
+    if (pMsg->pUser) mgmtDecUserRef(pMsg->pUser);
     if (pMsg->pDb) mgmtDecDbRef(pMsg->pDb);
-    if (pMsg->pVgroup) mgmtReleaseVgroup(pMsg->pVgroup);
+    if (pMsg->pVgroup) mgmtDecVgroupRef(pMsg->pVgroup);
     if (pMsg->pTable) mgmtDecTableRef(pMsg->pTable);
     if (pMsg->pAcct) mgmtDecAcctRef(pMsg->pAcct);
-    if (pMsg->pDnode) mgmtReleaseDnode(pMsg->pDnode);
+    if (pMsg->pDnode) mgmtDecDnodeRef(pMsg->pDnode);
     free(pMsg);
   }
 }

@@ -141,13 +141,19 @@ void mgmtDealyedAddToShellQueue(SQueuedMsg *queuedMsg) {

 static void mgmtProcessMsgFromShell(SRpcMsg *rpcMsg) {
   if (rpcMsg == NULL || rpcMsg->pCont == NULL) {
     mgmtSendSimpleResp(rpcMsg->handle, TSDB_CODE_INVALID_MSG_LEN);
     return;
   }

   if (!sdbIsMaster()) {
-    // rpcSendRedirectRsp(rpcMsg->handle, mgmtGetMnodeIpListForRedirect());
-    mgmtSendSimpleResp(rpcMsg->handle, TSDB_CODE_NO_MASTER);
-    rpcFreeCont(rpcMsg->pCont);
+    SRpcConnInfo connInfo;
+    rpcGetConnInfo(rpcMsg->handle, &connInfo);
+    bool usePublicIp = (connInfo.serverIp == tsPublicIpInt);
+
+    SRpcIpSet ipSet = {0};
+    mgmtGetMnodeIpSet(&ipSet, usePublicIp);
+    mTrace("conn from ip:%s user:%s redirect msg", taosIpStr(connInfo.clientIp), connInfo.user);
+    rpcSendRedirectRsp(rpcMsg->handle, &ipSet);
     return;
   }

@@ -332,7 +338,7 @@ static void mgmtProcessHeartBeatMsg(SQueuedMsg *pMsg) {
     return;
   }

-  mgmtGetMnodeIpList(&pHBRsp->ipList, pMsg->usePublicIp);
+  mgmtGetMnodeIpSet(&pHBRsp->ipList, pMsg->usePublicIp);

   /*
    * TODO
@@ -357,14 +363,18 @@ static int mgmtShellRetriveAuth(char *user, char *spi, char *encrypt, char *secr
   *encrypt = 0;
   *ckey = 0;

+  if (!sdbIsMaster()) {
+    *secret = 0;
+    return TSDB_CODE_SUCCESS;
+  }
+
   SUserObj *pUser = mgmtGetUser(user);
   if (pUser == NULL) {
     *secret = 0;
-    mgmtReleaseUser(pUser);
     return TSDB_CODE_INVALID_USER;
   } else {
     memcpy(secret, pUser->pass, TSDB_KEY_LEN);
-    mgmtReleaseUser(pUser);
+    mgmtDecUserRef(pUser);
     return TSDB_CODE_SUCCESS;
   }
 }
@@ -414,7 +424,7 @@ static void mgmtProcessConnectMsg(SQueuedMsg *pMsg) {
   pConnectRsp->writeAuth = pUser->writeAuth;
   pConnectRsp->superAuth = pUser->superAuth;

-  mgmtGetMnodeIpList(&pConnectRsp->ipList, pMsg->usePublicIp);
+  mgmtGetMnodeIpSet(&pConnectRsp->ipList, pMsg->usePublicIp);

 connect_over:
   rpcRsp.code = code;

@@ -97,7 +97,7 @@ static int32_t mgmtChildTableActionInsert(SSdbOper *pOper) {
     mError("ctable:%s, not in vgroup:%d", pTable->info.tableId, pTable->vgId);
     return TSDB_CODE_INVALID_VGROUP_ID;
   }
-  mgmtReleaseVgroup(pVgroup);
+  mgmtDecVgroupRef(pVgroup);

   SDbObj *pDb = mgmtGetDb(pVgroup->dbName);
   if (pDb == NULL) {
@@ -108,7 +108,7 @@ static int32_t mgmtChildTableActionInsert(SSdbOper *pOper) {

   SAcctObj *pAcct = mgmtGetAcct(pDb->cfg.acct);
   if (pAcct == NULL) {
-    mError("ctable:%s, account:%s not exists", pTable->info.tableId, pDb->cfg.acct);
+    mError("ctable:%s, acct:%s not exists", pTable->info.tableId, pDb->cfg.acct);
     return TSDB_CODE_INVALID_ACCT;
   }
   mgmtDecAcctRef(pAcct);
@@ -139,7 +139,7 @@ static int32_t mgmtChildTableActionDelete(SSdbOper *pOper) {
   if (pVgroup == NULL) {
     return TSDB_CODE_INVALID_VGROUP_ID;
   }
-  mgmtReleaseVgroup(pVgroup);
+  mgmtDecVgroupRef(pVgroup);

   SDbObj *pDb = mgmtGetDb(pVgroup->dbName);
   if (pDb == NULL) {
@@ -150,7 +150,7 @@ static int32_t mgmtChildTableActionDelete(SSdbOper *pOper) {

   SAcctObj *pAcct = mgmtGetAcct(pDb->cfg.acct);
   if (pAcct == NULL) {
-    mError("ctable:%s, account:%s not exists", pTable->info.tableId, pDb->cfg.acct);
+    mError("ctable:%s, acct:%s not exists", pTable->info.tableId, pDb->cfg.acct);
     return TSDB_CODE_INVALID_ACCT;
   }
   mgmtDecAcctRef(pAcct);
@@ -275,7 +275,7 @@ static int32_t mgmtChildTableActionRestored() {
       pNode = pLastNode;
       continue;
     }
-    mgmtReleaseVgroup(pVgroup);
+    mgmtDecVgroupRef(pVgroup);

     if (strcmp(pVgroup->dbName, pDb->name) != 0) {
       mError("ctable:%s, db:%s not match with vgroup:%d db:%s sid:%d, discard it",
@@ -1194,17 +1194,15 @@ static void mgmtProcessSuperTableVgroupMsg(SQueuedMsg *pMsg) {

     pRsp->vgroups[vg].vgId = htonl(vgId);
     for (int32_t vn = 0; vn < pVgroup->numOfVnodes; ++vn) {
-      SDnodeObj *pDnode = mgmtGetDnode(pVgroup->vnodeGid[vn].dnodeId);
+      SDnodeObj *pDnode = pVgroup->vnodeGid[vn].pDnode;
       if (pDnode == NULL) break;

       pRsp->vgroups[vg].ipAddr[vn].ip = htonl(pDnode->privateIp);
       pRsp->vgroups[vg].ipAddr[vn].port = htons(tsDnodeShellPort);
       pRsp->vgroups[vg].numOfIps++;
-
-      mgmtReleaseDnode(pDnode);
     }

-    mgmtReleaseVgroup(pVgroup);
+    mgmtDecVgroupRef(pVgroup);
   }
   pRsp->numOfVgroups = htonl(vg);

@@ -1613,7 +1611,7 @@ static int32_t mgmtDoGetChildTableMeta(SQueuedMsg *pMsg, STableMetaMsg *pMeta) {
       pMeta->vgroup.ipAddr[i].port = htonl(tsDnodeShellPort);
     }
     pMeta->vgroup.numOfIps++;
-    mgmtReleaseDnode(pDnode);
+    mgmtDecDnodeRef(pDnode);
   }
   pMeta->vgroup.vgId = htonl(pVgroup->vgId);

@@ -1742,7 +1740,7 @@ static SChildTableObj* mgmtGetTableByPos(uint32_t dnodeId, int32_t vnode, int32_

   SChildTableObj *pTable = pVgroup->tableList[sid];
   mgmtIncTableRef((STableObj *)pTable);
-  mgmtReleaseVgroup(pVgroup);
+  mgmtDecVgroupRef(pVgroup);
   return pTable;
 }

@@ -19,23 +19,23 @@
 #include "ttime.h"
 #include "tutil.h"
 #include "tglobal.h"
+#include "tgrant.h"
 #include "dnode.h"
 #include "mgmtDef.h"
 #include "mgmtLog.h"
 #include "mgmtAcct.h"
-#include "tgrant.h"
 #include "mgmtMnode.h"
 #include "mgmtSdb.h"
 #include "mgmtShell.h"
 #include "mgmtUser.h"

-void *         tsUserSdb = NULL;
+static void *  tsUserSdb = NULL;
 static int32_t tsUserUpdateSize = 0;
 static int32_t mgmtGetUserMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn);
 static int32_t mgmtRetrieveUsers(SShowObj *pShow, char *data, int32_t rows, void *pConn);
 static void    mgmtProcessCreateUserMsg(SQueuedMsg *pMsg);
 static void    mgmtProcessAlterUserMsg(SQueuedMsg *pMsg);
 static void    mgmtProcessDropUserMsg(SQueuedMsg *pMsg);

 static int32_t mgmtUserActionDestroy(SSdbOper *pOper) {
   tfree(pOper->pObj);

@@ -48,8 +48,8 @@ static int32_t mgmtUserActionInsert(SSdbOper *pOper) {
   if (pAcct != NULL) {
     mgmtAddUserToAcct(pAcct, pUser);
-  }
-  else {
+    mgmtDecAcctRef(pAcct);
+  } else {
     mError("user:%s, acct:%s info not exist in sdb", pUser->user, pUser->acct);
     return TSDB_CODE_INVALID_ACCT;
   }

@@ -63,6 +63,7 @@ static int32_t mgmtUserActionDelete(SSdbOper *pOper) {
   if (pAcct != NULL) {
     mgmtDropUserFromAcct(pAcct, pUser);
+    mgmtDecAcctRef(pAcct);
   }

   return TSDB_CODE_SUCCESS;

@@ -72,9 +73,10 @@ static int32_t mgmtUserActionUpdate(SSdbOper *pOper) {
   SUserObj *pUser = pOper->pObj;
   SUserObj *pSaved = mgmtGetUser(pUser->user);
   if (pUser != pSaved) {
-    memcpy(pSaved, pUser, pOper->rowSize);
+    memcpy(pSaved, pUser, tsUserUpdateSize);
     free(pUser);
   }
+  mgmtDecUserRef(pSaved);
   return TSDB_CODE_SUCCESS;
 }

@@ -86,7 +88,7 @@ static int32_t mgmtUserActionEncode(SSdbOper *pOper) {
 }

 static int32_t mgmtUserActionDecode(SSdbOper *pOper) {
-  SUserObj *pUser = (SUserObj *) calloc(1, sizeof(SUserObj));
+  SUserObj *pUser = (SUserObj *)calloc(1, sizeof(SUserObj));
   if (pUser == NULL) return TSDB_CODE_SERV_OUT_OF_MEMORY;

   memcpy(pUser, pOper->rowData, tsUserUpdateSize);

@@ -103,7 +105,7 @@ static int32_t mgmtUserActionRestored() {
     mgmtDecAcctRef(pAcct);
   }

-  return 0;
+  return TSDB_CODE_SUCCESS;
 }

 int32_t mgmtInitUsers() {

@@ -128,7 +130,7 @@ int32_t mgmtInitUsers() {
   tsUserSdb = sdbOpenTable(&tableDesc);
   if (tsUserSdb == NULL) {
-    mError("failed to init user data");
+    mError("table:%s, failed to create hash", tableDesc.tableName);
     return -1;
   }

@@ -138,7 +140,7 @@ int32_t mgmtInitUsers() {
   mgmtAddShellShowMetaHandle(TSDB_MGMT_TABLE_USER, mgmtGetUserMeta);
   mgmtAddShellShowRetrieveHandle(TSDB_MGMT_TABLE_USER, mgmtRetrieveUsers);

-  mTrace("table:users table is created");
+  mTrace("table:%s, hash is created", tableDesc.tableName);
   return 0;
 }

@@ -150,7 +152,15 @@ SUserObj *mgmtGetUser(char *name) {
   return (SUserObj *)sdbGetRow(tsUserSdb, name);
 }

-void mgmtReleaseUser(SUserObj *pUser) {
+void *mgmtGetNextUser(void *pNode, SUserObj **pUser) {
+  return sdbFetchRow(tsUserSdb, pNode, (void **)pUser);
+}
+
+void mgmtIncUserRef(SUserObj *pUser) {
+  return sdbIncRef(tsUserSdb, pUser);
+}
+
+void mgmtDecUserRef(SUserObj *pUser) {
   return sdbDecRef(tsUserSdb, pUser);
 }
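A minimal sketch of how these accessors compose (the loop body is illustrative; mgmtDropAllUsers further below drives the same fetch-and-release pattern):

    void *pNode = NULL;
    SUserObj *pUser = NULL;
    while (1) {
      pNode = mgmtGetNextUser(pNode, &pUser);
      if (pUser == NULL) break;
      // ... inspect pUser ...
      mgmtDecUserRef(pUser);  // each fetched row carries a reference that must be dropped
    }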
@@ -172,18 +182,22 @@ static int32_t mgmtUpdateUser(SUserObj *pUser) {

 int32_t mgmtCreateUser(SAcctObj *pAcct, char *name, char *pass) {
   int32_t code = acctCheck(pAcct, ACCT_GRANT_USER);
-  if (code != 0) {
+  if (code != TSDB_CODE_SUCCESS) {
     return code;
   }

-  if (name[0] == 0 || pass[0] == 0) {
-    return TSDB_CODE_INVALID_MSG_CONTENT;
+  if (name[0] == 0) {
+    return TSDB_CODE_INVALID_USER_FORMAT;
+  }
+
+  if (pass[0] == 0) {
+    return TSDB_CODE_INVALID_PASS_FORMAT;
   }

   SUserObj *pUser = mgmtGetUser(name);
   if (pUser != NULL) {
-    mTrace("user:%s is already there", name);
-    mgmtReleaseUser(pUser);
+    mTrace("user:%s, is already there", name);
+    mgmtDecUserRef(pUser);
     return TSDB_CODE_USER_ALREADY_EXIST;
   }

@@ -237,10 +251,10 @@ static int32_t mgmtDropUser(SUserObj *pUser) {

 static int32_t mgmtGetUserMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
   SUserObj *pUser = mgmtGetUserFromConn(pConn, NULL);
   if (pUser == NULL) {
-    return TSDB_CODE_INVALID_USER;
+    return TSDB_CODE_NO_USER_FROM_CONN;
   }

   int32_t  cols = 0;
   SSchema *pSchema = pMeta->schema;

   pShow->bytes[cols] = TSDB_USER_LEN;

@@ -273,7 +287,7 @@ static int32_t mgmtGetUserMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pCon
   pShow->numOfRows = pUser->pAcct->acctInfo.numOfUsers;
   pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];

-  mgmtReleaseUser(pUser);
+  mgmtDecUserRef(pUser);
   return 0;
 }

@@ -308,8 +322,9 @@ static int32_t mgmtRetrieveUsers(SShowObj *pShow, char *data, int32_t rows, void
     cols++;

     numOfRows++;
-    mgmtReleaseUser(pUser);
+    mgmtDecUserRef(pUser);
   }

+  pShow->numOfReads += numOfRows;
   return numOfRows;
 }

@@ -321,20 +336,21 @@ SUserObj *mgmtGetUserFromConn(void *pConn, bool *usePublicIp) {
       *usePublicIp = (connInfo.serverIp == tsPublicIpInt);
     }
     return mgmtGetUser(connInfo.user);
   } else {
     mError("can not get user from conn:%p", pConn);
-    return NULL;
   }
+
+  return NULL;
 }

 static void mgmtProcessCreateUserMsg(SQueuedMsg *pMsg) {
   int32_t code;
-  SUserObj *pUser = pMsg->pUser;
+  SUserObj *pOperUser = pMsg->pUser;

-  if (pUser->superAuth) {
+  if (pOperUser->superAuth) {
     SCMCreateUserMsg *pCreate = pMsg->pCont;
-    code = mgmtCreateUser(pUser->pAcct, pCreate->user, pCreate->pass);
+    code = mgmtCreateUser(pOperUser->pAcct, pCreate->user, pCreate->pass);
     if (code == TSDB_CODE_SUCCESS) {
-      mLPrint("user:%s is created by %s", pCreate->user, pUser->user);
+      mLPrint("user:%s, is created by %s", pCreate->user, pOperUser->user);
     }
   } else {
     code = TSDB_CODE_NO_RIGHTS;

@@ -356,7 +372,7 @@ static void mgmtProcessAlterUserMsg(SQueuedMsg *pMsg) {
   if (strcmp(pUser->user, "monitor") == 0 || (strcmp(pUser->user + 1, pUser->acct) == 0 && pUser->user[0] == '_')) {
     mgmtSendSimpleResp(pMsg->thandle, TSDB_CODE_NO_RIGHTS);
-    mgmtReleaseUser(pUser);
+    mgmtDecUserRef(pUser);
     return;
   }

@@ -380,7 +396,7 @@ static void mgmtProcessAlterUserMsg(SQueuedMsg *pMsg) {
       memset(pUser->pass, 0, sizeof(pUser->pass));
       taosEncryptPass((uint8_t*)pAlter->pass, strlen(pAlter->pass), pUser->pass);
       code = mgmtUpdateUser(pUser);
-      mLPrint("user:%s password is altered by %s, result:%d", pUser->user, pOperUser->user, tstrerror(code));
+      mLPrint("user:%s, password is altered by %s, result:%s", pUser->user, pOperUser->user, tstrerror(code));
     } else {
       code = TSDB_CODE_NO_RIGHTS;
     }

@@ -422,7 +438,7 @@ static void mgmtProcessAlterUserMsg(SQueuedMsg *pMsg) {
     }

     code = mgmtUpdateUser(pUser);
-    mLPrint("user:%s privilege is altered by %s, result:%d", pUser->user, pOperUser->user, tstrerror(code));
+    mLPrint("user:%s, privilege is altered by %s, result:%s", pUser->user, pOperUser->user, tstrerror(code));
   } else {
     code = TSDB_CODE_NO_RIGHTS;
   }

@@ -432,7 +448,7 @@ static void mgmtProcessAlterUserMsg(SQueuedMsg *pMsg) {
     mgmtSendSimpleResp(pMsg->thandle, TSDB_CODE_NO_RIGHTS);
   }

-  mgmtReleaseUser(pUser);
+  mgmtDecUserRef(pUser);
 }

 static void mgmtProcessDropUserMsg(SQueuedMsg *pMsg) {

@@ -443,13 +459,13 @@ static void mgmtProcessDropUserMsg(SQueuedMsg *pMsg) {
   SUserObj *pUser = mgmtGetUser(pDrop->user);
   if (pUser == NULL) {
     mgmtSendSimpleResp(pMsg->thandle, TSDB_CODE_INVALID_USER);
-    return ;
+    return;
   }

   if (strcmp(pUser->user, "monitor") == 0 || strcmp(pUser->user, pUser->acct) == 0 ||
     (strcmp(pUser->user + 1, pUser->acct) == 0 && pUser->user[0] == '_')) {
     mgmtSendSimpleResp(pMsg->thandle, TSDB_CODE_NO_RIGHTS);
-    mgmtReleaseUser(pUser);
+    mgmtDecUserRef(pUser);
     return ;
   }

@@ -471,14 +487,14 @@ static void mgmtProcessDropUserMsg(SQueuedMsg *pMsg) {
   if (hasRight) {
     code = mgmtDropUser(pUser);
     if (code == TSDB_CODE_SUCCESS) {
-      mLPrint("user:%s is dropped by %s, result:%s", pUser->user, pOperUser->user, tstrerror(code));
+      mLPrint("user:%s, is dropped by %s, result:%s", pUser->user, pOperUser->user, tstrerror(code));
     }
   } else {
     code = TSDB_CODE_NO_RIGHTS;
   }

   mgmtSendSimpleResp(pMsg->thandle, code);
-  mgmtReleaseUser(pUser);
+  mgmtDecUserRef(pUser);
 }

 void mgmtDropAllUsers(SAcctObj *pAcct) {

@@ -504,7 +520,7 @@ void mgmtDropAllUsers(SAcctObj *pAcct) {
       numOfUsers++;
     }

-    mgmtReleaseUser(pUser);
+    mgmtDecUserRef(pUser);
   }

   mTrace("acct:%s, all users:%d is dropped from sdb", pAcct->user, numOfUsers);

@@ -44,9 +44,7 @@ static int32_t mgmtRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, vo
 static void    mgmtProcessCreateVnodeRsp(SRpcMsg *rpcMsg);
 static void    mgmtProcessDropVnodeRsp(SRpcMsg *rpcMsg);
 static void    mgmtProcessVnodeCfgMsg(SRpcMsg *rpcMsg) ;
-static void    mgmtSendDropVgroupMsg(SVgObj *pVgroup, void *ahandle);
 static void    mgmtSendCreateVgroupMsg(SVgObj *pVgroup, void *ahandle);
+static void    mgmtSendDropVgroupMsg(SVgObj *pVgroup, void *ahandle);

 static int32_t mgmtVgroupActionDestroy(SSdbOper *pOper) {
   SVgObj *pVgroup = pOper->pObj;

@@ -68,7 +66,6 @@ static int32_t mgmtVgroupActionInsert(SSdbOper *pOper) {
   if (pDb == NULL) {
     return TSDB_CODE_INVALID_DB;
   }
-  mgmtDecDbRef(pDb);

   pVgroup->pDb = pDb;
   pVgroup->prev = NULL;

@@ -91,15 +88,13 @@ static int32_t mgmtVgroupActionInsert(SSdbOper *pOper) {
   for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
     SDnodeObj *pDnode = mgmtGetDnode(pVgroup->vnodeGid[i].dnodeId);
     if (pDnode != NULL) {
-      pVgroup->vnodeGid[i].privateIp = pDnode->privateIp;
-      pVgroup->vnodeGid[i].publicIp = pDnode->publicIp;
-      atomic_add_fetch_32(&pDnode->openVnodes, 1);
-      mgmtReleaseDnode(pDnode);
+      pVgroup->vnodeGid[i].pDnode = pDnode;
+      atomic_add_fetch_32(&pDnode->openVnodes, 1);
+      mgmtDecDnodeRef(pDnode);
     }
   }

   mgmtAddVgroupIntoDb(pVgroup);
+  mgmtIncDbRef(pVgroup->pDb);

   return TSDB_CODE_SUCCESS;
 }

@@ -115,10 +110,10 @@ static int32_t mgmtVgroupActionDelete(SSdbOper *pOper) {
   for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
     SDnodeObj *pDnode = mgmtGetDnode(pVgroup->vnodeGid[i].dnodeId);
-    if (pDnode) {
+    if (pDnode != NULL) {
       atomic_sub_fetch_32(&pDnode->openVnodes, 1);
     }
-    mgmtReleaseDnode(pDnode);
+    mgmtDecDnodeRef(pDnode);
   }

   return TSDB_CODE_SUCCESS;

@@ -127,9 +122,25 @@ static int32_t mgmtVgroupActionUpdate(SSdbOper *pOper) {
 static int32_t mgmtVgroupActionUpdate(SSdbOper *pOper) {
   SVgObj *pNew = pOper->pObj;
   SVgObj *pVgroup = mgmtGetVgroup(pNew->vgId);

   if (pVgroup != pNew) {
+    for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
+      SDnodeObj *pDnode = pVgroup->vnodeGid[i].pDnode;
+      if (pDnode != NULL) {
+        atomic_sub_fetch_32(&pDnode->openVnodes, 1);
+      }
+    }
+
     memcpy(pVgroup, pNew, pOper->rowSize);
     free(pNew);
+
+    for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
+      SDnodeObj *pDnode = mgmtGetDnode(pVgroup->vnodeGid[i].dnodeId);
+      pVgroup->vnodeGid[i].pDnode = pDnode;
+      if (pDnode != NULL) {
+        atomic_add_fetch_32(&pDnode->openVnodes, 1);
+      }
+    }
   }

   int32_t oldTables = taosIdPoolMaxSize(pVgroup->idPool);

@@ -150,6 +161,12 @@ static int32_t mgmtVgroupActionUpdate(SSdbOper *pOper) {
 static int32_t mgmtVgroupActionEncode(SSdbOper *pOper) {
   SVgObj *pVgroup = pOper->pObj;
   memcpy(pOper->rowData, pVgroup, tsVgUpdateSize);
+  SVgObj *pTmpVgroup = pOper->rowData;
+  for (int32_t i = 0; i < TSDB_VNODES_SUPPORT; ++i) {
+    pTmpVgroup->vnodeGid[i].pDnode = NULL;
+    pTmpVgroup->vnodeGid[i].role = 0;
+  }
+
   pOper->rowSize = tsVgUpdateSize;
   return TSDB_CODE_SUCCESS;
 }

@@ -204,7 +221,11 @@ int32_t mgmtInitVgroups() {
   return 0;
 }

-void mgmtReleaseVgroup(SVgObj *pVgroup) {
+void mgmtIncVgroupRef(SVgObj *pVgroup) {
+  return sdbIncRef(tsVgroupSdb, pVgroup);
+}
+
+void mgmtDecVgroupRef(SVgObj *pVgroup) {
   return sdbDecRef(tsVgroupSdb, pVgroup);
 }

@@ -224,16 +245,38 @@ void mgmtUpdateVgroup(SVgObj *pVgroup) {
   mgmtSendCreateVgroupMsg(pVgroup, NULL);
 }

-void mgmtUpdateVgroupStatus(SVgObj *pVgroup, int32_t dnodeId, SVnodeLoad *pVload) {
-  if (pVload->role == TAOS_SYNC_ROLE_MASTER) {
-    for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
-      SVnodeGid *pVgid = &pVgroup->vnodeGid[i];
-      if (pVgid->dnodeId == dnodeId) {
+void mgmtUpdateVgroupStatus(SVgObj *pVgroup, SDnodeObj *pDnode, SVnodeLoad *pVload) {
+  bool dnodeExist = false;
+  for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
+    SVnodeGid *pVgid = &pVgroup->vnodeGid[i];
+    if (pVgid->pDnode == pDnode) {
+      pVgid->role = pVload->role;
+      if (pVload->role == TAOS_SYNC_ROLE_MASTER) {
         pVgroup->inUse = i;
-        break;
       }
+      dnodeExist = true;
+      break;
     }
   }

+  if (!dnodeExist) {
+    SRpcIpSet ipSet = mgmtGetIpSetFromIp(pDnode->privateIp);
+    mError("vgroup:%d, dnode:%d not exist in mnode, drop it", pVload->vgId, pDnode->dnodeId);
+    mgmtSendDropVnodeMsg(pVload->vgId, &ipSet, NULL);
+    return;
+  }
+
+  if (pVload->role == TAOS_SYNC_ROLE_MASTER) {
+    pVgroup->totalStorage = htobe64(pVload->totalStorage);
+    pVgroup->compStorage = htobe64(pVload->compStorage);
+    pVgroup->pointsWritten = htobe64(pVload->pointsWritten);
+  }
+
+  if (pVload->replica != pVgroup->numOfVnodes) {
+    mError("dnode:%d, vgroup:%d replica:%d not match with mgmt:%d", pDnode->dnodeId, pVload->vgId, pVload->replica,
+           pVgroup->numOfVnodes);
+    mgmtSendCreateVgroupMsg(pVgroup, NULL);
+  }
 }

 SVgObj *mgmtGetAvailableVgroup(SDbObj *pDb) {

@@ -340,7 +383,7 @@ int32_t mgmtGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
     mgmtDecTableRef(pTable);
     pVgroup = mgmtGetVgroup(((SChildTableObj*)pTable)->vgId);
     if (NULL == pVgroup) return TSDB_CODE_INVALID_TABLE_ID;
-    mgmtReleaseVgroup(pVgroup);
+    mgmtDecVgroupRef(pVgroup);
     maxReplica = pVgroup->numOfVnodes > maxReplica ? pVgroup->numOfVnodes : maxReplica;
   } else {
     SVgObj *pVgroup = pDb->pHead;

@@ -391,27 +434,6 @@ int32_t mgmtGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
   return 0;
 }

-char *mgmtGetVnodeStatus(SVgObj *pVgroup, SVnodeGid *pVnode) {
-  SDnodeObj *pDnode = mgmtGetDnode(pVnode->dnodeId);
-  if (pDnode == NULL) {
-    mError("vgroup:%d, not exist in dnode:%d", pVgroup->vgId, pDnode->dnodeId);
-    return "null";
-  }
-  mgmtReleaseDnode(pDnode);
-
-  if (pDnode->status == TAOS_DN_STATUS_OFFLINE) {
-    return "offline";
-  }
-
-  for (int i = 0; i < pDnode->openVnodes; ++i) {
-    if (pDnode->vload[i].vgId == pVgroup->vgId) {
-      return pDnode->vload[i].status ? "ready" : "offline";
-    }
-  }
-
-  return "null";
-}
-
 int32_t mgmtRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pConn) {
   int32_t numOfRows = 0;
   SVgObj *pVgroup = NULL;

@@ -453,19 +475,24 @@ int32_t mgmtRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pCo
       *(int16_t *) pWrite = pVgroup->vnodeGid[i].dnodeId;
       cols++;

-      tinet_ntoa(ipstr, pVgroup->vnodeGid[i].privateIp);
-      pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
-      strcpy(pWrite, ipstr);
-      cols++;
+      SDnodeObj *pDnode = pVgroup->vnodeGid[i].pDnode;

-      pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
-      if (pVgroup->vnodeGid[i].dnodeId != 0) {
-        char *vnodeStatus = mgmtGetVnodeStatus(pVgroup, pVgroup->vnodeGid + i);
-        strcpy(pWrite, vnodeStatus);
+      if (pDnode != NULL) {
+        tinet_ntoa(ipstr, pDnode->privateIp);
+        pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
+        strcpy(pWrite, ipstr);
+        cols++;
+
+        pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
+        strcpy(pWrite, mgmtGetMnodeRoleStr(pVgroup->vnodeGid[i].role));
+        cols++;
       } else {
+        pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
         strcpy(pWrite, "null");
         cols++;
+
+        pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
+        strcpy(pWrite, "null");
+        cols++;
       }
-      cols++;
     }

     numOfRows++;

@@ -506,27 +533,38 @@ SMDCreateVnodeMsg *mgmtBuildCreateVnodeMsg(SVgObj *pVgroup) {
   SMDCreateVnodeMsg *pVnode = rpcMallocCont(sizeof(SMDCreateVnodeMsg));
   if (pVnode == NULL) return NULL;

   pVnode->cfg = pDb->cfg;

-  SVnodeCfg *pCfg = &pVnode->cfg;
-  pCfg->vgId = htonl(pVgroup->vgId);
-  pCfg->maxSessions = htonl(pCfg->maxSessions);
-  pCfg->cacheBlockSize = htonl(pCfg->cacheBlockSize);
-  pCfg->cacheNumOfBlocks.totalBlocks = htonl(pCfg->cacheNumOfBlocks.totalBlocks);
-  pCfg->daysPerFile = htonl(pCfg->daysPerFile);
-  pCfg->daysToKeep1 = htonl(pCfg->daysToKeep1);
-  pCfg->daysToKeep2 = htonl(pCfg->daysToKeep2);
-  pCfg->daysToKeep = htonl(pCfg->daysToKeep);
-  pCfg->commitTime = htonl(pCfg->commitTime);
-  pCfg->rowsInFileBlock = htonl(pCfg->rowsInFileBlock);
-  pCfg->blocksPerTable = htons(pCfg->blocksPerTable);
-  pCfg->replications = (int8_t) pVgroup->numOfVnodes;
+  SMDVnodeCfg *pCfg = &pVnode->cfg;
+  pCfg->vgId = htonl(pVgroup->vgId);
+  pCfg->maxTables = htonl(pDb->cfg.maxSessions);
-  pCfg->maxCacheSize = htobe64((int64_t)pDb->cfg.cacheBlockSize * pDb->cfg.cacheNumOfBlocks.totalBlocks);
+  pCfg->maxCacheSize = htobe64(-1);
+  pCfg->minRowsPerFileBlock = htonl(-1);
+  pCfg->maxRowsPerFileBlock = htonl(-1);
+  pCfg->daysPerFile = htonl(pDb->cfg.daysPerFile);
+  pCfg->daysToKeep1 = htonl(pDb->cfg.daysToKeep1);
+  pCfg->daysToKeep2 = htonl(pDb->cfg.daysToKeep2);
-  pCfg->daysToKeep = htonl(pDb->cfg.daysToKeep);
+  pCfg->daysToKeep = htonl(-1);
+  pCfg->commitTime = htonl(pDb->cfg.commitTime);
+  pCfg->precision = pDb->cfg.precision;
-  pCfg->compression = pDb->cfg.compression;
+  pCfg->compression = -1;
+  pCfg->wals = 3;
+  pCfg->commitLog = pDb->cfg.commitLog;
+  pCfg->replications = (int8_t) pVgroup->numOfVnodes;
+  pCfg->quorum = 1;

-  SVnodeDesc *vpeerDesc = pVnode->vpeerDesc;
-  for (int32_t j = 0; j < pVgroup->numOfVnodes; ++j) {
-    vpeerDesc[j].vgId = htonl(pVgroup->vgId);
-    vpeerDesc[j].dnodeId = htonl(pVgroup->vnodeGid[j].dnodeId);
-    vpeerDesc[j].ip = htonl(pVgroup->vnodeGid[j].privateIp);
+  SMDVnodeDesc *pNodes = pVnode->nodes;
+  for (int32_t j = 0; j < pVgroup->numOfVnodes; ++j) {
+    SDnodeObj *pDnode = pVgroup->vnodeGid[j].pDnode;
+    if (pDnode != NULL) {
+      pNodes[j].nodeId = htonl(pDnode->dnodeId);
+      pNodes[j].nodeIp = htonl(pDnode->privateIp);
+      strcpy(pNodes[j].nodeName, pDnode->dnodeName);
+      if (j == 0) {
+        pCfg->arbitratorIp = htonl(pDnode->privateIp);
+      }
+    }
   }

   return pVnode;

@@ -539,7 +577,7 @@ SRpcIpSet mgmtGetIpSetFromVgroup(SVgObj *pVgroup) {
     .port = tsDnodeMnodePort
   };
   for (int i = 0; i < pVgroup->numOfVnodes; ++i) {
-    ipSet.ip[i] = pVgroup->vnodeGid[i].privateIp;
+    ipSet.ip[i] = pVgroup->vnodeGid[i].pDnode->privateIp;
   }
   return ipSet;
 }

@@ -570,7 +608,7 @@ void mgmtSendCreateVnodeMsg(SVgObj *pVgroup, SRpcIpSet *ipSet, void *ahandle) {
 void mgmtSendCreateVgroupMsg(SVgObj *pVgroup, void *ahandle) {
   mTrace("vgroup:%d, send create all vnodes msg, ahandle:%p", pVgroup->vgId, ahandle);
   for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
-    SRpcIpSet ipSet = mgmtGetIpSetFromIp(pVgroup->vnodeGid[i].privateIp);
+    SRpcIpSet ipSet = mgmtGetIpSetFromIp(pVgroup->vnodeGid[i].pDnode->privateIp);
     mgmtSendCreateVnodeMsg(pVgroup, &ipSet, ahandle);
   }
 }

@@ -636,7 +674,7 @@ void mgmtSendDropVnodeMsg(int32_t vgId, SRpcIpSet *ipSet, void *ahandle) {
 static void mgmtSendDropVgroupMsg(SVgObj *pVgroup, void *ahandle) {
   mTrace("vgroup:%d, send drop all vnodes msg, ahandle:%p", pVgroup->vgId, ahandle);
   for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
-    SRpcIpSet ipSet = mgmtGetIpSetFromIp(pVgroup->vnodeGid[i].privateIp);
+    SRpcIpSet ipSet = mgmtGetIpSetFromIp(pVgroup->vnodeGid[i].pDnode->privateIp);
     mgmtSendDropVnodeMsg(pVgroup->vgId, &ipSet, ahandle);
   }
 }

@@ -687,7 +725,7 @@ static void mgmtProcessVnodeCfgMsg(SRpcMsg *rpcMsg) {
     mgmtSendSimpleResp(rpcMsg->handle, TSDB_CODE_NOT_ACTIVE_VNODE);
     return;
   }
-  mgmtReleaseDnode(pDnode);
+  mgmtDecDnodeRef(pDnode);

   SVgObj *pVgroup = mgmtGetVgroup(pCfg->vgId);
   if (pVgroup == NULL) {

@@ -695,7 +733,7 @@ static void mgmtProcessVnodeCfgMsg(SRpcMsg *rpcMsg) {
     mgmtSendSimpleResp(rpcMsg->handle, TSDB_CODE_NOT_ACTIVE_VNODE);
     return;
   }
-  mgmtReleaseVgroup(pVgroup);
+  mgmtDecVgroupRef(pVgroup);

   mgmtSendSimpleResp(rpcMsg->handle, TSDB_CODE_SUCCESS);

@@ -711,7 +749,7 @@ void mgmtDropAllVgroups(SDbObj *pDropDb) {
   SVgObj *pVgroup = NULL;

   while (1) {
-    mgmtReleaseVgroup(pVgroup);
+    mgmtDecVgroupRef(pVgroup);
     pNode = sdbFetchRow(tsVgroupSdb, pNode, (void **)&pVgroup);
     if (pVgroup == NULL) break;

@@ -397,6 +397,7 @@ void monitorSaveAcctLog(char *acctId, int64_t currentPointsPerSecond, int64_t ma
                         int64_t totalOutbound, int64_t maxOutbound, int64_t totalDbs, int64_t maxDbs,
                         int64_t totalUsers, int64_t maxUsers, int64_t totalStreams, int64_t maxStreams,
                         int64_t totalConns, int64_t maxConns, int8_t accessState) {
   if (monitor == NULL) return;
+  if (monitor->state != MONITOR_STATE_INITIALIZED) return;

   char sql[1024] = {0};

@@ -466,8 +466,8 @@ static void taosFreeFdObj(SFdObj *pFdObj) {
   }

   pFdObj->signature = NULL;
-  close(pFdObj->fd);
   epoll_ctl(pThreadObj->pollFd, EPOLL_CTL_DEL, pFdObj->fd, NULL);
+  close(pFdObj->fd);

   pThreadObj->numOfFds--;
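The swap above is deliberate ordering, not cosmetics; a hedged reading based on standard epoll semantics (the commit itself gives no rationale):

    epoll_ctl(pollFd, EPOLL_CTL_DEL, fd, NULL);  // deregister first, while fd is still a valid descriptor
    close(fd);                                   // closing first can make the DEL fail, or race with fd reuse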
@@ -11,7 +11,7 @@
 #define IS_VALID_COMPRESSION(compression) (((compression) >= NO_COMPRESSION) && ((compression) <= TWO_STAGE_COMP))
 #define TSDB_MIN_ID 0
 #define TSDB_MAX_ID INT_MAX
-#define TSDB_MIN_TABLES 10
+#define TSDB_MIN_TABLES 4
 #define TSDB_MAX_TABLES 100000
 #define TSDB_DEFAULT_TABLES 1000
 #define TSDB_DEFAULT_DAYS_PER_FILE 10

@@ -282,6 +282,8 @@ int32_t tsdbConfigRepo(TsdbRepoT *repo, STsdbCfg *pCfg) {

 int32_t tsdbTriggerCommit(TsdbRepoT *repo) {
   STsdbRepo *pRepo = (STsdbRepo *)repo;

+  if (pRepo->appH.walCallBack) pRepo->appH.walCallBack(pRepo->appH.appH);
+
   tsdbLockRepo(repo);
   if (pRepo->commit) {

@@ -387,7 +389,7 @@ int tsdbInitTableCfg(STableCfg *config, ETableType type, int64_t uid, int32_t ti
   config->superUid = TSDB_INVALID_SUPER_TABLE_ID;
   config->tableId.uid = uid;
   config->tableId.tid = tid;
-  config->name = strdup("test1");
+  config->name = NULL;
   return 0;
 }

@@ -854,8 +856,6 @@ static void *tsdbCommitData(void *arg) {
   SRWHelper whelper = {0};
   if (pCache->imem == NULL) return NULL;

-  if (pRepo->appH.walCallBack) pRepo->appH.walCallBack(pRepo->appH.appH);
-
   // Create the iterator to read from cache
   SSkipListIterator **iters = tsdbCreateTableIters(pMeta, pCfg->maxTables);
   if (iters == NULL) {

@@ -880,6 +880,7 @@ static void *tsdbCommitData(void *arg) {
 _exit:
   tdFreeDataCols(pDataCols);
   tsdbDestroyTableIters(iters, pCfg->maxTables);
+  tsdbDestroyHelper(&whelper);

   tsdbLockRepo(arg);
   tdListMove(pCache->imem->list, pCache->pool.memPool);

@@ -403,6 +403,7 @@ int tsdbWriteCompInfo(SRWHelper *pHelper) {
   } else {
     pHelper->pCompInfo->delimiter = TSDB_FILE_DELIMITER;
     pHelper->pCompInfo->uid = pHelper->tableInfo.uid;
+    pHelper->pCompInfo->checksum = 0;
     ASSERT((pIdx->len - sizeof(SCompInfo) - sizeof(TSCKSUM)) % sizeof(SCompBlock) == 0);
     taosCalcChecksumAppend(0, (uint8_t *)pHelper->pCompInfo, pIdx->len);
     pIdx->offset = lseek(pHelper->files.nHeadF.fd, 0, SEEK_END);

@@ -4,6 +4,7 @@

 #include "tdataformat.h"
 #include "tsdbMain.h"
+#include "tskiplist.h"

 static double getCurTime() {
   struct timeval tv;

@@ -141,6 +142,7 @@ TEST(TsdbTest, createRepo) {
   STableCfg tCfg;
   ASSERT_EQ(tsdbInitTableCfg(&tCfg, TSDB_SUPER_TABLE, 987607499877672L, 0), -1);
   ASSERT_EQ(tsdbInitTableCfg(&tCfg, TSDB_NORMAL_TABLE, 987607499877672L, 0), 0);
+  tsdbTableSetName(&tCfg, "test", false);

   int nCols = 5;
   STSchema *schema = tdNewSchema(nCols);

@@ -167,7 +169,7 @@ TEST(TsdbTest, createRepo) {
     .sversion = tCfg.sversion,
     .startTime = 1584081000000,
     .interval = 1000,
-    .totalRows = 5000000,
+    .totalRows = 10000000,
     .rowsPerSubmit = 1,
     .pSchema = schema
   };

@@ -262,4 +264,47 @@ TEST(TsdbTest, DISABLED_createFileGroup) {
   // ASSERT_EQ(tsdbCreateFileGroup("/home/ubuntu/work/ttest/vnode0/data", 1820, &fGroup, 1000), 0);

   int k = 0;
 }
+
+static char *getTKey(const void *data) {
+  return (char *)data;
+}
+
+static void insertSkipList(bool isAscend) {
+  TSKEY start_time = 1587393453000;
+  TSKEY interval = 1000;
+
+  SSkipList *pList = tSkipListCreate(5, TSDB_DATA_TYPE_TIMESTAMP, sizeof(TSKEY), 0, 0, 1, getTKey);
+  ASSERT_NE(pList, nullptr);
+
+  for (size_t i = 0; i < 20000000; i++) {
+    TSKEY   time = isAscend ? (start_time + i * interval) : (start_time - i * interval);
+    int32_t level = 0;
+    int32_t headSize = 0;
+
+    tSkipListNewNodeInfo(pList, &level, &headSize);
+    SSkipListNode *pNode = (SSkipListNode *)malloc(headSize + sizeof(TSKEY));
+    ASSERT_NE(pNode, nullptr);
+    pNode->level = level;
+    *(TSKEY *)((char *)pNode + headSize) = time;
+    tSkipListPut(pList, pNode);
+  }
+
+  tSkipListDestroy(pList);
+}
+
+TEST(TsdbTest, DISABLED_testSkipList) {
+// TEST(TsdbTest, testSkipList) {
+  double stime = getCurTime();
+  insertSkipList(true);
+  double etime = getCurTime();
+
+  printf("Time used to insert 20000000 records takes %f seconds\n", etime - stime);
+
+  stime = getCurTime();
+  insertSkipList(false);
+  etime = getCurTime();
+
+  printf("Time used to insert 20000000 records takes %f seconds\n", etime - stime);
+}
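For orientation, a hedged reading of the node layout the loop above relies on (field names from the code; exact header contents are an assumption):

    // tSkipListNewNodeInfo() picks a level for the new node and returns headSize, the size
    // of the node header (forward pointers etc.); the key lives immediately after the header:
    //   [ SSkipListNode header: headSize bytes ][ TSKEY key ]
    // hence: *(TSKEY *)((char *)pNode + headSize) = time;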
@@ -170,6 +170,8 @@ char *taosIpStr(uint32_t ipInt);

 uint32_t ip2uint(const char *const ip_addr);

+void taosRemoveDir(char *rootDir);
+
 #define TAOS_ALLOC_MODE_DEFAULT 0
 #define TAOS_ALLOC_MODE_RANDOM_FAIL 1
 #define TAOS_ALLOC_MODE_DETECT_LEAK 2

@@ -662,4 +662,28 @@ void tzfree(void *ptr) {
   if (ptr) {
     free((void *)((char *)ptr - sizeof(size_t)));
   }
 }
+
+void taosRemoveDir(char *rootDir) {
+  DIR *dir = opendir(rootDir);
+  if (dir == NULL) return;
+
+  struct dirent *de = NULL;
+  while ((de = readdir(dir)) != NULL) {
+    if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0) continue;
+
+    char filename[1024];
+    snprintf(filename, 1023, "%s/%s", rootDir, de->d_name);
+    if (de->d_type & DT_DIR) {
+      taosRemoveDir(filename);
+    } else {
+      remove(filename);
+      uPrint("file:%s is removed", filename);
+    }
+  }
+
+  closedir(dir);
+  rmdir(rootDir);
+
+  uPrint("dir:%s is removed", rootDir);
+}
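A usage sketch; vnodeRelease in the vnode changes further below does exactly this when a vnode is being deleted (the vgId value here is illustrative):

    char rootDir[TSDB_FILENAME_LEN] = {0};
    sprintf(rootDir, "%s/vnode%d", tsVnodeDir, vgId);  // e.g. ".../vnode2"
    taosRemoveDir(rootDir);  // removes regular files, recurses into subdirectories, then rmdir()s the root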
@@ -9,6 +9,7 @@ IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM))
   INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/tsdb/inc)
   INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/common/inc)
   INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/dnode/inc)
+  INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
   INCLUDE_DIRECTORIES(${TD_ENTERPRISE_DIR}/src/inc)
   INCLUDE_DIRECTORIES(inc)
   AUX_SOURCE_DIRECTORY(src SRC)

@@ -18,10 +18,12 @@
 #include "ihash.h"
 #include "taoserror.h"
 #include "taosmsg.h"
+#include "tutil.h"
 #include "trpc.h"
 #include "tsdb.h"
 #include "ttime.h"
 #include "ttimer.h"
+#include "cJSON.h"
 #include "twal.h"
 #include "tglobal.h"
 #include "dnode.h"

@@ -36,6 +38,8 @@ static void vnodeBuildVloadMsg(char *pNode, void * param);
 static int      vnodeWalCallback(void *arg);
 static int32_t  vnodeSaveCfg(SMDCreateVnodeMsg *pVnodeCfg);
 static int32_t  vnodeReadCfg(SVnodeObj *pVnode);
+static int32_t  vnodeSaveVersion(SVnodeObj *pVnode);
+static int32_t  vnodeReadVersion(SVnodeObj *pVnode);
 static int      vnodeWalCallback(void *arg);
 static uint32_t vnodeGetFileInfo(void *ahandle, char *name, uint32_t *index, int32_t *size);
 static int      vnodeGetWalInfo(void *ahandle, char *name, uint32_t *index);

@@ -93,21 +97,21 @@ int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) {
   STsdbCfg tsdbCfg = {0};
   tsdbCfg.precision = pVnodeCfg->cfg.precision;
-  tsdbCfg.compression = -1;
+  tsdbCfg.compression = pVnodeCfg->cfg.compression;
   tsdbCfg.tsdbId = pVnodeCfg->cfg.vgId;
-  tsdbCfg.maxTables = pVnodeCfg->cfg.maxSessions;
+  tsdbCfg.maxTables = pVnodeCfg->cfg.maxTables;
   tsdbCfg.daysPerFile = pVnodeCfg->cfg.daysPerFile;
-  tsdbCfg.minRowsPerFileBlock = -1;
-  tsdbCfg.maxRowsPerFileBlock = -1;
-  tsdbCfg.keep = -1;
-  tsdbCfg.maxCacheSize = -1;
+  tsdbCfg.minRowsPerFileBlock = pVnodeCfg->cfg.minRowsPerFileBlock;
+  tsdbCfg.maxRowsPerFileBlock = pVnodeCfg->cfg.maxRowsPerFileBlock;
+  tsdbCfg.keep = pVnodeCfg->cfg.daysToKeep;
+  tsdbCfg.maxCacheSize = pVnodeCfg->cfg.maxCacheSize;

   char tsdbDir[TSDB_FILENAME_LEN] = {0};
   sprintf(tsdbDir, "%s/vnode%d/tsdb", tsVnodeDir, pVnodeCfg->cfg.vgId);
   code = tsdbCreateRepo(tsdbDir, &tsdbCfg, NULL);
   if (code != TSDB_CODE_SUCCESS) {
-    dError("vgId:%d, failed to create tsdb in vnode, reason:%s", pVnodeCfg->cfg.vgId, tstrerror(terrno));
-    return terrno;
+    dError("vgId:%d, failed to create tsdb in vnode, reason:%s", pVnodeCfg->cfg.vgId, tstrerror(code));
+    return TSDB_CODE_VG_INIT_FAILED;
   }

   dPrint("vgId:%d, vnode is created, clog:%d", pVnodeCfg->cfg.vgId, pVnodeCfg->cfg.commitLog);

@@ -131,6 +135,39 @@ int32_t vnodeDrop(int32_t vgId) {
   return TSDB_CODE_SUCCESS;
 }

+int32_t vnodeAlter(void *param, SMDCreateVnodeMsg *pVnodeCfg) {
+  SVnodeObj *pVnode = param;
+  int32_t code = vnodeSaveCfg(pVnodeCfg);
+  if (code != TSDB_CODE_SUCCESS) {
+    dError("vgId:%d, failed to save vnode cfg, reason:%s", pVnodeCfg->cfg.vgId, tstrerror(code));
+    return code;
+  }
+
+  code = vnodeReadCfg(pVnode);
+  if (code != TSDB_CODE_SUCCESS) {
+    dError("pVnode:%p vgId:%d, failed to read cfg file", pVnode, pVnode->vgId);
+    taosDeleteIntHash(tsDnodeVnodesHash, pVnode->vgId);
+    return code;
+  }
+
+  code = syncReconfig(pVnode->sync, &pVnode->syncCfg);
+  if (code != TSDB_CODE_SUCCESS) {
+    dTrace("pVnode:%p vgId:%d, failed to alter vnode, cannot reconfig sync, result:%s", pVnode, pVnode->vgId,
+           tstrerror(code));
+    return code;
+  }
+
+  code = tsdbConfigRepo(pVnode->tsdb, &pVnode->tsdbCfg);
+  if (code != TSDB_CODE_SUCCESS) {
+    dTrace("pVnode:%p vgId:%d, failed to alter vnode, cannot reconfig tsdb, result:%s", pVnode, pVnode->vgId,
+           tstrerror(code));
+    return code;
+  }
+
+  dTrace("pVnode:%p vgId:%d, vnode is altered", pVnode, pVnode->vgId);
+  return TSDB_CODE_SUCCESS;
+}
+
 int32_t vnodeOpen(int32_t vnode, char *rootDir) {
   char temp[TSDB_FILENAME_LEN];
   pthread_once(&vnodeModuleInit, vnodeInit);

@@ -149,11 +186,13 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) {
     return code;
   }

+  vnodeReadVersion(pVnode);
+
   pVnode->wqueue = dnodeAllocateWqueue(pVnode);
   pVnode->rqueue = dnodeAllocateRqueue(pVnode);

   sprintf(temp, "%s/wal", rootDir);
   pVnode->wal = walOpen(temp, &pVnode->walCfg);

   SSyncInfo syncInfo;
   syncInfo.vgId = pVnode->vgId;

@@ -166,10 +205,10 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) {
   syncInfo.writeToCache = vnodeWriteToQueue;
   syncInfo.confirmForward = dnodeSendRpcWriteRsp;
   syncInfo.notifyRole = vnodeNotifyRole;
   pVnode->sync = syncStart(&syncInfo);

   pVnode->events = NULL;
   pVnode->cq = NULL;

   STsdbAppH appH = {0};
   appH.appH = (void *)pVnode;

@@ -227,7 +266,9 @@ void vnodeRelease(void *pVnodeRaw) {
   pVnode->wqueue = NULL;

   if (pVnode->status == TAOS_VN_STATUS_DELETING) {
+    // remove the whole directory
+    char rootDir[TSDB_FILENAME_LEN] = {0};
+    sprintf(rootDir, "%s/vnode%d", tsVnodeDir, vgId);
+    taosRemoveDir(rootDir);
   }

   free(pVnode);

@@ -246,7 +287,8 @@ void *vnodeGetVnode(int32_t vgId) {
   SVnodeObj **ppVnode = (SVnodeObj **)taosGetIntHashData(tsDnodeVnodesHash, vgId);
   if (ppVnode == NULL || *ppVnode == NULL) {
     terrno = TSDB_CODE_INVALID_VGROUP_ID;
-    assert(false);
+    dError("vgId:%d not exist", vgId);
     return NULL;
   }

   return *ppVnode;

@@ -292,6 +334,7 @@ static void vnodeBuildVloadMsg(char *pNode, void * param) {
   pLoad->vgId = htonl(pVnode->vgId);
   pLoad->status = pVnode->status;
   pLoad->role = pVnode->role;
+  pLoad->replica = pVnode->syncCfg.replica;
 }

 static void vnodeCleanUp(SVnodeObj *pVnode) {

@@ -301,6 +344,7 @@ static void vnodeCleanUp(SVnodeObj *pVnode) {
   //syncStop(pVnode->sync);
   tsdbCloseRepo(pVnode->tsdb);
   walClose(pVnode->wal);
+  vnodeSaveVersion(pVnode);

   vnodeRelease(pVnode);
 }

@@ -328,88 +372,306 @@ static void vnodeNotifyRole(void *ahandle, int8_t role) {
 }

 static int32_t vnodeSaveCfg(SMDCreateVnodeMsg *pVnodeCfg) {
-  char cfgFile[TSDB_FILENAME_LEN * 2] = {0};
-  sprintf(cfgFile, "%s/vnode%d/config", tsVnodeDir, pVnodeCfg->cfg.vgId);
+  char cfgFile[TSDB_FILENAME_LEN + 30] = {0};
+  sprintf(cfgFile, "%s/vnode%d/config.json", tsVnodeDir, pVnodeCfg->cfg.vgId);
   FILE *fp = fopen(cfgFile, "w");
-  if (!fp) return errno;
-
-  fprintf(fp, "commitLog %d\n", pVnodeCfg->cfg.commitLog);
-  fprintf(fp, "wals %d\n", 3);
-  fprintf(fp, "arbitratorIp %d\n", pVnodeCfg->vpeerDesc[0].ip);
-  fprintf(fp, "quorum %d\n", 1);
-  fprintf(fp, "replica %d\n", pVnodeCfg->cfg.replications);
-  for (int32_t i = 0; i < pVnodeCfg->cfg.replications; i++) {
-    fprintf(fp, "index%d nodeId %d nodeIp %u name n%d\n", i, pVnodeCfg->vpeerDesc[i].dnodeId, pVnodeCfg->vpeerDesc[i].ip, pVnodeCfg->vpeerDesc[i].dnodeId);
-  }
-
-  fclose(fp);
-  dTrace("vgId:%d, save vnode cfg successed", pVnodeCfg->cfg.vgId);
-
-  return TSDB_CODE_SUCCESS;
+  if (!fp) {
+    dError("vgId:%d, failed to open vnode cfg file for write, error:%s", pVnodeCfg->cfg.vgId, strerror(errno));
+    return errno;
+  }
+
+  char    ipStr[20];
+  int32_t len = 0;
+  int32_t maxLen = 1000;
+  char *  content = calloc(1, maxLen + 1);
+
+  len += snprintf(content + len, maxLen - len, "{\n");
+
+  len += snprintf(content + len, maxLen - len, "  \"precision\": %d,\n", pVnodeCfg->cfg.precision);
+  len += snprintf(content + len, maxLen - len, "  \"compression\": %d,\n", pVnodeCfg->cfg.compression);
+  len += snprintf(content + len, maxLen - len, "  \"maxTables\": %d,\n", pVnodeCfg->cfg.maxTables);
+  len += snprintf(content + len, maxLen - len, "  \"daysPerFile\": %d,\n", pVnodeCfg->cfg.daysPerFile);
+  len += snprintf(content + len, maxLen - len, "  \"minRowsPerFileBlock\": %d,\n", pVnodeCfg->cfg.minRowsPerFileBlock);
+  len += snprintf(content + len, maxLen - len, "  \"maxRowsPerFileBlock\": %d,\n", pVnodeCfg->cfg.maxRowsPerFileBlock);
+  len += snprintf(content + len, maxLen - len, "  \"daysToKeep\": %d,\n", pVnodeCfg->cfg.daysToKeep);
+
+  len += snprintf(content + len, maxLen - len, "  \"maxCacheSize\": %" PRId64 ",\n", pVnodeCfg->cfg.maxCacheSize);
+
+  len += snprintf(content + len, maxLen - len, "  \"commitLog\": %d,\n", pVnodeCfg->cfg.commitLog);
+  len += snprintf(content + len, maxLen - len, "  \"wals\": %d,\n", pVnodeCfg->cfg.wals);
+
+  uint32_t ipInt = pVnodeCfg->cfg.arbitratorIp;
+  sprintf(ipStr, "%u.%u.%u.%u", ipInt & 0xFF, (ipInt >> 8) & 0xFF, (ipInt >> 16) & 0xFF, (uint8_t)(ipInt >> 24));
+  len += snprintf(content + len, maxLen - len, "  \"arbitratorIp\": \"%s\",\n", ipStr);
+
+  len += snprintf(content + len, maxLen - len, "  \"quorum\": %d,\n", pVnodeCfg->cfg.quorum);
+  len += snprintf(content + len, maxLen - len, "  \"replica\": %d,\n", pVnodeCfg->cfg.replications);
+
+  len += snprintf(content + len, maxLen - len, "  \"nodeInfos\": [{\n");
+  for (int32_t i = 0; i < pVnodeCfg->cfg.replications; i++) {
+    len += snprintf(content + len, maxLen - len, "    \"nodeId\": %d,\n", pVnodeCfg->nodes[i].nodeId);
+
+    uint32_t ipInt = pVnodeCfg->nodes[i].nodeIp;
+    sprintf(ipStr, "%u.%u.%u.%u", ipInt & 0xFF, (ipInt >> 8) & 0xFF, (ipInt >> 16) & 0xFF, (uint8_t)(ipInt >> 24));
+    len += snprintf(content + len, maxLen - len, "    \"nodeIp\": \"%s\",\n", ipStr);
+
+    len += snprintf(content + len, maxLen - len, "    \"nodeName\": \"%s\"\n", pVnodeCfg->nodes[i].nodeName);
+
+    if (i < pVnodeCfg->cfg.replications - 1) {
+      len += snprintf(content + len, maxLen - len, "  },{\n");
+    } else {
+      len += snprintf(content + len, maxLen - len, "  }]\n");
+    }
+  }
+  len += snprintf(content + len, maxLen - len, "}\n");
+
+  fwrite(content, 1, len, fp);
+  fclose(fp);
+  free(content);
+
+  dPrint("vgId:%d, save vnode cfg succeeded", pVnodeCfg->cfg.vgId);
+
+  return 0;
 }
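Pieced together from the snprintf calls above, the generated config.json for a single-replica vnode would look roughly like this (all values illustrative):

    {
      "precision": 0,
      "compression": -1,
      "maxTables": 1000,
      "daysPerFile": 10,
      "minRowsPerFileBlock": -1,
      "maxRowsPerFileBlock": -1,
      "daysToKeep": -1,
      "maxCacheSize": -1,
      "commitLog": 1,
      "wals": 3,
      "arbitratorIp": "192.168.0.1",
      "quorum": 1,
      "replica": 1,
      "nodeInfos": [{
        "nodeId": 1,
        "nodeIp": "192.168.0.1",
        "nodeName": "n1"
      }]
    }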
 // TODO: this is a simple implement
 static int32_t vnodeReadCfg(SVnodeObj *pVnode) {
-  char option[5][16] = {0};
-  char cfgFile[TSDB_FILENAME_LEN * 2] = {0};
-  sprintf(cfgFile, "%s/vnode%d/config", tsVnodeDir, pVnode->vgId);
+  char cfgFile[TSDB_FILENAME_LEN + 30] = {0};
+  sprintf(cfgFile, "%s/vnode%d/config.json", tsVnodeDir, pVnode->vgId);
   FILE *fp = fopen(cfgFile, "r");
-  if (!fp) return errno;
+  if (!fp) {
+    dError("pVnode:%p vgId:%d, failed to open vnode cfg file for read, error:%s", pVnode, pVnode->vgId, strerror(errno));
+    return errno;
+  }

-  int32_t commitLog = -1;
-  int32_t num = fscanf(fp, "%s %d", option[0], &commitLog);
-  if (num != 2) return TSDB_CODE_INVALID_FILE_FORMAT;
-  if (strcmp(option[0], "commitLog") != 0) return TSDB_CODE_INVALID_FILE_FORMAT;
-  if (commitLog == -1) return TSDB_CODE_INVALID_FILE_FORMAT;
-  pVnode->walCfg.commitLog = (int8_t)commitLog;
+  int   ret = TSDB_CODE_OTHERS;
+  int   maxLen = 1000;
+  char *content = calloc(1, maxLen + 1);
+  int   len = fread(content, 1, maxLen, fp);
+  if (len <= 0) {
+    free(content);
+    fclose(fp);
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, content is null", pVnode, pVnode->vgId);
+    return TSDB_CODE_OTHERS;
+  }

-  int32_t wals = -1;
-  num = fscanf(fp, "%s %d", option[0], &wals);
-  if (num != 2) return TSDB_CODE_INVALID_FILE_FORMAT;
-  if (strcmp(option[0], "wals") != 0) return TSDB_CODE_INVALID_FILE_FORMAT;
-  if (wals == -1) return TSDB_CODE_INVALID_FILE_FORMAT;
-  pVnode->walCfg.wals = (int8_t)wals;
+  cJSON *root = cJSON_Parse(content);
+  if (root == NULL) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, invalid json format", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+
+  cJSON *precision = cJSON_GetObjectItem(root, "precision");
+  if (!precision || precision->type != cJSON_Number) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, precision not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+  pVnode->tsdbCfg.precision = (int8_t)precision->valueint;
+
+  cJSON *compression = cJSON_GetObjectItem(root, "compression");
+  if (!compression || compression->type != cJSON_Number) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, compression not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+  pVnode->tsdbCfg.compression = (int8_t)compression->valueint;
+
+  cJSON *maxTables = cJSON_GetObjectItem(root, "maxTables");
+  if (!maxTables || maxTables->type != cJSON_Number) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, maxTables not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+  pVnode->tsdbCfg.maxTables = maxTables->valueint;
+
+  cJSON *daysPerFile = cJSON_GetObjectItem(root, "daysPerFile");
+  if (!daysPerFile || daysPerFile->type != cJSON_Number) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, daysPerFile not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+  pVnode->tsdbCfg.daysPerFile = daysPerFile->valueint;
+
+  cJSON *minRowsPerFileBlock = cJSON_GetObjectItem(root, "minRowsPerFileBlock");
+  if (!minRowsPerFileBlock || minRowsPerFileBlock->type != cJSON_Number) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, minRowsPerFileBlock not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+  pVnode->tsdbCfg.minRowsPerFileBlock = minRowsPerFileBlock->valueint;
+
+  cJSON *maxRowsPerFileBlock = cJSON_GetObjectItem(root, "maxRowsPerFileBlock");
+  if (!maxRowsPerFileBlock || maxRowsPerFileBlock->type != cJSON_Number) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, maxRowsPerFileBlock not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+  pVnode->tsdbCfg.maxRowsPerFileBlock = maxRowsPerFileBlock->valueint;
+
+  cJSON *daysToKeep = cJSON_GetObjectItem(root, "daysToKeep");
+  if (!daysToKeep || daysToKeep->type != cJSON_Number) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, daysToKeep not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+  pVnode->tsdbCfg.keep = daysToKeep->valueint;
+
+  cJSON *maxCacheSize = cJSON_GetObjectItem(root, "maxCacheSize");
+  if (!maxCacheSize || maxCacheSize->type != cJSON_Number) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, maxCacheSize not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+  pVnode->tsdbCfg.maxCacheSize = maxCacheSize->valueint;
+
+  cJSON *commitLog = cJSON_GetObjectItem(root, "commitLog");
+  if (!commitLog || commitLog->type != cJSON_Number) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, commitLog not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+  pVnode->walCfg.commitLog = (int8_t)commitLog->valueint;
+
+  cJSON *wals = cJSON_GetObjectItem(root, "wals");
+  if (!wals || wals->type != cJSON_Number) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, wals not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+  pVnode->walCfg.wals = (int8_t)wals->valueint;
+  pVnode->walCfg.keep = 0;

-  int32_t arbitratorIp = -1;
-  num = fscanf(fp, "%s %u", option[0], &arbitratorIp);
-  if (num != 2) return TSDB_CODE_INVALID_FILE_FORMAT;
-  if (strcmp(option[0], "arbitratorIp") != 0) return TSDB_CODE_INVALID_FILE_FORMAT;
-  if (arbitratorIp == -1) return TSDB_CODE_INVALID_FILE_FORMAT;
-  pVnode->syncCfg.arbitratorIp = arbitratorIp;
+  cJSON *arbitratorIp = cJSON_GetObjectItem(root, "arbitratorIp");
+  if (!arbitratorIp || arbitratorIp->type != cJSON_String || arbitratorIp->valuestring == NULL) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, arbitratorIp not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+  pVnode->syncCfg.arbitratorIp = inet_addr(arbitratorIp->valuestring);

-  int32_t quorum = -1;
-  num = fscanf(fp, "%s %d", option[0], &quorum);
-  if (num != 2) return TSDB_CODE_INVALID_FILE_FORMAT;
-  if (strcmp(option[0], "quorum") != 0) return TSDB_CODE_INVALID_FILE_FORMAT;
-  if (quorum == -1) return TSDB_CODE_INVALID_FILE_FORMAT;
-  pVnode->syncCfg.quorum = (int8_t)quorum;
+  cJSON *quorum = cJSON_GetObjectItem(root, "quorum");
+  if (!quorum || quorum->type != cJSON_Number) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, quorum not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+  pVnode->syncCfg.quorum = (int8_t)quorum->valueint;

-  int32_t replica = -1;
-  num = fscanf(fp, "%s %d", option[0], &replica);
-  if (num != 2) return TSDB_CODE_INVALID_FILE_FORMAT;
-  if (strcmp(option[0], "replica") != 0) return TSDB_CODE_INVALID_FILE_FORMAT;
-  if (replica == -1) return TSDB_CODE_INVALID_FILE_FORMAT;
-  pVnode->syncCfg.replica = (int8_t)replica;
+  cJSON *replica = cJSON_GetObjectItem(root, "replica");
+  if (!replica || replica->type != cJSON_Number) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, replica not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+  pVnode->syncCfg.replica = (int8_t)replica->valueint;

-  for (int32_t i = 0; i < replica; ++i) {
-    int32_t  dnodeId = -1;
-    uint32_t dnodeIp = -1;
-    num = fscanf(fp, "%s %s %d %s %u %s %s", option[0], option[1], &dnodeId, option[2], &dnodeIp, option[3], pVnode->syncCfg.nodeInfo[i].name);
-    if (num != 7) return TSDB_CODE_INVALID_FILE_FORMAT;
-    if (strcmp(option[1], "nodeId") != 0) return TSDB_CODE_INVALID_FILE_FORMAT;
-    if (strcmp(option[2], "nodeIp") != 0) return TSDB_CODE_INVALID_FILE_FORMAT;
-    if (strcmp(option[3], "name") != 0) return TSDB_CODE_INVALID_FILE_FORMAT;
-    if (dnodeId == -1) return TSDB_CODE_INVALID_FILE_FORMAT;
-    if (dnodeIp == -1) return TSDB_CODE_INVALID_FILE_FORMAT;
-    pVnode->syncCfg.nodeInfo[i].nodeId = dnodeId;
-    pVnode->syncCfg.nodeInfo[i].nodeIp = dnodeIp;
+  cJSON *nodeInfos = cJSON_GetObjectItem(root, "nodeInfos");
+  if (!nodeInfos || nodeInfos->type != cJSON_Array) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, nodeInfos not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
   }

-  fclose(fp);
-  dTrace("pVnode:%p vgId:%d, read vnode cfg successed", pVnode, pVnode->vgId);
+  int size = cJSON_GetArraySize(nodeInfos);
+  if (size != pVnode->syncCfg.replica) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, nodeInfos size not matched", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }

-  return TSDB_CODE_SUCCESS;
+  for (int i = 0; i < size; ++i) {
+    cJSON *nodeInfo = cJSON_GetArrayItem(nodeInfos, i);
+    if (nodeInfo == NULL) continue;
+
+    cJSON *nodeId = cJSON_GetObjectItem(nodeInfo, "nodeId");
+    if (!nodeId || nodeId->type != cJSON_Number) {
+      dError("pVnode:%p vgId:%d, failed to read vnode cfg, nodeId not found", pVnode, pVnode->vgId);
+      goto PARSE_OVER;
+    }
+    pVnode->syncCfg.nodeInfo[i].nodeId = nodeId->valueint;
+
+    cJSON *nodeIp = cJSON_GetObjectItem(nodeInfo, "nodeIp");
+    if (!nodeIp || nodeIp->type != cJSON_String || nodeIp->valuestring == NULL) {
+      dError("pVnode:%p vgId:%d, failed to read vnode cfg, nodeIp not found", pVnode, pVnode->vgId);
+      goto PARSE_OVER;
+    }
+    pVnode->syncCfg.nodeInfo[i].nodeIp = inet_addr(nodeIp->valuestring);
+
+    cJSON *nodeName = cJSON_GetObjectItem(nodeInfo, "nodeName");
+    if (!nodeName || nodeName->type != cJSON_String || nodeName->valuestring == NULL) {
+      dError("pVnode:%p vgId:%d, failed to read vnode cfg, nodeName not found", pVnode, pVnode->vgId);
+      goto PARSE_OVER;
+    }
+    strncpy(pVnode->syncCfg.nodeInfo[i].name, nodeName->valuestring, TSDB_NODE_NAME_LEN);
+  }
+
+  ret = 0;
+
+  dPrint("pVnode:%p vgId:%d, read vnode cfg succeeded, replica:%d", pVnode, pVnode->vgId, pVnode->syncCfg.replica);
+  for (int32_t i = 0; i < pVnode->syncCfg.replica; i++) {
+    dPrint("pVnode:%p vgId:%d, dnode:%d, ip:%s name:%s", pVnode, pVnode->vgId, pVnode->syncCfg.nodeInfo[i].nodeId,
+           taosIpStr(pVnode->syncCfg.nodeInfo[i].nodeIp), pVnode->syncCfg.nodeInfo[i].name);
+  }
+
+PARSE_OVER:
+  free(content);
+  cJSON_Delete(root);
+  fclose(fp);
+  return ret;
 }

+static int32_t vnodeSaveVersion(SVnodeObj *pVnode) {
+  char versionFile[TSDB_FILENAME_LEN + 30] = {0};
+  sprintf(versionFile, "%s/vnode%d/version.json", tsVnodeDir, pVnode->vgId);
+  FILE *fp = fopen(versionFile, "w");
+  if (!fp) {
+    dError("pVnode:%p vgId:%d, failed to open vnode version file for write, error:%s", pVnode, pVnode->vgId,
+           strerror(errno));
+    return errno;
+  }
+
+  int32_t len = 0;
+  int32_t maxLen = 30;
+  char *  content = calloc(1, maxLen + 1);
+
+  len += snprintf(content + len, maxLen - len, "{\n");
+  len += snprintf(content + len, maxLen - len, "  \"version\": %" PRId64 "\n", pVnode->version);
+  len += snprintf(content + len, maxLen - len, "}\n");
+
+  fwrite(content, 1, len, fp);
+  fclose(fp);
+  free(content);
+
+  dPrint("pVnode:%p vgId:%d, save vnode version succeeded", pVnode, pVnode->vgId);
+
+  return 0;
+}
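Per the snprintf calls above, version.json is a one-field document, roughly:

    {
      "version": 0
    }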
+static int32_t vnodeReadVersion(SVnodeObj *pVnode) {
+  char versionFile[TSDB_FILENAME_LEN + 30] = {0};
+  sprintf(versionFile, "%s/vnode%d/version.json", tsVnodeDir, pVnode->vgId);
+  FILE *fp = fopen(versionFile, "r");
+  if (!fp) {
+    dError("pVnode:%p vgId:%d, failed to open vnode version file for read, error:%s", pVnode, pVnode->vgId,
+           strerror(errno));
+    return errno;
+  }
+
+  int   ret = TSDB_CODE_OTHERS;
+  int   maxLen = 100;
+  char *content = calloc(1, maxLen + 1);
+  int   len = fread(content, 1, maxLen, fp);
+  if (len <= 0) {
+    free(content);
+    fclose(fp);
+    dError("pVnode:%p vgId:%d, failed to read vnode version, content is null", pVnode, pVnode->vgId);
+    return TSDB_CODE_OTHERS;
+  }
+
+  cJSON *root = cJSON_Parse(content);
+  if (root == NULL) {
+    dError("pVnode:%p vgId:%d, failed to read vnode version, invalid json format", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+
+  cJSON *version = cJSON_GetObjectItem(root, "version");
+  if (!version || version->type != cJSON_Number) {
+    dError("pVnode:%p vgId:%d, failed to read vnode version, version not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+  pVnode->version = version->valueint;
+
+  ret = 0;
+
+  dPrint("pVnode:%p vgId:%d, read vnode version succeeded, version:%" PRId64, pVnode, pVnode->vgId, pVnode->version);
+
+PARSE_OVER:
+  free(content);
+  cJSON_Delete(root);
+  fclose(fp);
+  return ret;
+}

@@ -17,6 +17,7 @@ from util.log import *
 from util.cases import *
 from util.sql import *

+
 class TDTestCase:
     def init(self, conn):
         tdLog.debug("start to execute %s" % __file__)

@@ -24,25 +25,24 @@ class TDTestCase:

     def run(self):
         tdSql.prepare()
-        tdSql.execute('show databases')
-        tdSql.execute('drop database if exists db')
-        tdSql.execute('create database db')
-        tdSql.execute('use db')
-        tdSql.execute('create table tb (ts timestamp, speed int)')
+
+        ret = tdSql.execute('create table tb (ts timestamp, speed int)')

         insertRows = 10
         tdLog.info("insert %d rows" % (insertRows))
         for i in range(0, insertRows):
-            tdSql.execute('insert into tb values (now + %dm, %d)' % (i, i))
+            ret = tdSql.execute(
+                'insert into tb values (now + %dm, %d)' %
+                (i, i))

-        # tdLog.info("insert earlier data")
-        # tdSql.execute('insert into tb values (now - 5m , 10)')
-        # tdSql.execute('insert into tb values (now - 6m , 10)')
-        # tdSql.execute('insert into tb values (now - 7m , 10)')
-        # tdSql.execute('insert into tb values (now - 8m , 10)')
+        tdLog.info("insert earlier data")
+        tdSql.execute('insert into tb values (now - 5m , 10)')
+        tdSql.execute('insert into tb values (now - 6m , 10)')
+        tdSql.execute('insert into tb values (now - 7m , 10)')
+        tdSql.execute('insert into tb values (now - 8m , 10)')

         tdSql.query("select * from tb")
-        tdSql.checkRows(insertRows)
+        tdSql.checkRows(insertRows + 4)

     def stop(self):
         tdSql.close()

@@ -1 +1,3 @@
-sudo python ./test.py -f insert/basic.py
+#!/bin/bash
+python3 ./test.py -f insert/basic.py $1
+python3 ./test.py -s $1
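The trailing $1 forwards one caller-supplied flag into both runs; given the -g/--valgrind option added to test.py below, a hedged example:

    ./simpletest.sh -g    # both invocations then run the dnode under valgrind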
@@ -15,22 +15,25 @@
 # -*- coding: utf-8 -*-
 import sys
 import getopt
+import subprocess
 from distutils.log import warn as printf

 from util.log import *
 from util.dnodes import *
 from util.cases import *

 import taos

 # add testcase here:
 from insert.basic import *

 if __name__ == "__main__":
     fileName = "all"
     deployPath = ""
     masterIp = ""
     testCluster = False
-    opts, args = getopt.getopt(sys.argv[1:], 'f:p:m:sch', [
-        'file=', 'path=', 'master', 'stop', 'cluster', 'help'])
+    valgrind = 0
+    stop = 0
+    opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:scgh', [
+        'file=', 'path=', 'master', 'stop', 'cluster', 'valgrind', 'help'])
     for key, value in opts:
         if key in ['-h', '--help']:
             tdLog.printNoPrefix(

@@ -41,21 +44,50 @@ if __name__ == "__main__":
             tdLog.printNoPrefix('-c Test Cluster Flag')
             tdLog.printNoPrefix('-s stop All dnodes')
             sys.exit(0)

         if key in ['-f', '--file']:
             fileName = value

         if key in ['-p', '--path']:
             deployPath = value

         if key in ['-m', '--master']:
             masterIp = value

         if key in ['-c', '--cluster']:
             testCluster = True

+        if key in ['-g', '--valgrind']:
+            valgrind = 1
+
         if key in ['-s', '--stop']:
-            cmd = "ps -ef|grep -w taosd | grep 'taosd' | grep -v grep | awk '{print $2}' && pkill -9 taosd"
-            os.system(cmd)
-            tdLog.exit('stop All dnodes')
+            stop = 1
+
+    if (stop != 0):
+        if (valgrind == 0):
+            toBeKilled = "taosd"
+        else:
+            toBeKilled = "valgrind.bin"
+
+        killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -HUP " % toBeKilled
+        # os.system(killCmd)
+        # time.sleep(1)
+
+        psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
+        processID = subprocess.check_output(psCmd, shell=True)
+
+        while(processID):
+            os.system(killCmd)
+            time.sleep(1)
+            processID = subprocess.check_output(psCmd, shell=True)
+
+        tdLog.exit('stop All dnodes')

     if masterIp == "":
         tdDnodes.init(deployPath)
         tdDnodes.setTestCluster(testCluster)
+        tdDnodes.setValgrind(valgrind)

         if testCluster:
             tdLog.notice("Procedures for testing cluster")
             if fileName == "all":
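Correspondingly, stopping a valgrind-wrapped run (illustrative invocation, flags as defined above):

    python3 ./test.py -s -g    # with -g, the stop path targets valgrind.bin instead of taosd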
@@ -15,6 +15,8 @@
 import sys
 import os
 import time
 import datetime
+import inspect
+import importlib
 from util.log import *

@@ -30,6 +32,10 @@ class TDCases:
         self.windowsCases = []
         self.clusterCases = []

+    def __dynamicLoadModule(self, fileName):
+        moduleName = fileName.replace(".py", "").replace("/", ".")
+        return importlib.import_module(moduleName, package='..')
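For reference, the path-to-module mapping the loader performs (illustrative input):

    # "insert/basic.py" -> "insert.basic", then importlib.import_module("insert.basic")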
     def addWindows(self, name, case):
         self.windowsCases.append(TDCase(name, case))

@@ -40,64 +46,93 @@ class TDCases:
         self.clusterCases.append(TDCase(name, case))

     def runAllLinux(self, conn):
-        tdLog.notice("run total %d cases" % (len(self.linuxCases)))
-        for case in self.linuxCases:
-            case.case.init(conn)
-            case.case.run()
-            case.case.stop()
-        tdLog.notice("total %d cases executed" % (len(self.linuxCases)))
+        # TODO: load all Linux cases here
+        runNum = 0
+        for tmp in self.linuxCases:
+            if tmp.name.find(fileName) != -1:
+                case = testModule.TDTestCase()
+                case.init(conn)
+                case.run()
+                case.stop()
+                runNum += 1
+                continue
+
+        tdLog.notice("total %d Linux test case(s) executed" % (runNum))

     def runOneLinux(self, conn, fileName):
         tdLog.notice("run cases like %s" % (fileName))
+        testModule = self.__dynamicLoadModule(fileName)
+
+        runNum = 0
-        for case in self.linuxCases:
-            if case.name.find(fileName) != -1:
-                case.case.init(conn)
-                case.case.run()
-                case.case.stop()
-                time.sleep(5)
+        for tmp in self.linuxCases:
+            if tmp.name.find(fileName) != -1:
+                case = testModule.TDTestCase()
+                case.init(conn)
+                case.run()
+                case.stop()
+                runNum += 1
-        tdLog.notice("total %d cases executed" % (runNum))
+                continue
+
+        tdLog.notice("total %d Linux test case(s) executed" % (runNum))

     def runAllWindows(self, conn):
-        tdLog.notice("run total %d cases" % (len(self.windowsCases)))
-        for case in self.windowsCases:
-            case.case.init(conn)
-            case.case.run()
-            case.case.stop()
-        tdLog.notice("total %d cases executed" % (len(self.windowsCases)))
+        # TODO: load all Windows cases here
+        runNum = 0
+        for tmp in self.windowsCases:
+            if tmp.name.find(fileName) != -1:
+                case = testModule.TDTestCase()
+                case.init(conn)
+                case.run()
+                case.stop()
+                runNum += 1
+                continue
+
+        tdLog.notice("total %d Windows test case(s) executed" % (runNum))

     def runOneWindows(self, conn, fileName):
         tdLog.notice("run cases like %s" % (fileName))
+        testModule = self.__dynamicLoadModule(fileName)
+
+        runNum = 0
-        for case in self.windowsCases:
-            if case.name.find(fileName) != -1:
-                case.case.init(conn)
-                case.case.run()
-                case.case.stop()
-                time.sleep(2)
+        for tmp in self.windowsCases:
+            if tmp.name.find(fileName) != -1:
+                case = testModule.TDTestCase()
+                case.init(conn)
+                case.run()
+                case.stop()
+                runNum += 1
-        tdLog.notice("total %d cases executed" % (runNum))
+                continue
+        tdLog.notice("total %d Windows case(s) executed" % (runNum))

     def runAllCluster(self):
-        tdLog.notice("run total %d cases" % (len(self.clusterCases)))
-        for case in self.clusterCases:
-            case.case.init()
-            case.case.run()
-            case.case.stop()
-        tdLog.notice("total %d cases executed" % (len(self.clusterCases)))
+        # TODO: load all cluster case module here
+
+        runNum = 0
+        for tmp in self.clusterCases:
+            if tmp.name.find(fileName) != -1:
+                tdLog.notice("run cases like %s" % (fileName))
+                case = testModule.TDTestCase()
+                case.init()
+                case.run()
+                case.stop()
+                runNum += 1
+                continue
+
+        tdLog.notice("total %d Cluster test case(s) executed" % (runNum))

     def runOneCluster(self, fileName):
         tdLog.notice("run cases like %s" % (fileName))
+        testModule = self.__dynamicLoadModule(fileName)
+
+        runNum = 0
-        for case in self.clusterCases:
-            if case.name.find(fileName) != -1:
-                case.case.init()
-                case.case.run()
-                case.case.stop()
-                time.sleep(2)
+        for tmp in self.clusterCases:
+            if tmp.name.find(fileName) != -1:
+                tdLog.notice("run cases like %s" % (fileName))
+                case = testModule.TDTestCase()
+                case.init()
+                case.run()
+                case.stop()
+                runNum += 1
-        tdLog.notice("total %d cases executed" % (runNum))
+                continue
+
+        tdLog.notice("total %d Cluster test case(s) executed" % (runNum))


 tdCases = TDCases()
|
||||
|
|
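
__dynamicLoadModule is what lets a case be named by file path: "insert/basic.py" is rewritten to the dotted module name "insert.basic" and imported at runtime, so a new case file needs no central registration before the runners above can instantiate its TDTestCase. A reduced sketch of the same importlib pattern (the helper name is illustrative):

import importlib

def load_case(file_name):
    # "insert/basic.py" -> "insert.basic"
    module_name = file_name.replace(".py", "").replace("/", ".")
    module = importlib.import_module(module_name)
    # By the harness convention, every case module defines TDTestCase.
    return module.TDTestCase()
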

@@ -14,6 +14,7 @@
import sys
import os
import os.path
import subprocess
from util.log import *

@@ -78,10 +79,18 @@ class TDDnode:
        self.index = index
        self.running = 0
        self.deployed = 0
        self.testCluster = False
        self.valgrind = 0

    def init(self, path):
        self.path = path

    def setTestCluster(self, value):
        self.testCluster = value

    def setValgrind(self, value):
        self.valgrind = value

    def deploy(self):
        self.logDir = "%s/sim/dnode%d/log" % (self.path, self.index)
        self.dataDir = "%s/sim/dnode%d/data" % (self.path, self.index)

@@ -116,7 +125,9 @@ class TDDnode:
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

        self.startIP()
        if self.testCluster:
            self.startIP()

            self.cfg("masterIp", "192.168.0.1")
            self.cfg("secondIp", "192.168.0.2")
            self.cfg("publicIp", "192.168.0.%d" % (self.index))

@@ -164,9 +175,18 @@ class TDDnode:

        if self.deployed == 0:
            tdLog.exit("dnode:%d is not deployed" % (self.index))
        cmd = "nohup %staosd -c %s > /dev/null 2>&1 & " % (
            binPath, self.cfgDir)
        print(cmd)

        if self.valgrind == 0:
            cmd = "nohup %staosd -c %s > /dev/null 2>&1 & " % (
                binPath, self.cfgDir)
        else:
            valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"

            cmd = "nohup %s %staosd -c %s 2>&1 & " % (
                valgrindCmdline, binPath, self.cfgDir)

        print(cmd)

        if os.system(cmd) != 0:
            tdLog.exit(cmd)
        self.running = 1
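
The only functional change to start() is the command line itself: without valgrind the server's output is discarded, while the valgrind variant keeps the combined stdout/stderr stream, since memcheck writes its report to stderr by default. A stripped-down sketch of that branch (paths are placeholders, and the deprecated gcc296 workaround flag from the diff is omitted):

def build_start_cmd(bin_path, cfg_dir, valgrind=0):
    if valgrind == 0:
        # normal run: throw the daemon's output away
        return "nohup %staosd -c %s > /dev/null 2>&1 &" % (bin_path, cfg_dir)
    valgrind_cmdline = ("valgrind --tool=memcheck --leak-check=full "
                        "--show-reachable=no --track-origins=yes "
                        "--show-leak-kinds=all -v")
    # valgrind run: keep stderr, which is where memcheck reports
    return "nohup %s %staosd -c %s 2>&1 &" % (valgrind_cmdline, bin_path, cfg_dir)
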

@@ -275,8 +295,16 @@ class TDDnodes:
        self.sim.init(self.path)
        self.sim.deploy()

    def setTestCluster(self, value):
        self.testCluster = value

    def setValgrind(self, value):
        self.valgrind = value

    def deploy(self, index):
        self.check(index)
        self.dnodes[index - 1].setTestCluster(self.testCluster)
        self.dnodes[index - 1].setValgrind(self.valgrind)
        self.dnodes[index - 1].deploy()

    def cfg(self, index, option, value):

@@ -297,11 +325,15 @@ class TDDnodes:

    def startIP(self, index):
        self.check(index)
        self.dnodes[index - 1].startIP()

        if self.testCluster:
            self.dnodes[index - 1].startIP()

    def stopIP(self, index):
        self.check(index)
        self.dnodes[index - 1].stopIP()

        if self.dnodes[index - 1].testCluster:
            self.dnodes[index - 1].stopIP()

    def check(self, index):
        if index < 1 or index > 10:

@@ -312,11 +344,14 @@ class TDDnodes:
        for i in range(len(self.dnodes)):
            self.dnodes[i].stop()

        cmd = "sudo systemctl stop taosd"
        os.system(cmd)
        psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True)
        if processID:
            cmd = "sudo systemctl stop taosd"
            os.system(cmd)
        # if os.system(cmd) != 0 :
        # tdLog.exit(cmd)
        cmd = "ps -ef | grep -w taosd | grep 'dnode' | grep -v grep | awk '{print $2}' && sudo pkill -sigkill taosd"
        cmd = "ps -ef | grep -w taosd | grep 'dnode' | grep -v grep | awk '{print $2}' && pkill -sigkill taosd"
        os.system(cmd)
        # if os.system(cmd) != 0 :
        # tdLog.exit(cmd)

@@ -42,7 +42,7 @@ class TDLog:
        printf("\033[1;31m%s %s\033[0m" % (datetime.datetime.now(), err))
        sys.exit(1)

    def printfNoPrefix(self, info):
    def printNoPrefix(self, info):
        printf("\033[1;36m%s\033[0m" % (info))

@@ -3,6 +3,6 @@
run general/db/testSuite.sim
run general/insert/testSuite.sim
run general/table/testSuite.sim
run general/user/testSuite.sim
run general/user/basicSuite.sim

##################################

@@ -66,6 +66,4 @@ print $data10 $data11 $data22
print $data20 $data11 $data22
print $data30 $data31 $data32

system sh/exec.sh -n dnode1 -s stop

@@ -0,0 +1 @@
run general/user/basic1.sim

@@ -1 +1,6 @@
run general/user/basic1.sim
run general/user/basic1.sim
run general/user/pass_alter.sim
run general/user/pass_len.sim
run general/user/user_create.sim
run general/user/user_len.sim
#run general/user/monitor.sim

@@ -4,8 +4,6 @@ system sh/ip.sh -i 1 -s up
system sh/deploy.sh -n dnode1 -m 192.168.0.1 -i 192.168.0.1
system sh/cfg.sh -n dnode1 -c clog -v 0
system sh/exec.sh -n dnode1 -s start

sleep 3000
sql connect

print =============== step1

@@ -93,7 +93,7 @@ echo "privateIp $NODE_IP" >> $TAOS_CFG
echo "dDebugFlag 199" >> $TAOS_CFG
echo "mDebugFlag 199" >> $TAOS_CFG
echo "sdbDebugFlag 199" >> $TAOS_CFG
echo "rpcDebugFlag 131" >> $TAOS_CFG
echo "rpcDebugFlag 135" >> $TAOS_CFG
echo "tmrDebugFlag 131" >> $TAOS_CFG
echo "cDebugFlag 135" >> $TAOS_CFG
echo "httpDebugFlag 131" >> $TAOS_CFG

@@ -105,7 +105,7 @@ echo "numOfThreadsPerCore 2.0" >> $TAOS_CFG
echo "defaultPass taosdata" >> $TAOS_CFG
echo "numOfLogLines 100000000" >> $TAOS_CFG
echo "mgmtEqualVnodeNum 0" >> $TAOS_CFG
echo "clog 0" >> $TAOS_CFG
echo "clog 2" >> $TAOS_CFG
echo "statusInterval 1" >> $TAOS_CFG
echo "numOfTotalVnodes 4" >> $TAOS_CFG
echo "asyncLog 0" >> $TAOS_CFG

@@ -11,7 +11,8 @@ set +e
FILE_NAME=
RELEASE=0
ASYNC=0
while getopts "f:a" arg
VALGRIND=0
while getopts "f:av" arg
do
  case $arg in
    f)

@@ -20,6 +21,9 @@ do
    a)
      ASYNC=1
      ;;
    v)
      VALGRIND=1
      ;;
    ?)
      echo "unknow argument"
      ;;

@@ -30,6 +34,8 @@ cd .
sh/ip.sh -i 1 -s up > /dev/null 2>&1 &
sh/ip.sh -i 2 -s up > /dev/null 2>&1 &
sh/ip.sh -i 3 -s up > /dev/null 2>&1 &
sh/ip.sh -i 4 -s up > /dev/null 2>&1 &
sh/ip.sh -i 5 -s up > /dev/null 2>&1 &

# Get responsible directories
CODE_DIR=`dirname $0`

@@ -96,10 +102,14 @@ ulimit -c unlimited
#sudo sysctl -w kernel.core_pattern=$TOP_DIR/core.%p.%e

if [ -n "$FILE_NAME" ]; then
  echo "ExcuteCmd:" $PROGRAM -c $CFG_DIR -f $FILE_NAME
  echo "------------------------------------------------------------------------"
  #valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes --log-file=valgrind.log $PROGRAM -c $CFG_DIR -f $FILE_NAME
  $PROGRAM -c $CFG_DIR -f $FILE_NAME
  if [ $VALGRIND -eq 1 ]; then
    echo valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes --log-file=${CODE_DIR}/../script/valgrind.log $PROGRAM -c $CFG_DIR -f $FILE_NAME
    valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes --log-file=${CODE_DIR}/../script/valgrind.log $PROGRAM -c $CFG_DIR -f $FILE_NAME
  else
    echo "ExcuteCmd:" $PROGRAM -c $CFG_DIR -f $FILE_NAME
    $PROGRAM -c $CFG_DIR -f $FILE_NAME
  fi
else
  echo "ExcuteCmd:" $PROGRAM -c $CFG_DIR -f basicSuite.sim
  echo "------------------------------------------------------------------------"
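
Usage note: with the v) branch wired through getopts, an invocation along the lines of ./test.sh -f basicSuite.sim -v (suite name illustrative) reruns the same TSIM file under memcheck, and the report goes to ${CODE_DIR}/../script/valgrind.log instead of the console, as the echoed command above shows.
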

@@ -25,18 +25,18 @@ system sh/cfg.sh -n dnode2 -c mgmtEqualVnodeNum -v 4
system sh/cfg.sh -n dnode3 -c mgmtEqualVnodeNum -v 4
system sh/cfg.sh -n dnode4 -c mgmtEqualVnodeNum -v 4

system sh/cfg.sh -n dnode1 -c clog -v 1
system sh/cfg.sh -n dnode2 -c clog -v 1
system sh/cfg.sh -n dnode3 -c clog -v 1
system sh/cfg.sh -n dnode4 -c clog -v 1
system sh/cfg.sh -n dnode1 -c clog -v 2
system sh/cfg.sh -n dnode2 -c clog -v 2
system sh/cfg.sh -n dnode3 -c clog -v 2
system sh/cfg.sh -n dnode4 -c clog -v 2

system sh/cfg.sh -n dnode1 -c clog -v 1
system sh/cfg.sh -n dnode2 -c clog -v 1
system sh/cfg.sh -n dnode3 -c clog -v 1
system sh/cfg.sh -n dnode4 -c clog -v 1
system sh/cfg.sh -n dnode1 -c clog -v 2
system sh/cfg.sh -n dnode2 -c clog -v 2
system sh/cfg.sh -n dnode3 -c clog -v 2
system sh/cfg.sh -n dnode4 -c clog -v 2

print ========== step1
system sh/exec.sh -n dnode1 -s start
system sh/exec_up.sh -n dnode1 -s start
sql connect
sleep 3000

@@ -49,15 +49,15 @@ sql insert into d1.t1 values(now+4s, 12)
sql insert into d1.t1 values(now+5s, 11)

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
if $data3_192.168.0.1 != 3 then
print 192.168.0.1 openVnodes $data3_1
if $data3_1 != 1 then
  return -1
endi

print ========== step2
sleep 2000
sql create dnode 192.168.0.2
system sh/exec.sh -n dnode2 -s start
system sh/exec_up.sh -n dnode2 -s start

$x = 0
show2:

@@ -68,12 +68,12 @@ show2:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 0 then
  goto show2
endi
if $data3_192.168.0.2 != 3 then
if $data3_2 != 1 then
  goto show2
endi
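
From here down, the .sim changes are one mechanical rename applied across the suite: show dnodes rows are now addressed by dnode id instead of by IP, and the checked column is printed as openVnodes rather than freeVnodes, so line pairs change in lockstep:

print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.1 openVnodes $data3_1

The expected counts are inverted to match: with 4 vnode slots per dnode, a dnode with 3 slots free is one with 1 vnode open, which is why the != 3 check above became != 1.
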

@@ -87,12 +87,12 @@ sql insert into d2.t2 values(now+4s, 22)
sql insert into d2.t2 values(now+5s, 21)

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 0 then
  return -1
endi
if $data3_192.168.0.2 != 2 then
if $data3_2 != 2 then
  return -1
endi

@@ -108,23 +108,23 @@ show4:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
if $data3_192.168.0.1 != 2 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 2 then
  goto show4
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
  goto show4
endi
if $rows != 1 then
  goto show4
endi

system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec_up.sh -n dnode2 -s stop -x SIGINT

print ========== step5
sql create dnode 192.168.0.3
system sh/exec.sh -n dnode3 -s start
system sh/exec_up.sh -n dnode3 -s start

$x = 0
show5:

@@ -135,16 +135,16 @@ show5:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
if $data3_1 != 0 then
  goto show5
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
  goto show5
endi
if $data3_192.168.0.3 != 2 then
if $data3_3 != 2 then
  goto show5
endi

@@ -158,23 +158,23 @@ sql insert into d3.t3 values(now+4s, 32)
sql insert into d3.t3 values(now+5s, 31)

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3

if $data3_192.168.0.1 != 4 then
if $data3_1 != 0 then
  return -1
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
  return -1
endi
if $data3_192.168.0.3 != 1 then
if $data3_3 != 3 then
  return -1
endi

print ========== step7
sql create dnode 192.168.0.4
system sh/exec.sh -n dnode4 -s start
system sh/exec_up.sh -n dnode4 -s start

$x = 0
show7:

@@ -185,20 +185,20 @@ show7:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.4 freeVnodes $data3_192.168.0.4
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
if $data3_1 != 0 then
  goto show7
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
  goto show7
endi
if $data3_192.168.0.3 != 2 then
if $data3_3 != 2 then
  goto show7
endi
if $data3_192.168.0.4 != 3 then
if $data3_4 != 1 then
  goto show7
endi

@@ -212,21 +212,21 @@ sql insert into d4.t4 values(now+4s, 42)
sql insert into d4.t4 values(now+5s, 41)

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.4 freeVnodes $data3_192.168.0.4
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4

if $data3_192.168.0.1 != 4 then
if $data3_1 != 0 then
  return -1
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
  return -1
endi
if $data3_192.168.0.3 != 2 then
if $data3_3 != 2 then
  return -1
endi
if $data3_192.168.0.4 != 2 then
if $data3_4 != 2 then
  return -1
endi

@@ -242,25 +242,25 @@ show9:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.4 freeVnodes $data3_192.168.0.4
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4

if $data3_192.168.0.1 != 4 then
if $data3_1 != 0 then
  goto show9
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
  goto show9
endi
if $data3_192.168.0.3 != null then
if $data3_3 != null then
  goto show9
endi
if $data3_192.168.0.4 != 0 then
if $data3_4 != 4 then
  goto show9
endi

system sh/exec.sh -n dnode3 -s stop -x SIGINT
system sh/exec_up.sh -n dnode3 -s stop -x SIGINT

print ========== step10
sql select * from d1.t1 order by t desc
@@ -37,13 +37,13 @@ system sh/cfg.sh -n dnode4 -c clog -v 1
system sh/cfg.sh -n dnode5 -c clog -v 1

print ========== step1
system sh/exec.sh -n dnode1 -s start
system sh/exec_up.sh -n dnode1 -s start
sql connect

sql create dnode 192.168.0.2
sql create dnode 192.168.0.3
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
system sh/exec_up.sh -n dnode2 -s start
system sh/exec_up.sh -n dnode3 -s start
sleep 3000

sql create database d1 replica 2 tables 4

@@ -63,16 +63,16 @@ sql insert into d2.t2 values(now+4s, 22)
sql insert into d2.t2 values(now+5s, 21)

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
if $data3_1 != 4 then
  return -1
endi
if $data3_192.168.0.2 != 2 then
if $data3_2 != 2 then
  return -1
endi
if $data3_192.168.0.3 != 2 then
if $data3_3 != 2 then
  return -1
endi

@@ -88,24 +88,24 @@ show2:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
if $data3_192.168.0.1 != 2 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
if $data3_1 != 2 then
  goto show2
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
  goto show2
endi
if $data3_192.168.0.3 != 2 then
if $data3_3 != 2 then
  goto show2
endi

system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec_up.sh -n dnode2 -s stop -x SIGINT

print ========== step3
sql create dnode 192.168.0.4
system sh/exec.sh -n dnode4 -s start
system sh/exec_up.sh -n dnode4 -s start

$x = 0
show3:

@@ -116,20 +116,20 @@ show3:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.4 freeVnodes $data3_192.168.0.4
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
if $data3_1 != 4 then
  goto show3
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
  goto show3
endi
if $data3_192.168.0.3 != 2 then
if $data3_3 != 2 then
  goto show3
endi
if $data3_192.168.0.4 != 2 then
if $data3_4 != 2 then
  goto show3
endi

@@ -143,26 +143,26 @@ sql insert into d3.t3 values(now+4s, 32)
sql insert into d3.t3 values(now+5s, 31)

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.4 freeVnodes $data3_192.168.0.4
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
if $data3_1 != 4 then
  return -1
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
  return -1
endi
if $data3_192.168.0.3 != 1 then
if $data3_3 != 1 then
  return -1
endi
if $data3_192.168.0.4 != 1 then
if $data3_4 != 1 then
  return -1
endi

print ========== step5
sql create dnode 192.168.0.5
system sh/exec.sh -n dnode5 -s start
system sh/exec_up.sh -n dnode5 -s start

$x = 0
show5:

@@ -173,24 +173,24 @@ show5:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.4 freeVnodes $data3_192.168.0.4
print 192.168.0.5 freeVnodes $data3_192.168.0.5
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
print 192.168.0.5 openVnodes $data3_5
if $data3_1 != 4 then
  goto show5
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
  goto show5
endi
if $data3_192.168.0.3 != 2 then
if $data3_3 != 2 then
  goto show5
endi
if $data3_192.168.0.4 != 2 then
if $data3_4 != 2 then
  goto show5
endi
if $data3_192.168.0.5 != 2 then
if $data3_5 != 2 then
  goto show5
endi

@@ -206,28 +206,28 @@ show6:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.4 freeVnodes $data3_192.168.0.4
print 192.168.0.5 freeVnodes $data3_192.168.0.5
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
print 192.168.0.5 openVnodes $data3_5
if $data3_1 != 4 then
  goto show6
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
  goto show6
endi
if $data3_192.168.0.3 != null then
if $data3_3 != null then
  goto show6
endi
if $data3_192.168.0.4 != 1 then
if $data3_4 != 1 then
  goto show6
endi
if $data3_192.168.0.5 != 1 then
if $data3_5 != 1 then
  goto show6
endi

system sh/exec.sh -n dnode3 -s stop -x SIGINT
system sh/exec_up.sh -n dnode3 -s stop -x SIGINT

print ========== step7
sql select * from d1.t1 order by t desc
@@ -43,15 +43,15 @@ system sh/cfg.sh -n dnode5 -c clog -v 1
system sh/cfg.sh -n dnode6 -c clog -v 1

print ========== step1
system sh/exec.sh -n dnode1 -s start
system sh/exec_up.sh -n dnode1 -s start
sql connect

sql create dnode 192.168.0.2
sql create dnode 192.168.0.3
sql create dnode 192.168.0.4
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
system sh/exec.sh -n dnode4 -s start
system sh/exec_up.sh -n dnode2 -s start
system sh/exec_up.sh -n dnode3 -s start
system sh/exec_up.sh -n dnode4 -s start
sleep 3000

sql create database d1 replica 3 tables 4

@@ -71,21 +71,21 @@ sql insert into d2.t2 values(now+4s, 22)
sql insert into d2.t2 values(now+5s, 21)

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.4 freeVnodes $data3_192.168.0.4
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4

if $data3_192.168.0.1 != 4 then
if $data3_1 != 4 then
  return -1
endi
if $data3_192.168.0.2 != 2 then
if $data3_2 != 2 then
  return -1
endi
if $data3_192.168.0.3 != 2 then
if $data3_3 != 2 then
  return -1
endi
if $data3_192.168.0.4 != 2 then
if $data3_4 != 2 then
  return -1
endi

@@ -101,29 +101,29 @@ show2:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.4 freeVnodes $data3_192.168.0.4
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4

if $data3_192.168.0.1 != 2 then
if $data3_1 != 2 then
  goto show2
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
  goto show2
endi
if $data3_192.168.0.3 != 2 then
if $data3_3 != 2 then
  goto show2
endi
if $data3_192.168.0.4 != 2 then
if $data3_4 != 2 then
  goto show2
endi

system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec_up.sh -n dnode2 -s stop -x SIGINT

print ========== step
sql create dnode 192.168.0.5
system sh/exec.sh -n dnode5 -s start
system sh/exec_up.sh -n dnode5 -s start

$x = 0
show3:

@@ -134,25 +134,25 @@ show3:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.4 freeVnodes $data3_192.168.0.4
print 192.168.0.5 freeVnodes $data3_192.168.0.5
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
print 192.168.0.5 openVnodes $data3_5

if $data3_192.168.0.1 != 4 then
if $data3_1 != 4 then
  goto show3
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
  goto show3
endi
if $data3_192.168.0.3 != 2 then
if $data3_3 != 2 then
  goto show3
endi
if $data3_192.168.0.4 != 2 then
if $data3_4 != 2 then
  goto show3
endi
if $data3_192.168.0.5 != 2 then
if $data3_5 != 2 then
  goto show3
endi

@@ -174,31 +174,31 @@ show4:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.4 freeVnodes $data3_192.168.0.4
print 192.168.0.5 freeVnodes $data3_192.168.0.5
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
print 192.168.0.5 openVnodes $data3_5

if $data3_192.168.0.1 != 4 then
if $data3_1 != 4 then
  goto show4
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
  goto show4
endi
if $data3_192.168.0.3 != 1 then
if $data3_3 != 1 then
  goto show4
endi
if $data3_192.168.0.4 != 1 then
if $data3_4 != 1 then
  goto show4
endi
if $data3_192.168.0.5 != 1 then
if $data3_5 != 1 then
  goto show4
endi

print ========== step5
sql create dnode 192.168.0.2
system sh/exec.sh -n dnode2 -s start
system sh/exec_up.sh -n dnode2 -s start

$x = 0
show5:

@@ -209,16 +209,16 @@ show5:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.4 freeVnodes $data3_192.168.0.4
print 192.168.0.5 freeVnodes $data3_192.168.0.5
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
print 192.168.0.5 openVnodes $data3_5

if $data3_192.168.0.1 != 4 then
if $data3_1 != 4 then
  goto show5
endi
if $data3_192.168.0.2 != 2 then
if $data3_2 != 2 then
  goto show5
endi

@@ -236,29 +236,29 @@ show6:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.4 freeVnodes $data3_192.168.0.4
print 192.168.0.5 freeVnodes $data3_192.168.0.5
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
print 192.168.0.5 openVnodes $data3_5

if $data3_192.168.0.1 != 4 then
if $data3_1 != 4 then
  goto show6
endi
if $data3_192.168.0.2 != 1 then
if $data3_2 != 1 then
  goto show6
endi
if $data3_192.168.0.3 != null then
if $data3_3 != null then
  goto show6
endi
if $data3_192.168.0.4 != 1 then
if $data3_4 != 1 then
  goto show6
endi
if $data3_192.168.0.5 != 1 then
if $data3_5 != 1 then
  goto show6
endi

system sh/exec.sh -n dnode3 -s stop -x SIGINT
system sh/exec_up.sh -n dnode3 -s stop -x SIGINT

print ========== step7
sql select * from d1.t1 order by t desc
@@ -31,7 +31,7 @@ system sh/cfg.sh -n dnode3 -c clog -v 1
system sh/cfg.sh -n dnode4 -c clog -v 1

print ========== step1
system sh/exec.sh -n dnode1 -s start
system sh/exec_up.sh -n dnode1 -s start
sql connect
sleep 3000

@@ -52,14 +52,14 @@ sql insert into d2.t2 values(now+4s, 22)
sql insert into d2.t2 values(now+5s, 21)

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
if $data3_192.168.0.1 != 2 then
print 192.168.0.1 openVnodes $data3_1
if $data3_1 != 2 then
  return -1
endi

print ========== step2
sql create dnode 192.168.0.2
system sh/exec.sh -n dnode2 -s start
system sh/exec_up.sh -n dnode2 -s start

$x = 0
show2:

@@ -70,12 +70,12 @@ show2:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 4 then
  goto show2
endi
if $data3_192.168.0.2 != 2 then
if $data3_2 != 2 then
  goto show2
endi

@@ -96,18 +96,18 @@ show3:
  return -1
endi
sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
if $data3_192.168.0.1 != 3 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 3 then
  goto show3
endi
if $data3_192.168.0.2 != 1 then
if $data3_2 != 1 then
  goto show3
endi

print ========== step3
sql create dnode 192.168.0.3
system sh/exec.sh -n dnode3 -s start
system sh/exec_up.sh -n dnode3 -s start

$x = 0
show4:

@@ -117,16 +117,16 @@ show4:
  return -1
endi
sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
if $data3_1 != 4 then
  goto show4
endi
if $data3_192.168.0.2 != 2 then
if $data3_2 != 2 then
  goto show4
endi
if $data3_192.168.0.3 != 2 then
if $data3_3 != 2 then
  goto show4
endi

@@ -141,20 +141,20 @@ show5:
  return -1
endi
sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
if $data3_192.168.0.1 != 3 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
if $data3_1 != 3 then
  goto show5
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
  goto show5
endi
if $data3_192.168.0.3 != 1 then
if $data3_3 != 1 then
  goto show5
endi

system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec_up.sh -n dnode2 -s stop -x SIGINT

print ========== step6
sql select * from d1.t1 order by t desc
@@ -22,19 +22,19 @@ system sh/cfg.sh -n dnode1 -c monitor -v 1
system sh/cfg.sh -n dnode2 -c monitor -v 0

print ========== step1
system sh/exec.sh -n dnode1 -s start
system sh/exec_up.sh -n dnode1 -s start
sql connect
sleep 5000

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
if $data3_192.168.0.1 != 3 then
print 192.168.0.1 openVnodes $data3_1
if $data3_1 != 3 then
  return -1
endi

print ========== step2
sql create dnode 192.168.0.2
system sh/exec.sh -n dnode2 -s start
system sh/exec_up.sh -n dnode2 -s start

$x = 0
show2:

@@ -45,12 +45,12 @@ show2:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 4 then
  goto show2
endi
if $data3_192.168.0.2 != 3 then
if $data3_2 != 3 then
  goto show2
endi
@@ -29,10 +29,10 @@ system sh/cfg.sh -n dnode2 -c clog -v 1
system sh/cfg.sh -n dnode3 -c clog -v 1

print ========== step1
system sh/exec.sh -n dnode1 -s start
system sh/exec_up.sh -n dnode1 -s start
sql connect
sql create dnode 192.168.0.2
system sh/exec.sh -n dnode2 -s start
system sh/exec_up.sh -n dnode2 -s start
sleep 3000

sql show dnodes

@@ -44,7 +44,7 @@ if $data4_192.168.0.2 != ready then
endi

print ========== step2
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec_up.sh -n dnode2 -s stop -x SIGINT
sleep 6000

sql show dnodes
@@ -29,10 +29,10 @@ system sh/cfg.sh -n dnode2 -c clog -v 1
system sh/cfg.sh -n dnode3 -c clog -v 1

print ========== step1
system sh/exec.sh -n dnode1 -s start
system sh/exec_up.sh -n dnode1 -s start
sql connect
sql create dnode 192.168.0.2
system sh/exec.sh -n dnode2 -s start
system sh/exec_up.sh -n dnode2 -s start
sleep 3000

sql create database d1 replica 2 tables 4

@@ -48,7 +48,7 @@ if $data4_192.168.0.2 != ready then
endi

print ========== step2
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec_up.sh -n dnode2 -s stop -x SIGINT
sleep 5000

sql show dnodes

@@ -72,7 +72,7 @@ endi

print ========== step4
sql create dnode 192.168.0.3
system sh/exec.sh -n dnode3 -s start
system sh/exec_up.sh -n dnode3 -s start
sql drop dnode 192.168.0.2

sleep 5000
@@ -31,7 +31,7 @@ system sh/cfg.sh -n dnode3 -c clog -v 1
system sh/cfg.sh -n dnode4 -c clog -v 1

print ========== step1
system sh/exec.sh -n dnode1 -s start
system sh/exec_up.sh -n dnode1 -s start
sql connect
sleep 3000

@@ -52,14 +52,14 @@ sql insert into d2.t2 values(now+4s, 22)
sql insert into d2.t2 values(now+5s, 21)

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
if $data3_192.168.0.1 != 2 then
print 192.168.0.1 openVnodes $data3_1
if $data3_1 != 2 then
  return -1
endi

print ========== step2
sql create dnode 192.168.0.2
system sh/exec.sh -n dnode2 -s start
system sh/exec_up.sh -n dnode2 -s start
sleep 9000

sql create database d3 replica 2 tables 4

@@ -79,12 +79,12 @@ show2:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
if $data3_192.168.0.1 != 3 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 3 then
  goto show2
endi
if $data3_192.168.0.2 != 1 then
if $data3_2 != 1 then
  goto show2
endi

@@ -101,12 +101,12 @@ show3:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2 $data5_192.168.0.2
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2 $data5_192.168.0.2

print ========== step4
sql create dnode 192.168.0.3
system sh/exec.sh -n dnode3 -s start
system sh/exec_up.sh -n dnode3 -s start

$x = 0
show4:

@@ -117,18 +117,18 @@ show4:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
if $data3_192.168.0.2 != null then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
if $data3_2 != null then
  goto show4
endi

system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec_up.sh -n dnode2 -s stop -x SIGINT

print ========== step5
sql create dnode 192.168.0.4
system sh/exec.sh -n dnode4 -s start
system sh/exec_up.sh -n dnode4 -s start

$x = 0
show5:

@@ -138,20 +138,20 @@ show5:
  return -1
endi
sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
print 192.168.0.4 freeVnodes $data3_192.168.0.4
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
if $data3_1 != 4 then
  goto show5
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
  goto show5
endi
if $data3_192.168.0.3 != 2 then
if $data3_3 != 2 then
  goto show5
endi
if $data3_192.168.0.4 != 2 then
if $data3_4 != 2 then
  goto show5
endi
@@ -31,7 +31,7 @@ system sh/cfg.sh -n dnode3 -c clog -v 1
system sh/cfg.sh -n dnode4 -c clog -v 1

print ========== step1
system sh/exec.sh -n dnode1 -s start
system sh/exec_up.sh -n dnode1 -s start
sql connect
sleep 3000

@@ -52,14 +52,14 @@ sql insert into d2.t2 values(now+4s, 22)
sql insert into d2.t2 values(now+5s, 21)

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
if $data3_192.168.0.1 != 2 then
print 192.168.0.1 openVnodes $data3_1
if $data3_1 != 2 then
  return -1
endi

print ========== step2
sql create dnode 192.168.0.2
system sh/exec.sh -n dnode2 -s start
system sh/exec_up.sh -n dnode2 -s start
sleep 9000

sql create database d3 replica 2 tables 4

@@ -79,17 +79,17 @@ show2:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
if $data3_192.168.0.1 != 3 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 3 then
  goto show2
endi
if $data3_192.168.0.2 != 1 then
if $data3_2 != 1 then
  goto show2
endi

print ========== step3
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec_up.sh -n dnode2 -s stop -x SIGINT
sql drop dnode 192.168.0.2
sleep 7001

@@ -102,12 +102,12 @@ show3:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2 $data5_192.168.0.2
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2 $data5_192.168.0.2

print ========== step4
sql create dnode 192.168.0.3
system sh/exec.sh -n dnode3 -s start
system sh/exec_up.sh -n dnode3 -s start

$x = 0
show4:

@@ -118,16 +118,16 @@ show4:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
if $data3_192.168.0.2 != null then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
if $data3_2 != null then
  goto show4
endi
if $data3_192.168.0.1 != 3 then
if $data3_1 != 3 then
  goto show4
endi
if $data3_192.168.0.3 != 1 then
if $data3_3 != 1 then
  goto show4
endi
@@ -31,7 +31,7 @@ system sh/cfg.sh -n dnode3 -c clog -v 1
system sh/cfg.sh -n dnode4 -c clog -v 1

print ========== step1
system sh/exec.sh -n dnode1 -s start
system sh/exec_up.sh -n dnode1 -s start
sql connect

sql create database d1 tables 4

@@ -43,14 +43,14 @@ sql insert into d1.t1 values(now+4s, 12)
sql insert into d1.t1 values(now+5s, 11)

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
if $data3_192.168.0.1 != 3 then
print 192.168.0.1 openVnodes $data3_1
if $data3_1 != 3 then
  return -1
endi

print ========== step2
sql create dnode 192.168.0.2
system sh/exec.sh -n dnode2 -s start
system sh/exec_up.sh -n dnode2 -s start

$x = 0
show2:

@@ -60,12 +60,12 @@ show2:
  return -1
endi
sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 4 then
  goto show2
endi
if $data3_192.168.0.2 != 3 then
if $data3_2 != 3 then
  goto show2
endi

@@ -81,12 +81,12 @@ sql insert into d2.t2 values(now+5s, 21)

$x = 0
sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 4 then
  return -1
endi
if $data3_192.168.0.2 != 2 then
if $data3_2 != 2 then
  return -1
endi

@@ -101,19 +101,19 @@ show4:
  return -1
endi
sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
if $data3_192.168.0.1 != 2 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 2 then
  goto show4
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
  goto show4
endi
if $rows != 1 then
  goto show4
endi

system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec_up.sh -n dnode2 -s stop -x SIGINT

print ========== step5
sleep 2000

@@ -125,7 +125,7 @@ system sh/cfg.sh -n dnode2 -c balanceMonitorInterval -v 1
system sh/cfg.sh -n dnode2 -c balanceStartInterval -v 10
system sh/cfg.sh -n dnode2 -c mgmtEqualVnodeNum -v 4
system sh/cfg.sh -n dnode2 -c clog -v 1
system sh/exec.sh -n dnode2 -s start
system sh/exec_up.sh -n dnode2 -s start

$x = 0
show5:

@@ -135,12 +135,12 @@ show5:
  return -1
endi
sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 4 then
  goto show5
endi
if $data3_192.168.0.2 != 2 then
if $data3_2 != 2 then
  goto show5
endi

@@ -154,18 +154,18 @@ sql insert into d3.t3 values(now+4s, 32)
sql insert into d3.t3 values(now+5s, 31)

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 4 then
  return -1
endi
if $data3_192.168.0.2 != 1 then
if $data3_2 != 1 then
  return -1
endi

print ========== step7
sql create dnode 192.168.0.3
system sh/exec.sh -n dnode3 -s start
system sh/exec_up.sh -n dnode3 -s start

$x = 0
show7:

@@ -176,16 +176,16 @@ show7:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
if $data3_1 != 4 then
  goto show7
endi
if $data3_192.168.0.2 != 2 then
if $data3_2 != 2 then
  goto show7
endi
if $data3_192.168.0.3 != 3 then
if $data3_3 != 3 then
  goto show7
endi

@@ -206,16 +206,16 @@ show8:
  return -1
endi
sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
if $data3_1 != 4 then
  goto show8
endi
if $data3_192.168.0.2 != 2 then
if $data3_2 != 2 then
  goto show8
endi
if $data3_192.168.0.3 != 2 then
if $data3_3 != 2 then
  goto show8
endi

@@ -231,20 +231,20 @@ show9:
endi

sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1
print 192.168.0.2 freeVnodes $data3_192.168.0.2
print 192.168.0.3 freeVnodes $data3_192.168.0.3
if $data3_192.168.0.1 != 4 then
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
if $data3_1 != 4 then
  goto show9
endi
if $data3_192.168.0.2 != null then
if $data3_2 != null then
  goto show9
endi
if $data3_192.168.0.3 != 0 then
if $data3_3 != 0 then
  goto show9
endi

system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec_up.sh -n dnode2 -s stop -x SIGINT

print ========== step10
sql select * from d1.t1 order by t desc
@@ -109,3 +109,7 @@ endi
if $data3_3 != null then
  goto show7
endi

system sh/exec_up.sh -n dnode1 -s stop
system sh/exec_up.sh -n dnode2 -s stop
system sh/exec_up.sh -n dnode3 -s stop

@@ -77,4 +77,8 @@ if $data3_1 != master then
endi
if $data3_2 != slave then
  goto step5
endi
endi

system sh/exec_up.sh -n dnode1 -s stop
system sh/exec_up.sh -n dnode2 -s stop
system sh/exec_up.sh -n dnode3 -s stop

@@ -93,3 +93,7 @@ endi
if $dnode3Role != slave then
  return -1
endi

system sh/exec_up.sh -n dnode1 -s stop
system sh/exec_up.sh -n dnode2 -s stop
system sh/exec_up.sh -n dnode3 -s stop

@@ -123,3 +123,6 @@ if $dnode3Role != slave then
  return -1
endi

system sh/exec_up.sh -n dnode1 -s stop
system sh/exec_up.sh -n dnode2 -s stop
system sh/exec_up.sh -n dnode3 -s stop

@@ -83,3 +83,7 @@ endi
if $dnode3Role != null then
  return -1
endi

system sh/exec_up.sh -n dnode1 -s stop
system sh/exec_up.sh -n dnode2 -s stop
system sh/exec_up.sh -n dnode3 -s stop

@@ -38,5 +38,6 @@ if $data4_2 != 4 then
  return -1
endi

system sh/exec_up.sh -n dnode1 -s stop
system sh/exec_up.sh -n dnode2 -s stop
system sh/exec_up.sh -n dnode3 -s stop

@@ -7,4 +7,3 @@ run unique/mnode/mgmt33.sim
run unique/mnode/mgmt34.sim
run unique/mnode/mgmtr2.sim
run unique/mnode/secondIp.sim
@@ -8,32 +8,33 @@ GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'

cd script
sudo ./test.sh 2>&1 | grep 'success\|failed' | tee out.txt
./test.sh -f basicSuite.sim 2>&1 | grep 'success\|failed\|fault' | tee out.txt

total_success=`grep success out.txt | wc -l`
totalSuccess=`grep success out.txt | wc -l`
totalBasic=`grep success out.txt | grep Suite | wc -l`

if [ "$total_success" -gt "0" ]; then
  total_success=`expr $total_success - 1`
  echo -e "${GREEN} ### Total $total_success TSIM case(s) succeed! ### ${NC}"
if [ "$totalSuccess" -gt "0" ]; then
  totalSuccess=`expr $totalSuccess - $totalBasic`
  echo -e "${GREEN} ### Total $totalSuccess TSIM case(s) succeed! ### ${NC}"
fi

total_failed=`grep failed out.txt | wc -l`
if [ "$total_failed" -ne "0" ]; then
  echo -e "${RED} ### Total $total_failed TSIM case(s) failed! ### ${NC}"
  exit $total_failed
totalFailed=`grep 'failed\|fault' out.txt | wc -l`
if [ "$totalFailed" -ne "0" ]; then
  echo -e "${RED} ### Total $totalFailed TSIM case(s) failed! ### ${NC}"
  exit $totalFailed
fi

cd ../pytest
sudo ./simpletest.sh 2>&1 | grep 'successfully executed\|failed' | tee pytest-out.txt
total_py_success=`grep 'successfully executed' pytest-out.txt | wc -l`
./simpletest.sh 2>&1 | grep 'successfully executed\|failed' | tee pytest-out.txt
totalPySuccess=`grep 'successfully executed' pytest-out.txt | wc -l`

if [ "$total_py_success" -gt "0" ]; then
  echo -e "${GREEN} ### Total $total_py_success python case(s) succeed! ### ${NC}"
if [ "$totalPySuccess" -gt "0" ]; then
  echo -e "${GREEN} ### Total $totalPySuccess python case(s) succeed! ### ${NC}"
fi

total_py_failed=`grep 'failed' pytest-out.txt | wc -l`
if [ "$total_py_failed" -ne "0" ]; then
  echo -e "${RED} ### Total $total_py_failed python case(s) failed! ### ${NC}"
  exit $total_py_failed
totalPyFailed=`grep 'failed' pytest-out.txt | wc -l`
if [ "$totalPyFailed" -ne "0" ]; then
  echo -e "${RED} ### Total $totalPyFailed python case(s) failed! ### ${NC}"
  exit $totalPyFailed
fi
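
One subtlety in the reworked counting: the suite-level summary line apparently also contains the word "success", so a bare grep would overcount by one per suite. The old code subtracted a hard-coded 1; the new code counts the Suite-tagged matches and subtracts exactly those:

totalSuccess=`grep success out.txt | wc -l`
totalBasic=`grep success out.txt | grep Suite | wc -l`
totalSuccess=`expr $totalSuccess - $totalBasic`

(The reading that Suite-tagged lines are suite summaries is inferred from the old subtract-one logic, not stated in the diff.)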