Merge branch 'develop' into feature/python-test-no-sudo

Steven Li 2020-04-22 16:33:55 -07:00
commit 52ceb2a9e7
74 changed files with 1497 additions and 860 deletions


@ -24,10 +24,11 @@ matrix:
- python-setuptools - python-setuptools
- python3-pip - python3-pip
- python3-setuptools - python3-setuptools
- valgrind
before_install: before_install:
- sudo apt update -y -qq - sudo apt update -y -qq
- sudo apt install -y net-tools python-pip python-setuptools python3-pip python3-setuptools - sudo apt install -y net-tools python-pip python-setuptools python3-pip python3-setuptools valgrind
before_script: before_script:
- cd ${TRAVIS_BUILD_DIR} - cd ${TRAVIS_BUILD_DIR}
@ -43,16 +44,32 @@ matrix:
case $TRAVIS_OS_NAME in case $TRAVIS_OS_NAME in
linux) linux)
cd ${TRAVIS_BUILD_DIR}/debug cd ${TRAVIS_BUILD_DIR}/debug
sudo make install || exit $? sudo make install || travis_terminate $?
pip install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python2/ pip install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python2/
pip3 install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python3/ pip3 install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python3/
cd ${TRAVIS_BUILD_DIR}/tests cd ${TRAVIS_BUILD_DIR}/tests
bash ./test-all.sh ./test-all.sh || travis_terminate $?
if [ "$?" -ne "0" ]; then cd ${TRAVIS_BUILD_DIR}/tests/pytest
exit $? ./simpletest.sh -g 2>&1 | tee mem-error-out.txt
sleep 1
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
memError=`grep -m 1 'ERROR SUMMARY' mem-error-out.txt | awk '{print $4}'`
if [ -n "$memError" ]; then
if [ "$memError" -gt 23 ]; then
echo -e "${RED} ## Memory errors number valgrind reports is $memError. More than our threshold! ## ${NC} "
travis_terminate $memError
fi
fi fi
;; ;;
@ -74,20 +91,20 @@ matrix:
# GitHub project metadata # GitHub project metadata
# ** specific to your project ** # ** specific to your project **
project: project:
name: sangshuduo/TDengine name: TDengine
version: 2.x version: 2.x
description: sangshuduo/TDengine description: taosdata/TDengine
# Where email notification of build analysis results will be sent # Where email notification of build analysis results will be sent
notification_email: sangshuduo@gmail.com notification_email: sdsang@taosdata.com
# Commands to prepare for build_command # Commands to prepare for build_command
# ** likely specific to your build ** # ** likely specific to your build **
build_command_prepend: cmake .. build_command_prepend: cmake .
# The command that will be added as an argument to "cov-build" to compile your project for analysis, # The command that will be added as an argument to "cov-build" to compile your project for analysis,
# ** likely specific to your build ** # ** likely specific to your build **
build_command: cmake --build . build_command: make
# Pattern to match selecting branches that will run analysis. We recommend leaving this set to 'coverity_scan'. # Pattern to match selecting branches that will run analysis. We recommend leaving this set to 'coverity_scan'.
# Take care in resource usage, and consider the build frequency allowances per # Take care in resource usage, and consider the build frequency allowances per
@ -132,17 +149,17 @@ matrix:
case $TRAVIS_OS_NAME in case $TRAVIS_OS_NAME in
linux) linux)
cd ${TRAVIS_BUILD_DIR}/debug cd ${TRAVIS_BUILD_DIR}/debug
sudo make install || exit $? sudo make install || travis_terminate $?
pip install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python2/ pip install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python2/
pip3 install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python3/ pip3 install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python3/
cd ${TRAVIS_BUILD_DIR}/tests cd ${TRAVIS_BUILD_DIR}/tests
bash ./test-all.sh ./test-all.sh
if [ "$?" -ne "0" ]; then if [ "$?" -ne "0" ]; then
exit $? travis_terminate $?
fi fi
sudo pkill taosd sudo pkill taosd
@ -150,7 +167,7 @@ matrix:
cd ${TRAVIS_BUILD_DIR} cd ${TRAVIS_BUILD_DIR}
lcov -d . --capture --rc lcov_branch_coverage=1 -o coverage.info lcov -d . --capture --rc lcov_branch_coverage=1 -o coverage.info
lcov -l --rc lcov_branch_coverage=1 coverage.info || exit $? lcov -l --rc lcov_branch_coverage=1 coverage.info || travis_terminate $?
gem install coveralls-lcov gem install coveralls-lcov
@ -166,7 +183,6 @@ matrix:
echo -e "${GREEN} ## Uploaded to Coveralls.io! ## ${NC}" echo -e "${GREEN} ## Uploaded to Coveralls.io! ## ${NC}"
else else
echo -e "${RED} ## Coveralls.io not collect coverage report! ## ${NC} " echo -e "${RED} ## Coveralls.io not collect coverage report! ## ${NC} "
exit $?
fi fi
bash <(curl -s https://codecov.io/bash) -y .codecov.yml -f coverage.info bash <(curl -s https://codecov.io/bash) -y .codecov.yml -f coverage.info
@ -174,7 +190,6 @@ matrix:
echo -e "${GREEN} ## Uploaded to Codecov! ## ${NC} " echo -e "${GREEN} ## Uploaded to Codecov! ## ${NC} "
else else
echo -e "${RED} ## Codecov did not collect coverage report! ## ${NC} " echo -e "${RED} ## Codecov did not collect coverage report! ## ${NC} "
exit $?
fi fi
;; ;;


@ -465,7 +465,7 @@ extern void * tscQhandle;
extern int tscKeepConn[]; extern int tscKeepConn[];
extern int tsInsertHeadSize; extern int tsInsertHeadSize;
extern int tscNumOfThreads; extern int tscNumOfThreads;
extern SRpcIpSet tscMgmtIpList; extern SRpcIpSet tscMgmtIpSet;
typedef void (*__async_cb_func_t)(void *param, TAOS_RES *tres, int numOfRows); typedef void (*__async_cb_func_t)(void *param, TAOS_RES *tres, int numOfRows);


@ -30,7 +30,7 @@
#define TSC_MGMT_VNODE 999 #define TSC_MGMT_VNODE 999
SRpcIpSet tscMgmtIpList; SRpcIpSet tscMgmtIpSet;
SRpcIpSet tscDnodeIpSet; SRpcIpSet tscDnodeIpSet;
int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo) = {0}; int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo) = {0};
@ -58,35 +58,40 @@ static void tscSetDnodeIpList(SSqlObj* pSql, STableMeta* pTableMeta) {
} }
void tscPrintMgmtIp() { void tscPrintMgmtIp() {
if (tscMgmtIpList.numOfIps <= 0) { if (tscMgmtIpSet.numOfIps <= 0) {
tscError("invalid mgmt IP list:%d", tscMgmtIpList.numOfIps); tscError("invalid mgmt IP list:%d", tscMgmtIpSet.numOfIps);
} else { } else {
for (int i = 0; i < tscMgmtIpList.numOfIps; ++i) { for (int i = 0; i < tscMgmtIpSet.numOfIps; ++i) {
tscTrace("mgmt index:%d ip:%d", i, tscMgmtIpList.ip[i]); tscTrace("mgmt index:%d ip:%d", i, tscMgmtIpSet.ip[i]);
} }
} }
} }
void tscSetMgmtIpListFromCluster(SRpcIpSet *pIpList) { void tscSetMgmtIpListFromCluster(SRpcIpSet *pIpList) {
tscMgmtIpList.numOfIps = htons(pIpList->numOfIps); tscMgmtIpSet.numOfIps = pIpList->numOfIps;
tscMgmtIpList.inUse = htons(pIpList->inUse); tscMgmtIpSet.inUse = pIpList->inUse;
tscMgmtIpList.port = htons(pIpList->port); tscMgmtIpSet.port = htons(pIpList->port);
for (int32_t i = 0; i <tscMgmtIpList.numOfIps; ++i) { for (int32_t i = 0; i < tscMgmtIpSet.numOfIps; ++i) {
tscMgmtIpList.ip[i] = pIpList->ip[i]; tscMgmtIpSet.ip[i] = htonl(pIpList->ip[i]);
} }
} }
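
The corrected conversion above swaps only the multi-byte fields: port keeps its htons(), the four-byte IPs gain htonl(), and the one-byte counters numOfIps/inUse are now copied untouched (htons() on a one-byte counter shifts the value into the high byte, so truncating the result back to int8_t yields 0). A minimal sketch of the rule, over an illustrative stand-in struct rather than the real SRpcIpSet:

```c
#include <arpa/inet.h>
#include <stdint.h>

/* Illustrative stand-in for SRpcIpSet; only the field widths matter here. */
typedef struct {
  int8_t   inUse;     /* 1 byte  */
  int8_t   numOfIps;  /* 1 byte  */
  uint16_t port;      /* 2 bytes */
  uint32_t ip[3];     /* 4 bytes each */
} IpSetSketch;

/* Swap only multi-byte fields, as the fixed tscSetMgmtIpListFromCluster does. */
void ipSetFromWire(IpSetSketch *dst, const IpSetSketch *src) {
  dst->inUse    = src->inUse;        /* single bytes: no swap */
  dst->numOfIps = src->numOfIps;
  dst->port     = htons(src->port);  /* 16-bit swap */
  for (int i = 0; i < dst->numOfIps; ++i) {
    dst->ip[i] = htonl(src->ip[i]);  /* 32-bit swap */
  }
}
```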
void tscSetMgmtIpListFromEdge() { void tscSetMgmtIpListFromEdge() {
if (tscMgmtIpList.numOfIps != 1) { if (tscMgmtIpSet.numOfIps != 1) {
tscMgmtIpList.numOfIps = 1; tscMgmtIpSet.numOfIps = 1;
tscMgmtIpList.inUse = 0; tscMgmtIpSet.inUse = 0;
tscMgmtIpList.port = tsMnodeShellPort; tscMgmtIpSet.port = tsMnodeShellPort;
tscMgmtIpList.ip[0] = inet_addr(tsMasterIp); tscMgmtIpSet.ip[0] = inet_addr(tsMasterIp);
tscTrace("edge mgmt IP list:"); tscTrace("edge mgmt IP list:");
tscPrintMgmtIp(); tscPrintMgmtIp();
} }
} }
void tscUpdateIpSet(void *ahandle, SRpcIpSet *pIpSet) {
tscTrace("mgmt IP list is changed for ufp is called");
tscMgmtIpSet = *pIpSet;
}
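
tscUpdateIpSet is the new update callback (ufp) that the client registers with the RPC layer (see the rpcInit.ufp assignment later in this diff); when the server reports a changed mnode list, the cached set is replaced by a plain struct copy. A minimal sketch of the wiring, using trimmed stand-in types rather than the real TDengine headers:

```c
#include <stdint.h>

/* Trimmed stand-ins for SRpcIpSet and the rpcInit hook used in this diff. */
typedef struct { int8_t inUse, numOfIps; uint16_t port; uint32_t ip[3]; } IpSet;
typedef void (*UpdateFp)(void *ahandle, IpSet *pIpSet);

static IpSet mgmtIpSet;                 /* cached mnode list */

/* Invoked by the RPC layer whenever the server reports a new mnode list. */
static void updateIpSet(void *ahandle, IpSet *pIpSet) {
  (void)ahandle;
  mgmtIpSet = *pIpSet;                  /* whole-struct replacement */
}

typedef struct { UpdateFp ufp; /* ... other rpcInit fields ... */ } RpcInitSketch;

static void wireCallback(RpcInitSketch *init) {
  init->ufp = updateIpSet;   /* RPC layer calls this on ip-set changes */
}
```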
void tscSetMgmtIpList(SRpcIpSet *pIpList) { void tscSetMgmtIpList(SRpcIpSet *pIpList) {
/* /*
* The iplist returned by the cluster edition is the current management nodes * The iplist returned by the cluster edition is the current management nodes
@ -109,7 +114,7 @@ void tscSetMgmtIpList(SRpcIpSet *pIpList) {
UNUSED_FUNC UNUSED_FUNC
static int32_t tscGetMgmtConnMaxRetryTimes() { static int32_t tscGetMgmtConnMaxRetryTimes() {
int32_t factor = 2; int32_t factor = 2;
return tscMgmtIpList.numOfIps * factor; return tscMgmtIpSet.numOfIps * factor;
} }
void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) { void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) {
@ -204,7 +209,7 @@ int tscSendMsgToServer(SSqlObj *pSql) {
}; };
rpcSendRequest(pVnodeConn, &pSql->ipList, &rpcMsg); rpcSendRequest(pVnodeConn, &pSql->ipList, &rpcMsg);
} else { } else {
pSql->ipList = tscMgmtIpList; pSql->ipList = tscMgmtIpSet;
pSql->ipList.port = tsMnodeShellPort; pSql->ipList.port = tsMnodeShellPort;
tscTrace("%p msg:%s is sent to server %d", pSql, taosMsg[pSql->cmd.msgType], pSql->ipList.port); tscTrace("%p msg:%s is sent to server %d", pSql, taosMsg[pSql->cmd.msgType], pSql->ipList.port);
@ -425,7 +430,7 @@ int tscProcessSql(SSqlObj *pSql) {
return pSql->res.code; return pSql->res.code;
} }
} else if (pSql->cmd.command < TSDB_SQL_LOCAL) { } else if (pSql->cmd.command < TSDB_SQL_LOCAL) {
pSql->ipList = tscMgmtIpList; pSql->ipList = tscMgmtIpSet;
} else { // local handler } else { // local handler
return (*tscProcessMsgRsp[pCmd->command])(pSql); return (*tscProcessMsgRsp[pCmd->command])(pSql);
} }
@ -2224,10 +2229,7 @@ int tscProcessConnectRsp(SSqlObj *pSql) {
assert(len <= tListLen(pObj->db)); assert(len <= tListLen(pObj->db));
strncpy(pObj->db, temp, tListLen(pObj->db)); strncpy(pObj->db, temp, tListLen(pObj->db));
// SIpList * pIpList; tscSetMgmtIpList(&pConnect->ipList);
// char *rsp = pRes->pRsp + sizeof(SCMConnectRsp);
// pIpList = (SIpList *)rsp;
// tscSetMgmtIpList(pIpList);
strcpy(pObj->sversion, pConnect->serverVersion); strcpy(pObj->sversion, pConnect->serverVersion);
pObj->writeAuth = pConnect->writeAuth; pObj->writeAuth = pConnect->writeAuth;


@ -72,23 +72,23 @@ STscObj *taosConnectImpl(const char *ip, const char *user, const char *pass, con
} }
if (ip && ip[0]) { if (ip && ip[0]) {
tscMgmtIpList.inUse = 0; tscMgmtIpSet.inUse = 0;
tscMgmtIpList.port = tsMnodeShellPort; tscMgmtIpSet.port = tsMnodeShellPort;
tscMgmtIpList.numOfIps = 1; tscMgmtIpSet.numOfIps = 1;
tscMgmtIpList.ip[0] = inet_addr(ip); tscMgmtIpSet.ip[0] = inet_addr(ip);
if (tsMasterIp[0] && strcmp(ip, tsMasterIp) != 0) { if (tsMasterIp[0] && strcmp(ip, tsMasterIp) != 0) {
tscMgmtIpList.numOfIps = 2; tscMgmtIpSet.numOfIps = 2;
tscMgmtIpList.ip[1] = inet_addr(tsMasterIp); tscMgmtIpSet.ip[1] = inet_addr(tsMasterIp);
} }
if (tsSecondIp[0] && strcmp(tsSecondIp, tsMasterIp) != 0) { if (tsSecondIp[0] && strcmp(tsSecondIp, tsMasterIp) != 0) {
tscMgmtIpList.numOfIps = 3; tscMgmtIpSet.numOfIps = 3;
tscMgmtIpList.ip[2] = inet_addr(tsSecondIp); tscMgmtIpSet.ip[2] = inet_addr(tsSecondIp);
} }
} }
tscMgmtIpList.port = port ? port : tsMnodeShellPort; tscMgmtIpSet.port = port ? port : tsMnodeShellPort;
STscObj *pObj = (STscObj *)calloc(1, sizeof(STscObj)); STscObj *pObj = (STscObj *)calloc(1, sizeof(STscObj));
if (NULL == pObj) { if (NULL == pObj) {


@ -42,11 +42,13 @@ void * tscTmr;
void * tscQhandle; void * tscQhandle;
void * tscCheckDiskUsageTmr; void * tscCheckDiskUsageTmr;
int tsInsertHeadSize; int tsInsertHeadSize;
char tsLastUser[TSDB_USER_LEN + 1];
int tscNumOfThreads; int tscNumOfThreads;
static pthread_once_t tscinit = PTHREAD_ONCE_INIT; static pthread_once_t tscinit = PTHREAD_ONCE_INIT;
void taosInitNote(int numOfNoteLines, int maxNotes, char* lable); void taosInitNote(int numOfNoteLines, int maxNotes, char* lable);
void tscUpdateIpSet(void *ahandle, SRpcIpSet *pIpSet);
void tscCheckDiskUsage(void *para, void *unused) { void tscCheckDiskUsage(void *para, void *unused) {
taosGetDisk(); taosGetDisk();
@ -65,6 +67,7 @@ int32_t tscInitRpc(const char *user, const char *secret) {
rpcInit.label = "TSC-vnode"; rpcInit.label = "TSC-vnode";
rpcInit.numOfThreads = tscNumOfThreads; rpcInit.numOfThreads = tscNumOfThreads;
rpcInit.cfp = tscProcessMsgFromServer; rpcInit.cfp = tscProcessMsgFromServer;
rpcInit.ufp = tscUpdateIpSet;
rpcInit.sessions = tsMaxVnodeConnections; rpcInit.sessions = tsMaxVnodeConnections;
rpcInit.connType = TAOS_CONN_CLIENT; rpcInit.connType = TAOS_CONN_CLIENT;
rpcInit.user = (char*)user; rpcInit.user = (char*)user;
@ -79,6 +82,13 @@ int32_t tscInitRpc(const char *user, const char *secret) {
} }
} }
// switch users without stopping the service
if (strcmp(tsLastUser, user) != 0 && pTscMgmtConn != NULL) {
tscTrace("switch user from %s to %s", tsLastUser, user);
rpcClose(pTscMgmtConn);
pTscMgmtConn = NULL;
}
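
The new block above closes and reopens the mgmt RPC connection when the caller connects as a different user, because the user and secret are bound to the connection at rpcOpen() time. A condensed sketch of the pattern, with illustrative stubs standing in for rpcClose/rpcOpen (length checks elided):

```c
#include <string.h>

#define USER_LEN 24                 /* stand-in for TSDB_USER_LEN */

static char  lastUser[USER_LEN + 1];
static void *mgmtConn = NULL;       /* stand-in for pTscMgmtConn  */

void  rpcCloseStub(void *conn);     /* illustrative stubs */
void *rpcOpenForUser(const char *user);

/* Reopen whenever the user changes: credentials are fixed at open time. */
void ensureConnFor(const char *user) {
  if (mgmtConn != NULL && strcmp(lastUser, user) != 0) {
    rpcCloseStub(mgmtConn);         /* old conn carries the old secret */
    mgmtConn = NULL;
  }
  if (mgmtConn == NULL) {
    strcpy(lastUser, user);         /* remember the owner of this conn */
    mgmtConn = rpcOpenForUser(user);
  }
}
```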
if (pTscMgmtConn == NULL) { if (pTscMgmtConn == NULL) {
memset(&rpcInit, 0, sizeof(rpcInit)); memset(&rpcInit, 0, sizeof(rpcInit));
rpcInit.localIp = tsLocalIp; rpcInit.localIp = tsLocalIp;
@ -92,6 +102,7 @@ int32_t tscInitRpc(const char *user, const char *secret) {
rpcInit.user = (char*)user; rpcInit.user = (char*)user;
rpcInit.ckey = "key"; rpcInit.ckey = "key";
rpcInit.secret = secretEncrypt; rpcInit.secret = secretEncrypt;
strcpy(tsLastUser, user);
pTscMgmtConn = rpcOpen(&rpcInit); pTscMgmtConn = rpcOpen(&rpcInit);
if (pTscMgmtConn == NULL) { if (pTscMgmtConn == NULL) {
@ -145,14 +156,14 @@ void taos_init_imp() {
taosInitNote(tsNumOfLogLines / 10, 1, (char*)"tsc_note"); taosInitNote(tsNumOfLogLines / 10, 1, (char*)"tsc_note");
} }
tscMgmtIpList.inUse = 0; tscMgmtIpSet.inUse = 0;
tscMgmtIpList.port = tsMnodeShellPort; tscMgmtIpSet.port = tsMnodeShellPort;
tscMgmtIpList.numOfIps = 1; tscMgmtIpSet.numOfIps = 1;
tscMgmtIpList.ip[0] = inet_addr(tsMasterIp); tscMgmtIpSet.ip[0] = inet_addr(tsMasterIp);
if (tsSecondIp[0] && strcmp(tsSecondIp, tsMasterIp) != 0) { if (tsSecondIp[0] && strcmp(tsSecondIp, tsMasterIp) != 0) {
tscMgmtIpList.numOfIps = 2; tscMgmtIpSet.numOfIps = 2;
tscMgmtIpList.ip[1] = inet_addr(tsSecondIp); tscMgmtIpSet.ip[1] = inet_addr(tsSecondIp);
} }
tscInitMsgsFp(); tscInitMsgsFp();


@ -54,6 +54,11 @@ static SRpcIpSet tsMnodeIpSet = {0};
static SDMMnodeInfos tsMnodeInfos = {0}; static SDMMnodeInfos tsMnodeInfos = {0};
static SDMDnodeCfg tsDnodeCfg = {0}; static SDMDnodeCfg tsDnodeCfg = {0};
void dnodeUpdateIpSet(void *ahandle, SRpcIpSet *pIpSet) {
dTrace("mgmt IP list is changed for ufp is called");
tsMnodeIpSet = *pIpSet;
}
int32_t dnodeInitMClient() { int32_t dnodeInitMClient() {
dnodeReadDnodeCfg(); dnodeReadDnodeCfg();
tsRebootTime = taosGetTimestampSec(); tsRebootTime = taosGetTimestampSec();
@ -90,6 +95,7 @@ int32_t dnodeInitMClient() {
rpcInit.label = "DND-MC"; rpcInit.label = "DND-MC";
rpcInit.numOfThreads = 1; rpcInit.numOfThreads = 1;
rpcInit.cfp = dnodeProcessRspFromMnode; rpcInit.cfp = dnodeProcessRspFromMnode;
rpcInit.ufp = dnodeUpdateIpSet;
rpcInit.sessions = 100; rpcInit.sessions = 100;
rpcInit.connType = TAOS_CONN_CLIENT; rpcInit.connType = TAOS_CONN_CLIENT;
rpcInit.idleTime = tsShellActivityTimer * 2000; rpcInit.idleTime = tsShellActivityTimer * 2000;
@ -292,7 +298,7 @@ static bool dnodeReadMnodeInfos() {
tsMnodeInfos.nodeInfos[i].syncPort = (uint16_t)syncPort->valueint; tsMnodeInfos.nodeInfos[i].syncPort = (uint16_t)syncPort->valueint;
cJSON *nodeName = cJSON_GetObjectItem(nodeInfo, "nodeName"); cJSON *nodeName = cJSON_GetObjectItem(nodeInfo, "nodeName");
if (!nodeIp || nodeName->type != cJSON_String || nodeName->valuestring == NULL) { if (!nodeName || nodeName->type != cJSON_String || nodeName->valuestring == NULL) {
dError("failed to read mnode mgmtIpList.json, nodeName not found"); dError("failed to read mnode mgmtIpList.json, nodeName not found");
goto PARSE_OVER; goto PARSE_OVER;
} }
@ -304,7 +310,7 @@ static bool dnodeReadMnodeInfos() {
dPrint("read mnode iplist successed, numOfIps:%d inUse:%d", tsMnodeInfos.nodeNum, tsMnodeInfos.inUse); dPrint("read mnode iplist successed, numOfIps:%d inUse:%d", tsMnodeInfos.nodeNum, tsMnodeInfos.inUse);
for (int32_t i = 0; i < tsMnodeInfos.nodeNum; i++) { for (int32_t i = 0; i < tsMnodeInfos.nodeNum; i++) {
dPrint("mnode:%d, ip:%s:%u name:%s", tsMnodeInfos.nodeInfos[i].nodeId, dPrint("mnode:%d, ip:%s:%u name:%s", tsMnodeInfos.nodeInfos[i].nodeId,
taosIpStr(tsMnodeInfos.nodeInfos[i].nodeId), tsMnodeInfos.nodeInfos[i].nodePort, taosIpStr(tsMnodeInfos.nodeInfos[i].nodeIp), tsMnodeInfos.nodeInfos[i].nodePort,
tsMnodeInfos.nodeInfos[i].nodeName); tsMnodeInfos.nodeInfos[i].nodeName);
} }


@ -33,7 +33,6 @@ static int32_t dnodeOpenVnodes();
static void dnodeCloseVnodes(); static void dnodeCloseVnodes();
static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *pMsg); static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *pMsg);
static int32_t dnodeProcessDropVnodeMsg(SRpcMsg *pMsg); static int32_t dnodeProcessDropVnodeMsg(SRpcMsg *pMsg);
static int32_t dnodeProcessAlterVnodeMsg(SRpcMsg *pMsg);
static int32_t dnodeProcessAlterStreamMsg(SRpcMsg *pMsg); static int32_t dnodeProcessAlterStreamMsg(SRpcMsg *pMsg);
static int32_t dnodeProcessConfigDnodeMsg(SRpcMsg *pMsg); static int32_t dnodeProcessConfigDnodeMsg(SRpcMsg *pMsg);
static int32_t (*dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *pMsg); static int32_t (*dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *pMsg);
@ -41,7 +40,6 @@ static int32_t (*dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *pMsg);
int32_t dnodeInitMgmt() { int32_t dnodeInitMgmt() {
dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = dnodeProcessCreateVnodeMsg; dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = dnodeProcessCreateVnodeMsg;
dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_DROP_VNODE] = dnodeProcessDropVnodeMsg; dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_DROP_VNODE] = dnodeProcessDropVnodeMsg;
dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_ALTER_VNODE] = dnodeProcessAlterVnodeMsg;
dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = dnodeProcessAlterStreamMsg; dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = dnodeProcessAlterStreamMsg;
dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CONFIG_DNODE] = dnodeProcessConfigDnodeMsg; dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CONFIG_DNODE] = dnodeProcessConfigDnodeMsg;
@ -129,25 +127,31 @@ static void dnodeCloseVnodes() {
static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *rpcMsg) { static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *rpcMsg) {
SMDCreateVnodeMsg *pCreate = rpcMsg->pCont; SMDCreateVnodeMsg *pCreate = rpcMsg->pCont;
pCreate->cfg.vgId = htonl(pCreate->cfg.vgId); pCreate->cfg.vgId = htonl(pCreate->cfg.vgId);
pCreate->cfg.maxSessions = htonl(pCreate->cfg.maxSessions); pCreate->cfg.maxTables = htonl(pCreate->cfg.maxTables);
pCreate->cfg.cacheBlockSize = htonl(pCreate->cfg.cacheBlockSize); pCreate->cfg.maxCacheSize = htobe64(pCreate->cfg.maxCacheSize);
pCreate->cfg.daysPerFile = htonl(pCreate->cfg.daysPerFile); pCreate->cfg.minRowsPerFileBlock = htonl(pCreate->cfg.minRowsPerFileBlock);
pCreate->cfg.daysToKeep1 = htonl(pCreate->cfg.daysToKeep1); pCreate->cfg.maxRowsPerFileBlock = htonl(pCreate->cfg.maxRowsPerFileBlock);
pCreate->cfg.daysToKeep2 = htonl(pCreate->cfg.daysToKeep2); pCreate->cfg.daysPerFile = htonl(pCreate->cfg.daysPerFile);
pCreate->cfg.daysToKeep = htonl(pCreate->cfg.daysToKeep); pCreate->cfg.daysToKeep1 = htonl(pCreate->cfg.daysToKeep1);
pCreate->cfg.commitTime = htonl(pCreate->cfg.commitTime); pCreate->cfg.daysToKeep2 = htonl(pCreate->cfg.daysToKeep2);
pCreate->cfg.rowsInFileBlock = htonl(pCreate->cfg.rowsInFileBlock); pCreate->cfg.daysToKeep = htonl(pCreate->cfg.daysToKeep);
pCreate->cfg.blocksPerTable = htons(pCreate->cfg.blocksPerTable); pCreate->cfg.commitTime = htonl(pCreate->cfg.commitTime);
pCreate->cfg.cacheNumOfBlocks.totalBlocks = htonl(pCreate->cfg.cacheNumOfBlocks.totalBlocks); pCreate->cfg.arbitratorIp = htonl(pCreate->cfg.arbitratorIp);
for (int32_t j = 0; j < pCreate->cfg.replications; ++j) { for (int32_t j = 0; j < pCreate->cfg.replications; ++j) {
pCreate->vpeerDesc[j].vgId = htonl(pCreate->vpeerDesc[j].vgId); pCreate->nodes[j].nodeId = htonl(pCreate->nodes[j].nodeId);
pCreate->vpeerDesc[j].dnodeId = htonl(pCreate->vpeerDesc[j].dnodeId); pCreate->nodes[j].nodeIp = htonl(pCreate->nodes[j].nodeIp);
pCreate->vpeerDesc[j].ip = htonl(pCreate->vpeerDesc[j].ip);
} }
return vnodeCreate(pCreate); void *pVnode = vnodeAccquireVnode(pCreate->cfg.vgId);
if (pVnode != NULL) {
int32_t code = vnodeAlter(pVnode, pCreate);
vnodeRelease(pVnode);
return code;
} else {
return vnodeCreate(pCreate);
}
} }
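
dnodeProcessCreateVnodeMsg now doubles as the alter path: after converting the wire fields to host order, it first tries to acquire an already-open vnode and alters it, falling back to a fresh create; this replaces the separate TSDB_MSG_TYPE_MD_ALTER_VNODE handler removed below. A condensed sketch of the dispatch, with the functions from this hunk declared as illustrative stubs:

```c
#include <stdint.h>

typedef struct { struct { uint32_t vgId; /* ... */ } cfg; } CreateVnodeMsg;

/* Illustrative declarations matching the calls in this hunk. */
void   *vnodeAccquireVnode(uint32_t vgId);
int32_t vnodeAlter(void *pVnode, CreateVnodeMsg *pCreate);
int32_t vnodeCreate(CreateVnodeMsg *pCreate);
void    vnodeRelease(void *pVnode);

static int32_t processCreateVnode(CreateVnodeMsg *pCreate) {
  void *pVnode = vnodeAccquireVnode(pCreate->cfg.vgId);
  if (pVnode != NULL) {               /* vnode exists: treat msg as alter */
    int32_t code = vnodeAlter(pVnode, pCreate);
    vnodeRelease(pVnode);             /* balance the acquire's reference  */
    return code;
  }
  return vnodeCreate(pCreate);        /* otherwise a genuine create       */
}
```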
static int32_t dnodeProcessDropVnodeMsg(SRpcMsg *rpcMsg) { static int32_t dnodeProcessDropVnodeMsg(SRpcMsg *rpcMsg) {
@ -157,15 +161,6 @@ static int32_t dnodeProcessDropVnodeMsg(SRpcMsg *rpcMsg) {
return vnodeDrop(pDrop->vgId); return vnodeDrop(pDrop->vgId);
} }
static int32_t dnodeProcessAlterVnodeMsg(SRpcMsg *rpcMsg) {
SMDCreateVnodeMsg *pCreate = rpcMsg->pCont;
pCreate->cfg.vgId = htonl(pCreate->cfg.vgId);
pCreate->cfg.maxSessions = htonl(pCreate->cfg.maxSessions);
pCreate->cfg.daysPerFile = htonl(pCreate->cfg.daysPerFile);
return 0;
}
static int32_t dnodeProcessAlterStreamMsg(SRpcMsg *pMsg) { static int32_t dnodeProcessAlterStreamMsg(SRpcMsg *pMsg) {
// SMDAlterStreamMsg *pStream = pCont; // SMDAlterStreamMsg *pStream = pCont;
// pStream->uid = htobe64(pStream->uid); // pStream->uid = htobe64(pStream->uid);


@ -33,7 +33,6 @@ int32_t dnodeInitMnode() {
dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_DROP_STABLE] = dnodeWrite; dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_DROP_STABLE] = dnodeWrite;
dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = dnodeMgmt; dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = dnodeMgmt;
dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_DROP_VNODE] = dnodeMgmt; dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_DROP_VNODE] = dnodeMgmt;
dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_ALTER_VNODE] = dnodeMgmt;
dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = dnodeMgmt; dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = dnodeMgmt;
dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CONFIG_DNODE] = dnodeMgmt; dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CONFIG_DNODE] = dnodeMgmt;


@ -75,14 +75,19 @@ TAOS_DEFINE_ERROR(TSDB_CODE_NOT_CONFIGURED, 0, 27, "not configured")
TAOS_DEFINE_ERROR(TSDB_CODE_NODE_OFFLINE, 0, 28, "node offline") TAOS_DEFINE_ERROR(TSDB_CODE_NODE_OFFLINE, 0, 28, "node offline")
TAOS_DEFINE_ERROR(TSDB_CODE_NETWORK_UNAVAIL, 0, 29, "network unavailable") TAOS_DEFINE_ERROR(TSDB_CODE_NETWORK_UNAVAIL, 0, 29, "network unavailable")
// db & user // db
TAOS_DEFINE_ERROR(TSDB_CODE_DB_NOT_SELECTED, 0, 100, "db not selected") TAOS_DEFINE_ERROR(TSDB_CODE_DB_NOT_SELECTED, 0, 100, "db not selected")
TAOS_DEFINE_ERROR(TSDB_CODE_DB_ALREADY_EXIST, 0, 101, "database already exist") TAOS_DEFINE_ERROR(TSDB_CODE_DB_ALREADY_EXIST, 0, 101, "database already exist")
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_DB, 0, 102, "invalid database") TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_DB, 0, 102, "invalid database")
TAOS_DEFINE_ERROR(TSDB_CODE_MONITOR_DB_FORBIDDEN, 0, 103, "monitor db forbidden") TAOS_DEFINE_ERROR(TSDB_CODE_MONITOR_DB_FORBIDDEN, 0, 103, "monitor db forbidden")
TAOS_DEFINE_ERROR(TSDB_CODE_USER_ALREADY_EXIST, 0, 104, "user already exist")
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_USER, 0, 105, "invalid user") // user
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_PASS, 0, 106, "invalid password") TAOS_DEFINE_ERROR(TSDB_CODE_USER_ALREADY_EXIST, 0, 150, "user already exist")
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_USER, 0, 151, "invalid user")
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_PASS, 0, 152, "invalid password")
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_USER_FORMAT, 0, 153, "invalid user format")
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_PASS_FORMAT, 0, 154, "invalid password format")
TAOS_DEFINE_ERROR(TSDB_CODE_NO_USER_FROM_CONN, 0, 155, "can not get user from conn")
// table // table
TAOS_DEFINE_ERROR(TSDB_CODE_TABLE_ALREADY_EXIST, 0, 200, "table already exist") TAOS_DEFINE_ERROR(TSDB_CODE_TABLE_ALREADY_EXIST, 0, 200, "table already exist")


@ -48,14 +48,12 @@ extern "C" {
#define TSDB_MSG_TYPE_MD_CREATE_VNODE_RSP 16 #define TSDB_MSG_TYPE_MD_CREATE_VNODE_RSP 16
#define TSDB_MSG_TYPE_MD_DROP_VNODE 17 #define TSDB_MSG_TYPE_MD_DROP_VNODE 17
#define TSDB_MSG_TYPE_MD_DROP_VNODE_RSP 18 #define TSDB_MSG_TYPE_MD_DROP_VNODE_RSP 18
#define TSDB_MSG_TYPE_MD_ALTER_VNODE 19 #define TSDB_MSG_TYPE_MD_DROP_STABLE 19
#define TSDB_MSG_TYPE_MD_ALTER_VNODE_RSP 20 #define TSDB_MSG_TYPE_MD_DROP_STABLE_RSP 20
#define TSDB_MSG_TYPE_MD_DROP_STABLE 21 #define TSDB_MSG_TYPE_MD_ALTER_STREAM 21
#define TSDB_MSG_TYPE_MD_DROP_STABLE_RSP 22 #define TSDB_MSG_TYPE_MD_ALTER_STREAM_RSP 22
#define TSDB_MSG_TYPE_MD_ALTER_STREAM 23 #define TSDB_MSG_TYPE_MD_CONFIG_DNODE 23
#define TSDB_MSG_TYPE_MD_ALTER_STREAM_RSP 24 #define TSDB_MSG_TYPE_MD_CONFIG_DNODE_RSP 24
#define TSDB_MSG_TYPE_MD_CONFIG_DNODE 25
#define TSDB_MSG_TYPE_MD_CONFIG_DNODE_RSP 26
// message from client to mnode // message from client to mnode
#define TSDB_MSG_TYPE_CM_CONNECT 31 #define TSDB_MSG_TYPE_CM_CONNECT 31
@ -245,12 +243,6 @@ typedef struct SSchema {
int16_t bytes; int16_t bytes;
} SSchema; } SSchema;
typedef struct {
int32_t vgId;
int32_t dnodeId;
uint32_t ip;
} SVnodeDesc;
typedef struct { typedef struct {
int32_t contLen; int32_t contLen;
int32_t vgId; int32_t vgId;
@ -518,12 +510,10 @@ typedef struct {
uint8_t status; uint8_t status;
uint8_t role; uint8_t role;
uint8_t accessState; uint8_t accessState;
uint8_t replica;
uint8_t reserved[5]; uint8_t reserved[5];
} SVnodeLoad; } SVnodeLoad;
/*
* NOTE: sizeof(SVnodeCfg) < TSDB_FILE_HEADER_LEN / 4
*/
typedef struct { typedef struct {
char acct[TSDB_USER_LEN + 1]; char acct[TSDB_USER_LEN + 1];
char db[TSDB_DB_NAME_LEN + 1]; char db[TSDB_DB_NAME_LEN + 1];
@ -548,7 +538,7 @@ typedef struct {
int8_t loadLatest; // load into mem or not int8_t loadLatest; // load into mem or not
uint8_t precision; // time resolution uint8_t precision; // time resolution
int8_t reserved[16]; int8_t reserved[16];
} SVnodeCfg, SDbCfg, SCMCreateDbMsg, SCMAlterDbMsg; } SDbCfg, SCMCreateDbMsg, SCMAlterDbMsg;
typedef struct { typedef struct {
char db[TSDB_TABLE_ID_LEN + 1]; char db[TSDB_TABLE_ID_LEN + 1];
@ -614,8 +604,35 @@ typedef struct {
} SDMStatusRsp; } SDMStatusRsp;
typedef struct { typedef struct {
SVnodeCfg cfg; uint32_t vgId;
SVnodeDesc vpeerDesc[TSDB_MAX_MPEERS]; int32_t maxTables;
int64_t maxCacheSize;
int32_t minRowsPerFileBlock;
int32_t maxRowsPerFileBlock;
int32_t daysPerFile;
int32_t daysToKeep;
int32_t daysToKeep1;
int32_t daysToKeep2;
int32_t commitTime;
uint8_t precision; // time resolution
int8_t compression;
int8_t wals;
int8_t commitLog;
int8_t replications;
int8_t quorum;
uint32_t arbitratorIp;
int8_t reserved[16];
} SMDVnodeCfg;
typedef struct {
int32_t nodeId;
uint32_t nodeIp;
char nodeName[TSDB_NODE_NAME_LEN + 1];
} SMDVnodeDesc;
typedef struct {
SMDVnodeCfg cfg;
SMDVnodeDesc nodes[TSDB_MAX_MPEERS];
} SMDCreateVnodeMsg; } SMDCreateVnodeMsg;
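
Every multi-byte field of the new SMDVnodeCfg has to be converted on both ends; the receive side is the dnodeProcessCreateVnodeMsg hunk earlier. A hypothetical sender-side mirror of that conversion over a trimmed stand-in struct (the full field list is in the typedef above; the real send path is not shown in this diff, and htobe64 is assumed from a glibc-style <endian.h>):

```c
#include <arpa/inet.h>
#include <endian.h>   /* htobe64: assumed glibc-style header */
#include <stdint.h>

/* Trimmed stand-in for SMDVnodeCfg; one field of each width. */
typedef struct {
  uint32_t vgId;          /* 4 bytes -> htonl   */
  int64_t  maxCacheSize;  /* 8 bytes -> htobe64 */
  uint8_t  precision;     /* 1 byte  -> as-is   */
} VnodeCfgSketch;

static void vnodeCfgToWire(VnodeCfgSketch *cfg) {
  cfg->vgId         = htonl(cfg->vgId);
  cfg->maxCacheSize = htobe64(cfg->maxCacheSize);
  /* precision and the other int8_t fields travel unchanged */
}
```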
typedef struct { typedef struct {
@ -673,9 +690,16 @@ typedef struct {
int32_t metaElem[TSDB_MAX_JOIN_TABLE_NUM]; int32_t metaElem[TSDB_MAX_JOIN_TABLE_NUM];
} SSuperTableMetaMsg; } SSuperTableMetaMsg;
typedef struct {
int32_t nodeId;
uint32_t nodeIp;
uint16_t nodePort;
} SVnodeDesc;
typedef struct { typedef struct {
SVnodeDesc vpeerDesc[TSDB_REPLICA_MAX_NUM]; SVnodeDesc vpeerDesc[TSDB_REPLICA_MAX_NUM];
int16_t index; // used locally int16_t index; // used locally
int32_t vgId;
int32_t numOfSids; int32_t numOfSids;
int32_t pSidExtInfoList[]; // offset value of STableIdInfo int32_t pSidExtInfoList[]; // offset value of STableIdInfo
} SVnodeSidList; } SVnodeSidList;


@ -38,6 +38,7 @@ typedef struct {
int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg); int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg);
int32_t vnodeDrop(int32_t vgId); int32_t vnodeDrop(int32_t vgId);
int32_t vnodeOpen(int32_t vgId, char *rootDir); int32_t vnodeOpen(int32_t vgId, char *rootDir);
int32_t vnodeAlter(void *pVnode, SMDCreateVnodeMsg *pVnodeCfg);
int32_t vnodeClose(int32_t vgId); int32_t vnodeClose(int32_t vgId);
void vnodeRelease(void *pVnode); void vnodeRelease(void *pVnode);


@ -24,10 +24,10 @@ extern "C" {
int32_t mgmtInitAccts(); int32_t mgmtInitAccts();
void mgmtCleanUpAccts(); void mgmtCleanUpAccts();
void *mgmtGetAcct(char *acctName); void * mgmtGetAcct(char *acctName);
void * mgmtGetNextAcct(void *pNode, SAcctObj **pAcct);
void mgmtIncAcctRef(SAcctObj *pAcct); void mgmtIncAcctRef(SAcctObj *pAcct);
void mgmtDecAcctRef(SAcctObj *pAcct); void mgmtDecAcctRef(SAcctObj *pAcct);
void mgmtAddDbToAcct(SAcctObj *pAcct, SDbObj *pDb); void mgmtAddDbToAcct(SAcctObj *pAcct, SDbObj *pDb);
void mgmtDropDbFromAcct(SAcctObj *pAcct, SDbObj *pDb); void mgmtDropDbFromAcct(SAcctObj *pAcct, SDbObj *pDb);
void mgmtAddUserToAcct(SAcctObj *pAcct, SUserObj *pUser); void mgmtAddUserToAcct(SAcctObj *pAcct, SUserObj *pUser);


@ -51,7 +51,6 @@ typedef struct SDnodeObj {
int8_t reserved[15]; int8_t reserved[15];
int8_t updateEnd[1]; int8_t updateEnd[1];
int32_t refCount; int32_t refCount;
SVnodeLoad vload[TSDB_MAX_VNODES];
uint32_t moduleStatus; uint32_t moduleStatus;
uint32_t lastReboot; // time stamp for last reboot uint32_t lastReboot; // time stamp for last reboot
float score; // calc in balance function float score; // calc in balance function
@ -72,13 +71,6 @@ typedef struct SMnodeObj {
SDnodeObj *pDnode; SDnodeObj *pDnode;
} SMnodeObj; } SMnodeObj;
typedef struct {
int32_t dnodeId;
uint32_t privateIp;
uint32_t publicIp;
} SVnodeGid;
typedef struct { typedef struct {
char tableId[TSDB_TABLE_ID_LEN + 1]; char tableId[TSDB_TABLE_ID_LEN + 1];
int8_t type; int8_t type;
@ -120,24 +112,34 @@ typedef struct {
SSuperTableObj *superTable; SSuperTableObj *superTable;
} SChildTableObj; } SChildTableObj;
typedef struct {
int32_t dnodeId;
int8_t role;
int8_t reserved[3];
SDnodeObj* pDnode;
} SVnodeGid;
typedef struct SVgObj { typedef struct SVgObj {
uint32_t vgId; uint32_t vgId;
char dbName[TSDB_DB_NAME_LEN + 1]; char dbName[TSDB_DB_NAME_LEN + 1];
int64_t createdTime; int64_t createdTime;
SVnodeGid vnodeGid[TSDB_VNODES_SUPPORT]; SVnodeGid vnodeGid[TSDB_VNODES_SUPPORT];
int32_t numOfVnodes; int32_t numOfVnodes;
int32_t lbDnodeId; int32_t lbDnodeId;
int32_t lbTime; int32_t lbTime;
int8_t status; int8_t status;
int8_t inUse; int8_t inUse;
int8_t reserved[13]; int8_t reserved[13];
int8_t updateEnd[1]; int8_t updateEnd[1];
int32_t refCount; int32_t refCount;
struct SVgObj *prev, *next; struct SVgObj *prev, *next;
struct SDbObj *pDb; struct SDbObj *pDb;
int32_t numOfTables; int32_t numOfTables;
void * idPool; int64_t totalStorage;
SChildTableObj ** tableList; int64_t compStorage;
int64_t pointsWritten;
void * idPool;
SChildTableObj **tableList;
} SVgObj; } SVgObj;
typedef struct SDbObj { typedef struct SDbObj {


@ -35,12 +35,15 @@ void mgmtMonitorDnodeModule();
int32_t mgmtGetDnodesNum(); int32_t mgmtGetDnodesNum();
void * mgmtGetNextDnode(void *pNode, SDnodeObj **pDnode); void * mgmtGetNextDnode(void *pNode, SDnodeObj **pDnode);
void mgmtReleaseDnode(SDnodeObj *pDnode); void mgmtIncDnodeRef(SDnodeObj *pDnode);
void mgmtDecDnodeRef(SDnodeObj *pDnode);
void * mgmtGetDnode(int32_t dnodeId); void * mgmtGetDnode(int32_t dnodeId);
void * mgmtGetDnodeByIp(uint32_t ip); void * mgmtGetDnodeByIp(uint32_t ip);
void mgmtUpdateDnode(SDnodeObj *pDnode); void mgmtUpdateDnode(SDnodeObj *pDnode);
int32_t mgmtDropDnode(SDnodeObj *pDnode); int32_t mgmtDropDnode(SDnodeObj *pDnode);
extern int32_t tsAccessSquence;
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif


@ -40,7 +40,7 @@ void * mgmtGetNextMnode(void *pNode, struct SMnodeObj **pMnode);
void mgmtReleaseMnode(struct SMnodeObj *pMnode); void mgmtReleaseMnode(struct SMnodeObj *pMnode);
char * mgmtGetMnodeRoleStr(); char * mgmtGetMnodeRoleStr();
void mgmtGetMnodeIpList(SRpcIpSet *ipSet, bool usePublicIp); void mgmtGetMnodeIpSet(SRpcIpSet *ipSet, bool usePublicIp);
void mgmtGetMnodeInfos(void *mnodes); void mgmtGetMnodeInfos(void *mnodes);
#ifdef __cplusplus #ifdef __cplusplus


@ -24,7 +24,9 @@ extern "C" {
int32_t mgmtInitUsers(); int32_t mgmtInitUsers();
void mgmtCleanUpUsers(); void mgmtCleanUpUsers();
SUserObj *mgmtGetUser(char *name); SUserObj *mgmtGetUser(char *name);
void mgmtReleaseUser(SUserObj *pUser); void * mgmtGetNextUser(void *pNode, SUserObj **pUser);
void mgmtIncUserRef(SUserObj *pUser);
void mgmtDecUserRef(SUserObj *pUser);
SUserObj *mgmtGetUserFromConn(void *pConn, bool *usePublicIp); SUserObj *mgmtGetUserFromConn(void *pConn, bool *usePublicIp);
int32_t mgmtCreateUser(SAcctObj *pAcct, char *name, char *pass); int32_t mgmtCreateUser(SAcctObj *pAcct, char *name, char *pass);
void mgmtDropAllUsers(SAcctObj *pAcct); void mgmtDropAllUsers(SAcctObj *pAcct);


@ -30,12 +30,13 @@ enum _TSDB_VG_STATUS {
int32_t mgmtInitVgroups(); int32_t mgmtInitVgroups();
void mgmtCleanUpVgroups(); void mgmtCleanUpVgroups();
SVgObj *mgmtGetVgroup(int32_t vgId); SVgObj *mgmtGetVgroup(int32_t vgId);
void mgmtReleaseVgroup(SVgObj *pVgroup); void mgmtIncVgroupRef(SVgObj *pVgroup);
void mgmtDecVgroupRef(SVgObj *pVgroup);
void mgmtDropAllVgroups(SDbObj *pDropDb); void mgmtDropAllVgroups(SDbObj *pDropDb);
void * mgmtGetNextVgroup(void *pNode, SVgObj **pVgroup); void * mgmtGetNextVgroup(void *pNode, SVgObj **pVgroup);
void mgmtUpdateVgroup(SVgObj *pVgroup); void mgmtUpdateVgroup(SVgObj *pVgroup);
void mgmtUpdateVgroupStatus(SVgObj *pVgroup, int32_t dnodeId, SVnodeLoad *pVload); void mgmtUpdateVgroupStatus(SVgObj *pVgroup, SDnodeObj *dnodeId, SVnodeLoad *pVload);
void mgmtCreateVgroup(SQueuedMsg *pMsg, SDbObj *pDb); void mgmtCreateVgroup(SQueuedMsg *pMsg, SDbObj *pDb);
void mgmtDropVgroup(SVgObj *pVgroup, void *ahandle); void mgmtDropVgroup(SVgObj *pVgroup, void *ahandle);
@ -46,6 +47,7 @@ void mgmtAddTableIntoVgroup(SVgObj *pVgroup, SChildTableObj *pTable);
void mgmtRemoveTableFromVgroup(SVgObj *pVgroup, SChildTableObj *pTable); void mgmtRemoveTableFromVgroup(SVgObj *pVgroup, SChildTableObj *pTable);
void mgmtSendCreateVnodeMsg(SVgObj *pVgroup, SRpcIpSet *ipSet, void *ahandle); void mgmtSendCreateVnodeMsg(SVgObj *pVgroup, SRpcIpSet *ipSet, void *ahandle);
void mgmtSendDropVnodeMsg(int32_t vgId, SRpcIpSet *ipSet, void *ahandle); void mgmtSendDropVnodeMsg(int32_t vgId, SRpcIpSet *ipSet, void *ahandle);
void mgmtSendCreateVgroupMsg(SVgObj *pVgroup, void *ahandle);
SRpcIpSet mgmtGetIpSetFromVgroup(SVgObj *pVgroup); SRpcIpSet mgmtGetIpSetFromVgroup(SVgObj *pVgroup);
SRpcIpSet mgmtGetIpSetFromIp(uint32_t ip); SRpcIpSet mgmtGetIpSetFromIp(uint32_t ip);


@ -58,6 +58,7 @@ static int32_t mgmtActionAcctUpdate(SSdbOper *pOper) {
memcpy(pSaved, pAcct, tsAcctUpdateSize); memcpy(pSaved, pAcct, tsAcctUpdateSize);
free(pAcct); free(pAcct);
} }
mgmtDecAcctRef(pSaved);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
@ -106,11 +107,11 @@ int32_t mgmtInitAccts() {
tsAcctSdb = sdbOpenTable(&tableDesc); tsAcctSdb = sdbOpenTable(&tableDesc);
if (tsAcctSdb == NULL) { if (tsAcctSdb == NULL) {
mError("failed to init acct data"); mError("table:%s, failed to create hash", tableDesc.tableName);
return -1; return -1;
} }
mTrace("table:accounts table is created"); mTrace("table:%s, hash is created", tableDesc.tableName);
return acctInit(); return acctInit();
} }
@ -123,6 +124,10 @@ void *mgmtGetAcct(char *name) {
return sdbGetRow(tsAcctSdb, name); return sdbGetRow(tsAcctSdb, name);
} }
void *mgmtGetNextAcct(void *pNode, SAcctObj **pAcct) {
return sdbFetchRow(tsAcctSdb, pNode, (void **)pAcct);
}
void mgmtIncAcctRef(SAcctObj *pAcct) { void mgmtIncAcctRef(SAcctObj *pAcct) {
sdbIncRef(tsAcctSdb, pAcct); sdbIncRef(tsAcctSdb, pAcct);
} }


@ -47,7 +47,7 @@ int32_t balanceAllocVnodes(SVgObj *pVgroup) {
vnodeUsage = usage; vnodeUsage = usage;
} }
} }
mgmtReleaseDnode(pDnode); mgmtDecDnodeRef(pDnode);
} }
if (pSelDnode == NULL) { if (pSelDnode == NULL) {
@ -56,8 +56,7 @@ int32_t balanceAllocVnodes(SVgObj *pVgroup) {
} }
pVgroup->vnodeGid[0].dnodeId = pSelDnode->dnodeId; pVgroup->vnodeGid[0].dnodeId = pSelDnode->dnodeId;
pVgroup->vnodeGid[0].privateIp = pSelDnode->privateIp; pVgroup->vnodeGid[0].pDnode = pSelDnode;
pVgroup->vnodeGid[0].publicIp = pSelDnode->publicIp;
mTrace("dnode:%d, alloc one vnode to vgroup, openVnodes:%d", pSelDnode->dnodeId, pSelDnode->openVnodes); mTrace("dnode:%d, alloc one vnode to vgroup, openVnodes:%d", pSelDnode->dnodeId, pSelDnode->openVnodes);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;


@ -28,8 +28,10 @@
#include "mgmtLog.h" #include "mgmtLog.h"
#include "mgmtDb.h" #include "mgmtDb.h"
#include "mgmtDServer.h" #include "mgmtDServer.h"
#include "mgmtMnode.h"
#include "mgmtProfile.h" #include "mgmtProfile.h"
#include "mgmtShell.h" #include "mgmtShell.h"
#include "mgmtSdb.h"
#include "mgmtTable.h" #include "mgmtTable.h"
#include "mgmtVgroup.h" #include "mgmtVgroup.h"
@ -99,6 +101,18 @@ static void mgmtProcessMsgFromDnode(SRpcMsg *rpcMsg) {
mgmtSendSimpleResp(rpcMsg->handle, TSDB_CODE_INVALID_MSG_LEN); mgmtSendSimpleResp(rpcMsg->handle, TSDB_CODE_INVALID_MSG_LEN);
return; return;
} }
if (!sdbIsMaster()) {
SRpcConnInfo connInfo;
rpcGetConnInfo(rpcMsg->handle, &connInfo);
bool usePublicIp = false;
SRpcIpSet ipSet = {0};
mgmtGetMnodeIpSet(&ipSet, usePublicIp);
mTrace("conn from dnode ip:%s redirect msg", taosIpStr(connInfo.clientIp));
rpcSendRedirectRsp(rpcMsg->handle, &ipSet);
return;
}
if (mgmtProcessDnodeMsgFp[rpcMsg->msgType]) { if (mgmtProcessDnodeMsgFp[rpcMsg->msgType]) {
SRpcMsg *pMsg = malloc(sizeof(SRpcMsg)); SRpcMsg *pMsg = malloc(sizeof(SRpcMsg));


@ -63,6 +63,7 @@ static int32_t mgmtDbActionInsert(SSdbOper *pOper) {
if (pAcct != NULL) { if (pAcct != NULL) {
mgmtAddDbToAcct(pAcct, pDb); mgmtAddDbToAcct(pAcct, pDb);
mgmtDecAcctRef(pAcct);
} }
else { else {
mError("db:%s, acct:%s info not exist in sdb", pDb->name, pDb->cfg.acct); mError("db:%s, acct:%s info not exist in sdb", pDb->name, pDb->cfg.acct);
@ -80,6 +81,7 @@ static int32_t mgmtDbActionDelete(SSdbOper *pOper) {
mgmtDropAllChildTables(pDb); mgmtDropAllChildTables(pDb);
mgmtDropAllSuperTables(pDb); mgmtDropAllSuperTables(pDb);
mgmtDropAllVgroups(pDb); mgmtDropAllVgroups(pDb);
mgmtDecAcctRef(pAcct);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
@ -527,7 +529,7 @@ static int32_t mgmtGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn)
pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1]; pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
pShow->numOfRows = pUser->pAcct->acctInfo.numOfDbs; pShow->numOfRows = pUser->pAcct->acctInfo.numOfDbs;
mgmtReleaseUser(pUser); mgmtDecUserRef(pUser);
return 0; return 0;
} }
@ -647,7 +649,7 @@ static int32_t mgmtRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void *
} }
pShow->numOfReads += numOfRows; pShow->numOfReads += numOfRows;
mgmtReleaseUser(pUser); mgmtDecUserRef(pUser);
return numOfRows; return numOfRows;
} }


@ -23,6 +23,7 @@
#include "tutil.h" #include "tutil.h"
#include "tsocket.h" #include "tsocket.h"
#include "tbalance.h" #include "tbalance.h"
#include "tsync.h"
#include "dnode.h" #include "dnode.h"
#include "mgmtDef.h" #include "mgmtDef.h"
#include "mgmtLog.h" #include "mgmtLog.h"
@ -37,6 +38,8 @@
void *tsDnodeSdb = NULL; void *tsDnodeSdb = NULL;
int32_t tsDnodeUpdateSize = 0; int32_t tsDnodeUpdateSize = 0;
int32_t tsAccessSquence = 0;
extern void * tsMnodeSdb;
extern void * tsVgroupSdb; extern void * tsVgroupSdb;
static int32_t mgmtCreateDnode(uint32_t ip); static int32_t mgmtCreateDnode(uint32_t ip);
@ -99,7 +102,13 @@ static int32_t mgmtDnodeActionDelete(SSdbOper *pOper) {
} }
} }
mgmtDropMnode(pDnode->dnodeId); SMnodeObj *pMnode = mgmtGetMnode(pDnode->dnodeId);
if (pMnode != NULL) {
SSdbOper oper = {.type = SDB_OPER_LOCAL, .table = tsMnodeSdb, .pObj = pMnode};
sdbDeleteRow(&oper);
mgmtReleaseMnode(pMnode);
}
balanceNotify(); balanceNotify();
mTrace("dnode:%d, all vgroups:%d is dropped from sdb", pDnode->dnodeId, numOfVgroups); mTrace("dnode:%d, all vgroups:%d is dropped from sdb", pDnode->dnodeId, numOfVgroups);
@ -139,7 +148,7 @@ static int32_t mgmtDnodeActionRestored() {
mgmtCreateDnode(ip); mgmtCreateDnode(ip);
SDnodeObj *pDnode = mgmtGetDnodeByIp(ip); SDnodeObj *pDnode = mgmtGetDnodeByIp(ip);
mgmtAddMnode(pDnode->dnodeId); mgmtAddMnode(pDnode->dnodeId);
mgmtReleaseDnode(pDnode); mgmtDecDnodeRef(pDnode);
} }
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
@ -215,13 +224,17 @@ void *mgmtGetDnodeByIp(uint32_t ip) {
if (ip == pDnode->privateIp) { if (ip == pDnode->privateIp) {
return pDnode; return pDnode;
} }
mgmtReleaseDnode(pDnode); mgmtDecDnodeRef(pDnode);
} }
return NULL; return NULL;
} }
void mgmtReleaseDnode(SDnodeObj *pDnode) { void mgmtIncDnodeRef(SDnodeObj *pDnode) {
sdbIncRef(tsDnodeSdb, pDnode);
}
void mgmtDecDnodeRef(SDnodeObj *pDnode) {
sdbDecRef(tsDnodeSdb, pDnode); sdbDecRef(tsDnodeSdb, pDnode);
} }
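
mgmtReleaseDnode is split into an explicit mgmtIncDnodeRef/mgmtDecDnodeRef pair (the user and vgroup tables get the same treatment in this commit), both thin wrappers over the sdb table's reference count. The usage contract is get-with-ref, use, then decrement, as in this hedged sketch with illustrative stubs:

```c
/* Sketch of the lookup/use/release contract behind the Inc/Dec rename;
 * the getter is assumed to return with a reference already held. */
typedef struct DnodeObj DnodeObj;                  /* opaque here */

DnodeObj *mgmtGetDnodeByIpStub(unsigned int ip);   /* illustrative stubs */
void      mgmtDecDnodeRefStub(DnodeObj *pDnode);

void touchDnode(unsigned int ip) {
  DnodeObj *pDnode = mgmtGetDnodeByIpStub(ip);
  if (pDnode == NULL) return;
  /* ... read or update pDnode here; the ref keeps it alive ... */
  mgmtDecDnodeRefStub(pDnode);    /* every successful get needs one dec */
}
```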
@ -318,27 +331,27 @@ void mgmtProcessDnodeStatusMsg(SRpcMsg *rpcMsg) {
pDnode->alternativeRole = pStatus->alternativeRole; pDnode->alternativeRole = pStatus->alternativeRole;
pDnode->totalVnodes = pStatus->numOfTotalVnodes; pDnode->totalVnodes = pStatus->numOfTotalVnodes;
pDnode->moduleStatus = pStatus->moduleStatus; pDnode->moduleStatus = pStatus->moduleStatus;
pDnode->lastAccess = tsAccessSquence;
if (pStatus->dnodeId == 0) { if (pStatus->dnodeId == 0) {
mTrace("dnode:%d, first access, privateIp:%s, name:%s", pDnode->dnodeId, taosIpStr(pDnode->privateIp), pDnode->dnodeName); mTrace("dnode:%d, first access, privateIp:%s, name:%s", pDnode->dnodeId, taosIpStr(pDnode->privateIp), pDnode->dnodeName);
} else {
//mTrace("dnode:%d, status received, access times %d", pDnode->dnodeId, pDnode->lastAccess);
} }
int32_t openVnodes = htons(pStatus->openVnodes); int32_t openVnodes = htons(pStatus->openVnodes);
for (int32_t j = 0; j < openVnodes; ++j) { for (int32_t j = 0; j < openVnodes; ++j) {
SVnodeLoad *pVload = &pStatus->load[j]; SVnodeLoad *pVload = &pStatus->load[j];
pDnode->vload[j].vgId = htonl(pVload->vgId); pVload->vgId = htonl(pVload->vgId);
pDnode->vload[j].totalStorage = htobe64(pVload->totalStorage);
pDnode->vload[j].compStorage = htobe64(pVload->compStorage); SVgObj *pVgroup = mgmtGetVgroup(pVload->vgId);
pDnode->vload[j].pointsWritten = htobe64(pVload->pointsWritten);
SVgObj *pVgroup = mgmtGetVgroup(pDnode->vload[j].vgId);
if (pVgroup == NULL) { if (pVgroup == NULL) {
SRpcIpSet ipSet = mgmtGetIpSetFromIp(pDnode->privateIp); SRpcIpSet ipSet = mgmtGetIpSetFromIp(pDnode->privateIp);
mPrint("dnode:%d, vgroup:%d not exist in mnode, drop it", pDnode->dnodeId, pDnode->vload[j].vgId); mPrint("dnode:%d, vgroup:%d not exist in mnode, drop it", pDnode->dnodeId, pVload->vgId);
mgmtSendDropVnodeMsg(pDnode->vload[j].vgId, &ipSet, NULL); mgmtSendDropVnodeMsg(pVload->vgId, &ipSet, NULL);
} else { } else {
mgmtUpdateVgroupStatus(pVgroup, pDnode->dnodeId, pVload); mgmtUpdateVgroupStatus(pVgroup, pDnode, pVload);
mgmtReleaseVgroup(pVgroup); mgmtDecVgroupRef(pVgroup);
} }
} }
@ -348,7 +361,7 @@ void mgmtProcessDnodeStatusMsg(SRpcMsg *rpcMsg) {
balanceNotify(); balanceNotify();
} }
mgmtReleaseDnode(pDnode); mgmtDecDnodeRef(pDnode);
int32_t contLen = sizeof(SDMStatusRsp) + TSDB_MAX_VNODES * sizeof(SDMVgroupAccess); int32_t contLen = sizeof(SDMStatusRsp) + TSDB_MAX_VNODES * sizeof(SDMVgroupAccess);
SDMStatusRsp *pRsp = rpcMallocCont(contLen); SDMStatusRsp *pRsp = rpcMallocCont(contLen);
@ -444,7 +457,7 @@ static int32_t mgmtDropDnodeByIp(uint32_t ip) {
return TSDB_CODE_NO_REMOVE_MASTER; return TSDB_CODE_NO_REMOVE_MASTER;
} }
#ifndef _VPEER #ifndef _SYNC
return mgmtDropDnode(pDnode); return mgmtDropDnode(pDnode);
#else #else
return balanceDropDnode(pDnode); return balanceDropDnode(pDnode);
@ -495,7 +508,10 @@ static int32_t mgmtGetDnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pCo
SUserObj *pUser = mgmtGetUserFromConn(pConn, NULL); SUserObj *pUser = mgmtGetUserFromConn(pConn, NULL);
if (pUser == NULL) return 0; if (pUser == NULL) return 0;
if (strcmp(pUser->pAcct->user, "root") != 0) return TSDB_CODE_NO_RIGHTS; if (strcmp(pUser->pAcct->user, "root") != 0) {
mgmtDecUserRef(pUser);
return TSDB_CODE_NO_RIGHTS;
}
int32_t cols = 0; int32_t cols = 0;
SSchema *pSchema = pMeta->schema; SSchema *pSchema = pMeta->schema;
@ -554,7 +570,7 @@ static int32_t mgmtGetDnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pCo
pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1]; pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
pShow->pNode = NULL; pShow->pNode = NULL;
mgmtReleaseUser(pUser); mgmtDecUserRef(pUser);
return 0; return 0;
} }
@ -604,7 +620,7 @@ static int32_t mgmtRetrieveDnodes(SShowObj *pShow, char *data, int32_t rows, voi
numOfRows++; numOfRows++;
mgmtReleaseDnode(pDnode); mgmtDecDnodeRef(pDnode);
} }
pShow->numOfReads += numOfRows; pShow->numOfReads += numOfRows;
@ -622,7 +638,10 @@ static int32_t mgmtGetModuleMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC
SUserObj *pUser = mgmtGetUserFromConn(pConn, NULL); SUserObj *pUser = mgmtGetUserFromConn(pConn, NULL);
if (pUser == NULL) return 0; if (pUser == NULL) return 0;
if (strcmp(pUser->user, "root") != 0) return TSDB_CODE_NO_RIGHTS; if (strcmp(pUser->user, "root") != 0) {
mgmtDecUserRef(pUser);
return TSDB_CODE_NO_RIGHTS;
}
SSchema *pSchema = pMeta->schema; SSchema *pSchema = pMeta->schema;
@ -661,7 +680,7 @@ static int32_t mgmtGetModuleMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC
pShow->numOfRows = mgmtGetDnodesNum() * TSDB_MOD_MAX; pShow->numOfRows = mgmtGetDnodesNum() * TSDB_MOD_MAX;
pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1]; pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
pShow->pNode = NULL; pShow->pNode = NULL;
mgmtReleaseUser(pUser); mgmtDecUserRef(pUser);
return 0; return 0;
} }
@ -712,7 +731,7 @@ int32_t mgmtRetrieveModules(SShowObj *pShow, char *data, int32_t rows, void *pCo
numOfRows++; numOfRows++;
} }
mgmtReleaseDnode(pDnode); mgmtDecDnodeRef(pDnode);
} }
pShow->numOfReads += numOfRows; pShow->numOfReads += numOfRows;
@ -731,7 +750,10 @@ static int32_t mgmtGetConfigMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC
SUserObj *pUser = mgmtGetUserFromConn(pConn, NULL); SUserObj *pUser = mgmtGetUserFromConn(pConn, NULL);
if (pUser == NULL) return 0; if (pUser == NULL) return 0;
if (strcmp(pUser->user, "root") != 0) return TSDB_CODE_NO_RIGHTS; if (strcmp(pUser->user, "root") != 0) {
mgmtDecUserRef(pUser);
return TSDB_CODE_NO_RIGHTS;
}
SSchema *pSchema = pMeta->schema; SSchema *pSchema = pMeta->schema;
@ -762,7 +784,7 @@ static int32_t mgmtGetConfigMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC
pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1]; pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
pShow->pNode = NULL; pShow->pNode = NULL;
mgmtReleaseUser(pUser); mgmtDecUserRef(pUser);
return 0; return 0;
} }
@ -814,7 +836,11 @@ static int32_t mgmtGetVnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pCo
int32_t cols = 0; int32_t cols = 0;
SUserObj *pUser = mgmtGetUserFromConn(pConn, NULL); SUserObj *pUser = mgmtGetUserFromConn(pConn, NULL);
if (pUser == NULL) return 0; if (pUser == NULL) return 0;
if (strcmp(pUser->user, "root") != 0) return TSDB_CODE_NO_RIGHTS;
if (strcmp(pUser->user, "root") != 0) {
mgmtDecUserRef(pUser);
return TSDB_CODE_NO_RIGHTS;
}
SSchema *pSchema = pMeta->schema; SSchema *pSchema = pMeta->schema;
@ -840,35 +866,18 @@ static int32_t mgmtGetVnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pCo
if (pShow->payloadLen > 0 ) { if (pShow->payloadLen > 0 ) {
uint32_t ip = ip2uint(pShow->payload); uint32_t ip = ip2uint(pShow->payload);
pDnode = mgmtGetDnodeByIp(ip); pDnode = mgmtGetDnodeByIp(ip);
if (NULL == pDnode) {
return TSDB_CODE_NODE_OFFLINE;
}
SVnodeLoad* pVnode;
pShow->numOfRows = 0;
for (int32_t i = 0 ; i < TSDB_MAX_VNODES; i++) {
pVnode = &pDnode->vload[i];
if (0 != pVnode->vgId) {
pShow->numOfRows++;
}
}
pShow->pNode = pDnode;
} else { } else {
while (true) { mgmtGetNextDnode(NULL, (SDnodeObj **)&pDnode);
pShow->pNode = mgmtGetNextDnode(pShow->pNode, (SDnodeObj **)&pDnode); }
if (pDnode == NULL) break;
pShow->numOfRows += pDnode->openVnodes;
if (0 == pShow->numOfRows) return TSDB_CODE_NODE_OFFLINE; if (pDnode != NULL) {
} pShow->numOfRows += pDnode->openVnodes;
mgmtDecDnodeRef(pDnode);
pShow->pNode = NULL; }
}
pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1]; pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
mgmtReleaseDnode(pDnode); pShow->pNode = pDnode;
mgmtReleaseUser(pUser); mgmtDecUserRef(pUser);
return 0; return 0;
} }
@ -881,35 +890,35 @@ static int32_t mgmtRetrieveVnodes(SShowObj *pShow, char *data, int32_t rows, voi
if (0 == rows) return 0; if (0 == rows) return 0;
if (pShow->payloadLen) { pDnode = (SDnodeObj *)(pShow->pNode);
// output the vnodes info of the designated dnode. And output all vnodes of this dnode, instead of rows (max 100) if (pDnode != NULL) {
pDnode = (SDnodeObj *)(pShow->pNode); void *pNode = NULL;
if (pDnode != NULL) { SVgObj *pVgroup;
SVnodeLoad* pVnode; while (1) {
for (int32_t i = 0 ; i < TSDB_MAX_VNODES; i++) { pNode = mgmtGetNextVgroup(pNode, &pVgroup);
pVnode = &pDnode->vload[i]; if (pVgroup == NULL) break;
if (0 == pVnode->vgId) {
continue; for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
SVnodeGid *pVgid = &pVgroup->vnodeGid[i];
if (pVgid->pDnode == pDnode) {
cols = 0;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(uint32_t *)pWrite = pVgroup->vgId;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
strcpy(pWrite, mgmtGetMnodeRoleStr(pVgid->role));
cols++;
} }
cols = 0;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(uint32_t *)pWrite = pVnode->vgId;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
strcpy(pWrite, pVnode->status ? "ready" : "offline");
cols++;
numOfRows++;
} }
mgmtDecVgroupRef(pVgroup);
} }
} else { } else {
// TODO: output all vnodes of all dnodes
numOfRows = 0; numOfRows = 0;
} }
pShow->numOfReads += numOfRows; pShow->numOfReads += numOfRows;
return numOfRows; return numOfRows;
} }
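
With the vload[] array removed from SDnodeObj, the retrieve path above is rewritten: instead of reading cached per-dnode loads, it walks every vgroup and emits a row for each replica whose pDnode pointer is the target dnode. A condensed sketch of the new iteration (names from this hunk; row emission elided):

```c
typedef struct DnodeObj DnodeObj;
typedef struct VgObj {
  unsigned int vgId;
  int numOfVnodes;
  struct { DnodeObj *pDnode; char role; } vnodeGid[3];
} VgObj;

/* Illustrative stubs for the sdb iterator used in this hunk. */
void *mgmtGetNextVgroupStub(void *pNode, VgObj **ppVgroup);
void  mgmtDecVgroupRefStub(VgObj *pVgroup);

int countVnodesOnDnode(DnodeObj *pDnode) {
  int rows = 0;
  void *pNode = NULL;
  VgObj *pVgroup;
  while (1) {
    pNode = mgmtGetNextVgroupStub(pNode, &pVgroup);
    if (pVgroup == NULL) break;
    for (int i = 0; i < pVgroup->numOfVnodes; ++i) {
      if (pVgroup->vnodeGid[i].pDnode == pDnode) {
        rows++;   /* one output row per matching replica */
      }
    }
    mgmtDecVgroupRefStub(pVgroup);  /* iterator hands back a referenced obj */
  }
  return rows;
}
```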


@ -149,12 +149,12 @@ void mgmtCleanUpSystem() {
mgmtCleanUpShell(); mgmtCleanUpShell();
mgmtCleanupDClient(); mgmtCleanupDClient();
mgmtCleanupDServer(); mgmtCleanupDServer();
mgmtCleanUpAccts();
mgmtCleanUpTables(); mgmtCleanUpTables();
mgmtCleanUpVgroups(); mgmtCleanUpVgroups();
mgmtCleanUpDbs(); mgmtCleanUpDbs();
mgmtCleanupDnodes(); mgmtCleanupDnodes();
mgmtCleanUpUsers(); mgmtCleanUpUsers();
mgmtCleanUpAccts();
sdbCleanUp(); sdbCleanUp();
taosTmrCleanUp(tsMgmtTmr); taosTmrCleanUp(tsMgmtTmr);
tsMgmtIsRunning = false; tsMgmtIsRunning = false;


@ -30,7 +30,7 @@
#include "mgmtShell.h" #include "mgmtShell.h"
#include "mgmtUser.h" #include "mgmtUser.h"
static void * tsMnodeSdb = NULL; void * tsMnodeSdb = NULL;
static int32_t tsMnodeUpdateSize = 0; static int32_t tsMnodeUpdateSize = 0;
static int32_t mgmtGetMnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn); static int32_t mgmtGetMnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn);
static int32_t mgmtRetrieveMnodes(SShowObj *pShow, char *data, int32_t rows, void *pConn); static int32_t mgmtRetrieveMnodes(SShowObj *pShow, char *data, int32_t rows, void *pConn);
@ -47,7 +47,7 @@ static int32_t mgmtMnodeActionInsert(SSdbOper *pOper) {
pMnode->pDnode = pDnode; pMnode->pDnode = pDnode;
pDnode->isMgmt = true; pDnode->isMgmt = true;
mgmtReleaseDnode(pDnode); mgmtDecDnodeRef(pDnode);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
@ -58,7 +58,7 @@ static int32_t mgmtMnodeActionDelete(SSdbOper *pOper) {
SDnodeObj *pDnode = mgmtGetDnode(pMnode->mnodeId); SDnodeObj *pDnode = mgmtGetDnode(pMnode->mnodeId);
if (pDnode == NULL) return TSDB_CODE_DNODE_NOT_EXIST; if (pDnode == NULL) return TSDB_CODE_DNODE_NOT_EXIST;
pDnode->isMgmt = false; pDnode->isMgmt = false;
mgmtReleaseDnode(pDnode); mgmtDecDnodeRef(pDnode);
mTrace("mnode:%d, is dropped from sdb", pMnode->mnodeId); mTrace("mnode:%d, is dropped from sdb", pMnode->mnodeId);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
@ -171,7 +171,7 @@ char *mgmtGetMnodeRoleStr(int32_t role) {
} }
} }
void mgmtGetMnodeIpList(SRpcIpSet *ipSet, bool usePublicIp) { void mgmtGetMnodeIpSet(SRpcIpSet *ipSet, bool usePublicIp) {
void *pNode = NULL; void *pNode = NULL;
while (1) { while (1) {
SMnodeObj *pMnode = NULL; SMnodeObj *pMnode = NULL;
@ -268,7 +268,10 @@ static int32_t mgmtGetMnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pCo
SUserObj *pUser = mgmtGetUserFromConn(pConn, NULL); SUserObj *pUser = mgmtGetUserFromConn(pConn, NULL);
if (pUser == NULL) return 0; if (pUser == NULL) return 0;
if (strcmp(pUser->pAcct->user, "root") != 0) return TSDB_CODE_NO_RIGHTS; if (strcmp(pUser->pAcct->user, "root") != 0) {
mgmtDecUserRef(pUser);
return TSDB_CODE_NO_RIGHTS;
}
int32_t cols = 0; int32_t cols = 0;
SSchema *pSchema = pMeta->schema; SSchema *pSchema = pMeta->schema;
@ -314,7 +317,7 @@ static int32_t mgmtGetMnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pCo
pShow->numOfRows = mgmtGetMnodesNum(); pShow->numOfRows = mgmtGetMnodesNum();
pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1]; pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
pShow->pNode = NULL; pShow->pNode = NULL;
mgmtReleaseUser(pUser); mgmtDecUserRef(pUser);
return 0; return 0;
} }


@ -704,6 +704,7 @@ void mgmtProcessKillQueryMsg(SQueuedMsg *pMsg) {
rpcRsp.code = code; rpcRsp.code = code;
rpcSendResponse(&rpcRsp); rpcSendResponse(&rpcRsp);
mgmtDecUserRef(pUser);
} }
void mgmtProcessKillStreamMsg(SQueuedMsg *pMsg) { void mgmtProcessKillStreamMsg(SQueuedMsg *pMsg) {
@ -727,6 +728,7 @@ void mgmtProcessKillStreamMsg(SQueuedMsg *pMsg) {
rpcRsp.code = code; rpcRsp.code = code;
rpcSendResponse(&rpcRsp); rpcSendResponse(&rpcRsp);
mgmtDecUserRef(pUser);
} }
void mgmtProcessKillConnectionMsg(SQueuedMsg *pMsg) { void mgmtProcessKillConnectionMsg(SQueuedMsg *pMsg) {
@ -750,6 +752,7 @@ void mgmtProcessKillConnectionMsg(SQueuedMsg *pMsg) {
rpcRsp.code = code; rpcRsp.code = code;
rpcSendResponse(&rpcRsp); rpcSendResponse(&rpcRsp);
mgmtDecUserRef(pUser);
} }
int32_t mgmtInitProfile() { int32_t mgmtInitProfile() {
@ -790,12 +793,12 @@ void *mgmtMallocQueuedMsg(SRpcMsg *rpcMsg) {
void mgmtFreeQueuedMsg(SQueuedMsg *pMsg) { void mgmtFreeQueuedMsg(SQueuedMsg *pMsg) {
if (pMsg != NULL) { if (pMsg != NULL) {
rpcFreeCont(pMsg->pCont); rpcFreeCont(pMsg->pCont);
if (pMsg->pUser) mgmtReleaseUser(pMsg->pUser); if (pMsg->pUser) mgmtDecUserRef(pMsg->pUser);
if (pMsg->pDb) mgmtDecDbRef(pMsg->pDb); if (pMsg->pDb) mgmtDecDbRef(pMsg->pDb);
if (pMsg->pVgroup) mgmtReleaseVgroup(pMsg->pVgroup); if (pMsg->pVgroup) mgmtDecVgroupRef(pMsg->pVgroup);
if (pMsg->pTable) mgmtDecTableRef(pMsg->pTable); if (pMsg->pTable) mgmtDecTableRef(pMsg->pTable);
if (pMsg->pAcct) mgmtDecAcctRef(pMsg->pAcct); if (pMsg->pAcct) mgmtDecAcctRef(pMsg->pAcct);
if (pMsg->pDnode) mgmtReleaseDnode(pMsg->pDnode); if (pMsg->pDnode) mgmtDecDnodeRef(pMsg->pDnode);
free(pMsg); free(pMsg);
} }
} }


@@ -141,13 +141,19 @@ void mgmtDealyedAddToShellQueue(SQueuedMsg *queuedMsg) {
 static void mgmtProcessMsgFromShell(SRpcMsg *rpcMsg) {
   if (rpcMsg == NULL || rpcMsg->pCont == NULL) {
+    mgmtSendSimpleResp(rpcMsg->handle, TSDB_CODE_INVALID_MSG_LEN);
     return;
   }
 
   if (!sdbIsMaster()) {
-    // rpcSendRedirectRsp(rpcMsg->handle, mgmtGetMnodeIpListForRedirect());
-    mgmtSendSimpleResp(rpcMsg->handle, TSDB_CODE_NO_MASTER);
-    rpcFreeCont(rpcMsg->pCont);
+    SRpcConnInfo connInfo;
+    rpcGetConnInfo(rpcMsg->handle, &connInfo);
+    bool usePublicIp = (connInfo.serverIp == tsPublicIpInt);
+    SRpcIpSet ipSet = {0};
+    mgmtGetMnodeIpSet(&ipSet, usePublicIp);
+    mTrace("conn from ip:%s user:%s redirect msg", taosIpStr(connInfo.clientIp), connInfo.user);
+    rpcSendRedirectRsp(rpcMsg->handle, &ipSet);
     return;
   }
@@ -332,7 +338,7 @@ static void mgmtProcessHeartBeatMsg(SQueuedMsg *pMsg) {
     return;
   }
 
-  mgmtGetMnodeIpList(&pHBRsp->ipList, pMsg->usePublicIp);
+  mgmtGetMnodeIpSet(&pHBRsp->ipList, pMsg->usePublicIp);
 
   /*
    * TODO
@@ -357,14 +363,18 @@ static int mgmtShellRetriveAuth(char *user, char *spi, char *encrypt, char *secr
   *encrypt = 0;
   *ckey = 0;
 
+  if (!sdbIsMaster()) {
+    *secret = 0;
+    return TSDB_CODE_SUCCESS;
+  }
+
   SUserObj *pUser = mgmtGetUser(user);
   if (pUser == NULL) {
     *secret = 0;
-    mgmtReleaseUser(pUser);
     return TSDB_CODE_INVALID_USER;
   } else {
     memcpy(secret, pUser->pass, TSDB_KEY_LEN);
-    mgmtReleaseUser(pUser);
+    mgmtDecUserRef(pUser);
     return TSDB_CODE_SUCCESS;
   }
 }
@@ -414,7 +424,7 @@ static void mgmtProcessConnectMsg(SQueuedMsg *pMsg) {
   pConnectRsp->writeAuth = pUser->writeAuth;
   pConnectRsp->superAuth = pUser->superAuth;
-  mgmtGetMnodeIpList(&pConnectRsp->ipList, pMsg->usePublicIp);
+  mgmtGetMnodeIpSet(&pConnectRsp->ipList, pMsg->usePublicIp);
 
 connect_over:
   rpcRsp.code = code;

View File

@@ -97,7 +97,7 @@ static int32_t mgmtChildTableActionInsert(SSdbOper *pOper) {
     mError("ctable:%s, not in vgroup:%d", pTable->info.tableId, pTable->vgId);
     return TSDB_CODE_INVALID_VGROUP_ID;
   }
-  mgmtReleaseVgroup(pVgroup);
+  mgmtDecVgroupRef(pVgroup);
 
   SDbObj *pDb = mgmtGetDb(pVgroup->dbName);
   if (pDb == NULL) {
@@ -108,7 +108,7 @@ static int32_t mgmtChildTableActionInsert(SSdbOper *pOper) {
   SAcctObj *pAcct = mgmtGetAcct(pDb->cfg.acct);
   if (pAcct == NULL) {
-    mError("ctable:%s, account:%s not exists", pTable->info.tableId, pDb->cfg.acct);
+    mError("ctable:%s, acct:%s not exists", pTable->info.tableId, pDb->cfg.acct);
     return TSDB_CODE_INVALID_ACCT;
   }
   mgmtDecAcctRef(pAcct);
@@ -139,7 +139,7 @@ static int32_t mgmtChildTableActionDelete(SSdbOper *pOper) {
   if (pVgroup == NULL) {
     return TSDB_CODE_INVALID_VGROUP_ID;
   }
-  mgmtReleaseVgroup(pVgroup);
+  mgmtDecVgroupRef(pVgroup);
 
   SDbObj *pDb = mgmtGetDb(pVgroup->dbName);
   if (pDb == NULL) {
@@ -150,7 +150,7 @@ static int32_t mgmtChildTableActionDelete(SSdbOper *pOper) {
   SAcctObj *pAcct = mgmtGetAcct(pDb->cfg.acct);
   if (pAcct == NULL) {
-    mError("ctable:%s, account:%s not exists", pTable->info.tableId, pDb->cfg.acct);
+    mError("ctable:%s, acct:%s not exists", pTable->info.tableId, pDb->cfg.acct);
     return TSDB_CODE_INVALID_ACCT;
   }
   mgmtDecAcctRef(pAcct);
@@ -275,7 +275,7 @@ static int32_t mgmtChildTableActionRestored() {
       pNode = pLastNode;
       continue;
     }
-    mgmtReleaseVgroup(pVgroup);
+    mgmtDecVgroupRef(pVgroup);
 
     if (strcmp(pVgroup->dbName, pDb->name) != 0) {
       mError("ctable:%s, db:%s not match with vgroup:%d db:%s sid:%d, discard it",
@@ -1194,17 +1194,15 @@ static void mgmtProcessSuperTableVgroupMsg(SQueuedMsg *pMsg) {
     pRsp->vgroups[vg].vgId = htonl(vgId);
 
     for (int32_t vn = 0; vn < pVgroup->numOfVnodes; ++vn) {
-      SDnodeObj *pDnode = mgmtGetDnode(pVgroup->vnodeGid[vn].dnodeId);
+      SDnodeObj *pDnode = pVgroup->vnodeGid[vn].pDnode;
       if (pDnode == NULL) break;
 
       pRsp->vgroups[vg].ipAddr[vn].ip = htonl(pDnode->privateIp);
       pRsp->vgroups[vg].ipAddr[vn].port = htons(tsDnodeShellPort);
       pRsp->vgroups[vg].numOfIps++;
-
-      mgmtReleaseDnode(pDnode);
     }
-    mgmtReleaseVgroup(pVgroup);
+    mgmtDecVgroupRef(pVgroup);
   }
 
   pRsp->numOfVgroups = htonl(vg);
@@ -1613,7 +1611,7 @@ static int32_t mgmtDoGetChildTableMeta(SQueuedMsg *pMsg, STableMetaMsg *pMeta) {
       pMeta->vgroup.ipAddr[i].port = htonl(tsDnodeShellPort);
     }
     pMeta->vgroup.numOfIps++;
-    mgmtReleaseDnode(pDnode);
+    mgmtDecDnodeRef(pDnode);
   }
   pMeta->vgroup.vgId = htonl(pVgroup->vgId);
@@ -1742,7 +1740,7 @@ static SChildTableObj* mgmtGetTableByPos(uint32_t dnodeId, int32_t vnode, int32_
   SChildTableObj *pTable = pVgroup->tableList[sid];
   mgmtIncTableRef((STableObj *)pTable);
-  mgmtReleaseVgroup(pVgroup);
+  mgmtDecVgroupRef(pVgroup);
 
   return pTable;
 }

View File

@@ -19,23 +19,23 @@
 #include "ttime.h"
 #include "tutil.h"
 #include "tglobal.h"
+#include "tgrant.h"
 #include "dnode.h"
 #include "mgmtDef.h"
 #include "mgmtLog.h"
 #include "mgmtAcct.h"
-#include "tgrant.h"
 #include "mgmtMnode.h"
 #include "mgmtSdb.h"
 #include "mgmtShell.h"
 #include "mgmtUser.h"
 
-void *  tsUserSdb = NULL;
+static void *  tsUserSdb = NULL;
 static int32_t tsUserUpdateSize = 0;
 static int32_t mgmtGetUserMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn);
 static int32_t mgmtRetrieveUsers(SShowObj *pShow, char *data, int32_t rows, void *pConn);
 static void    mgmtProcessCreateUserMsg(SQueuedMsg *pMsg);
 static void    mgmtProcessAlterUserMsg(SQueuedMsg *pMsg);
 static void    mgmtProcessDropUserMsg(SQueuedMsg *pMsg);
 
 static int32_t mgmtUserActionDestroy(SSdbOper *pOper) {
   tfree(pOper->pObj);
@@ -48,8 +48,8 @@ static int32_t mgmtUserActionInsert(SSdbOper *pOper) {
   if (pAcct != NULL) {
     mgmtAddUserToAcct(pAcct, pUser);
-  }
-  else {
+    mgmtDecAcctRef(pAcct);
+  } else {
     mError("user:%s, acct:%s info not exist in sdb", pUser->user, pUser->acct);
     return TSDB_CODE_INVALID_ACCT;
   }
@@ -63,6 +63,7 @@ static int32_t mgmtUserActionDelete(SSdbOper *pOper) {
   if (pAcct != NULL) {
     mgmtDropUserFromAcct(pAcct, pUser);
+    mgmtDecAcctRef(pAcct);
   }
 
   return TSDB_CODE_SUCCESS;
@@ -72,9 +73,10 @@ static int32_t mgmtUserActionUpdate(SSdbOper *pOper) {
   SUserObj *pUser = pOper->pObj;
   SUserObj *pSaved = mgmtGetUser(pUser->user);
   if (pUser != pSaved) {
-    memcpy(pSaved, pUser, pOper->rowSize);
+    memcpy(pSaved, pUser, tsUserUpdateSize);
     free(pUser);
   }
+  mgmtDecUserRef(pSaved);
   return TSDB_CODE_SUCCESS;
 }
@@ -86,7 +88,7 @@ static int32_t mgmtUserActionEncode(SSdbOper *pOper) {
 }
 
 static int32_t mgmtUserActionDecode(SSdbOper *pOper) {
-  SUserObj *pUser = (SUserObj *) calloc(1, sizeof(SUserObj));
+  SUserObj *pUser = (SUserObj *)calloc(1, sizeof(SUserObj));
   if (pUser == NULL) return TSDB_CODE_SERV_OUT_OF_MEMORY;
 
   memcpy(pUser, pOper->rowData, tsUserUpdateSize);
@@ -103,7 +105,7 @@ static int32_t mgmtUserActionRestored() {
     mgmtDecAcctRef(pAcct);
   }
 
-  return 0;
+  return TSDB_CODE_SUCCESS;
 }
 
 int32_t mgmtInitUsers() {
@@ -128,7 +130,7 @@ int32_t mgmtInitUsers() {
   tsUserSdb = sdbOpenTable(&tableDesc);
   if (tsUserSdb == NULL) {
-    mError("failed to init user data");
+    mError("table:%s, failed to create hash", tableDesc.tableName);
     return -1;
   }
@@ -138,7 +140,7 @@ int32_t mgmtInitUsers() {
   mgmtAddShellShowMetaHandle(TSDB_MGMT_TABLE_USER, mgmtGetUserMeta);
   mgmtAddShellShowRetrieveHandle(TSDB_MGMT_TABLE_USER, mgmtRetrieveUsers);
 
-  mTrace("table:users table is created");
+  mTrace("table:%s, hash is created", tableDesc.tableName);
   return 0;
 }
@@ -150,7 +152,15 @@ SUserObj *mgmtGetUser(char *name) {
   return (SUserObj *)sdbGetRow(tsUserSdb, name);
 }
 
-void mgmtReleaseUser(SUserObj *pUser) {
+void *mgmtGetNextUser(void *pNode, SUserObj **pUser) {
+  return sdbFetchRow(tsUserSdb, pNode, (void **)pUser);
+}
+
+void mgmtIncUserRef(SUserObj *pUser) {
+  return sdbIncRef(tsUserSdb, pUser);
+}
+
+void mgmtDecUserRef(SUserObj *pUser) {
   return sdbDecRef(tsUserSdb, pUser);
 }
@@ -172,18 +182,22 @@ static int32_t mgmtUpdateUser(SUserObj *pUser) {
 int32_t mgmtCreateUser(SAcctObj *pAcct, char *name, char *pass) {
   int32_t code = acctCheck(pAcct, ACCT_GRANT_USER);
-  if (code != 0) {
+  if (code != TSDB_CODE_SUCCESS) {
     return code;
   }
 
-  if (name[0] == 0 || pass[0] == 0) {
-    return TSDB_CODE_INVALID_MSG_CONTENT;
+  if (name[0] == 0) {
+    return TSDB_CODE_INVALID_USER_FORMAT;
+  }
+
+  if (pass[0] == 0) {
+    return TSDB_CODE_INVALID_PASS_FORMAT;
   }
 
   SUserObj *pUser = mgmtGetUser(name);
   if (pUser != NULL) {
-    mTrace("user:%s is already there", name);
-    mgmtReleaseUser(pUser);
+    mTrace("user:%s, is already there", name);
+    mgmtDecUserRef(pUser);
     return TSDB_CODE_USER_ALREADY_EXIST;
   }
@@ -237,10 +251,10 @@ static int32_t mgmtDropUser(SUserObj *pUser) {
 static int32_t mgmtGetUserMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
   SUserObj *pUser = mgmtGetUserFromConn(pConn, NULL);
   if (pUser == NULL) {
-    return TSDB_CODE_INVALID_USER;
+    return TSDB_CODE_NO_USER_FROM_CONN;
   }
 
   int32_t cols = 0;
   SSchema *pSchema = pMeta->schema;
 
   pShow->bytes[cols] = TSDB_USER_LEN;
@@ -273,7 +287,7 @@ static int32_t mgmtGetUserMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pCon
   pShow->numOfRows = pUser->pAcct->acctInfo.numOfUsers;
   pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
 
-  mgmtReleaseUser(pUser);
+  mgmtDecUserRef(pUser);
   return 0;
 }
@@ -308,8 +322,9 @@ static int32_t mgmtRetrieveUsers(SShowObj *pShow, char *data, int32_t rows, void
     cols++;
 
     numOfRows++;
-    mgmtReleaseUser(pUser);
+    mgmtDecUserRef(pUser);
   }
 
   pShow->numOfReads += numOfRows;
   return numOfRows;
 }
@@ -321,20 +336,21 @@ SUserObj *mgmtGetUserFromConn(void *pConn, bool *usePublicIp) {
       *usePublicIp = (connInfo.serverIp == tsPublicIpInt);
     }
     return mgmtGetUser(connInfo.user);
+  } else {
+    mError("can not get user from conn:%p", pConn);
+    return NULL;
   }
-
-  return NULL;
 }
 
 static void mgmtProcessCreateUserMsg(SQueuedMsg *pMsg) {
   int32_t code;
-  SUserObj *pUser = pMsg->pUser;
+  SUserObj *pOperUser = pMsg->pUser;
 
-  if (pUser->superAuth) {
+  if (pOperUser->superAuth) {
     SCMCreateUserMsg *pCreate = pMsg->pCont;
-    code = mgmtCreateUser(pUser->pAcct, pCreate->user, pCreate->pass);
+    code = mgmtCreateUser(pOperUser->pAcct, pCreate->user, pCreate->pass);
     if (code == TSDB_CODE_SUCCESS) {
-      mLPrint("user:%s is created by %s", pCreate->user, pUser->user);
+      mLPrint("user:%s, is created by %s", pCreate->user, pOperUser->user);
     }
   } else {
     code = TSDB_CODE_NO_RIGHTS;
@@ -356,7 +372,7 @@ static void mgmtProcessAlterUserMsg(SQueuedMsg *pMsg) {
   if (strcmp(pUser->user, "monitor") == 0 || (strcmp(pUser->user + 1, pUser->acct) == 0 && pUser->user[0] == '_')) {
     mgmtSendSimpleResp(pMsg->thandle, TSDB_CODE_NO_RIGHTS);
-    mgmtReleaseUser(pUser);
+    mgmtDecUserRef(pUser);
     return;
   }
@@ -380,7 +396,7 @@ static void mgmtProcessAlterUserMsg(SQueuedMsg *pMsg) {
     memset(pUser->pass, 0, sizeof(pUser->pass));
     taosEncryptPass((uint8_t*)pAlter->pass, strlen(pAlter->pass), pUser->pass);
 
     code = mgmtUpdateUser(pUser);
-    mLPrint("user:%s password is altered by %s, result:%d", pUser->user, pOperUser->user, tstrerror(code));
+    mLPrint("user:%s, password is altered by %s, result:%s", pUser->user, pOperUser->user, tstrerror(code));
   } else {
     code = TSDB_CODE_NO_RIGHTS;
   }
@@ -422,7 +438,7 @@ static void mgmtProcessAlterUserMsg(SQueuedMsg *pMsg) {
     }
 
     code = mgmtUpdateUser(pUser);
-    mLPrint("user:%s privilege is altered by %s, result:%d", pUser->user, pOperUser->user, tstrerror(code));
+    mLPrint("user:%s, privilege is altered by %s, result:%s", pUser->user, pOperUser->user, tstrerror(code));
   } else {
     code = TSDB_CODE_NO_RIGHTS;
   }
@@ -432,7 +448,7 @@ static void mgmtProcessAlterUserMsg(SQueuedMsg *pMsg) {
     mgmtSendSimpleResp(pMsg->thandle, TSDB_CODE_NO_RIGHTS);
   }
 
-  mgmtReleaseUser(pUser);
+  mgmtDecUserRef(pUser);
 }
 
 static void mgmtProcessDropUserMsg(SQueuedMsg *pMsg) {
@@ -443,13 +459,13 @@ static void mgmtProcessDropUserMsg(SQueuedMsg *pMsg) {
   SUserObj *pUser = mgmtGetUser(pDrop->user);
   if (pUser == NULL) {
     mgmtSendSimpleResp(pMsg->thandle, TSDB_CODE_INVALID_USER);
-    return ;
+    return;
   }
 
   if (strcmp(pUser->user, "monitor") == 0 || strcmp(pUser->user, pUser->acct) == 0 ||
       (strcmp(pUser->user + 1, pUser->acct) == 0 && pUser->user[0] == '_')) {
     mgmtSendSimpleResp(pMsg->thandle, TSDB_CODE_NO_RIGHTS);
-    mgmtReleaseUser(pUser);
+    mgmtDecUserRef(pUser);
     return ;
   }
@@ -471,14 +487,14 @@ static void mgmtProcessDropUserMsg(SQueuedMsg *pMsg) {
   if (hasRight) {
     code = mgmtDropUser(pUser);
     if (code == TSDB_CODE_SUCCESS) {
-      mLPrint("user:%s is dropped by %s, result:%s", pUser->user, pOperUser->user, tstrerror(code));
+      mLPrint("user:%s, is dropped by %s, result:%s", pUser->user, pOperUser->user, tstrerror(code));
     }
   } else {
     code = TSDB_CODE_NO_RIGHTS;
   }
 
   mgmtSendSimpleResp(pMsg->thandle, code);
-  mgmtReleaseUser(pUser);
+  mgmtDecUserRef(pUser);
 }
 
 void mgmtDropAllUsers(SAcctObj *pAcct) {
@@ -504,7 +520,7 @@ void mgmtDropAllUsers(SAcctObj *pAcct) {
       numOfUsers++;
     }
 
-    mgmtReleaseUser(pUser);
+    mgmtDecUserRef(pUser);
   }
 
   mTrace("acct:%s, all users:%d is dropped from sdb", pAcct->user, numOfUsers);

View File

@@ -44,9 +44,7 @@ static int32_t mgmtRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, vo
 static void    mgmtProcessCreateVnodeRsp(SRpcMsg *rpcMsg);
 static void    mgmtProcessDropVnodeRsp(SRpcMsg *rpcMsg);
 static void    mgmtProcessVnodeCfgMsg(SRpcMsg *rpcMsg) ;
 static void    mgmtSendDropVgroupMsg(SVgObj *pVgroup, void *ahandle);
-static void    mgmtSendCreateVgroupMsg(SVgObj *pVgroup, void *ahandle);
 
 static int32_t mgmtVgroupActionDestroy(SSdbOper *pOper) {
   SVgObj *pVgroup = pOper->pObj;
@@ -68,7 +66,6 @@ static int32_t mgmtVgroupActionInsert(SSdbOper *pOper) {
   if (pDb == NULL) {
     return TSDB_CODE_INVALID_DB;
   }
-  mgmtDecDbRef(pDb);
 
   pVgroup->pDb = pDb;
   pVgroup->prev = NULL;
@@ -91,15 +88,13 @@ static int32_t mgmtVgroupActionInsert(SSdbOper *pOper) {
   for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
     SDnodeObj *pDnode = mgmtGetDnode(pVgroup->vnodeGid[i].dnodeId);
     if (pDnode != NULL) {
-      pVgroup->vnodeGid[i].privateIp = pDnode->privateIp;
-      pVgroup->vnodeGid[i].publicIp = pDnode->publicIp;
+      pVgroup->vnodeGid[i].pDnode = pDnode;
       atomic_add_fetch_32(&pDnode->openVnodes, 1);
-      mgmtReleaseDnode(pDnode);
+      mgmtDecDnodeRef(pDnode);
     }
   }
 
   mgmtAddVgroupIntoDb(pVgroup);
-  mgmtIncDbRef(pVgroup->pDb);
 
   return TSDB_CODE_SUCCESS;
 }
@@ -115,10 +110,10 @@ static int32_t mgmtVgroupActionDelete(SSdbOper *pOper) {
 
   for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
     SDnodeObj *pDnode = mgmtGetDnode(pVgroup->vnodeGid[i].dnodeId);
-    if (pDnode) {
+    if (pDnode != NULL) {
       atomic_sub_fetch_32(&pDnode->openVnodes, 1);
     }
-    mgmtReleaseDnode(pDnode);
+    mgmtDecDnodeRef(pDnode);
   }
 
   return TSDB_CODE_SUCCESS;
@@ -127,9 +122,25 @@ static int32_t mgmtVgroupActionUpdate(SSdbOper *pOper) {
   SVgObj *pNew = pOper->pObj;
   SVgObj *pVgroup = mgmtGetVgroup(pNew->vgId);
 
   if (pVgroup != pNew) {
+    for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
+      SDnodeObj *pDnode = pVgroup->vnodeGid[i].pDnode;
+      if (pDnode != NULL) {
+        atomic_sub_fetch_32(&pDnode->openVnodes, 1);
+      }
+    }
+
     memcpy(pVgroup, pNew, pOper->rowSize);
     free(pNew);
+
+    for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
+      SDnodeObj *pDnode = mgmtGetDnode(pVgroup->vnodeGid[i].dnodeId);
+      pVgroup->vnodeGid[i].pDnode = pDnode;
+      if (pDnode != NULL) {
+        atomic_add_fetch_32(&pDnode->openVnodes, 1);
+      }
+    }
   }
 
   int32_t oldTables = taosIdPoolMaxSize(pVgroup->idPool);
@@ -150,6 +161,12 @@ static int32_t mgmtVgroupActionUpdate(SSdbOper *pOper) {
 static int32_t mgmtVgroupActionEncode(SSdbOper *pOper) {
   SVgObj *pVgroup = pOper->pObj;
   memcpy(pOper->rowData, pVgroup, tsVgUpdateSize);
+
+  SVgObj *pTmpVgroup = pOper->rowData;
+  for (int32_t i = 0; i < TSDB_VNODES_SUPPORT; ++i) {
+    pTmpVgroup->vnodeGid[i].pDnode = NULL;
+    pTmpVgroup->vnodeGid[i].role = 0;
+  }
+
   pOper->rowSize = tsVgUpdateSize;
   return TSDB_CODE_SUCCESS;
 }
@@ -204,7 +221,11 @@ int32_t mgmtInitVgroups() {
   return 0;
 }
 
-void mgmtReleaseVgroup(SVgObj *pVgroup) {
+void mgmtIncVgroupRef(SVgObj *pVgroup) {
+  return sdbIncRef(tsVgroupSdb, pVgroup);
+}
+
+void mgmtDecVgroupRef(SVgObj *pVgroup) {
   return sdbDecRef(tsVgroupSdb, pVgroup);
 }
@@ -224,16 +245,38 @@ void mgmtUpdateVgroup(SVgObj *pVgroup) {
   mgmtSendCreateVgroupMsg(pVgroup, NULL);
 }
 
-void mgmtUpdateVgroupStatus(SVgObj *pVgroup, int32_t dnodeId, SVnodeLoad *pVload) {
-  if (pVload->role == TAOS_SYNC_ROLE_MASTER) {
-    for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
-      SVnodeGid *pVgid = &pVgroup->vnodeGid[i];
-      if (pVgid->dnodeId == dnodeId) {
-        pVgroup->inUse = i;
-        break;
-      }
-    }
-  }
+void mgmtUpdateVgroupStatus(SVgObj *pVgroup, SDnodeObj *pDnode, SVnodeLoad *pVload) {
+  bool dnodeExist = false;
+  for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
+    SVnodeGid *pVgid = &pVgroup->vnodeGid[i];
+    if (pVgid->pDnode == pDnode) {
+      pVgid->role = pVload->role;
+      if (pVload->role == TAOS_SYNC_ROLE_MASTER) {
+        pVgroup->inUse = i;
+      }
+      dnodeExist = true;
+      break;
+    }
+  }
+
+  if (!dnodeExist) {
+    SRpcIpSet ipSet = mgmtGetIpSetFromIp(pDnode->privateIp);
+    mError("vgroup:%d, dnode:%d not exist in mnode, drop it", pVload->vgId, pDnode->dnodeId);
+    mgmtSendDropVnodeMsg(pVload->vgId, &ipSet, NULL);
+    return;
+  }
+
+  if (pVload->role == TAOS_SYNC_ROLE_MASTER) {
+    pVgroup->totalStorage = htobe64(pVload->totalStorage);
+    pVgroup->compStorage = htobe64(pVload->compStorage);
+    pVgroup->pointsWritten = htobe64(pVload->pointsWritten);
+  }
+
+  if (pVload->replica != pVgroup->numOfVnodes) {
+    mError("dnode:%d, vgroup:%d replica:%d not match with mgmt:%d", pDnode->dnodeId, pVload->vgId, pVload->replica,
+           pVgroup->numOfVnodes);
+    mgmtSendCreateVgroupMsg(pVgroup, NULL);
+  }
 }
 
 SVgObj *mgmtGetAvailableVgroup(SDbObj *pDb) {
@@ -340,7 +383,7 @@ int32_t mgmtGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
     mgmtDecTableRef(pTable);
     pVgroup = mgmtGetVgroup(((SChildTableObj*)pTable)->vgId);
     if (NULL == pVgroup) return TSDB_CODE_INVALID_TABLE_ID;
-    mgmtReleaseVgroup(pVgroup);
+    mgmtDecVgroupRef(pVgroup);
     maxReplica = pVgroup->numOfVnodes > maxReplica ? pVgroup->numOfVnodes : maxReplica;
   } else {
     SVgObj *pVgroup = pDb->pHead;
@@ -391,27 +434,6 @@ int32_t mgmtGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
   return 0;
 }
 
-char *mgmtGetVnodeStatus(SVgObj *pVgroup, SVnodeGid *pVnode) {
-  SDnodeObj *pDnode = mgmtGetDnode(pVnode->dnodeId);
-  if (pDnode == NULL) {
-    mError("vgroup:%d, not exist in dnode:%d", pVgroup->vgId, pDnode->dnodeId);
-    return "null";
-  }
-  mgmtReleaseDnode(pDnode);
-
-  if (pDnode->status == TAOS_DN_STATUS_OFFLINE) {
-    return "offline";
-  }
-
-  for (int i = 0; i < pDnode->openVnodes; ++i) {
-    if (pDnode->vload[i].vgId == pVgroup->vgId) {
-      return pDnode->vload[i].status ? "ready" : "offline";
-    }
-  }
-
-  return "null";
-}
-
 int32_t mgmtRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pConn) {
   int32_t numOfRows = 0;
   SVgObj *pVgroup = NULL;
@@ -453,19 +475,24 @@ int32_t mgmtRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pCo
       *(int16_t *) pWrite = pVgroup->vnodeGid[i].dnodeId;
       cols++;
 
-      tinet_ntoa(ipstr, pVgroup->vnodeGid[i].privateIp);
-      pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
-      strcpy(pWrite, ipstr);
-      cols++;
-
-      pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
-      if (pVgroup->vnodeGid[i].dnodeId != 0) {
-        char *vnodeStatus = mgmtGetVnodeStatus(pVgroup, pVgroup->vnodeGid + i);
-        strcpy(pWrite, vnodeStatus);
+      SDnodeObj *pDnode = pVgroup->vnodeGid[i].pDnode;
+
+      if (pDnode != NULL) {
+        tinet_ntoa(ipstr, pDnode->privateIp);
+        pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
+        strcpy(pWrite, ipstr);
+        cols++;
+
+        pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
+        strcpy(pWrite, mgmtGetMnodeRoleStr(pVgroup->vnodeGid[i].role));
+        cols++;
       } else {
+        pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
         strcpy(pWrite, "null");
+        cols++;
+
+        pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
+        strcpy(pWrite, "null");
+        cols++;
       }
-      cols++;
     }
 
     numOfRows++;
@@ -506,27 +533,38 @@ SMDCreateVnodeMsg *mgmtBuildCreateVnodeMsg(SVgObj *pVgroup) {
   SMDCreateVnodeMsg *pVnode = rpcMallocCont(sizeof(SMDCreateVnodeMsg));
   if (pVnode == NULL) return NULL;
 
-  pVnode->cfg = pDb->cfg;
-
-  SVnodeCfg *pCfg = &pVnode->cfg;
-  pCfg->vgId = htonl(pVgroup->vgId);
-  pCfg->maxSessions = htonl(pCfg->maxSessions);
-  pCfg->cacheBlockSize = htonl(pCfg->cacheBlockSize);
-  pCfg->cacheNumOfBlocks.totalBlocks = htonl(pCfg->cacheNumOfBlocks.totalBlocks);
-  pCfg->daysPerFile = htonl(pCfg->daysPerFile);
-  pCfg->daysToKeep1 = htonl(pCfg->daysToKeep1);
-  pCfg->daysToKeep2 = htonl(pCfg->daysToKeep2);
-  pCfg->daysToKeep = htonl(pCfg->daysToKeep);
-  pCfg->commitTime = htonl(pCfg->commitTime);
-  pCfg->rowsInFileBlock = htonl(pCfg->rowsInFileBlock);
-  pCfg->blocksPerTable = htons(pCfg->blocksPerTable);
-  pCfg->replications = (int8_t) pVgroup->numOfVnodes;
+  SMDVnodeCfg *pCfg = &pVnode->cfg;
+  pCfg->vgId = htonl(pVgroup->vgId);
+  pCfg->maxTables = htonl(pDb->cfg.maxSessions);
+  pCfg->maxCacheSize = htobe64((int64_t)pDb->cfg.cacheBlockSize * pDb->cfg.cacheNumOfBlocks.totalBlocks);
+  pCfg->maxCacheSize = htobe64(-1);
+  pCfg->minRowsPerFileBlock = htonl(-1);
+  pCfg->maxRowsPerFileBlock = htonl(-1);
+  pCfg->daysPerFile = htonl(pDb->cfg.daysPerFile);
+  pCfg->daysToKeep1 = htonl(pDb->cfg.daysToKeep1);
+  pCfg->daysToKeep2 = htonl(pDb->cfg.daysToKeep2);
+  pCfg->daysToKeep = htonl(pDb->cfg.daysToKeep);
+  pCfg->daysToKeep = htonl(-1);
+  pCfg->commitTime = htonl(pDb->cfg.commitTime);
+  pCfg->precision = pDb->cfg.precision;
+  pCfg->compression = pDb->cfg.compression;
+  pCfg->compression = -1;
+  pCfg->wals = 3;
+  pCfg->commitLog = pDb->cfg.commitLog;
+  pCfg->replications = (int8_t) pVgroup->numOfVnodes;
+  pCfg->quorum = 1;
 
-  SVnodeDesc *vpeerDesc = pVnode->vpeerDesc;
+  SMDVnodeDesc *pNodes = pVnode->nodes;
   for (int32_t j = 0; j < pVgroup->numOfVnodes; ++j) {
-    vpeerDesc[j].vgId = htonl(pVgroup->vgId);
-    vpeerDesc[j].dnodeId = htonl(pVgroup->vnodeGid[j].dnodeId);
-    vpeerDesc[j].ip = htonl(pVgroup->vnodeGid[j].privateIp);
+    SDnodeObj *pDnode = pVgroup->vnodeGid[j].pDnode;
+    if (pDnode != NULL) {
+      pNodes[j].nodeId = htonl(pDnode->dnodeId);
+      pNodes[j].nodeIp = htonl(pDnode->privateIp);
+      strcpy(pNodes[j].nodeName, pDnode->dnodeName);
+
+      if (j == 0) {
+        pCfg->arbitratorIp = htonl(pDnode->privateIp);
+      }
+    }
   }
 
   return pVnode;
@@ -539,7 +577,7 @@ SRpcIpSet mgmtGetIpSetFromVgroup(SVgObj *pVgroup) {
     .port = tsDnodeMnodePort
   };
   for (int i = 0; i < pVgroup->numOfVnodes; ++i) {
-    ipSet.ip[i] = pVgroup->vnodeGid[i].privateIp;
+    ipSet.ip[i] = pVgroup->vnodeGid[i].pDnode->privateIp;
   }
   return ipSet;
 }
@@ -570,7 +608,7 @@ void mgmtSendCreateVnodeMsg(SVgObj *pVgroup, SRpcIpSet *ipSet, void *ahandle) {
 void mgmtSendCreateVgroupMsg(SVgObj *pVgroup, void *ahandle) {
   mTrace("vgroup:%d, send create all vnodes msg, ahandle:%p", pVgroup->vgId, ahandle);
   for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
-    SRpcIpSet ipSet = mgmtGetIpSetFromIp(pVgroup->vnodeGid[i].privateIp);
+    SRpcIpSet ipSet = mgmtGetIpSetFromIp(pVgroup->vnodeGid[i].pDnode->privateIp);
     mgmtSendCreateVnodeMsg(pVgroup, &ipSet, ahandle);
   }
 }
@@ -636,7 +674,7 @@ void mgmtSendDropVnodeMsg(int32_t vgId, SRpcIpSet *ipSet, void *ahandle) {
 static void mgmtSendDropVgroupMsg(SVgObj *pVgroup, void *ahandle) {
   mTrace("vgroup:%d, send drop all vnodes msg, ahandle:%p", pVgroup->vgId, ahandle);
   for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
-    SRpcIpSet ipSet = mgmtGetIpSetFromIp(pVgroup->vnodeGid[i].privateIp);
+    SRpcIpSet ipSet = mgmtGetIpSetFromIp(pVgroup->vnodeGid[i].pDnode->privateIp);
    mgmtSendDropVnodeMsg(pVgroup->vgId, &ipSet, ahandle);
   }
 }
@@ -687,7 +725,7 @@ static void mgmtProcessVnodeCfgMsg(SRpcMsg *rpcMsg) {
     mgmtSendSimpleResp(rpcMsg->handle, TSDB_CODE_NOT_ACTIVE_VNODE);
     return;
   }
-  mgmtReleaseDnode(pDnode);
+  mgmtDecDnodeRef(pDnode);
 
   SVgObj *pVgroup = mgmtGetVgroup(pCfg->vgId);
   if (pVgroup == NULL) {
@@ -695,7 +733,7 @@ static void mgmtProcessVnodeCfgMsg(SRpcMsg *rpcMsg) {
     mgmtSendSimpleResp(rpcMsg->handle, TSDB_CODE_NOT_ACTIVE_VNODE);
     return;
   }
-  mgmtReleaseVgroup(pVgroup);
+  mgmtDecVgroupRef(pVgroup);
 
   mgmtSendSimpleResp(rpcMsg->handle, TSDB_CODE_SUCCESS);
@@ -711,7 +749,7 @@ void mgmtDropAllVgroups(SDbObj *pDropDb) {
   SVgObj *pVgroup = NULL;
 
   while (1) {
-    mgmtReleaseVgroup(pVgroup);
+    mgmtDecVgroupRef(pVgroup);
     pNode = sdbFetchRow(tsVgroupSdb, pNode, (void **)&pVgroup);
     if (pVgroup == NULL) break;
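One detail worth a closer look in the mgmtVgroup.c hunks: mgmtVgroupActionEncode now nulls vnodeGid[i].pDnode and zeroes role before the row bytes are persisted, because a cached pointer is only meaningful inside the process that wrote it. A minimal sketch of that scrub-before-serialize rule; the struct below is illustrative, not the real SVgObj layout:

#include <stdio.h>
#include <string.h>

typedef struct SPeer SPeer;

typedef struct {
  int    id;
  SPeer *pPeer;  /* runtime-only cache, never valid on disk */
  int    role;   /* runtime-only state */
} SSlot;

static void encodeSlots(SSlot *dst, const SSlot *src, int n) {
  memcpy(dst, src, sizeof(SSlot) * n);
  for (int i = 0; i < n; ++i) {
    dst[i].pPeer = NULL;  /* same idea as pTmpVgroup->vnodeGid[i].pDnode = NULL */
    dst[i].role  = 0;
  }
}

int main() {
  SSlot live[2] = { { .id = 1, .pPeer = (SPeer *)0x1234, .role = 2 },
                    { .id = 2, .pPeer = (SPeer *)0x5678, .role = 1 } };
  SSlot row[2];
  encodeSlots(row, live, 2);
  printf("slot0 id:%d pPeer:%p role:%d\n", row[0].id, (void *)row[0].pPeer, row[0].role);
  return 0;
}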

View File

@@ -397,6 +397,7 @@ void monitorSaveAcctLog(char *acctId, int64_t currentPointsPerSecond, int64_t ma
                         int64_t totalOutbound, int64_t maxOutbound, int64_t totalDbs, int64_t maxDbs,
                         int64_t totalUsers, int64_t maxUsers, int64_t totalStreams, int64_t maxStreams,
                         int64_t totalConns, int64_t maxConns, int8_t accessState) {
+  if (monitor == NULL) return;
   if (monitor->state != MONITOR_STATE_INITIALIZED) return;
 
   char sql[1024] = {0};

View File

@@ -466,8 +466,8 @@ static void taosFreeFdObj(SFdObj *pFdObj) {
   }
 
   pFdObj->signature = NULL;
-  close(pFdObj->fd);
   epoll_ctl(pThreadObj->pollFd, EPOLL_CTL_DEL, pFdObj->fd, NULL);
+  close(pFdObj->fd);
 
   pThreadObj->numOfFds--;
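The two-line hunk above reorders teardown so the descriptor is deregistered from epoll while it is still open; closing first would make EPOLL_CTL_DEL act on a dead or, in a threaded server, an already-reused fd. A small standalone sketch of the same ordering, where a pipe stands in for the real TCP socket:

#include <sys/epoll.h>
#include <unistd.h>
#include <stdio.h>

static void teardown(int epollFd, int fd) {
  /* 1. deregister while fd is still valid */
  epoll_ctl(epollFd, EPOLL_CTL_DEL, fd, NULL);
  /* 2. only then release the descriptor */
  close(fd);
}

int main() {
  int epollFd = epoll_create1(0);
  int fds[2];
  pipe(fds);  /* any pollable fd will do for the demo */

  struct epoll_event ev = { .events = EPOLLIN, .data.fd = fds[0] };
  epoll_ctl(epollFd, EPOLL_CTL_ADD, fds[0], &ev);

  teardown(epollFd, fds[0]);
  close(fds[1]);
  close(epollFd);
  printf("teardown done\n");
  return 0;
}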

View File

@@ -11,7 +11,7 @@
 #define IS_VALID_COMPRESSION(compression) (((compression) >= NO_COMPRESSION) && ((compression) <= TWO_STAGE_COMP))
 #define TSDB_MIN_ID 0
 #define TSDB_MAX_ID INT_MAX
-#define TSDB_MIN_TABLES 10
+#define TSDB_MIN_TABLES 4
 #define TSDB_MAX_TABLES 100000
 #define TSDB_DEFAULT_TABLES 1000
 #define TSDB_DEFAULT_DAYS_PER_FILE 10
@@ -282,6 +282,8 @@ int32_t tsdbConfigRepo(TsdbRepoT *repo, STsdbCfg *pCfg) {
 int32_t tsdbTriggerCommit(TsdbRepoT *repo) {
   STsdbRepo *pRepo = (STsdbRepo *)repo;
 
+  if (pRepo->appH.walCallBack) pRepo->appH.walCallBack(pRepo->appH.appH);
+
   tsdbLockRepo(repo);
   if (pRepo->commit) {
@@ -387,7 +389,7 @@ int tsdbInitTableCfg(STableCfg *config, ETableType type, int64_t uid, int32_t ti
   config->superUid = TSDB_INVALID_SUPER_TABLE_ID;
   config->tableId.uid = uid;
   config->tableId.tid = tid;
-  config->name = strdup("test1");
+  config->name = NULL;
   return 0;
 }
@@ -854,8 +856,6 @@ static void *tsdbCommitData(void *arg) {
   SRWHelper whelper = {0};
   if (pCache->imem == NULL) return NULL;
 
-  if (pRepo->appH.walCallBack) pRepo->appH.walCallBack(pRepo->appH.appH);
-
   // Create the iterator to read from cache
   SSkipListIterator **iters = tsdbCreateTableIters(pMeta, pCfg->maxTables);
   if (iters == NULL) {
@@ -880,6 +880,7 @@ static void *tsdbCommitData(void *arg) {
 _exit:
   tdFreeDataCols(pDataCols);
   tsdbDestroyTableIters(iters, pCfg->maxTables);
+  tsdbDestroyHelper(&whelper);
 
   tsdbLockRepo(arg);
   tdListMove(pCache->imem->list, pCache->pool.memPool);

View File

@@ -403,6 +403,7 @@ int tsdbWriteCompInfo(SRWHelper *pHelper) {
   } else {
     pHelper->pCompInfo->delimiter = TSDB_FILE_DELIMITER;
     pHelper->pCompInfo->uid = pHelper->tableInfo.uid;
+    pHelper->pCompInfo->checksum = 0;
     ASSERT((pIdx->len - sizeof(SCompInfo) - sizeof(TSCKSUM)) % sizeof(SCompBlock) == 0);
     taosCalcChecksumAppend(0, (uint8_t *)pHelper->pCompInfo, pIdx->len);
     pIdx->offset = lseek(pHelper->files.nHeadF.fd, 0, SEEK_END);
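The added pHelper->pCompInfo->checksum = 0 follows the usual rule for in-band checksums: when the checksum field lives inside the region being summed, it must be zeroed before computing, or a stale value poisons the result. A standalone sketch of the pattern; the struct and the toy sum are stand-ins, not TDengine's TSCKSUM machinery:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct {
  uint64_t uid;
  uint32_t checksum;  /* stored in-band, covered by the sum */
} SBlockHead;

static uint32_t toySum(const uint8_t *p, size_t n) {
  uint32_t s = 0;
  while (n--) s = s * 131 + *p++;
  return s;
}

int main() {
  SBlockHead head;
  memset(&head, 0, sizeof(head));
  head.uid = 42;
  head.checksum = 0xDEADBEEF;  /* pretend a stale value was left over */

  head.checksum = 0;  /* the equivalent of pCompInfo->checksum = 0 */
  head.checksum = toySum((uint8_t *)&head, sizeof(head));

  /* a verifier re-zeroes the field and recomputes; both sides now agree */
  printf("checksum: %u\n", head.checksum);
  return 0;
}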

View File

@@ -4,6 +4,7 @@
 #include "tdataformat.h"
 #include "tsdbMain.h"
+#include "tskiplist.h"
 
 static double getCurTime() {
   struct timeval tv;
@@ -141,6 +142,7 @@ TEST(TsdbTest, createRepo) {
   STableCfg tCfg;
   ASSERT_EQ(tsdbInitTableCfg(&tCfg, TSDB_SUPER_TABLE, 987607499877672L, 0), -1);
   ASSERT_EQ(tsdbInitTableCfg(&tCfg, TSDB_NORMAL_TABLE, 987607499877672L, 0), 0);
+  tsdbTableSetName(&tCfg, "test", false);
 
   int nCols = 5;
   STSchema *schema = tdNewSchema(nCols);
@@ -167,7 +169,7 @@ TEST(TsdbTest, createRepo) {
     .sversion = tCfg.sversion,
     .startTime = 1584081000000,
     .interval = 1000,
-    .totalRows = 5000000,
+    .totalRows = 10000000,
     .rowsPerSubmit = 1,
     .pSchema = schema
   };
@@ -262,4 +264,47 @@ TEST(TsdbTest, DISABLED_createFileGroup) {
   // ASSERT_EQ(tsdbCreateFileGroup("/home/ubuntu/work/ttest/vnode0/data", 1820, &fGroup, 1000), 0);
 
   int k = 0;
+}
+
+static char *getTKey(const void *data) {
+  return (char *)data;
+}
+
+static void insertSkipList(bool isAscend) {
+  TSKEY start_time = 1587393453000;
+  TSKEY interval = 1000;
+
+  SSkipList *pList = tSkipListCreate(5, TSDB_DATA_TYPE_TIMESTAMP, sizeof(TSKEY), 0, 0, 1, getTKey);
+  ASSERT_NE(pList, nullptr);
+
+  for (size_t i = 0; i < 20000000; i++)
+  {
+    TSKEY time = isAscend ? (start_time + i * interval) : (start_time - i * interval);
+
+    int32_t level = 0;
+    int32_t headSize = 0;
+    tSkipListNewNodeInfo(pList, &level, &headSize);
+
+    SSkipListNode *pNode = (SSkipListNode *)malloc(headSize + sizeof(TSKEY));
+    ASSERT_NE(pNode, nullptr);
+
+    pNode->level = level;
+    *(TSKEY *)((char *)pNode + headSize) = time;
+    tSkipListPut(pList, pNode);
+  }
+
+  tSkipListDestroy(pList);
+}
+
+TEST(TsdbTest, DISABLED_testSkipList) {
+// TEST(TsdbTest, testSkipList) {
+  double stime = getCurTime();
+  insertSkipList(true);
+  double etime = getCurTime();
+  printf("Time used to insert 100000000 records takes %f seconds\n", etime-stime);
+
+  stime = getCurTime();
+  insertSkipList(false);
+  etime = getCurTime();
+  printf("Time used to insert 100000000 records takes %f seconds\n", etime-stime);
 }

View File

@@ -170,6 +170,8 @@ char *taosIpStr(uint32_t ipInt);
 uint32_t ip2uint(const char *const ip_addr);
 
+void taosRemoveDir(char *rootDir);
+
 #define TAOS_ALLOC_MODE_DEFAULT 0
 #define TAOS_ALLOC_MODE_RANDOM_FAIL 1
 #define TAOS_ALLOC_MODE_DETECT_LEAK 2

View File

@@ -662,4 +662,28 @@ void tzfree(void *ptr) {
   if (ptr) {
     free((void *)((char *)ptr - sizeof(size_t)));
   }
+}
+
+void taosRemoveDir(char *rootDir) {
+  DIR *dir = opendir(rootDir);
+  if (dir == NULL) return;
+
+  struct dirent *de = NULL;
+  while ((de = readdir(dir)) != NULL) {
+    if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0) continue;
+
+    char filename[1024];
+    snprintf(filename, 1023, "%s/%s", rootDir, de->d_name);
+    if (de->d_type & DT_DIR) {
+      taosRemoveDir(filename);
+    } else {
+      remove(filename);
+      uPrint("file:%s is removed", filename);
+    }
+  }
+
+  closedir(dir);
+  rmdir(rootDir);
+
+  uPrint("dir:%s is removed", rootDir);
 }
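A small usage sketch for the new taosRemoveDir helper: create a nested directory tree, then remove it recursively. The paths are illustrative and error handling is elided; only the declaration added to tutil.h above is assumed:

#include <stdio.h>
#include <sys/stat.h>

void taosRemoveDir(char *rootDir);  /* from tutil.h */

int main() {
  mkdir("/tmp/td_demo", 0755);
  mkdir("/tmp/td_demo/sub", 0755);
  FILE *fp = fopen("/tmp/td_demo/sub/file.txt", "w");
  if (fp) fclose(fp);

  char root[] = "/tmp/td_demo";
  taosRemoveDir(root);  /* removes file.txt, then sub/, then td_demo/ */
  return 0;
}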

View File

@@ -9,6 +9,7 @@ IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM))
   INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/tsdb/inc)
   INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/common/inc)
   INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/dnode/inc)
+  INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
   INCLUDE_DIRECTORIES(${TD_ENTERPRISE_DIR}/src/inc)
   INCLUDE_DIRECTORIES(inc)
   AUX_SOURCE_DIRECTORY(src SRC)

View File

@@ -18,10 +18,12 @@
 #include "ihash.h"
 #include "taoserror.h"
 #include "taosmsg.h"
+#include "tutil.h"
 #include "trpc.h"
 #include "tsdb.h"
 #include "ttime.h"
 #include "ttimer.h"
+#include "cJSON.h"
 #include "twal.h"
 #include "tglobal.h"
 #include "dnode.h"
@@ -36,6 +38,8 @@ static void vnodeBuildVloadMsg(char *pNode, void * param);
 static int      vnodeWalCallback(void *arg);
 static int32_t  vnodeSaveCfg(SMDCreateVnodeMsg *pVnodeCfg);
 static int32_t  vnodeReadCfg(SVnodeObj *pVnode);
+static int32_t  vnodeSaveVersion(SVnodeObj *pVnode);
+static int32_t  vnodeReadVersion(SVnodeObj *pVnode);
 static int      vnodeWalCallback(void *arg);
 static uint32_t vnodeGetFileInfo(void *ahandle, char *name, uint32_t *index, int32_t *size);
 static int      vnodeGetWalInfo(void *ahandle, char *name, uint32_t *index);
@@ -93,21 +97,21 @@ int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) {
   STsdbCfg tsdbCfg = {0};
   tsdbCfg.precision = pVnodeCfg->cfg.precision;
-  tsdbCfg.compression = -1;
+  tsdbCfg.compression = pVnodeCfg->cfg.compression;;
   tsdbCfg.tsdbId = pVnodeCfg->cfg.vgId;
-  tsdbCfg.maxTables = pVnodeCfg->cfg.maxSessions;
+  tsdbCfg.maxTables = pVnodeCfg->cfg.maxTables;
   tsdbCfg.daysPerFile = pVnodeCfg->cfg.daysPerFile;
-  tsdbCfg.minRowsPerFileBlock = -1;
-  tsdbCfg.maxRowsPerFileBlock = -1;
-  tsdbCfg.keep = -1;
-  tsdbCfg.maxCacheSize = -1;
+  tsdbCfg.minRowsPerFileBlock = pVnodeCfg->cfg.minRowsPerFileBlock;
+  tsdbCfg.maxRowsPerFileBlock = pVnodeCfg->cfg.maxRowsPerFileBlock;
+  tsdbCfg.keep = pVnodeCfg->cfg.daysToKeep;
+  tsdbCfg.maxCacheSize = pVnodeCfg->cfg.maxCacheSize;
 
   char tsdbDir[TSDB_FILENAME_LEN] = {0};
   sprintf(tsdbDir, "%s/vnode%d/tsdb", tsVnodeDir, pVnodeCfg->cfg.vgId);
   code = tsdbCreateRepo(tsdbDir, &tsdbCfg, NULL);
   if (code != TSDB_CODE_SUCCESS) {
-    dError("vgId:%d, failed to create tsdb in vnode, reason:%s", pVnodeCfg->cfg.vgId, tstrerror(terrno));
-    return terrno;
+    dError("vgId:%d, failed to create tsdb in vnode, reason:%s", pVnodeCfg->cfg.vgId, tstrerror(code));
+    return TSDB_CODE_VG_INIT_FAILED;
   }
 
   dPrint("vgId:%d, vnode is created, clog:%d", pVnodeCfg->cfg.vgId, pVnodeCfg->cfg.commitLog);
@@ -131,6 +135,39 @@ int32_t vnodeDrop(int32_t vgId) {
   return TSDB_CODE_SUCCESS;
 }
 
+int32_t vnodeAlter(void *param, SMDCreateVnodeMsg *pVnodeCfg) {
+  SVnodeObj *pVnode = param;
+
+  int32_t code = vnodeSaveCfg(pVnodeCfg);
+  if (code != TSDB_CODE_SUCCESS) {
+    dError("vgId:%d, failed to save vnode cfg, reason:%s", pVnodeCfg->cfg.vgId, tstrerror(code));
+    return code;
+  }
+
+  code = vnodeReadCfg(pVnode);
+  if (code != TSDB_CODE_SUCCESS) {
+    dError("pVnode:%p vgId:%d, failed to read cfg file", pVnode, pVnode->vgId);
+    taosDeleteIntHash(tsDnodeVnodesHash, pVnode->vgId);
+    return code;
+  }
+
+  code = syncReconfig(pVnode->sync, &pVnode->syncCfg);
+  if (code != TSDB_CODE_SUCCESS) {
+    dTrace("pVnode:%p vgId:%d, failed to alter vnode, canot reconfig sync, result:%s", pVnode, pVnode->vgId,
+           tstrerror(code));
+    return code;
+  }
+
+  code = tsdbConfigRepo(pVnode->tsdb, &pVnode->tsdbCfg);
+  if (code != TSDB_CODE_SUCCESS) {
+    dTrace("pVnode:%p vgId:%d, failed to alter vnode, canot reconfig tsdb, result:%s", pVnode, pVnode->vgId,
+           tstrerror(code));
+    return code;
+  }
+
+  dTrace("pVnode:%p vgId:%d, vnode is altered", pVnode, pVnode->vgId);
+  return TSDB_CODE_SUCCESS;
+}
+
 int32_t vnodeOpen(int32_t vnode, char *rootDir) {
   char temp[TSDB_FILENAME_LEN];
   pthread_once(&vnodeModuleInit, vnodeInit);
@@ -149,11 +186,13 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) {
     return code;
   }
 
+  vnodeReadVersion(pVnode);
+
   pVnode->wqueue = dnodeAllocateWqueue(pVnode);
   pVnode->rqueue = dnodeAllocateRqueue(pVnode);
 
   sprintf(temp, "%s/wal", rootDir);
   pVnode->wal = walOpen(temp, &pVnode->walCfg);
 
   SSyncInfo syncInfo;
   syncInfo.vgId = pVnode->vgId;
@@ -166,10 +205,10 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) {
   syncInfo.writeToCache = vnodeWriteToQueue;
   syncInfo.confirmForward = dnodeSendRpcWriteRsp;
   syncInfo.notifyRole = vnodeNotifyRole;
   pVnode->sync = syncStart(&syncInfo);
 
   pVnode->events = NULL;
   pVnode->cq = NULL;
 
   STsdbAppH appH = {0};
   appH.appH = (void *)pVnode;
@@ -227,7 +266,9 @@ void vnodeRelease(void *pVnodeRaw) {
   pVnode->wqueue = NULL;
 
   if (pVnode->status == TAOS_VN_STATUS_DELETING) {
-    // remove the whole directory
+    char rootDir[TSDB_FILENAME_LEN] = {0};
+    sprintf(rootDir, "%s/vnode%d", tsVnodeDir, vgId);
+    taosRemoveDir(rootDir);
   }
 
   free(pVnode);
@@ -246,7 +287,8 @@ void *vnodeGetVnode(int32_t vgId) {
   SVnodeObj **ppVnode = (SVnodeObj **)taosGetIntHashData(tsDnodeVnodesHash, vgId);
   if (ppVnode == NULL || *ppVnode == NULL) {
     terrno = TSDB_CODE_INVALID_VGROUP_ID;
-    assert(false);
+    dError("vgId:%d not exist");
+    return NULL;
   }
 
   return *ppVnode;
@@ -292,6 +334,7 @@ static void vnodeBuildVloadMsg(char *pNode, void * param) {
   pLoad->vgId = htonl(pVnode->vgId);
   pLoad->status = pVnode->status;
   pLoad->role = pVnode->role;
+  pLoad->replica = pVnode->syncCfg.replica;
 }
 
 static void vnodeCleanUp(SVnodeObj *pVnode) {
@@ -301,6 +344,7 @@ static void vnodeCleanUp(SVnodeObj *pVnode) {
   //syncStop(pVnode->sync);
   tsdbCloseRepo(pVnode->tsdb);
   walClose(pVnode->wal);
+  vnodeSaveVersion(pVnode);
   vnodeRelease(pVnode);
 }
@@ -328,88 +372,306 @@ static void vnodeNotifyRole(void *ahandle, int8_t role) {
 }
 
 static int32_t vnodeSaveCfg(SMDCreateVnodeMsg *pVnodeCfg) {
-  char cfgFile[TSDB_FILENAME_LEN * 2] = {0};
-  sprintf(cfgFile, "%s/vnode%d/config", tsVnodeDir, pVnodeCfg->cfg.vgId);
+  char cfgFile[TSDB_FILENAME_LEN + 30] = {0};
+  sprintf(cfgFile, "%s/vnode%d/config.json", tsVnodeDir, pVnodeCfg->cfg.vgId);
   FILE *fp = fopen(cfgFile, "w");
-  if (!fp) return errno;
-
-  fprintf(fp, "commitLog %d\n", pVnodeCfg->cfg.commitLog);
-  fprintf(fp, "wals %d\n", 3);
-  fprintf(fp, "arbitratorIp %d\n", pVnodeCfg->vpeerDesc[0].ip);
-  fprintf(fp, "quorum %d\n", 1);
-  fprintf(fp, "replica %d\n", pVnodeCfg->cfg.replications);
-  for (int32_t i = 0; i < pVnodeCfg->cfg.replications; i++) {
-    fprintf(fp, "index%d nodeId %d nodeIp %u name n%d\n", i, pVnodeCfg->vpeerDesc[i].dnodeId, pVnodeCfg->vpeerDesc[i].ip, pVnodeCfg->vpeerDesc[i].dnodeId);
-  }
-
-  fclose(fp);
-  dTrace("vgId:%d, save vnode cfg successed", pVnodeCfg->cfg.vgId);
-
-  return TSDB_CODE_SUCCESS;
+  if (!fp) {
+    dError("vgId:%d, failed to open vnode cfg file for write, error:%s", pVnodeCfg->cfg.vgId, strerror(errno));
+    return errno;
+  }
+
+  char    ipStr[20];
+  int32_t len = 0;
+  int32_t maxLen = 1000;
+  char *  content = calloc(1, maxLen + 1);
+
+  len += snprintf(content + len, maxLen - len, "{\n");
+  len += snprintf(content + len, maxLen - len, "  \"precision\": %d,\n", pVnodeCfg->cfg.precision);
+  len += snprintf(content + len, maxLen - len, "  \"compression\": %d,\n", pVnodeCfg->cfg.compression);
+  len += snprintf(content + len, maxLen - len, "  \"maxTables\": %d,\n", pVnodeCfg->cfg.maxTables);
+  len += snprintf(content + len, maxLen - len, "  \"daysPerFile\": %d,\n", pVnodeCfg->cfg.daysPerFile);
+  len += snprintf(content + len, maxLen - len, "  \"minRowsPerFileBlock\": %d,\n", pVnodeCfg->cfg.minRowsPerFileBlock);
+  len += snprintf(content + len, maxLen - len, "  \"maxRowsPerFileBlock\": %d,\n", pVnodeCfg->cfg.maxRowsPerFileBlock);
+  len += snprintf(content + len, maxLen - len, "  \"daysToKeep\": %d,\n", pVnodeCfg->cfg.daysToKeep);
+  len += snprintf(content + len, maxLen - len, "  \"maxCacheSize\": %" PRId64 ",\n", pVnodeCfg->cfg.maxCacheSize);
+  len += snprintf(content + len, maxLen - len, "  \"commitLog\": %d,\n", pVnodeCfg->cfg.commitLog);
+  len += snprintf(content + len, maxLen - len, "  \"wals\": %d,\n", pVnodeCfg->cfg.wals);
+
+  uint32_t ipInt = pVnodeCfg->cfg.arbitratorIp;
+  sprintf(ipStr, "%u.%u.%u.%u", ipInt & 0xFF, (ipInt >> 8) & 0xFF, (ipInt >> 16) & 0xFF, (uint8_t)(ipInt >> 24));
+  len += snprintf(content + len, maxLen - len, "  \"arbitratorIp\": \"%s\",\n", ipStr);
+
+  len += snprintf(content + len, maxLen - len, "  \"quorum\": %d,\n", pVnodeCfg->cfg.quorum);
+  len += snprintf(content + len, maxLen - len, "  \"replica\": %d,\n", pVnodeCfg->cfg.replications);
+  len += snprintf(content + len, maxLen - len, "  \"nodeInfos\": [{\n");
+  for (int32_t i = 0; i < pVnodeCfg->cfg.replications; i++) {
+    len += snprintf(content + len, maxLen - len, "    \"nodeId\": %d,\n", pVnodeCfg->nodes[i].nodeId);
+    uint32_t ipInt = pVnodeCfg->nodes[i].nodeIp;
+    sprintf(ipStr, "%u.%u.%u.%u", ipInt & 0xFF, (ipInt >> 8) & 0xFF, (ipInt >> 16) & 0xFF, (uint8_t)(ipInt >> 24));
+    len += snprintf(content + len, maxLen - len, "    \"nodeIp\": \"%s\",\n", ipStr);
+    len += snprintf(content + len, maxLen - len, "    \"nodeName\": \"%s\"\n", pVnodeCfg->nodes[i].nodeName);
+
+    if (i < pVnodeCfg->cfg.replications - 1) {
+      len += snprintf(content + len, maxLen - len, "  },{\n");
+    } else {
+      len += snprintf(content + len, maxLen - len, "  }]\n");
+    }
+  }
+  len += snprintf(content + len, maxLen - len, "}\n");
+
+  fwrite(content, 1, len, fp);
+  fclose(fp);
+  free(content);
+
+  dPrint("vgId:%d, save vnode cfg successed", pVnodeCfg->cfg.vgId);
+  return 0;
 }
 
-// TODO: this is a simple implement
 static int32_t vnodeReadCfg(SVnodeObj *pVnode) {
-  char option[5][16] = {0};
-  char cfgFile[TSDB_FILENAME_LEN * 2] = {0};
-  sprintf(cfgFile, "%s/vnode%d/config", tsVnodeDir, pVnode->vgId);
+  char cfgFile[TSDB_FILENAME_LEN + 30] = {0};
+  sprintf(cfgFile, "%s/vnode%d/config.json", tsVnodeDir, pVnode->vgId);
   FILE *fp = fopen(cfgFile, "r");
-  if (!fp) return errno;
-
-  int32_t commitLog = -1;
-  int32_t num = fscanf(fp, "%s %d", option[0], &commitLog);
-  if (num != 2) return TSDB_CODE_INVALID_FILE_FORMAT;
-  if (strcmp(option[0], "commitLog") != 0) return TSDB_CODE_INVALID_FILE_FORMAT;
-  if (commitLog == -1) return TSDB_CODE_INVALID_FILE_FORMAT;
-  pVnode->walCfg.commitLog = (int8_t)commitLog;
-
-  int32_t wals = -1;
-  num = fscanf(fp, "%s %d", option[0], &wals);
-  if (num != 2) return TSDB_CODE_INVALID_FILE_FORMAT;
-  if (strcmp(option[0], "wals") != 0) return TSDB_CODE_INVALID_FILE_FORMAT;
-  if (wals == -1) return TSDB_CODE_INVALID_FILE_FORMAT;
-  pVnode->walCfg.wals = (int8_t)wals;
+  if (!fp) {
+    dError("pVnode:%p vgId:%d, failed to open vnode cfg file for read, error:%s", pVnode, pVnode->vgId, strerror(errno));
+    return errno;
+  }
+
+  int   ret = TSDB_CODE_OTHERS;
+  int   maxLen = 1000;
+  char *content = calloc(1, maxLen + 1);
+  int   len = fread(content, 1, maxLen, fp);
+  if (len <= 0) {
+    free(content);
+    fclose(fp);
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, content is null", pVnode, pVnode->vgId);
+    return false;
+  }
+
+  cJSON *root = cJSON_Parse(content);
+  if (root == NULL) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, invalid json format", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+
+  cJSON *precision = cJSON_GetObjectItem(root, "precision");
+  if (!precision || precision->type != cJSON_Number) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, precision not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+  pVnode->tsdbCfg.precision = (int8_t)precision->valueint;
+
+  cJSON *compression = cJSON_GetObjectItem(root, "compression");
+  if (!compression || compression->type != cJSON_Number) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, compression not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+  pVnode->tsdbCfg.compression = (int8_t)compression->valueint;
+
+  cJSON *maxTables = cJSON_GetObjectItem(root, "maxTables");
+  if (!maxTables || maxTables->type != cJSON_Number) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, maxTables not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+  pVnode->tsdbCfg.maxTables = maxTables->valueint;
+
+  cJSON *daysPerFile = cJSON_GetObjectItem(root, "daysPerFile");
+  if (!daysPerFile || daysPerFile->type != cJSON_Number) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, daysPerFile not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+  pVnode->tsdbCfg.daysPerFile = daysPerFile->valueint;
+
+  cJSON *minRowsPerFileBlock = cJSON_GetObjectItem(root, "minRowsPerFileBlock");
+  if (!minRowsPerFileBlock || minRowsPerFileBlock->type != cJSON_Number) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, minRowsPerFileBlock not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+  pVnode->tsdbCfg.minRowsPerFileBlock = minRowsPerFileBlock->valueint;
+
+  cJSON *maxRowsPerFileBlock = cJSON_GetObjectItem(root, "maxRowsPerFileBlock");
+  if (!maxRowsPerFileBlock || maxRowsPerFileBlock->type != cJSON_Number) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, maxRowsPerFileBlock not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+  pVnode->tsdbCfg.maxRowsPerFileBlock = maxRowsPerFileBlock->valueint;
+
+  cJSON *daysToKeep = cJSON_GetObjectItem(root, "daysToKeep");
+  if (!daysToKeep || daysToKeep->type != cJSON_Number) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, daysToKeep not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+  pVnode->tsdbCfg.keep = daysToKeep->valueint;
+
+  cJSON *maxCacheSize = cJSON_GetObjectItem(root, "maxCacheSize");
+  if (!maxCacheSize || maxCacheSize->type != cJSON_Number) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, maxCacheSize not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+  pVnode->tsdbCfg.maxCacheSize = maxCacheSize->valueint;
+
+  cJSON *commitLog = cJSON_GetObjectItem(root, "commitLog");
+  if (!commitLog || commitLog->type != cJSON_Number) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, commitLog not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+  pVnode->walCfg.commitLog = (int8_t)commitLog->valueint;
+
+  cJSON *wals = cJSON_GetObjectItem(root, "wals");
+  if (!wals || wals->type != cJSON_Number) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, wals not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+  pVnode->walCfg.wals = (int8_t)wals->valueint;
   pVnode->walCfg.keep = 0;
 
-  int32_t arbitratorIp = -1;
-  num = fscanf(fp, "%s %u", option[0], &arbitratorIp);
-  if (num != 2) return TSDB_CODE_INVALID_FILE_FORMAT;
-  if (strcmp(option[0], "arbitratorIp") != 0) return TSDB_CODE_INVALID_FILE_FORMAT;
-  if (arbitratorIp == -1) return TSDB_CODE_INVALID_FILE_FORMAT;
-  pVnode->syncCfg.arbitratorIp = arbitratorIp;
+  cJSON *arbitratorIp = cJSON_GetObjectItem(root, "arbitratorIp");
+  if (!arbitratorIp || arbitratorIp->type != cJSON_String || arbitratorIp->valuestring == NULL) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, arbitratorIp not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+  pVnode->syncCfg.arbitratorIp = inet_addr(arbitratorIp->valuestring);
 
-  int32_t quorum = -1;
-  num = fscanf(fp, "%s %d", option[0], &quorum);
-  if (num != 2) return TSDB_CODE_INVALID_FILE_FORMAT;
-  if (strcmp(option[0], "quorum") != 0) return TSDB_CODE_INVALID_FILE_FORMAT;
-  if (quorum == -1) return TSDB_CODE_INVALID_FILE_FORMAT;
-  pVnode->syncCfg.quorum = (int8_t)quorum;
+  cJSON *quorum = cJSON_GetObjectItem(root, "quorum");
+  if (!quorum || quorum->type != cJSON_Number) {
+    dError("failed to read vnode cfg, quorum not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+  pVnode->syncCfg.quorum = (int8_t)quorum->valueint;
 
-  int32_t replica = -1;
-  num = fscanf(fp, "%s %d", option[0], &replica);
-  if (num != 2) return TSDB_CODE_INVALID_FILE_FORMAT;
-  if (strcmp(option[0], "replica") != 0) return TSDB_CODE_INVALID_FILE_FORMAT;
-  if (replica == -1) return TSDB_CODE_INVALID_FILE_FORMAT;
-  pVnode->syncCfg.replica = (int8_t)replica;
+  cJSON *replica = cJSON_GetObjectItem(root, "replica");
+  if (!replica || replica->type != cJSON_Number) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, replica not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+  pVnode->syncCfg.replica = (int8_t)replica->valueint;
 
-  for (int32_t i = 0; i < replica; ++i) {
-    int32_t  dnodeId = -1;
-    uint32_t dnodeIp = -1;
-    num = fscanf(fp, "%s %s %d %s %u %s %s", option[0], option[1], &dnodeId, option[2], &dnodeIp, option[3], pVnode->syncCfg.nodeInfo[i].name);
-    if (num != 7) return TSDB_CODE_INVALID_FILE_FORMAT;
-    if (strcmp(option[1], "nodeId") != 0) return TSDB_CODE_INVALID_FILE_FORMAT;
-    if (strcmp(option[2], "nodeIp") != 0) return TSDB_CODE_INVALID_FILE_FORMAT;
-    if (strcmp(option[3], "name") != 0) return TSDB_CODE_INVALID_FILE_FORMAT;
-    if (dnodeId == -1) return TSDB_CODE_INVALID_FILE_FORMAT;
-    if (dnodeIp == -1) return TSDB_CODE_INVALID_FILE_FORMAT;
-    pVnode->syncCfg.nodeInfo[i].nodeId = dnodeId;
-    pVnode->syncCfg.nodeInfo[i].nodeIp = dnodeIp;
+  cJSON *nodeInfos = cJSON_GetObjectItem(root, "nodeInfos");
+  if (!nodeInfos || nodeInfos->type != cJSON_Array) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, nodeInfos not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
   }
 
-  fclose(fp);
-  dTrace("pVnode:%p vgId:%d, read vnode cfg successed", pVnode, pVnode->vgId);
+  int size = cJSON_GetArraySize(nodeInfos);
+  if (size != pVnode->syncCfg.replica) {
+    dError("pVnode:%p vgId:%d, failed to read vnode cfg, nodeInfos size not matched", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
 
-  return TSDB_CODE_SUCCESS;
+  for (int i = 0; i < size; ++i) {
+    cJSON *nodeInfo = cJSON_GetArrayItem(nodeInfos, i);
+    if (nodeInfo == NULL) continue;
+
+    cJSON *nodeId = cJSON_GetObjectItem(nodeInfo, "nodeId");
+    if (!nodeId || nodeId->type != cJSON_Number) {
+      dError("pVnode:%p vgId:%d, failed to read vnode cfg, nodeId not found", pVnode, pVnode->vgId);
+      goto PARSE_OVER;
+    }
+    pVnode->syncCfg.nodeInfo[i].nodeId = nodeId->valueint;
+
+    cJSON *nodeIp = cJSON_GetObjectItem(nodeInfo, "nodeIp");
+    if (!nodeIp || nodeIp->type != cJSON_String || nodeIp->valuestring == NULL) {
+      dError("pVnode:%p vgId:%d, failed to read vnode cfg, nodeIp not found", pVnode, pVnode->vgId);
+      goto PARSE_OVER;
+    }
+    pVnode->syncCfg.nodeInfo[i].nodeIp = inet_addr(nodeIp->valuestring);
+
+    cJSON *nodeName = cJSON_GetObjectItem(nodeInfo, "nodeName");
+    if (!nodeName || nodeName->type != cJSON_String || nodeName->valuestring == NULL) {
+      dError("pVnode:%p vgId:%d, failed to read vnode cfg, nodeName not found", pVnode, pVnode->vgId);
+      goto PARSE_OVER;
+    }
+    strncpy(pVnode->syncCfg.nodeInfo[i].name, nodeName->valuestring, TSDB_NODE_NAME_LEN);
+  }
+
+  ret = 0;
+
+  dPrint("pVnode:%p vgId:%d, read vnode cfg successed, replcia:%d", pVnode, pVnode->vgId, pVnode->syncCfg.replica);
+  for (int32_t i = 0; i < pVnode->syncCfg.replica; i++) {
+    dPrint("pVnode:%p vgId:%d, dnode:%d, ip:%s name:%s", pVnode, pVnode->vgId, pVnode->syncCfg.nodeInfo[i].nodeId,
+           taosIpStr(pVnode->syncCfg.nodeInfo[i].nodeIp), pVnode->syncCfg.nodeInfo[i].name);
+  }
+
+PARSE_OVER:
+  free(content);
+  cJSON_Delete(root);
+  fclose(fp);
+  return ret;
 }
static int32_t vnodeSaveVersion(SVnodeObj *pVnode) {
  char versionFile[TSDB_FILENAME_LEN + 30] = {0};
  sprintf(versionFile, "%s/vnode%d/version.json", tsVnodeDir, pVnode->vgId);
  FILE *fp = fopen(versionFile, "w");
  if (!fp) {
    dError("pVnode:%p vgId:%d, failed to open vnode version file for write, error:%s", pVnode, pVnode->vgId, strerror(errno));
    return errno;
  }

  int32_t len = 0;
  int32_t maxLen = 30;
  char *  content = calloc(1, maxLen + 1);

  len += snprintf(content + len, maxLen - len, "{\n");
  len += snprintf(content + len, maxLen - len, "  \"version\": %" PRId64 "\n", pVnode->version);
  len += snprintf(content + len, maxLen - len, "}\n");

  fwrite(content, 1, len, fp);
  fclose(fp);
  free(content);

  dPrint("pVnode:%p vgId:%d, save vnode version succeeded", pVnode, pVnode->vgId);
  return 0;
}
static int32_t vnodeReadVersion(SVnodeObj *pVnode) {
  char versionFile[TSDB_FILENAME_LEN + 30] = {0};
  sprintf(versionFile, "%s/vnode%d/version.json", tsVnodeDir, pVnode->vgId);
  FILE *fp = fopen(versionFile, "r");
  if (!fp) {
    dError("pVnode:%p vgId:%d, failed to open vnode version file for read, error:%s", pVnode, pVnode->vgId, strerror(errno));
    return errno;
  }

  int   ret = TSDB_CODE_OTHERS;
  int   maxLen = 100;
  char *content = calloc(1, maxLen + 1);
  int   len = fread(content, 1, maxLen, fp);
  if (len <= 0) {
    free(content);
    fclose(fp);
    dError("pVnode:%p vgId:%d, failed to read vnode version, content is null", pVnode, pVnode->vgId);
    return TSDB_CODE_OTHERS;
  }

  cJSON *root = cJSON_Parse(content);
  if (root == NULL) {
    dError("pVnode:%p vgId:%d, failed to read vnode version, invalid json format", pVnode, pVnode->vgId);
    goto PARSE_OVER;
  }

  cJSON *version = cJSON_GetObjectItem(root, "version");
  if (!version || version->type != cJSON_Number) {
    dError("pVnode:%p vgId:%d, failed to read vnode version, version not found", pVnode, pVnode->vgId);
    goto PARSE_OVER;
  }
  pVnode->version = version->valueint;

  ret = 0;
  dPrint("pVnode:%p vgId:%d, read vnode version succeeded, version:%" PRId64, pVnode, pVnode->vgId, pVnode->version);

PARSE_OVER:
  free(content);
  cJSON_Delete(root);
  fclose(fp);
  return ret;
}
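
Taken together, the two readers above expect small JSON documents on disk. As a reader's aid, here is a minimal Python sketch that emits files of the shape the parsers check for; it covers only the fields validated in the code shown here (the real vnode cfg file carries further fields parsed earlier in the function), and every name and value below is an illustrative placeholder, not an official default:

import json

# Illustrative only: just the fields vnodeReadCfg validates above.
# The parser requires len(nodeInfos) == replica, otherwise it fails
# with "nodeInfos size not matched".
cfg = {
    "daysToKeep": 3650,
    "maxCacheSize": 4096,
    "commitLog": 1,
    "wals": 2,
    "arbitratorIp": "192.168.0.100",
    "quorum": 1,
    "replica": 2,
    "nodeInfos": [
        {"nodeId": 1, "nodeIp": "192.168.0.1", "nodeName": "node1"},
        {"nodeId": 2, "nodeIp": "192.168.0.2", "nodeName": "node2"},
    ],
}
with open("config.json", "w") as f:  # file name assumed for this sketch
    json.dump(cfg, f, indent=2)

# version.json is the single-field document vnodeReadVersion parses.
with open("version.json", "w") as f:
    json.dump({"version": 0}, f, indent=2)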

View File

@ -17,6 +17,7 @@ from util.log import *
from util.cases import *
from util.sql import *

class TDTestCase:
    def init(self, conn):
        tdLog.debug("start to execute %s" % __file__)
@ -24,25 +25,24 @@ class TDTestCase:
    def run(self):
        tdSql.prepare()

        ret = tdSql.execute('create table tb (ts timestamp, speed int)')

        insertRows = 10
        tdLog.info("insert %d rows" % (insertRows))
        for i in range(0, insertRows):
            ret = tdSql.execute(
                'insert into tb values (now + %dm, %d)' %
                (i, i))

        tdLog.info("insert earlier data")
        tdSql.execute('insert into tb values (now - 5m , 10)')
        tdSql.execute('insert into tb values (now - 6m , 10)')
        tdSql.execute('insert into tb values (now - 7m , 10)')
        tdSql.execute('insert into tb values (now - 8m , 10)')

        tdSql.query("select * from tb")
        tdSql.checkRows(insertRows + 4)

    def stop(self):
        tdSql.close()

View File

@ -1 +1,3 @@
#!/bin/bash
python3 ./test.py -f insert/basic.py $1
python3 ./test.py -s $1

View File

@ -15,22 +15,25 @@
# -*- coding: utf-8 -*-
import sys
import getopt
import subprocess
from distutils.log import warn as printf

from util.log import *
from util.dnodes import *
from util.cases import *

import taos

if __name__ == "__main__":
    fileName = "all"
    deployPath = ""
    masterIp = ""
    testCluster = False
    valgrind = 0
    stop = 0
    opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:scgh', [
        'file=', 'path=', 'master', 'stop', 'cluster', 'valgrind', 'help'])
    for key, value in opts:
        if key in ['-h', '--help']:
            tdLog.printNoPrefix(
@ -41,21 +44,50 @@ if __name__ == "__main__":
            tdLog.printNoPrefix('-c Test Cluster Flag')
            tdLog.printNoPrefix('-s stop All dnodes')
            sys.exit(0)

        if key in ['-f', '--file']:
            fileName = value

        if key in ['-p', '--path']:
            deployPath = value

        if key in ['-m', '--master']:
            masterIp = value

        if key in ['-c', '--cluster']:
            testCluster = True

        if key in ['-g', '--valgrind']:
            valgrind = 1

        if key in ['-s', '--stop']:
            stop = 1

    if (stop != 0):
        if (valgrind == 0):
            toBeKilled = "taosd"
        else:
            toBeKilled = "valgrind.bin"

        killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -HUP " % toBeKilled
        # os.system(killCmd)
        # time.sleep(1)

        psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
        processID = subprocess.check_output(psCmd, shell=True)

        while(processID):
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(psCmd, shell=True)

        tdLog.exit('stop All dnodes')

    if masterIp == "":
        tdDnodes.init(deployPath)
        tdDnodes.setTestCluster(testCluster)
        tdDnodes.setValgrind(valgrind)
        if testCluster:
            tdLog.notice("Procedures for testing cluster")
            if fileName == "all":
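
The stop branch above relies on one pattern worth noting: signal the matching processes, then poll ps until no matching pid is left. A self-contained sketch of that loop, assuming nothing beyond the standard library (the helper name is made up; the -w exact match and the SIGHUP choice mirror the code above):

import os
import subprocess
import time

def kill_and_wait(name):
    # pids of processes whose command matches `name` exactly (-w)
    ps_cmd = "ps -ef | grep -w %s | grep -v grep | awk '{print $2}'" % name
    kill_cmd = ps_cmd + " | xargs kill -HUP"
    pids = subprocess.check_output(ps_cmd, shell=True)
    while pids:
        os.system(kill_cmd)
        time.sleep(1)
        pids = subprocess.check_output(ps_cmd, shell=True)

# kill_and_wait("taosd")          # normal run
# kill_and_wait("valgrind.bin")   # run under valgrind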

View File

@ -15,6 +15,8 @@ import sys
import os
import time
import datetime
import inspect
import importlib
from util.log import *
@ -30,6 +32,10 @@ class TDCases:
        self.windowsCases = []
        self.clusterCases = []

    def __dynamicLoadModule(self, fileName):
        moduleName = fileName.replace(".py", "").replace("/", ".")
        return importlib.import_module(moduleName, package='..')
    def addWindows(self, name, case):
        self.windowsCases.append(TDCase(name, case))
@ -40,64 +46,93 @@ class TDCases:
        self.clusterCases.append(TDCase(name, case))

    def runAllLinux(self, conn):
        # TODO: load all Linux cases here
        runNum = 0
        for tmp in self.linuxCases:
            if tmp.name.find(fileName) != -1:
                case = testModule.TDTestCase()
                case.init(conn)
                case.run()
                case.stop()
                runNum += 1
                continue

        tdLog.notice("total %d Linux test case(s) executed" % (runNum))

    def runOneLinux(self, conn, fileName):
        testModule = self.__dynamicLoadModule(fileName)

        runNum = 0
        for tmp in self.linuxCases:
            if tmp.name.find(fileName) != -1:
                case = testModule.TDTestCase()
                case.init(conn)
                case.run()
                case.stop()
                runNum += 1
                continue

        tdLog.notice("total %d Linux test case(s) executed" % (runNum))

    def runAllWindows(self, conn):
        # TODO: load all Windows cases here
        runNum = 0
        for tmp in self.windowsCases:
            if tmp.name.find(fileName) != -1:
                case = testModule.TDTestCase()
                case.init(conn)
                case.run()
                case.stop()
                runNum += 1
                continue

        tdLog.notice("total %d Windows test case(s) executed" % (runNum))

    def runOneWindows(self, conn, fileName):
        testModule = self.__dynamicLoadModule(fileName)

        runNum = 0
        for tmp in self.windowsCases:
            if tmp.name.find(fileName) != -1:
                case = testModule.TDTestCase()
                case.init(conn)
                case.run()
                case.stop()
                runNum += 1
                continue

        tdLog.notice("total %d Windows case(s) executed" % (runNum))

    def runAllCluster(self):
        # TODO: load all cluster case module here

        runNum = 0
        for tmp in self.clusterCases:
            if tmp.name.find(fileName) != -1:
                tdLog.notice("run cases like %s" % (fileName))
                case = testModule.TDTestCase()
                case.init()
                case.run()
                case.stop()
                runNum += 1
                continue

        tdLog.notice("total %d Cluster test case(s) executed" % (runNum))

    def runOneCluster(self, fileName):
        testModule = self.__dynamicLoadModule(fileName)

        runNum = 0
        for tmp in self.clusterCases:
            if tmp.name.find(fileName) != -1:
                tdLog.notice("run cases like %s" % (fileName))
                case = testModule.TDTestCase()
                case.init()
                case.run()
                case.stop()
                runNum += 1
                continue

        tdLog.notice("total %d Cluster test case(s) executed" % (runNum))


tdCases = TDCases()
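
The load mechanism above is the interesting part: __dynamicLoadModule rewrites a case path such as insert/basic.py into a dotted module name and imports it at runtime, so the runner no longer needs one hard-coded import per case. A standalone sketch of the same idea (load_case is a made-up name; TDTestCase is the class each case file defines, as in the framework above):

import importlib

def load_case(file_name):
    # "insert/basic.py" -> "insert.basic"
    module_name = file_name.replace(".py", "").replace("/", ".")
    module = importlib.import_module(module_name)
    return module.TDTestCase()

# case = load_case("insert/basic.py")
# case.init(conn)
# case.run()
# case.stop()

Note that the package='..' argument in the framework's version only affects relative module names (those starting with a dot), so for plain names like insert.basic the import resolves the same either way.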

View File

@ -14,6 +14,7 @@
import sys
import os
import os.path
import subprocess
from util.log import *
@ -78,10 +79,18 @@ class TDDnode:
        self.index = index
        self.running = 0
        self.deployed = 0
        self.testCluster = False
        self.valgrind = 0

    def init(self, path):
        self.path = path

    def setTestCluster(self, value):
        self.testCluster = value

    def setValgrind(self, value):
        self.valgrind = value
    def deploy(self):
        self.logDir = "%s/sim/dnode%d/log" % (self.path, self.index)
        self.dataDir = "%s/sim/dnode%d/data" % (self.path, self.index)
@ -116,7 +125,9 @@ class TDDnode:
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

        if self.testCluster:
            self.startIP()
        self.cfg("masterIp", "192.168.0.1")
        self.cfg("secondIp", "192.168.0.2")
        self.cfg("publicIp", "192.168.0.%d" % (self.index))
@ -164,9 +175,18 @@ class TDDnode:
        if self.deployed == 0:
            tdLog.exit("dnode:%d is not deployed" % (self.index))

        if self.valgrind == 0:
            cmd = "nohup %staosd -c %s > /dev/null 2>&1 & " % (
                binPath, self.cfgDir)
        else:
            valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"
            cmd = "nohup %s %staosd -c %s 2>&1 & " % (
                valgrindCmdline, binPath, self.cfgDir)

        print(cmd)

        if os.system(cmd) != 0:
            tdLog.exit(cmd)
        self.running = 1
@ -275,8 +295,16 @@ class TDDnodes:
        self.sim.init(self.path)
        self.sim.deploy()

    def setTestCluster(self, value):
        self.testCluster = value

    def setValgrind(self, value):
        self.valgrind = value

    def deploy(self, index):
        self.check(index)
        self.dnodes[index - 1].setTestCluster(self.testCluster)
        self.dnodes[index - 1].setValgrind(self.valgrind)
        self.dnodes[index - 1].deploy()
    def cfg(self, index, option, value):
@ -297,11 +325,15 @@ class TDDnodes:
    def startIP(self, index):
        self.check(index)

        if self.testCluster:
            self.dnodes[index - 1].startIP()

    def stopIP(self, index):
        self.check(index)

        if self.dnodes[index - 1].testCluster:
            self.dnodes[index - 1].stopIP()
    def check(self, index):
        if index < 1 or index > 10:
@ -312,11 +344,14 @@ class TDDnodes:
        for i in range(len(self.dnodes)):
            self.dnodes[i].stop()

        psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True)
        if processID:
            cmd = "sudo systemctl stop taosd"
            os.system(cmd)
        # if os.system(cmd) != 0 :
        #     tdLog.exit(cmd)

        cmd = "ps -ef | grep -w taosd | grep 'dnode' | grep -v grep | awk '{print $2}' && pkill -sigkill taosd"
        os.system(cmd)
        # if os.system(cmd) != 0 :
        #     tdLog.exit(cmd)
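
TDDnode.start in this file now forks taosd either directly or under valgrind's memcheck; the only differences are the wrapper and whether output is kept. A condensed sketch of the command construction under those assumptions (build_start_cmd is a made-up helper; bin_path and cfg_dir are placeholders):

VALGRIND_CMDLINE = ("valgrind --tool=memcheck --leak-check=full "
                    "--show-reachable=no --track-origins=yes "
                    "--show-leak-kinds=all -v --workaround-gcc296-bugs=yes")

def build_start_cmd(bin_path, cfg_dir, valgrind=0):
    # Plain runs discard output; valgrind runs keep it so the memcheck
    # report can be inspected after the dnode stops.
    if valgrind == 0:
        return "nohup %staosd -c %s > /dev/null 2>&1 &" % (bin_path, cfg_dir)
    return "nohup %s %staosd -c %s 2>&1 &" % (VALGRIND_CMDLINE, bin_path, cfg_dir)

# print(build_start_cmd("/usr/local/bin/", "/etc/taos/", valgrind=1))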

View File

@ -42,7 +42,7 @@ class TDLog:
        printf("\033[1;31m%s %s\033[0m" % (datetime.datetime.now(), err))
        sys.exit(1)

    def printNoPrefix(self, info):
        printf("\033[1;36m%s\033[0m" % (info))

View File

@ -3,6 +3,6 @@
run general/db/testSuite.sim
run general/insert/testSuite.sim
run general/table/testSuite.sim
run general/user/basicSuite.sim
##################################

View File

@ -66,6 +66,4 @@ print $data10 $data11 $data22
print $data20 $data11 $data22
print $data30 $data31 $data32

View File

@ -0,0 +1 @@
run general/user/basic1.sim

View File

@ -1 +1,6 @@
run general/user/basic1.sim
run general/user/pass_alter.sim
run general/user/pass_len.sim
run general/user/user_create.sim
run general/user/user_len.sim
#run general/user/monitor.sim

View File

@ -4,8 +4,6 @@ system sh/ip.sh -i 1 -s up
system sh/deploy.sh -n dnode1 -m 192.168.0.1 -i 192.168.0.1
system sh/cfg.sh -n dnode1 -c clog -v 0
system sh/exec.sh -n dnode1 -s start
sql connect
print =============== step1

View File

@ -93,7 +93,7 @@ echo "privateIp $NODE_IP" >> $TAOS_CFG
echo "dDebugFlag 199" >> $TAOS_CFG
echo "mDebugFlag 199" >> $TAOS_CFG
echo "sdbDebugFlag 199" >> $TAOS_CFG
echo "rpcDebugFlag 135" >> $TAOS_CFG
echo "tmrDebugFlag 131" >> $TAOS_CFG
echo "cDebugFlag 135" >> $TAOS_CFG
echo "httpDebugFlag 131" >> $TAOS_CFG
@ -105,7 +105,7 @@ echo "numOfThreadsPerCore 2.0" >> $TAOS_CFG
echo "defaultPass taosdata" >> $TAOS_CFG
echo "numOfLogLines 100000000" >> $TAOS_CFG
echo "mgmtEqualVnodeNum 0" >> $TAOS_CFG
echo "clog 2" >> $TAOS_CFG
echo "statusInterval 1" >> $TAOS_CFG
echo "numOfTotalVnodes 4" >> $TAOS_CFG
echo "asyncLog 0" >> $TAOS_CFG

View File

@ -11,7 +11,8 @@ set +e
FILE_NAME=
RELEASE=0
ASYNC=0
VALGRIND=0
while getopts "f:av" arg
do
  case $arg in
    f)
@ -20,6 +21,9 @@ do
    a)
      ASYNC=1
      ;;
    v)
      VALGRIND=1
      ;;
    ?)
      echo "unknown argument"
      ;;
@ -30,6 +34,8 @@ cd .
sh/ip.sh -i 1 -s up > /dev/null 2>&1 &
sh/ip.sh -i 2 -s up > /dev/null 2>&1 &
sh/ip.sh -i 3 -s up > /dev/null 2>&1 &
sh/ip.sh -i 4 -s up > /dev/null 2>&1 &
sh/ip.sh -i 5 -s up > /dev/null 2>&1 &
# Get responsible directories
CODE_DIR=`dirname $0`
@ -96,10 +102,14 @@ ulimit -c unlimited
#sudo sysctl -w kernel.core_pattern=$TOP_DIR/core.%p.%e

if [ -n "$FILE_NAME" ]; then
  echo "------------------------------------------------------------------------"
  if [ $VALGRIND -eq 1 ]; then
    echo valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes --log-file=${CODE_DIR}/../script/valgrind.log $PROGRAM -c $CFG_DIR -f $FILE_NAME
    valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes --log-file=${CODE_DIR}/../script/valgrind.log $PROGRAM -c $CFG_DIR -f $FILE_NAME
  else
    echo "ExecuteCmd:" $PROGRAM -c $CFG_DIR -f $FILE_NAME
    $PROGRAM -c $CFG_DIR -f $FILE_NAME
  fi
else
  echo "ExecuteCmd:" $PROGRAM -c $CFG_DIR -f basicSuite.sim
  echo "------------------------------------------------------------------------"

View File

@ -25,18 +25,18 @@ system sh/cfg.sh -n dnode2 -c mgmtEqualVnodeNum -v 4
system sh/cfg.sh -n dnode3 -c mgmtEqualVnodeNum -v 4
system sh/cfg.sh -n dnode4 -c mgmtEqualVnodeNum -v 4

system sh/cfg.sh -n dnode1 -c clog -v 2
system sh/cfg.sh -n dnode2 -c clog -v 2
system sh/cfg.sh -n dnode3 -c clog -v 2
system sh/cfg.sh -n dnode4 -c clog -v 2

system sh/cfg.sh -n dnode1 -c clog -v 2
system sh/cfg.sh -n dnode2 -c clog -v 2
system sh/cfg.sh -n dnode3 -c clog -v 2
system sh/cfg.sh -n dnode4 -c clog -v 2

print ========== step1
system sh/exec_up.sh -n dnode1 -s start
sql connect
sleep 3000
@ -49,15 +49,15 @@ sql insert into d1.t1 values(now+4s, 12)
sql insert into d1.t1 values(now+5s, 11)

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
if $data3_1 != 3 then
  return -1
endi

print ========== step2
sleep 2000
sql create dnode 192.168.0.2
system sh/exec_up.sh -n dnode2 -s start

$x = 0
show2:
@ -68,12 +68,12 @@ show2:
endi

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 0 then
  goto show2
endi
if $data3_2 != 1 then
  goto show2
endi
@ -87,12 +87,12 @@ sql insert into d2.t2 values(now+4s, 22)
sql insert into d2.t2 values(now+5s, 21)

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 0 then
  return -1
endi
if $data3_2 != 2 then
  return -1
endi
@ -108,23 +108,23 @@ show4:
endi

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 2 then
  goto show4
endi
if $data3_2 != null then
  goto show4
endi
if $rows != 1 then
  goto show4
endi

system sh/exec_up.sh -n dnode2 -s stop -x SIGINT

print ========== step5
sql create dnode 192.168.0.3
system sh/exec_up.sh -n dnode3 -s start

$x = 0
show5:
@ -135,16 +135,16 @@ show5:
endi

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
if $data3_1 != 4 then
  goto show5
endi
if $data3_2 != null then
  goto show5
endi
if $data3_3 != 2 then
  goto show5
endi
@ -158,23 +158,23 @@ sql insert into d3.t3 values(now+4s, 32)
sql insert into d3.t3 values(now+5s, 31)

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
if $data3_1 != 0 then
  return -1
endi
if $data3_2 != null then
  return -1
endi
if $data3_3 != 3 then
  return -1
endi

print ========== step7
sql create dnode 192.168.0.4
system sh/exec_up.sh -n dnode4 -s start

$x = 0
show7:
@ -185,20 +185,20 @@ show7:
endi

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
if $data3_1 != 0 then
  goto show7
endi
if $data3_2 != null then
  goto show7
endi
if $data3_3 != 2 then
  goto show7
endi
if $data3_4 != 1 then
  goto show7
endi
@ -212,21 +212,21 @@ sql insert into d4.t4 values(now+4s, 42)
sql insert into d4.t4 values(now+5s, 41)

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
if $data3_1 != 0 then
  return -1
endi
if $data3_2 != null then
  return -1
endi
if $data3_3 != 2 then
  return -1
endi
if $data3_4 != 2 then
  return -1
endi
@ -242,25 +242,25 @@ show9:
endi

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
if $data3_1 != 0 then
  goto show9
endi
if $data3_2 != null then
  goto show9
endi
if $data3_3 != null then
  goto show9
endi
if $data3_4 != 4 then
  goto show9
endi

system sh/exec_up.sh -n dnode3 -s stop -x SIGINT

print ========== step10
sql select * from d1.t1 order by t desc
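
All of these balance scripts lean on the same idiom: re-run "sql show dnodes" and jump back to a showN label until every dnode reports the expected openVnodes count, with an $x counter capping the retries. The same polling idea written out as a small Python helper, purely for clarity; poll_until and open_vnodes are illustrative names, not part of the framework:

import time

def poll_until(check, retries=20, interval=2.0):
    # Re-evaluate `check` until it passes or retries run out,
    # mirroring the $x counter / goto showN loops above.
    for _ in range(retries):
        if check():
            return True
        time.sleep(interval)
    return False

# ok = poll_until(lambda: open_vnodes(1) == 0 and open_vnodes(2) == 1)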

View File

@ -37,13 +37,13 @@ system sh/cfg.sh -n dnode4 -c clog -v 1
system sh/cfg.sh -n dnode5 -c clog -v 1

print ========== step1
system sh/exec_up.sh -n dnode1 -s start
sql connect

sql create dnode 192.168.0.2
sql create dnode 192.168.0.3
system sh/exec_up.sh -n dnode2 -s start
system sh/exec_up.sh -n dnode3 -s start
sleep 3000

sql create database d1 replica 2 tables 4
@ -63,16 +63,16 @@ sql insert into d2.t2 values(now+4s, 22)
sql insert into d2.t2 values(now+5s, 21)

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
if $data3_1 != 4 then
  return -1
endi
if $data3_2 != 2 then
  return -1
endi
if $data3_3 != 2 then
  return -1
endi
@ -88,24 +88,24 @@ show2:
endi

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
if $data3_1 != 2 then
  goto show2
endi
if $data3_2 != null then
  goto show2
endi
if $data3_3 != 2 then
  goto show2
endi

system sh/exec_up.sh -n dnode2 -s stop -x SIGINT

print ========== step3
sql create dnode 192.168.0.4
system sh/exec_up.sh -n dnode4 -s start

$x = 0
show3:
@ -116,20 +116,20 @@ show3:
endi

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
if $data3_1 != 4 then
  goto show3
endi
if $data3_2 != null then
  goto show3
endi
if $data3_3 != 2 then
  goto show3
endi
if $data3_4 != 2 then
  goto show3
endi
@ -143,26 +143,26 @@ sql insert into d3.t3 values(now+4s, 32)
sql insert into d3.t3 values(now+5s, 31)

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
if $data3_1 != 4 then
  return -1
endi
if $data3_2 != null then
  return -1
endi
if $data3_3 != 1 then
  return -1
endi
if $data3_4 != 1 then
  return -1
endi

print ========== step5
sql create dnode 192.168.0.5
system sh/exec_up.sh -n dnode5 -s start

$x = 0
show5:
@ -173,24 +173,24 @@ show5:
endi

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
print 192.168.0.5 openVnodes $data3_5
if $data3_1 != 4 then
  goto show5
endi
if $data3_2 != null then
  goto show5
endi
if $data3_3 != 2 then
  goto show5
endi
if $data3_4 != 2 then
  goto show5
endi
if $data3_5 != 2 then
  goto show5
endi
@ -206,28 +206,28 @@ show6:
endi

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
print 192.168.0.5 openVnodes $data3_5
if $data3_1 != 4 then
  goto show6
endi
if $data3_2 != null then
  goto show6
endi
if $data3_3 != null then
  goto show6
endi
if $data3_4 != 1 then
  goto show6
endi
if $data3_5 != 1 then
  goto show6
endi

system sh/exec_up.sh -n dnode3 -s stop -x SIGINT

print ========== step7
sql select * from d1.t1 order by t desc

View File

@ -43,15 +43,15 @@ system sh/cfg.sh -n dnode5 -c clog -v 1
system sh/cfg.sh -n dnode6 -c clog -v 1

print ========== step1
system sh/exec_up.sh -n dnode1 -s start
sql connect

sql create dnode 192.168.0.2
sql create dnode 192.168.0.3
sql create dnode 192.168.0.4
system sh/exec_up.sh -n dnode2 -s start
system sh/exec_up.sh -n dnode3 -s start
system sh/exec_up.sh -n dnode4 -s start
sleep 3000

sql create database d1 replica 3 tables 4
@ -71,21 +71,21 @@ sql insert into d2.t2 values(now+4s, 22)
sql insert into d2.t2 values(now+5s, 21)

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
if $data3_1 != 4 then
  return -1
endi
if $data3_2 != 2 then
  return -1
endi
if $data3_3 != 2 then
  return -1
endi
if $data3_4 != 2 then
  return -1
endi
@ -101,29 +101,29 @@ show2:
endi

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
if $data3_1 != 2 then
  goto show2
endi
if $data3_2 != null then
  goto show2
endi
if $data3_3 != 2 then
  goto show2
endi
if $data3_4 != 2 then
  goto show2
endi

system sh/exec_up.sh -n dnode2 -s stop -x SIGINT

print ========== step
sql create dnode 192.168.0.5
system sh/exec_up.sh -n dnode5 -s start

$x = 0
show3:
@ -134,25 +134,25 @@ show3:
endi

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
print 192.168.0.5 openVnodes $data3_5
if $data3_1 != 4 then
  goto show3
endi
if $data3_2 != null then
  goto show3
endi
if $data3_3 != 2 then
  goto show3
endi
if $data3_4 != 2 then
  goto show3
endi
if $data3_5 != 2 then
  goto show3
endi
@ -174,31 +174,31 @@ show4:
endi

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
print 192.168.0.5 openVnodes $data3_5
if $data3_1 != 4 then
  goto show4
endi
if $data3_2 != null then
  goto show4
endi
if $data3_3 != 1 then
  goto show4
endi
if $data3_4 != 1 then
  goto show4
endi
if $data3_5 != 1 then
  goto show4
endi

print ========== step5
sql create dnode 192.168.0.2
system sh/exec_up.sh -n dnode2 -s start

$x = 0
show5:
@ -209,16 +209,16 @@ show5:
endi

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
print 192.168.0.5 openVnodes $data3_5
if $data3_1 != 4 then
  goto show5
endi
if $data3_2 != 2 then
  goto show5
endi
@ -236,29 +236,29 @@ show6:
endi

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
print 192.168.0.5 openVnodes $data3_5
if $data3_1 != 4 then
  goto show6
endi
if $data3_2 != 1 then
  goto show6
endi
if $data3_3 != null then
  goto show6
endi
if $data3_4 != 1 then
  goto show6
endi
if $data3_5 != 1 then
  goto show6
endi

system sh/exec_up.sh -n dnode3 -s stop -x SIGINT

print ========== step7
sql select * from d1.t1 order by t desc

View File

@ -31,7 +31,7 @@ system sh/cfg.sh -n dnode3 -c clog -v 1
system sh/cfg.sh -n dnode4 -c clog -v 1

print ========== step1
system sh/exec_up.sh -n dnode1 -s start
sql connect
sleep 3000
@ -52,14 +52,14 @@ sql insert into d2.t2 values(now+4s, 22)
sql insert into d2.t2 values(now+5s, 21)

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
if $data3_1 != 2 then
  return -1
endi

print ========== step2
sql create dnode 192.168.0.2
system sh/exec_up.sh -n dnode2 -s start

$x = 0
show2:
@ -70,12 +70,12 @@ show2:
endi

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 4 then
  goto show2
endi
if $data3_2 != 2 then
  goto show2
endi
@ -96,18 +96,18 @@ show3:
  return -1
endi

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 3 then
  goto show3
endi
if $data3_2 != 1 then
  goto show3
endi

print ========== step3
sql create dnode 192.168.0.3
system sh/exec_up.sh -n dnode3 -s start

$x = 0
show4:
@ -117,16 +117,16 @@ show4:
  return -1
endi

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
if $data3_1 != 4 then
  goto show4
endi
if $data3_2 != 2 then
  goto show4
endi
if $data3_3 != 2 then
  goto show4
endi
@ -141,20 +141,20 @@ show5:
  return -1
endi

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
if $data3_1 != 3 then
  goto show5
endi
if $data3_2 != null then
  goto show5
endi
if $data3_3 != 1 then
  goto show5
endi

system sh/exec_up.sh -n dnode2 -s stop -x SIGINT

print ========== step6
sql select * from d1.t1 order by t desc

View File

@ -22,19 +22,19 @@ system sh/cfg.sh -n dnode1 -c monitor -v 1
system sh/cfg.sh -n dnode2 -c monitor -v 0

print ========== step1
system sh/exec_up.sh -n dnode1 -s start
sql connect
sleep 5000

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
if $data3_1 != 3 then
  return -1
endi

print ========== step2
sql create dnode 192.168.0.2
system sh/exec_up.sh -n dnode2 -s start

$x = 0
show2:
@ -45,12 +45,12 @@ show2:
endi

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 4 then
  goto show2
endi
if $data3_2 != 3 then
  goto show2
endi

View File

@ -29,10 +29,10 @@ system sh/cfg.sh -n dnode2 -c clog -v 1
system sh/cfg.sh -n dnode3 -c clog -v 1

print ========== step1
system sh/exec_up.sh -n dnode1 -s start
sql connect
sql create dnode 192.168.0.2
system sh/exec_up.sh -n dnode2 -s start
sleep 3000

sql show dnodes
@ -44,7 +44,7 @@ if $data4_192.168.0.2 != ready then
endi

print ========== step2
system sh/exec_up.sh -n dnode2 -s stop -x SIGINT
sleep 6000

sql show dnodes

View File

@ -29,10 +29,10 @@ system sh/cfg.sh -n dnode2 -c clog -v 1
system sh/cfg.sh -n dnode3 -c clog -v 1

print ========== step1
system sh/exec_up.sh -n dnode1 -s start
sql connect
sql create dnode 192.168.0.2
system sh/exec_up.sh -n dnode2 -s start
sleep 3000

sql create database d1 replica 2 tables 4
@ -48,7 +48,7 @@ if $data4_192.168.0.2 != ready then
endi

print ========== step2
system sh/exec_up.sh -n dnode2 -s stop -x SIGINT
sleep 5000

sql show dnodes
@ -72,7 +72,7 @@ endi
print ========== step4
sql create dnode 192.168.0.3
system sh/exec_up.sh -n dnode3 -s start
sql drop dnode 192.168.0.2
sleep 5000

View File

@ -31,7 +31,7 @@ system sh/cfg.sh -n dnode3 -c clog -v 1
system sh/cfg.sh -n dnode4 -c clog -v 1

print ========== step1
system sh/exec_up.sh -n dnode1 -s start
sql connect
sleep 3000
@ -52,14 +52,14 @@ sql insert into d2.t2 values(now+4s, 22)
sql insert into d2.t2 values(now+5s, 21)

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
if $data3_1 != 2 then
  return -1
endi

print ========== step2
sql create dnode 192.168.0.2
system sh/exec_up.sh -n dnode2 -s start
sleep 9000

sql create database d3 replica 2 tables 4
@ -79,12 +79,12 @@ show2:
endi

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 3 then
  goto show2
endi
if $data3_2 != 1 then
  goto show2
endi
@ -101,12 +101,12 @@ show3:
endi

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2 $data5_192.168.0.2

print ========== step4
sql create dnode 192.168.0.3
system sh/exec_up.sh -n dnode3 -s start

$x = 0
show4:
@ -117,18 +117,18 @@ show4:
endi

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
if $data3_2 != null then
  goto show4
endi

system sh/exec_up.sh -n dnode2 -s stop -x SIGINT

print ========== step5
sql create dnode 192.168.0.4
system sh/exec_up.sh -n dnode4 -s start

$x = 0
show5:
@ -138,20 +138,20 @@ show5:
  return -1
endi

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
print 192.168.0.4 openVnodes $data3_4
if $data3_1 != 4 then
  goto show5
endi
if $data3_2 != null then
  goto show5
endi
if $data3_3 != 2 then
  goto show5
endi
if $data3_4 != 2 then
  goto show5
endi

View File

@ -31,7 +31,7 @@ system sh/cfg.sh -n dnode3 -c clog -v 1
system sh/cfg.sh -n dnode4 -c clog -v 1

print ========== step1
system sh/exec_up.sh -n dnode1 -s start
sql connect
sleep 3000
@ -52,14 +52,14 @@ sql insert into d2.t2 values(now+4s, 22)
sql insert into d2.t2 values(now+5s, 21)

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
if $data3_1 != 2 then
  return -1
endi

print ========== step2
sql create dnode 192.168.0.2
system sh/exec_up.sh -n dnode2 -s start
sleep 9000

sql create database d3 replica 2 tables 4
@ -79,17 +79,17 @@ show2:
endi

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 3 then
  goto show2
endi
if $data3_2 != 1 then
  goto show2
endi

print ========== step3
system sh/exec_up.sh -n dnode2 -s stop -x SIGINT
sql drop dnode 192.168.0.2
sleep 7001
@ -102,12 +102,12 @@ show3:
endi

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2 $data5_192.168.0.2

print ========== step4
sql create dnode 192.168.0.3
system sh/exec_up.sh -n dnode3 -s start

$x = 0
show4:
@ -118,16 +118,16 @@ show4:
endi

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 openVnodes $data3_3
if $data3_2 != null then
  goto show4
endi
if $data3_1 != 3 then
  goto show4
endi
if $data3_3 != 1 then
  goto show4
endi

View File

@ -31,7 +31,7 @@ system sh/cfg.sh -n dnode3 -c clog -v 1
system sh/cfg.sh -n dnode4 -c clog -v 1

print ========== step1
system sh/exec_up.sh -n dnode1 -s start
sql connect

sql create database d1 tables 4
@ -43,14 +43,14 @@ sql insert into d1.t1 values(now+4s, 12)
sql insert into d1.t1 values(now+5s, 11)

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
if $data3_1 != 3 then
  return -1
endi

print ========== step2
sql create dnode 192.168.0.2
system sh/exec_up.sh -n dnode2 -s start

$x = 0
show2:
@ -60,12 +60,12 @@ show2:
  return -1
endi

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 4 then
  goto show2
endi
if $data3_2 != 3 then
  goto show2
endi
@ -81,12 +81,12 @@ sql insert into d2.t2 values(now+5s, 21)
$x = 0

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 4 then
  return -1
endi
if $data3_2 != 2 then
  return -1
endi
@ -101,19 +101,19 @@ show4:
  return -1
endi

sql show dnodes
print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 openVnodes $data3_2
if $data3_1 != 2 then
  goto show4
endi
if $data3_2 != null then
  goto show4
endi
if $rows != 1 then
  goto show4
endi

system sh/exec_up.sh -n dnode2 -s stop -x SIGINT

print ========== step5
sleep 2000
@ -125,7 +125,7 @@ system sh/cfg.sh -n dnode2 -c balanceMonitorInterval -v 1
system sh/cfg.sh -n dnode2 -c balanceStartInterval -v 10
system sh/cfg.sh -n dnode2 -c mgmtEqualVnodeNum -v 4
system sh/cfg.sh -n dnode2 -c clog -v 1
system sh/exec_up.sh -n dnode2 -s start

$x = 0
show5:
@ -135,12 +135,12 @@ show5:
return -1 return -1
endi endi
sql show dnodes sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1 print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 freeVnodes $data3_192.168.0.2 print 192.168.0.2 openVnodes $data3_2
if $data3_192.168.0.1 != 4 then if $data3_1 != 4 then
goto show5 goto show5
endi endi
if $data3_192.168.0.2 != 2 then if $data3_2 != 2 then
goto show5 goto show5
endi endi
@ -154,18 +154,18 @@ sql insert into d3.t3 values(now+4s, 32)
sql insert into d3.t3 values(now+5s, 31) sql insert into d3.t3 values(now+5s, 31)
sql show dnodes sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1 print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 freeVnodes $data3_192.168.0.2 print 192.168.0.2 openVnodes $data3_2
if $data3_192.168.0.1 != 4 then if $data3_1 != 4 then
return -1 return -1
endi endi
if $data3_192.168.0.2 != 1 then if $data3_2 != 1 then
return -1 return -1
endi endi
print ========== step7 print ========== step7
sql create dnode 192.168.0.3 sql create dnode 192.168.0.3
system sh/exec.sh -n dnode3 -s start system sh/exec_up.sh -n dnode3 -s start
$x = 0 $x = 0
show7: show7:
@ -176,16 +176,16 @@ show7:
endi endi
sql show dnodes sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1 print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 freeVnodes $data3_192.168.0.2 print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 freeVnodes $data3_192.168.0.3 print 192.168.0.3 openVnodes $data3_3
if $data3_192.168.0.1 != 4 then if $data3_1 != 4 then
goto show7 goto show7
endi endi
if $data3_192.168.0.2 != 2 then if $data3_2 != 2 then
goto show7 goto show7
endi endi
if $data3_192.168.0.3 != 3 then if $data3_3 != 3 then
goto show7 goto show7
endi endi
@ -206,16 +206,16 @@ show8:
return -1 return -1
endi endi
sql show dnodes sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1 print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 freeVnodes $data3_192.168.0.2 print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 freeVnodes $data3_192.168.0.3 print 192.168.0.3 openVnodes $data3_3
if $data3_192.168.0.1 != 4 then if $data3_1 != 4 then
goto show8 goto show8
endi endi
if $data3_192.168.0.2 != 2 then if $data3_2 != 2 then
goto show8 goto show8
endi endi
if $data3_192.168.0.3 != 2 then if $data3_3 != 2 then
goto show8 goto show8
endi endi
@ -231,20 +231,20 @@ show9:
endi endi
sql show dnodes sql show dnodes
print 192.168.0.1 freeVnodes $data3_192.168.0.1 print 192.168.0.1 openVnodes $data3_1
print 192.168.0.2 freeVnodes $data3_192.168.0.2 print 192.168.0.2 openVnodes $data3_2
print 192.168.0.3 freeVnodes $data3_192.168.0.3 print 192.168.0.3 openVnodes $data3_3
if $data3_192.168.0.1 != 4 then if $data3_1 != 4 then
goto show9 goto show9
endi endi
if $data3_192.168.0.2 != null then if $data3_2 != null then
goto show9 goto show9
endi endi
if $data3_192.168.0.3 != 0 then if $data3_3 != 0 then
goto show9 goto show9
endi endi
system sh/exec.sh -n dnode2 -s stop -x SIGINT system sh/exec_up.sh -n dnode2 -s stop -x SIGINT
print ========== step10 print ========== step10
sql select * from d1.t1 order by t desc sql select * from d1.t1 order by t desc
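Across steps 1 through 10 this script leans on the same label-and-counter idiom ($x = 0, show2:, goto show2) for bounded retries, and a null comparison such as $data3_2 != null doubles as a check that a dropped dnode's row has vanished from show dnodes. A hedged bash equivalent of that row-disappearance wait, with the CLI flags and table layout assumed as in the sketch above:

# Hypothetical sketch: wait until dnode 2's row disappears after it is
# dropped, with a bounded counter standing in for the $x/goto idiom.
x=0
while taos -s "show dnodes;" 2>/dev/null | awk '$1 == 2' | grep -q .; do
  x=$((x + 1))
  if [ "$x" -ge 30 ]; then
    echo "dnode 2 row still present after $x checks" >&2
    exit 1
  fi
  sleep 1
done
echo "dnode 2 removed from the cluster"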

View File

@ -109,3 +109,7 @@ endi
if $data3_3 != null then if $data3_3 != null then
goto show7 goto show7
endi endi
system sh/exec_up.sh -n dnode1 -s stop
system sh/exec_up.sh -n dnode2 -s stop
system sh/exec_up.sh -n dnode3 -s stop
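This and the following mnode cases gain the same three-line epilogue: every dnode is stopped through exec_up.sh once the assertions pass, so the next case starts from a clean slate. TSIM has no trap-style cleanup hook, which is presumably why the stop lines are appended to each script's success path; in plain bash the same guarantee would normally be expressed with a trap, sketched here under the assumption that exec_up.sh takes the -n/-s flags exactly as shown in the diff:

# Hypothetical sketch: tear dnodes down even if a step fails mid-way,
# not only on the success path as the sim scripts do.
cleanup() {
  for n in dnode1 dnode2 dnode3; do
    sh/exec_up.sh -n "$n" -s stop || true
  done
}
trap cleanup EXIT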

View File

@ -77,4 +77,8 @@ if $data3_1 != master then
endi endi
if $data3_2 != slave then if $data3_2 != slave then
goto step5 goto step5
endi endi
system sh/exec_up.sh -n dnode1 -s stop
system sh/exec_up.sh -n dnode2 -s stop
system sh/exec_up.sh -n dnode3 -s stop

View File

@ -93,3 +93,7 @@ endi
if $dnode3Role != slave then if $dnode3Role != slave then
return -1 return -1
endi endi
system sh/exec_up.sh -n dnode1 -s stop
system sh/exec_up.sh -n dnode2 -s stop
system sh/exec_up.sh -n dnode3 -s stop

View File

@ -123,3 +123,6 @@ if $dnode3Role != slave then
return -1 return -1
endi endi
system sh/exec_up.sh -n dnode1 -s stop
system sh/exec_up.sh -n dnode2 -s stop
system sh/exec_up.sh -n dnode3 -s stop

View File

@ -83,3 +83,7 @@ endi
if $dnode3Role != null then if $dnode3Role != null then
return -1 return -1
endi endi
system sh/exec_up.sh -n dnode1 -s stop
system sh/exec_up.sh -n dnode2 -s stop
system sh/exec_up.sh -n dnode3 -s stop

View File

@ -38,5 +38,6 @@ if $data4_2 != 4 then
return -1 return -1
endi endi
system sh/exec_up.sh -n dnode1 -s stop
system sh/exec_up.sh -n dnode2 -s stop
system sh/exec_up.sh -n dnode3 -s stop

View File

@ -7,4 +7,3 @@ run unique/mnode/mgmt33.sim
run unique/mnode/mgmt34.sim run unique/mnode/mgmt34.sim
run unique/mnode/mgmtr2.sim run unique/mnode/mgmtr2.sim
run unique/mnode/secondIp.sim run unique/mnode/secondIp.sim
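basicSuite.sim is a flat list of run directives, so this hunk simply drops secondIp.sim from the basic suite. Judging by the test-all.sh change below, the suite is selected with ./test.sh -f basicSuite.sim, so trimming this list directly shortens the CI run; whether secondIp.sim still runs elsewhere in a fuller suite is not visible in this diff.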

View File

@ -8,32 +8,33 @@ GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m' NC='\033[0m'
cd script cd script
sudo ./test.sh 2>&1 | grep 'success\|failed' | tee out.txt ./test.sh -f basicSuite.sim 2>&1 | grep 'success\|failed\|fault' | tee out.txt
total_success=`grep success out.txt | wc -l` totalSuccess=`grep success out.txt | wc -l`
totalBasic=`grep success out.txt | grep Suite | wc -l`
if [ "$total_success" -gt "0" ]; then if [ "$totalSuccess" -gt "0" ]; then
total_success=`expr $total_success - 1` totalSuccess=`expr $totalSuccess - $totalBasic`
echo -e "${GREEN} ### Total $total_success TSIM case(s) succeed! ### ${NC}" echo -e "${GREEN} ### Total $totalSuccess TSIM case(s) succeed! ### ${NC}"
fi fi
total_failed=`grep failed out.txt | wc -l` totalFailed=`grep 'failed\|fault' out.txt | wc -l`
if [ "$total_failed" -ne "0" ]; then if [ "$totalFailed" -ne "0" ]; then
echo -e "${RED} ### Total $total_failed TSIM case(s) failed! ### ${NC}" echo -e "${RED} ### Total $totalFailed TSIM case(s) failed! ### ${NC}"
exit $total_failed exit $totalFailed
fi fi
cd ../pytest cd ../pytest
sudo ./simpletest.sh 2>&1 | grep 'successfully executed\|failed' | tee pytest-out.txt ./simpletest.sh 2>&1 | grep 'successfully executed\|failed' | tee pytest-out.txt
total_py_success=`grep 'successfully executed' pytest-out.txt | wc -l` totalPySuccess=`grep 'successfully executed' pytest-out.txt | wc -l`
if [ "$total_py_success" -gt "0" ]; then if [ "$totalPySuccess" -gt "0" ]; then
echo -e "${GREEN} ### Total $total_py_success python case(s) succeed! ### ${NC}" echo -e "${GREEN} ### Total $totalPySuccess python case(s) succeed! ### ${NC}"
fi fi
total_py_failed=`grep 'failed' pytest-out.txt | wc -l` totalPyFailed=`grep 'failed' pytest-out.txt | wc -l`
if [ "$total_py_failed" -ne "0" ]; then if [ "$totalPyFailed" -ne "0" ]; then
echo -e "${RED} ### Total $total_py_failed python case(s) failed! ### ${NC}" echo -e "${RED} ### Total $totalPyFailed python case(s) failed! ### ${NC}"
exit $total_py_failed exit $totalPyFailed
fi fi
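In the reworked test-all.sh every passing case prints success, but the suite-level summary line for basicSuite.sim matches that grep as well; totalBasic counts just those summary lines (they contain both "success" and "Suite") so they can be subtracted and only individual cases are reported. The old script hard-coded this as expr $total_success - 1, which breaks as soon as more than one suite summary appears. A condensed sketch of the same bookkeeping with grep -c, assuming the log format matches the patterns above:

# Hypothetical sketch: count passing cases while excluding suite-level
# summary lines, which also contain the word "success".
totalSuccess=$(grep -c success out.txt)
totalBasic=$(grep success out.txt | grep -c Suite)
totalFailed=$(grep -Ec 'failed|fault' out.txt)

echo "cases passed: $((totalSuccess - totalBasic)), failed: $totalFailed"
[ "$totalFailed" -eq 0 ] || exit "$totalFailed"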