From da867c0c54a0e9f0555af25945f3988b2fb2ac21 Mon Sep 17 00:00:00 2001 From: hzcheng Date: Tue, 21 Apr 2020 09:34:23 +0800 Subject: [PATCH 01/18] TD-100 --- src/tsdb/tests/tsdbTests.cpp | 49 +++++++++++++++++++++++++++++++++--- 1 file changed, 46 insertions(+), 3 deletions(-) diff --git a/src/tsdb/tests/tsdbTests.cpp b/src/tsdb/tests/tsdbTests.cpp index 8c59d63cb2..8c441bee21 100644 --- a/src/tsdb/tests/tsdbTests.cpp +++ b/src/tsdb/tests/tsdbTests.cpp @@ -4,6 +4,7 @@ #include "tdataformat.h" #include "tsdbMain.h" +#include "tskiplist.h" static double getCurTime() { struct timeval tv; @@ -125,8 +126,8 @@ TEST(TsdbTest, DISABLED_tableEncodeDecode) { ASSERT_EQ(memcmp(pTable->schema, tTable->schema, sizeof(STSchema) + sizeof(STColumn) * nCols), 0); } -// TEST(TsdbTest, DISABLED_createRepo) { -TEST(TsdbTest, createRepo) { +TEST(TsdbTest, DISABLED_createRepo) { +// TEST(TsdbTest, createRepo) { STsdbCfg config; STsdbRepo *repo; @@ -167,7 +168,7 @@ TEST(TsdbTest, createRepo) { .sversion = tCfg.sversion, .startTime = 1584081000000, .interval = 1000, - .totalRows = 5000000, + .totalRows = 10000000, .rowsPerSubmit = 1, .pSchema = schema }; @@ -262,4 +263,46 @@ TEST(TsdbTest, DISABLED_createFileGroup) { // ASSERT_EQ(tsdbCreateFileGroup("/home/ubuntu/work/ttest/vnode0/data", 1820, &fGroup, 1000), 0); int k = 0; +} + +static char *getTKey(const void *data) { + return (char *)data; +} + +static void insertSkipList(bool isAscend) { + TSKEY start_time = 1587393453000; + TSKEY interval = 1000; + + SSkipList *pList = tSkipListCreate(5, TSDB_DATA_TYPE_TIMESTAMP, sizeof(TSKEY), 0, 0, 1, getTKey); + ASSERT_NE(pList, nullptr); + + for (size_t i = 0; i < 20000000; i++) + { + TSKEY time = isAscend ? (start_time + i * interval) : (start_time - i * interval); + int32_t level = 0; + int32_t headSize = 0; + + tSkipListNewNodeInfo(pList, &level, &headSize); + SSkipListNode *pNode = (SSkipListNode *)malloc(headSize + sizeof(TSKEY)); + ASSERT_NE(pNode, nullptr); + pNode->level = level; + *(TSKEY *)((char *)pNode + headSize) = time; + tSkipListPut(pList, pNode); + } + + tSkipListDestroy(pList); +} + +TEST(TsdbTest, testSkipList) { + double stime = getCurTime(); + insertSkipList(true); + double etime = getCurTime(); + + printf("Time used to insert 100000000 records takes %f seconds\n", etime-stime); + + stime = getCurTime(); + insertSkipList(false); + etime = getCurTime(); + + printf("Time used to insert 100000000 records takes %f seconds\n", etime-stime); } \ No newline at end of file From ba6cf0dd9ba6d58b1c5de6601d1371fe70485c37 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 21 Apr 2020 11:29:36 +0800 Subject: [PATCH 02/18] add valgrind test support. current threshold is 23. 
[TD-138] --- .travis.yml | 45 ++++++++++++++++++++++++------------- tests/pytest/simpletest.sh | 4 +++- tests/pytest/test.py | 43 ++++++++++++++++++++++++++++++----- tests/pytest/util/dnodes.py | 36 ++++++++++++++++++++++++----- tests/pytest/util/log.py | 2 +- tests/test-all.sh | 35 +++++++++++++++-------------- 6 files changed, 120 insertions(+), 45 deletions(-) diff --git a/.travis.yml b/.travis.yml index 8413a00977..994928bb01 100644 --- a/.travis.yml +++ b/.travis.yml @@ -24,10 +24,11 @@ matrix: - python-setuptools - python3-pip - python3-setuptools + - valgrind before_install: - sudo apt update -y -qq - - sudo apt install -y net-tools python-pip python-setuptools python3-pip python3-setuptools + - sudo apt install -y net-tools python-pip python-setuptools python3-pip python3-setuptools valgrind before_script: - cd ${TRAVIS_BUILD_DIR} @@ -43,16 +44,32 @@ matrix: case $TRAVIS_OS_NAME in linux) cd ${TRAVIS_BUILD_DIR}/debug - sudo make install || exit $? + sudo make install || travis_terminate $? pip install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python2/ pip3 install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python3/ cd ${TRAVIS_BUILD_DIR}/tests - bash ./test-all.sh + ./test-all.sh || travis_terminate $? - if [ "$?" -ne "0" ]; then - exit $? + cd ${TRAVIS_BUILD_DIR}/tests/pytest + ./simpletest.sh -g 2>&1 | tee mem-error-out.txt + sleep 1 + + # Color setting + RED='\033[0;31m' + GREEN='\033[1;32m' + GREEN_DARK='\033[0;32m' + GREEN_UNDERLINE='\033[4;32m' + NC='\033[0m' + + memError=`grep -m 1 'ERROR SUMMARY' mem-error-out.txt | awk '{print $4}'` + + if [ -n "$memError" ]; then + if [ "$memError" -gt 23 ]; then + echo -e "${RED} ## Memory errors number valgrind reports is $memError. More than our threshold! ## ${NC} " + travis_terminate $memError + fi fi ;; @@ -74,12 +91,12 @@ matrix: # GitHub project metadata # ** specific to your project ** project: - name: sangshuduo/TDengine + name: TDengine version: 2.x - description: sangshuduo/TDengine + description: taosdata/TDengine # Where email notification of build analysis results will be sent - notification_email: sangshuduo@gmail.com + notification_email: sdsang@taosdata.com # Commands to prepare for build_command # ** likely specific to your build ** @@ -87,7 +104,7 @@ matrix: # The command that will be added as an argument to "cov-build" to compile your project for analysis, # ** likely specific to your build ** - build_command: cmake --build . + build_command: make # Pattern to match selecting branches that will run analysis. We recommend leaving this set to 'coverity_scan'. # Take care in resource usage, and consider the build frequency allowances per @@ -132,17 +149,17 @@ matrix: case $TRAVIS_OS_NAME in linux) cd ${TRAVIS_BUILD_DIR}/debug - sudo make install || exit $? + sudo make install || travis_terminate $? pip install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python2/ pip3 install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python3/ cd ${TRAVIS_BUILD_DIR}/tests - bash ./test-all.sh + ./test-all.sh if [ "$?" -ne "0" ]; then - exit $? + travis_terminate $? fi sudo pkill taosd @@ -150,7 +167,7 @@ matrix: cd ${TRAVIS_BUILD_DIR} lcov -d . --capture --rc lcov_branch_coverage=1 -o coverage.info - lcov -l --rc lcov_branch_coverage=1 coverage.info || exit $? + lcov -l --rc lcov_branch_coverage=1 coverage.info || travis_terminate $? gem install coveralls-lcov @@ -166,7 +183,6 @@ matrix: echo -e "${GREEN} ## Uploaded to Coveralls.io! 
## ${NC}" else echo -e "${RED} ## Coveralls.io not collect coverage report! ## ${NC} " - exit $? fi bash <(curl -s https://codecov.io/bash) -y .codecov.yml -f coverage.info @@ -174,7 +190,6 @@ matrix: echo -e "${GREEN} ## Uploaded to Codecov! ## ${NC} " else echo -e "${RED} ## Codecov did not collect coverage report! ## ${NC} " - exit $? fi ;; diff --git a/tests/pytest/simpletest.sh b/tests/pytest/simpletest.sh index bffb3689b2..a6e023bde8 100755 --- a/tests/pytest/simpletest.sh +++ b/tests/pytest/simpletest.sh @@ -1 +1,3 @@ -sudo python ./test.py -f insert/basic.py +#!/bin/bash +python2 ./test.py -f insert/basic.py $1 +python2 ./test.py -s $1 diff --git a/tests/pytest/test.py b/tests/pytest/test.py index ea727d5f6e..f5d4cc7c29 100644 --- a/tests/pytest/test.py +++ b/tests/pytest/test.py @@ -15,6 +15,9 @@ # -*- coding: utf-8 -*- import sys import getopt +import subprocess +from distutils.log import warn as printf + from util.log import * from util.dnodes import * from util.cases import * @@ -29,8 +32,10 @@ if __name__ == "__main__": deployPath = "" masterIp = "" testCluster = False - opts, args = getopt.getopt(sys.argv[1:], 'f:p:m:sch', [ - 'file=', 'path=', 'master', 'stop', 'cluster', 'help']) + valgrind = 0 + stop = 0 + opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:scgh', [ + 'file=', 'path=', 'master', 'stop', 'cluster', 'valgrind', 'help']) for key, value in opts: if key in ['-h', '--help']: tdLog.printNoPrefix( @@ -41,21 +46,49 @@ if __name__ == "__main__": tdLog.printNoPrefix('-c Test Cluster Flag') tdLog.printNoPrefix('-s stop All dnodes') sys.exit(0) + if key in ['-f', '--file']: fileName = value + if key in ['-p', '--path']: deployPath = value + if key in ['-m', '--master']: masterIp = value + if key in ['-c', '--cluster']: testCluster = True + + if key in ['-g', '--valgrind']: + valgrind = 1 + if key in ['-s', '--stop']: - cmd = "ps -ef|grep -w taosd | grep 'taosd' | grep -v grep | awk '{print $2}' && pkill -9 taosd" - os.system(cmd) - tdLog.exit('stop All dnodes') + stop = 1 + + if (stop != 0): + if (valgrind == 0): + toBeKilled = "taosd" + else: + toBeKilled = "valgrind.bin" + + killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -HUP " % toBeKilled + os.system(killCmd) + time.sleep(1) + + psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled + processID = subprocess.check_output(psCmd, shell=True) + + while( processID ): + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output(psCmd, shell=True) + + tdLog.exit('stop All dnodes') if masterIp == "": tdDnodes.init(deployPath) + tdDnodes.setValgrind(valgrind) + if testCluster: tdLog.notice("Procedures for testing cluster") if fileName == "all": diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 2be4f94802..45eaa9b30b 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -14,6 +14,7 @@ import sys import os import os.path +import subprocess from util.log import * @@ -29,6 +30,9 @@ class TDSimClient: if os.system(cmd) != 0: tdLog.exit(cmd) + def setValgrind(self, value): + self.valgrind = value + def deploy(self): self.logDir = "%s/sim/psim/log" % (self.path,) self.cfgDir = "%s/sim/psim/cfg" % (self.path) @@ -78,10 +82,14 @@ class TDDnode: self.index = index self.running = 0 self.deployed = 0 + self.valgrind = 0 def init(self, path): self.path = path + def setValgrind(self, value): + self.valgrind = value + def deploy(self): self.logDir = "%s/sim/dnode%d/log" % (self.path, self.index) self.dataDir = 
"%s/sim/dnode%d/data" % (self.path, self.index) @@ -164,9 +172,18 @@ class TDDnode: if self.deployed == 0: tdLog.exit("dnode:%d is not deployed" % (self.index)) - cmd = "nohup %staosd -c %s > /dev/null 2>&1 & " % ( - binPath, self.cfgDir) - print(cmd) + + if self.valgrind == 0: + cmd = "nohup %staosd -c %s > /dev/null 2>&1 & " % ( + binPath, self.cfgDir) + else: + valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes" + + cmd = "nohup %s %staosd -c %s 2>&1 & " % ( + valgrindCmdline, binPath, self.cfgDir) + + print(cmd) + if os.system(cmd) != 0: tdLog.exit(cmd) self.running = 1 @@ -275,8 +292,12 @@ class TDDnodes: self.sim.init(self.path) self.sim.deploy() + def setValgrind(self, value): + self.valgrind = value + def deploy(self, index): self.check(index) + self.dnodes[index - 1].setValgrind(self.valgrind) self.dnodes[index - 1].deploy() def cfg(self, index, option, value): @@ -312,11 +333,14 @@ class TDDnodes: for i in range(len(self.dnodes)): self.dnodes[i].stop() - cmd = "sudo systemctl stop taosd" - os.system(cmd) + psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True) + if processID: + cmd = "sudo systemctl stop taosd" + os.system(cmd) # if os.system(cmd) != 0 : # tdLog.exit(cmd) - cmd = "ps -ef | grep -w taosd | grep 'dnode' | grep -v grep | awk '{print $2}' && sudo pkill -sigkill taosd" + cmd = "ps -ef | grep -w taosd | grep 'dnode' | grep -v grep | awk '{print $2}' && pkill -sigkill taosd" os.system(cmd) # if os.system(cmd) != 0 : # tdLog.exit(cmd) diff --git a/tests/pytest/util/log.py b/tests/pytest/util/log.py index c7032df3c4..97c8b2ef7f 100644 --- a/tests/pytest/util/log.py +++ b/tests/pytest/util/log.py @@ -42,7 +42,7 @@ class TDLog: printf("\033[1;31m%s %s\033[0m" % (datetime.datetime.now(), err)) sys.exit(1) - def printfNoPrefix(self, info): + def printNoPrefix(self, info): printf("\033[1;36m%s\033[0m" % (info)) diff --git a/tests/test-all.sh b/tests/test-all.sh index 8bd01119c4..dee89b9dc5 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -8,32 +8,33 @@ GREEN_UNDERLINE='\033[4;32m' NC='\033[0m' cd script -sudo ./test.sh 2>&1 | grep 'success\|failed' | tee out.txt +./test.sh -f basicSuite.sim 2>&1 | grep 'success\|failed\|fault' | tee out.txt -total_success=`grep success out.txt | wc -l` +totalSuccess=`grep success out.txt | wc -l` +totalBasic=`grep success out.txt | grep Suite | wc -l` -if [ "$total_success" -gt "0" ]; then - total_success=`expr $total_success - 1` - echo -e "${GREEN} ### Total $total_success TSIM case(s) succeed! ### ${NC}" +if [ "$totalSuccess" -gt "0" ]; then + totalSuccess=`expr $totalSuccess - $totalBasic` + echo -e "${GREEN} ### Total $totalSuccess TSIM case(s) succeed! ### ${NC}" fi -total_failed=`grep failed out.txt | wc -l` -if [ "$total_failed" -ne "0" ]; then - echo -e "${RED} ### Total $total_failed TSIM case(s) failed! ### ${NC}" - exit $total_failed +totalFailed=`grep 'failed\|fault' out.txt | wc -l` +if [ "$totalFailed" -ne "0" ]; then + echo -e "${RED} ### Total $totalFailed TSIM case(s) failed! 
### ${NC}" + exit $totalFailed fi cd ../pytest -sudo ./simpletest.sh 2>&1 | grep 'successfully executed\|failed' | tee pytest-out.txt -total_py_success=`grep 'successfully executed' pytest-out.txt | wc -l` +./simpletest.sh 2>&1 | grep 'successfully executed\|failed' | tee pytest-out.txt +totalPySuccess=`grep 'successfully executed' pytest-out.txt | wc -l` -if [ "$total_py_success" -gt "0" ]; then - echo -e "${GREEN} ### Total $total_py_success python case(s) succeed! ### ${NC}" +if [ "$totalPySuccess" -gt "0" ]; then + echo -e "${GREEN} ### Total $totalPySuccess python case(s) succeed! ### ${NC}" fi -total_py_failed=`grep 'failed' pytest-out.txt | wc -l` -if [ "$total_py_failed" -ne "0" ]; then - echo -e "${RED} ### Total $total_py_failed python case(s) failed! ### ${NC}" - exit $total_py_failed +totalPyFailed=`grep 'failed' pytest-out.txt | wc -l` +if [ "$totalPyFailed" -ne "0" ]; then + echo -e "${RED} ### Total $totalPyFailed python case(s) failed! ### ${NC}" + exit $totalPyFailed fi From db21aead7637fa328425f18ebd0f7a38f7d24c64 Mon Sep 17 00:00:00 2001 From: slguan Date: Tue, 21 Apr 2020 11:45:49 +0800 Subject: [PATCH 03/18] [TD-148] fix redirect message --- src/client/src/tscServer.c | 18 ++++++++++-------- src/client/src/tscSystem.c | 2 ++ src/mnode/src/mgmtShell.c | 16 +++++++++++++--- tests/script/sh/deploy.sh | 2 +- 4 files changed, 26 insertions(+), 12 deletions(-) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index ad5f9cd0af..9bfac58cf4 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -68,11 +68,11 @@ void tscPrintMgmtIp() { } void tscSetMgmtIpListFromCluster(SRpcIpSet *pIpList) { - tscMgmtIpList.numOfIps = htons(pIpList->numOfIps); - tscMgmtIpList.inUse = htons(pIpList->inUse); + tscMgmtIpList.numOfIps = pIpList->numOfIps; + tscMgmtIpList.inUse = pIpList->inUse; tscMgmtIpList.port = htons(pIpList->port); - for (int32_t i = 0; i ip[i]; + for (int32_t i = 0; i < tscMgmtIpList.numOfIps; ++i) { + tscMgmtIpList.ip[i] = htonl(pIpList->ip[i]); } } @@ -87,6 +87,11 @@ void tscSetMgmtIpListFromEdge() { } } +void tscUpdateIpSet(void *ahandle, SRpcIpSet *pIpSet) { + tscTrace("mgmt IP list is changed for ufp is called"); + tscSetMgmtIpListFromCluster(pIpSet); +} + void tscSetMgmtIpList(SRpcIpSet *pIpList) { /* * The iplist returned by the cluster edition is the current management nodes @@ -2224,10 +2229,7 @@ int tscProcessConnectRsp(SSqlObj *pSql) { assert(len <= tListLen(pObj->db)); strncpy(pObj->db, temp, tListLen(pObj->db)); -// SIpList * pIpList; -// char *rsp = pRes->pRsp + sizeof(SCMConnectRsp); -// pIpList = (SIpList *)rsp; -// tscSetMgmtIpList(pIpList); + tscSetMgmtIpList(&pConnect->ipList); strcpy(pObj->sversion, pConnect->serverVersion); pObj->writeAuth = pConnect->writeAuth; diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index a24ca4d045..093890bbdc 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -47,6 +47,7 @@ int tscNumOfThreads; static pthread_once_t tscinit = PTHREAD_ONCE_INIT; void taosInitNote(int numOfNoteLines, int maxNotes, char* lable); +void tscUpdateIpSet(void *ahandle, SRpcIpSet *pIpSet); void tscCheckDiskUsage(void *para, void *unused) { taosGetDisk(); @@ -65,6 +66,7 @@ int32_t tscInitRpc(const char *user, const char *secret) { rpcInit.label = "TSC-vnode"; rpcInit.numOfThreads = tscNumOfThreads; rpcInit.cfp = tscProcessMsgFromServer; + rpcInit.ufp = tscUpdateIpSet; rpcInit.sessions = tsMaxVnodeConnections; rpcInit.connType = TAOS_CONN_CLIENT; rpcInit.user = 
(char*)user; diff --git a/src/mnode/src/mgmtShell.c b/src/mnode/src/mgmtShell.c index 522968895e..f12240a766 100644 --- a/src/mnode/src/mgmtShell.c +++ b/src/mnode/src/mgmtShell.c @@ -145,9 +145,14 @@ static void mgmtProcessMsgFromShell(SRpcMsg *rpcMsg) { } if (!sdbIsMaster()) { - // rpcSendRedirectRsp(rpcMsg->handle, mgmtGetMnodeIpListForRedirect()); - mgmtSendSimpleResp(rpcMsg->handle, TSDB_CODE_NO_MASTER); - rpcFreeCont(rpcMsg->pCont); + SRpcConnInfo connInfo; + rpcGetConnInfo(rpcMsg->handle, &connInfo); + bool usePublicIp = (connInfo.serverIp == tsPublicIpInt); + + SRpcIpSet ipSet = {0}; + mgmtGetMnodeIpList(&ipSet, usePublicIp); + mTrace("conn from ip:%s user:%s redirect msg", taosIpStr(connInfo.clientIp), connInfo.user); + rpcSendRedirectRsp(rpcMsg->handle, &ipSet); return; } @@ -357,6 +362,11 @@ static int mgmtShellRetriveAuth(char *user, char *spi, char *encrypt, char *secr *encrypt = 0; *ckey = 0; + if (!sdbIsMaster()) { + *secret = 0; + return TSDB_CODE_SUCCESS; + } + SUserObj *pUser = mgmtGetUser(user); if (pUser == NULL) { *secret = 0; diff --git a/tests/script/sh/deploy.sh b/tests/script/sh/deploy.sh index 12f9689fd3..6c4ada50fe 100755 --- a/tests/script/sh/deploy.sh +++ b/tests/script/sh/deploy.sh @@ -93,7 +93,7 @@ echo "privateIp $NODE_IP" >> $TAOS_CFG echo "dDebugFlag 199" >> $TAOS_CFG echo "mDebugFlag 199" >> $TAOS_CFG echo "sdbDebugFlag 199" >> $TAOS_CFG -echo "rpcDebugFlag 131" >> $TAOS_CFG +echo "rpcDebugFlag 135" >> $TAOS_CFG echo "tmrDebugFlag 131" >> $TAOS_CFG echo "cDebugFlag 135" >> $TAOS_CFG echo "httpDebugFlag 131" >> $TAOS_CFG From 273cc32249ad1a8229661aad71ce75acebc92910 Mon Sep 17 00:00:00 2001 From: slguan Date: Tue, 21 Apr 2020 13:27:03 +0800 Subject: [PATCH 04/18] [TD-148] change ip sets --- src/client/inc/tsclient.h | 2 +- src/client/src/tscServer.c | 38 ++++++++++++------------- src/client/src/tscSql.c | 18 ++++++------ src/client/src/tscSystem.c | 12 ++++---- src/dnode/src/dnodeMClient.c | 6 ++++ src/mnode/inc/mgmtMnode.h | 2 +- src/mnode/src/mgmtDServer.c | 14 +++++++++ src/mnode/src/mgmtMnode.c | 2 +- src/mnode/src/mgmtShell.c | 7 +++-- tests/script/test.sh | 16 ++++++++--- tests/script/unique/mnode/mgmt22.sim | 4 +++ tests/script/unique/mnode/mgmt24.sim | 6 +++- tests/script/unique/mnode/mgmt25.sim | 4 +++ tests/script/unique/mnode/mgmt26.sim | 3 ++ tests/script/unique/mnode/mgmtr2.sim | 4 +++ tests/script/unique/mnode/secondIp.sim | 5 ++-- tests/script/unique/mnode/testSuite.sim | 1 - 17 files changed, 96 insertions(+), 48 deletions(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index f225d546f3..bba45a672e 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -465,7 +465,7 @@ extern void * tscQhandle; extern int tscKeepConn[]; extern int tsInsertHeadSize; extern int tscNumOfThreads; -extern SRpcIpSet tscMgmtIpList; +extern SRpcIpSet tscMgmtIpSet; typedef void (*__async_cb_func_t)(void *param, TAOS_RES *tres, int numOfRows); diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 9bfac58cf4..d1fd3f9a2d 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -30,7 +30,7 @@ #define TSC_MGMT_VNODE 999 -SRpcIpSet tscMgmtIpList; +SRpcIpSet tscMgmtIpSet; SRpcIpSet tscDnodeIpSet; int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo) = {0}; @@ -58,30 +58,30 @@ static void tscSetDnodeIpList(SSqlObj* pSql, STableMeta* pTableMeta) { } void tscPrintMgmtIp() { - if (tscMgmtIpList.numOfIps <= 0) { - tscError("invalid mgmt IP list:%d", tscMgmtIpList.numOfIps); + if 
(tscMgmtIpSet.numOfIps <= 0) { + tscError("invalid mgmt IP list:%d", tscMgmtIpSet.numOfIps); } else { - for (int i = 0; i < tscMgmtIpList.numOfIps; ++i) { - tscTrace("mgmt index:%d ip:%d", i, tscMgmtIpList.ip[i]); + for (int i = 0; i < tscMgmtIpSet.numOfIps; ++i) { + tscTrace("mgmt index:%d ip:%d", i, tscMgmtIpSet.ip[i]); } } } void tscSetMgmtIpListFromCluster(SRpcIpSet *pIpList) { - tscMgmtIpList.numOfIps = pIpList->numOfIps; - tscMgmtIpList.inUse = pIpList->inUse; - tscMgmtIpList.port = htons(pIpList->port); - for (int32_t i = 0; i < tscMgmtIpList.numOfIps; ++i) { - tscMgmtIpList.ip[i] = htonl(pIpList->ip[i]); + tscMgmtIpSet.numOfIps = pIpList->numOfIps; + tscMgmtIpSet.inUse = pIpList->inUse; + tscMgmtIpSet.port = htons(pIpList->port); + for (int32_t i = 0; i < tscMgmtIpSet.numOfIps; ++i) { + tscMgmtIpSet.ip[i] = htonl(pIpList->ip[i]); } } void tscSetMgmtIpListFromEdge() { - if (tscMgmtIpList.numOfIps != 1) { - tscMgmtIpList.numOfIps = 1; - tscMgmtIpList.inUse = 0; - tscMgmtIpList.port = tsMnodeShellPort; - tscMgmtIpList.ip[0] = inet_addr(tsMasterIp); + if (tscMgmtIpSet.numOfIps != 1) { + tscMgmtIpSet.numOfIps = 1; + tscMgmtIpSet.inUse = 0; + tscMgmtIpSet.port = tsMnodeShellPort; + tscMgmtIpSet.ip[0] = inet_addr(tsMasterIp); tscTrace("edge mgmt IP list:"); tscPrintMgmtIp(); } @@ -89,7 +89,7 @@ void tscSetMgmtIpListFromEdge() { void tscUpdateIpSet(void *ahandle, SRpcIpSet *pIpSet) { tscTrace("mgmt IP list is changed for ufp is called"); - tscSetMgmtIpListFromCluster(pIpSet); + tscMgmtIpSet = *pIpSet; } void tscSetMgmtIpList(SRpcIpSet *pIpList) { @@ -114,7 +114,7 @@ void tscSetMgmtIpList(SRpcIpSet *pIpList) { UNUSED_FUNC static int32_t tscGetMgmtConnMaxRetryTimes() { int32_t factor = 2; - return tscMgmtIpList.numOfIps * factor; + return tscMgmtIpSet.numOfIps * factor; } void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) { @@ -209,7 +209,7 @@ int tscSendMsgToServer(SSqlObj *pSql) { }; rpcSendRequest(pVnodeConn, &pSql->ipList, &rpcMsg); } else { - pSql->ipList = tscMgmtIpList; + pSql->ipList = tscMgmtIpSet; pSql->ipList.port = tsMnodeShellPort; tscTrace("%p msg:%s is sent to server %d", pSql, taosMsg[pSql->cmd.msgType], pSql->ipList.port); @@ -430,7 +430,7 @@ int tscProcessSql(SSqlObj *pSql) { return pSql->res.code; } } else if (pSql->cmd.command < TSDB_SQL_LOCAL) { - pSql->ipList = tscMgmtIpList; + pSql->ipList = tscMgmtIpSet; } else { // local handler return (*tscProcessMsgRsp[pCmd->command])(pSql); } diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 4f02a96eec..6fbd5e7739 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -72,23 +72,23 @@ STscObj *taosConnectImpl(const char *ip, const char *user, const char *pass, con } if (ip && ip[0]) { - tscMgmtIpList.inUse = 0; - tscMgmtIpList.port = tsMnodeShellPort; - tscMgmtIpList.numOfIps = 1; - tscMgmtIpList.ip[0] = inet_addr(ip); + tscMgmtIpSet.inUse = 0; + tscMgmtIpSet.port = tsMnodeShellPort; + tscMgmtIpSet.numOfIps = 1; + tscMgmtIpSet.ip[0] = inet_addr(ip); if (tsMasterIp[0] && strcmp(ip, tsMasterIp) != 0) { - tscMgmtIpList.numOfIps = 2; - tscMgmtIpList.ip[1] = inet_addr(tsMasterIp); + tscMgmtIpSet.numOfIps = 2; + tscMgmtIpSet.ip[1] = inet_addr(tsMasterIp); } if (tsSecondIp[0] && strcmp(tsSecondIp, tsMasterIp) != 0) { - tscMgmtIpList.numOfIps = 3; - tscMgmtIpList.ip[2] = inet_addr(tsSecondIp); + tscMgmtIpSet.numOfIps = 3; + tscMgmtIpSet.ip[2] = inet_addr(tsSecondIp); } } - tscMgmtIpList.port = port ? port : tsMnodeShellPort; + tscMgmtIpSet.port = port ? 
port : tsMnodeShellPort; STscObj *pObj = (STscObj *)calloc(1, sizeof(STscObj)); if (NULL == pObj) { diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index 093890bbdc..68b11ce416 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -147,14 +147,14 @@ void taos_init_imp() { taosInitNote(tsNumOfLogLines / 10, 1, (char*)"tsc_note"); } - tscMgmtIpList.inUse = 0; - tscMgmtIpList.port = tsMnodeShellPort; - tscMgmtIpList.numOfIps = 1; - tscMgmtIpList.ip[0] = inet_addr(tsMasterIp); + tscMgmtIpSet.inUse = 0; + tscMgmtIpSet.port = tsMnodeShellPort; + tscMgmtIpSet.numOfIps = 1; + tscMgmtIpSet.ip[0] = inet_addr(tsMasterIp); if (tsSecondIp[0] && strcmp(tsSecondIp, tsMasterIp) != 0) { - tscMgmtIpList.numOfIps = 2; - tscMgmtIpList.ip[1] = inet_addr(tsSecondIp); + tscMgmtIpSet.numOfIps = 2; + tscMgmtIpSet.ip[1] = inet_addr(tsSecondIp); } tscInitMsgsFp(); diff --git a/src/dnode/src/dnodeMClient.c b/src/dnode/src/dnodeMClient.c index 8d5d0a02ec..78f4d076fc 100644 --- a/src/dnode/src/dnodeMClient.c +++ b/src/dnode/src/dnodeMClient.c @@ -54,6 +54,11 @@ static SRpcIpSet tsMnodeIpSet = {0}; static SDMMnodeInfos tsMnodeInfos = {0}; static SDMDnodeCfg tsDnodeCfg = {0}; +void dnodeUpdateIpSet(void *ahandle, SRpcIpSet *pIpSet) { + dTrace("mgmt IP list is changed for ufp is called"); + tsMnodeIpSet = *pIpSet; +} + int32_t dnodeInitMClient() { dnodeReadDnodeCfg(); tsRebootTime = taosGetTimestampSec(); @@ -90,6 +95,7 @@ int32_t dnodeInitMClient() { rpcInit.label = "DND-MC"; rpcInit.numOfThreads = 1; rpcInit.cfp = dnodeProcessRspFromMnode; + rpcInit.ufp = dnodeUpdateIpSet; rpcInit.sessions = 100; rpcInit.connType = TAOS_CONN_CLIENT; rpcInit.idleTime = tsShellActivityTimer * 2000; diff --git a/src/mnode/inc/mgmtMnode.h b/src/mnode/inc/mgmtMnode.h index f9f2909d95..4fb57d7151 100644 --- a/src/mnode/inc/mgmtMnode.h +++ b/src/mnode/inc/mgmtMnode.h @@ -40,7 +40,7 @@ void * mgmtGetNextMnode(void *pNode, struct SMnodeObj **pMnode); void mgmtReleaseMnode(struct SMnodeObj *pMnode); char * mgmtGetMnodeRoleStr(); -void mgmtGetMnodeIpList(SRpcIpSet *ipSet, bool usePublicIp); +void mgmtGetMnodeIpSet(SRpcIpSet *ipSet, bool usePublicIp); void mgmtGetMnodeInfos(void *mnodes); #ifdef __cplusplus diff --git a/src/mnode/src/mgmtDServer.c b/src/mnode/src/mgmtDServer.c index 80a87813d9..b551c0eae5 100644 --- a/src/mnode/src/mgmtDServer.c +++ b/src/mnode/src/mgmtDServer.c @@ -28,8 +28,10 @@ #include "mgmtLog.h" #include "mgmtDb.h" #include "mgmtDServer.h" +#include "mgmtMnode.h" #include "mgmtProfile.h" #include "mgmtShell.h" +#include "mgmtSdb.h" #include "mgmtTable.h" #include "mgmtVgroup.h" @@ -99,6 +101,18 @@ static void mgmtProcessMsgFromDnode(SRpcMsg *rpcMsg) { mgmtSendSimpleResp(rpcMsg->handle, TSDB_CODE_INVALID_MSG_LEN); return; } + + if (!sdbIsMaster()) { + SRpcConnInfo connInfo; + rpcGetConnInfo(rpcMsg->handle, &connInfo); + bool usePublicIp = false; + + SRpcIpSet ipSet = {0}; + mgmtGetMnodeIpSet(&ipSet, usePublicIp); + mTrace("conn from dnode ip:%s redirect msg", taosIpStr(connInfo.clientIp)); + rpcSendRedirectRsp(rpcMsg->handle, &ipSet); + return; + } if (mgmtProcessDnodeMsgFp[rpcMsg->msgType]) { SRpcMsg *pMsg = malloc(sizeof(SRpcMsg)); diff --git a/src/mnode/src/mgmtMnode.c b/src/mnode/src/mgmtMnode.c index 6546b8ea37..05de2ecfdb 100644 --- a/src/mnode/src/mgmtMnode.c +++ b/src/mnode/src/mgmtMnode.c @@ -171,7 +171,7 @@ char *mgmtGetMnodeRoleStr(int32_t role) { } } -void mgmtGetMnodeIpList(SRpcIpSet *ipSet, bool usePublicIp) { +void mgmtGetMnodeIpSet(SRpcIpSet *ipSet, bool usePublicIp) { 
void *pNode = NULL; while (1) { SMnodeObj *pMnode = NULL; diff --git a/src/mnode/src/mgmtShell.c b/src/mnode/src/mgmtShell.c index f12240a766..e2f393e6e9 100644 --- a/src/mnode/src/mgmtShell.c +++ b/src/mnode/src/mgmtShell.c @@ -141,6 +141,7 @@ void mgmtDealyedAddToShellQueue(SQueuedMsg *queuedMsg) { static void mgmtProcessMsgFromShell(SRpcMsg *rpcMsg) { if (rpcMsg == NULL || rpcMsg->pCont == NULL) { + mgmtSendSimpleResp(rpcMsg->handle, TSDB_CODE_INVALID_MSG_LEN); return; } @@ -150,7 +151,7 @@ static void mgmtProcessMsgFromShell(SRpcMsg *rpcMsg) { bool usePublicIp = (connInfo.serverIp == tsPublicIpInt); SRpcIpSet ipSet = {0}; - mgmtGetMnodeIpList(&ipSet, usePublicIp); + mgmtGetMnodeIpSet(&ipSet, usePublicIp); mTrace("conn from ip:%s user:%s redirect msg", taosIpStr(connInfo.clientIp), connInfo.user); rpcSendRedirectRsp(rpcMsg->handle, &ipSet); return; @@ -337,7 +338,7 @@ static void mgmtProcessHeartBeatMsg(SQueuedMsg *pMsg) { return; } - mgmtGetMnodeIpList(&pHBRsp->ipList, pMsg->usePublicIp); + mgmtGetMnodeIpSet(&pHBRsp->ipList, pMsg->usePublicIp); /* * TODO @@ -424,7 +425,7 @@ static void mgmtProcessConnectMsg(SQueuedMsg *pMsg) { pConnectRsp->writeAuth = pUser->writeAuth; pConnectRsp->superAuth = pUser->superAuth; - mgmtGetMnodeIpList(&pConnectRsp->ipList, pMsg->usePublicIp); + mgmtGetMnodeIpSet(&pConnectRsp->ipList, pMsg->usePublicIp); connect_over: rpcRsp.code = code; diff --git a/tests/script/test.sh b/tests/script/test.sh index 5fd80d3909..bce6291fbe 100755 --- a/tests/script/test.sh +++ b/tests/script/test.sh @@ -11,7 +11,8 @@ set +e FILE_NAME= RELEASE=0 ASYNC=0 -while getopts "f:a" arg +VALGRIND=0 +while getopts "f:av" arg do case $arg in f) @@ -20,6 +21,9 @@ do a) ASYNC=1 ;; + v) + VALGRIND=1 + ;; ?) echo "unknow argument" ;; @@ -96,10 +100,14 @@ ulimit -c unlimited #sudo sysctl -w kernel.core_pattern=$TOP_DIR/core.%p.%e if [ -n "$FILE_NAME" ]; then - echo "ExcuteCmd:" $PROGRAM -c $CFG_DIR -f $FILE_NAME echo "------------------------------------------------------------------------" - #valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes --log-file=valgrind.log $PROGRAM -c $CFG_DIR -f $FILE_NAME - $PROGRAM -c $CFG_DIR -f $FILE_NAME + if [ $VALGRIND -eq 1 ]; then + echo valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes --log-file=${CODE_DIR}/../script/valgrind.log $PROGRAM -c $CFG_DIR -f $FILE_NAME + valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes --log-file=${CODE_DIR}/../script/valgrind.log $PROGRAM -c $CFG_DIR -f $FILE_NAME + else + echo "ExcuteCmd:" $PROGRAM -c $CFG_DIR -f $FILE_NAME + $PROGRAM -c $CFG_DIR -f $FILE_NAME + fi else echo "ExcuteCmd:" $PROGRAM -c $CFG_DIR -f basicSuite.sim echo "------------------------------------------------------------------------" diff --git a/tests/script/unique/mnode/mgmt22.sim b/tests/script/unique/mnode/mgmt22.sim index 3bfa86b55a..f6e54ffc2f 100644 --- a/tests/script/unique/mnode/mgmt22.sim +++ b/tests/script/unique/mnode/mgmt22.sim @@ -109,3 +109,7 @@ endi if $data3_3 != null then goto show7 endi + +system sh/exec_up.sh -n dnode1 -s stop +system sh/exec_up.sh -n dnode2 -s stop +system sh/exec_up.sh -n dnode3 -s stop \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmt24.sim b/tests/script/unique/mnode/mgmt24.sim index 6635c4e706..9418fd3398 100644 --- 
a/tests/script/unique/mnode/mgmt24.sim +++ b/tests/script/unique/mnode/mgmt24.sim @@ -77,4 +77,8 @@ if $data3_1 != master then endi if $data3_2 != slave then goto step5 -endi \ No newline at end of file +endi + +system sh/exec_up.sh -n dnode1 -s stop +system sh/exec_up.sh -n dnode2 -s stop +system sh/exec_up.sh -n dnode3 -s stop \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmt25.sim b/tests/script/unique/mnode/mgmt25.sim index ca935d744f..4f5e2bf3c8 100644 --- a/tests/script/unique/mnode/mgmt25.sim +++ b/tests/script/unique/mnode/mgmt25.sim @@ -93,3 +93,7 @@ endi if $dnode3Role != slave then return -1 endi + +system sh/exec_up.sh -n dnode1 -s stop +system sh/exec_up.sh -n dnode2 -s stop +system sh/exec_up.sh -n dnode3 -s stop \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmt26.sim b/tests/script/unique/mnode/mgmt26.sim index 4a5958b88a..57c6003431 100644 --- a/tests/script/unique/mnode/mgmt26.sim +++ b/tests/script/unique/mnode/mgmt26.sim @@ -123,3 +123,6 @@ if $dnode3Role != slave then return -1 endi +system sh/exec_up.sh -n dnode1 -s stop +system sh/exec_up.sh -n dnode2 -s stop +system sh/exec_up.sh -n dnode3 -s stop \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmtr2.sim b/tests/script/unique/mnode/mgmtr2.sim index 06e8f6b9d9..666b8a7b33 100644 --- a/tests/script/unique/mnode/mgmtr2.sim +++ b/tests/script/unique/mnode/mgmtr2.sim @@ -83,3 +83,7 @@ endi if $dnode3Role != null then return -1 endi + +system sh/exec_up.sh -n dnode1 -s stop +system sh/exec_up.sh -n dnode2 -s stop +system sh/exec_up.sh -n dnode3 -s stop \ No newline at end of file diff --git a/tests/script/unique/mnode/secondIp.sim b/tests/script/unique/mnode/secondIp.sim index 29d27cd88c..6902c7d498 100644 --- a/tests/script/unique/mnode/secondIp.sim +++ b/tests/script/unique/mnode/secondIp.sim @@ -38,5 +38,6 @@ if $data4_2 != 4 then return -1 endi - - +system sh/exec_up.sh -n dnode1 -s stop +system sh/exec_up.sh -n dnode2 -s stop +system sh/exec_up.sh -n dnode3 -s stop \ No newline at end of file diff --git a/tests/script/unique/mnode/testSuite.sim b/tests/script/unique/mnode/testSuite.sim index e1712fa4f6..279574e47d 100644 --- a/tests/script/unique/mnode/testSuite.sim +++ b/tests/script/unique/mnode/testSuite.sim @@ -7,4 +7,3 @@ run unique/mnode/mgmt33.sim run unique/mnode/mgmt34.sim run unique/mnode/mgmtr2.sim run unique/mnode/secondIp.sim - From 46b13b8ad1aac20f3555e27adc97e931a7888e44 Mon Sep 17 00:00:00 2001 From: hzcheng Date: Tue, 21 Apr 2020 13:42:36 +0800 Subject: [PATCH 05/18] Fix valgrind warning --- src/tsdb/src/tsdbMain.c | 3 ++- src/tsdb/src/tsdbRWHelper.c | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index 648188cdae..925ff32cb5 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -387,7 +387,7 @@ int tsdbInitTableCfg(STableCfg *config, ETableType type, int64_t uid, int32_t ti config->superUid = TSDB_INVALID_SUPER_TABLE_ID; config->tableId.uid = uid; config->tableId.tid = tid; - config->name = strdup("test1"); + config->name = NULL; return 0; } @@ -880,6 +880,7 @@ static void *tsdbCommitData(void *arg) { _exit: tdFreeDataCols(pDataCols); tsdbDestroyTableIters(iters, pCfg->maxTables); + tsdbDestroyHelper(&whelper); tsdbLockRepo(arg); tdListMove(pCache->imem->list, pCache->pool.memPool); diff --git a/src/tsdb/src/tsdbRWHelper.c b/src/tsdb/src/tsdbRWHelper.c index c0f509d1f2..8b5364e362 100644 --- a/src/tsdb/src/tsdbRWHelper.c +++ b/src/tsdb/src/tsdbRWHelper.c 
@@ -403,6 +403,7 @@ int tsdbWriteCompInfo(SRWHelper *pHelper) { } else { pHelper->pCompInfo->delimiter = TSDB_FILE_DELIMITER; pHelper->pCompInfo->uid = pHelper->tableInfo.uid; + pHelper->pCompInfo->checksum = 0; ASSERT((pIdx->len - sizeof(SCompInfo) - sizeof(TSCKSUM)) % sizeof(SCompBlock) == 0); taosCalcChecksumAppend(0, (uint8_t *)pHelper->pCompInfo, pIdx->len); pIdx->offset = lseek(pHelper->files.nHeadF.fd, 0, SEEK_END); From d9a60557fb7901d22e77780dbe6e1334d9bbda82 Mon Sep 17 00:00:00 2001 From: hzcheng Date: Tue, 21 Apr 2020 14:00:56 +0800 Subject: [PATCH 06/18] nothing --- src/tsdb/tests/tsdbTests.cpp | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/tsdb/tests/tsdbTests.cpp b/src/tsdb/tests/tsdbTests.cpp index 8c441bee21..84711b07f8 100644 --- a/src/tsdb/tests/tsdbTests.cpp +++ b/src/tsdb/tests/tsdbTests.cpp @@ -126,8 +126,8 @@ TEST(TsdbTest, DISABLED_tableEncodeDecode) { ASSERT_EQ(memcmp(pTable->schema, tTable->schema, sizeof(STSchema) + sizeof(STColumn) * nCols), 0); } -TEST(TsdbTest, DISABLED_createRepo) { -// TEST(TsdbTest, createRepo) { +// TEST(TsdbTest, DISABLED_createRepo) { +TEST(TsdbTest, createRepo) { STsdbCfg config; STsdbRepo *repo; @@ -142,6 +142,7 @@ TEST(TsdbTest, DISABLED_createRepo) { STableCfg tCfg; ASSERT_EQ(tsdbInitTableCfg(&tCfg, TSDB_SUPER_TABLE, 987607499877672L, 0), -1); ASSERT_EQ(tsdbInitTableCfg(&tCfg, TSDB_NORMAL_TABLE, 987607499877672L, 0), 0); + tsdbTableSetName(&tCfg, "test", false); int nCols = 5; STSchema *schema = tdNewSchema(nCols); @@ -293,7 +294,8 @@ static void insertSkipList(bool isAscend) { tSkipListDestroy(pList); } -TEST(TsdbTest, testSkipList) { +TEST(TsdbTest, DISABLED_testSkipList) { +// TEST(TsdbTest, testSkipList) { double stime = getCurTime(); insertSkipList(true); double etime = getCurTime(); From 64943e42d4b4b6f4163bcc1fd36c87b24de190e4 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 21 Apr 2020 14:44:09 +0800 Subject: [PATCH 07/18] fix make sequence for coverity_scan. --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 994928bb01..49c94e5247 100644 --- a/.travis.yml +++ b/.travis.yml @@ -100,7 +100,7 @@ matrix: # Commands to prepare for build_command # ** likely specific to your build ** - build_command_prepend: cmake .. + build_command_prepend: cmake . 
# The command that will be added as an argument to "cov-build" to compile your project for analysis, # ** likely specific to your build ** From 4db9f9189b93f6d9479bee18dbe0884ce43b81c3 Mon Sep 17 00:00:00 2001 From: hzcheng Date: Tue, 21 Apr 2020 18:47:23 +0800 Subject: [PATCH 08/18] fix wal call error --- src/tsdb/src/tsdbMain.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index 925ff32cb5..9a97c773c4 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -282,6 +282,8 @@ int32_t tsdbConfigRepo(TsdbRepoT *repo, STsdbCfg *pCfg) { int32_t tsdbTriggerCommit(TsdbRepoT *repo) { STsdbRepo *pRepo = (STsdbRepo *)repo; + + if (pRepo->appH.walCallBack) pRepo->appH.walCallBack(pRepo->appH.appH); tsdbLockRepo(repo); if (pRepo->commit) { @@ -854,8 +856,6 @@ static void *tsdbCommitData(void *arg) { SRWHelper whelper = {0}; if (pCache->imem == NULL) return NULL; - if (pRepo->appH.walCallBack) pRepo->appH.walCallBack(pRepo->appH.appH); - // Create the iterator to read from cache SSkipListIterator **iters = tsdbCreateTableIters(pMeta, pCfg->maxTables); if (iters == NULL) { From 8e444435d17fae91c68196b4a09b96e6ce3a8371 Mon Sep 17 00:00:00 2001 From: slguan Date: Tue, 21 Apr 2020 21:43:11 +0800 Subject: [PATCH 09/18] [TD-150] save vnode cfg use json format --- src/dnode/src/dnodeMClient.c | 4 +- src/dnode/src/dnodeMgmt.c | 48 +++--- src/inc/taosmsg.h | 49 ++++-- src/mnode/src/mgmtDnode.c | 2 +- src/mnode/src/mgmtVgroup.c | 48 +++--- src/tsdb/src/tsdbMain.c | 2 +- src/vnode/CMakeLists.txt | 1 + src/vnode/src/vnodeMain.c | 297 ++++++++++++++++++++++++++--------- tests/script/sh/deploy.sh | 4 +- tests/script/test.sh | 2 + 10 files changed, 328 insertions(+), 129 deletions(-) diff --git a/src/dnode/src/dnodeMClient.c b/src/dnode/src/dnodeMClient.c index 78f4d076fc..ee805a2a0c 100644 --- a/src/dnode/src/dnodeMClient.c +++ b/src/dnode/src/dnodeMClient.c @@ -298,7 +298,7 @@ static bool dnodeReadMnodeInfos() { tsMnodeInfos.nodeInfos[i].syncPort = (uint16_t)syncPort->valueint; cJSON *nodeName = cJSON_GetObjectItem(nodeInfo, "nodeName"); - if (!nodeIp || nodeName->type != cJSON_String || nodeName->valuestring == NULL) { + if (!nodeName || nodeName->type != cJSON_String || nodeName->valuestring == NULL) { dError("failed to read mnode mgmtIpList.json, nodeName not found"); goto PARSE_OVER; } @@ -310,7 +310,7 @@ static bool dnodeReadMnodeInfos() { dPrint("read mnode iplist successed, numOfIps:%d inUse:%d", tsMnodeInfos.nodeNum, tsMnodeInfos.inUse); for (int32_t i = 0; i < tsMnodeInfos.nodeNum; i++) { dPrint("mnode:%d, ip:%s:%u name:%s", tsMnodeInfos.nodeInfos[i].nodeId, - taosIpStr(tsMnodeInfos.nodeInfos[i].nodeId), tsMnodeInfos.nodeInfos[i].nodePort, + taosIpStr(tsMnodeInfos.nodeInfos[i].nodeIp), tsMnodeInfos.nodeInfos[i].nodePort, tsMnodeInfos.nodeInfos[i].nodeName); } diff --git a/src/dnode/src/dnodeMgmt.c b/src/dnode/src/dnodeMgmt.c index 7054cb8cd0..99209c734c 100644 --- a/src/dnode/src/dnodeMgmt.c +++ b/src/dnode/src/dnodeMgmt.c @@ -129,22 +129,21 @@ static void dnodeCloseVnodes() { static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *rpcMsg) { SMDCreateVnodeMsg *pCreate = rpcMsg->pCont; - pCreate->cfg.vgId = htonl(pCreate->cfg.vgId); - pCreate->cfg.maxSessions = htonl(pCreate->cfg.maxSessions); - pCreate->cfg.cacheBlockSize = htonl(pCreate->cfg.cacheBlockSize); - pCreate->cfg.daysPerFile = htonl(pCreate->cfg.daysPerFile); - pCreate->cfg.daysToKeep1 = htonl(pCreate->cfg.daysToKeep1); - pCreate->cfg.daysToKeep2 = 
htonl(pCreate->cfg.daysToKeep2); - pCreate->cfg.daysToKeep = htonl(pCreate->cfg.daysToKeep); - pCreate->cfg.commitTime = htonl(pCreate->cfg.commitTime); - pCreate->cfg.rowsInFileBlock = htonl(pCreate->cfg.rowsInFileBlock); - pCreate->cfg.blocksPerTable = htons(pCreate->cfg.blocksPerTable); - pCreate->cfg.cacheNumOfBlocks.totalBlocks = htonl(pCreate->cfg.cacheNumOfBlocks.totalBlocks); - + pCreate->cfg.vgId = htonl(pCreate->cfg.vgId); + pCreate->cfg.maxTables = htonl(pCreate->cfg.maxTables); + pCreate->cfg.maxCacheSize = htobe64(pCreate->cfg.maxCacheSize); + pCreate->cfg.minRowsPerFileBlock = htonl(pCreate->cfg.minRowsPerFileBlock); + pCreate->cfg.maxRowsPerFileBlock = htonl(pCreate->cfg.maxRowsPerFileBlock); + pCreate->cfg.daysPerFile = htonl(pCreate->cfg.daysPerFile); + pCreate->cfg.daysToKeep1 = htonl(pCreate->cfg.daysToKeep1); + pCreate->cfg.daysToKeep2 = htonl(pCreate->cfg.daysToKeep2); + pCreate->cfg.daysToKeep = htonl(pCreate->cfg.daysToKeep); + pCreate->cfg.commitTime = htonl(pCreate->cfg.commitTime); + pCreate->cfg.arbitratorIp = htonl(pCreate->cfg.arbitratorIp); + for (int32_t j = 0; j < pCreate->cfg.replications; ++j) { - pCreate->vpeerDesc[j].vgId = htonl(pCreate->vpeerDesc[j].vgId); - pCreate->vpeerDesc[j].dnodeId = htonl(pCreate->vpeerDesc[j].dnodeId); - pCreate->vpeerDesc[j].ip = htonl(pCreate->vpeerDesc[j].ip); + pCreate->nodes[j].nodeId = htonl(pCreate->nodes[j].nodeId); + pCreate->nodes[j].nodeIp = htonl(pCreate->nodes[j].nodeIp); } return vnodeCreate(pCreate); @@ -159,9 +158,22 @@ static int32_t dnodeProcessDropVnodeMsg(SRpcMsg *rpcMsg) { static int32_t dnodeProcessAlterVnodeMsg(SRpcMsg *rpcMsg) { SMDCreateVnodeMsg *pCreate = rpcMsg->pCont; - pCreate->cfg.vgId = htonl(pCreate->cfg.vgId); - pCreate->cfg.maxSessions = htonl(pCreate->cfg.maxSessions); - pCreate->cfg.daysPerFile = htonl(pCreate->cfg.daysPerFile); + pCreate->cfg.vgId = htonl(pCreate->cfg.vgId); + pCreate->cfg.maxTables = htonl(pCreate->cfg.maxTables); + pCreate->cfg.maxCacheSize = htobe64(pCreate->cfg.maxCacheSize); + pCreate->cfg.minRowsPerFileBlock = htonl(pCreate->cfg.minRowsPerFileBlock); + pCreate->cfg.maxRowsPerFileBlock = htonl(pCreate->cfg.maxRowsPerFileBlock); + pCreate->cfg.daysPerFile = htonl(pCreate->cfg.daysPerFile); + pCreate->cfg.daysToKeep1 = htonl(pCreate->cfg.daysToKeep1); + pCreate->cfg.daysToKeep2 = htonl(pCreate->cfg.daysToKeep2); + pCreate->cfg.daysToKeep = htonl(pCreate->cfg.daysToKeep); + pCreate->cfg.commitTime = htonl(pCreate->cfg.commitTime); + pCreate->cfg.arbitratorIp = htonl(pCreate->cfg.arbitratorIp); + + for (int32_t j = 0; j < pCreate->cfg.replications; ++j) { + pCreate->nodes[j].nodeId = htonl(pCreate->nodes[j].nodeId); + pCreate->nodes[j].nodeIp = htonl(pCreate->nodes[j].nodeIp); + } return 0; } diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index ef7cbc8eab..c3d745c7ac 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -245,12 +245,6 @@ typedef struct SSchema { int16_t bytes; } SSchema; -typedef struct { - int32_t vgId; - int32_t dnodeId; - uint32_t ip; -} SVnodeDesc; - typedef struct { int32_t contLen; int32_t vgId; @@ -521,9 +515,6 @@ typedef struct { uint8_t reserved[5]; } SVnodeLoad; -/* - * NOTE: sizeof(SVnodeCfg) < TSDB_FILE_HEADER_LEN / 4 - */ typedef struct { char acct[TSDB_USER_LEN + 1]; char db[TSDB_DB_NAME_LEN + 1]; @@ -548,7 +539,7 @@ typedef struct { int8_t loadLatest; // load into mem or not uint8_t precision; // time resolution int8_t reserved[16]; -} SVnodeCfg, SDbCfg, SCMCreateDbMsg, SCMAlterDbMsg; +} SDbCfg, SCMCreateDbMsg, SCMAlterDbMsg; typedef 
struct { char db[TSDB_TABLE_ID_LEN + 1]; @@ -614,8 +605,35 @@ typedef struct { } SDMStatusRsp; typedef struct { - SVnodeCfg cfg; - SVnodeDesc vpeerDesc[TSDB_MAX_MPEERS]; + uint32_t vgId; + int32_t maxTables; + int64_t maxCacheSize; + int32_t minRowsPerFileBlock; + int32_t maxRowsPerFileBlock; + int32_t daysPerFile; + int32_t daysToKeep; + int32_t daysToKeep1; + int32_t daysToKeep2; + int32_t commitTime; + uint8_t precision; // time resolution + int8_t compression; + int8_t wals; + int8_t commitLog; + int8_t replications; + int8_t quorum; + uint32_t arbitratorIp; + int8_t reserved[16]; +} SMDVnodeCfg; + +typedef struct { + int32_t nodeId; + uint32_t nodeIp; + char nodeName[TSDB_NODE_NAME_LEN + 1]; +} SMDVnodeDesc; + +typedef struct { + SMDVnodeCfg cfg; + SMDVnodeDesc nodes[TSDB_MAX_MPEERS]; } SMDCreateVnodeMsg; typedef struct { @@ -673,9 +691,16 @@ typedef struct { int32_t metaElem[TSDB_MAX_JOIN_TABLE_NUM]; } SSuperTableMetaMsg; +typedef struct { + int32_t nodeId; + uint32_t nodeIp; + uint16_t nodePort; +} SVnodeDesc; + typedef struct { SVnodeDesc vpeerDesc[TSDB_REPLICA_MAX_NUM]; int16_t index; // used locally + int32_t vgId; int32_t numOfSids; int32_t pSidExtInfoList[]; // offset value of STableIdInfo } SVnodeSidList; diff --git a/src/mnode/src/mgmtDnode.c b/src/mnode/src/mgmtDnode.c index 145a56130f..dd38040147 100644 --- a/src/mnode/src/mgmtDnode.c +++ b/src/mnode/src/mgmtDnode.c @@ -444,7 +444,7 @@ static int32_t mgmtDropDnodeByIp(uint32_t ip) { return TSDB_CODE_NO_REMOVE_MASTER; } -#ifndef _VPEER +#ifndef _SYNC return mgmtDropDnode(pDnode); #else return balanceDropDnode(pDnode); diff --git a/src/mnode/src/mgmtVgroup.c b/src/mnode/src/mgmtVgroup.c index eca052ba34..3ce4f41a51 100644 --- a/src/mnode/src/mgmtVgroup.c +++ b/src/mnode/src/mgmtVgroup.c @@ -506,27 +506,37 @@ SMDCreateVnodeMsg *mgmtBuildCreateVnodeMsg(SVgObj *pVgroup) { SMDCreateVnodeMsg *pVnode = rpcMallocCont(sizeof(SMDCreateVnodeMsg)); if (pVnode == NULL) return NULL; - pVnode->cfg = pDb->cfg; + SMDVnodeCfg *pCfg = &pVnode->cfg; + pCfg->vgId = htonl(pVgroup->vgId); + pCfg->maxTables = htonl(pDb->cfg.maxSessions); + pCfg->maxCacheSize = htobe64((int64_t)pDb->cfg.cacheBlockSize * pDb->cfg.cacheNumOfBlocks.totalBlocks); + pCfg->maxCacheSize = htobe64(-1); + pCfg->minRowsPerFileBlock = htonl(-1); + pCfg->maxRowsPerFileBlock = htonl(-1); + pCfg->daysPerFile = htonl(pDb->cfg.daysPerFile); + pCfg->daysToKeep1 = htonl(pDb->cfg.daysToKeep1); + pCfg->daysToKeep2 = htonl(pDb->cfg.daysToKeep2); + pCfg->daysToKeep = htonl(pDb->cfg.daysToKeep); + pCfg->daysToKeep = htonl(-1); + pCfg->commitTime = htonl(pDb->cfg.commitTime); + pCfg->precision = pDb->cfg.precision; + pCfg->compression = pDb->cfg.compression; + pCfg->compression = -1; + pCfg->wals = 3; + pCfg->commitLog = pDb->cfg.commitLog; + pCfg->replications = (int8_t) pVgroup->numOfVnodes; + pCfg->quorum = 1; + pCfg->arbitratorIp = htonl(pVgroup->vnodeGid[0].privateIp); - SVnodeCfg *pCfg = &pVnode->cfg; - pCfg->vgId = htonl(pVgroup->vgId); - pCfg->maxSessions = htonl(pCfg->maxSessions); - pCfg->cacheBlockSize = htonl(pCfg->cacheBlockSize); - pCfg->cacheNumOfBlocks.totalBlocks = htonl(pCfg->cacheNumOfBlocks.totalBlocks); - pCfg->daysPerFile = htonl(pCfg->daysPerFile); - pCfg->daysToKeep1 = htonl(pCfg->daysToKeep1); - pCfg->daysToKeep2 = htonl(pCfg->daysToKeep2); - pCfg->daysToKeep = htonl(pCfg->daysToKeep); - pCfg->commitTime = htonl(pCfg->commitTime); - pCfg->rowsInFileBlock = htonl(pCfg->rowsInFileBlock); - pCfg->blocksPerTable = htons(pCfg->blocksPerTable); - pCfg->replications = 
(int8_t) pVgroup->numOfVnodes; - - SVnodeDesc *vpeerDesc = pVnode->vpeerDesc; + SMDVnodeDesc *pNodes = pVnode->nodes; for (int32_t j = 0; j < pVgroup->numOfVnodes; ++j) { - vpeerDesc[j].vgId = htonl(pVgroup->vgId); - vpeerDesc[j].dnodeId = htonl(pVgroup->vnodeGid[j].dnodeId); - vpeerDesc[j].ip = htonl(pVgroup->vnodeGid[j].privateIp); + SDnodeObj *pDnode = mgmtGetDnode(pVgroup->vnodeGid[j].dnodeId); + if (pDnode != NULL) { + pNodes[j].nodeId = htonl(pDnode->dnodeId); + pNodes[j].nodeIp = htonl(pDnode->privateIp); + strcpy(pNodes[j].nodeName, pDnode->dnodeName); + mgmtReleaseDnode(pDnode); + } } return pVnode; diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index 648188cdae..8026b00b5b 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -11,7 +11,7 @@ #define IS_VALID_COMPRESSION(compression) (((compression) >= NO_COMPRESSION) && ((compression) <= TWO_STAGE_COMP)) #define TSDB_MIN_ID 0 #define TSDB_MAX_ID INT_MAX -#define TSDB_MIN_TABLES 10 +#define TSDB_MIN_TABLES 4 #define TSDB_MAX_TABLES 100000 #define TSDB_DEFAULT_TABLES 1000 #define TSDB_DEFAULT_DAYS_PER_FILE 10 diff --git a/src/vnode/CMakeLists.txt b/src/vnode/CMakeLists.txt index 51065b8645..6ceb83cb45 100644 --- a/src/vnode/CMakeLists.txt +++ b/src/vnode/CMakeLists.txt @@ -9,6 +9,7 @@ IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM)) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/tsdb/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/common/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/dnode/inc) + INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc) INCLUDE_DIRECTORIES(${TD_ENTERPRISE_DIR}/src/inc) INCLUDE_DIRECTORIES(inc) AUX_SOURCE_DIRECTORY(src SRC) diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index 9370e93683..029d4c8c84 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -18,10 +18,12 @@ #include "ihash.h" #include "taoserror.h" #include "taosmsg.h" +#include "tutil.h" #include "trpc.h" #include "tsdb.h" #include "ttime.h" #include "ttimer.h" +#include "cJSON.h" #include "twal.h" #include "tglobal.h" #include "dnode.h" @@ -93,21 +95,21 @@ int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) { STsdbCfg tsdbCfg = {0}; tsdbCfg.precision = pVnodeCfg->cfg.precision; - tsdbCfg.compression = -1; + tsdbCfg.compression = pVnodeCfg->cfg.compression;; tsdbCfg.tsdbId = pVnodeCfg->cfg.vgId; - tsdbCfg.maxTables = pVnodeCfg->cfg.maxSessions; + tsdbCfg.maxTables = pVnodeCfg->cfg.maxTables; tsdbCfg.daysPerFile = pVnodeCfg->cfg.daysPerFile; - tsdbCfg.minRowsPerFileBlock = -1; - tsdbCfg.maxRowsPerFileBlock = -1; - tsdbCfg.keep = -1; - tsdbCfg.maxCacheSize = -1; + tsdbCfg.minRowsPerFileBlock = pVnodeCfg->cfg.minRowsPerFileBlock; + tsdbCfg.maxRowsPerFileBlock = pVnodeCfg->cfg.maxRowsPerFileBlock; + tsdbCfg.keep = pVnodeCfg->cfg.daysToKeep; + tsdbCfg.maxCacheSize = pVnodeCfg->cfg.maxCacheSize; char tsdbDir[TSDB_FILENAME_LEN] = {0}; sprintf(tsdbDir, "%s/vnode%d/tsdb", tsVnodeDir, pVnodeCfg->cfg.vgId); code = tsdbCreateRepo(tsdbDir, &tsdbCfg, NULL); if (code != TSDB_CODE_SUCCESS) { - dError("vgId:%d, failed to create tsdb in vnode, reason:%s", pVnodeCfg->cfg.vgId, tstrerror(terrno)); - return terrno; + dError("vgId:%d, failed to create tsdb in vnode, reason:%s", pVnodeCfg->cfg.vgId, tstrerror(code)); + return TSDB_CODE_VG_INIT_FAILED; } dPrint("vgId:%d, vnode is created, clog:%d", pVnodeCfg->cfg.vgId, pVnodeCfg->cfg.commitLog); @@ -328,88 +330,235 @@ static void vnodeNotifyRole(void *ahandle, int8_t role) { } static int32_t vnodeSaveCfg(SMDCreateVnodeMsg *pVnodeCfg) { - char 
cfgFile[TSDB_FILENAME_LEN * 2] = {0}; - sprintf(cfgFile, "%s/vnode%d/config", tsVnodeDir, pVnodeCfg->cfg.vgId); - + char cfgFile[TSDB_FILENAME_LEN + 30] = {0}; + sprintf(cfgFile, "%s/vnode%d/config.json", tsVnodeDir, pVnodeCfg->cfg.vgId); FILE *fp = fopen(cfgFile, "w"); - if (!fp) return errno; - - fprintf(fp, "commitLog %d\n", pVnodeCfg->cfg.commitLog); - fprintf(fp, "wals %d\n", 3); - fprintf(fp, "arbitratorIp %d\n", pVnodeCfg->vpeerDesc[0].ip); - fprintf(fp, "quorum %d\n", 1); - fprintf(fp, "replica %d\n", pVnodeCfg->cfg.replications); - for (int32_t i = 0; i < pVnodeCfg->cfg.replications; i++) { - fprintf(fp, "index%d nodeId %d nodeIp %u name n%d\n", i, pVnodeCfg->vpeerDesc[i].dnodeId, pVnodeCfg->vpeerDesc[i].ip, pVnodeCfg->vpeerDesc[i].dnodeId); + if (!fp) { + dError("vgId:%d, failed to open vnode cfg file for write, error:%s", pVnodeCfg->cfg.vgId, strerror(errno)); + return errno; } - fclose(fp); - dTrace("vgId:%d, save vnode cfg successed", pVnodeCfg->cfg.vgId); + char ipStr[20]; + int32_t len = 0; + int32_t maxLen = 1000; + char * content = calloc(1, maxLen + 1); - return TSDB_CODE_SUCCESS; + len += snprintf(content + len, maxLen - len, "{\n"); + + len += snprintf(content + len, maxLen - len, " \"precision\": %d,\n", pVnodeCfg->cfg.precision); + len += snprintf(content + len, maxLen - len, " \"compression\": %d,\n", pVnodeCfg->cfg.compression); + len += snprintf(content + len, maxLen - len, " \"maxTables\": %d,\n", pVnodeCfg->cfg.maxTables); + len += snprintf(content + len, maxLen - len, " \"daysPerFile\": %d,\n", pVnodeCfg->cfg.daysPerFile); + len += snprintf(content + len, maxLen - len, " \"minRowsPerFileBlock\": %d,\n", pVnodeCfg->cfg.minRowsPerFileBlock); + len += snprintf(content + len, maxLen - len, " \"maxRowsPerFileBlock\": %d,\n", pVnodeCfg->cfg.maxRowsPerFileBlock); + len += snprintf(content + len, maxLen - len, " \"daysToKeep\": %d,\n", pVnodeCfg->cfg.daysToKeep); + + len += snprintf(content + len, maxLen - len, " \"maxCacheSize\": %" PRId64 ",\n", pVnodeCfg->cfg.maxCacheSize); + + len += snprintf(content + len, maxLen - len, " \"commitLog\": %d,\n", pVnodeCfg->cfg.commitLog); + len += snprintf(content + len, maxLen - len, " \"wals\": %d,\n", pVnodeCfg->cfg.wals); + + uint32_t ipInt = pVnodeCfg->cfg.arbitratorIp; + sprintf(ipStr, "%u.%u.%u.%u", ipInt & 0xFF, (ipInt >> 8) & 0xFF, (ipInt >> 16) & 0xFF, (uint8_t)(ipInt >> 24)); + len += snprintf(content + len, maxLen - len, " \"arbitratorIp\": \"%s\",\n", ipStr); + + len += snprintf(content + len, maxLen - len, " \"quorum\": %d,\n", pVnodeCfg->cfg.quorum); + len += snprintf(content + len, maxLen - len, " \"replica\": %d,\n", pVnodeCfg->cfg.replications); + + len += snprintf(content + len, maxLen - len, " \"nodeInfos\": [{\n"); + for (int32_t i = 0; i < pVnodeCfg->cfg.replications; i++) { + len += snprintf(content + len, maxLen - len, " \"nodeId\": %d,\n", pVnodeCfg->nodes[i].nodeId); + + uint32_t ipInt = pVnodeCfg->nodes[i].nodeIp; + sprintf(ipStr, "%u.%u.%u.%u", ipInt & 0xFF, (ipInt >> 8) & 0xFF, (ipInt >> 16) & 0xFF, (uint8_t)(ipInt >> 24)); + len += snprintf(content + len, maxLen - len, " \"nodeIp\": \"%s\",\n", ipStr); + + len += snprintf(content + len, maxLen - len, " \"nodeName\": \"%s\"\n", pVnodeCfg->nodes[i].nodeName); + + if (i < pVnodeCfg->cfg.replications - 1) { + len += snprintf(content + len, maxLen - len, " },{\n"); + } else { + len += snprintf(content + len, maxLen - len, " }]\n"); + } + } + len += snprintf(content + len, maxLen - len, "}\n"); + + fwrite(content, 1, len, fp); + fclose(fp); + free(content); + 
+ dPrint("vgId:%d, save vnode cfg successed", pVnodeCfg->cfg.vgId); + + return 0; } -// TODO: this is a simple implement static int32_t vnodeReadCfg(SVnodeObj *pVnode) { - char option[5][16] = {0}; - char cfgFile[TSDB_FILENAME_LEN * 2] = {0}; - sprintf(cfgFile, "%s/vnode%d/config", tsVnodeDir, pVnode->vgId); - + char cfgFile[TSDB_FILENAME_LEN + 30] = {0}; + sprintf(cfgFile, "%s/vnode%d/config.json", tsVnodeDir, pVnode->vgId); FILE *fp = fopen(cfgFile, "r"); - if (!fp) return errno; + if (!fp) { + dError("pVnode:%p vgId:%d, failed to open vnode cfg file for read, error:%s", pVnode, pVnode->vgId, strerror(errno)); + return errno; + } - int32_t commitLog = -1; - int32_t num = fscanf(fp, "%s %d", option[0], &commitLog); - if (num != 2) return TSDB_CODE_INVALID_FILE_FORMAT; - if (strcmp(option[0], "commitLog") != 0) return TSDB_CODE_INVALID_FILE_FORMAT; - if (commitLog == -1) return TSDB_CODE_INVALID_FILE_FORMAT; - pVnode->walCfg.commitLog = (int8_t)commitLog; + int ret = TSDB_CODE_OTHERS; + int maxLen = 1000; + char *content = calloc(1, maxLen + 1); + int len = fread(content, 1, maxLen, fp); + if (len <= 0) { + free(content); + fclose(fp); + dError("pVnode:%p vgId:%d, failed to vnode cfg, content is null", pVnode, pVnode->vgId); + return false; + } - int32_t wals = -1; - num = fscanf(fp, "%s %d", option[0], &wals); - if (num != 2) return TSDB_CODE_INVALID_FILE_FORMAT; - if (strcmp(option[0], "wals") != 0) return TSDB_CODE_INVALID_FILE_FORMAT; - if (wals == -1) return TSDB_CODE_INVALID_FILE_FORMAT; - pVnode->walCfg.wals = (int8_t)wals; + cJSON *root = cJSON_Parse(content); + if (root == NULL) { + dError("pVnode:%p vgId:%d, failed to vnode cfg, invalid json format", pVnode, pVnode->vgId); + goto PARSE_OVER; + } + + cJSON *precision = cJSON_GetObjectItem(root, "precision"); + if (!precision || precision->type != cJSON_Number) { + dError("pVnode:%p vgId:%d, failed to vnode cfg, precision not found", pVnode, pVnode->vgId); + goto PARSE_OVER; + } + pVnode->tsdbCfg.precision = (int8_t)precision->valueint; + + cJSON *compression = cJSON_GetObjectItem(root, "compression"); + if (!compression || compression->type != cJSON_Number) { + dError("pVnode:%p vgId:%d, failed to vnode cfg, compression not found", pVnode, pVnode->vgId); + goto PARSE_OVER; + } + pVnode->tsdbCfg.compression = (int8_t)compression->valueint; + + cJSON *maxTables = cJSON_GetObjectItem(root, "maxTables"); + if (!maxTables || maxTables->type != cJSON_Number) { + dError("pVnode:%p vgId:%d, failed to vnode cfg, maxTables not found", pVnode, pVnode->vgId); + goto PARSE_OVER; + } + pVnode->tsdbCfg.maxTables = maxTables->valueint; + + cJSON *daysPerFile = cJSON_GetObjectItem(root, "daysPerFile"); + if (!daysPerFile || daysPerFile->type != cJSON_Number) { + dError("pVnode:%p vgId:%d, failed to vnode cfg, daysPerFile not found", pVnode, pVnode->vgId); + goto PARSE_OVER; + } + pVnode->tsdbCfg.daysPerFile = daysPerFile->valueint; + + cJSON *minRowsPerFileBlock = cJSON_GetObjectItem(root, "minRowsPerFileBlock"); + if (!minRowsPerFileBlock || minRowsPerFileBlock->type != cJSON_Number) { + dError("pVnode:%p vgId:%d, failed to vnode cfg, minRowsPerFileBlock not found", pVnode, pVnode->vgId); + goto PARSE_OVER; + } + pVnode->tsdbCfg.minRowsPerFileBlock = minRowsPerFileBlock->valueint; + + cJSON *maxRowsPerFileBlock = cJSON_GetObjectItem(root, "maxRowsPerFileBlock"); + if (!maxRowsPerFileBlock || maxRowsPerFileBlock->type != cJSON_Number) { + dError("pVnode:%p vgId:%d, failed to vnode cfg, maxRowsPerFileBlock not found", pVnode, pVnode->vgId); + goto 
PARSE_OVER; + } + pVnode->tsdbCfg.maxRowsPerFileBlock = maxRowsPerFileBlock->valueint; + + cJSON *keep = cJSON_GetObjectItem(root, "keep"); + if (!keep || keep->type != cJSON_Number) { + dError("pVnode:%p vgId:%d, failed to vnode cfg, keep not found", pVnode, pVnode->vgId); + goto PARSE_OVER; + } + pVnode->tsdbCfg.keep = keep->valueint; + + cJSON *maxCacheSize = cJSON_GetObjectItem(root, "maxCacheSize"); + if (!maxCacheSize || maxCacheSize->type != cJSON_Number) { + dError("pVnode:%p vgId:%d, failed to vnode cfg, maxCacheSize not found", pVnode, pVnode->vgId); + goto PARSE_OVER; + } + pVnode->tsdbCfg.maxCacheSize = maxCacheSize->valueint; + + cJSON *commitLog = cJSON_GetObjectItem(root, "commitLog"); + if (!commitLog || commitLog->type != cJSON_Number) { + dError("pVnode:%p vgId:%d, failed to vnode cfg, commitLog not found", pVnode, pVnode->vgId); + goto PARSE_OVER; + } + pVnode->walCfg.commitLog = (int8_t)commitLog->valueint; + + cJSON *wals = cJSON_GetObjectItem(root, "wals"); + if (!wals || wals->type != cJSON_Number) { + dError("pVnode:%p vgId:%d, failed to vnode cfg, wals not found", pVnode, pVnode->vgId); + goto PARSE_OVER; + } + pVnode->walCfg.wals = (int8_t)wals->valueint; pVnode->walCfg.keep = 0; - int32_t arbitratorIp = -1; - num = fscanf(fp, "%s %u", option[0], &arbitratorIp); - if (num != 2) return TSDB_CODE_INVALID_FILE_FORMAT; - if (strcmp(option[0], "arbitratorIp") != 0) return TSDB_CODE_INVALID_FILE_FORMAT; - if (arbitratorIp == -1) return TSDB_CODE_INVALID_FILE_FORMAT; - pVnode->syncCfg.arbitratorIp = arbitratorIp; + cJSON *arbitratorIp = cJSON_GetObjectItem(root, "arbitratorIp"); + if (!arbitratorIp || arbitratorIp->type != cJSON_String || arbitratorIp->valuestring == NULL) { + dError("pVnode:%p vgId:%d, failed to vnode cfg, arbitratorIp not found", pVnode, pVnode->vgId); + goto PARSE_OVER; + } + pVnode->syncCfg.arbitratorIp = inet_addr(arbitratorIp->valuestring); - int32_t quorum = -1; - num = fscanf(fp, "%s %d", option[0], &quorum); - if (num != 2) return TSDB_CODE_INVALID_FILE_FORMAT; - if (strcmp(option[0], "quorum") != 0) return TSDB_CODE_INVALID_FILE_FORMAT; - if (quorum == -1) return TSDB_CODE_INVALID_FILE_FORMAT; - pVnode->syncCfg.quorum = (int8_t)quorum; + cJSON *quorum = cJSON_GetObjectItem(root, "quorum"); + if (!quorum || quorum->type != cJSON_Number) { + dError("failed to vnode cfg, quorum not found", pVnode, pVnode->vgId); + goto PARSE_OVER; + } + pVnode->syncCfg.quorum = (int8_t)quorum->valueint; - int32_t replica = -1; - num = fscanf(fp, "%s %d", option[0], &replica); - if (num != 2) return TSDB_CODE_INVALID_FILE_FORMAT; - if (strcmp(option[0], "replica") != 0) return TSDB_CODE_INVALID_FILE_FORMAT; - if (replica == -1) return TSDB_CODE_INVALID_FILE_FORMAT; - pVnode->syncCfg.replica = (int8_t)replica; + cJSON *replica = cJSON_GetObjectItem(root, "replica"); + if (!replica || replica->type != cJSON_Number) { + dError("pVnode:%p vgId:%d, failed to vnode cfg, replica not found", pVnode, pVnode->vgId); + goto PARSE_OVER; + } + pVnode->syncCfg.replica = (int8_t)replica->valueint; - for (int32_t i = 0; i < replica; ++i) { - int32_t dnodeId = -1; - uint32_t dnodeIp = -1; - num = fscanf(fp, "%s %s %d %s %u %s %s", option[0], option[1], &dnodeId, option[2], &dnodeIp, option[3], pVnode->syncCfg.nodeInfo[i].name); - if (num != 7) return TSDB_CODE_INVALID_FILE_FORMAT; - if (strcmp(option[1], "nodeId") != 0) return TSDB_CODE_INVALID_FILE_FORMAT; - if (strcmp(option[2], "nodeIp") != 0) return TSDB_CODE_INVALID_FILE_FORMAT; - if (strcmp(option[3], "name") != 0) return 
TSDB_CODE_INVALID_FILE_FORMAT; - if (dnodeId == -1) return TSDB_CODE_INVALID_FILE_FORMAT; - if (dnodeIp == -1) return TSDB_CODE_INVALID_FILE_FORMAT; - pVnode->syncCfg.nodeInfo[i].nodeId = dnodeId; - pVnode->syncCfg.nodeInfo[i].nodeIp = dnodeIp; + cJSON *nodeInfos = cJSON_GetObjectItem(root, "nodeInfos"); + if (!nodeInfos || nodeInfos->type != cJSON_Array) { + dError("pVnode:%p vgId:%d, failed to vnode cfg, nodeInfos not found", pVnode, pVnode->vgId); + goto PARSE_OVER; } - fclose(fp); - dTrace("pVnode:%p vgId:%d, read vnode cfg successed", pVnode, pVnode->vgId); + int size = cJSON_GetArraySize(nodeInfos); + if (size != pVnode->syncCfg.replica) { + dError("pVnode:%p vgId:%d, failed to vnode cfg, nodeInfos size not matched", pVnode, pVnode->vgId); + goto PARSE_OVER; + } - return TSDB_CODE_SUCCESS; + for (int i = 0; i < size; ++i) { + cJSON *nodeInfo = cJSON_GetArrayItem(nodeInfos, i); + if (nodeInfo == NULL) continue; + + cJSON *nodeId = cJSON_GetObjectItem(nodeInfo, "nodeId"); + if (!nodeId || nodeId->type != cJSON_Number) { + dError("pVnode:%p vgId:%d, failed to vnode cfg, nodeId not found", pVnode, pVnode->vgId); + goto PARSE_OVER; + } + pVnode->syncCfg.nodeInfo[i].nodeId = nodeId->valueint; + + cJSON *nodeIp = cJSON_GetObjectItem(nodeInfo, "nodeIp"); + if (!nodeIp || nodeIp->type != cJSON_String || nodeIp->valuestring == NULL) { + dError("pVnode:%p vgId:%d, failed to vnode cfg, nodeIp not found", pVnode, pVnode->vgId); + goto PARSE_OVER; + } + pVnode->syncCfg.nodeInfo[i].nodeIp = inet_addr(nodeIp->valuestring); + + cJSON *nodeName = cJSON_GetObjectItem(nodeInfo, "nodeName"); + if (!nodeName || nodeName->type != cJSON_String || nodeName->valuestring == NULL) { + dError("pVnode:%p vgId:%d, failed to vnode cfg, nodeName not found", pVnode, pVnode->vgId); + goto PARSE_OVER; + } + strncpy(pVnode->syncCfg.nodeInfo[i].name, nodeName->valuestring, TSDB_NODE_NAME_LEN); + } + + ret = 0; + + dPrint("pVnode:%p vgId:%d, read vnode cfg successed, replcia:%d", pVnode, pVnode->vgId, pVnode->syncCfg.replica); + for (int32_t i = 0; i < pVnode->syncCfg.replica; i++) { + dPrint("pVnode:%p vgId:%d, dnode:%d, ip:%s name:%s", pVnode, pVnode->vgId, pVnode->syncCfg.nodeInfo[i].nodeId, + taosIpStr(pVnode->syncCfg.nodeInfo[i].nodeIp), pVnode->syncCfg.nodeInfo[i].name); + } + +PARSE_OVER: + free(content); + cJSON_Delete(root); + fclose(fp); + return ret; } diff --git a/tests/script/sh/deploy.sh b/tests/script/sh/deploy.sh index 6c4ada50fe..b1aa7c6382 100755 --- a/tests/script/sh/deploy.sh +++ b/tests/script/sh/deploy.sh @@ -93,7 +93,7 @@ echo "privateIp $NODE_IP" >> $TAOS_CFG echo "dDebugFlag 199" >> $TAOS_CFG echo "mDebugFlag 199" >> $TAOS_CFG echo "sdbDebugFlag 199" >> $TAOS_CFG -echo "rpcDebugFlag 135" >> $TAOS_CFG +echo "rpcDebugFlag 131" >> $TAOS_CFG echo "tmrDebugFlag 131" >> $TAOS_CFG echo "cDebugFlag 135" >> $TAOS_CFG echo "httpDebugFlag 131" >> $TAOS_CFG @@ -105,7 +105,7 @@ echo "numOfThreadsPerCore 2.0" >> $TAOS_CFG echo "defaultPass taosdata" >> $TAOS_CFG echo "numOfLogLines 100000000" >> $TAOS_CFG echo "mgmtEqualVnodeNum 0" >> $TAOS_CFG -echo "clog 0" >> $TAOS_CFG +echo "clog 2" >> $TAOS_CFG echo "statusInterval 1" >> $TAOS_CFG echo "numOfTotalVnodes 4" >> $TAOS_CFG echo "asyncLog 0" >> $TAOS_CFG diff --git a/tests/script/test.sh b/tests/script/test.sh index bce6291fbe..b9660458b0 100755 --- a/tests/script/test.sh +++ b/tests/script/test.sh @@ -34,6 +34,8 @@ cd . 
sh/ip.sh -i 1 -s up > /dev/null 2>&1 & sh/ip.sh -i 2 -s up > /dev/null 2>&1 & sh/ip.sh -i 3 -s up > /dev/null 2>&1 & +sh/ip.sh -i 4 -s up > /dev/null 2>&1 & +sh/ip.sh -i 5 -s up > /dev/null 2>&1 & # Get responsible directories CODE_DIR=`dirname $0` From d099ab73b9131824af53106062a8428a99f843cd Mon Sep 17 00:00:00 2001 From: slguan Date: Tue, 21 Apr 2020 23:21:13 +0800 Subject: [PATCH 10/18] remove un necessary vload --- src/mnode/inc/mgmtDef.h | 48 +++++++------- src/mnode/inc/mgmtDnode.h | 3 +- src/mnode/inc/mgmtUser.h | 3 +- src/mnode/inc/mgmtVgroup.h | 5 +- src/mnode/src/mgmtBalance.c | 5 +- src/mnode/src/mgmtDb.c | 4 +- src/mnode/src/mgmtDnode.c | 124 +++++++++++++++++------------------- src/mnode/src/mgmtMnode.c | 6 +- src/mnode/src/mgmtProfile.c | 6 +- src/mnode/src/mgmtShell.c | 4 +- src/mnode/src/mgmtTable.c | 16 ++--- src/mnode/src/mgmtUser.c | 22 ++++--- src/mnode/src/mgmtVgroup.c | 107 +++++++++++++++---------------- src/vnode/src/vnodeMain.c | 46 ++++++------- 14 files changed, 194 insertions(+), 205 deletions(-) diff --git a/src/mnode/inc/mgmtDef.h b/src/mnode/inc/mgmtDef.h index a31500750e..7ed4507ee2 100644 --- a/src/mnode/inc/mgmtDef.h +++ b/src/mnode/inc/mgmtDef.h @@ -51,7 +51,6 @@ typedef struct SDnodeObj { int8_t reserved[15]; int8_t updateEnd[1]; int32_t refCount; - SVnodeLoad vload[TSDB_MAX_VNODES]; uint32_t moduleStatus; uint32_t lastReboot; // time stamp for last reboot float score; // calc in balance function @@ -72,13 +71,6 @@ typedef struct SMnodeObj { SDnodeObj *pDnode; } SMnodeObj; - -typedef struct { - int32_t dnodeId; - uint32_t privateIp; - uint32_t publicIp; -} SVnodeGid; - typedef struct { char tableId[TSDB_TABLE_ID_LEN + 1]; int8_t type; @@ -120,24 +112,34 @@ typedef struct { SSuperTableObj *superTable; } SChildTableObj; +typedef struct { + int32_t dnodeId; + int8_t role; + int8_t reserved[3]; + SDnodeObj* pDnode; +} SVnodeGid; + typedef struct SVgObj { - uint32_t vgId; - char dbName[TSDB_DB_NAME_LEN + 1]; - int64_t createdTime; - SVnodeGid vnodeGid[TSDB_VNODES_SUPPORT]; - int32_t numOfVnodes; - int32_t lbDnodeId; - int32_t lbTime; - int8_t status; - int8_t inUse; - int8_t reserved[13]; - int8_t updateEnd[1]; - int32_t refCount; + uint32_t vgId; + char dbName[TSDB_DB_NAME_LEN + 1]; + int64_t createdTime; + SVnodeGid vnodeGid[TSDB_VNODES_SUPPORT]; + int32_t numOfVnodes; + int32_t lbDnodeId; + int32_t lbTime; + int8_t status; + int8_t inUse; + int8_t reserved[13]; + int8_t updateEnd[1]; + int32_t refCount; struct SVgObj *prev, *next; struct SDbObj *pDb; - int32_t numOfTables; - void * idPool; - SChildTableObj ** tableList; + int32_t numOfTables; + int64_t totalStorage; + int64_t compStorage; + int64_t pointsWritten; + void * idPool; + SChildTableObj **tableList; } SVgObj; typedef struct SDbObj { diff --git a/src/mnode/inc/mgmtDnode.h b/src/mnode/inc/mgmtDnode.h index 48111d3110..c0e04aae05 100644 --- a/src/mnode/inc/mgmtDnode.h +++ b/src/mnode/inc/mgmtDnode.h @@ -35,7 +35,8 @@ void mgmtMonitorDnodeModule(); int32_t mgmtGetDnodesNum(); void * mgmtGetNextDnode(void *pNode, SDnodeObj **pDnode); -void mgmtReleaseDnode(SDnodeObj *pDnode); +void mgmtIncDnodeRef(SDnodeObj *pDnode); +void mgmtDecDnodeRef(SDnodeObj *pDnode); void * mgmtGetDnode(int32_t dnodeId); void * mgmtGetDnodeByIp(uint32_t ip); void mgmtUpdateDnode(SDnodeObj *pDnode); diff --git a/src/mnode/inc/mgmtUser.h b/src/mnode/inc/mgmtUser.h index b6a71f6efd..8f1cf5d450 100644 --- a/src/mnode/inc/mgmtUser.h +++ b/src/mnode/inc/mgmtUser.h @@ -24,7 +24,8 @@ extern "C" { int32_t mgmtInitUsers(); void 
mgmtCleanUpUsers(); SUserObj *mgmtGetUser(char *name); -void mgmtReleaseUser(SUserObj *pUser); +void mgmtIncUserRef(SUserObj *pUser); +void mgmtDecUserRef(SUserObj *pUser); SUserObj *mgmtGetUserFromConn(void *pConn, bool *usePublicIp); int32_t mgmtCreateUser(SAcctObj *pAcct, char *name, char *pass); void mgmtDropAllUsers(SAcctObj *pAcct); diff --git a/src/mnode/inc/mgmtVgroup.h b/src/mnode/inc/mgmtVgroup.h index 3da002026b..534e640c4d 100644 --- a/src/mnode/inc/mgmtVgroup.h +++ b/src/mnode/inc/mgmtVgroup.h @@ -30,12 +30,13 @@ enum _TSDB_VG_STATUS { int32_t mgmtInitVgroups(); void mgmtCleanUpVgroups(); SVgObj *mgmtGetVgroup(int32_t vgId); -void mgmtReleaseVgroup(SVgObj *pVgroup); +void mgmtIncVgroupRef(SVgObj *pVgroup); +void mgmtDecVgroupRef(SVgObj *pVgroup); void mgmtDropAllVgroups(SDbObj *pDropDb); void * mgmtGetNextVgroup(void *pNode, SVgObj **pVgroup); void mgmtUpdateVgroup(SVgObj *pVgroup); -void mgmtUpdateVgroupStatus(SVgObj *pVgroup, int32_t dnodeId, SVnodeLoad *pVload); +void mgmtUpdateVgroupStatus(SVgObj *pVgroup, SDnodeObj *dnodeId, SVnodeLoad *pVload); void mgmtCreateVgroup(SQueuedMsg *pMsg, SDbObj *pDb); void mgmtDropVgroup(SVgObj *pVgroup, void *ahandle); diff --git a/src/mnode/src/mgmtBalance.c b/src/mnode/src/mgmtBalance.c index 8ca651be2c..7b85dc08e3 100644 --- a/src/mnode/src/mgmtBalance.c +++ b/src/mnode/src/mgmtBalance.c @@ -47,7 +47,7 @@ int32_t balanceAllocVnodes(SVgObj *pVgroup) { vnodeUsage = usage; } } - mgmtReleaseDnode(pDnode); + mgmtDecDnodeRef(pDnode); } if (pSelDnode == NULL) { @@ -56,8 +56,7 @@ int32_t balanceAllocVnodes(SVgObj *pVgroup) { } pVgroup->vnodeGid[0].dnodeId = pSelDnode->dnodeId; - pVgroup->vnodeGid[0].privateIp = pSelDnode->privateIp; - pVgroup->vnodeGid[0].publicIp = pSelDnode->publicIp; + pVgroup->vnodeGid[0].pDnode = pSelDnode; mTrace("dnode:%d, alloc one vnode to vgroup, openVnodes:%d", pSelDnode->dnodeId, pSelDnode->openVnodes); return TSDB_CODE_SUCCESS; diff --git a/src/mnode/src/mgmtDb.c b/src/mnode/src/mgmtDb.c index 805546e15e..d66b949421 100644 --- a/src/mnode/src/mgmtDb.c +++ b/src/mnode/src/mgmtDb.c @@ -527,7 +527,7 @@ static int32_t mgmtGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1]; pShow->numOfRows = pUser->pAcct->acctInfo.numOfDbs; - mgmtReleaseUser(pUser); + mgmtDecUserRef(pUser); return 0; } @@ -647,7 +647,7 @@ static int32_t mgmtRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void * } pShow->numOfReads += numOfRows; - mgmtReleaseUser(pUser); + mgmtDecUserRef(pUser); return numOfRows; } diff --git a/src/mnode/src/mgmtDnode.c b/src/mnode/src/mgmtDnode.c index dd38040147..4db7415684 100644 --- a/src/mnode/src/mgmtDnode.c +++ b/src/mnode/src/mgmtDnode.c @@ -23,6 +23,7 @@ #include "tutil.h" #include "tsocket.h" #include "tbalance.h" +#include "tsync.h" #include "dnode.h" #include "mgmtDef.h" #include "mgmtLog.h" @@ -139,7 +140,7 @@ static int32_t mgmtDnodeActionRestored() { mgmtCreateDnode(ip); SDnodeObj *pDnode = mgmtGetDnodeByIp(ip); mgmtAddMnode(pDnode->dnodeId); - mgmtReleaseDnode(pDnode); + mgmtDecDnodeRef(pDnode); } return TSDB_CODE_SUCCESS; @@ -215,13 +216,17 @@ void *mgmtGetDnodeByIp(uint32_t ip) { if (ip == pDnode->privateIp) { return pDnode; } - mgmtReleaseDnode(pDnode); + mgmtDecDnodeRef(pDnode); } return NULL; } -void mgmtReleaseDnode(SDnodeObj *pDnode) { +void mgmtIncDnodeRef(SDnodeObj *pDnode) { + sdbIncRef(tsDnodeSdb, pDnode); +} + +void mgmtDecDnodeRef(SDnodeObj *pDnode) { sdbDecRef(tsDnodeSdb, pDnode); } @@ -326,19 +331,21 @@ void 
mgmtProcessDnodeStatusMsg(SRpcMsg *rpcMsg) { int32_t openVnodes = htons(pStatus->openVnodes); for (int32_t j = 0; j < openVnodes; ++j) { SVnodeLoad *pVload = &pStatus->load[j]; - pDnode->vload[j].vgId = htonl(pVload->vgId); - pDnode->vload[j].totalStorage = htobe64(pVload->totalStorage); - pDnode->vload[j].compStorage = htobe64(pVload->compStorage); - pDnode->vload[j].pointsWritten = htobe64(pVload->pointsWritten); - - SVgObj *pVgroup = mgmtGetVgroup(pDnode->vload[j].vgId); + pVload->vgId = htonl(pVload->vgId); + + SVgObj *pVgroup = mgmtGetVgroup(pVload->vgId); if (pVgroup == NULL) { SRpcIpSet ipSet = mgmtGetIpSetFromIp(pDnode->privateIp); - mPrint("dnode:%d, vgroup:%d not exist in mnode, drop it", pDnode->dnodeId, pDnode->vload[j].vgId); - mgmtSendDropVnodeMsg(pDnode->vload[j].vgId, &ipSet, NULL); + mPrint("dnode:%d, vgroup:%d not exist in mnode, drop it", pDnode->dnodeId, pVload->vgId); + mgmtSendDropVnodeMsg(pVload->vgId, &ipSet, NULL); } else { - mgmtUpdateVgroupStatus(pVgroup, pDnode->dnodeId, pVload); - mgmtReleaseVgroup(pVgroup); + mgmtUpdateVgroupStatus(pVgroup, pDnode, pVload); + if (pVload->role == TAOS_SYNC_ROLE_MASTER) { + pVgroup->totalStorage = htobe64(pVload->totalStorage); + pVgroup->compStorage = htobe64(pVload->compStorage); + pVgroup->pointsWritten = htobe64(pVload->pointsWritten); + } + mgmtDecVgroupRef(pVgroup); } } @@ -348,7 +355,7 @@ void mgmtProcessDnodeStatusMsg(SRpcMsg *rpcMsg) { balanceNotify(); } - mgmtReleaseDnode(pDnode); + mgmtDecDnodeRef(pDnode); int32_t contLen = sizeof(SDMStatusRsp) + TSDB_MAX_VNODES * sizeof(SDMVgroupAccess); SDMStatusRsp *pRsp = rpcMallocCont(contLen); @@ -554,7 +561,7 @@ static int32_t mgmtGetDnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pCo pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1]; pShow->pNode = NULL; - mgmtReleaseUser(pUser); + mgmtDecUserRef(pUser); return 0; } @@ -604,7 +611,7 @@ static int32_t mgmtRetrieveDnodes(SShowObj *pShow, char *data, int32_t rows, voi numOfRows++; - mgmtReleaseDnode(pDnode); + mgmtDecDnodeRef(pDnode); } pShow->numOfReads += numOfRows; @@ -661,7 +668,7 @@ static int32_t mgmtGetModuleMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC pShow->numOfRows = mgmtGetDnodesNum() * TSDB_MOD_MAX; pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1]; pShow->pNode = NULL; - mgmtReleaseUser(pUser); + mgmtDecUserRef(pUser); return 0; } @@ -712,7 +719,7 @@ int32_t mgmtRetrieveModules(SShowObj *pShow, char *data, int32_t rows, void *pCo numOfRows++; } - mgmtReleaseDnode(pDnode); + mgmtDecDnodeRef(pDnode); } pShow->numOfReads += numOfRows; @@ -762,7 +769,7 @@ static int32_t mgmtGetConfigMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1]; pShow->pNode = NULL; - mgmtReleaseUser(pUser); + mgmtDecUserRef(pUser); return 0; } @@ -840,35 +847,18 @@ static int32_t mgmtGetVnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pCo if (pShow->payloadLen > 0 ) { uint32_t ip = ip2uint(pShow->payload); pDnode = mgmtGetDnodeByIp(ip); - if (NULL == pDnode) { - return TSDB_CODE_NODE_OFFLINE; - } - - SVnodeLoad* pVnode; - pShow->numOfRows = 0; - for (int32_t i = 0 ; i < TSDB_MAX_VNODES; i++) { - pVnode = &pDnode->vload[i]; - if (0 != pVnode->vgId) { - pShow->numOfRows++; - } - } - - pShow->pNode = pDnode; } else { - while (true) { - pShow->pNode = mgmtGetNextDnode(pShow->pNode, (SDnodeObj **)&pDnode); - if (pDnode == NULL) break; - pShow->numOfRows += pDnode->openVnodes; + mgmtGetNextDnode(NULL, (SDnodeObj **)&pDnode); + } - if 
(0 == pShow->numOfRows) return TSDB_CODE_NODE_OFFLINE; - } - - pShow->pNode = NULL; - } + if (pDnode != NULL) { + pShow->numOfRows += pDnode->openVnodes; + mgmtDecDnodeRef(pDnode); + } pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1]; - mgmtReleaseDnode(pDnode); - mgmtReleaseUser(pUser); + pShow->pNode = pDnode; + mgmtDecUserRef(pUser); return 0; } @@ -881,35 +871,35 @@ static int32_t mgmtRetrieveVnodes(SShowObj *pShow, char *data, int32_t rows, voi if (0 == rows) return 0; - if (pShow->payloadLen) { - // output the vnodes info of the designated dnode. And output all vnodes of this dnode, instead of rows (max 100) - pDnode = (SDnodeObj *)(pShow->pNode); - if (pDnode != NULL) { - SVnodeLoad* pVnode; - for (int32_t i = 0 ; i < TSDB_MAX_VNODES; i++) { - pVnode = &pDnode->vload[i]; - if (0 == pVnode->vgId) { - continue; + pDnode = (SDnodeObj *)(pShow->pNode); + if (pDnode != NULL) { + void *pNode = NULL; + SVgObj *pVgroup; + while (1) { + pNode = mgmtGetNextVgroup(pNode, &pVgroup); + if (pVgroup == NULL) break; + + for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) { + SVnodeGid *pVgid = &pVgroup->vnodeGid[i]; + if (pVgid->pDnode == pDnode) { + cols = 0; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(uint32_t *)pWrite = pVgroup->vgId; + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + strcpy(pWrite, mgmtGetMnodeRoleStr(pVgid->role)); + cols++; } - - cols = 0; - - pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; - *(uint32_t *)pWrite = pVnode->vgId; - cols++; - - pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; - strcpy(pWrite, pVnode->status ? "ready" : "offline"); - cols++; - - numOfRows++; } + + mgmtDecVgroupRef(pVgroup); } } else { - // TODO: output all vnodes of all dnodes numOfRows = 0; } - + pShow->numOfReads += numOfRows; return numOfRows; } diff --git a/src/mnode/src/mgmtMnode.c b/src/mnode/src/mgmtMnode.c index 05de2ecfdb..3eae218660 100644 --- a/src/mnode/src/mgmtMnode.c +++ b/src/mnode/src/mgmtMnode.c @@ -47,7 +47,7 @@ static int32_t mgmtMnodeActionInsert(SSdbOper *pOper) { pMnode->pDnode = pDnode; pDnode->isMgmt = true; - mgmtReleaseDnode(pDnode); + mgmtDecDnodeRef(pDnode); return TSDB_CODE_SUCCESS; } @@ -58,7 +58,7 @@ static int32_t mgmtMnodeActionDelete(SSdbOper *pOper) { SDnodeObj *pDnode = mgmtGetDnode(pMnode->mnodeId); if (pDnode == NULL) return TSDB_CODE_DNODE_NOT_EXIST; pDnode->isMgmt = false; - mgmtReleaseDnode(pDnode); + mgmtDecDnodeRef(pDnode); mTrace("mnode:%d, is dropped from sdb", pMnode->mnodeId); return TSDB_CODE_SUCCESS; @@ -314,7 +314,7 @@ static int32_t mgmtGetMnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pCo pShow->numOfRows = mgmtGetMnodesNum(); pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1]; pShow->pNode = NULL; - mgmtReleaseUser(pUser); + mgmtDecUserRef(pUser); return 0; } diff --git a/src/mnode/src/mgmtProfile.c b/src/mnode/src/mgmtProfile.c index 1f557681ff..47bf805138 100644 --- a/src/mnode/src/mgmtProfile.c +++ b/src/mnode/src/mgmtProfile.c @@ -790,12 +790,12 @@ void *mgmtMallocQueuedMsg(SRpcMsg *rpcMsg) { void mgmtFreeQueuedMsg(SQueuedMsg *pMsg) { if (pMsg != NULL) { rpcFreeCont(pMsg->pCont); - if (pMsg->pUser) mgmtReleaseUser(pMsg->pUser); + if (pMsg->pUser) mgmtDecUserRef(pMsg->pUser); if (pMsg->pDb) mgmtDecDbRef(pMsg->pDb); - if (pMsg->pVgroup) mgmtReleaseVgroup(pMsg->pVgroup); + if (pMsg->pVgroup) mgmtDecVgroupRef(pMsg->pVgroup); if (pMsg->pTable) 
mgmtDecTableRef(pMsg->pTable); if (pMsg->pAcct) mgmtDecAcctRef(pMsg->pAcct); - if (pMsg->pDnode) mgmtReleaseDnode(pMsg->pDnode); + if (pMsg->pDnode) mgmtDecDnodeRef(pMsg->pDnode); free(pMsg); } } diff --git a/src/mnode/src/mgmtShell.c b/src/mnode/src/mgmtShell.c index e2f393e6e9..54c66c5ba1 100644 --- a/src/mnode/src/mgmtShell.c +++ b/src/mnode/src/mgmtShell.c @@ -371,11 +371,11 @@ static int mgmtShellRetriveAuth(char *user, char *spi, char *encrypt, char *secr SUserObj *pUser = mgmtGetUser(user); if (pUser == NULL) { *secret = 0; - mgmtReleaseUser(pUser); + mgmtDecUserRef(pUser); return TSDB_CODE_INVALID_USER; } else { memcpy(secret, pUser->pass, TSDB_KEY_LEN); - mgmtReleaseUser(pUser); + mgmtDecUserRef(pUser); return TSDB_CODE_SUCCESS; } } diff --git a/src/mnode/src/mgmtTable.c b/src/mnode/src/mgmtTable.c index 20d8459943..1209b489d6 100644 --- a/src/mnode/src/mgmtTable.c +++ b/src/mnode/src/mgmtTable.c @@ -97,7 +97,7 @@ static int32_t mgmtChildTableActionInsert(SSdbOper *pOper) { mError("ctable:%s, not in vgroup:%d", pTable->info.tableId, pTable->vgId); return TSDB_CODE_INVALID_VGROUP_ID; } - mgmtReleaseVgroup(pVgroup); + mgmtDecVgroupRef(pVgroup); SDbObj *pDb = mgmtGetDb(pVgroup->dbName); if (pDb == NULL) { @@ -139,7 +139,7 @@ static int32_t mgmtChildTableActionDelete(SSdbOper *pOper) { if (pVgroup == NULL) { return TSDB_CODE_INVALID_VGROUP_ID; } - mgmtReleaseVgroup(pVgroup); + mgmtDecVgroupRef(pVgroup); SDbObj *pDb = mgmtGetDb(pVgroup->dbName); if (pDb == NULL) { @@ -275,7 +275,7 @@ static int32_t mgmtChildTableActionRestored() { pNode = pLastNode; continue; } - mgmtReleaseVgroup(pVgroup); + mgmtDecVgroupRef(pVgroup); if (strcmp(pVgroup->dbName, pDb->name) != 0) { mError("ctable:%s, db:%s not match with vgroup:%d db:%s sid:%d, discard it", @@ -1194,17 +1194,15 @@ static void mgmtProcessSuperTableVgroupMsg(SQueuedMsg *pMsg) { pRsp->vgroups[vg].vgId = htonl(vgId); for (int32_t vn = 0; vn < pVgroup->numOfVnodes; ++vn) { - SDnodeObj *pDnode = mgmtGetDnode(pVgroup->vnodeGid[vn].dnodeId); + SDnodeObj *pDnode = pVgroup->vnodeGid[vn].pDnode; if (pDnode == NULL) break; pRsp->vgroups[vg].ipAddr[vn].ip = htonl(pDnode->privateIp); pRsp->vgroups[vg].ipAddr[vn].port = htons(tsDnodeShellPort); pRsp->vgroups[vg].numOfIps++; - - mgmtReleaseDnode(pDnode); } - mgmtReleaseVgroup(pVgroup); + mgmtDecVgroupRef(pVgroup); } pRsp->numOfVgroups = htonl(vg); @@ -1613,7 +1611,7 @@ static int32_t mgmtDoGetChildTableMeta(SQueuedMsg *pMsg, STableMetaMsg *pMeta) { pMeta->vgroup.ipAddr[i].port = htonl(tsDnodeShellPort); } pMeta->vgroup.numOfIps++; - mgmtReleaseDnode(pDnode); + mgmtDecDnodeRef(pDnode); } pMeta->vgroup.vgId = htonl(pVgroup->vgId); @@ -1742,7 +1740,7 @@ static SChildTableObj* mgmtGetTableByPos(uint32_t dnodeId, int32_t vnode, int32_ SChildTableObj *pTable = pVgroup->tableList[sid]; mgmtIncTableRef((STableObj *)pTable); - mgmtReleaseVgroup(pVgroup); + mgmtDecVgroupRef(pVgroup); return pTable; } diff --git a/src/mnode/src/mgmtUser.c b/src/mnode/src/mgmtUser.c index fb65d61a35..240704f9ec 100644 --- a/src/mnode/src/mgmtUser.c +++ b/src/mnode/src/mgmtUser.c @@ -150,7 +150,11 @@ SUserObj *mgmtGetUser(char *name) { return (SUserObj *)sdbGetRow(tsUserSdb, name); } -void mgmtReleaseUser(SUserObj *pUser) { +void mgmtIncUserRef(SUserObj *pUser) { + return sdbIncRef(tsUserSdb, pUser); +} + +void mgmtDecUserRef(SUserObj *pUser) { return sdbDecRef(tsUserSdb, pUser); } @@ -183,7 +187,7 @@ int32_t mgmtCreateUser(SAcctObj *pAcct, char *name, char *pass) { SUserObj *pUser = mgmtGetUser(name); if (pUser != NULL) { 
mTrace("user:%s is already there", name); - mgmtReleaseUser(pUser); + mgmtDecUserRef(pUser); return TSDB_CODE_USER_ALREADY_EXIST; } @@ -273,7 +277,7 @@ static int32_t mgmtGetUserMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pCon pShow->numOfRows = pUser->pAcct->acctInfo.numOfUsers; pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1]; - mgmtReleaseUser(pUser); + mgmtDecUserRef(pUser); return 0; } @@ -308,7 +312,7 @@ static int32_t mgmtRetrieveUsers(SShowObj *pShow, char *data, int32_t rows, void cols++; numOfRows++; - mgmtReleaseUser(pUser); + mgmtDecUserRef(pUser); } pShow->numOfReads += numOfRows; return numOfRows; @@ -356,7 +360,7 @@ static void mgmtProcessAlterUserMsg(SQueuedMsg *pMsg) { if (strcmp(pUser->user, "monitor") == 0 || (strcmp(pUser->user + 1, pUser->acct) == 0 && pUser->user[0] == '_')) { mgmtSendSimpleResp(pMsg->thandle, TSDB_CODE_NO_RIGHTS); - mgmtReleaseUser(pUser); + mgmtDecUserRef(pUser); return; } @@ -432,7 +436,7 @@ static void mgmtProcessAlterUserMsg(SQueuedMsg *pMsg) { mgmtSendSimpleResp(pMsg->thandle, TSDB_CODE_NO_RIGHTS); } - mgmtReleaseUser(pUser); + mgmtDecUserRef(pUser); } static void mgmtProcessDropUserMsg(SQueuedMsg *pMsg) { @@ -449,7 +453,7 @@ static void mgmtProcessDropUserMsg(SQueuedMsg *pMsg) { if (strcmp(pUser->user, "monitor") == 0 || strcmp(pUser->user, pUser->acct) == 0 || (strcmp(pUser->user + 1, pUser->acct) == 0 && pUser->user[0] == '_')) { mgmtSendSimpleResp(pMsg->thandle, TSDB_CODE_NO_RIGHTS); - mgmtReleaseUser(pUser); + mgmtDecUserRef(pUser); return ; } @@ -478,7 +482,7 @@ static void mgmtProcessDropUserMsg(SQueuedMsg *pMsg) { } mgmtSendSimpleResp(pMsg->thandle, code); - mgmtReleaseUser(pUser); + mgmtDecUserRef(pUser); } void mgmtDropAllUsers(SAcctObj *pAcct) { @@ -504,7 +508,7 @@ void mgmtDropAllUsers(SAcctObj *pAcct) { numOfUsers++; } - mgmtReleaseUser(pUser); + mgmtDecUserRef(pUser); } mTrace("acct:%s, all users:%d is dropped from sdb", pAcct->user, numOfUsers); diff --git a/src/mnode/src/mgmtVgroup.c b/src/mnode/src/mgmtVgroup.c index 3ce4f41a51..3bccf385f1 100644 --- a/src/mnode/src/mgmtVgroup.c +++ b/src/mnode/src/mgmtVgroup.c @@ -68,7 +68,6 @@ static int32_t mgmtVgroupActionInsert(SSdbOper *pOper) { if (pDb == NULL) { return TSDB_CODE_INVALID_DB; } - mgmtDecDbRef(pDb); pVgroup->pDb = pDb; pVgroup->prev = NULL; @@ -91,15 +90,13 @@ static int32_t mgmtVgroupActionInsert(SSdbOper *pOper) { for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) { SDnodeObj *pDnode = mgmtGetDnode(pVgroup->vnodeGid[i].dnodeId); if (pDnode != NULL) { - pVgroup->vnodeGid[i].privateIp = pDnode->privateIp; - pVgroup->vnodeGid[i].publicIp = pDnode->publicIp; - atomic_add_fetch_32(&pDnode->openVnodes, 1); - mgmtReleaseDnode(pDnode); - } + pVgroup->vnodeGid[i].pDnode = pDnode; + atomic_add_fetch_32(&pDnode->openVnodes, 1); + mgmtDecDnodeRef(pDnode); + } } mgmtAddVgroupIntoDb(pVgroup); - mgmtIncDbRef(pVgroup->pDb); return TSDB_CODE_SUCCESS; } @@ -115,10 +112,10 @@ static int32_t mgmtVgroupActionDelete(SSdbOper *pOper) { for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) { SDnodeObj *pDnode = mgmtGetDnode(pVgroup->vnodeGid[i].dnodeId); - if (pDnode) { + if (pDnode != NULL) { atomic_sub_fetch_32(&pDnode->openVnodes, 1); } - mgmtReleaseDnode(pDnode); + mgmtDecDnodeRef(pDnode); } return TSDB_CODE_SUCCESS; @@ -150,6 +147,12 @@ static int32_t mgmtVgroupActionUpdate(SSdbOper *pOper) { static int32_t mgmtVgroupActionEncode(SSdbOper *pOper) { SVgObj *pVgroup = pOper->pObj; memcpy(pOper->rowData, pVgroup, tsVgUpdateSize); + SVgObj *pTmpVgroup = pOper->rowData; + for 
(int32_t i = 0; i < TSDB_VNODES_SUPPORT; ++i) { + pTmpVgroup->vnodeGid[i].pDnode = NULL; + pTmpVgroup->vnodeGid[i].role = 0; + } + pOper->rowSize = tsVgUpdateSize; return TSDB_CODE_SUCCESS; } @@ -204,7 +207,11 @@ int32_t mgmtInitVgroups() { return 0; } -void mgmtReleaseVgroup(SVgObj *pVgroup) { +void mgmtIncVgroupRef(SVgObj *pVgroup) { + return sdbIncRef(tsVgroupSdb, pVgroup); +} + +void mgmtDecVgroupRef(SVgObj *pVgroup) { return sdbDecRef(tsVgroupSdb, pVgroup); } @@ -224,14 +231,15 @@ void mgmtUpdateVgroup(SVgObj *pVgroup) { mgmtSendCreateVgroupMsg(pVgroup, NULL); } -void mgmtUpdateVgroupStatus(SVgObj *pVgroup, int32_t dnodeId, SVnodeLoad *pVload) { - if (pVload->role == TAOS_SYNC_ROLE_MASTER) { - for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) { - SVnodeGid *pVgid = &pVgroup->vnodeGid[i]; - if (pVgid->dnodeId == dnodeId) { +void mgmtUpdateVgroupStatus(SVgObj *pVgroup, SDnodeObj *pDnode, SVnodeLoad *pVload) { + for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) { + SVnodeGid *pVgid = &pVgroup->vnodeGid[i]; + if (pVgid->pDnode == pDnode) { + pVgid->role = pVload->role; + if (pVload->role == TAOS_SYNC_ROLE_MASTER) { pVgroup->inUse = i; - break; } + break; } } } @@ -340,7 +348,7 @@ int32_t mgmtGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) { mgmtDecTableRef(pTable); pVgroup = mgmtGetVgroup(((SChildTableObj*)pTable)->vgId); if (NULL == pVgroup) return TSDB_CODE_INVALID_TABLE_ID; - mgmtReleaseVgroup(pVgroup); + mgmtDecVgroupRef(pVgroup); maxReplica = pVgroup->numOfVnodes > maxReplica ? pVgroup->numOfVnodes : maxReplica; } else { SVgObj *pVgroup = pDb->pHead; @@ -391,27 +399,6 @@ int32_t mgmtGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) { return 0; } -char *mgmtGetVnodeStatus(SVgObj *pVgroup, SVnodeGid *pVnode) { - SDnodeObj *pDnode = mgmtGetDnode(pVnode->dnodeId); - if (pDnode == NULL) { - mError("vgroup:%d, not exist in dnode:%d", pVgroup->vgId, pDnode->dnodeId); - return "null"; - } - mgmtReleaseDnode(pDnode); - - if (pDnode->status == TAOS_DN_STATUS_OFFLINE) { - return "offline"; - } - - for (int i = 0; i < pDnode->openVnodes; ++i) { - if (pDnode->vload[i].vgId == pVgroup->vgId) { - return pDnode->vload[i].status ? 
"ready" : "offline"; - } - } - - return "null"; -} - int32_t mgmtRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pConn) { int32_t numOfRows = 0; SVgObj *pVgroup = NULL; @@ -453,19 +440,24 @@ int32_t mgmtRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pCo *(int16_t *) pWrite = pVgroup->vnodeGid[i].dnodeId; cols++; - tinet_ntoa(ipstr, pVgroup->vnodeGid[i].privateIp); - pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; - strcpy(pWrite, ipstr); - cols++; + SDnodeObj *pDnode = pVgroup->vnodeGid[i].pDnode; - pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; - if (pVgroup->vnodeGid[i].dnodeId != 0) { - char *vnodeStatus = mgmtGetVnodeStatus(pVgroup, pVgroup->vnodeGid + i); - strcpy(pWrite, vnodeStatus); + if (pDnode != NULL) { + tinet_ntoa(ipstr, pDnode->privateIp); + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + strcpy(pWrite, ipstr); + cols++; + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + strcpy(pWrite, mgmtGetMnodeRoleStr(pVgroup->vnodeGid[i].role)); + cols++; } else { + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; strcpy(pWrite, "null"); + cols++; + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + strcpy(pWrite, "null"); + cols++; } - cols++; } numOfRows++; @@ -526,16 +518,17 @@ SMDCreateVnodeMsg *mgmtBuildCreateVnodeMsg(SVgObj *pVgroup) { pCfg->commitLog = pDb->cfg.commitLog; pCfg->replications = (int8_t) pVgroup->numOfVnodes; pCfg->quorum = 1; - pCfg->arbitratorIp = htonl(pVgroup->vnodeGid[0].privateIp); - + SMDVnodeDesc *pNodes = pVnode->nodes; for (int32_t j = 0; j < pVgroup->numOfVnodes; ++j) { - SDnodeObj *pDnode = mgmtGetDnode(pVgroup->vnodeGid[j].dnodeId); + SDnodeObj *pDnode = pVgroup->vnodeGid[0].pDnode; if (pDnode != NULL) { pNodes[j].nodeId = htonl(pDnode->dnodeId); pNodes[j].nodeIp = htonl(pDnode->privateIp); strcpy(pNodes[j].nodeName, pDnode->dnodeName); - mgmtReleaseDnode(pDnode); + if (j == 0) { + pCfg->arbitratorIp = htonl(pDnode->privateIp); + } } } @@ -549,7 +542,7 @@ SRpcIpSet mgmtGetIpSetFromVgroup(SVgObj *pVgroup) { .port = tsDnodeMnodePort }; for (int i = 0; i < pVgroup->numOfVnodes; ++i) { - ipSet.ip[i] = pVgroup->vnodeGid[i].privateIp; + ipSet.ip[i] = pVgroup->vnodeGid[i].pDnode->privateIp; } return ipSet; } @@ -580,7 +573,7 @@ void mgmtSendCreateVnodeMsg(SVgObj *pVgroup, SRpcIpSet *ipSet, void *ahandle) { void mgmtSendCreateVgroupMsg(SVgObj *pVgroup, void *ahandle) { mTrace("vgroup:%d, send create all vnodes msg, ahandle:%p", pVgroup->vgId, ahandle); for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) { - SRpcIpSet ipSet = mgmtGetIpSetFromIp(pVgroup->vnodeGid[i].privateIp); + SRpcIpSet ipSet = mgmtGetIpSetFromIp(pVgroup->vnodeGid[i].pDnode->privateIp); mgmtSendCreateVnodeMsg(pVgroup, &ipSet, ahandle); } } @@ -646,7 +639,7 @@ void mgmtSendDropVnodeMsg(int32_t vgId, SRpcIpSet *ipSet, void *ahandle) { static void mgmtSendDropVgroupMsg(SVgObj *pVgroup, void *ahandle) { mTrace("vgroup:%d, send drop all vnodes msg, ahandle:%p", pVgroup->vgId, ahandle); for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) { - SRpcIpSet ipSet = mgmtGetIpSetFromIp(pVgroup->vnodeGid[i].privateIp); + SRpcIpSet ipSet = mgmtGetIpSetFromIp(pVgroup->vnodeGid[i].pDnode->privateIp); mgmtSendDropVnodeMsg(pVgroup->vgId, &ipSet, ahandle); } } @@ -697,7 +690,7 @@ static void mgmtProcessVnodeCfgMsg(SRpcMsg *rpcMsg) { mgmtSendSimpleResp(rpcMsg->handle, TSDB_CODE_NOT_ACTIVE_VNODE); return; } - 
mgmtReleaseDnode(pDnode); + mgmtDecDnodeRef(pDnode); SVgObj *pVgroup = mgmtGetVgroup(pCfg->vgId); if (pVgroup == NULL) { @@ -705,7 +698,7 @@ static void mgmtProcessVnodeCfgMsg(SRpcMsg *rpcMsg) { mgmtSendSimpleResp(rpcMsg->handle, TSDB_CODE_NOT_ACTIVE_VNODE); return; } - mgmtReleaseVgroup(pVgroup); + mgmtDecVgroupRef(pVgroup); mgmtSendSimpleResp(rpcMsg->handle, TSDB_CODE_SUCCESS); @@ -721,7 +714,7 @@ void mgmtDropAllVgroups(SDbObj *pDropDb) { SVgObj *pVgroup = NULL; while (1) { - mgmtReleaseVgroup(pVgroup); + mgmtDecVgroupRef(pVgroup); pNode = sdbFetchRow(tsVgroupSdb, pNode, (void **)&pVgroup); if (pVgroup == NULL) break; diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index 029d4c8c84..261be8e5f0 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -408,82 +408,82 @@ static int32_t vnodeReadCfg(SVnodeObj *pVnode) { if (len <= 0) { free(content); fclose(fp); - dError("pVnode:%p vgId:%d, failed to vnode cfg, content is null", pVnode, pVnode->vgId); + dError("pVnode:%p vgId:%d, failed to read vnode cfg, content is null", pVnode, pVnode->vgId); return false; } cJSON *root = cJSON_Parse(content); if (root == NULL) { - dError("pVnode:%p vgId:%d, failed to vnode cfg, invalid json format", pVnode, pVnode->vgId); + dError("pVnode:%p vgId:%d, failed to read vnode cfg, invalid json format", pVnode, pVnode->vgId); goto PARSE_OVER; } cJSON *precision = cJSON_GetObjectItem(root, "precision"); if (!precision || precision->type != cJSON_Number) { - dError("pVnode:%p vgId:%d, failed to vnode cfg, precision not found", pVnode, pVnode->vgId); + dError("pVnode:%p vgId:%d, failed to read vnode cfg, precision not found", pVnode, pVnode->vgId); goto PARSE_OVER; } pVnode->tsdbCfg.precision = (int8_t)precision->valueint; cJSON *compression = cJSON_GetObjectItem(root, "compression"); if (!compression || compression->type != cJSON_Number) { - dError("pVnode:%p vgId:%d, failed to vnode cfg, compression not found", pVnode, pVnode->vgId); + dError("pVnode:%p vgId:%d, failed to read vnode cfg, compression not found", pVnode, pVnode->vgId); goto PARSE_OVER; } pVnode->tsdbCfg.compression = (int8_t)compression->valueint; cJSON *maxTables = cJSON_GetObjectItem(root, "maxTables"); if (!maxTables || maxTables->type != cJSON_Number) { - dError("pVnode:%p vgId:%d, failed to vnode cfg, maxTables not found", pVnode, pVnode->vgId); + dError("pVnode:%p vgId:%d, failed to read vnode cfg, maxTables not found", pVnode, pVnode->vgId); goto PARSE_OVER; } pVnode->tsdbCfg.maxTables = maxTables->valueint; cJSON *daysPerFile = cJSON_GetObjectItem(root, "daysPerFile"); if (!daysPerFile || daysPerFile->type != cJSON_Number) { - dError("pVnode:%p vgId:%d, failed to vnode cfg, daysPerFile not found", pVnode, pVnode->vgId); + dError("pVnode:%p vgId:%d, failed to read vnode cfg, daysPerFile not found", pVnode, pVnode->vgId); goto PARSE_OVER; } pVnode->tsdbCfg.daysPerFile = daysPerFile->valueint; cJSON *minRowsPerFileBlock = cJSON_GetObjectItem(root, "minRowsPerFileBlock"); if (!minRowsPerFileBlock || minRowsPerFileBlock->type != cJSON_Number) { - dError("pVnode:%p vgId:%d, failed to vnode cfg, minRowsPerFileBlock not found", pVnode, pVnode->vgId); + dError("pVnode:%p vgId:%d, failed to read vnode cfg, minRowsPerFileBlock not found", pVnode, pVnode->vgId); goto PARSE_OVER; } pVnode->tsdbCfg.minRowsPerFileBlock = minRowsPerFileBlock->valueint; cJSON *maxRowsPerFileBlock = cJSON_GetObjectItem(root, "maxRowsPerFileBlock"); if (!maxRowsPerFileBlock || maxRowsPerFileBlock->type != cJSON_Number) { - 
dError("pVnode:%p vgId:%d, failed to vnode cfg, maxRowsPerFileBlock not found", pVnode, pVnode->vgId); + dError("pVnode:%p vgId:%d, failed to read vnode cfg, maxRowsPerFileBlock not found", pVnode, pVnode->vgId); goto PARSE_OVER; } pVnode->tsdbCfg.maxRowsPerFileBlock = maxRowsPerFileBlock->valueint; - cJSON *keep = cJSON_GetObjectItem(root, "keep"); - if (!keep || keep->type != cJSON_Number) { - dError("pVnode:%p vgId:%d, failed to vnode cfg, keep not found", pVnode, pVnode->vgId); + cJSON *daysToKeep = cJSON_GetObjectItem(root, "daysToKeep"); + if (!daysToKeep || daysToKeep->type != cJSON_Number) { + dError("pVnode:%p vgId:%d, failed to read vnode cfg, daysToKeep not found", pVnode, pVnode->vgId); goto PARSE_OVER; } - pVnode->tsdbCfg.keep = keep->valueint; + pVnode->tsdbCfg.keep = daysToKeep->valueint; cJSON *maxCacheSize = cJSON_GetObjectItem(root, "maxCacheSize"); if (!maxCacheSize || maxCacheSize->type != cJSON_Number) { - dError("pVnode:%p vgId:%d, failed to vnode cfg, maxCacheSize not found", pVnode, pVnode->vgId); + dError("pVnode:%p vgId:%d, failed to read vnode cfg, maxCacheSize not found", pVnode, pVnode->vgId); goto PARSE_OVER; } pVnode->tsdbCfg.maxCacheSize = maxCacheSize->valueint; cJSON *commitLog = cJSON_GetObjectItem(root, "commitLog"); if (!commitLog || commitLog->type != cJSON_Number) { - dError("pVnode:%p vgId:%d, failed to vnode cfg, commitLog not found", pVnode, pVnode->vgId); + dError("pVnode:%p vgId:%d, failed to read vnode cfg, commitLog not found", pVnode, pVnode->vgId); goto PARSE_OVER; } pVnode->walCfg.commitLog = (int8_t)commitLog->valueint; cJSON *wals = cJSON_GetObjectItem(root, "wals"); if (!wals || wals->type != cJSON_Number) { - dError("pVnode:%p vgId:%d, failed to vnode cfg, wals not found", pVnode, pVnode->vgId); + dError("pVnode:%p vgId:%d, failed to read vnode cfg, wals not found", pVnode, pVnode->vgId); goto PARSE_OVER; } pVnode->walCfg.wals = (int8_t)wals->valueint; @@ -491,34 +491,34 @@ static int32_t vnodeReadCfg(SVnodeObj *pVnode) { cJSON *arbitratorIp = cJSON_GetObjectItem(root, "arbitratorIp"); if (!arbitratorIp || arbitratorIp->type != cJSON_String || arbitratorIp->valuestring == NULL) { - dError("pVnode:%p vgId:%d, failed to vnode cfg, arbitratorIp not found", pVnode, pVnode->vgId); + dError("pVnode:%p vgId:%d, failed to read vnode cfg, arbitratorIp not found", pVnode, pVnode->vgId); goto PARSE_OVER; } pVnode->syncCfg.arbitratorIp = inet_addr(arbitratorIp->valuestring); cJSON *quorum = cJSON_GetObjectItem(root, "quorum"); if (!quorum || quorum->type != cJSON_Number) { - dError("failed to vnode cfg, quorum not found", pVnode, pVnode->vgId); + dError("failed to read vnode cfg, quorum not found", pVnode, pVnode->vgId); goto PARSE_OVER; } pVnode->syncCfg.quorum = (int8_t)quorum->valueint; cJSON *replica = cJSON_GetObjectItem(root, "replica"); if (!replica || replica->type != cJSON_Number) { - dError("pVnode:%p vgId:%d, failed to vnode cfg, replica not found", pVnode, pVnode->vgId); + dError("pVnode:%p vgId:%d, failed to read vnode cfg, replica not found", pVnode, pVnode->vgId); goto PARSE_OVER; } pVnode->syncCfg.replica = (int8_t)replica->valueint; cJSON *nodeInfos = cJSON_GetObjectItem(root, "nodeInfos"); if (!nodeInfos || nodeInfos->type != cJSON_Array) { - dError("pVnode:%p vgId:%d, failed to vnode cfg, nodeInfos not found", pVnode, pVnode->vgId); + dError("pVnode:%p vgId:%d, failed to read vnode cfg, nodeInfos not found", pVnode, pVnode->vgId); goto PARSE_OVER; } int size = cJSON_GetArraySize(nodeInfos); if (size != pVnode->syncCfg.replica) { - 
dError("pVnode:%p vgId:%d, failed to vnode cfg, nodeInfos size not matched", pVnode, pVnode->vgId); + dError("pVnode:%p vgId:%d, failed to read vnode cfg, nodeInfos size not matched", pVnode, pVnode->vgId); goto PARSE_OVER; } @@ -528,21 +528,21 @@ static int32_t vnodeReadCfg(SVnodeObj *pVnode) { cJSON *nodeId = cJSON_GetObjectItem(nodeInfo, "nodeId"); if (!nodeId || nodeId->type != cJSON_Number) { - dError("pVnode:%p vgId:%d, failed to vnode cfg, nodeId not found", pVnode, pVnode->vgId); + dError("pVnode:%p vgId:%d, failed to read vnode cfg, nodeId not found", pVnode, pVnode->vgId); goto PARSE_OVER; } pVnode->syncCfg.nodeInfo[i].nodeId = nodeId->valueint; cJSON *nodeIp = cJSON_GetObjectItem(nodeInfo, "nodeIp"); if (!nodeIp || nodeIp->type != cJSON_String || nodeIp->valuestring == NULL) { - dError("pVnode:%p vgId:%d, failed to vnode cfg, nodeIp not found", pVnode, pVnode->vgId); + dError("pVnode:%p vgId:%d, failed to read vnode cfg, nodeIp not found", pVnode, pVnode->vgId); goto PARSE_OVER; } pVnode->syncCfg.nodeInfo[i].nodeIp = inet_addr(nodeIp->valuestring); cJSON *nodeName = cJSON_GetObjectItem(nodeInfo, "nodeName"); if (!nodeName || nodeName->type != cJSON_String || nodeName->valuestring == NULL) { - dError("pVnode:%p vgId:%d, failed to vnode cfg, nodeName not found", pVnode, pVnode->vgId); + dError("pVnode:%p vgId:%d, failed to read vnode cfg, nodeName not found", pVnode, pVnode->vgId); goto PARSE_OVER; } strncpy(pVnode->syncCfg.nodeInfo[i].name, nodeName->valuestring, TSDB_NODE_NAME_LEN); From 69a559164a9011de6b16b560bdd99bb9755dbc0d Mon Sep 17 00:00:00 2001 From: slguan Date: Wed, 22 Apr 2020 10:59:27 +0800 Subject: [PATCH 11/18] add dnode offline timer --- src/mnode/inc/mgmtDnode.h | 2 ++ src/mnode/src/mgmtDnode.c | 2 ++ src/mnode/src/mgmtMain.c | 2 +- src/vnode/src/vnodeMain.c | 76 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 81 insertions(+), 1 deletion(-) diff --git a/src/mnode/inc/mgmtDnode.h b/src/mnode/inc/mgmtDnode.h index c0e04aae05..8acd4e9117 100644 --- a/src/mnode/inc/mgmtDnode.h +++ b/src/mnode/inc/mgmtDnode.h @@ -42,6 +42,8 @@ void * mgmtGetDnodeByIp(uint32_t ip); void mgmtUpdateDnode(SDnodeObj *pDnode); int32_t mgmtDropDnode(SDnodeObj *pDnode); +extern int32_t tsAccessSquence; + #ifdef __cplusplus } #endif diff --git a/src/mnode/src/mgmtDnode.c b/src/mnode/src/mgmtDnode.c index 4db7415684..4f959605c0 100644 --- a/src/mnode/src/mgmtDnode.c +++ b/src/mnode/src/mgmtDnode.c @@ -38,6 +38,7 @@ void *tsDnodeSdb = NULL; int32_t tsDnodeUpdateSize = 0; +int32_t tsAccessSquence = 0; extern void * tsVgroupSdb; static int32_t mgmtCreateDnode(uint32_t ip); @@ -323,6 +324,7 @@ void mgmtProcessDnodeStatusMsg(SRpcMsg *rpcMsg) { pDnode->alternativeRole = pStatus->alternativeRole; pDnode->totalVnodes = pStatus->numOfTotalVnodes; pDnode->moduleStatus = pStatus->moduleStatus; + pDnode->lastAccess = tsAccessSquence; if (pStatus->dnodeId == 0) { mTrace("dnode:%d, first access, privateIp:%s, name:%s", pDnode->dnodeId, taosIpStr(pDnode->privateIp), pDnode->dnodeName); diff --git a/src/mnode/src/mgmtMain.c b/src/mnode/src/mgmtMain.c index ac41e9c120..e01b1e7756 100644 --- a/src/mnode/src/mgmtMain.c +++ b/src/mnode/src/mgmtMain.c @@ -149,12 +149,12 @@ void mgmtCleanUpSystem() { mgmtCleanUpShell(); mgmtCleanupDClient(); mgmtCleanupDServer(); + mgmtCleanUpAccts(); mgmtCleanUpTables(); mgmtCleanUpVgroups(); mgmtCleanUpDbs(); mgmtCleanupDnodes(); mgmtCleanUpUsers(); - mgmtCleanUpAccts(); sdbCleanUp(); taosTmrCleanUp(tsMgmtTmr); tsMgmtIsRunning = false; diff --git 
a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c
index 261be8e5f0..5fd337ceca 100644
--- a/src/vnode/src/vnodeMain.c
+++ b/src/vnode/src/vnodeMain.c
@@ -38,6 +38,8 @@ static void vnodeBuildVloadMsg(char *pNode, void * param);
 static int vnodeWalCallback(void *arg);
 static int32_t vnodeSaveCfg(SMDCreateVnodeMsg *pVnodeCfg);
 static int32_t vnodeReadCfg(SVnodeObj *pVnode);
+static int32_t vnodeSaveVersion(SVnodeObj *pVnode);
+static int32_t vnodeReadVersion(SVnodeObj *pVnode);
 static int vnodeWalCallback(void *arg);
 static uint32_t vnodeGetFileInfo(void *ahandle, char *name, uint32_t *index, int32_t *size);
 static int vnodeGetWalInfo(void *ahandle, char *name, uint32_t *index);
@@ -151,6 +153,8 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) {
     return code;
   }
 
+  vnodeReadVersion(pVnode);
+
   pVnode->wqueue = dnodeAllocateWqueue(pVnode);
   pVnode->rqueue = dnodeAllocateRqueue(pVnode);
 
@@ -303,6 +307,7 @@ static void vnodeCleanUp(SVnodeObj *pVnode) {
   //syncStop(pVnode->sync);
   tsdbCloseRepo(pVnode->tsdb);
   walClose(pVnode->wal);
+  vnodeSaveVersion(pVnode);
   vnodeRelease(pVnode);
 }
 
@@ -562,3 +567,74 @@ PARSE_OVER:
   fclose(fp);
   return ret;
 }
+
+
+static int32_t vnodeSaveVersion(SVnodeObj *pVnode) {
+  char versionFile[TSDB_FILENAME_LEN + 30] = {0};
+  sprintf(versionFile, "%s/vnode%d/version.json", tsVnodeDir, pVnode->vgId);
+  FILE *fp = fopen(versionFile, "w");
+  if (!fp) {
+    dError("pVnode:%p vgId:%d, failed to open vnode version file for write, error:%s", pVnode, pVnode->vgId, strerror(errno));
+    return errno;
+  }
+
+  int32_t len = 0;
+  int32_t maxLen = 30;
+  char *  content = calloc(1, maxLen + 1);
+
+  len += snprintf(content + len, maxLen - len, "{\n");
+  len += snprintf(content + len, maxLen - len, "  \"version\": %" PRId64 "\n", pVnode->version);
+  len += snprintf(content + len, maxLen - len, "}\n");
+
+  fwrite(content, 1, len, fp);
+  fclose(fp);
+  free(content);
+
+  dPrint("pVnode:%p vgId:%d, save vnode version succeeded", pVnode, pVnode->vgId);
+
+  return 0;
+}
+
+static int32_t vnodeReadVersion(SVnodeObj *pVnode) {
+  char versionFile[TSDB_FILENAME_LEN + 30] = {0};
+  sprintf(versionFile, "%s/vnode%d/version.json", tsVnodeDir, pVnode->vgId);
+  FILE *fp = fopen(versionFile, "r");
+  if (!fp) {
+    dError("pVnode:%p vgId:%d, failed to open vnode version file for read, error:%s", pVnode, pVnode->vgId, strerror(errno));
+    return errno;
+  }
+
+  int ret = TSDB_CODE_OTHERS;
+  int maxLen = 100;
+  char *content = calloc(1, maxLen + 1);
+  int len = fread(content, 1, maxLen, fp);
+  if (len <= 0) {
+    free(content);
+    fclose(fp);
+    dError("pVnode:%p vgId:%d, failed to read vnode version, content is null", pVnode, pVnode->vgId);
+    return false;
+  }
+
+  cJSON *root = cJSON_Parse(content);
+  if (root == NULL) {
+    dError("pVnode:%p vgId:%d, failed to read vnode version, invalid json format", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+
+  cJSON *version = cJSON_GetObjectItem(root, "version");
+  if (!version || version->type != cJSON_Number) {
+    dError("pVnode:%p vgId:%d, failed to read vnode version, version not found", pVnode, pVnode->vgId);
+    goto PARSE_OVER;
+  }
+  pVnode->version = version->valueint;
+
+  ret = 0;
+
+  dPrint("pVnode:%p vgId:%d, read vnode version succeeded, version:%" PRId64, pVnode, pVnode->vgId, pVnode->version);
+
+PARSE_OVER:
+  free(content);
+  cJSON_Delete(root);
+  fclose(fp);
+  return ret;
+}
\ No newline at end of file

From ac19d3726e2dd6b9ff15e4660d3a76d4c7e623f2 Mon Sep 17 00:00:00 2001
From: slguan
Date: Wed, 22 Apr 2020 12:14:50 +0800
Subject: [PATCH 12/18] fix add mnode error

---
 src/mnode/src/mgmtDnode.c | 9 
++++++++- src/mnode/src/mgmtMnode.c | 2 +- src/plugins/monitor/src/monitorSystem.c | 1 + 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/src/mnode/src/mgmtDnode.c b/src/mnode/src/mgmtDnode.c index 4f959605c0..bf58adf594 100644 --- a/src/mnode/src/mgmtDnode.c +++ b/src/mnode/src/mgmtDnode.c @@ -39,6 +39,7 @@ void *tsDnodeSdb = NULL; int32_t tsDnodeUpdateSize = 0; int32_t tsAccessSquence = 0; +extern void * tsMnodeSdb; extern void * tsVgroupSdb; static int32_t mgmtCreateDnode(uint32_t ip); @@ -101,7 +102,13 @@ static int32_t mgmtDnodeActionDelete(SSdbOper *pOper) { } } - mgmtDropMnode(pDnode->dnodeId); + SMnodeObj *pMnode = mgmtGetMnode(pDnode->dnodeId); + if (pMnode != NULL) { + SSdbOper oper = {.type = SDB_OPER_LOCAL, .table = tsMnodeSdb, .pObj = pMnode}; + sdbDeleteRow(&oper); + mgmtReleaseMnode(pMnode); + } + balanceNotify(); mTrace("dnode:%d, all vgroups:%d is dropped from sdb", pDnode->dnodeId, numOfVgroups); diff --git a/src/mnode/src/mgmtMnode.c b/src/mnode/src/mgmtMnode.c index 3eae218660..922788749e 100644 --- a/src/mnode/src/mgmtMnode.c +++ b/src/mnode/src/mgmtMnode.c @@ -30,7 +30,7 @@ #include "mgmtShell.h" #include "mgmtUser.h" -static void * tsMnodeSdb = NULL; +void * tsMnodeSdb = NULL; static int32_t tsMnodeUpdateSize = 0; static int32_t mgmtGetMnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn); static int32_t mgmtRetrieveMnodes(SShowObj *pShow, char *data, int32_t rows, void *pConn); diff --git a/src/plugins/monitor/src/monitorSystem.c b/src/plugins/monitor/src/monitorSystem.c index 1811733790..776fdca986 100644 --- a/src/plugins/monitor/src/monitorSystem.c +++ b/src/plugins/monitor/src/monitorSystem.c @@ -397,6 +397,7 @@ void monitorSaveAcctLog(char *acctId, int64_t currentPointsPerSecond, int64_t ma int64_t totalOutbound, int64_t maxOutbound, int64_t totalDbs, int64_t maxDbs, int64_t totalUsers, int64_t maxUsers, int64_t totalStreams, int64_t maxStreams, int64_t totalConns, int64_t maxConns, int8_t accessState) { + if (monitor == NULL) return; if (monitor->state != MONITOR_STATE_INITIALIZED) return; char sql[1024] = {0}; From 1260d4097673a56d624e0b2499e7a84d08aa2701 Mon Sep 17 00:00:00 2001 From: slguan Date: Wed, 22 Apr 2020 12:17:32 +0800 Subject: [PATCH 13/18] script --- tests/script/unique/dnode/balance1.sim | 112 ++++++++++---------- tests/script/unique/dnode/balance2.sim | 110 +++++++++---------- tests/script/unique/dnode/balance3.sim | 122 +++++++++++----------- tests/script/unique/dnode/balancex.sim | 52 ++++----- tests/script/unique/dnode/monitor_bug.sim | 16 +-- tests/script/unique/dnode/offline1.sim | 6 +- tests/script/unique/dnode/offline2.sim | 8 +- tests/script/unique/dnode/remove1.sim | 50 ++++----- tests/script/unique/dnode/remove2.sim | 36 +++---- tests/script/unique/dnode/vnode_clean.sim | 92 ++++++++-------- 10 files changed, 302 insertions(+), 302 deletions(-) diff --git a/tests/script/unique/dnode/balance1.sim b/tests/script/unique/dnode/balance1.sim index b7da8cb9ca..413d4d74b3 100644 --- a/tests/script/unique/dnode/balance1.sim +++ b/tests/script/unique/dnode/balance1.sim @@ -36,7 +36,7 @@ system sh/cfg.sh -n dnode3 -c clog -v 1 system sh/cfg.sh -n dnode4 -c clog -v 1 print ========== step1 -system sh/exec.sh -n dnode1 -s start +system sh/exec_up.sh -n dnode1 -s start sql connect sleep 3000 @@ -49,15 +49,15 @@ sql insert into d1.t1 values(now+4s, 12) sql insert into d1.t1 values(now+5s, 11) sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -if $data3_192.168.0.1 != 3 then +print 192.168.0.1 openVnodes $data3_1 
+if $data3_1 != 1 then return -1 endi print ========== step2 sleep 2000 sql create dnode 192.168.0.2 -system sh/exec.sh -n dnode2 -s start +system sh/exec_up.sh -n dnode2 -s start $x = 0 show2: @@ -68,12 +68,12 @@ show2: endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -if $data3_192.168.0.1 != 4 then +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +if $data3_1 != 0 then goto show2 endi -if $data3_192.168.0.2 != 3 then +if $data3_2 != 1 then goto show2 endi @@ -87,12 +87,12 @@ sql insert into d2.t2 values(now+4s, 22) sql insert into d2.t2 values(now+5s, 21) sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -if $data3_192.168.0.1 != 4 then +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +if $data3_1 != 0 then return -1 endi -if $data3_192.168.0.2 != 2 then +if $data3_2 != 2 then return -1 endi @@ -108,23 +108,23 @@ show4: endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -if $data3_192.168.0.1 != 2 then +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +if $data3_1 != 2 then goto show4 endi -if $data3_192.168.0.2 != null then +if $data3_2 != null then goto show4 endi if $rows != 1 then goto show4 endi -system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec_up.sh -n dnode2 -s stop -x SIGINT print ========== step5 sql create dnode 192.168.0.3 -system sh/exec.sh -n dnode3 -s start +system sh/exec_up.sh -n dnode3 -s start $x = 0 show5: @@ -135,16 +135,16 @@ show5: endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -print 192.168.0.3 freeVnodes $data3_192.168.0.3 -if $data3_192.168.0.1 != 4 then +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +print 192.168.0.3 openVnodes $data3_3 +if $data3_1 != 0 then goto show5 endi -if $data3_192.168.0.2 != null then +if $data3_2 != null then goto show5 endi -if $data3_192.168.0.3 != 2 then +if $data3_3 != 2 then goto show5 endi @@ -158,23 +158,23 @@ sql insert into d3.t3 values(now+4s, 32) sql insert into d3.t3 values(now+5s, 31) sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -print 192.168.0.3 freeVnodes $data3_192.168.0.3 +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +print 192.168.0.3 openVnodes $data3_3 -if $data3_192.168.0.1 != 4 then +if $data3_1 != 0 then return -1 endi -if $data3_192.168.0.2 != null then +if $data3_2 != null then return -1 endi -if $data3_192.168.0.3 != 1 then +if $data3_3 != 3 then return -1 endi print ========== step7 sql create dnode 192.168.0.4 -system sh/exec.sh -n dnode4 -s start +system sh/exec_up.sh -n dnode4 -s start $x = 0 show7: @@ -185,20 +185,20 @@ show7: endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -print 192.168.0.3 freeVnodes $data3_192.168.0.3 -print 192.168.0.4 freeVnodes $data3_192.168.0.4 -if $data3_192.168.0.1 != 4 then +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +print 192.168.0.3 openVnodes $data3_3 +print 192.168.0.4 openVnodes $data3_4 +if $data3_1 != 0 then goto show7 endi -if $data3_192.168.0.2 != null then +if $data3_2 != null then goto show7 endi -if $data3_192.168.0.3 != 2 then +if $data3_3 != 2 then goto show7 
endi -if $data3_192.168.0.4 != 3 then +if $data3_4 != 1 then goto show7 endi @@ -212,21 +212,21 @@ sql insert into d4.t4 values(now+4s, 42) sql insert into d4.t4 values(now+5s, 41) sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -print 192.168.0.3 freeVnodes $data3_192.168.0.3 -print 192.168.0.4 freeVnodes $data3_192.168.0.4 +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +print 192.168.0.3 openVnodes $data3_3 +print 192.168.0.4 openVnodes $data3_4 -if $data3_192.168.0.1 != 4 then +if $data3_1 != 0 then return -1 endi -if $data3_192.168.0.2 != null then +if $data3_2 != null then return -1 endi -if $data3_192.168.0.3 != 2 then +if $data3_3 != 2 then return -1 endi -if $data3_192.168.0.4 != 2 then +if $data3_4 != 2 then return -1 endi @@ -242,25 +242,25 @@ show9: endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -print 192.168.0.3 freeVnodes $data3_192.168.0.3 -print 192.168.0.4 freeVnodes $data3_192.168.0.4 +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +print 192.168.0.3 openVnodes $data3_3 +print 192.168.0.4 openVnodes $data3_4 -if $data3_192.168.0.1 != 4 then +if $data3_1 != 0 then goto show9 endi -if $data3_192.168.0.2 != null then +if $data3_2 != null then goto show9 endi -if $data3_192.168.0.3 != null then +if $data3_3 != null then goto show9 endi -if $data3_192.168.0.4 != 0 then +if $data3_4 != 4 then goto show9 endi -system sh/exec.sh -n dnode3 -s stop -x SIGINT +system sh/exec_up.sh -n dnode3 -s stop -x SIGINT print ========== step10 sql select * from d1.t1 order by t desc diff --git a/tests/script/unique/dnode/balance2.sim b/tests/script/unique/dnode/balance2.sim index afe82f8cdd..5e887d54f5 100644 --- a/tests/script/unique/dnode/balance2.sim +++ b/tests/script/unique/dnode/balance2.sim @@ -37,13 +37,13 @@ system sh/cfg.sh -n dnode4 -c clog -v 1 system sh/cfg.sh -n dnode5 -c clog -v 1 print ========== step1 -system sh/exec.sh -n dnode1 -s start +system sh/exec_up.sh -n dnode1 -s start sql connect sql create dnode 192.168.0.2 sql create dnode 192.168.0.3 -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode3 -s start +system sh/exec_up.sh -n dnode2 -s start +system sh/exec_up.sh -n dnode3 -s start sleep 3000 sql create database d1 replica 2 tables 4 @@ -63,16 +63,16 @@ sql insert into d2.t2 values(now+4s, 22) sql insert into d2.t2 values(now+5s, 21) sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -print 192.168.0.3 freeVnodes $data3_192.168.0.3 -if $data3_192.168.0.1 != 4 then +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +print 192.168.0.3 openVnodes $data3_3 +if $data3_1 != 4 then return -1 endi -if $data3_192.168.0.2 != 2 then +if $data3_2 != 2 then return -1 endi -if $data3_192.168.0.3 != 2 then +if $data3_3 != 2 then return -1 endi @@ -88,24 +88,24 @@ show2: endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -print 192.168.0.3 freeVnodes $data3_192.168.0.3 -if $data3_192.168.0.1 != 2 then +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +print 192.168.0.3 openVnodes $data3_3 +if $data3_1 != 2 then goto show2 endi -if $data3_192.168.0.2 != null then +if $data3_2 != null then goto show2 endi -if $data3_192.168.0.3 != 2 then +if $data3_3 != 2 then goto show2 endi -system sh/exec.sh -n 
dnode2 -s stop -x SIGINT +system sh/exec_up.sh -n dnode2 -s stop -x SIGINT print ========== step3 sql create dnode 192.168.0.4 -system sh/exec.sh -n dnode4 -s start +system sh/exec_up.sh -n dnode4 -s start $x = 0 show3: @@ -116,20 +116,20 @@ show3: endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -print 192.168.0.3 freeVnodes $data3_192.168.0.3 -print 192.168.0.4 freeVnodes $data3_192.168.0.4 -if $data3_192.168.0.1 != 4 then +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +print 192.168.0.3 openVnodes $data3_3 +print 192.168.0.4 openVnodes $data3_4 +if $data3_1 != 4 then goto show3 endi -if $data3_192.168.0.2 != null then +if $data3_2 != null then goto show3 endi -if $data3_192.168.0.3 != 2 then +if $data3_3 != 2 then goto show3 endi -if $data3_192.168.0.4 != 2 then +if $data3_4 != 2 then goto show3 endi @@ -143,26 +143,26 @@ sql insert into d3.t3 values(now+4s, 32) sql insert into d3.t3 values(now+5s, 31) sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -print 192.168.0.3 freeVnodes $data3_192.168.0.3 -print 192.168.0.4 freeVnodes $data3_192.168.0.4 -if $data3_192.168.0.1 != 4 then +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +print 192.168.0.3 openVnodes $data3_3 +print 192.168.0.4 openVnodes $data3_4 +if $data3_1 != 4 then return -1 endi -if $data3_192.168.0.2 != null then +if $data3_2 != null then return -1 endi -if $data3_192.168.0.3 != 1 then +if $data3_3 != 1 then return -1 endi -if $data3_192.168.0.4 != 1 then +if $data3_4 != 1 then return -1 endi print ========== step5 sql create dnode 192.168.0.5 -system sh/exec.sh -n dnode5 -s start +system sh/exec_up.sh -n dnode5 -s start $x = 0 show5: @@ -173,24 +173,24 @@ show5: endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -print 192.168.0.3 freeVnodes $data3_192.168.0.3 -print 192.168.0.4 freeVnodes $data3_192.168.0.4 -print 192.168.0.5 freeVnodes $data3_192.168.0.5 -if $data3_192.168.0.1 != 4 then +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +print 192.168.0.3 openVnodes $data3_3 +print 192.168.0.4 openVnodes $data3_4 +print 192.168.0.5 openVnodes $data3_5 +if $data3_1 != 4 then goto show5 endi -if $data3_192.168.0.2 != null then +if $data3_2 != null then goto show5 endi -if $data3_192.168.0.3 != 2 then +if $data3_3 != 2 then goto show5 endi -if $data3_192.168.0.4 != 2 then +if $data3_4 != 2 then goto show5 endi -if $data3_192.168.0.5 != 2 then +if $data3_5 != 2 then goto show5 endi @@ -206,28 +206,28 @@ show6: endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -print 192.168.0.3 freeVnodes $data3_192.168.0.3 -print 192.168.0.4 freeVnodes $data3_192.168.0.4 -print 192.168.0.5 freeVnodes $data3_192.168.0.5 -if $data3_192.168.0.1 != 4 then +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +print 192.168.0.3 openVnodes $data3_3 +print 192.168.0.4 openVnodes $data3_4 +print 192.168.0.5 openVnodes $data3_5 +if $data3_1 != 4 then goto show6 endi -if $data3_192.168.0.2 != null then +if $data3_2 != null then goto show6 endi -if $data3_192.168.0.3 != null then +if $data3_3 != null then goto show6 endi -if $data3_192.168.0.4 != 1 then +if $data3_4 != 1 then goto show6 endi -if $data3_192.168.0.5 != 1 then +if $data3_5 != 1 then goto show6 endi -system 
sh/exec.sh -n dnode3 -s stop -x SIGINT +system sh/exec_up.sh -n dnode3 -s stop -x SIGINT print ========== step7 sql select * from d1.t1 order by t desc diff --git a/tests/script/unique/dnode/balance3.sim b/tests/script/unique/dnode/balance3.sim index fc46736ef8..7baa6444ee 100644 --- a/tests/script/unique/dnode/balance3.sim +++ b/tests/script/unique/dnode/balance3.sim @@ -43,15 +43,15 @@ system sh/cfg.sh -n dnode5 -c clog -v 1 system sh/cfg.sh -n dnode6 -c clog -v 1 print ========== step1 -system sh/exec.sh -n dnode1 -s start +system sh/exec_up.sh -n dnode1 -s start sql connect sql create dnode 192.168.0.2 sql create dnode 192.168.0.3 sql create dnode 192.168.0.4 -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode3 -s start -system sh/exec.sh -n dnode4 -s start +system sh/exec_up.sh -n dnode2 -s start +system sh/exec_up.sh -n dnode3 -s start +system sh/exec_up.sh -n dnode4 -s start sleep 3000 sql create database d1 replica 3 tables 4 @@ -71,21 +71,21 @@ sql insert into d2.t2 values(now+4s, 22) sql insert into d2.t2 values(now+5s, 21) sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -print 192.168.0.3 freeVnodes $data3_192.168.0.3 -print 192.168.0.4 freeVnodes $data3_192.168.0.4 +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +print 192.168.0.3 openVnodes $data3_3 +print 192.168.0.4 openVnodes $data3_4 -if $data3_192.168.0.1 != 4 then +if $data3_1 != 4 then return -1 endi -if $data3_192.168.0.2 != 2 then +if $data3_2 != 2 then return -1 endi -if $data3_192.168.0.3 != 2 then +if $data3_3 != 2 then return -1 endi -if $data3_192.168.0.4 != 2 then +if $data3_4 != 2 then return -1 endi @@ -101,29 +101,29 @@ show2: endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -print 192.168.0.3 freeVnodes $data3_192.168.0.3 -print 192.168.0.4 freeVnodes $data3_192.168.0.4 +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +print 192.168.0.3 openVnodes $data3_3 +print 192.168.0.4 openVnodes $data3_4 -if $data3_192.168.0.1 != 2 then +if $data3_1 != 2 then goto show2 endi -if $data3_192.168.0.2 != null then +if $data3_2 != null then goto show2 endi -if $data3_192.168.0.3 != 2 then +if $data3_3 != 2 then goto show2 endi -if $data3_192.168.0.4 != 2 then +if $data3_4 != 2 then goto show2 endi -system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec_up.sh -n dnode2 -s stop -x SIGINT print ========== step sql create dnode 192.168.0.5 -system sh/exec.sh -n dnode5 -s start +system sh/exec_up.sh -n dnode5 -s start $x = 0 show3: @@ -134,25 +134,25 @@ show3: endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -print 192.168.0.3 freeVnodes $data3_192.168.0.3 -print 192.168.0.4 freeVnodes $data3_192.168.0.4 -print 192.168.0.5 freeVnodes $data3_192.168.0.5 +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +print 192.168.0.3 openVnodes $data3_3 +print 192.168.0.4 openVnodes $data3_4 +print 192.168.0.5 openVnodes $data3_5 -if $data3_192.168.0.1 != 4 then +if $data3_1 != 4 then goto show3 endi -if $data3_192.168.0.2 != null then +if $data3_2 != null then goto show3 endi -if $data3_192.168.0.3 != 2 then +if $data3_3 != 2 then goto show3 endi -if $data3_192.168.0.4 != 2 then +if $data3_4 != 2 then goto show3 endi -if $data3_192.168.0.5 != 2 then +if $data3_5 != 2 then goto show3 endi @@ -174,31 +174,31 @@ show4: endi sql show 
dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -print 192.168.0.3 freeVnodes $data3_192.168.0.3 -print 192.168.0.4 freeVnodes $data3_192.168.0.4 -print 192.168.0.5 freeVnodes $data3_192.168.0.5 +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +print 192.168.0.3 openVnodes $data3_3 +print 192.168.0.4 openVnodes $data3_4 +print 192.168.0.5 openVnodes $data3_5 -if $data3_192.168.0.1 != 4 then +if $data3_1 != 4 then goto show4 endi -if $data3_192.168.0.2 != null then +if $data3_2 != null then goto show4 endi -if $data3_192.168.0.3 != 1 then +if $data3_3 != 1 then goto show4 endi -if $data3_192.168.0.4 != 1 then +if $data3_4 != 1 then goto show4 endi -if $data3_192.168.0.5 != 1 then +if $data3_5 != 1 then goto show4 endi print ========== step5 sql create dnode 192.168.0.2 -system sh/exec.sh -n dnode2 -s start +system sh/exec_up.sh -n dnode2 -s start $x = 0 show5: @@ -209,16 +209,16 @@ show5: endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -print 192.168.0.3 freeVnodes $data3_192.168.0.3 -print 192.168.0.4 freeVnodes $data3_192.168.0.4 -print 192.168.0.5 freeVnodes $data3_192.168.0.5 +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +print 192.168.0.3 openVnodes $data3_3 +print 192.168.0.4 openVnodes $data3_4 +print 192.168.0.5 openVnodes $data3_5 -if $data3_192.168.0.1 != 4 then +if $data3_1 != 4 then goto show5 endi -if $data3_192.168.0.2 != 2 then +if $data3_2 != 2 then goto show5 endi @@ -236,29 +236,29 @@ show6: endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -print 192.168.0.3 freeVnodes $data3_192.168.0.3 -print 192.168.0.4 freeVnodes $data3_192.168.0.4 -print 192.168.0.5 freeVnodes $data3_192.168.0.5 +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +print 192.168.0.3 openVnodes $data3_3 +print 192.168.0.4 openVnodes $data3_4 +print 192.168.0.5 openVnodes $data3_5 -if $data3_192.168.0.1 != 4 then +if $data3_1 != 4 then goto show6 endi -if $data3_192.168.0.2 != 1 then +if $data3_2 != 1 then goto show6 endi -if $data3_192.168.0.3 != null then +if $data3_3 != null then goto show6 endi -if $data3_192.168.0.4 != 1 then +if $data3_4 != 1 then goto show6 endi -if $data3_192.168.0.5 != 1 then +if $data3_5 != 1 then goto show6 endi -system sh/exec.sh -n dnode3 -s stop -x SIGINT +system sh/exec_up.sh -n dnode3 -s stop -x SIGINT print ========== step7 sql select * from d1.t1 order by t desc diff --git a/tests/script/unique/dnode/balancex.sim b/tests/script/unique/dnode/balancex.sim index 9e90ebf5d4..9153224a22 100644 --- a/tests/script/unique/dnode/balancex.sim +++ b/tests/script/unique/dnode/balancex.sim @@ -31,7 +31,7 @@ system sh/cfg.sh -n dnode3 -c clog -v 1 system sh/cfg.sh -n dnode4 -c clog -v 1 print ========== step1 -system sh/exec.sh -n dnode1 -s start +system sh/exec_up.sh -n dnode1 -s start sql connect sleep 3000 @@ -52,14 +52,14 @@ sql insert into d2.t2 values(now+4s, 22) sql insert into d2.t2 values(now+5s, 21) sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -if $data3_192.168.0.1 != 2 then +print 192.168.0.1 openVnodes $data3_1 +if $data3_1 != 2 then return -1 endi print ========== step2 sql create dnode 192.168.0.2 -system sh/exec.sh -n dnode2 -s start +system sh/exec_up.sh -n dnode2 -s start $x = 0 show2: @@ -70,12 +70,12 @@ show2: endi sql show dnodes -print 192.168.0.1 freeVnodes 
$data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -if $data3_192.168.0.1 != 4 then +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +if $data3_1 != 4 then goto show2 endi -if $data3_192.168.0.2 != 2 then +if $data3_2 != 2 then goto show2 endi @@ -96,18 +96,18 @@ show3: return -1 endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -if $data3_192.168.0.1 != 3 then +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +if $data3_1 != 3 then goto show3 endi -if $data3_192.168.0.2 != 1 then +if $data3_2 != 1 then goto show3 endi print ========== step3 sql create dnode 192.168.0.3 -system sh/exec.sh -n dnode3 -s start +system sh/exec_up.sh -n dnode3 -s start $x = 0 show4: @@ -117,16 +117,16 @@ show4: return -1 endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -print 192.168.0.3 freeVnodes $data3_192.168.0.3 -if $data3_192.168.0.1 != 4 then +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +print 192.168.0.3 openVnodes $data3_3 +if $data3_1 != 4 then goto show4 endi -if $data3_192.168.0.2 != 2 then +if $data3_2 != 2 then goto show4 endi -if $data3_192.168.0.3 != 2 then +if $data3_3 != 2 then goto show4 endi @@ -141,20 +141,20 @@ show5: return -1 endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -print 192.168.0.3 freeVnodes $data3_192.168.0.3 -if $data3_192.168.0.1 != 3 then +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +print 192.168.0.3 openVnodes $data3_3 +if $data3_1 != 3 then goto show5 endi -if $data3_192.168.0.2 != null then +if $data3_2 != null then goto show5 endi -if $data3_192.168.0.3 != 1 then +if $data3_3 != 1 then goto show5 endi -system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec_up.sh -n dnode2 -s stop -x SIGINT print ========== step6 sql select * from d1.t1 order by t desc diff --git a/tests/script/unique/dnode/monitor_bug.sim b/tests/script/unique/dnode/monitor_bug.sim index dd792bccbb..c95ddac43c 100644 --- a/tests/script/unique/dnode/monitor_bug.sim +++ b/tests/script/unique/dnode/monitor_bug.sim @@ -22,19 +22,19 @@ system sh/cfg.sh -n dnode1 -c monitor -v 1 system sh/cfg.sh -n dnode2 -c monitor -v 0 print ========== step1 -system sh/exec.sh -n dnode1 -s start +system sh/exec_up.sh -n dnode1 -s start sql connect sleep 5000 sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -if $data3_192.168.0.1 != 3 then +print 192.168.0.1 openVnodes $data3_1 +if $data3_1 != 3 then return -1 endi print ========== step2 sql create dnode 192.168.0.2 -system sh/exec.sh -n dnode2 -s start +system sh/exec_up.sh -n dnode2 -s start $x = 0 show2: @@ -45,12 +45,12 @@ show2: endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -if $data3_192.168.0.1 != 4 then +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +if $data3_1 != 4 then goto show2 endi -if $data3_192.168.0.2 != 3 then +if $data3_2 != 3 then goto show2 endi diff --git a/tests/script/unique/dnode/offline1.sim b/tests/script/unique/dnode/offline1.sim index 4e05916e56..d61971a439 100644 --- a/tests/script/unique/dnode/offline1.sim +++ b/tests/script/unique/dnode/offline1.sim @@ -29,10 +29,10 @@ system sh/cfg.sh -n dnode2 -c clog -v 1 system sh/cfg.sh -n dnode3 -c clog -v 1 print ========== step1 
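# In these dnode test scripts the third column of `show dnodes` is read as
# openVnodes and indexed by dnode id ($data3_1, $data3_2, ...) instead of
# freeVnodes indexed by IP ($data3_192.168.0.1, ...); the expected counts in
# the if-checks are updated to match the new balance behavior, and dnodes are
# started and stopped through sh/exec_up.sh rather than sh/exec.sh.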
-system sh/exec.sh -n dnode1 -s start +system sh/exec_up.sh -n dnode1 -s start sql connect sql create dnode 192.168.0.2 -system sh/exec.sh -n dnode2 -s start +system sh/exec_up.sh -n dnode2 -s start sleep 3000 sql show dnodes @@ -44,7 +44,7 @@ if $data4_192.168.0.2 != ready then endi print ========== step2 -system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec_up.sh -n dnode2 -s stop -x SIGINT sleep 6000 sql show dnodes diff --git a/tests/script/unique/dnode/offline2.sim b/tests/script/unique/dnode/offline2.sim index cd9ec5ee5c..9a0bfd18b7 100644 --- a/tests/script/unique/dnode/offline2.sim +++ b/tests/script/unique/dnode/offline2.sim @@ -29,10 +29,10 @@ system sh/cfg.sh -n dnode2 -c clog -v 1 system sh/cfg.sh -n dnode3 -c clog -v 1 print ========== step1 -system sh/exec.sh -n dnode1 -s start +system sh/exec_up.sh -n dnode1 -s start sql connect sql create dnode 192.168.0.2 -system sh/exec.sh -n dnode2 -s start +system sh/exec_up.sh -n dnode2 -s start sleep 3000 sql create database d1 replica 2 tables 4 @@ -48,7 +48,7 @@ if $data4_192.168.0.2 != ready then endi print ========== step2 -system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec_up.sh -n dnode2 -s stop -x SIGINT sleep 5000 sql show dnodes @@ -72,7 +72,7 @@ endi print ========== step4 sql create dnode 192.168.0.3 -system sh/exec.sh -n dnode3 -s start +system sh/exec_up.sh -n dnode3 -s start sql drop dnode 192.168.0.2 sleep 5000 diff --git a/tests/script/unique/dnode/remove1.sim b/tests/script/unique/dnode/remove1.sim index 8d5a3c5ee6..76429cdd22 100644 --- a/tests/script/unique/dnode/remove1.sim +++ b/tests/script/unique/dnode/remove1.sim @@ -31,7 +31,7 @@ system sh/cfg.sh -n dnode3 -c clog -v 1 system sh/cfg.sh -n dnode4 -c clog -v 1 print ========== step1 -system sh/exec.sh -n dnode1 -s start +system sh/exec_up.sh -n dnode1 -s start sql connect sleep 3000 @@ -52,14 +52,14 @@ sql insert into d2.t2 values(now+4s, 22) sql insert into d2.t2 values(now+5s, 21) sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -if $data3_192.168.0.1 != 2 then +print 192.168.0.1 openVnodes $data3_1 +if $data3_1 != 2 then return -1 endi print ========== step2 sql create dnode 192.168.0.2 -system sh/exec.sh -n dnode2 -s start +system sh/exec_up.sh -n dnode2 -s start sleep 9000 sql create database d3 replica 2 tables 4 @@ -79,12 +79,12 @@ show2: endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -if $data3_192.168.0.1 != 3 then +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +if $data3_1 != 3 then goto show2 endi -if $data3_192.168.0.2 != 1 then +if $data3_2 != 1 then goto show2 endi @@ -101,12 +101,12 @@ show3: endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 $data5_192.168.0.2 +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 $data5_192.168.0.2 print ========== step4 sql create dnode 192.168.0.3 -system sh/exec.sh -n dnode3 -s start +system sh/exec_up.sh -n dnode3 -s start $x = 0 show4: @@ -117,18 +117,18 @@ show4: endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -print 192.168.0.3 freeVnodes $data3_192.168.0.3 -if $data3_192.168.0.2 != null then +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +print 192.168.0.3 openVnodes $data3_3 +if $data3_2 != null then goto show4 endi -system sh/exec.sh -n dnode2 -s stop -x SIGINT +system 
sh/exec_up.sh -n dnode2 -s stop -x SIGINT print ========== step5 sql create dnode 192.168.0.4 -system sh/exec.sh -n dnode4 -s start +system sh/exec_up.sh -n dnode4 -s start $x = 0 show5: @@ -138,20 +138,20 @@ show5: return -1 endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -print 192.168.0.3 freeVnodes $data3_192.168.0.3 -print 192.168.0.4 freeVnodes $data3_192.168.0.4 -if $data3_192.168.0.1 != 4 then +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +print 192.168.0.3 openVnodes $data3_3 +print 192.168.0.4 openVnodes $data3_4 +if $data3_1 != 4 then goto show5 endi -if $data3_192.168.0.2 != null then +if $data3_2 != null then goto show5 endi -if $data3_192.168.0.3 != 2 then +if $data3_3 != 2 then goto show5 endi -if $data3_192.168.0.4 != 2 then +if $data3_4 != 2 then goto show5 endi diff --git a/tests/script/unique/dnode/remove2.sim b/tests/script/unique/dnode/remove2.sim index d8727767ad..9b808920a6 100644 --- a/tests/script/unique/dnode/remove2.sim +++ b/tests/script/unique/dnode/remove2.sim @@ -31,7 +31,7 @@ system sh/cfg.sh -n dnode3 -c clog -v 1 system sh/cfg.sh -n dnode4 -c clog -v 1 print ========== step1 -system sh/exec.sh -n dnode1 -s start +system sh/exec_up.sh -n dnode1 -s start sql connect sleep 3000 @@ -52,14 +52,14 @@ sql insert into d2.t2 values(now+4s, 22) sql insert into d2.t2 values(now+5s, 21) sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -if $data3_192.168.0.1 != 2 then +print 192.168.0.1 openVnodes $data3_1 +if $data3_1 != 2 then return -1 endi print ========== step2 sql create dnode 192.168.0.2 -system sh/exec.sh -n dnode2 -s start +system sh/exec_up.sh -n dnode2 -s start sleep 9000 sql create database d3 replica 2 tables 4 @@ -79,17 +79,17 @@ show2: endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -if $data3_192.168.0.1 != 3 then +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +if $data3_1 != 3 then goto show2 endi -if $data3_192.168.0.2 != 1 then +if $data3_2 != 1 then goto show2 endi print ========== step3 -system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec_up.sh -n dnode2 -s stop -x SIGINT sql drop dnode 192.168.0.2 sleep 7001 @@ -102,12 +102,12 @@ show3: endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 $data5_192.168.0.2 +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 $data5_192.168.0.2 print ========== step4 sql create dnode 192.168.0.3 -system sh/exec.sh -n dnode3 -s start +system sh/exec_up.sh -n dnode3 -s start $x = 0 show4: @@ -118,16 +118,16 @@ show4: endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -print 192.168.0.3 freeVnodes $data3_192.168.0.3 -if $data3_192.168.0.2 != null then +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +print 192.168.0.3 openVnodes $data3_3 +if $data3_2 != null then goto show4 endi -if $data3_192.168.0.1 != 3 then +if $data3_1 != 3 then goto show4 endi -if $data3_192.168.0.3 != 1 then +if $data3_3 != 1 then goto show4 endi diff --git a/tests/script/unique/dnode/vnode_clean.sim b/tests/script/unique/dnode/vnode_clean.sim index 300d1dd4f0..2ce9645108 100644 --- a/tests/script/unique/dnode/vnode_clean.sim +++ b/tests/script/unique/dnode/vnode_clean.sim @@ -31,7 +31,7 @@ system sh/cfg.sh -n dnode3 -c clog -v 1 
system sh/cfg.sh -n dnode4 -c clog -v 1 print ========== step1 -system sh/exec.sh -n dnode1 -s start +system sh/exec_up.sh -n dnode1 -s start sql connect sql create database d1 tables 4 @@ -43,14 +43,14 @@ sql insert into d1.t1 values(now+4s, 12) sql insert into d1.t1 values(now+5s, 11) sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -if $data3_192.168.0.1 != 3 then +print 192.168.0.1 openVnodes $data3_1 +if $data3_1 != 3 then return -1 endi print ========== step2 sql create dnode 192.168.0.2 -system sh/exec.sh -n dnode2 -s start +system sh/exec_up.sh -n dnode2 -s start $x = 0 show2: @@ -60,12 +60,12 @@ show2: return -1 endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -if $data3_192.168.0.1 != 4 then +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +if $data3_1 != 4 then goto show2 endi -if $data3_192.168.0.2 != 3 then +if $data3_2 != 3 then goto show2 endi @@ -81,12 +81,12 @@ sql insert into d2.t2 values(now+5s, 21) $x = 0 sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -if $data3_192.168.0.1 != 4 then +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +if $data3_1 != 4 then return -1 endi -if $data3_192.168.0.2 != 2 then +if $data3_2 != 2 then return -1 endi @@ -101,19 +101,19 @@ show4: return -1 endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -if $data3_192.168.0.1 != 2 then +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +if $data3_1 != 2 then goto show4 endi -if $data3_192.168.0.2 != null then +if $data3_2 != null then goto show4 endi if $rows != 1 then goto show4 endi -system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec_up.sh -n dnode2 -s stop -x SIGINT print ========== step5 sleep 2000 @@ -125,7 +125,7 @@ system sh/cfg.sh -n dnode2 -c balanceMonitorInterval -v 1 system sh/cfg.sh -n dnode2 -c balanceStartInterval -v 10 system sh/cfg.sh -n dnode2 -c mgmtEqualVnodeNum -v 4 system sh/cfg.sh -n dnode2 -c clog -v 1 -system sh/exec.sh -n dnode2 -s start +system sh/exec_up.sh -n dnode2 -s start $x = 0 show5: @@ -135,12 +135,12 @@ show5: return -1 endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -if $data3_192.168.0.1 != 4 then +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +if $data3_1 != 4 then goto show5 endi -if $data3_192.168.0.2 != 2 then +if $data3_2 != 2 then goto show5 endi @@ -154,18 +154,18 @@ sql insert into d3.t3 values(now+4s, 32) sql insert into d3.t3 values(now+5s, 31) sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -if $data3_192.168.0.1 != 4 then +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +if $data3_1 != 4 then return -1 endi -if $data3_192.168.0.2 != 1 then +if $data3_2 != 1 then return -1 endi print ========== step7 sql create dnode 192.168.0.3 -system sh/exec.sh -n dnode3 -s start +system sh/exec_up.sh -n dnode3 -s start $x = 0 show7: @@ -176,16 +176,16 @@ show7: endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -print 192.168.0.3 freeVnodes $data3_192.168.0.3 -if $data3_192.168.0.1 != 4 then +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +print 192.168.0.3 
openVnodes $data3_3 +if $data3_1 != 4 then goto show7 endi -if $data3_192.168.0.2 != 2 then +if $data3_2 != 2 then goto show7 endi -if $data3_192.168.0.3 != 3 then +if $data3_3 != 3 then goto show7 endi @@ -206,16 +206,16 @@ show8: return -1 endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -print 192.168.0.3 freeVnodes $data3_192.168.0.3 -if $data3_192.168.0.1 != 4 then +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +print 192.168.0.3 openVnodes $data3_3 +if $data3_1 != 4 then goto show8 endi -if $data3_192.168.0.2 != 2 then +if $data3_2 != 2 then goto show8 endi -if $data3_192.168.0.3 != 2 then +if $data3_3 != 2 then goto show8 endi @@ -231,20 +231,20 @@ show9: endi sql show dnodes -print 192.168.0.1 freeVnodes $data3_192.168.0.1 -print 192.168.0.2 freeVnodes $data3_192.168.0.2 -print 192.168.0.3 freeVnodes $data3_192.168.0.3 -if $data3_192.168.0.1 != 4 then +print 192.168.0.1 openVnodes $data3_1 +print 192.168.0.2 openVnodes $data3_2 +print 192.168.0.3 openVnodes $data3_3 +if $data3_1 != 4 then goto show9 endi -if $data3_192.168.0.2 != null then +if $data3_2 != null then goto show9 endi -if $data3_192.168.0.3 != 0 then +if $data3_3 != 0 then goto show9 endi -system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec_up.sh -n dnode2 -s stop -x SIGINT print ========== step10 sql select * from d1.t1 order by t desc From 3410fef484fdc7d55834c117beb4470714fb62df Mon Sep 17 00:00:00 2001 From: jtao1735 Date: Wed, 22 Apr 2020 10:07:32 +0000 Subject: [PATCH 14/18] close soket after FD is removed from fd set --- src/rpc/src/rpcTcp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rpc/src/rpcTcp.c b/src/rpc/src/rpcTcp.c index 75115d9ba0..bac2ae879a 100644 --- a/src/rpc/src/rpcTcp.c +++ b/src/rpc/src/rpcTcp.c @@ -466,8 +466,8 @@ static void taosFreeFdObj(SFdObj *pFdObj) { } pFdObj->signature = NULL; - close(pFdObj->fd); epoll_ctl(pThreadObj->pollFd, EPOLL_CTL_DEL, pFdObj->fd, NULL); + close(pFdObj->fd); pThreadObj->numOfFds--; From fefccc3adba0c6bd5c19e6c405d048cdd96c02a2 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 22 Apr 2020 18:24:53 +0800 Subject: [PATCH 15/18] make python test framework dynamically load module with filename script pass [TD-179] --- tests/pytest/insert/basic.py | 24 +++---- tests/pytest/simpletest.sh | 4 +- tests/pytest/test.py | 11 ++-- tests/pytest/util/cases.py | 119 ++++++++++++++++++++++------------- tests/pytest/util/dnodes.py | 23 +++++-- 5 files changed, 113 insertions(+), 68 deletions(-) diff --git a/tests/pytest/insert/basic.py b/tests/pytest/insert/basic.py index 5ec83fd249..e8698e9d05 100644 --- a/tests/pytest/insert/basic.py +++ b/tests/pytest/insert/basic.py @@ -17,6 +17,7 @@ from util.log import * from util.cases import * from util.sql import * + class TDTestCase: def init(self, conn): tdLog.debug("start to execute %s" % __file__) @@ -24,25 +25,24 @@ class TDTestCase: def run(self): tdSql.prepare() - tdSql.execute('show databases') - tdSql.execute('drop database if exists db') - tdSql.execute('create database db') - tdSql.execute('use db') - tdSql.execute('create table tb (ts timestamp, speed int)') + + ret = tdSql.execute('create table tb (ts timestamp, speed int)') insertRows = 10 tdLog.info("insert %d rows" % (insertRows)) for i in range(0, insertRows): - tdSql.execute('insert into tb values (now + %dm, %d)' % (i, i)) + ret = tdSql.execute( + 'insert into tb values (now + %dm, %d)' % + (i, i)) -# tdLog.info("insert earlier 
data") -# tdSql.execute('insert into tb values (now - 5m , 10)') -# tdSql.execute('insert into tb values (now - 6m , 10)') -# tdSql.execute('insert into tb values (now - 7m , 10)') -# tdSql.execute('insert into tb values (now - 8m , 10)') + tdLog.info("insert earlier data") + tdSql.execute('insert into tb values (now - 5m , 10)') + tdSql.execute('insert into tb values (now - 6m , 10)') + tdSql.execute('insert into tb values (now - 7m , 10)') + tdSql.execute('insert into tb values (now - 8m , 10)') tdSql.query("select * from tb") - tdSql.checkRows(insertRows) + tdSql.checkRows(insertRows + 4) def stop(self): tdSql.close() diff --git a/tests/pytest/simpletest.sh b/tests/pytest/simpletest.sh index a6e023bde8..b77c1aa142 100755 --- a/tests/pytest/simpletest.sh +++ b/tests/pytest/simpletest.sh @@ -1,3 +1,3 @@ #!/bin/bash -python2 ./test.py -f insert/basic.py $1 -python2 ./test.py -s $1 +python3 ./test.py -f insert/basic.py $1 +python3 ./test.py -s $1 diff --git a/tests/pytest/test.py b/tests/pytest/test.py index f5d4cc7c29..55ec46e526 100644 --- a/tests/pytest/test.py +++ b/tests/pytest/test.py @@ -24,8 +24,6 @@ from util.cases import * import taos -# add testcase here: -from insert.basic import * if __name__ == "__main__": fileName = "all" @@ -35,7 +33,7 @@ if __name__ == "__main__": valgrind = 0 stop = 0 opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:scgh', [ - 'file=', 'path=', 'master', 'stop', 'cluster', 'valgrind', 'help']) + 'file=', 'path=', 'master', 'stop', 'cluster', 'valgrind', 'help']) for key, value in opts: if key in ['-h', '--help']: tdLog.printNoPrefix( @@ -72,13 +70,13 @@ if __name__ == "__main__": toBeKilled = "valgrind.bin" killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -HUP " % toBeKilled - os.system(killCmd) - time.sleep(1) +# os.system(killCmd) +# time.sleep(1) psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled processID = subprocess.check_output(psCmd, shell=True) - while( processID ): + while(processID): os.system(killCmd) time.sleep(1) processID = subprocess.check_output(psCmd, shell=True) @@ -87,6 +85,7 @@ if __name__ == "__main__": if masterIp == "": tdDnodes.init(deployPath) + tdDnodes.setTestCluster(testCluster) tdDnodes.setValgrind(valgrind) if testCluster: diff --git a/tests/pytest/util/cases.py b/tests/pytest/util/cases.py index 320c9d974f..4bd1b1e9d1 100644 --- a/tests/pytest/util/cases.py +++ b/tests/pytest/util/cases.py @@ -15,6 +15,8 @@ import sys import os import time import datetime +import inspect +import importlib from util.log import * @@ -30,6 +32,10 @@ class TDCases: self.windowsCases = [] self.clusterCases = [] + def __dynamicLoadModule(self, fileName): + moduleName = fileName.replace(".py", "").replace("/", ".") + return importlib.import_module(moduleName, package='..') + def addWindows(self, name, case): self.windowsCases.append(TDCase(name, case)) @@ -40,64 +46,93 @@ class TDCases: self.clusterCases.append(TDCase(name, case)) def runAllLinux(self, conn): - tdLog.notice("run total %d cases" % (len(self.linuxCases))) - for case in self.linuxCases: - case.case.init(conn) - case.case.run() - case.case.stop() - tdLog.notice("total %d cases executed" % (len(self.linuxCases))) + # TODO: load all Linux cases here + runNum = 0 + for tmp in self.linuxCases: + if tmp.name.find(fileName) != -1: + case = testModule.TDTestCase() + case.init(conn) + case.run() + case.stop() + runNum += 1 + continue + + tdLog.notice("total %d Linux test case(s) executed" % (runNum)) def runOneLinux(self, conn, fileName): - 
tdLog.notice("run cases like %s" % (fileName)) + testModule = self.__dynamicLoadModule(fileName) + runNum = 0 - for case in self.linuxCases: - if case.name.find(fileName) != -1: - case.case.init(conn) - case.case.run() - case.case.stop() - time.sleep(5) + for tmp in self.linuxCases: + if tmp.name.find(fileName) != -1: + case = testModule.TDTestCase() + case.init(conn) + case.run() + case.stop() runNum += 1 - tdLog.notice("total %d cases executed" % (runNum)) + continue + + tdLog.notice("total %d Linux test case(s) executed" % (runNum)) def runAllWindows(self, conn): - tdLog.notice("run total %d cases" % (len(self.windowsCases))) - for case in self.windowsCases: - case.case.init(conn) - case.case.run() - case.case.stop() - tdLog.notice("total %d cases executed" % (len(self.windowsCases))) + # TODO: load all Windows cases here + runNum = 0 + for tmp in self.windowsCases: + if tmp.name.find(fileName) != -1: + case = testModule.TDTestCase() + case.init(conn) + case.run() + case.stop() + runNum += 1 + continue + + tdLog.notice("total %d Windows test case(s) executed" % (runNum)) def runOneWindows(self, conn, fileName): - tdLog.notice("run cases like %s" % (fileName)) + testModule = self.__dynamicLoadModule(fileName) + runNum = 0 - for case in self.windowsCases: - if case.name.find(fileName) != -1: - case.case.init(conn) - case.case.run() - case.case.stop() - time.sleep(2) + for tmp in self.windowsCases: + if tmp.name.find(fileName) != -1: + case = testModule.TDTestCase() + case.init(conn) + case.run() + case.stop() runNum += 1 - tdLog.notice("total %d cases executed" % (runNum)) + continue + tdLog.notice("total %d Windows case(s) executed" % (runNum)) def runAllCluster(self): - tdLog.notice("run total %d cases" % (len(self.clusterCases))) - for case in self.clusterCases: - case.case.init() - case.case.run() - case.case.stop() - tdLog.notice("total %d cases executed" % (len(self.clusterCases))) + # TODO: load all cluster case module here + + runNum = 0 + for tmp in self.clusterCases: + if tmp.name.find(fileName) != -1: + tdLog.notice("run cases like %s" % (fileName)) + case = testModule.TDTestCase() + case.init() + case.run() + case.stop() + runNum += 1 + continue + + tdLog.notice("total %d Cluster test case(s) executed" % (runNum)) def runOneCluster(self, fileName): - tdLog.notice("run cases like %s" % (fileName)) + testModule = self.__dynamicLoadModule(fileName) + runNum = 0 - for case in self.clusterCases: - if case.name.find(fileName) != -1: - case.case.init() - case.case.run() - case.case.stop() - time.sleep(2) + for tmp in self.clusterCases: + if tmp.name.find(fileName) != -1: + tdLog.notice("run cases like %s" % (fileName)) + case = testModule.TDTestCase() + case.init() + case.run() + case.stop() runNum += 1 - tdLog.notice("total %d cases executed" % (runNum)) + continue + + tdLog.notice("total %d Cluster test case(s) executed" % (runNum)) tdCases = TDCases() diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 45eaa9b30b..6a9c2607e6 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -30,9 +30,6 @@ class TDSimClient: if os.system(cmd) != 0: tdLog.exit(cmd) - def setValgrind(self, value): - self.valgrind = value - def deploy(self): self.logDir = "%s/sim/psim/log" % (self.path,) self.cfgDir = "%s/sim/psim/cfg" % (self.path) @@ -82,11 +79,15 @@ class TDDnode: self.index = index self.running = 0 self.deployed = 0 + self.testCluster = False self.valgrind = 0 def init(self, path): self.path = path + def setTestCluster(self, value): + 
self.testCluster = value + def setValgrind(self, value): self.valgrind = value @@ -124,7 +125,9 @@ class TDDnode: if os.system(cmd) != 0: tdLog.exit(cmd) - self.startIP() + if self.testCluster: + self.startIP() + self.cfg("masterIp", "192.168.0.1") self.cfg("secondIp", "192.168.0.2") self.cfg("publicIp", "192.168.0.%d" % (self.index)) @@ -292,11 +295,15 @@ class TDDnodes: self.sim.init(self.path) self.sim.deploy() + def setTestCluster(self, value): + self.testCluster = value + def setValgrind(self, value): self.valgrind = value def deploy(self, index): self.check(index) + self.dnodes[index - 1].setTestCluster(self.testCluster) self.dnodes[index - 1].setValgrind(self.valgrind) self.dnodes[index - 1].deploy() @@ -318,11 +325,15 @@ class TDDnodes: def startIP(self, index): self.check(index) - self.dnodes[index - 1].startIP() + + if self.testCluster: + self.dnodes[index - 1].startIP() def stopIP(self, index): self.check(index) - self.dnodes[index - 1].stopIP() + + if self.dnodes[index - 1].testCluster: + self.dnodes[index - 1].stopIP() def check(self, index): if index < 1 or index > 10: From c95b1eeccbbc6ccb12c38e68e80803cea4a73d01 Mon Sep 17 00:00:00 2001 From: slguan Date: Wed, 22 Apr 2020 21:22:01 +0800 Subject: [PATCH 16/18] fix bug while balance --- src/dnode/src/dnodeMgmt.c | 33 +++---------- src/dnode/src/dnodeMnode.c | 1 - src/inc/taosmsg.h | 15 +++--- src/inc/vnode.h | 1 + src/mnode/inc/mgmtVgroup.h | 1 + src/mnode/src/mgmtDnode.c | 7 +-- src/mnode/src/mgmtVgroup.c | 43 ++++++++++++++-- src/util/inc/tutil.h | 2 + src/util/src/tutil.c | 24 +++++++++ src/vnode/src/vnodeMain.c | 49 ++++++++++++++++--- tests/script/sh/deploy.sh | 2 +- tests/script/unique/dnode/balance1.sim | 16 +++--- .../unique/{dnodes => dnode}/basic1.sim | 0 13 files changed, 136 insertions(+), 58 deletions(-) rename tests/script/unique/{dnodes => dnode}/basic1.sim (100%) diff --git a/src/dnode/src/dnodeMgmt.c b/src/dnode/src/dnodeMgmt.c index 99209c734c..97768ca743 100644 --- a/src/dnode/src/dnodeMgmt.c +++ b/src/dnode/src/dnodeMgmt.c @@ -33,7 +33,6 @@ static int32_t dnodeOpenVnodes(); static void dnodeCloseVnodes(); static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *pMsg); static int32_t dnodeProcessDropVnodeMsg(SRpcMsg *pMsg); -static int32_t dnodeProcessAlterVnodeMsg(SRpcMsg *pMsg); static int32_t dnodeProcessAlterStreamMsg(SRpcMsg *pMsg); static int32_t dnodeProcessConfigDnodeMsg(SRpcMsg *pMsg); static int32_t (*dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *pMsg); @@ -41,7 +40,6 @@ static int32_t (*dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *pMsg); int32_t dnodeInitMgmt() { dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = dnodeProcessCreateVnodeMsg; dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_DROP_VNODE] = dnodeProcessDropVnodeMsg; - dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_ALTER_VNODE] = dnodeProcessAlterVnodeMsg; dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = dnodeProcessAlterStreamMsg; dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CONFIG_DNODE] = dnodeProcessConfigDnodeMsg; @@ -146,7 +144,14 @@ static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *rpcMsg) { pCreate->nodes[j].nodeIp = htonl(pCreate->nodes[j].nodeIp); } - return vnodeCreate(pCreate); + void *pVnode = vnodeAccquireVnode(pCreate->cfg.vgId); + if (pVnode != NULL) { + int32_t code = vnodeAlter(pVnode, pCreate); + vnodeRelease(pVnode); + return code; + } else { + return vnodeCreate(pCreate); + } } static int32_t dnodeProcessDropVnodeMsg(SRpcMsg *rpcMsg) { @@ -156,28 +161,6 @@ static int32_t dnodeProcessDropVnodeMsg(SRpcMsg *rpcMsg) { return 
vnodeDrop(pDrop->vgId); } -static int32_t dnodeProcessAlterVnodeMsg(SRpcMsg *rpcMsg) { - SMDCreateVnodeMsg *pCreate = rpcMsg->pCont; - pCreate->cfg.vgId = htonl(pCreate->cfg.vgId); - pCreate->cfg.maxTables = htonl(pCreate->cfg.maxTables); - pCreate->cfg.maxCacheSize = htobe64(pCreate->cfg.maxCacheSize); - pCreate->cfg.minRowsPerFileBlock = htonl(pCreate->cfg.minRowsPerFileBlock); - pCreate->cfg.maxRowsPerFileBlock = htonl(pCreate->cfg.maxRowsPerFileBlock); - pCreate->cfg.daysPerFile = htonl(pCreate->cfg.daysPerFile); - pCreate->cfg.daysToKeep1 = htonl(pCreate->cfg.daysToKeep1); - pCreate->cfg.daysToKeep2 = htonl(pCreate->cfg.daysToKeep2); - pCreate->cfg.daysToKeep = htonl(pCreate->cfg.daysToKeep); - pCreate->cfg.commitTime = htonl(pCreate->cfg.commitTime); - pCreate->cfg.arbitratorIp = htonl(pCreate->cfg.arbitratorIp); - - for (int32_t j = 0; j < pCreate->cfg.replications; ++j) { - pCreate->nodes[j].nodeId = htonl(pCreate->nodes[j].nodeId); - pCreate->nodes[j].nodeIp = htonl(pCreate->nodes[j].nodeIp); - } - - return 0; -} - static int32_t dnodeProcessAlterStreamMsg(SRpcMsg *pMsg) { // SMDAlterStreamMsg *pStream = pCont; // pStream->uid = htobe64(pStream->uid); diff --git a/src/dnode/src/dnodeMnode.c b/src/dnode/src/dnodeMnode.c index 0c16e0ca84..9672a34a9f 100644 --- a/src/dnode/src/dnodeMnode.c +++ b/src/dnode/src/dnodeMnode.c @@ -33,7 +33,6 @@ int32_t dnodeInitMnode() { dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_DROP_STABLE] = dnodeWrite; dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = dnodeMgmt; dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_DROP_VNODE] = dnodeMgmt; - dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_ALTER_VNODE] = dnodeMgmt; dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = dnodeMgmt; dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CONFIG_DNODE] = dnodeMgmt; diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index c3d745c7ac..d821f3117b 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -48,14 +48,12 @@ extern "C" { #define TSDB_MSG_TYPE_MD_CREATE_VNODE_RSP 16 #define TSDB_MSG_TYPE_MD_DROP_VNODE 17 #define TSDB_MSG_TYPE_MD_DROP_VNODE_RSP 18 -#define TSDB_MSG_TYPE_MD_ALTER_VNODE 19 -#define TSDB_MSG_TYPE_MD_ALTER_VNODE_RSP 20 -#define TSDB_MSG_TYPE_MD_DROP_STABLE 21 -#define TSDB_MSG_TYPE_MD_DROP_STABLE_RSP 22 -#define TSDB_MSG_TYPE_MD_ALTER_STREAM 23 -#define TSDB_MSG_TYPE_MD_ALTER_STREAM_RSP 24 -#define TSDB_MSG_TYPE_MD_CONFIG_DNODE 25 -#define TSDB_MSG_TYPE_MD_CONFIG_DNODE_RSP 26 +#define TSDB_MSG_TYPE_MD_DROP_STABLE 19 +#define TSDB_MSG_TYPE_MD_DROP_STABLE_RSP 20 +#define TSDB_MSG_TYPE_MD_ALTER_STREAM 21 +#define TSDB_MSG_TYPE_MD_ALTER_STREAM_RSP 22 +#define TSDB_MSG_TYPE_MD_CONFIG_DNODE 23 +#define TSDB_MSG_TYPE_MD_CONFIG_DNODE_RSP 24 // message from client to mnode #define TSDB_MSG_TYPE_CM_CONNECT 31 @@ -512,6 +510,7 @@ typedef struct { uint8_t status; uint8_t role; uint8_t accessState; + uint8_t replica; uint8_t reserved[5]; } SVnodeLoad; diff --git a/src/inc/vnode.h b/src/inc/vnode.h index e8a7a1458f..1714f1336a 100644 --- a/src/inc/vnode.h +++ b/src/inc/vnode.h @@ -38,6 +38,7 @@ typedef struct { int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg); int32_t vnodeDrop(int32_t vgId); int32_t vnodeOpen(int32_t vgId, char *rootDir); +int32_t vnodeAlter(void *pVnode, SMDCreateVnodeMsg *pVnodeCfg); int32_t vnodeClose(int32_t vgId); void vnodeRelease(void *pVnode); diff --git a/src/mnode/inc/mgmtVgroup.h b/src/mnode/inc/mgmtVgroup.h index 534e640c4d..058ed06f84 100644 --- a/src/mnode/inc/mgmtVgroup.h +++ b/src/mnode/inc/mgmtVgroup.h @@ -47,6 +47,7 @@ void mgmtAddTableIntoVgroup(SVgObj 
*pVgroup, SChildTableObj *pTable); void mgmtRemoveTableFromVgroup(SVgObj *pVgroup, SChildTableObj *pTable); void mgmtSendCreateVnodeMsg(SVgObj *pVgroup, SRpcIpSet *ipSet, void *ahandle); void mgmtSendDropVnodeMsg(int32_t vgId, SRpcIpSet *ipSet, void *ahandle); +void mgmtSendCreateVgroupMsg(SVgObj *pVgroup, void *ahandle); SRpcIpSet mgmtGetIpSetFromVgroup(SVgObj *pVgroup); SRpcIpSet mgmtGetIpSetFromIp(uint32_t ip); diff --git a/src/mnode/src/mgmtDnode.c b/src/mnode/src/mgmtDnode.c index bf58adf594..ef3f93ddad 100644 --- a/src/mnode/src/mgmtDnode.c +++ b/src/mnode/src/mgmtDnode.c @@ -335,6 +335,8 @@ void mgmtProcessDnodeStatusMsg(SRpcMsg *rpcMsg) { if (pStatus->dnodeId == 0) { mTrace("dnode:%d, first access, privateIp:%s, name:%s", pDnode->dnodeId, taosIpStr(pDnode->privateIp), pDnode->dnodeName); + } else { + mTrace("dnode:%d, status received, access times %d", pDnode->dnodeId, pDnode->lastAccess); } int32_t openVnodes = htons(pStatus->openVnodes); @@ -349,11 +351,6 @@ void mgmtProcessDnodeStatusMsg(SRpcMsg *rpcMsg) { mgmtSendDropVnodeMsg(pVload->vgId, &ipSet, NULL); } else { mgmtUpdateVgroupStatus(pVgroup, pDnode, pVload); - if (pVload->role == TAOS_SYNC_ROLE_MASTER) { - pVgroup->totalStorage = htobe64(pVload->totalStorage); - pVgroup->compStorage = htobe64(pVload->compStorage); - pVgroup->pointsWritten = htobe64(pVload->pointsWritten); - } mgmtDecVgroupRef(pVgroup); } } diff --git a/src/mnode/src/mgmtVgroup.c b/src/mnode/src/mgmtVgroup.c index 3bccf385f1..4088b37e8a 100644 --- a/src/mnode/src/mgmtVgroup.c +++ b/src/mnode/src/mgmtVgroup.c @@ -44,9 +44,7 @@ static int32_t mgmtRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, vo static void mgmtProcessCreateVnodeRsp(SRpcMsg *rpcMsg); static void mgmtProcessDropVnodeRsp(SRpcMsg *rpcMsg); static void mgmtProcessVnodeCfgMsg(SRpcMsg *rpcMsg) ; - -static void mgmtSendDropVgroupMsg(SVgObj *pVgroup, void *ahandle); -static void mgmtSendCreateVgroupMsg(SVgObj *pVgroup, void *ahandle); +static void mgmtSendDropVgroupMsg(SVgObj *pVgroup, void *ahandle); static int32_t mgmtVgroupActionDestroy(SSdbOper *pOper) { SVgObj *pVgroup = pOper->pObj; @@ -124,9 +122,25 @@ static int32_t mgmtVgroupActionDelete(SSdbOper *pOper) { static int32_t mgmtVgroupActionUpdate(SSdbOper *pOper) { SVgObj *pNew = pOper->pObj; SVgObj *pVgroup = mgmtGetVgroup(pNew->vgId); + if (pVgroup != pNew) { + for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) { + SDnodeObj *pDnode = pVgroup->vnodeGid[i].pDnode; + if (pDnode != NULL) { + atomic_sub_fetch_32(&pDnode->openVnodes, 1); + } + } + memcpy(pVgroup, pNew, pOper->rowSize); free(pNew); + + for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) { + SDnodeObj *pDnode = mgmtGetDnode(pVgroup->vnodeGid[i].dnodeId); + pVgroup->vnodeGid[i].pDnode = pDnode; + if (pDnode != NULL) { + atomic_add_fetch_32(&pDnode->openVnodes, 1); + } + } } int32_t oldTables = taosIdPoolMaxSize(pVgroup->idPool); @@ -232,6 +246,7 @@ void mgmtUpdateVgroup(SVgObj *pVgroup) { } void mgmtUpdateVgroupStatus(SVgObj *pVgroup, SDnodeObj *pDnode, SVnodeLoad *pVload) { + bool dnodeExist = false; for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) { SVnodeGid *pVgid = &pVgroup->vnodeGid[i]; if (pVgid->pDnode == pDnode) { @@ -239,9 +254,29 @@ void mgmtUpdateVgroupStatus(SVgObj *pVgroup, SDnodeObj *pDnode, SVnodeLoad *pVlo if (pVload->role == TAOS_SYNC_ROLE_MASTER) { pVgroup->inUse = i; } + dnodeExist = true; break; } } + + if (!dnodeExist) { + SRpcIpSet ipSet = mgmtGetIpSetFromIp(pDnode->privateIp); + mError("vgroup:%d, dnode:%d not exist in mnode, drop it", pVload->vgId, 
pDnode->dnodeId); + mgmtSendDropVnodeMsg(pVload->vgId, &ipSet, NULL); + return; + } + + if (pVload->role == TAOS_SYNC_ROLE_MASTER) { + pVgroup->totalStorage = htobe64(pVload->totalStorage); + pVgroup->compStorage = htobe64(pVload->compStorage); + pVgroup->pointsWritten = htobe64(pVload->pointsWritten); + } + + if (pVload->replica != pVgroup->numOfVnodes) { + mError("dnode:%d, vgroup:%d replica:%d not match with mgmt:%d", pDnode->dnodeId, pVload->vgId, pVload->replica, + pVgroup->numOfVnodes); + mgmtSendCreateVgroupMsg(pVgroup, NULL); + } } SVgObj *mgmtGetAvailableVgroup(SDbObj *pDb) { @@ -521,7 +556,7 @@ SMDCreateVnodeMsg *mgmtBuildCreateVnodeMsg(SVgObj *pVgroup) { SMDVnodeDesc *pNodes = pVnode->nodes; for (int32_t j = 0; j < pVgroup->numOfVnodes; ++j) { - SDnodeObj *pDnode = pVgroup->vnodeGid[0].pDnode; + SDnodeObj *pDnode = pVgroup->vnodeGid[j].pDnode; if (pDnode != NULL) { pNodes[j].nodeId = htonl(pDnode->dnodeId); pNodes[j].nodeIp = htonl(pDnode->privateIp); diff --git a/src/util/inc/tutil.h b/src/util/inc/tutil.h index ed58c2e60d..cdcc639151 100644 --- a/src/util/inc/tutil.h +++ b/src/util/inc/tutil.h @@ -170,6 +170,8 @@ char *taosIpStr(uint32_t ipInt); uint32_t ip2uint(const char *const ip_addr); +void taosRemoveDir(char *rootDir); + #define TAOS_ALLOC_MODE_DEFAULT 0 #define TAOS_ALLOC_MODE_RANDOM_FAIL 1 #define TAOS_ALLOC_MODE_DETECT_LEAK 2 diff --git a/src/util/src/tutil.c b/src/util/src/tutil.c index 3d91020365..47d66a066e 100644 --- a/src/util/src/tutil.c +++ b/src/util/src/tutil.c @@ -662,4 +662,28 @@ void tzfree(void *ptr) { if (ptr) { free((void *)((char *)ptr - sizeof(size_t))); } +} + +void taosRemoveDir(char *rootDir) { + DIR *dir = opendir(rootDir); + if (dir == NULL) return; + + struct dirent *de = NULL; + while ((de = readdir(dir)) != NULL) { + if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0) continue; + + char filename[1024]; + snprintf(filename, 1023, "%s/%s", rootDir, de->d_name); + if (de->d_type & DT_DIR) { + taosRemoveDir(filename); + } else { + remove(filename); + uPrint("file:%s is removed", filename); + } + } + + closedir(dir); + rmdir(rootDir); + + uPrint("dir:%s is removed", rootDir); } \ No newline at end of file diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index 5fd337ceca..5c5b6ff272 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -135,6 +135,39 @@ int32_t vnodeDrop(int32_t vgId) { return TSDB_CODE_SUCCESS; } +int32_t vnodeAlter(void *param, SMDCreateVnodeMsg *pVnodeCfg) { + SVnodeObj *pVnode = param; + int32_t code = vnodeSaveCfg(pVnodeCfg); + if (code != TSDB_CODE_SUCCESS) { + dError("vgId:%d, failed to save vnode cfg, reason:%s", pVnodeCfg->cfg.vgId, tstrerror(code)); + return code; + } + + code = vnodeReadCfg(pVnode); + if (code != TSDB_CODE_SUCCESS) { + dError("pVnode:%p vgId:%d, failed to read cfg file", pVnode, pVnode->vgId); + taosDeleteIntHash(tsDnodeVnodesHash, pVnode->vgId); + return code; + } + + code = syncReconfig(pVnode->sync, &pVnode->syncCfg); + if (code != TSDB_CODE_SUCCESS) { + dTrace("pVnode:%p vgId:%d, failed to alter vnode, canot reconfig sync, result:%s", pVnode, pVnode->vgId, + tstrerror(code)); + return code; + } + + code = tsdbConfigRepo(pVnode->tsdb, &pVnode->tsdbCfg); + if (code != TSDB_CODE_SUCCESS) { + dTrace("pVnode:%p vgId:%d, failed to alter vnode, canot reconfig tsdb, result:%s", pVnode, pVnode->vgId, + tstrerror(code)); + return code; + } + + dTrace("pVnode:%p vgId:%d, vnode is altered", pVnode, pVnode->vgId); + return TSDB_CODE_SUCCESS; +} + int32_t 
vnodeOpen(int32_t vnode, char *rootDir) { char temp[TSDB_FILENAME_LEN]; pthread_once(&vnodeModuleInit, vnodeInit); @@ -159,7 +192,7 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) { pVnode->rqueue = dnodeAllocateRqueue(pVnode); sprintf(temp, "%s/wal", rootDir); - pVnode->wal = walOpen(temp, &pVnode->walCfg); + pVnode->wal = walOpen(temp, &pVnode->walCfg); SSyncInfo syncInfo; syncInfo.vgId = pVnode->vgId; @@ -172,10 +205,10 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) { syncInfo.writeToCache = vnodeWriteToQueue; syncInfo.confirmForward = dnodeSendRpcWriteRsp; syncInfo.notifyRole = vnodeNotifyRole; - pVnode->sync = syncStart(&syncInfo); + pVnode->sync = syncStart(&syncInfo); - pVnode->events = NULL; - pVnode->cq = NULL; + pVnode->events = NULL; + pVnode->cq = NULL; STsdbAppH appH = {0}; appH.appH = (void *)pVnode; @@ -233,7 +266,9 @@ void vnodeRelease(void *pVnodeRaw) { pVnode->wqueue = NULL; if (pVnode->status == TAOS_VN_STATUS_DELETING) { - // remove the whole directory + char rootDir[TSDB_FILENAME_LEN] = {0}; + sprintf(rootDir, "%s/vnode%d", tsVnodeDir, vgId); + taosRemoveDir(rootDir); } free(pVnode); @@ -252,7 +287,8 @@ void *vnodeGetVnode(int32_t vgId) { SVnodeObj **ppVnode = (SVnodeObj **)taosGetIntHashData(tsDnodeVnodesHash, vgId); if (ppVnode == NULL || *ppVnode == NULL) { terrno = TSDB_CODE_INVALID_VGROUP_ID; - assert(false); + dError("vgId:%d not exist"); + return NULL; } return *ppVnode; @@ -298,6 +334,7 @@ static void vnodeBuildVloadMsg(char *pNode, void * param) { pLoad->vgId = htonl(pVnode->vgId); pLoad->status = pVnode->status; pLoad->role = pVnode->role; + pLoad->replica = pVnode->syncCfg.replica; } static void vnodeCleanUp(SVnodeObj *pVnode) { diff --git a/tests/script/sh/deploy.sh b/tests/script/sh/deploy.sh index b1aa7c6382..41ba3c425a 100755 --- a/tests/script/sh/deploy.sh +++ b/tests/script/sh/deploy.sh @@ -93,7 +93,7 @@ echo "privateIp $NODE_IP" >> $TAOS_CFG echo "dDebugFlag 199" >> $TAOS_CFG echo "mDebugFlag 199" >> $TAOS_CFG echo "sdbDebugFlag 199" >> $TAOS_CFG -echo "rpcDebugFlag 131" >> $TAOS_CFG +echo "rpcDebugFlag 135" >> $TAOS_CFG echo "tmrDebugFlag 131" >> $TAOS_CFG echo "cDebugFlag 135" >> $TAOS_CFG echo "httpDebugFlag 131" >> $TAOS_CFG diff --git a/tests/script/unique/dnode/balance1.sim b/tests/script/unique/dnode/balance1.sim index 413d4d74b3..555ca3cd6b 100644 --- a/tests/script/unique/dnode/balance1.sim +++ b/tests/script/unique/dnode/balance1.sim @@ -25,15 +25,15 @@ system sh/cfg.sh -n dnode2 -c mgmtEqualVnodeNum -v 4 system sh/cfg.sh -n dnode3 -c mgmtEqualVnodeNum -v 4 system sh/cfg.sh -n dnode4 -c mgmtEqualVnodeNum -v 4 -system sh/cfg.sh -n dnode1 -c clog -v 1 -system sh/cfg.sh -n dnode2 -c clog -v 1 -system sh/cfg.sh -n dnode3 -c clog -v 1 -system sh/cfg.sh -n dnode4 -c clog -v 1 +system sh/cfg.sh -n dnode1 -c clog -v 2 +system sh/cfg.sh -n dnode2 -c clog -v 2 +system sh/cfg.sh -n dnode3 -c clog -v 2 +system sh/cfg.sh -n dnode4 -c clog -v 2 -system sh/cfg.sh -n dnode1 -c clog -v 1 -system sh/cfg.sh -n dnode2 -c clog -v 1 -system sh/cfg.sh -n dnode3 -c clog -v 1 -system sh/cfg.sh -n dnode4 -c clog -v 1 +system sh/cfg.sh -n dnode1 -c clog -v 2 +system sh/cfg.sh -n dnode2 -c clog -v 2 +system sh/cfg.sh -n dnode3 -c clog -v 2 +system sh/cfg.sh -n dnode4 -c clog -v 2 print ========== step1 system sh/exec_up.sh -n dnode1 -s start diff --git a/tests/script/unique/dnodes/basic1.sim b/tests/script/unique/dnode/basic1.sim similarity index 100% rename from tests/script/unique/dnodes/basic1.sim rename to tests/script/unique/dnode/basic1.sim From 
3ed2cbef8849dd4ab3bb271412ae1bfddf6d2a12 Mon Sep 17 00:00:00 2001 From: slguan Date: Wed, 22 Apr 2020 23:12:32 +0800 Subject: [PATCH 17/18] Modify the user code and pass the test case --- src/client/src/tscSystem.c | 9 +++ src/inc/taoserror.h | 13 ++-- src/mnode/inc/mgmtUser.h | 1 + src/mnode/src/mgmtDnode.c | 2 +- src/mnode/src/mgmtUser.c | 64 +++++++++++-------- tests/script/basicSuite.sim | 2 +- tests/script/general/user/basic1.sim | 4 +- tests/script/general/user/basicSuite.sim | 1 + .../general/{account => user}/monitor.sim | 0 .../general/{account => user}/pass_alter.sim | 0 .../general/{account => user}/pass_len.sim | 0 tests/script/general/user/testSuite.sim | 7 +- .../general/{account => user}/user_create.sim | 2 - .../general/{account => user}/user_len.sim | 0 14 files changed, 66 insertions(+), 39 deletions(-) create mode 100644 tests/script/general/user/basicSuite.sim rename tests/script/general/{account => user}/monitor.sim (100%) rename tests/script/general/{account => user}/pass_alter.sim (100%) rename tests/script/general/{account => user}/pass_len.sim (100%) rename tests/script/general/{account => user}/user_create.sim (99%) rename tests/script/general/{account => user}/user_len.sim (100%) diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index 68b11ce416..6713f84f99 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -42,6 +42,7 @@ void * tscTmr; void * tscQhandle; void * tscCheckDiskUsageTmr; int tsInsertHeadSize; +char tsLastUser[TSDB_USER_LEN + 1]; int tscNumOfThreads; @@ -81,6 +82,13 @@ int32_t tscInitRpc(const char *user, const char *secret) { } } + // not stop service, switch users + if (strcmp(tsLastUser, user) != 0 && pTscMgmtConn != NULL) { + tscTrace("switch user from %s to %s", user, tsLastUser); + rpcClose(pTscMgmtConn); + pTscMgmtConn = NULL; + } + if (pTscMgmtConn == NULL) { memset(&rpcInit, 0, sizeof(rpcInit)); rpcInit.localIp = tsLocalIp; @@ -94,6 +102,7 @@ int32_t tscInitRpc(const char *user, const char *secret) { rpcInit.user = (char*)user; rpcInit.ckey = "key"; rpcInit.secret = secretEncrypt; + strcpy(tsLastUser, user); pTscMgmtConn = rpcOpen(&rpcInit); if (pTscMgmtConn == NULL) { diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index 5cd38f6d6b..7b060b1c65 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -75,14 +75,19 @@ TAOS_DEFINE_ERROR(TSDB_CODE_NOT_CONFIGURED, 0, 27, "not configured") TAOS_DEFINE_ERROR(TSDB_CODE_NODE_OFFLINE, 0, 28, "node offline") TAOS_DEFINE_ERROR(TSDB_CODE_NETWORK_UNAVAIL, 0, 29, "network unavailable") -// db & user +// db TAOS_DEFINE_ERROR(TSDB_CODE_DB_NOT_SELECTED, 0, 100, "db not selected") TAOS_DEFINE_ERROR(TSDB_CODE_DB_ALREADY_EXIST, 0, 101, "database aleady exist") TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_DB, 0, 102, "invalid database") TAOS_DEFINE_ERROR(TSDB_CODE_MONITOR_DB_FORBIDDEN, 0, 103, "monitor db forbidden") -TAOS_DEFINE_ERROR(TSDB_CODE_USER_ALREADY_EXIST, 0, 104, "user already exist") -TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_USER, 0, 105, "invalid user") -TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_PASS, 0, 106, "invalid password") + +// user +TAOS_DEFINE_ERROR(TSDB_CODE_USER_ALREADY_EXIST, 0, 150, "user already exist") +TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_USER, 0, 151, "invalid user") +TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_PASS, 0, 152, "invalid password") +TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_USER_FORMAT, 0, 153, "invalid user format") +TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_PASS_FORMAT, 0, 154, "invalid password format") +TAOS_DEFINE_ERROR(TSDB_CODE_NO_USER_FROM_CONN, 0, 
155, "can not get user from conn") // table TAOS_DEFINE_ERROR(TSDB_CODE_TABLE_ALREADY_EXIST, 0, 200, "table already exist") diff --git a/src/mnode/inc/mgmtUser.h b/src/mnode/inc/mgmtUser.h index 8f1cf5d450..d0fd03de77 100644 --- a/src/mnode/inc/mgmtUser.h +++ b/src/mnode/inc/mgmtUser.h @@ -24,6 +24,7 @@ extern "C" { int32_t mgmtInitUsers(); void mgmtCleanUpUsers(); SUserObj *mgmtGetUser(char *name); +void * mgmtGetNextUser(void *pNode, SUserObj **pUser); void mgmtIncUserRef(SUserObj *pUser); void mgmtDecUserRef(SUserObj *pUser); SUserObj *mgmtGetUserFromConn(void *pConn, bool *usePublicIp); diff --git a/src/mnode/src/mgmtDnode.c b/src/mnode/src/mgmtDnode.c index ef3f93ddad..23a1d157a5 100644 --- a/src/mnode/src/mgmtDnode.c +++ b/src/mnode/src/mgmtDnode.c @@ -336,7 +336,7 @@ void mgmtProcessDnodeStatusMsg(SRpcMsg *rpcMsg) { if (pStatus->dnodeId == 0) { mTrace("dnode:%d, first access, privateIp:%s, name:%s", pDnode->dnodeId, taosIpStr(pDnode->privateIp), pDnode->dnodeName); } else { - mTrace("dnode:%d, status received, access times %d", pDnode->dnodeId, pDnode->lastAccess); + //mTrace("dnode:%d, status received, access times %d", pDnode->dnodeId, pDnode->lastAccess); } int32_t openVnodes = htons(pStatus->openVnodes); diff --git a/src/mnode/src/mgmtUser.c b/src/mnode/src/mgmtUser.c index 240704f9ec..77c48985f6 100644 --- a/src/mnode/src/mgmtUser.c +++ b/src/mnode/src/mgmtUser.c @@ -19,23 +19,23 @@ #include "ttime.h" #include "tutil.h" #include "tglobal.h" +#include "tgrant.h" #include "dnode.h" #include "mgmtDef.h" #include "mgmtLog.h" #include "mgmtAcct.h" -#include "tgrant.h" #include "mgmtMnode.h" #include "mgmtSdb.h" #include "mgmtShell.h" #include "mgmtUser.h" -void * tsUserSdb = NULL; +static void * tsUserSdb = NULL; static int32_t tsUserUpdateSize = 0; static int32_t mgmtGetUserMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn); static int32_t mgmtRetrieveUsers(SShowObj *pShow, char *data, int32_t rows, void *pConn); -static void mgmtProcessCreateUserMsg(SQueuedMsg *pMsg); -static void mgmtProcessAlterUserMsg(SQueuedMsg *pMsg); -static void mgmtProcessDropUserMsg(SQueuedMsg *pMsg); +static void mgmtProcessCreateUserMsg(SQueuedMsg *pMsg); +static void mgmtProcessAlterUserMsg(SQueuedMsg *pMsg); +static void mgmtProcessDropUserMsg(SQueuedMsg *pMsg); static int32_t mgmtUserActionDestroy(SSdbOper *pOper) { tfree(pOper->pObj); @@ -48,8 +48,8 @@ static int32_t mgmtUserActionInsert(SSdbOper *pOper) { if (pAcct != NULL) { mgmtAddUserToAcct(pAcct, pUser); - } - else { + mgmtDecAcctRef(pAcct); + } else { mError("user:%s, acct:%s info not exist in sdb", pUser->user, pUser->acct); return TSDB_CODE_INVALID_ACCT; } @@ -86,7 +86,7 @@ static int32_t mgmtUserActionEncode(SSdbOper *pOper) { } static int32_t mgmtUserActionDecode(SSdbOper *pOper) { - SUserObj *pUser = (SUserObj *) calloc(1, sizeof(SUserObj)); + SUserObj *pUser = (SUserObj *)calloc(1, sizeof(SUserObj)); if (pUser == NULL) return TSDB_CODE_SERV_OUT_OF_MEMORY; memcpy(pUser, pOper->rowData, tsUserUpdateSize); @@ -103,7 +103,7 @@ static int32_t mgmtUserActionRestored() { mgmtDecAcctRef(pAcct); } - return 0; + return TSDB_CODE_SUCCESS; } int32_t mgmtInitUsers() { @@ -128,7 +128,7 @@ int32_t mgmtInitUsers() { tsUserSdb = sdbOpenTable(&tableDesc); if (tsUserSdb == NULL) { - mError("failed to init user data"); + mError("table:%s, failed to create hash", tableDesc.tableName); return -1; } @@ -138,7 +138,7 @@ int32_t mgmtInitUsers() { mgmtAddShellShowMetaHandle(TSDB_MGMT_TABLE_USER, mgmtGetUserMeta); 
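/*
 * Access paths to the user table (tsUserSdb) in this file: point lookup via
 * mgmtGetUser(name), and iteration via the newly exported
 * mgmtGetNextUser(pNode, &pUser), which forwards to sdbFetchRow(tsUserSdb, ...).
 * A minimal caller sketch (hypothetical; it assumes sdbFetchRow leaves *pUser
 * NULL once the table is exhausted, and that each fetched row must be released
 * with mgmtDecUserRef, as mgmtRetrieveUsers does):
 *
 *   void     *pNode = NULL;
 *   SUserObj *pUser = NULL;
 *   while (1) {
 *     pNode = mgmtGetNextUser(pNode, &pUser);
 *     if (pUser == NULL) break;
 *     mTrace("user:%s acct:%s", pUser->user, pUser->acct);
 *     mgmtDecUserRef(pUser);
 *   }
 */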
mgmtAddShellShowRetrieveHandle(TSDB_MGMT_TABLE_USER, mgmtRetrieveUsers); - mTrace("table:users table is created"); + mTrace("table:%s, hash is created", tableDesc.tableName); return 0; } @@ -150,6 +150,10 @@ SUserObj *mgmtGetUser(char *name) { return (SUserObj *)sdbGetRow(tsUserSdb, name); } +void *mgmtGetNextUser(void *pNode, SUserObj **pUser) { + return sdbFetchRow(tsUserSdb, pNode, (void **)pUser); +} + void mgmtIncUserRef(SUserObj *pUser) { return sdbIncRef(tsUserSdb, pUser); } @@ -176,17 +180,21 @@ static int32_t mgmtUpdateUser(SUserObj *pUser) { int32_t mgmtCreateUser(SAcctObj *pAcct, char *name, char *pass) { int32_t code = acctCheck(pAcct, ACCT_GRANT_USER); - if (code != 0) { + if (code != TSDB_CODE_SUCCESS) { return code; } - if (name[0] == 0 || pass[0] == 0) { - return TSDB_CODE_INVALID_MSG_CONTENT; + if (name[0] == 0) { + return TSDB_CODE_INVALID_USER_FORMAT; + } + + if (pass[0] == 0) { + return TSDB_CODE_INVALID_PASS_FORMAT; } SUserObj *pUser = mgmtGetUser(name); if (pUser != NULL) { - mTrace("user:%s is already there", name); + mTrace("user:%s, is already there", name); mgmtDecUserRef(pUser); return TSDB_CODE_USER_ALREADY_EXIST; } @@ -241,10 +249,10 @@ static int32_t mgmtDropUser(SUserObj *pUser) { static int32_t mgmtGetUserMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) { SUserObj *pUser = mgmtGetUserFromConn(pConn, NULL); if (pUser == NULL) { - return TSDB_CODE_INVALID_USER; + return TSDB_CODE_NO_USER_FROM_CONN; } - int32_t cols = 0; + int32_t cols = 0; SSchema *pSchema = pMeta->schema; pShow->bytes[cols] = TSDB_USER_LEN; @@ -314,6 +322,7 @@ static int32_t mgmtRetrieveUsers(SShowObj *pShow, char *data, int32_t rows, void numOfRows++; mgmtDecUserRef(pUser); } + pShow->numOfReads += numOfRows; return numOfRows; } @@ -325,20 +334,21 @@ SUserObj *mgmtGetUserFromConn(void *pConn, bool *usePublicIp) { *usePublicIp = (connInfo.serverIp == tsPublicIpInt); } return mgmtGetUser(connInfo.user); + } else { + mError("can not get user from conn:%p", pConn); + return NULL; } - - return NULL; } static void mgmtProcessCreateUserMsg(SQueuedMsg *pMsg) { int32_t code; - SUserObj *pUser = pMsg->pUser; + SUserObj *pOperUser = pMsg->pUser; - if (pUser->superAuth) { + if (pOperUser->superAuth) { SCMCreateUserMsg *pCreate = pMsg->pCont; - code = mgmtCreateUser(pUser->pAcct, pCreate->user, pCreate->pass); + code = mgmtCreateUser(pOperUser->pAcct, pCreate->user, pCreate->pass); if (code == TSDB_CODE_SUCCESS) { - mLPrint("user:%s is created by %s", pCreate->user, pUser->user); + mLPrint("user:%s, is created by %s", pCreate->user, pOperUser->user); } } else { code = TSDB_CODE_NO_RIGHTS; @@ -384,7 +394,7 @@ static void mgmtProcessAlterUserMsg(SQueuedMsg *pMsg) { memset(pUser->pass, 0, sizeof(pUser->pass)); taosEncryptPass((uint8_t*)pAlter->pass, strlen(pAlter->pass), pUser->pass); code = mgmtUpdateUser(pUser); - mLPrint("user:%s password is altered by %s, result:%d", pUser->user, pOperUser->user, tstrerror(code)); + mLPrint("user:%s, password is altered by %s, result:%s", pUser->user, pOperUser->user, tstrerror(code)); } else { code = TSDB_CODE_NO_RIGHTS; } @@ -426,7 +436,7 @@ static void mgmtProcessAlterUserMsg(SQueuedMsg *pMsg) { } code = mgmtUpdateUser(pUser); - mLPrint("user:%s privilege is altered by %s, result:%d", pUser->user, pOperUser->user, tstrerror(code)); + mLPrint("user:%s, privilege is altered by %s, result:%s", pUser->user, pOperUser->user, tstrerror(code)); } else { code = TSDB_CODE_NO_RIGHTS; } @@ -447,7 +457,7 @@ static void mgmtProcessDropUserMsg(SQueuedMsg *pMsg) { SUserObj 
*pUser = mgmtGetUser(pDrop->user); if (pUser == NULL) { mgmtSendSimpleResp(pMsg->thandle, TSDB_CODE_INVALID_USER); - return ; + return; } if (strcmp(pUser->user, "monitor") == 0 || strcmp(pUser->user, pUser->acct) == 0 || @@ -475,7 +485,7 @@ static void mgmtProcessDropUserMsg(SQueuedMsg *pMsg) { if (hasRight) { code = mgmtDropUser(pUser); if (code == TSDB_CODE_SUCCESS) { - mLPrint("user:%s is dropped by %s, result:%s", pUser->user, pOperUser->user, tstrerror(code)); + mLPrint("user:%s, is dropped by %s, result:%s", pUser->user, pOperUser->user, tstrerror(code)); } } else { code = TSDB_CODE_NO_RIGHTS; diff --git a/tests/script/basicSuite.sim b/tests/script/basicSuite.sim index 22c92b50c7..aad021e3b5 100644 --- a/tests/script/basicSuite.sim +++ b/tests/script/basicSuite.sim @@ -3,6 +3,6 @@ run general/db/testSuite.sim run general/insert/testSuite.sim run general/table/testSuite.sim -run general/user/testSuite.sim +run general/user/basicSuite.sim ################################## diff --git a/tests/script/general/user/basic1.sim b/tests/script/general/user/basic1.sim index 1b8c4c84d8..f02a4d0e27 100644 --- a/tests/script/general/user/basic1.sim +++ b/tests/script/general/user/basic1.sim @@ -66,6 +66,4 @@ print $data10 $data11 $data22 print $data20 $data11 $data22 print $data30 $data31 $data32 - - - +system sh/exec.sh -n dnode1 -s stop \ No newline at end of file diff --git a/tests/script/general/user/basicSuite.sim b/tests/script/general/user/basicSuite.sim new file mode 100644 index 0000000000..199c8f39a1 --- /dev/null +++ b/tests/script/general/user/basicSuite.sim @@ -0,0 +1 @@ +run general/user/basic1.sim \ No newline at end of file diff --git a/tests/script/general/account/monitor.sim b/tests/script/general/user/monitor.sim similarity index 100% rename from tests/script/general/account/monitor.sim rename to tests/script/general/user/monitor.sim diff --git a/tests/script/general/account/pass_alter.sim b/tests/script/general/user/pass_alter.sim similarity index 100% rename from tests/script/general/account/pass_alter.sim rename to tests/script/general/user/pass_alter.sim diff --git a/tests/script/general/account/pass_len.sim b/tests/script/general/user/pass_len.sim similarity index 100% rename from tests/script/general/account/pass_len.sim rename to tests/script/general/user/pass_len.sim diff --git a/tests/script/general/user/testSuite.sim b/tests/script/general/user/testSuite.sim index 199c8f39a1..242e8ca27a 100644 --- a/tests/script/general/user/testSuite.sim +++ b/tests/script/general/user/testSuite.sim @@ -1 +1,6 @@ -run general/user/basic1.sim \ No newline at end of file +run general/user/basic1.sim +run general/user/pass_alter.sim +run general/user/pass_len.sim +run general/user/user_create.sim +run general/user/user_len.sim +#run general/user/monitor.sim \ No newline at end of file diff --git a/tests/script/general/account/user_create.sim b/tests/script/general/user/user_create.sim similarity index 99% rename from tests/script/general/account/user_create.sim rename to tests/script/general/user/user_create.sim index 802b703118..e63246cbd6 100644 --- a/tests/script/general/account/user_create.sim +++ b/tests/script/general/user/user_create.sim @@ -4,8 +4,6 @@ system sh/ip.sh -i 1 -s up system sh/deploy.sh -n dnode1 -m 192.168.0.1 -i 192.168.0.1 system sh/cfg.sh -n dnode1 -c clog -v 0 system sh/exec.sh -n dnode1 -s start - -sleep 3000 sql connect print =============== step1 diff --git a/tests/script/general/account/user_len.sim b/tests/script/general/user/user_len.sim similarity index 
100% rename from tests/script/general/account/user_len.sim rename to tests/script/general/user/user_len.sim From 83727507b7545995d474fb1838076d36e831eccf Mon Sep 17 00:00:00 2001 From: slguan Date: Thu, 23 Apr 2020 00:13:14 +0800 Subject: [PATCH 18/18] fix bug while release users --- src/mnode/inc/mgmtAcct.h | 4 ++-- src/mnode/src/mgmtAcct.c | 9 +++++++-- src/mnode/src/mgmtDb.c | 2 ++ src/mnode/src/mgmtDnode.c | 21 +++++++++++++++++---- src/mnode/src/mgmtMnode.c | 5 ++++- src/mnode/src/mgmtProfile.c | 3 +++ src/mnode/src/mgmtShell.c | 1 - src/mnode/src/mgmtTable.c | 4 ++-- src/mnode/src/mgmtUser.c | 4 +++- 9 files changed, 40 insertions(+), 13 deletions(-) diff --git a/src/mnode/inc/mgmtAcct.h b/src/mnode/inc/mgmtAcct.h index 67c98d1eb2..0c0bb8d05c 100644 --- a/src/mnode/inc/mgmtAcct.h +++ b/src/mnode/inc/mgmtAcct.h @@ -24,10 +24,10 @@ extern "C" { int32_t mgmtInitAccts(); void mgmtCleanUpAccts(); -void *mgmtGetAcct(char *acctName); +void * mgmtGetAcct(char *acctName); +void * mgmtGetNextAcct(void *pNode, SAcctObj **pAcct); void mgmtIncAcctRef(SAcctObj *pAcct); void mgmtDecAcctRef(SAcctObj *pAcct); - void mgmtAddDbToAcct(SAcctObj *pAcct, SDbObj *pDb); void mgmtDropDbFromAcct(SAcctObj *pAcct, SDbObj *pDb); void mgmtAddUserToAcct(SAcctObj *pAcct, SUserObj *pUser); diff --git a/src/mnode/src/mgmtAcct.c b/src/mnode/src/mgmtAcct.c index 3e04399fe7..cd5b849ccc 100644 --- a/src/mnode/src/mgmtAcct.c +++ b/src/mnode/src/mgmtAcct.c @@ -58,6 +58,7 @@ static int32_t mgmtActionAcctUpdate(SSdbOper *pOper) { memcpy(pSaved, pAcct, tsAcctUpdateSize); free(pAcct); } + mgmtDecAcctRef(pSaved); return TSDB_CODE_SUCCESS; } @@ -106,11 +107,11 @@ int32_t mgmtInitAccts() { tsAcctSdb = sdbOpenTable(&tableDesc); if (tsAcctSdb == NULL) { - mError("failed to init acct data"); + mError("table:%s, failed to create hash", tableDesc.tableName); return -1; } - mTrace("table:accounts table is created"); + mTrace("table:%s, hash is created", tableDesc.tableName); return acctInit(); } @@ -123,6 +124,10 @@ void *mgmtGetAcct(char *name) { return sdbGetRow(tsAcctSdb, name); } +void *mgmtGetNextAcct(void *pNode, SAcctObj **pAcct) { + return sdbFetchRow(tsAcctSdb, pNode, (void **)pAcct); +} + void mgmtIncAcctRef(SAcctObj *pAcct) { sdbIncRef(tsAcctSdb, pAcct); } diff --git a/src/mnode/src/mgmtDb.c b/src/mnode/src/mgmtDb.c index d66b949421..35b57094fd 100644 --- a/src/mnode/src/mgmtDb.c +++ b/src/mnode/src/mgmtDb.c @@ -63,6 +63,7 @@ static int32_t mgmtDbActionInsert(SSdbOper *pOper) { if (pAcct != NULL) { mgmtAddDbToAcct(pAcct, pDb); + mgmtDecAcctRef(pAcct); } else { mError("db:%s, acct:%s info not exist in sdb", pDb->name, pDb->cfg.acct); @@ -80,6 +81,7 @@ static int32_t mgmtDbActionDelete(SSdbOper *pOper) { mgmtDropAllChildTables(pDb); mgmtDropAllSuperTables(pDb); mgmtDropAllVgroups(pDb); + mgmtDecAcctRef(pAcct); return TSDB_CODE_SUCCESS; } diff --git a/src/mnode/src/mgmtDnode.c b/src/mnode/src/mgmtDnode.c index 23a1d157a5..562e6c0589 100644 --- a/src/mnode/src/mgmtDnode.c +++ b/src/mnode/src/mgmtDnode.c @@ -508,7 +508,10 @@ static int32_t mgmtGetDnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pCo SUserObj *pUser = mgmtGetUserFromConn(pConn, NULL); if (pUser == NULL) return 0; - if (strcmp(pUser->pAcct->user, "root") != 0) return TSDB_CODE_NO_RIGHTS; + if (strcmp(pUser->pAcct->user, "root") != 0) { + mgmtDecUserRef(pUser); + return TSDB_CODE_NO_RIGHTS; + } int32_t cols = 0; SSchema *pSchema = pMeta->schema; @@ -635,7 +638,10 @@ static int32_t mgmtGetModuleMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC SUserObj *pUser 
= mgmtGetUserFromConn(pConn, NULL); if (pUser == NULL) return 0; - if (strcmp(pUser->user, "root") != 0) return TSDB_CODE_NO_RIGHTS; + if (strcmp(pUser->user, "root") != 0) { + mgmtDecUserRef(pUser); + return TSDB_CODE_NO_RIGHTS; + } SSchema *pSchema = pMeta->schema; @@ -744,7 +750,10 @@ static int32_t mgmtGetConfigMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC SUserObj *pUser = mgmtGetUserFromConn(pConn, NULL); if (pUser == NULL) return 0; - if (strcmp(pUser->user, "root") != 0) return TSDB_CODE_NO_RIGHTS; + if (strcmp(pUser->user, "root") != 0) { + mgmtDecUserRef(pUser); + return TSDB_CODE_NO_RIGHTS; + } SSchema *pSchema = pMeta->schema; @@ -827,7 +836,11 @@ static int32_t mgmtGetVnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pCo int32_t cols = 0; SUserObj *pUser = mgmtGetUserFromConn(pConn, NULL); if (pUser == NULL) return 0; - if (strcmp(pUser->user, "root") != 0) return TSDB_CODE_NO_RIGHTS; + + if (strcmp(pUser->user, "root") != 0) { + mgmtDecUserRef(pUser); + return TSDB_CODE_NO_RIGHTS; + } SSchema *pSchema = pMeta->schema; diff --git a/src/mnode/src/mgmtMnode.c b/src/mnode/src/mgmtMnode.c index 922788749e..e8b1239380 100644 --- a/src/mnode/src/mgmtMnode.c +++ b/src/mnode/src/mgmtMnode.c @@ -268,7 +268,10 @@ static int32_t mgmtGetMnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pCo SUserObj *pUser = mgmtGetUserFromConn(pConn, NULL); if (pUser == NULL) return 0; - if (strcmp(pUser->pAcct->user, "root") != 0) return TSDB_CODE_NO_RIGHTS; + if (strcmp(pUser->pAcct->user, "root") != 0) { + mgmtDecUserRef(pUser); + return TSDB_CODE_NO_RIGHTS; + } int32_t cols = 0; SSchema *pSchema = pMeta->schema; diff --git a/src/mnode/src/mgmtProfile.c b/src/mnode/src/mgmtProfile.c index 47bf805138..b52a43569a 100644 --- a/src/mnode/src/mgmtProfile.c +++ b/src/mnode/src/mgmtProfile.c @@ -704,6 +704,7 @@ void mgmtProcessKillQueryMsg(SQueuedMsg *pMsg) { rpcRsp.code = code; rpcSendResponse(&rpcRsp); + mgmtDecUserRef(pUser); } void mgmtProcessKillStreamMsg(SQueuedMsg *pMsg) { @@ -727,6 +728,7 @@ void mgmtProcessKillStreamMsg(SQueuedMsg *pMsg) { rpcRsp.code = code; rpcSendResponse(&rpcRsp); + mgmtDecUserRef(pUser); } void mgmtProcessKillConnectionMsg(SQueuedMsg *pMsg) { @@ -750,6 +752,7 @@ void mgmtProcessKillConnectionMsg(SQueuedMsg *pMsg) { rpcRsp.code = code; rpcSendResponse(&rpcRsp); + mgmtDecUserRef(pUser); } int32_t mgmtInitProfile() { diff --git a/src/mnode/src/mgmtShell.c b/src/mnode/src/mgmtShell.c index 54c66c5ba1..752f33db13 100644 --- a/src/mnode/src/mgmtShell.c +++ b/src/mnode/src/mgmtShell.c @@ -371,7 +371,6 @@ static int mgmtShellRetriveAuth(char *user, char *spi, char *encrypt, char *secr SUserObj *pUser = mgmtGetUser(user); if (pUser == NULL) { *secret = 0; - mgmtDecUserRef(pUser); return TSDB_CODE_INVALID_USER; } else { memcpy(secret, pUser->pass, TSDB_KEY_LEN); diff --git a/src/mnode/src/mgmtTable.c b/src/mnode/src/mgmtTable.c index 1209b489d6..caaae9e988 100644 --- a/src/mnode/src/mgmtTable.c +++ b/src/mnode/src/mgmtTable.c @@ -108,7 +108,7 @@ static int32_t mgmtChildTableActionInsert(SSdbOper *pOper) { SAcctObj *pAcct = mgmtGetAcct(pDb->cfg.acct); if (pAcct == NULL) { - mError("ctable:%s, account:%s not exists", pTable->info.tableId, pDb->cfg.acct); + mError("ctable:%s, acct:%s not exists", pTable->info.tableId, pDb->cfg.acct); return TSDB_CODE_INVALID_ACCT; } mgmtDecAcctRef(pAcct); @@ -150,7 +150,7 @@ static int32_t mgmtChildTableActionDelete(SSdbOper *pOper) { SAcctObj *pAcct = mgmtGetAcct(pDb->cfg.acct); if (pAcct == NULL) { - mError("ctable:%s, account:%s not 
exists", pTable->info.tableId, pDb->cfg.acct); + mError("ctable:%s, acct:%s not exists", pTable->info.tableId, pDb->cfg.acct); return TSDB_CODE_INVALID_ACCT; } mgmtDecAcctRef(pAcct); diff --git a/src/mnode/src/mgmtUser.c b/src/mnode/src/mgmtUser.c index 77c48985f6..a2ec2a32ad 100644 --- a/src/mnode/src/mgmtUser.c +++ b/src/mnode/src/mgmtUser.c @@ -63,6 +63,7 @@ static int32_t mgmtUserActionDelete(SSdbOper *pOper) { if (pAcct != NULL) { mgmtDropUserFromAcct(pAcct, pUser); + mgmtDecAcctRef(pAcct); } return TSDB_CODE_SUCCESS; @@ -72,9 +73,10 @@ static int32_t mgmtUserActionUpdate(SSdbOper *pOper) { SUserObj *pUser = pOper->pObj; SUserObj *pSaved = mgmtGetUser(pUser->user); if (pUser != pSaved) { - memcpy(pSaved, pUser, pOper->rowSize); + memcpy(pSaved, pUser, tsUserUpdateSize); free(pUser); } + mgmtDecUserRef(pSaved); return TSDB_CODE_SUCCESS; }