merge with develop branch.
commit 2d02ee85a8
@@ -438,8 +438,9 @@ void tscKillSTableQuery(SSqlObj *pSql) {
     * here, we cannot set the command = TSDB_SQL_KILL_QUERY. Otherwise, it may cause
     * sub-queries not correctly released and master sql object of super table query reaches an abnormal state.
     */
    pSql->pSubs[i]->res.code = TSDB_CODE_TSC_QUERY_CANCELLED;
    rpcCancelRequest(pSql->pSubs[i]->pRpcCtx);
    rpcCancelRequest(pSub->pRpcCtx);
    pSub->res.code = TSDB_CODE_TSC_QUERY_CANCELLED;
    tscQueueAsyncRes(pSub);
  }

  /*
@@ -617,19 +617,18 @@ void taos_stop_query(TAOS_RES *res) {
  if (pSql->signature != pSql) return;
  tscDebug("%p start to cancel query", res);

  pSql->res.code = TSDB_CODE_TSC_QUERY_CANCELLED;

  SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
  if (tscIsTwoStageSTableQuery(pQueryInfo, 0)) {
    tscKillSTableQuery(pSql);
    return;
  }

  if (pSql->cmd.command >= TSDB_SQL_LOCAL) {
    return;
  if (pSql->cmd.command < TSDB_SQL_LOCAL) {
    rpcCancelRequest(pSql->pRpcCtx);
  }
  pSql->res.code = TSDB_CODE_TSC_QUERY_CANCELLED;
  tscQueueAsyncRes(pSql);

  rpcCancelRequest(pSql->pRpcCtx);
  tscDebug("%p query is cancelled", res);
}
@@ -870,6 +870,11 @@ void *readTable(void *sarg) {
  int64_t sTime = rinfo->start_time;
  char *tb_prefix = rinfo->tb_prefix;
  FILE *fp = fopen(rinfo->fp, "a");
  if (NULL == fp) {
    printf("fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno));
    return NULL;
  }

  int num_of_DPT = rinfo->nrecords_per_table;
  int num_of_tables = rinfo->end_table_id - rinfo->start_table_id + 1;
  int totalData = num_of_DPT * num_of_tables;
@@ -925,6 +930,11 @@ void *readMetric(void *sarg) {
  TAOS *taos = rinfo->taos;
  char command[BUFFER_SIZE] = "\0";
  FILE *fp = fopen(rinfo->fp, "a");
  if (NULL == fp) {
    printf("fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno));
    return NULL;
  }

  int num_of_DPT = rinfo->nrecords_per_table;
  int num_of_tables = rinfo->end_table_id - rinfo->start_table_id + 1;
  int totalData = num_of_DPT * num_of_tables;
File diff suppressed because it is too large
@@ -51,6 +51,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
      break;
    case 'f':
      arguments->fqdn = arg;
      break;
    case 'g':
      arguments->dnodeGroups = arg;
      break;
@@ -96,6 +96,7 @@ void walModWalFile(char* walfile) {
  if (wfd < 0) {
    printf("wal:%s, failed to open(%s)\n", newWalFile, strerror(errno));
    free(buffer);
    close(rfd);
    return ;
  }
@@ -116,6 +117,11 @@ void walModWalFile(char* walfile) {
      break;
    }

    if (pHead->len >= 1024000 - sizeof(SWalHead)) {
      printf("wal:%s, SWalHead.len(%d) overflow, skip the rest of file\n", walfile, pHead->len);
      break;
    }

    ret = read(rfd, pHead->cont, pHead->len);
    if ( ret != pHead->len) {
      printf("wal:%s, failed to read body, skip, len:%d ret:%d\n", walfile, pHead->len, ret);
@@ -99,6 +99,8 @@ static int32_t readVnodeCfg(SVnodeObj *pVnode, char* cfgFile)
    goto PARSE_OVER;
  }

  content[maxLen] = (char)0;

  root = cJSON_Parse(content);
  if (root == NULL) {
    printf("failed to json parse %s, invalid json format\n", cfgFile);
@@ -783,9 +783,15 @@ static int32_t mnodeProcessTableMetaMsg(SMnodeMsg *pMsg) {

static int32_t mnodeCreateSuperTableCb(SMnodeMsg *pMsg, int32_t code) {
  SSuperTableObj *pTable = (SSuperTableObj *)pMsg->pTable;
  if (pTable != NULL) {
    mLInfo("app:%p:%p, stable:%s, is created in sdb, result:%s", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId,
           tstrerror(code));
  assert(pTable);

  if (code == TSDB_CODE_SUCCESS) {
    mLInfo("stable:%s, is created in sdb", pTable->info.tableId);
  } else {
    mError("app:%p:%p, stable:%s, failed to create in sdb, reason:%s", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId,
           tstrerror(code));
    SSdbOper desc = {.type = SDB_OPER_GLOBAL, .pObj = pTable, .table = tsSuperTableSdb};
    sdbDeleteRow(&desc);
  }

  return code;
@@ -1561,10 +1567,16 @@ static int32_t mnodeDoCreateChildTableCb(SMnodeMsg *pMsg, int32_t code) {
  SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable;
  assert(pTable);

  mDebug("app:%p:%p, table:%s, create table in id:%d, uid:%" PRIu64 ", result:%s", pMsg->rpcMsg.ahandle, pMsg,
         pTable->info.tableId, pTable->sid, pTable->uid, tstrerror(code));

  if (code != TSDB_CODE_SUCCESS) return code;
  if (code == TSDB_CODE_SUCCESS) {
    mDebug("app:%p:%p, table:%s, create table in sid:%d, uid:%" PRIu64, pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId,
           pTable->sid, pTable->uid);
  } else {
    mError("app:%p:%p, table:%s, failed to create table sid:%d, uid:%" PRIu64 ", reason:%s", pMsg->rpcMsg.ahandle, pMsg,
           pTable->info.tableId, pTable->sid, pTable->uid, tstrerror(code));
    SSdbOper desc = {.type = SDB_OPER_GLOBAL, .pObj = pTable, .table = tsChildTableSdb};
    sdbDeleteRow(&desc);
    return code;
  }

  SCMCreateTableMsg *pCreate = pMsg->rpcMsg.pCont;
  SMDCreateTableMsg *pMDCreate = mnodeBuildCreateChildTableMsg(pCreate, pTable);
@@ -348,17 +348,23 @@ void *mnodeGetNextVgroup(void *pIter, SVgObj **pVgroup) {
}

static int32_t mnodeCreateVgroupCb(SMnodeMsg *pMsg, int32_t code) {
  SVgObj *pVgroup = pMsg->pVgroup;
  SDbObj *pDb = pMsg->pDb;
  assert(pVgroup);

  if (code != TSDB_CODE_SUCCESS) {
    pMsg->pVgroup = NULL;
    mError("app:%p:%p, vgId:%d, failed to create in sdb, reason:%s", pMsg->rpcMsg.ahandle, pMsg, pVgroup->vgId,
           tstrerror(code));
    SSdbOper desc = {.type = SDB_OPER_GLOBAL, .pObj = pVgroup, .table = tsVgroupSdb};
    sdbDeleteRow(&desc);
    return code;
  }

  SVgObj *pVgroup = pMsg->pVgroup;
  SDbObj *pDb = pMsg->pDb;

  mInfo("vgId:%d, is created in mnode, db:%s replica:%d", pVgroup->vgId, pDb->name, pVgroup->numOfVnodes);
  mInfo("app:%p:%p, vgId:%d, is created in mnode, db:%s replica:%d", pMsg->rpcMsg.ahandle, pMsg, pVgroup->vgId,
        pDb->name, pVgroup->numOfVnodes);
  for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
    mInfo("vgId:%d, index:%d, dnode:%d", pVgroup->vgId, i, pVgroup->vnodeGid[i].dnodeId);
    mInfo("app:%p:%p, vgId:%d, index:%d, dnode:%d", pMsg->rpcMsg.ahandle, pMsg, pVgroup->vgId, i,
          pVgroup->vnodeGid[i].dnodeId);
  }

  mnodeIncVgroupRef(pVgroup);
@@ -135,7 +135,10 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
    } else {
      assert(pQInfo == NULL);
    }

    if (handle != NULL) {
      dnodePutItemIntoReadQueue(pVnode, handle);
      qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false);
    }
    vDebug("vgId:%d, QInfo:%p, dnode query msg disposed", vgId, pQInfo);
  } else {
    assert(pCont != NULL);
@@ -146,14 +149,10 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
    } else {
      vDebug("vgId:%d, QInfo:%p, dnode query msg in progress", pVnode->vgId, *(void**) pCont);
      code = TSDB_CODE_VND_ACTION_IN_PROGRESS;
      qTableQuery(*handle); // do execute query
    }
  }

  if (handle != NULL) {
    qTableQuery(*handle); // do execute query
    qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false);
  }

  return code;
}
@@ -689,14 +689,12 @@ class DbConnNative(DbConn):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:root.find("build")]
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def openByType(self):  # Open connection
        # cfgPath = "../../build/test/cfg"
        cfgPath = self.getBuildPath() + "/test/cfg"
        print("CBD: cfgPath=%s" % cfgPath)
        self._conn = taos.connect(
            host="127.0.0.1",
            config=cfgPath)  # TODO: make configurable
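The hunk above changes how the build root is derived from the directory that contains taosd: instead of cutting at the first "build" substring, it strips the known "/build/bin" suffix. A minimal sketch of the two slices, using a hypothetical path purely for illustration (not taken from the commit):

# Illustrative only; the path is a made-up example.
root = "/home/user/TDengine/debug/build/bin"

# Old slice: cut at the first occurrence of "build" anywhere in the path.
old_build_path = root[:root.find("build")]              # "/home/user/TDengine/debug/"

# New slice: drop exactly the trailing "/build/bin" suffix.
new_build_path = root[:len(root) - len("/build/bin")]   # "/home/user/TDengine/debug"

print(old_build_path)
print(new_build_path)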
@@ -1387,14 +1385,15 @@ class Task():
        try:
            self._executeInternal(te, wt)  # TODO: no return value?
        except taos.error.ProgrammingError as err:
            errno2 = err.errno if (
                err.errno > 0) else 0x80000000 + err.errno  # correct error scheme
            if (errno2 in [0x200, 0x360, 0x362, 0x36A, 0x36B, 0x36D, 0x381, 0x380, 0x383, 0x503, 0x600,
                           1000  # REST catch-all error
                           ]):  # allowed errors
                self.logDebug(
                    "[=] Acceptable Taos library exception: errno=0x{:X}, msg: {}, SQL: {}".format(
                        errno2, err, self._lastSql))
            errno2 = err.errno if (err.errno > 0) else 0x80000000 + err.errno  # correct error scheme
            if ( errno2 in [
                    0x05,  # TSDB_CODE_RPC_NOT_READY
                    0x200, 0x360, 0x362, 0x36A, 0x36B, 0x36D, 0x381, 0x380, 0x383, 0x503,
                    0x510,  # vnode not in ready state
                    0x600,
                    1000  # REST catch-all error
                    ]) :  # allowed errors
                self.logDebug("[=] Acceptable Taos library exception: errno=0x{:X}, msg: {}, SQL: {}".format(errno2, err, self._lastSql))
                print("_", end="", flush=True)
                self._err = err
            else:
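The handler above first normalizes negative connector error numbers into the positive 0x...-style codes before checking the allow-list. A standalone sketch of that normalization (plain Python; the sample raw value is hypothetical, the set is a subset of the allow-list in the hunk):

# Sketch of the errno normalization used in the hunk above.
ACCEPTABLE = {0x05, 0x200, 0x360, 0x36A, 0x383, 0x510, 0x600, 1000}

def normalize_errno(errno: int) -> int:
    # A negative errno is the signed 32-bit view of a 0x8000xxxx code;
    # adding 0x80000000 recovers the low bits used in the allow-list.
    return errno if errno > 0 else 0x80000000 + errno

for raw in (0x200, -0x7FFFFDF6):          # -0x7FFFFDF6 + 0x80000000 == 0x20A
    code = normalize_errno(raw)
    print(hex(code), code in ACCEPTABLE)   # 0x200 True, 0x20a False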
@@ -1863,88 +1862,239 @@ class MyLoggingAdapter(logging.LoggerAdapter):
        return "[{}]{}".format(threading.get_ident() % 10000, msg), kwargs
        # return '[%s] %s' % (self.extra['connid'], msg), kwargs


class SvcManager:
    MAX_QUEUE_SIZE = 10000

    def __init__(self):
        print("Starting service manager")
        signal.signal(signal.SIGTERM, self.sigIntHandler)
        signal.signal(signal.SIGINT, self.sigIntHandler)
        signal.signal(signal.SIGUSR1, self.sigUsrHandler)
        self.ioThread = None
        self.subProcess = None
        self.shouldStop = False
        self.status = MainExec.STATUS_RUNNING
        # self.status = MainExec.STATUS_RUNNING # set inside _startTaosService()

    def svcOutputReader(self, out: IO, queue):
        # print("This is the svcOutput Reader...")
        for line in out:  # iter(out.readline, b''):
        # Important Reference: https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
        print("This is the svcOutput Reader...")
        # for line in out :
        for line in iter(out.readline, b''):
            # print("Finished reading a line: {}".format(line))
            queue.put(line.rstrip())  # get rid of new lines
            # meaning sub process must have died
        print("No more output from incoming IO")
            # print("Adding item to queue...")
            line = line.decode("utf-8").rstrip()
            queue.put(line)  # This might block, and then causing "out" buffer to block
            print("_i", end="", flush=True)

            # Trim the queue if necessary
            oneTenthQSize = self.MAX_QUEUE_SIZE // 10
            if (queue.qsize() >= (self.MAX_QUEUE_SIZE - oneTenthQSize) ) :  # 90% full?
                print("Triming IPC queue by: {}".format(oneTenthQSize))
                for i in range(0, oneTenthQSize) :
                    try:
                        queue.get_nowait()
                    except Empty:
                        break  # break out of for loop, no more trimming

            if self.shouldStop :
                print("Stopping to read output from sub process")
                break

        # queue.put(line)
        print("\nNo more output (most likely) from IO thread managing TDengine service")  # meaning sub process must have died
        out.close()

    def sigIntHandler(self, signalNumber, frame):
        if self.status != MainExec.STATUS_RUNNING:
            print("Ignoring repeated SIGINT...")
            return  # do nothing if it's already not running
        self.status = MainExec.STATUS_STOPPING  # immediately set our status
    def _doMenu(self):
        choice = ""
        while True:
            print("\nInterrupting Service Program, Choose an Action: ")
            print("1: Resume")
            print("2: Terminate")
            print("3: Restart")
            # Remember to update the if range below
            # print("Enter Choice: ", end="", flush=True)
            while choice == "":
                choice = input("Enter Choice: ")
                if choice != "":
                    break  # done with reading repeated input
            if choice in ["1", "2", "3"]:
                break  # we are done with whole method
            print("Invalid choice, please try again.")
            choice = ""  # reset
        return choice

        print("Terminating program...")
        self.subProcess.send_signal(signal.SIGINT)
        self.shouldStop = True
        self.joinIoThread()
    def sigUsrHandler(self, signalNumber, frame) :
        print("Interrupting main thread execution upon SIGUSR1")
        if self.status != MainExec.STATUS_RUNNING :
            print("Ignoring repeated SIG...")
            return  # do nothing if it's already not running
        self.status = MainExec.STATUS_STOPPING

        choice = self._doMenu()
        if choice == "1" :
            self.sigHandlerResume()  # TODO: can the sub-process be blocked due to us not reading from queue?
        elif choice == "2" :
            self.stopTaosService()
        elif choice == "3" :
            self.stopTaosService()
            self.startTaosService()
        else:
            raise RuntimeError("Invalid menu choice: {}".format(choice))

    def sigIntHandler(self, signalNumber, frame):
        print("Sig INT Handler starting...")
        if self.status != MainExec.STATUS_RUNNING :
            print("Ignoring repeated SIG_INT...")
            return

        self.status = MainExec.STATUS_STOPPING  # immediately set our status
        self.stopTaosService()
        print("INT signal handler returning...")

    def sigHandlerResume(self) :
        print("Resuming TDengine service manager thread (main thread)...\n\n")
        self.status = MainExec.STATUS_RUNNING

    def joinIoThread(self):
        if self.ioThread:
            self.ioThread.join()
            self.ioThread = None
        else :
            print("Joining empty thread, doing nothing")

    def run(self):
        TD_READY_MSG = "TDengine is initialized successfully"
    def _procIpcBatch(self):
        # Process all the output generated by the underlying sub process, managed by IO thread
        while True :
            try:
                line = self.ipcQueue.get_nowait()  # getting output at fast speed
                print("_o", end="", flush=True)
                if self.status == MainExec.STATUS_STARTING :  # we are starting, let's see if we have started
                    if line.find(self.TD_READY_MSG) != -1 :  # found
                        self.status = MainExec.STATUS_RUNNING

            except Empty:
                # time.sleep(2.3) # wait only if there's no output
                # no more output
                return  # we are done with THIS BATCH
            else:  # got line
                print(line)

    def _procIpcAll(self):
        while True :
            print("<", end="", flush=True)
            self._procIpcBatch()  # process one batch

            # check if the ioThread is still running
            if (not self.ioThread) or (not self.ioThread.is_alive()):
                print("IO Thread (with subprocess) has ended, main thread now exiting...")
                self.stopTaosService()
                self._procIpcBatch()  # one more batch
                return  # TODO: maybe one last batch?

            # Maybe handler says we should exit now
            if self.shouldStop:
                print("Main thread ending all IPC processing with IOThread/SubProcess")
                self._procIpcBatch()  # one more batch
                return

            print(">", end="", flush=True)
            time.sleep(0.5)

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))
        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("communit")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def startTaosService(self):
        ON_POSIX = 'posix' in sys.builtin_module_names

        taosdPath = self.getBuildPath() + "/build/bin/taosd"
        cfgPath = self.getBuildPath() + "/test/cfg"

        print ("CBD: taosdPath:%s cfgPath:%s" % (taosPat, cfgPath))

        svcCmd = ['../../build/build/bin/taosd', '-c', '../../build/test/cfg']
        svcCmd = [taosdPath, '-c', cfgPath]
        # svcCmd = ['vmstat', '1']
        if self.subProcess :  # already there
            raise RuntimeError("Corrupt process state")

        self.subProcess = subprocess.Popen(
            svcCmd,
            stdout=subprocess.PIPE,
            bufsize=1,
            close_fds=ON_POSIX,
            text=True)
        q = Queue()
        self.ioThread = threading.Thread(
            target=self.svcOutputReader, args=(
                self.subProcess.stdout, q))
        self.ioThread.daemon = True  # thread dies with the program
            # bufsize=1, # not supported in binary mode
            close_fds=ON_POSIX)  # had text=True, which interferred with reading EOF
        self.ipcQueue = Queue()

        if self.ioThread :
            raise RuntimeError("Corrupt thread state")
        self.ioThread = threading.Thread(target=self.svcOutputReader, args=(self.subProcess.stdout, self.ipcQueue))
        self.ioThread.daemon = True  # thread dies with the program
        self.ioThread.start()

        self.shouldStop = False  # don't let the main loop stop
        self.status = MainExec.STATUS_STARTING

        # wait for service to start
        for i in range(0, 10) :
            time.sleep(1.0)
            self._procIpcBatch()  # pump messages
            print("_zz_", end="", flush=True)
            if self.status == MainExec.STATUS_RUNNING :
                print("TDengine service READY to process requests")
                return  # now we've started
        raise RuntimeError("TDengine service did not start successfully")  # TODO: handle this better?

    def stopTaosService(self):
        # can be called from both main thread or signal handler
        print("Terminating TDengine service running as the sub process...")
        # Linux will send Control-C generated SIGINT to the TDengine process already, ref: https://unix.stackexchange.com/questions/176235/fork-and-how-signals-are-delivered-to-processes
        if not self.subProcess :
            print("Process already stopped")
            return

        retCode = self.subProcess.poll()
        if retCode :  # valid return code, process ended
            self.subProcess = None
        else:  # process still alive, let's interrupt it
            print("Sub process still running, sending SIG_INT and waiting for it to stop...")
            self.subProcess.send_signal(signal.SIGINT)  # sub process should end, then IPC queue should end, causing IO thread to end
            try :
                self.subProcess.wait(10)
            except subprocess.TimeoutExpired as err:
                print("Time out waiting for TDengine service process to exit")
            else:
                print("TDengine service process terminated successfully from SIG_INT")
                self.subProcess = None

        if self.subProcess and (not self.subProcess.poll()):
            print("Sub process is still running... pid = {}".format(self.subProcess.pid))

        self.shouldStop = True
        self.joinIoThread()

    def run(self):
        self.startTaosService()

        # proc = subprocess.Popen(['echo', '"to stdout"'],
        #                         stdout=subprocess.PIPE,
        #                         )
        # stdout_value = proc.communicate()[0]
        # print('\tstdout: {}'.format(repr(stdout_value)))

        while True:
            try:
                line = q.get_nowait()  # getting output at fast speed
            except Empty:
                # print('no output yet')
                time.sleep(2.3)  # wait only if there's no output
            else:  # got line
                print(line)
            # print("----end of iteration----")
            if self.shouldStop:
                print("Ending main Svc thread")
                break
        self._procIpcAll()

        print("end of loop")

        self.joinIoThread()
        print("Finished")
        print("End of loop reading from IPC queue")
        self.joinIoThread()  # should have started already
        print("SvcManager Run Finished")


class ClientManager:
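The SvcManager code above launches taosd as a subprocess and drains its stdout on a separate daemon thread into a queue, following the non-blocking-pipe pattern referenced in its comments. A minimal, generic sketch of that pattern using only the standard library ("cat /etc/hostname" is a placeholder command, not the commit's; SvcManager launches taosd instead):

import subprocess, threading, time
from queue import Queue, Empty

def reader(pipe, q):
    # Read byte lines until EOF, i.e. until the subprocess exits.
    for line in iter(pipe.readline, b''):
        q.put(line.decode("utf-8").rstrip())
    pipe.close()

proc = subprocess.Popen(["cat", "/etc/hostname"], stdout=subprocess.PIPE)
q = Queue()
t = threading.Thread(target=reader, args=(proc.stdout, q), daemon=True)
t.start()

time.sleep(0.1)                 # give the reader a moment to pump some output
while True:
    try:
        print(q.get_nowait())   # consume whatever has arrived, without blocking
    except Empty:
        break
proc.wait()
t.join()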
@@ -1998,6 +2148,10 @@ class ClientManager:
        self._printLastNumbers()

    def run(self):
        if gConfig.auto_start_service :
            svcMgr = SvcManager()
            svcMgr.startTaosService()

        self._printLastNumbers()

        dbManager = DbManager()  # Regular function
@@ -2009,6 +2163,8 @@ class ClientManager:
        # print("exec stats: {}".format(self.tc.getExecStats()))
        # print("TC failed = {}".format(self.tc.isFailed()))
        self.conclude()
        if gConfig.auto_start_service :
            svcMgr.stopTaosService()
        # print("TC failed (2) = {}".format(self.tc.isFailed()))
        # Linux return code: ref https://shapeshed.com/unix-exit-codes/
        return 1 if self.tc.isFailed() else 0
@@ -2019,8 +2175,9 @@ class ClientManager:


class MainExec:
    STATUS_RUNNING = 1
    STATUS_STOPPING = 2
    STATUS_STARTING = 1
    STATUS_RUNNING = 2
    STATUS_STOPPING = 3
    # STATUS_STOPPED = 3 # Not used yet

    @classmethod
@@ -2091,52 +2248,24 @@ def main():

        '''))

    parser.add_argument(
        '-c',
        '--connector-type',
        action='store',
        default='native',
        type=str,
        help='Connector type to use: native, rest, or mixed (default: 10)')
    parser.add_argument(
        '-d',
        '--debug',
        action='store_true',
        help='Turn on DEBUG mode for more logging (default: false)')
    parser.add_argument(
        '-e',
        '--run-tdengine',
        action='store_true',
        help='Run TDengine service in foreground (default: false)')
    parser.add_argument(
        '-l',
        '--larger-data',
        action='store_true',
        help='Write larger amount of data during write operations (default: false)')
    parser.add_argument(
        '-p',
        '--per-thread-db-connection',
        action='store_true',
        help='Use a single shared db connection (default: false)')
    parser.add_argument(
        '-r',
        '--record-ops',
        action='store_true',
        help='Use a pair of always-fsynced fils to record operations performing + performed, for power-off tests (default: false)')
    parser.add_argument(
        '-s',
        '--max-steps',
        action='store',
        default=1000,
        type=int,
        help='Maximum number of steps to run (default: 100)')
    parser.add_argument(
        '-t',
        '--num-threads',
        action='store',
        default=5,
        type=int,
        help='Number of threads to run (default: 10)')
    parser.add_argument('-a', '--auto-start-service', action='store_true',
                        help='Automatically start/stop the TDengine service (default: false)')
    parser.add_argument('-c', '--connector-type', action='store', default='native', type=str,
                        help='Connector type to use: native, rest, or mixed (default: 10)')
    parser.add_argument('-d', '--debug', action='store_true',
                        help='Turn on DEBUG mode for more logging (default: false)')
    parser.add_argument('-e', '--run-tdengine', action='store_true',
                        help='Run TDengine service in foreground (default: false)')
    parser.add_argument('-l', '--larger-data', action='store_true',
                        help='Write larger amount of data during write operations (default: false)')
    parser.add_argument('-p', '--per-thread-db-connection', action='store_true',
                        help='Use a single shared db connection (default: false)')
    parser.add_argument('-r', '--record-ops', action='store_true',
                        help='Use a pair of always-fsynced fils to record operations performing + performed, for power-off tests (default: false)')
    parser.add_argument('-s', '--max-steps', action='store', default=1000, type=int,
                        help='Maximum number of steps to run (default: 100)')
    parser.add_argument('-t', '--num-threads', action='store', default=5, type=int,
                        help='Number of threads to run (default: 10)')

    global gConfig
    gConfig = parser.parse_args()
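The hunk above collapses each multi-line parser.add_argument call into one call per option and adds the new -a/--auto-start-service flag. A compact, self-contained sketch of the same argparse style (only two representative options shown; the description string and the example argument list are illustrative, not from the commit):

import argparse

parser = argparse.ArgumentParser(description="condensed option-parsing sketch")
parser.add_argument('-a', '--auto-start-service', action='store_true',
                    help='Automatically start/stop the TDengine service (default: false)')
parser.add_argument('-s', '--max-steps', action='store', default=1000, type=int,
                    help='Maximum number of steps to run (default: 100)')

config = parser.parse_args(['-a', '-s', '50'])       # example invocation
print(config.auto_start_service, config.max_steps)   # True 50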
@@ -190,32 +190,31 @@ class TDDnode:
            "dnode:%d is deployed and configured by %s" %
            (self.index, self.cfgPath))

    def start(self):
    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))
        binPath = ""

        if ("community" in selfPath):
            projPath = selfPath + "/../../../../"

            for root, dirs, files in os.walk(projPath):
                if ("taosd" in files):
                    rootRealPath = os.path.dirname(os.path.realpath(root))
                    if ("packaging" not in rootRealPath):
                        binPath = os.path.join(root, "taosd")
                        break
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath + "/../../../"
            for root, dirs, files in os.walk(projPath):
                if ("taosd" in files):
                    rootRealPath = os.path.dirname(os.path.realpath(root))
                    if ("packaging" not in rootRealPath):
                        binPath = os.path.join(root, "taosd")
                        break
            projPath = selfPath[:selfPath.find("tests")]

        if (binPath == ""):
        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root)-len("/build/bin")]
                    break
        return buildPath

    def start(self):
        buildPath = self.getBuildPath()

        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % rootRealPath)
            tdLog.info("taosd found in %s" % buildPath)

        binPath = buildPath + "/build/bin/taosd"

        if self.deployed == 0:
            tdLog.exit("dnode:%d is not deployed" % (self.index))
@@ -0,0 +1,16 @@
sql connect

$db = db1
$stb = stb1
print =============== client1_0:

sql use $db

$tblNum = 1000

$i = 1
while $i < $tblNum
  $tb = tb . $i
  sql create table $tb using $stb tags ($i, 'abcd')
  $i = $i + 1
endw
@@ -0,0 +1,494 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4

system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode4 -c numOfMnodes -v 3

system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/cfg.sh -n dnode2 -c walLevel -v 1
system sh/cfg.sh -n dnode3 -c walLevel -v 1
system sh/cfg.sh -n dnode4 -c walLevel -v 1

system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
system sh/cfg.sh -n dnode4 -c balanceInterval -v 10

system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4

system sh/cfg.sh -n dnode1 -c alternativeRole -v 0
system sh/cfg.sh -n dnode2 -c alternativeRole -v 0
system sh/cfg.sh -n dnode3 -c alternativeRole -v 0
system sh/cfg.sh -n dnode4 -c alternativeRole -v 0

system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 1000
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 1000
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 1000

system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator

print ============== step0: start tarbitrator
system sh/exec_tarbitrator.sh -s start

print ============== step1: start dnode1/dnode2/dnode3
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
sleep 3000
sql connect
sql create dnode $hostname2
sql create dnode $hostname3
sleep 3000

print ============== step2: create db1 with replica 3
$db = db1
print create database $db replica 3
#sql create database $db replica 3 maxTables $totalTableNum
sql create database $db replica 3
sql use $db

print ============== step3: create stable stb1
$stb = stb1
sql create table $stb (ts timestamp, c1 int, c2 int) tags(t1 int, t2 binary(8))

print ============== step4: start 10 client1/ 10 client2/ 10 client3/ 10 client4/ 1 client5
run_back unique/cluster/client1_0.sim
#run_back unique/cluster/client1_1.sim
#run_back unique/big_cluster/client1_2.sim
#run_back unique/big_cluster/client1_3.sim
#run_back unique/big_cluster/client1_4.sim
#run_back unique/big_cluster/client1_5.sim
#run_back unique/big_cluster/client1_6.sim
#run_back unique/big_cluster/client1_7.sim
#run_back unique/big_cluster/client1_8.sim
#run_back unique/big_cluster/client1_9.sim

print wait for a while to let clients start insert data
sleep 5000

$loop_cnt = 0
loop_cluster_do:
print **** **** **** START loop cluster do **** **** **** ****
print ============== step5: start dnode4 and add into cluster, then wait dnode4 ready
system sh/exec.sh -n dnode4 -s start
sql create dnode $hostname4

wait_dnode4_ready_0:
$cnt = $cnt + 1
if $cnt == 10 then
  return -1
endi
sql show dnodes
if $rows != 4 then
  sleep 2000
  goto wait_dnode4_ready_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4

if $loop_cnt == 0 then
  $dnode4Status = $data4_4
elif $loop_cnt == 1 then
  $dnode4Status = $data4_6
elif $loop_cnt == 2 then
  $dnode4Status = $data4_8
else then
  print **** **** **** END loop cluster do 2**** **** **** ****
  return
endi

if $dnode4Status != ready then
  sleep 2000
  goto wait_dnode4_ready_0
endi

print ============== step6: stop and drop dnode1, then remove data dir of dnode1
system sh/exec.sh -n dnode1 -s stop -x SIGINT

$cnt = 0
wait_dnode1_offline_0:
$cnt = $cnt + 1
if $cnt == 10 then
  return -1
endi
sql show dnodes
if $rows != 4 then
  sleep 2000
  goto wait_dnode1_offline_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4

$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4

if $loop_cnt == 0 then
  $dnode1Status = $data4_1
elif $loop_cnt == 1 then
  $dnode1Status = $data4_5
elif $loop_cnt == 2 then
  $dnode1Status = $data4_7
elif $loop_cnt == 3 then
  $dnode1Status = $data4_9
else then
  print **** **** **** END loop cluster do 1**** **** **** ****
  return
endi

if $dnode1Status != offline then
  sleep 2000
  goto wait_dnode1_offline_0
endi

sql drop dnode $hostname1
system rm -rf ../../../sim/dnode1

print ============== step7: stop dnode2, because mnodes < 50%, so clusert don't provide services
system sh/exec.sh -n dnode2 -s stop -x SIGINT

sql show dnodes -x wait_dnode2_offline_0
if $rows != 3 then
  sleep 2000
  goto wait_dnode2_offline_0
endi
wait_dnode2_offline_0:

#$cnt = 0
#wait_dnode2_offline_0:
#$cnt = $cnt + 1
#if $cnt == 10 then
#  return -1
#endi
#sql show dnodes -x wait_dnode2_offline_0
#if $rows != 3 then
#  sleep 2000
#  goto wait_dnode2_offline_0
#endi
#print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
#print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
#print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#$dnode1Status = $data4_1
#$dnode2Status = $data4_2
#$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#
#if $dnode2Status != offline then
#  sleep 2000
#  goto wait_dnode1_offline_0
#endi

print ============== step8: restart dnode2, then wait sync end
system sh/exec.sh -n dnode2 -s start

$cnt = 0
wait_dnode2_ready_0:
$cnt = $cnt + 1
if $cnt == 10 then
  return -1
endi
sql show dnodes
if $rows != 3 then
  sleep 2000
  goto wait_dnode2_ready_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4

if $dnode2Status != ready then
  sleep 2000
  goto wait_dnode2_ready_0
endi

print ============== step9: stop dnode3, then wait sync end
system sh/exec.sh -n dnode3 -s stop -x SIGINT

$cnt = 0
wait_dnode3_offline_0:
$cnt = $cnt + 1
if $cnt == 10 then
  return -1
endi
sql show dnodes
if $rows != 3 then
  sleep 2000
  goto wait_dnode3_offline_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4

if $dnode3Status != offline then
  sleep 2000
  goto wait_dnode3_offline_0
endi

print ============== step10: restart dnode3, then wait sync end
system sh/exec.sh -n dnode3 -s start

$cnt = 0
wait_dnode3_ready_0:
$cnt = $cnt + 1
if $cnt == 10 then
  return -1
endi
sql show dnodes
if $rows != 3 then
  sleep 2000
  goto wait_dnode3_ready_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4

if $dnode3Status != ready then
  sleep 2000
  goto wait_dnode3_ready_0
endi

print ============== step11: stop dnode4, then wait sync end
system sh/exec.sh -n dnode4 -s stop -x SIGINT

$cnt = 0
wait_dnode4_offline_0:
$cnt = $cnt + 1
if $cnt == 10 then
  return -1
endi
sql show dnodes
if $rows != 3 then
  sleep 2000
  goto wait_dnode4_offline_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4

if $loop_cnt == 0 then
  $dnode4Status = $data4_4
elif $loop_cnt == 1 then
  $dnode4Status = $data4_6
elif $loop_cnt == 2 then
  $dnode4Status = $data4_8
else then
  print **** **** **** END loop cluster do 2**** **** **** ****
  return
endi

if $dnode4Status != offline then
  sleep 2000
  goto wait_dnode4_offline_0
endi

print ============== step12: restart dnode4, then wait sync end
system sh/exec.sh -n dnode4 -s start

$cnt = 0
wait_dnode4_ready_0:
$cnt = $cnt + 1
if $cnt == 10 then
  return -1
endi
sql show dnodes
if $rows != 3 then
  sleep 2000
  goto wait_dnode4_ready_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4

if $loop_cnt == 0 then
  $dnode4Status = $data4_4
elif $loop_cnt == 1 then
  $dnode4Status = $data4_6
elif $loop_cnt == 2 then
  $dnode4Status = $data4_8
else then
  print **** **** **** END loop cluster do 2**** **** **** ****
  return
endi

if $dnode4Status != ready then
  sleep 2000
  goto wait_dnode4_ready_0
endi

print ============== step13: alter replica 2
sql alter database $db replica 2
sql show database
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1

if $data0_5 != 2 then
  print rplica is not modify to 2, error!!!!!!
  return
endi

print ============== step14: stop and drop dnode4, then remove data dir of dnode4
system sh/exec.sh -n dnode4 -s stop -x SIGINT

$cnt = 0
wait_dnode4_offline_1:
$cnt = $cnt + 1
if $cnt == 10 then
  return -1
endi
sql show dnodes
if $rows != 3 then
  sleep 2000
  goto wait_dnode4_offline_1
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4

$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4

if $loop_cnt == 0 then
  $dnode4Status = $data4_4
elif $loop_cnt == 1 then
  $dnode4Status = $data4_6
elif $loop_cnt == 2 then
  $dnode4Status = $data4_8
else then
  print **** **** **** END loop cluster do 2**** **** **** ****
  return
endi

if $dnode4Status != offline then
  sleep 2000
  goto wait_dnode4_offline_1
endi

sql drop dnode $hostname4
system rm -rf ../../../sim/dnode4

print ============== step15: alter replica 1
sql alter database $db replica 1
sql show database
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1

if $data0_5 != 1 then
  print rplica is not modify to 1, error!!!!!!
  return
endi

print ============== step16: alter replica 2
sql alter database $db replica 1
sql show database
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1

if $data0_5 != 2 then
  print rplica is not modify to 2, error!!!!!!
  return
endi

print ============== step17: start dnode1 and add into cluster, then wait dnode1 ready
system sh/exec.sh -n dnode1 -s start
sql create dnode $hostname1

wait_dnode1_ready_0:
$cnt = $cnt + 1
if $cnt == 10 then
  return -1
endi
sql show dnodes
if $rows != 3 then
  sleep 2000
  goto wait_dnode1_ready_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4

if $loop_cnt == 0 then
  $dnode1Status = $data4_1
elif $loop_cnt == 1 then
  $dnode1Status = $data4_5
elif $loop_cnt == 2 then
  $dnode1Status = $data4_7
elif $loop_cnt == 3 then
  $dnode1Status = $data4_9
else then
  print **** **** **** END loop cluster do 3**** **** **** ****
  return
endi

if $dnode1Status != ready then
  sleep 2000
  goto wait_dnode1_ready_0
endi

print ============== step18: alter replica 3
sql alter database $db replica 3
sql show database
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1

if $data0_5 != 3 then
  print rplica is not modify to 3, error!!!!!!
  return
endi

$loop_cnt = $loop_cnt + 1
goto loop_cluster_do