Fixed travis build failure caused by crash_gen tool, sorry

This commit is contained in:
Steven Li 2020-10-24 08:42:38 +00:00
parent 22a80a9dcc
commit 87cd1cc0f6
2 changed files with 12 additions and 12 deletions

View File

@@ -2023,7 +2023,7 @@ class ClientManager:
# print("exec stats: {}".format(self.tc.getExecStats())) # print("exec stats: {}".format(self.tc.getExecStats()))
# print("TC failed = {}".format(self.tc.isFailed())) # print("TC failed = {}".format(self.tc.isFailed()))
if svcMgr: # gConfig.auto_start_service: if svcMgr: # gConfig.auto_start_service:
svcMgr.stopTaosService() svcMgr.stopTaosServices()
svcMgr = None svcMgr = None
# Print exec status, etc., AFTER showing messages from the server # Print exec status, etc., AFTER showing messages from the server
self.conclude() self.conclude()
@@ -2077,8 +2077,8 @@ class MainExec:
def runClient(self): def runClient(self):
global gSvcMgr global gSvcMgr
if gConfig.auto_start_service: if gConfig.auto_start_service:
gSvcMgr = self._svcMgr = ServiceManager() # hack alert gSvcMgr = self._svcMgr = ServiceManager(1) # hack alert
gSvcMgr.startTaosService() # we start, don't run gSvcMgr.startTaosServices() # we start, don't run
self._clientMgr = ClientManager() self._clientMgr = ClientManager()
ret = None ret = None

View File

@@ -295,7 +295,7 @@ class TdeSubProcess:
class ServiceManager: class ServiceManager:
PAUSE_BETWEEN_IPC_CHECK = 1.2 # seconds between checks on STDOUT of sub process PAUSE_BETWEEN_IPC_CHECK = 1.2 # seconds between checks on STDOUT of sub process
def __init__(self, numDnodes = 1): # Otherwise we run a cluster def __init__(self, numDnodes): # >1 when we run a cluster
Logging.info("TDengine Service Manager (TSM) created") Logging.info("TDengine Service Manager (TSM) created")
self._numDnodes = numDnodes # >1 means we have a cluster self._numDnodes = numDnodes # >1 means we have a cluster
self._lock = threading.Lock() self._lock = threading.Lock()
@@ -306,7 +306,7 @@ class ServiceManager:
self.inSigHandler = False self.inSigHandler = False
# self._status = MainExec.STATUS_RUNNING # set inside # self._status = MainExec.STATUS_RUNNING # set inside
# _startTaosService() # _startTaosService()
self._runCluster = (numDnodes >= 1) self._runCluster = (numDnodes > 1)
self._tInsts : List[TdeInstance] = [] self._tInsts : List[TdeInstance] = []
for i in range(0, numDnodes): for i in range(0, numDnodes):
ti = self._createTdeInstance(i) # construct tInst ti = self._createTdeInstance(i) # construct tInst
@@ -318,9 +318,9 @@
# self.svcMgrThreads.append(thread) # self.svcMgrThreads.append(thread)
def _createTdeInstance(self, dnIndex): def _createTdeInstance(self, dnIndex):
# if not self._runCluster: # single instance if not self._runCluster: # single instance
# return ServiceManagerThread(0) subdir = 'test'
# Create all threads in a cluster else: # Create all threads in a cluster
subdir = 'cluster_dnode_{}'.format(dnIndex) subdir = 'cluster_dnode_{}'.format(dnIndex)
fepPort= 6030 # firstEP Port fepPort= 6030 # firstEP Port
port = fepPort + dnIndex * 100 port = fepPort + dnIndex * 100
@@ -411,7 +411,7 @@ class ServiceManager:
threads are in "stable" status. threads are in "stable" status.
""" """
for ti in self._tInsts: for ti in self._tInsts:
if not ti.isStable(): if not ti.getStatus().isStable():
return False return False
return True return True
@@ -473,7 +473,7 @@ class ServiceManager:
self.stopTaosServices() # should have started already self.stopTaosServices() # should have started already
def restart(self): def restart(self):
if not self.getStatus().isStable(): if not self.isStable():
Logging.warning("Cannot restart service/cluster, when not stable") Logging.warning("Cannot restart service/cluster, when not stable")
return return
@@ -483,7 +483,7 @@
else: else:
Logging.warning("Service not active when restart requested") Logging.warning("Service not active when restart requested")
self.startTaosService() self.startTaosServices()
# self._isRestarting = False # self._isRestarting = False
# def isRunning(self): # def isRunning(self):