Minor refactoring of crash_gen tool, now sometimes doing multi-record insertion
parent 7688a6ebc7
commit 0d57114b77
@@ -388,9 +388,9 @@ class ThreadCoordinator:
                 self._syncAtBarrier() # For now just cross the barrier
                 Progress.emit(Progress.END_THREAD_STEP)
             except threading.BrokenBarrierError as err:
-                Logging.info("Main loop aborted, caused by worker thread time-out")
+                Logging.info("Main loop aborted, caused by worker thread(s) time-out")
                 self._execStats.registerFailure("Aborted due to worker thread timeout")
-                print("\n\nWorker Thread time-out detected, important thread info:")
+                print("\n\nWorker Thread time-out detected, TAOS related threads are:")
                 ts = ThreadStacks()
                 ts.print(filterInternal=True)
                 workerTimeout = True
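For context, ThreadCoordinator steps its main loop and the worker threads across a threading.Barrier; when a worker stalls, the timed wait fails and every waiter sees threading.BrokenBarrierError, which is the condition handled in the hunk above. A minimal self-contained sketch of that failure path (generic names, not the ThreadCoordinator/Logging API):

```python
import threading

barrier = threading.Barrier(2)  # main loop + one worker must both arrive

def hung_worker():
    # Simulates a stuck worker: it never calls barrier.wait()
    threading.Event().wait(10)

threading.Thread(target=hung_worker, daemon=True).start()

try:
    barrier.wait(timeout=1)  # the main-loop side of the barrier sync
except threading.BrokenBarrierError:
    # A timed-out wait breaks the barrier for everyone; treat it as a worker time-out.
    print("Worker thread time-out detected")
```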
@@ -1242,6 +1242,7 @@ class Task():
             0x0B,  # Unable to establish connection, more details in TD-1648
             0x200, # invalid SQL, TODO: re-examine with TD-934
             0x20F, # query terminated, possibly due to vnoding being dropped, see TD-1776
+            0x213, # "Disconnected from service", result of "kill connection ???"
             0x217, # "db not selected", client side defined error code
             # 0x218, # "Table does not exist" client side defined error code
             0x360, # Table already exists
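These hex values are Task's accept-list of TDengine error codes that are tolerated while faults are being injected; the hunk adds 0x213 ("Disconnected from service") to it. A rough sketch of how such an accept-list is typically consulted (the helper name and usage are illustrative, not the actual Task API):

```python
# Codes taken from the list in the diff; the helper itself is hypothetical.
ACCEPTABLE_ERRNOS = {0x0B, 0x200, 0x20F, 0x213, 0x217, 0x360}

def is_acceptable_error(errno: int) -> bool:
    # Errors on the list are expected side effects of the injected chaos
    # (dropped vnodes, killed connections, ...), not test failures.
    return errno in ACCEPTABLE_ERRNOS

print(is_acceptable_error(0x213))  # True once this commit is applied
print(is_acceptable_error(0x123))  # False: would be reported as a genuine failure
```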
@@ -1911,29 +1912,21 @@ class TaskAddData(StateTransitionTask):
     def canBeginFrom(cls, state: AnyState):
         return state.canAddData()
 
-    def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
-        # ds = self._dbManager # Quite DANGEROUS here, may result in multi-thread client access
-        db = self._db
-        dbc = wt.getDbConn()
-        tblSeq = list(range(
-                self.LARGE_NUMBER_OF_TABLES if gConfig.larger_data else self.SMALL_NUMBER_OF_TABLES))
-        random.shuffle(tblSeq)
-        for i in tblSeq:
-            if (i in self.activeTable): # wow already active
-                print("x", end="", flush=True) # concurrent insertion
-            else:
-                self.activeTable.add(i) # marking it active
-
-            sTable = db.getFixedSuperTable()
-            regTableName = self.getRegTableName(i) # "db.reg_table_{}".format(i)
-
-
+    def _addDataInBatch(self, db, dbc, regTableName, te: TaskExecutor):
+        numRecords = self.LARGE_NUMBER_OF_RECORDS if gConfig.larger_data else self.SMALL_NUMBER_OF_RECORDS
         fullTableName = db.getName() + '.' + regTableName
-        # self._lockTable(fullTableName) # "create table" below. Stop it if the table is "locked"
-        sTable.ensureTable(self, wt.getDbConn(), db.getName(), regTableName) # Ensure the table exists
-        # self._unlockTable(fullTableName)
 
-        for j in range(self.LARGE_NUMBER_OF_RECORDS if gConfig.larger_data else self.SMALL_NUMBER_OF_RECORDS): # number of records per table
+        sql = "insert into {} values ".format(fullTableName)
+        for j in range(numRecords): # number of records per table
+            nextInt = db.getNextInt()
+            nextTick = db.getNextTick()
+            sql += "('{}', {});".format(nextTick, nextInt)
+        dbc.execute(sql)
+
+    def _addData(self, db, dbc, regTableName, te: TaskExecutor): # implied: NOT in batches
+        numRecords = self.LARGE_NUMBER_OF_RECORDS if gConfig.larger_data else self.SMALL_NUMBER_OF_RECORDS
+
+        for j in range(numRecords): # number of records per table
             nextInt = db.getNextInt()
             nextTick = db.getNextTick()
             if gConfig.record_ops:
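The new _addDataInBatch accumulates all records for a table into one INSERT string and issues a single dbc.execute() call, while _addData keeps executing one statement per record. Tracing the string built by the loop above with hypothetical values shows the multi-record statement that results:

```python
# Stand-alone trace of the sql string assembled in _addDataInBatch (values are made up).
fullTableName = "db.reg_table_3"
records = [("2020-10-01 00:00:00.000", 101),
           ("2020-10-01 00:00:00.001", 102)]

sql = "insert into {} values ".format(fullTableName)
for nextTick, nextInt in records:
    sql += "('{}', {});".format(nextTick, nextInt)
print(sql)
# insert into db.reg_table_3 values ('2020-10-01 00:00:00.000', 101);('2020-10-01 00:00:00.001', 102);
```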
@@ -1993,14 +1986,35 @@ class TaskAddData(StateTransitionTask):
                 self.fAddLogDone.flush()
                 os.fsync(self.fAddLogDone)
 
+    def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
+        # ds = self._dbManager # Quite DANGEROUS here, may result in multi-thread client access
+        db = self._db
+        dbc = wt.getDbConn()
+        numTables = self.LARGE_NUMBER_OF_TABLES if gConfig.larger_data else self.SMALL_NUMBER_OF_TABLES
+        numRecords = self.LARGE_NUMBER_OF_RECORDS if gConfig.larger_data else self.SMALL_NUMBER_OF_RECORDS
+        tblSeq = list(range(numTables ))
+        random.shuffle(tblSeq) # now we have random sequence
+        for i in tblSeq:
+            if (i in self.activeTable): # wow already active
+                print("x", end="", flush=True) # concurrent insertion
+            else:
+                self.activeTable.add(i) # marking it active
+
+            sTable = db.getFixedSuperTable()
+            regTableName = self.getRegTableName(i) # "db.reg_table_{}".format(i)
+            fullTableName = db.getName() + '.' + regTableName
+            # self._lockTable(fullTableName) # "create table" below. Stop it if the table is "locked"
+            sTable.ensureTable(self, wt.getDbConn(), db.getName(), regTableName) # Ensure the table exists
+            # self._unlockTable(fullTableName)
+
+            if Dice.throw(1) == 0: # 1 in 2 chance
+                self._addData(db, dbc, regTableName, te)
+            else:
+                self._addDataInBatch(db, dbc, regTableName, te)
+
             self.activeTable.discard(i) # not raising an error, unlike remove
 
 
 class ThreadStacks: # stack info for all threads
     def __init__(self):
         self._allStacks = {}
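The relocated _executeInternal is now pure orchestration: mark the table as active, make sure it exists, flip a coin between the two insertion strategies, then release the table. A compact sketch of that dispatch pattern, using random.randint in place of crash_gen's Dice helper (per the "1 in 2 chance" comment above):

```python
import random

active_tables = set()  # tables currently being written by some worker

def add_data(table):           # stand-in for TaskAddData._addData
    print("row-by-row insert into", table)

def add_data_in_batch(table):  # stand-in for TaskAddData._addDataInBatch
    print("single multi-record insert into", table)

def execute_for_table(table):
    if table in active_tables:         # another worker is already inserting here
        print("x", end="", flush=True) # note the concurrent insertion and carry on
    else:
        active_tables.add(table)       # mark the table active
    if random.randint(0, 1) == 0:      # 1 in 2 chance, mirroring Dice.throw(1) == 0
        add_data(table)
    else:
        add_data_in_batch(table)
    active_tables.discard(table)       # discard() never raises, unlike remove()

execute_for_table("reg_table_5")
```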
@@ -2022,13 +2036,14 @@ class ThreadStacks: # stack info for all threads
                     '__init__']: # the thread that extracted the stack
                 continue # ignore
             # Now print
-            print("\n<----- Thread Info for ID: {}".format(thNid))
+            print("\n<----- Thread Info for LWP/ID: {} (Execution stopped at Bottom Frame) <-----".format(thNid))
+            stackFrame = 0
             for frame in stack:
                 # print(frame)
-                print("File {filename}, line {lineno}, in {name}".format(
-                    filename=frame.filename, lineno=frame.lineno, name=frame.name))
+                print("[{sf}] File {filename}, line {lineno}, in {name}".format(
+                    sf=stackFrame, filename=frame.filename, lineno=frame.lineno, name=frame.name))
                 print(" {}".format(frame.line))
-            print("-----> End of Thread Info\n")
+            print("-----> End of Thread Info ----->\n")
 
 class ClientManager:
     def __init__(self):
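ThreadStacks.print now tags every frame with an index and labels the dump with the thread's LWP/ID. Collecting such per-thread stacks is possible with the standard library alone; a minimal sketch of the idea (not the crash_gen implementation, which captures the stacks in __init__ and can filter internal threads):

```python
import sys
import traceback

def dump_thread_stacks():
    # sys._current_frames() maps each thread's ident to its current top frame.
    for th_id, top_frame in sys._current_frames().items():
        print("\n<----- Thread Info for ID: {} (Execution stopped at Bottom Frame) <-----".format(th_id))
        stack = traceback.extract_stack(top_frame)  # oldest frame first
        for stack_frame, frame in enumerate(stack):
            print("[{}] File {}, line {}, in {}".format(
                stack_frame, frame.filename, frame.lineno, frame.name))
            print("    {}".format(frame.line))
        print("-----> End of Thread Info ----->\n")

dump_thread_stacks()
```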