Merge branch 'master' into cq

commit d6dc4f8c27
@@ -769,6 +769,10 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
       index = 0;
       sToken = tStrGetToken(sql, &index, false);

+      if (sToken.type == TK_ILLEGAL) {
+        return tscSQLSyntaxErrMsg(pCmd->payload, "unrecognized token", sToken.z);
+      }
+
       if (sToken.type == TK_RP) {
         break;
       }
@@ -2,6 +2,10 @@
 from .connection import TDengineConnection
 from .cursor import TDengineCursor

+# For some reason, the following is needed for VS Code (through PyLance) to
+# recognize that "error" is a valid module of the "taos" package.
+from .error import ProgrammingError
+
 # Globals
 threadsafety = 0
 paramstyle = 'pyformat'
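Note: re-exporting ProgrammingError at the package root lets client code catch driver errors without importing submodules explicitly. A minimal usage sketch (the connection parameters are hypothetical, not part of this diff):

    import taos

    try:
        conn = taos.connect(host="localhost")  # hypothetical host
        conn.cursor().execute("SELECT * FROM no_such_table")
    except taos.error.ProgrammingError as err:  # resolvable thanks to the re-export
        print("query failed:", err)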
File diff suppressed because it is too large
@@ -87,12 +87,12 @@ static int32_t (*parseLocaltimeFp[]) (char* timestr, int64_t* time, int32_t time

 int32_t taosGetTimestampSec() { return (int32_t)time(NULL); }

-int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t daylight) {
+int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t day_light) {
   /* parse datatime string in with tz */
   if (strnchr(timestr, 'T', len, false) != NULL) {
     return parseTimeWithTz(timestr, time, timePrec);
   } else {
-    return (*parseLocaltimeFp[daylight])(timestr, time, timePrec);
+    return (*parseLocaltimeFp[day_light])(timestr, time, timePrec);
   }
 }

@@ -2845,6 +2845,8 @@ static void doSetTagValueInParam(void* pTable, int32_t tagColId, tVariant *tag,
   if (tagColId == TSDB_TBNAME_COLUMN_INDEX) {
     val = tsdbGetTableName(pTable);
     assert(val != NULL);
+  } else if (tagColId == TSDB_BLOCK_DIST_COLUMN_INDEX) {
+    val = NULL;
   } else {
     val = tsdbGetTableTagVal(pTable, tagColId, type, bytes);
   }
@@ -68,7 +68,7 @@ int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) {
               TABLE_CHAR_NAME(pMeta->tables[tid]), TABLE_TID(pMeta->tables[tid]), TABLE_UID(pMeta->tables[tid]));
     return 0;
   } else {
-    tsdbError("vgId:%d table %s at tid %d uid %" PRIu64
+    tsdbInfo("vgId:%d table %s at tid %d uid %" PRIu64
              " exists, replace it with new table, this can be not reasonable",
              REPO_ID(pRepo), TABLE_CHAR_NAME(pMeta->tables[tid]), TABLE_TID(pMeta->tables[tid]),
              TABLE_UID(pMeta->tables[tid]));
@@ -367,40 +367,39 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC
     goto out_of_memory;
   }

-  assert(pCond != NULL && pCond->numOfCols > 0 && pMemRef != NULL);
+  assert(pCond != NULL && pMemRef != NULL);
   if (ASCENDING_TRAVERSE(pCond->order)) {
     assert(pQueryHandle->window.skey <= pQueryHandle->window.ekey);
   } else {
     assert(pQueryHandle->window.skey >= pQueryHandle->window.ekey);
   }

-  // allocate buffer in order to load data blocks from file
-  pQueryHandle->statis = calloc(pCond->numOfCols, sizeof(SDataStatis));
-  if (pQueryHandle->statis == NULL) {
-    goto out_of_memory;
-  }
-
-  pQueryHandle->pColumns = taosArrayInit(pCond->numOfCols, sizeof(SColumnInfoData));  // todo: use list instead of array?
-  if (pQueryHandle->pColumns == NULL) {
-    goto out_of_memory;
-  }
-
-  for (int32_t i = 0; i < pCond->numOfCols; ++i) {
-    SColumnInfoData colInfo = {{0}, 0};
-
-    colInfo.info = pCond->colList[i];
-    colInfo.pData = calloc(1, EXTRA_BYTES + pQueryHandle->outputCapacity * pCond->colList[i].bytes);
-    if (colInfo.pData == NULL) {
+  if (pCond->numOfCols > 0) {
+    // allocate buffer in order to load data blocks from file
+    pQueryHandle->statis = calloc(pCond->numOfCols, sizeof(SDataStatis));
+    if (pQueryHandle->statis == NULL) {
       goto out_of_memory;
     }
-    taosArrayPush(pQueryHandle->pColumns, &colInfo);
-    pQueryHandle->statis[i].colId = colInfo.info.colId;
   }

-  pQueryHandle->defaultLoadColumn = getDefaultLoadColumns(pQueryHandle, true);
+  if (pCond->numOfCols > 0) {
+    pQueryHandle->pColumns =
+        taosArrayInit(pCond->numOfCols, sizeof(SColumnInfoData));  // todo: use list instead of array?
+    if (pQueryHandle->pColumns == NULL) {
+      goto out_of_memory;
+    }
+
+    for (int32_t i = 0; i < pCond->numOfCols; ++i) {
+      SColumnInfoData colInfo = {{0}, 0};
+
+      colInfo.info = pCond->colList[i];
+      colInfo.pData = calloc(1, EXTRA_BYTES + pQueryHandle->outputCapacity * pCond->colList[i].bytes);
+      if (colInfo.pData == NULL) {
+        goto out_of_memory;
+      }
+      taosArrayPush(pQueryHandle->pColumns, &colInfo);
+      pQueryHandle->statis[i].colId = colInfo.info.colId;
+    }
+
+    pQueryHandle->defaultLoadColumn = getDefaultLoadColumns(pQueryHandle, true);
+  }

   STsdbMeta* pMeta = tsdbGetMeta(tsdb);
   assert(pMeta != NULL);
@@ -91,18 +91,18 @@ static void vnodeIncRef(void *ptNode) {
 }

 void *vnodeAcquire(int32_t vgId) {
-  SVnodeObj **ppVnode = NULL;
+  SVnodeObj *pVnode = NULL;
   if (tsVnodesHash != NULL) {
-    ppVnode = taosHashGetClone(tsVnodesHash, &vgId, sizeof(int32_t), vnodeIncRef, NULL, sizeof(void *));
+    taosHashGetClone(tsVnodesHash, &vgId, sizeof(int32_t), vnodeIncRef, &pVnode, sizeof(void *));
   }

-  if (ppVnode == NULL || *ppVnode == NULL) {
+  if (pVnode == NULL) {
     terrno = TSDB_CODE_VND_INVALID_VGROUP_ID;
     vDebug("vgId:%d, not exist", vgId);
     return NULL;
   }

-  return *ppVnode;
+  return pVnode;
 }

 void vnodeRelease(void *vparam) {
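The rewritten vnodeAcquire copies the object pointer out through taosHashGetClone's out-parameter while vnodeIncRef pins the entry, rather than keeping a pointer into the hash table itself. A toy Python analogue of this "clone under the container lock" pattern (illustrative only, not the C implementation):

    import threading

    class RefHash:
        def __init__(self):
            self._lock = threading.Lock()
            self._table = {}  # vgId -> [obj, refcount]

        def get_clone(self, key):
            with self._lock:          # entry cannot be deleted while held
                entry = self._table.get(key)
                if entry is None:
                    return None       # caller maps this to "invalid vgroup id"
                entry[1] += 1         # the vnodeIncRef step
                return entry[0]       # caller now owns one reference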
@@ -303,6 +303,17 @@ static int32_t vnodeWriteToWQueueImp(SVWriteMsg *pWrite) {
 }

 int32_t vnodeWriteToWQueue(void *vparam, void *wparam, int32_t qtype, void *rparam) {
+  SVnodeObj *pVnode = vparam;
+  if (qtype == TAOS_QTYPE_RPC) {
+    if (!vnodeInReadyStatus(pVnode)) {
+      return TSDB_CODE_APP_NOT_READY;  // it may be in deleting or closing state
+    }
+
+    if (pVnode->role != TAOS_SYNC_ROLE_MASTER) {
+      return TSDB_CODE_APP_NOT_READY;
+    }
+  }
+
   SVWriteMsg *pWrite = vnodeBuildVWriteMsg(vparam, wparam, qtype, rparam);
   if (pWrite == NULL) {
     assert(terrno != 0);
@@ -37,6 +37,7 @@ import requests
 import gc
+import taos


 from .shared.types import TdColumns, TdTags

 # from crash_gen import ServiceManager, TdeInstance, TdeSubProcess
@@ -160,6 +161,7 @@ class WorkerThread:
                 Logging.debug("[TRD] Thread Coordinator not running any more, worker thread now stopping...")
                 break

+
             # Before we fetch the task and run it, let's ensure we properly "use" the database (not needed any more)
             try:
                 if (Config.getConfig().per_thread_db_connection):  # most likely TRUE
@@ -1362,9 +1364,12 @@ class Task():
                 Progress.emit(Progress.ACCEPTABLE_ERROR)
                 self._err = err
             else: # not an acceptable error
-                errMsg = "[=] Unexpected Taos library exception ({}): errno=0x{:X}, msg: {}, SQL: {}".format(
+                shortTid = threading.get_ident() % 10000
+                errMsg = "[=] Unexpected Taos library exception ({}): errno=0x{:X}, thread={}, msg: {}, SQL: {}".format(
                     self.__class__.__name__,
-                    errno2, err, wt.getDbConn().getLastSql())
+                    errno2,
+                    shortTid,
+                    err, wt.getDbConn().getLastSql())
                 self.logDebug(errMsg)
                 if Config.getConfig().debug:
                     # raise # so that we see full stack
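The thread= field uses the same 4-digit scheme as the logging adapter further below: threading.get_ident() returns a long, platform-dependent integer, and taking it modulo 10000 keeps log lines compact at the cost of rare collisions. A one-liner sketch:

    import threading

    def short_tid() -> int:
        # 4-digit id; collisions are possible but acceptable in fuzzer logs
        return threading.get_ident() % 10000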
@@ -1411,21 +1416,31 @@ class Task():

     def lockTable(self, ftName): # full table name
         # print(" <<" + ftName + '_', end="", flush=True)
-        with Task._lock:
-            if not ftName in Task._tableLocks:
+        with Task._lock: # SHORT lock! so we only protect lock creation
+            if not ftName in Task._tableLocks: # Create new lock and add to list, if needed
                 Task._tableLocks[ftName] = threading.Lock()

-        Task._tableLocks[ftName].acquire()
+        # No lock protection, anybody can do this any time
+        lock = Task._tableLocks[ftName]
+        # Logging.info("Acquiring lock: {}, {}".format(ftName, lock))
+        lock.acquire()
+        # Logging.info("Acquiring lock successful: {}".format(lock))

     def unlockTable(self, ftName):
         # print('_' + ftName + ">> ", end="", flush=True)
         with Task._lock:
             if not ftName in self._tableLocks:
                 raise RuntimeError("Corrupt state, no such lock")
             lock = Task._tableLocks[ftName]
             if not lock.locked():
                 raise RuntimeError("Corrupte state, already unlocked")
-            lock.release()
+
+            # Important note, we want to protect unlocking under the task level
+            # locking, because we don't want the lock to be deleted (maybe in the futur)
+            # while we unlock it
+            # Logging.info("Releasing lock: {}".format(lock))
+            lock.release()
+            # Logging.info("Releasing lock successful: {}".format(lock))


 class ExecutionStats:
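The comment "SHORT lock!" marks the key idea: the class-level lock only guards creation of the per-table lock, and the potentially long acquire() happens outside it, so one thread blocking on a busy table never stalls lock creation for other tables. A condensed sketch of the pattern:

    import threading

    _registry_lock = threading.Lock()
    _table_locks = {}  # table name -> threading.Lock

    def lock_table(name):
        with _registry_lock:              # short critical section
            if name not in _table_locks:
                _table_locks[name] = threading.Lock()
        _table_locks[name].acquire()      # may block, but registry stays free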
@@ -1696,6 +1711,11 @@ class TdSuperTable:
         return dbc.query("SELECT * FROM {}.{}".format(self._dbName, self._stName)) > 0

     def ensureRegTable(self, task: Optional[Task], dbc: DbConn, regTableName: str):
+        '''
+        Make sure a regular table exists for this super table, creating it if necessary.
+        If there is an associated "Task" that wants to do this, "lock" this table so that
+        others don't access it while we create it.
+        '''
         dbName = self._dbName
         sql = "select tbname from {}.{} where tbname in ('{}')".format(dbName, self._stName, regTableName)
         if dbc.query(sql) >= 1 : # reg table exists already
@@ -1703,18 +1723,24 @@ class TdSuperTable:

         # acquire a lock first, so as to be able to *verify*. More details in TD-1471
         fullTableName = dbName + '.' + regTableName
-        if task is not None:  # TODO: what happens if we don't lock the table
-            task.lockTable(fullTableName)
+        if task is not None:  # Somethime thie operation is requested on behalf of a "task"
+            # Logging.info("Locking table for creation: {}".format(fullTableName))
+            task.lockTable(fullTableName) # in which case we'll lock this table to ensure serialized access
+            # Logging.info("Table locked for creation".format(fullTableName))
         Progress.emit(Progress.CREATE_TABLE_ATTEMPT) # ATTEMPT to create a new table
         # print("(" + fullTableName[-3:] + ")", end="", flush=True)
         try:
             sql = "CREATE TABLE {} USING {}.{} tags ({})".format(
                 fullTableName, dbName, self._stName, self._getTagStrForSql(dbc)
             )
+            # Logging.info("Creating regular with SQL: {}".format(sql))
             dbc.execute(sql)
+            # Logging.info("Regular table created: {}".format(sql))
         finally:
             if task is not None:
+                # Logging.info("Unlocking table after creation: {}".format(fullTableName))
                 task.unlockTable(fullTableName) # no matter what
+                # Logging.info("Table unlocked after creation: {}".format(fullTableName))

     def _getTagStrForSql(self, dbc) :
         tags = self._getTags(dbc)
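The lock-if-task / always-unlock shape above could equally be captured in a context manager, which makes the pairing impossible to forget at call sites; a sketch under the same assumptions (task exposes lockTable/unlockTable as in this diff):

    from contextlib import contextmanager

    @contextmanager
    def table_locked(task, full_table_name):
        if task is not None:
            task.lockTable(full_table_name)
        try:
            yield
        finally:
            if task is not None:
                task.unlockTable(full_table_name)  # no matter what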
@@ -2011,9 +2037,30 @@ class TaskAddData(StateTransitionTask):
     def canBeginFrom(cls, state: AnyState):
         return state.canAddData()

+    def _lockTableIfNeeded(self, fullTableName, extraMsg = ''):
+        if Config.getConfig().verify_data:
+            # Logging.info("Locking table: {}".format(fullTableName))
+            self.lockTable(fullTableName)
+            # Logging.info("Table locked {}: {}".format(extraMsg, fullTableName))
+            # print("_w" + str(nextInt % 100), end="", flush=True) # Trace what was written
+        else:
+            # Logging.info("Skipping locking table")
+            pass
+
+    def _unlockTableIfNeeded(self, fullTableName):
+        if Config.getConfig().verify_data:
+            # Logging.info("Unlocking table: {}".format(fullTableName))
+            self.unlockTable(fullTableName)
+            # Logging.info("Table unlocked: {}".format(fullTableName))
+        else:
+            pass
+            # Logging.info("Skipping unlocking table")
+
     def _addDataInBatch(self, db, dbc, regTableName, te: TaskExecutor):
         numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS

         fullTableName = db.getName() + '.' + regTableName
+        self._lockTableIfNeeded(fullTableName, 'batch')

         sql = "INSERT INTO {} VALUES ".format(fullTableName)
         for j in range(numRecords): # number of records per table
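These helpers funnel the verify_data check into one place; callers then pair them with try/finally, as the batch path in the next hunk does. The calling pattern, condensed (dbc and sql as in the surrounding code):

    fullTableName = db.getName() + '.' + regTableName
    self._lockTableIfNeeded(fullTableName, 'batch')
    try:
        dbc.execute(sql)                          # the batched INSERT
    finally:
        self._unlockTableIfNeeded(fullTableName)  # never leak the lock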
@@ -2021,51 +2068,60 @@ class TaskAddData(StateTransitionTask):
             nextTick = db.getNextTick()
             nextColor = db.getNextColor()
             sql += "('{}', {}, '{}');".format(nextTick, nextInt, nextColor)
-        dbc.execute(sql)
+
+        # Logging.info("Adding data in batch: {}".format(sql))
+        try:
+            dbc.execute(sql)
+        finally:
+            # Logging.info("Data added in batch: {}".format(sql))
+            self._unlockTableIfNeeded(fullTableName)
+

     def _addData(self, db: Database, dbc, regTableName, te: TaskExecutor): # implied: NOT in batches
         numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS

         for j in range(numRecords): # number of records per table
-            nextInt = db.getNextInt()
+            intToWrite = db.getNextInt()
             nextTick = db.getNextTick()
             nextColor = db.getNextColor()
             if Config.getConfig().record_ops:
                 self.prepToRecordOps()
                 if self.fAddLogReady is None:
                     raise CrashGenError("Unexpected empty fAddLogReady")
-                self.fAddLogReady.write("Ready to write {} to {}\n".format(nextInt, regTableName))
+                self.fAddLogReady.write("Ready to write {} to {}\n".format(intToWrite, regTableName))
                 self.fAddLogReady.flush()
                 os.fsync(self.fAddLogReady.fileno())

             # TODO: too ugly trying to lock the table reliably, refactor...
             fullTableName = db.getName() + '.' + regTableName
-            if Config.getConfig().verify_data:
-                self.lockTable(fullTableName)
-                # print("_w" + str(nextInt % 100), end="", flush=True) # Trace what was written
+            self._lockTableIfNeeded(fullTableName) # so that we are verify read-back. TODO: deal with exceptions before unlock

             try:
                 sql = "INSERT INTO {} VALUES ('{}', {}, '{}');".format( # removed: tags ('{}', {})
                     fullTableName,
                     # ds.getFixedSuperTableName(),
                     # ds.getNextBinary(), ds.getNextFloat(),
-                    nextTick, nextInt, nextColor)
+                    nextTick, intToWrite, nextColor)
+                # Logging.info("Adding data: {}".format(sql))
                 dbc.execute(sql)
+                # Logging.info("Data added: {}".format(sql))
+                intWrote = intToWrite

                 # Quick hack, attach an update statement here. TODO: create an "update" task
                 if (not Config.getConfig().use_shadow_db) and Dice.throw(5) == 0: # 1 in N chance, plus not using shaddow DB
-                    nextInt = db.getNextInt()
+                    intToUpdate = db.getNextInt() # Updated, but should not succeed
                     nextColor = db.getNextColor()
                     sql = "INSERt INTO {} VALUES ('{}', {}, '{}');".format( # "INSERt" means "update" here
                         fullTableName,
-                        nextTick, nextInt, nextColor)
+                        nextTick, intToUpdate, nextColor)
                     # sql = "UPDATE {} set speed={}, color='{}' WHERE ts='{}'".format(
                     #     fullTableName, db.getNextInt(), db.getNextColor(), nextTick)
                     dbc.execute(sql)
+                    intWrote = intToUpdate # We updated, seems TDengine non-cluster accepts this.

             except: # Any exception at all
-                if Config.getConfig().verify_data:
-                    self.unlockTable(fullTableName)
+                self._unlockTableIfNeeded(fullTableName)
                 raise

             # Now read it back and verify, we might encounter an error if table is dropped
@@ -2073,33 +2129,41 @@ class TaskAddData(StateTransitionTask):
                 try:
                     readBack = dbc.queryScalar("SELECT speed from {}.{} WHERE ts='{}'".
                         format(db.getName(), regTableName, nextTick))
-                    if readBack != nextInt :
+                    if readBack != intWrote :
                         raise taos.error.ProgrammingError(
                             "Failed to read back same data, wrote: {}, read: {}"
-                            .format(nextInt, readBack), 0x999)
+                            .format(intWrote, readBack), 0x999)
                 except taos.error.ProgrammingError as err:
                     errno = Helper.convertErrno(err.errno)
-                    if errno in [CrashGenError.INVALID_EMPTY_RESULT, CrashGenError.INVALID_MULTIPLE_RESULT] : # not a single result
+                    if errno == CrashGenError.INVALID_EMPTY_RESULT: # empty result
                         raise taos.error.ProgrammingError(
-                            "Failed to read back same data for tick: {}, wrote: {}, read: {}"
-                            .format(nextTick, nextInt, "Empty Result" if errno == CrashGenError.INVALID_EMPTY_RESULT else "Multiple Result"),
+                            "Failed to read back same data for tick: {}, wrote: {}, read: EMPTY"
+                            .format(nextTick, intWrote),
+                            errno)
+                    elif errno == CrashGenError.INVALID_MULTIPLE_RESULT : # multiple results
+                        raise taos.error.ProgrammingError(
+                            "Failed to read back same data for tick: {}, wrote: {}, read: MULTIPLE RESULTS"
+                            .format(nextTick, intWrote),
                             errno)
                     elif errno in [0x218, 0x362]: # table doesn't exist
                         # do nothing
-                        dummy = 0
+                        pass
                     else:
                         # Re-throw otherwise
                         raise
                 finally:
-                    self.unlockTable(fullTableName) # Unlock the table no matter what
+                    self._unlockTableIfNeeded(fullTableName) # Quite ugly, refactor lock/unlock
+                # Done with read-back verification, unlock the table now
+            else:
+                self._unlockTableIfNeeded(fullTableName)

             # Successfully wrote the data into the DB, let's record it somehow
-            te.recordDataMark(nextInt)
+            te.recordDataMark(intWrote)

         if Config.getConfig().record_ops:
             if self.fAddLogDone is None:
                 raise CrashGenError("Unexpected empty fAddLogDone")
-            self.fAddLogDone.write("Wrote {} to {}\n".format(nextInt, regTableName))
+            self.fAddLogDone.write("Wrote {} to {}\n".format(intWrote, regTableName))
             self.fAddLogDone.flush()
             os.fsync(self.fAddLogDone.fileno())
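The read-back check now reports "EMPTY" and "MULTIPLE RESULTS" as distinct failures, since each points at a different bug (a lost write vs. a duplicate timestamp). Reduced to its core, the verification step looks like this (queryScalar semantics assumed from context: it raises on zero or multiple rows):

    def verify_read_back(dbc, db_name, table, tick, wrote):
        sql = "SELECT speed from {}.{} WHERE ts='{}'".format(db_name, table, tick)
        read = dbc.queryScalar(sql)   # raises for empty/multiple results
        if read != wrote:
            raise ValueError("wrote {} but read back {}".format(wrote, read))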
@@ -2137,15 +2201,16 @@ class TaskAddData(StateTransitionTask):
 class ThreadStacks: # stack info for all threads
     def __init__(self):
         self._allStacks = {}
-        allFrames = sys._current_frames()
-        for th in threading.enumerate():
+        allFrames = sys._current_frames() # All current stack frames
+        for th in threading.enumerate(): # For each thread
             if th.ident is None:
                 continue
-            stack = traceback.extract_stack(allFrames[th.ident])
-            self._allStacks[th.native_id] = stack
+            stack = traceback.extract_stack(allFrames[th.ident]) # Get stack for a thread
+            shortTid = th.ident % 10000
+            self._allStacks[shortTid] = stack # Was using th.native_id

     def print(self, filteredEndName = None, filterInternal = False):
-        for thNid, stack in self._allStacks.items(): # for each thread, stack frames top to bottom
+        for tIdent, stack in self._allStacks.items(): # for each thread, stack frames top to bottom
             lastFrame = stack[-1]
             if filteredEndName: # we need to filter out stacks that match this name
                 if lastFrame.name == filteredEndName : # end did not match
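sys._current_frames() maps thread idents to their topmost frames; keying the saved stacks by ident % 10000 lines them up with the shortened ids used elsewhere in the logs (th.native_id requires Python 3.8+, presumably why it was dropped). A self-contained sketch:

    import sys, threading, traceback

    def snapshot_stacks():
        stacks = {}
        all_frames = sys._current_frames()     # ident -> topmost frame
        for th in threading.enumerate():
            if th.ident is None or th.ident not in all_frames:
                continue                       # thread not started or already gone
            stacks[th.ident % 10000] = traceback.extract_stack(all_frames[th.ident])
        return stacks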
@@ -2157,7 +2222,7 @@ class ThreadStacks: # stack info for all threads
                    '__init__']: # the thread that extracted the stack
                 continue # ignore
             # Now print
-            print("\n<----- Thread Info for LWP/ID: {} (most recent call last) <-----".format(thNid))
+            print("\n<----- Thread Info for LWP/ID: {} (most recent call last) <-----".format(tIdent))
             stackFrame = 0
             for frame in stack: # was using: reversed(stack)
                 # print(frame)
@@ -2376,7 +2441,7 @@ class MainExec:
             action='store',
             default=0,
             type=int,
-            help='Maximum number of DBs to keep, set to disable dropping DB. (default: 0)')
+            help='Number of DBs to use, set to disable dropping DB. (default: 0)')
         parser.add_argument(
             '-c',
             '--connector-type',
@@ -179,7 +179,7 @@ quorum 2
     def getServiceCmdLine(self): # to start the instance
         if Config.getConfig().track_memory_leaks:
             Logging.info("Invoking VALGRIND on service...")
-            return ['exec /usr/bin/valgrind', '--leak-check=yes', self.getExecFile(), '-c', self.getCfgDir()]
+            return ['exec valgrind', '--leak-check=yes', self.getExecFile(), '-c', self.getCfgDir()]
         else:
             # TODO: move "exec -c" into Popen(), we can both "use shell" and NOT fork so ask to lose kill control
             return ["exec " + self.getExecFile(), '-c', self.getCfgDir()] # used in subproce.Popen()
@@ -310,7 +310,7 @@ class TdeSubProcess:
         # print("Starting TDengine with env: ", myEnv.items())
         print("Starting TDengine: {}".format(cmdLine))

-        return Popen(
+        ret = Popen(
             ' '.join(cmdLine), # ' '.join(cmdLine) if useShell else cmdLine,
             shell=True, # Always use shell, since we need to pass ENV vars
             stdout=PIPE,
@@ -318,6 +318,10 @@ class TdeSubProcess:
             close_fds=ON_POSIX,
             env=myEnv
             ) # had text=True, which interferred with reading EOF
+        time.sleep(0.01) # very brief wait, then let's check if sub process started successfully.
+        if ret.poll():
+            raise CrashGenError("Sub process failed to start with command line: {}".format(cmdLine))
+        return ret

     STOP_SIGNAL = signal.SIGINT # signal.SIGKILL/SIGINT # What signal to use (in kill) to stop a taosd process?
     SIG_KILL_RETCODE = 137 # ref: https://stackoverflow.com/questions/43268156/process-finished-with-exit-code-137-in-pycharm
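Popen returns as soon as the child is spawned; poll() stays None while it runs and yields the exit code once it has died, so a brief sleep followed by a poll() check catches services that crash immediately. Note the diff's truthiness test (if ret.poll():) would miss a child that exited cleanly with code 0; a stricter sketch:

    import time
    from subprocess import Popen, PIPE

    def start_checked(cmd_line: str) -> Popen:
        proc = Popen(cmd_line, shell=True, stdout=PIPE, stderr=PIPE)
        time.sleep(0.01)                  # give an immediate crash time to surface
        if proc.poll() is not None:       # any exit code means startup failed
            raise RuntimeError("failed to start: {}".format(cmd_line))
        return proc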
@@ -614,7 +618,7 @@ class ServiceManager:

         # Find if there's already a taosd service, and then kill it
         for proc in psutil.process_iter():
-            if proc.name() == 'taosd':
+            if proc.name() == 'taosd' or proc.name() == 'memcheck-amd64-': # Regular or under Valgrind
                 Logging.info("Killing an existing TAOSD process in 2 seconds... press CTRL-C to interrupt")
                 time.sleep(2.0)
                 proc.kill()
@@ -35,7 +35,8 @@ class LoggingFilter(logging.Filter):

 class MyLoggingAdapter(logging.LoggerAdapter):
     def process(self, msg, kwargs):
-        return "[{:04d}] {}".format(threading.get_ident() % 10000, msg), kwargs
+        shortTid = threading.get_ident() % 10000
+        return "[{:04d}] {}".format(shortTid, msg), kwargs
         # return '[%s] %s' % (self.extra['connid'], msg), kwargs

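A LoggerAdapter's process() runs on every record, so the thread-id prefix is applied uniformly without touching individual log calls. Usage sketch with the adapter above (logger name hypothetical):

    import logging

    logger = MyLoggingAdapter(logging.getLogger("CrashGen"), {})
    logger.info("worker thread starting")   # emitted as e.g. "[0042] worker thread starting"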
@@ -31,7 +31,7 @@ python3 ./test.py -f table/column_name.py
 python3 ./test.py -f table/column_num.py
 python3 ./test.py -f table/db_table.py
 python3 ./test.py -f table/create_sensitive.py
-#python3 ./test.py -f table/tablename-boundary.py
+python3 ./test.py -f table/tablename-boundary.py
 python3 ./test.py -f table/max_table_length.py
 python3 ./test.py -f table/alter_column.py
 python3 ./test.py -f table/boundary.py
@@ -332,5 +332,5 @@ python3 ./test.py -f tag_lite/alter_tag.py
 python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
 python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py

+python3 ./test.py -f tag_lite/drop_auto_create.py
 #======================p4-end===============
@@ -82,6 +82,8 @@ class TDTestCase:
         tdSql.execute("import into tbx file \'%s\'"%(self.csvfile))
         tdSql.query('select * from tbx')
         tdSql.checkRows(self.rows)
+        #TD-4447 import the same csv twice
+        tdSql.execute("import into tbx file \'%s\'"%(self.csvfile))

     def stop(self):
         self.destroyCSVFile()
@@ -36,6 +36,10 @@ class TDTestCase:
         tdSql.checkData(1, 1, '涛思数据')

+        tdSql.error("insert into tb values (now, 'taosdata001')")
+
+        tdSql.error("insert into tb(now, 😀)")
+
         tdSql.query("select * from tb")
         tdSql.checkRows(2)

     def stop(self):
         tdSql.close()
@@ -14,6 +14,13 @@ class TDTestCase:
         tdLog.debug("start to execute %s" % __file__)
         tdSql.init(conn.cursor(), logSql)

+        self.ts = 1622100000000
+
+    def get_random_string(self, length):
+        letters = string.ascii_lowercase
+        result_str = ''.join(random.choice(letters) for i in range(length))
+        return result_str
+
     def run(self):
         tdSql.prepare()

@@ -24,19 +31,62 @@ class TDTestCase:
                 shell=True)) - 1
         tdLog.info("table name max length is %d" % tableNameMaxLen)
         chars = string.ascii_uppercase + string.ascii_lowercase
-        tb_name = ''.join(random.choices(chars, k=tableNameMaxLen))
+        tb_name = ''.join(random.choices(chars, k=tableNameMaxLen + 1))
         tdLog.info('tb_name length %d' % len(tb_name))
         tdLog.info('create table %s (ts timestamp, value int)' % tb_name)
-        tdSql.error(
-            'create table %s (ts timestamp, speed binary(4089))' %
-            tb_name)
+        tdSql.error('create table %s (ts timestamp, speed binary(4089))' % tb_name)

-        tb_name = ''.join(random.choices(chars, k=191))
+        tb_name = ''.join(random.choices(chars, k=tableNameMaxLen))
         tdLog.info('tb_name length %d' % len(tb_name))
         tdLog.info('create table %s (ts timestamp, value int)' % tb_name)
         tdSql.execute(
             'create table %s (ts timestamp, speed binary(4089))' %
             tb_name)

+        db_name = self.get_random_string(33)
+        tdSql.error("create database %s" % db_name)
+
+        db_name = self.get_random_string(32)
+        tdSql.execute("create database %s" % db_name)
+        tdSql.execute("use %s" % db_name)
+
+        tb_name = self.get_random_string(193)
+        tdSql.error("create table %s(ts timestamp, val int)" % tb_name)
+
+        tb_name = self.get_random_string(192)
+        tdSql.execute("create table %s.%s(ts timestamp, val int)" % (db_name, tb_name))
+        tdSql.query("show %s.tables" % db_name)
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, tb_name)
+
+        tdSql.execute("insert into %s.%s values(now, 1)" % (db_name, tb_name))
+        tdSql.query("select * from %s.%s" %(db_name, tb_name))
+        tdSql.checkRows(1)
+
+        db_name = self.get_random_string(32)
+        tdSql.execute("create database %s update 1" % db_name)
+
+        stb_name = self.get_random_string(192)
+        tdSql.execute("create table %s.%s(ts timestamp, val int) tags(id int)" % (db_name, stb_name))
+        tb_name1 = self.get_random_string(192)
+        tdSql.execute("insert into %s.%s using %s.%s tags(1) values(%d, 1)(%d, 2)(%d, 3)" % (db_name, tb_name1, db_name, stb_name, self.ts, self.ts + 1, self.ts + 2))
+        tb_name2 = self.get_random_string(192)
+        tdSql.execute("insert into %s.%s using %s.%s tags(2) values(%d, 1)(%d, 2)(%d, 3)" % (db_name, tb_name2, db_name, stb_name, self.ts, self.ts + 1, self.ts + 2))
+
+        tdSql.query("show %s.tables" % db_name)
+        tdSql.checkRows(2)
+
+        tdSql.query("select * from %s.%s" % (db_name, stb_name))
+        tdSql.checkRows(6)
+
+        tdSql.execute("insert into %s.%s using %s.%s tags(1) values(%d, null)" % (db_name, tb_name1, db_name, stb_name, self.ts))
+
+        tdSql.query("select * from %s.%s" % (db_name, stb_name))
+        tdSql.checkRows(6)
+
     def stop(self):
         tdSql.close()
@@ -0,0 +1,47 @@
+###################################################################
+#           Copyright (c) 2016 by TAOS Technologies, Inc.
+#                     All rights reserved.
+#
+#  This file is proprietary and confidential to TAOS Technologies.
+#  No part of this file may be reproduced, stored, transmitted,
+#  disclosed or used in any form or by any means other than as
+#  expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+
+    def run(self):
+        tdSql.prepare()
+
+        tdSql.execute('create table m1(ts timestamp, k int) tags(a binary(12), b int, c double);')
+        tdSql.execute('insert into tm0 using m1(b,c) tags(1, 99) values(now, 1);')
+        tdSql.execute('insert into tm1 using m1(b,c) tags(2, 100) values(now, 2);')
+        tdLog.info("2 rows inserted")
+        tdSql.query('select * from m1;')
+        tdSql.checkRows(2)
+        tdSql.query('select *,tbname from m1;')
+        tdSql.execute("drop table tm0; ")
+        tdSql.query('select * from m1')
+        tdSql.checkRows(1)
+
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
@@ -33,6 +33,7 @@ run general/compute/percentile.sim
 run general/compute/stddev.sim
 run general/compute/sum.sim
 run general/compute/top.sim
+run general/compute/block_dist.sim
 run general/db/alter_option.sim
 run general/db/alter_tables_d2.sim
 run general/db/alter_tables_v1.sim
@@ -0,0 +1,94 @@
+system sh/stop_dnodes.sh
+
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c walLevel -v 1
+system sh/exec.sh -n dnode1 -s start
+sleep 2000
+sql connect
+
+$dbPrefix = m_di_db
+$tbPrefix = m_di_tb
+$mtPrefix = m_di_mt
+$ntPrefix = m_di_nt
+$tbNum = 1
+$rowNum = 2000
+
+print =============== step1
+$i = 0
+$db = $dbPrefix . $i
+$mt = $mtPrefix . $i
+$nt = $ntPrefix . $i
+
+sql drop database $db -x step1
+step1:
+sql create database $db
+sql use $db
+sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol int)
+
+$i = 0
+while $i < $tbNum
+  $tb = $tbPrefix . $i
+  sql create table $tb using $mt tags( $i )
+
+  $x = 0
+  while $x < $rowNum
+    $cc = $x * 60000
+    $ms = 1601481600000 + $cc
+    sql insert into $tb values ($ms , $x )
+    $x = $x + 1
+  endw
+
+  $i = $i + 1
+endw
+
+sql create table $nt (ts timestamp, tbcol int)
+$x = 0
+while $x < $rowNum
+  $cc = $x * 60000
+  $ms = 1601481600000 + $cc
+  sql insert into $nt values ($ms , $x )
+  $x = $x + 1
+endw
+
+sleep 100
+
+print =============== step2
+$i = 0
+$tb = $tbPrefix . $i
+
+sql select _block_dist() from $tb
+
+if $rows != 1 then
+  print expect 1, actual:$rows
+  return -1
+endi
+
+print =============== step3
+$i = 0
+$mt = $mtPrefix . $i
+sql select _block_dist() from $mt
+
+if $rows != 1 then
+  print expect 1, actual:$rows
+  return -1
+endi
+
+print =============== step4
+$i = 0
+$nt = $ntPrefix . $i
+
+sql select _block_dist() from $nt
+
+if $rows != 1 then
+  print expect 1, actual:$rows
+  return -1
+endi
+
+print =============== clear
+sql drop database $db
+sql show databases
+if $rows != 0 then
+  return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -14,3 +14,4 @@ run general/compute/percentile.sim
 run general/compute/stddev.sim
 run general/compute/sum.sim
 run general/compute/top.sim
+run general/compute/block_dist.sim
@@ -32,6 +32,7 @@ run general/compute/percentile.sim
 run general/compute/stddev.sim
 run general/compute/sum.sim
 run general/compute/top.sim
+run general/compute/block_dist.sim
 run general/db/alter_option.sim
 run general/db/alter_tables_d2.sim
 run general/db/alter_tables_v1.sim