Merge pull request #24759 from taosdata/coverage/TD-28602-3.0

coverage: add schUtil.c and schedulerTest
This commit is contained in:
Alex Duan 2024-02-20 09:14:03 +08:00 committed by GitHub
commit 2eb33b6ee6
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
13 changed files with 78 additions and 20 deletions

View File

@ -1606,10 +1606,6 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, char *name) {
tsTempSpace.reserved = (int64_t)(((double)pItem->fval) * 1024 * 1024 * 1024);
uInfo("%s set to %" PRId64, name, tsTempSpace.reserved);
matched = true;
} else if (strcasecmp("minimalDataDirGB", name) == 0) {
tsDataSpace.reserved = (int64_t)(((double)pItem->fval) * 1024 * 1024 * 1024);
uInfo("%s set to %" PRId64, name, tsDataSpace.reserved);
matched = true;
} else if (strcasecmp("minimalLogDirGB", name) == 0) {
tsLogSpace.reserved = (int64_t)(((double)pItem->fval) * 1024 * 1024 * 1024);
uInfo("%s set to %" PRId64, name, tsLogSpace.reserved);
@ -1680,10 +1676,6 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, char *name) {
return -1;
}
matched = true;
} else if (strcasecmp("telemetryServer", name) == 0) {
uInfo("%s set from %s to %s", name, pItem->str, tsTelemServer);
tstrncpy(tsTelemServer, pItem->str, TSDB_FQDN_LEN);
matched = true;
}
break;
}

View File

@ -433,10 +433,12 @@ static int32_t vnodeAsyncLaunchWorker(SVAsync *async) {
return 0;
}
#ifdef BUILD_NO_CALL
// Legacy entry point: schedule an async task on the default channel (id 0).
// Thin wrapper kept for API compatibility; all real work — queueing the
// execute/complete callbacks and returning the task id via taskId — is
// delegated to vnodeAsyncC. Compiled out in normal builds (BUILD_NO_CALL).
int32_t vnodeAsync(SVAsync *async, EVAPriority priority, int32_t (*execute)(void *), void (*complete)(void *),
void *arg, int64_t *taskId) {
return vnodeAsyncC(async, 0, priority, execute, complete, arg, taskId);
}
#endif
int32_t vnodeAsyncC(SVAsync *async, int64_t channelId, EVAPriority priority, int32_t (*execute)(void *),
void (*complete)(void *), void *arg, int64_t *taskId) {

View File

@ -263,6 +263,7 @@ void schCloseJobRef(void) {
uint64_t schGenTaskId(void) { return atomic_add_fetch_64(&schMgmt.taskId, 1); }
#ifdef BUILD_NO_CALL
uint64_t schGenUUID(void) {
static uint64_t hashId = 0;
static int32_t requestSerialId = 0;
@ -284,6 +285,7 @@ uint64_t schGenUUID(void) {
uint64_t id = ((hashId & 0x0FFF) << 52) | ((pid & 0x0FFF) << 40) | ((ts & 0xFFFFFF) << 16) | (val & 0xFFFF);
return id;
}
#endif
void schFreeRpcCtxVal(const void *arg) {
if (NULL == arg) {

View File

@ -25,4 +25,9 @@ IF(NOT TD_DARWIN)
PUBLIC "${TD_SOURCE_DIR}/include/libs/scheduler/"
PRIVATE "${TD_SOURCE_DIR}/source/libs/scheduler/inc"
)
add_test(
NAME schedulerTest
COMMAND schedulerTest
)
ENDIF()

View File

@ -1072,6 +1072,16 @@ TEST(multiThread, forceFree) {
//taosSsleep(3);
}
TEST(otherTest, otherCase) {
// Exception/edge-case coverage: call scheduler utility APIs with invalid
// inputs (zero ref id, NULL pointers, out-of-range op type) and verify
// they fail gracefully instead of crashing.
schReleaseJob(0);
schFreeRpcCtx(NULL);
ASSERT_EQ(schDumpEpSet(NULL), (char*)NULL);
ASSERT_EQ(strcmp(schGetOpStr(SCH_OP_NULL), "NULL"), 0);
ASSERT_EQ(strcmp(schGetOpStr((SCH_OP_TYPE)100), "UNKNOWN"), 0);
}
int main(int argc, char **argv) {
taosSeedRand(taosGetTimestampSec());
testing::InitGoogleTest(&argc, argv);

View File

@ -34,7 +34,7 @@ add_test(
COMMAND streamUpdateTest
)
# add_test(
# NAME checkpointTest
# COMMAND checkpointTest
# )
add_test(
NAME checkpointTest
COMMAND checkpointTest
)

View File

@ -18,6 +18,9 @@ from frame.autogen import *
class TDTestCase(TBase):
updatecfgDict = {
'slowLogScope':"query"
}
def init(self, conn, logSql, replicaVar=3):
super(TDTestCase, self).init(conn, logSql, replicaVar=3, db="snapshot", checkColName="c1")

View File

@ -33,7 +33,8 @@ class TDTestCase(TBase):
"lossyColumns" : "float,double",
"fPrecision" : "0.000000001",
"dPrecision" : "0.00000000000000001",
"ifAdtFse" : "1"
"ifAdtFse" : "1",
'slowLogScope' : "insert"
}
def insertData(self):
@ -56,7 +57,7 @@ class TDTestCase(TBase):
sql = f"select * from {self.db}.{self.stb} where fc!=100"
tdSql.query(sql)
tdSql.checkRows(0)
sql = f"select count(*) from {self.db}.{self.stb} where dc!=200"
sql = f"select * from {self.db}.{self.stb} where dc!=200"
tdSql.query(sql)
tdSql.checkRows(0)
sql = f"select avg(fc) from {self.db}.{self.stb}"

View File

@ -31,6 +31,9 @@ from frame.srvCtl import *
class TDTestCase(TBase):
updatecfgDict = {
'slowLogScope' : "others"
}
def init(self, conn, logSql, replicaVar=1):
tdLog.debug(f"start to init {__file__}")

View File

@ -31,9 +31,9 @@ class TDTestCase(TBase):
'queryMaxConcurrentTables': '2K',
'streamMax': '1M',
'totalMemoryKB': '1G',
#'rpcQueueMemoryAllowed': '1T',
#'mndLogRetention': '1P',
'streamBufferSize':'2G'
'streamMax': '1P',
'streamBufferSize':'1T',
'slowLogScope':"query"
}
def insertData(self):
@ -47,8 +47,40 @@ class TDTestCase(TBase):
# taosBenchmark run
etool.benchMark(command = f"-d {self.db} -t {self.childtable_count} -n {self.insert_rows} -v 2 -y")
def checkQueryOK(self, rets):
    # Verify that a `taos -s` invocation succeeded: the second-to-last output
    # line must start with "Query OK,". Aborts the test run via tdLog.exit
    # otherwise.
    #   rets: list of output lines captured from the taos CLI.
    if rets[-2][:9] != "Query OK,":
        # fixed typo in the failure message: "unecpect" -> "unexpected"
        tdLog.exit(f"check taos -s return unexpected: {rets}")
def doTaos(self):
tdLog.info(f"check taos command options...")
# local command
options = [
"DebugFlag 143",
"enableCoreFile 1",
"fqdn 127.0.0.1",
"firstEp 127.0.0.1",
"locale ENG",
"metaCacheMaxSize 10000",
"minimalTmpDirGB 5",
"minimalLogDirGB 1",
"secondEp 127.0.0.2",
"smlChildTableName smltbname",
"smlAutoChildTableNameDelimiter autochild",
"smlTagName tagname",
"smlTsDefaultName tsdef",
"serverPort 6030",
"slowLogScope insert",
"timezone tz",
"tempDir /var/tmp"
]
# exec
for option in options:
rets = etool.runBinFile("taos", f"-s \"alter local '{option}'\";")
self.checkQueryOK(rets)
# error
etool.runBinFile("taos", f"-s \"alter local 'nocmd check'\";")
# help
rets = etool.runBinFile("taos", "--help")
self.checkListNotEmpty(rets)

View File

@ -8,6 +8,9 @@ from frame.caseBase import *
from frame import *
class TDTestCase(TBase):
updatecfgDict = {
'slowLogScope':"all"
}
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)

View File

@ -31,7 +31,9 @@ class TDTestCase(TBase):
updatecfgDict = {
"keepColumnName" : "1",
"ttlChangeOnWrite" : "1",
"querySmaOptimize": "1"
"querySmaOptimize" : "1",
"slowLogScope" : "none",
"queryBufferSize" : 10240
}

View File

@ -400,6 +400,9 @@ class TDTestCase:
self.explain_check()
# coverage explain.c add
tdSql.query(f"explain verbose true select * from {dbname}.stb1 partition by c1 order by c2")
def __test_error(self, dbname=DBNAME):
ratio = random.uniform(0.001,1)