Merge branch '3.0' of https://github.com/taosdata/TDengine into enh/TS-4723-3.0

commit 9531cba834
(5 binary files changed; contents not shown)

@@ -38,7 +38,7 @@ bool TestServer::Start() {
   taosThreadAttrSetDetachState(&thAttr, PTHREAD_CREATE_JOINABLE);
   taosThreadCreate(&threadId, &thAttr, serverLoop, this);
   taosThreadAttrDestroy(&thAttr);
-  taosMsleep(2100);
+  taosMsleep(10000);
   return runnning;
 }

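The sleep before reporting the server status grows from 2.1 s to 10 s, giving serverLoop more time to finish bringing the test dnode up. A fixed sleep is the simplest fix but still a guess; a minimal Python sketch of the readiness poll it approximates (is_up is a hypothetical probe, not a TDengine API):

    import time

    def wait_until_up(is_up, timeout_s=10.0, poll_s=0.1):
        # Poll a readiness probe instead of sleeping a fixed 10 s;
        # returns True as soon as the probe answers, False on timeout.
        deadline = time.monotonic() + timeout_s
        while time.monotonic() < deadline:
            if is_up():
                return True
            time.sleep(poll_s)
        return False
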
@@ -1238,6 +1238,18 @@ int32_t tsdbCacheColFormatUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, SBlo
   ctxArray = taosArrayInit(pBlockData->nColData, sizeof(SLastUpdateCtx));
 
+  // 1. prepare last
+  STsdbRowKey tsdbRowKey = {0};
+  tsdbRowGetKey(&lRow, &tsdbRowKey);
+
+  {
+    SLastUpdateCtx updateCtx = {
+        .lflag = LFLAG_LAST,
+        .tsdbRowKey = tsdbRowKey,
+        .colVal = COL_VAL_VALUE(PRIMARYKEY_TIMESTAMP_COL_ID, ((SValue){.type = TSDB_DATA_TYPE_TIMESTAMP,
+                                                                       .val = lRow.pBlockData->aTSKEY[lRow.iRow]}))};
+    taosArrayPush(ctxArray, &updateCtx);
+  }
+
   TSDBROW tRow = tsdbRowFromBlockData(pBlockData, 0);
 
   for (int32_t iColData = 0; iColData < pBlockData->nColData; ++iColData) {

@@ -1263,9 +1275,6 @@ int32_t tsdbCacheColFormatUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, SBlo
   }
 
-  // 2. prepare last row
-  STsdbRowKey tsdbRowKey = {0};
-  tsdbRowGetKey(&lRow, &tsdbRowKey);
 
   STSDBRowIter iter = {0};
   tsdbRowIterOpen(&iter, &lRow, pTSchema);
   for (SColVal *pColVal = tsdbRowIterNext(&iter); pColVal; pColVal = tsdbRowIterNext(&iter)) {

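Net effect of the two hunks above: the row-key preparation that used to sit just before the row iterator ("2. prepare last row") now runs once ahead of the column loop ("1. prepare last"), and the timestamp column is seeded into ctxArray as an LFLAG_LAST entry before any other column is visited. A Python sketch of that ordering (field names mirror the C for illustration only):

    LFLAG_LAST = 1                       # assumed flag value, illustrative
    PRIMARYKEY_TIMESTAMP_COL_ID = 1      # first column is the timestamp

    def build_update_ctxs(row_key, last_ts, col_vals):
        ctx_array = []
        # 1. prepare last: the timestamp column goes in before the loop
        ctx_array.append({"lflag": LFLAG_LAST, "row_key": row_key,
                          "col_id": PRIMARYKEY_TIMESTAMP_COL_ID, "val": last_ts})
        # then one context per data column, as in the C for-loop
        for col_id, val in col_vals:
            ctx_array.append({"lflag": LFLAG_LAST, "row_key": row_key,
                              "col_id": col_id, "val": val})
        return ctx_array
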
@@ -759,7 +759,7 @@ void* getRowStateBuff(SStreamFileState* pFileState) { return pFileState->rowStat
 void* getStateFileStore(SStreamFileState* pFileState) { return pFileState->pFileStore; }
 
 bool isDeteled(SStreamFileState* pFileState, TSKEY ts) {
-  return pFileState->deleteMark > 0 && ts < (pFileState->maxTs - pFileState->deleteMark);
+  return pFileState->deleteMark != INT64_MAX && pFileState->maxTs > 0 && ts < (pFileState->maxTs - pFileState->deleteMark);
 }
 
 bool isFlushedState(SStreamFileState* pFileState, TSKEY ts, TSKEY gap) { return ts <= (pFileState->flushMark + gap); }

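The rewritten predicate treats INT64_MAX as the "no delete mark configured" sentinel and refuses to expire anything until at least one timestamp has been observed (maxTs > 0), instead of keying off deleteMark > 0 alone. A small Python mirror of the new check (illustrative, not the TDengine API):

    INT64_MAX = 2**63 - 1

    def is_deleted(delete_mark, max_ts, ts):
        # INT64_MAX means no delete mark configured; nothing can be
        # expired before any data has been seen (max_ts still 0).
        return (delete_mark != INT64_MAX
                and max_ts > 0
                and ts < (max_ts - delete_mark))

    # Before any data arrives, no key is considered deleted:
    assert not is_deleted(delete_mark=1000, max_ts=0, ts=-500)
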
@@ -22,6 +22,7 @@ class TDTestCase(TBase):
         self.child_table_num = 1
         self.insert_round_num = 700
         self.row_num_per_round = 15
+        self.start_ts = 1704082431000
 
     def prepare_data(self):
         # database

@@ -39,7 +40,7 @@ class TDTestCase(TBase):
         for j in range(self.insert_round_num):
             sql = "insert into ct_binary%s values" % (i+1)
             for k in range(self.row_num_per_round):
-                sql += "(now+%ss, '%s')," % (str(j * 10 + k + 1), 'a' * self.max_column_length)
+                sql += "(%s, '%s')," % (str(self.start_ts + (j * self.insert_round_num + k * self.row_num_per_round + 1)), 'a' * self.max_column_length)
             tdSql.execute(sql)
             tdLog.info(f"Insert {self.row_num_per_round} rows data into ct_binary{i+1} {j+1} times successfully")
         tdSql.execute("flush database db;")

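Replacing the now+Ns offsets with explicit epoch-millisecond values makes every generated timestamp reproducible across runs; the same substitution repeats in the ct_varchar, ct_nchar, ct_varbinary, and ct_json_tag loops below. A quick check that the formula never collides (constants taken from the test):

    start_ts = 1704082431000     # fixed epoch-ms base added in this change
    insert_round_num = 700
    row_num_per_round = 15

    def row_ts(j, k):
        # Same arithmetic as the generated SQL values clause.
        return start_ts + (j * insert_round_num + k * row_num_per_round + 1)

    # k * row_num_per_round tops out at 14 * 15 = 210 < 700, so no two
    # (j, k) pairs produce the same timestamp:
    all_ts = {row_ts(j, k) for j in range(insert_round_num)
                           for k in range(row_num_per_round)}
    assert len(all_ts) == insert_round_num * row_num_per_round
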
@@ -63,7 +64,7 @@ class TDTestCase(TBase):
         for j in range(self.insert_round_num):
             sql = "insert into ct_varchar%s values" % (i+1)
             for k in range(self.row_num_per_round):
-                sql += "(now+%ss, '%s')," % (str(j * 10 + k + 1), 'b' * self.max_column_length)
+                sql += "(%s, '%s')," % (str(self.start_ts + (j * self.insert_round_num + k * self.row_num_per_round + 1)), 'b' * self.max_column_length)
             tdSql.execute(sql)
             tdLog.info(f"Insert {self.row_num_per_round} rows data into ct_varchar{i+1} {j+1} times successfully")
         tdSql.execute("flush database db;")

@@ -98,7 +99,7 @@ class TDTestCase(TBase):
         for j in range(self.insert_round_num):
             sql = "insert into ct_nchar%s values" % (i+1)
             for k in range(self.row_num_per_round):
-                sql += "(now+%ss, '%s')," % (str(j * 10 + k + 1), column)
+                sql += "(%s, '%s')," % (str(self.start_ts + (j * self.insert_round_num + k * self.row_num_per_round + 1)), column)
             tdSql.execute(sql)
             tdLog.info(f"Insert {self.row_num_per_round} rows data into ct_nchar{i+1} {j+1} times successfully")
         tdSql.execute("flush database db;")

@@ -124,7 +125,7 @@ class TDTestCase(TBase):
         for j in range(self.insert_round_num):
             sql = "insert into ct_varbinary%s values" % (i+1)
             for k in range(row_num_per_round):
-                sql += "(now+%ss, '%s')," % (str(j * 10 + k + 1), '\\x' + column)
+                sql += "(%s, '%s')," % (str(self.start_ts + (j * self.insert_round_num + k * self.row_num_per_round + 1)), '\\x' + column)
             tdSql.execute(sql)
             tdLog.info(f"Insert {row_num_per_round} rows data into ct_varbinary{i+1} {j+1} times successfully")
         tdSql.execute("flush database db;")

@@ -153,7 +154,7 @@ class TDTestCase(TBase):
         for j in range(self.insert_round_num):
             sql = "insert into ct_json_tag%s values" % (i+1)
             for k in range(row_num_per_round):
-                sql += "(now+%ss, '%s')," % (str(j * 10 + k + 1), '\\x' + column)
+                sql += "(%s, '%s')," % (str(self.start_ts + (j * self.insert_round_num + k * self.row_num_per_round + 1)), '\\x' + column)
             tdSql.execute(sql)
             tdLog.info(f"Insert {row_num_per_round} rows data into ct_json_tag{i+1} {j+1} times successfully")
         tdSql.execute("flush database db;")

@@ -315,19 +315,22 @@ function run_thread() {
         fi
         if [ -n "$corefile" ]; then
             echo -e "\e[34m corefiles: $corefile \e[0m"
         fi
         # scp build binary and unit test log
         local build_dir=$log_dir/build_${hosts[index]}
         local remote_build_dir="${workdirs[index]}/${DEBUGPATH}/build"
         # if [ $ent -ne 0 ]; then
         #     remote_build_dir="${workdirs[index]}/{DEBUGPATH}/build"
         # fi
         local remote_unit_test_log_dir="${workdirs[index]}/${DEBUGPATH}/Testing/Temporary/"

         mkdir "$build_dir" 2>/dev/null
         if [ $? -eq 0 ]; then
             # scp build binary
             cmd="$scpcmd:${remote_build_dir}/* ${build_dir}/"
             echo "$cmd"
             $cmd >/dev/null
             cmd="$scpcmd:${remote_unit_test_log_dir}/* ${build_dir}/"
             echo "$cmd"
             $cmd >/dev/null
         fi
     fi

     # get remote sim dir
     local remote_sim_dir="${workdirs[index]}/tmp/thread_volume/$thread_no"
     local tarcmd="sshpass -p ${passwords[index]} ssh -o StrictHostKeyChecking=no -r ${usernames[index]}@${hosts[index]}"

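Besides the build binaries, the runner now also copies each worker's CTest logs out of Testing/Temporary into the per-host build directory. A hedged Python sketch of the same collection step (plain scp via subprocess; host and credential handling simplified relative to the sshpass-based script):

    import subprocess

    def collect_remote_artifacts(user, host, workdir, debug_path, build_dir):
        # Pull build output and CTest logs, mirroring the two scp calls.
        remote_build = f"{workdir}/{debug_path}/build"
        remote_ctest = f"{workdir}/{debug_path}/Testing/Temporary"
        for remote in (remote_build, remote_ctest):
            # The remote shell expands the glob, as in cmd="$scpcmd:...*"
            subprocess.run(["scp", f"{user}@{host}:{remote}/*", build_dir],
                           check=False)  # best-effort, like $cmd >/dev/null
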
@@ -39,7 +39,7 @@ class TDTestCase:
 
         os.system("nohup taosBenchmark -y -B 1 -t 40 -S 1000 -n 10 -i 1000 -v 5 > /dev/null 2>&1 &")
         time.sleep(10)
-        tdSql.query("use test")
+        tdSql.execute("use test", queryTimes=100)
         tdSql.query("create stream if not exists s1 trigger at_once ignore expired 0 ignore update 0 fill_history 1 into st1 as select _wstart,sum(voltage),groupid from meters partition by groupid interval(2s)")
         tdLog.debug("========create stream and insert data ok========")
         time.sleep(15)

@@ -66,7 +66,7 @@ class TDTestCase:
 
         os.system("taosBenchmark -d db -t 20 -v 6 -n 1000 -y > /dev/null 2>&1")
         # create stream
-        tdSql.execute("use db")
+        tdSql.execute("use db", queryTimes=100)
         tdSql.execute("create stream stream1 fill_history 1 into sta as select count(*) as cnt from meters interval(10a);",show=True)
         time.sleep(5)
 

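Both hunks in this file swap a one-shot "use ..." for tdSql.execute(..., queryTimes=100), so the statement is retried while taosBenchmark is still creating the database. A rough Python sketch of that retry idea (the real behavior lives inside the tdSql wrapper; names here are illustrative):

    import time

    def execute_with_retry(execute, sql, query_times=100, pause_s=0.5):
        # Retry until the statement succeeds or the budget is spent,
        # roughly what queryTimes=100 asks the test harness to do.
        last_err = None
        for _ in range(query_times):
            try:
                return execute(sql)
            except Exception as err:   # e.g. database not ready yet
                last_err = err
                time.sleep(pause_s)
        raise last_err
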
@@ -89,8 +89,8 @@ else
   export LD_PRELOAD="$(realpath "$(gcc -print-file-name=libasan.so)") $(realpath "$(gcc -print-file-name=libstdc++.so)")"
   echo "Preload AsanSo:" $?
 
-  $* -a 2>$AsanFile
-
+  $* -a 2> $AsanFile
+  cat $AsanFile
   unset LD_PRELOAD
   for ((i = 1; i <= 20; i++)); do
     AsanFileLen=$(cat $AsanFile | wc -l)

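With libasan preloaded, the command's stderr lands in $AsanFile, and the added cat makes the sanitizer report visible in CI output before the loop below re-reads the file's line count up to 20 times. A short Python equivalent of that polling loop (an assumed reading of the shell, not part of TDengine):

    import time

    def wait_for_asan_report(path, attempts=20, pause_s=1.0):
        # Re-read the AddressSanitizer log until it has content,
        # mirroring the shell loop around wc -l.
        for _ in range(attempts):
            try:
                with open(path) as f:
                    lines = f.readlines()
                if lines:
                    return lines
            except FileNotFoundError:
                pass
            time.sleep(pause_s)
        return []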