commit 3310c7f2d6
@@ -422,6 +422,10 @@ pipeline {
 mkdir -p ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}
 echo "''' + env.FILE_CHANGED + '''" > ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt
 '''
+sh '''
+cd ${WKC}/tests/parallel_test
+./run_check_assert_container.sh -d ${WKDIR}
+'''
 sh '''
 date
 rm -rf ${WKC}/debug
@@ -1,5 +1,5 @@
 [Unit]
-Description=server service
+Description=taosd - TDengine time series database core service
 After=network-online.target
 Wants=network-online.target
@@ -475,7 +475,7 @@ int32_t mndInitSync(SMnode *pMnode) {
   snprintf(syncInfo.path, sizeof(syncInfo.path), "%s%ssync", pMnode->path, TD_DIRSEP);
   syncInfo.pFsm = mndSyncMakeFsm(pMnode);
 
-  mInfo("vgId:1, start to open sync, replica:%d selfIndex:%d", pMgmt->numOfReplicas, pMgmt->selfIndex);
+  mInfo("vgId:1, start to open mnode sync, replica:%d selfIndex:%d", pMgmt->numOfReplicas, pMgmt->selfIndex);
   SSyncCfg *pCfg = &syncInfo.syncCfg;
   pCfg->totalReplicaNum = pMgmt->numOfTotalReplicas;
   pCfg->replicaNum = pMgmt->numOfReplicas;
@@ -502,7 +502,7 @@ int32_t mndInitSync(SMnode *pMnode) {
   }
   pMnode->pSdb->sync = pMgmt->sync;
 
-  mInfo("mnode-sync is opened, id:%" PRId64, pMgmt->sync);
+  mInfo("vgId:1, mnode sync is opened, id:%" PRId64, pMgmt->sync);
   TAOS_RETURN(code);
 }
 
@@ -606,12 +606,13 @@ int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw, int32_t transId) {
 }
 
 void mndSyncStart(SMnode *pMnode) {
+  mInfo("vgId:1, start to start mnode sync");
   SSyncMgmt *pMgmt = &pMnode->syncMgmt;
   if (syncStart(pMgmt->sync) < 0) {
     mError("vgId:1, failed to start sync, id:%" PRId64, pMgmt->sync);
     return;
   }
-  mInfo("vgId:1, sync started, id:%" PRId64, pMgmt->sync);
+  mInfo("vgId:1, mnode sync started, id:%" PRId64, pMgmt->sync);
 }
 
 void mndSyncStop(SMnode *pMnode) {
@@ -837,6 +837,9 @@ static bool mndCheckTransConflict(SMnode *pMnode, STrans *pNew) {
 
   if (pNew->conflict == TRN_CONFLICT_NOTHING) return conflict;
 
+  int32_t size = sdbGetSize(pMnode->pSdb, SDB_TRANS);
+  mInfo("trans:%d, trans hash size %d", pNew->id, size);
+
   while (1) {
     pIter = sdbFetch(pMnode->pSdb, SDB_TRANS, pIter, (void **)&pTrans);
     if (pIter == NULL) break;
@@ -905,14 +908,14 @@ int32_t mndTransCheckConflict(SMnode *pMnode, STrans *pTrans) {
   if (pTrans->conflict == TRN_CONFLICT_DB || pTrans->conflict == TRN_CONFLICT_DB_INSIDE) {
     if (strlen(pTrans->dbname) == 0 && strlen(pTrans->stbname) == 0) {
       code = TSDB_CODE_MND_TRANS_CONFLICT;
-      mError("trans:%d, failed to prepare conflict db not set", pTrans->id);
+      mError("trans:%d, failed to check tran conflict since db not set", pTrans->id);
       TAOS_RETURN(code);
     }
   }
 
   if (mndCheckTransConflict(pMnode, pTrans)) {
     code = TSDB_CODE_MND_TRANS_CONFLICT;
-    mError("trans:%d, failed to prepare since %s", pTrans->id, tstrerror(code));
+    mError("trans:%d, failed to check tran conflict since %s", pTrans->id, tstrerror(code));
     TAOS_RETURN(code);
   }
 
@@ -948,7 +951,7 @@ int32_t mndTransCheckConflictWithCompact(SMnode *pMnode, STrans *pTrans) {
 
   if (conflict) {
     code = TSDB_CODE_MND_TRANS_CONFLICT_COMPACT;
-    mError("trans:%d, failed to prepare since %s", pTrans->id, tstrerror(code));
+    mError("trans:%d, failed to check tran conflict with compact since %s", pTrans->id, tstrerror(code));
     TAOS_RETURN(code);
   }
 
@@ -4673,7 +4673,7 @@ int32_t translateTable(STranslateContext* pCxt, SNode** pTable, SNode* pJoinPare
     return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_GET_META_ERROR, tstrerror(code));
   }
 #ifdef TD_ENTERPRISE
-  if (TSDB_VIEW_TABLE == pRealTable->pMeta->tableType && !pCurrSmt->tagScan) {
+  if (TSDB_VIEW_TABLE == pRealTable->pMeta->tableType && (!pCurrSmt->tagScan || pCxt->pParseCxt->biMode)) {
     return translateView(pCxt, pTable, &name);
   }
   code = translateAudit(pCxt, pRealTable, &name);
@@ -113,7 +113,8 @@ void syncLogRecvSyncSnapshotRsp(SSyncNode* pSyncNode, const SyncSnapshotRsp* pMs
 void syncLogRecvAppendEntries(SSyncNode* pSyncNode, const SyncAppendEntries* pMsg, const char* s);
 void syncLogSendAppendEntries(SSyncNode* pSyncNode, const SyncAppendEntries* pMsg, const char* s);
 
-void syncLogRecvRequestVote(SSyncNode* pSyncNode, const SyncRequestVote* pMsg, int32_t voteGranted, const char* s);
+void syncLogRecvRequestVote(SSyncNode* pSyncNode, const SyncRequestVote* pMsg, int32_t voteGranted, const char* s,
+                            const char* opt);
 void syncLogSendRequestVote(SSyncNode* pSyncNode, const SyncRequestVote* pMsg, const char* s);
 
 void syncLogRecvRequestVoteReply(SSyncNode* pSyncNode, const SyncRequestVoteReply* pMsg, const char* s);
@@ -61,6 +61,7 @@ static int32_t syncDoLeaderTransfer(SSyncNode* ths, SRpcMsg* pRpcMsg, SSyncRaftE
 static ESyncStrategy syncNodeStrategy(SSyncNode* pSyncNode);
 
 int64_t syncOpen(SSyncInfo* pSyncInfo, int32_t vnodeVersion) {
+  sInfo("vgId:%d, start to open sync", pSyncInfo->vgId);
   SSyncNode* pSyncNode = syncNodeOpen(pSyncInfo, vnodeVersion);
   if (pSyncNode == NULL) {
     sError("vgId:%d, failed to open sync node", pSyncInfo->vgId);
@@ -79,6 +80,7 @@ int64_t syncOpen(SSyncInfo* pSyncInfo, int32_t vnodeVersion) {
   pSyncNode->hbBaseLine = pSyncInfo->heartbeatMs;
   pSyncNode->heartbeatTimerMS = pSyncInfo->heartbeatMs;
   pSyncNode->msgcb = pSyncInfo->msgcb;
+  sInfo("vgId:%d, sync opened", pSyncInfo->vgId);
   return pSyncNode->rid;
 }
 
@@ -91,6 +93,7 @@ int32_t syncStart(int64_t rid) {
     sError("failed to acquire rid:%" PRId64 " of tsNodeReftId for pSyncNode", rid);
     TAOS_RETURN(code);
   }
+  sInfo("vgId:%d, begin to start sync", pSyncNode->vgId);
 
   if ((code = syncNodeRestore(pSyncNode)) < 0) {
     sError("vgId:%d, failed to restore sync log buffer since %s", pSyncNode->vgId, tstrerror(code));
@@ -103,6 +106,9 @@ int32_t syncStart(int64_t rid) {
   }
 
   syncNodeRelease(pSyncNode);
+
+  sInfo("vgId:%d, sync started", pSyncNode->vgId);
+
   TAOS_RETURN(code);
 
 _err:
@@ -1370,7 +1376,7 @@ SSyncNode* syncNodeOpen(SSyncInfo* pSyncInfo, int32_t vnodeVersion) {
   pSyncNode->hbrSlowNum = 0;
   pSyncNode->tmrRoutineNum = 0;
 
-  sNInfo(pSyncNode, "sync open, node:%p electInterval:%d heartbeatInterval:%d heartbeatTimeout:%d", pSyncNode,
+  sNInfo(pSyncNode, "sync node opened, node:%p electInterval:%d heartbeatInterval:%d heartbeatTimeout:%d", pSyncNode,
          tsElectInterval, tsHeartbeatInterval, tsHeartbeatTimeout);
   return pSyncNode;
@@ -1434,6 +1440,7 @@ int32_t syncNodeRestore(SSyncNode* pSyncNode) {
 
 int32_t syncNodeStart(SSyncNode* pSyncNode) {
   // start raft
+  sInfo("vgId:%d, begin to start sync node", pSyncNode->vgId);
   if (pSyncNode->raftCfg.cfg.nodeInfo[pSyncNode->raftCfg.cfg.myIndex].nodeRole == TAOS_SYNC_ROLE_LEARNER) {
     syncNodeBecomeLearner(pSyncNode, "first start");
   } else {
@@ -1453,6 +1460,7 @@ int32_t syncNodeStart(SSyncNode* pSyncNode) {
   if (ret != 0) {
     sError("vgId:%d, failed to start ping timer since %s", pSyncNode->vgId, tstrerror(ret));
   }
+  sInfo("vgId:%d, sync node started", pSyncNode->vgId);
   return ret;
 }
 
@@ -2034,6 +2042,8 @@ void syncNodeBecomeFollower(SSyncNode* pSyncNode, const char* debugStr) {
 
   // reset elect timer
   syncNodeResetElectTimer(pSyncNode);
+
+  sInfo("vgId:%d, become follower. %s", pSyncNode->vgId, debugStr);
 }
 
 void syncNodeBecomeLearner(SSyncNode* pSyncNode, const char* debugStr) {
@@ -91,9 +91,11 @@ int32_t syncNodeOnRequestVote(SSyncNode* ths, const SRpcMsg* pRpcMsg) {
   SyncRequestVote* pMsg = pRpcMsg->pCont;
   bool resetElect = false;
 
+  syncLogRecvRequestVote(ths, pMsg, -1, "", "recv");
+
   // if already drop replica, do not process
   if (!syncNodeInRaftGroup(ths, &pMsg->srcId)) {
-    syncLogRecvRequestVote(ths, pMsg, -1, "not in my config");
+    syncLogRecvRequestVote(ths, pMsg, -1, "not in my config", "process");
 
     TAOS_RETURN(TSDB_CODE_SYN_MISMATCHED_SIGNATURE);
   }
@@ -133,7 +135,7 @@ int32_t syncNodeOnRequestVote(SSyncNode* ths, const SRpcMsg* pRpcMsg) {
   if (!(!grant || pMsg->term == pReply->term)) return TSDB_CODE_SYN_INTERNAL_ERROR;
 
   // trace log
-  syncLogRecvRequestVote(ths, pMsg, pReply->voteGranted, "");
+  syncLogRecvRequestVote(ths, pMsg, pReply->voteGranted, "", "proceed");
   syncLogSendRequestVoteReply(ths, pReply, "");
   (void)syncNodeSendMsgById(&pReply->destId, ths, &rpcMsg);
@@ -470,13 +470,13 @@ void syncLogSendAppendEntries(SSyncNode* pSyncNode, const SyncAppendEntries* pMs
                pMsg->commitIndex, pMsg->dataLen, s);
 }
 
-void syncLogRecvRequestVote(SSyncNode* pSyncNode, const SyncRequestVote* pMsg, int32_t voteGranted,
-                            const char* errmsg) {
+void syncLogRecvRequestVote(SSyncNode* pSyncNode, const SyncRequestVote* pMsg, int32_t voteGranted, const char* errmsg,
+                            const char* opt) {
   char statusMsg[64];
   snprintf(statusMsg, sizeof(statusMsg), "granted:%d", voteGranted);
   sNInfo(pSyncNode,
-         "recv sync-request-vote from dnode:%d, {term:%" PRId64 ", last-index:%" PRId64 ", last-term:%" PRId64 "}, %s",
-         DID(&pMsg->srcId), pMsg->term, pMsg->lastLogIndex, pMsg->lastLogTerm,
+         "%s sync-request-vote from dnode:%d, {term:%" PRId64 ", last-index:%" PRId64 ", last-term:%" PRId64 "}, %s",
+         opt, DID(&pMsg->srcId), pMsg->term, pMsg->lastLogIndex, pMsg->lastLogTerm,
          (voteGranted != -1) ? statusMsg : errmsg);
 }
 
@@ -1,23 +1,45 @@
 import os
 import re
 from loguru import logger
 
 # List of source directories to search
+self_path = os.path.dirname(os.path.realpath(__file__))
+
+# Check if "community" or "tests" is in self_path
+index_community = self_path.find("community")
+if index_community != -1:
+    TD_project_path = self_path[:index_community]
+    index_TDinternal = TD_project_path.find("TDinternal")
+    # Check if index_TDinternal is valid and set work_path accordingly
+    if index_TDinternal != -1:
+        work_path = TD_project_path[:index_TDinternal]
+else:
+    index_tests = self_path.find("tests")
+    if index_tests != -1:
+        TD_project_path = self_path[:index_tests]
+        # Check if index_TDengine is valid and set work_path accordingly
+        index_TDengine = TD_project_path.find("TDengine")
+        if index_TDengine != -1:
+            work_path = TD_project_path[:index_TDengine]
+TD_project_path = TD_project_path.rstrip('/')
+print(TD_project_path)
 source_dirs = [
-    "community/source",
-    "community/include",
-    "enterprise/src/plugins/"
+    f"{TD_project_path}/community/source",
+    f"{TD_project_path}/community/include",
+    f"{TD_project_path}/enterprise/src/plugins/"
 ]
 
 # List of directories to exclude
 exclude_dirs = [
-    "community/source/client/jni"
+    f"{TD_project_path}/community/source/client/jni"
 ]
 
 # List of files to exclude
 exclude_source_files = [
-    "community/source/libs/parser/src/sql.c",
-    "community/source/util/src/tlog.c",
-    "community/include/util/tlog.h"
+    f"{TD_project_path}/community/source/libs/parser/src/sql.c",
+    f"{TD_project_path}/community/source/util/src/tlog.c",
+    f"{TD_project_path}/community/include/util/tlog.h"
 ]
 
 def grep_asserts_in_file(file_path, summary_list, detaild_list):
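Note (illustration, not part of the diff): the prefix slicing above derives TD_project_path from wherever the script happens to live inside the checkout. A minimal sketch of how it behaves, assuming a hypothetical checkout rooted at /home/ci/TDinternal:

    # hypothetical path layout, for illustration only
    self_path = "/home/ci/TDinternal/community/tests/ci"
    index_community = self_path.find("community")
    TD_project_path = self_path[:index_community]    # "/home/ci/TDinternal/"
    TD_project_path = TD_project_path.rstrip('/')    # "/home/ci/TDinternal"
    print(TD_project_path)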
@@ -26,10 +48,10 @@ def grep_asserts_in_file(file_path, summary_list, detaild_list):
     with open(file_path, 'r') as file:
         for line_number, line in enumerate(file, start=1):
             if re.search(r'\bassert\(.*\)|\bASSERT\(.*\)|\bASSERTS\(.*\)|\bASSERT_CORE\(.*\)', line):
-                detaild_list.append(f"{file_path}:{line_number}: {line.strip()}")
+                detaild_list.append(f"{file_path}:{line.strip()}:{line_number}")
                 match_count += 1
     if match_count > 0:
-        summary_list.append(f"Total matches in {file_path}: {match_count}")
+        summary_list.append(f"Total matches in {file_path}:{match_count}")
 
 def traverse_and_grep(source_dirs, exclude_dirs, exclude_source_files):
     """Traverse directories and grep for assert, ASSERTS, or ASSERT function calls in .h and .c files."""
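Note (illustration, not part of the diff): the word-boundary pattern above flags direct assert/ASSERT/ASSERTS/ASSERT_CORE calls but not identifiers that merely contain the word. A small sketch with hypothetical input lines:

    import re
    pattern = r'\bassert\(.*\)|\bASSERT\(.*\)|\bASSERTS\(.*\)|\bASSERT_CORE\(.*\)'
    print(bool(re.search(pattern, 'assert(pMsg != NULL);')))           # True: bare assert call
    print(bool(re.search(pattern, 'ASSERT_CORE(x == 0, "oops");')))    # True: ASSERT_CORE call
    print(bool(re.search(pattern, 'taosAssertDebug(cond);')))          # False: different identifier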
@@ -47,7 +69,52 @@ def traverse_and_grep(source_dirs, exclude_dirs, exclude_source_files):
                 grep_asserts_in_file(file_path, summary_list, detaild_list)
     return summary_list, detaild_list
 
+def check_list_result(result_list,detaild_list):
+    logger.debug("check assert in source code")
+    error_message = "ERROR: do not add `assert` statements in new code."
+    error_message2 = "ERROR: Please check the detailed information below: assert statement with file name and line number"
+    remove_detail_items = [
+        f"{TD_project_path}/community/source/dnode/vnode/src/tsdb/tsdbCommit2.c:ASSERT_CORE(tsdb->imem == NULL, \"imem should be null to commit mem\");",
+        f"{TD_project_path}/community/include/util/types.h:assert(sizeof(float) == sizeof(uint32_t));",
+        f"{TD_project_path}/community/include/util/types.h:assert(sizeof(double) == sizeof(uint64_t));"
+    ]
+    expected_strings = [
+        f"Total matches in {TD_project_path}/community/source/dnode/vnode/src/tsdb/tsdbCommit2.c:1",
+        f"Total matches in {TD_project_path}/community/include/util/types.h:2"
+    ]
+    # logger.debug(len(result_list))
+    if len(result_list) != 2:
+        logger.error(f"{error_message}")
+        for item in expected_strings:
+            if item in result_list:
+                result_list.remove(item)
+        logger.error("\n" + "\n".join(result_list))
+        logger.error(f"{error_message2}")
+        for item in remove_detail_items:
+            if item in detaild_list:
+                detaild_list.remove(item)
+        logger.error("\n" + "\n".join(detaild_list))
+        exit(1)
+    else:
+        # check if all expected strings are in the result list
+        if all(item in result_list for item in expected_strings):
+            # logger.debug(result_list)
+            # logger.debug(detaild_list)
+            if all(any(remove_detail_item in detaild for remove_detail_item in remove_detail_items) for detaild in detaild_list):
+                logger.info("Validation successful.")
+            else:
+                logger.error(f"{error_message}")
+                for item in expected_strings:
+                    if item in result_list:
+                        result_list.remove(item)
+                logger.error("\n" + "\n".join(result_list))
+                logger.error(f"{error_message2}")
+                for item in remove_detail_items:
+                    if item in detaild_list:
+                        detaild_list.remove(item)
+                logger.error("\n" + "\n".join(detaild_list))
+                exit(1)
 if __name__ == "__main__":
     summary_list, detaild_list = traverse_and_grep(source_dirs, exclude_dirs, exclude_source_files)
     print("\n".join(summary_list))
     # print("\n".join(detaild_list))
+    check_list_result(summary_list,detaild_list)
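Note (illustration, not part of the diff): check_list_result compares summary strings verbatim against the whitelist, which is why the expected_strings entries drop the space before the count to match the new f"Total matches in {file_path}:{match_count}" format. A minimal sketch with an assumed checkout root:

    TD_project_path = "/home/ci/TDinternal"    # assumed checkout root, illustration only
    file_path = f"{TD_project_path}/community/include/util/types.h"
    match_count = 2
    summary = f"Total matches in {file_path}:{match_count}"
    expected = f"Total matches in {TD_project_path}/community/include/util/types.h:2"
    print(summary == expected)                 # True: exact string equality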
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+function usage() {
+    echo "$0"
+    echo -e "\t -d work dir"
+    echo -e "\t -h help"
+}
+
+while getopts "d:h" opt; do
+    case $opt in
+        d)
+            WORKDIR=$OPTARG
+            ;;
+        h)
+            usage
+            exit 0
+            ;;
+        \?)
+            echo "Invalid option: -$OPTARG"
+            usage
+            exit 0
+            ;;
+    esac
+done
+
+if [ -z "$WORKDIR" ]; then
+    usage
+    exit 1
+fi
+
+
+# enterprise edition
+INTERNAL_REPDIR=$WORKDIR/TDinternal
+REPDIR_DEBUG=$WORKDIR/debugNoSan/
+
+REP_MOUNT_PARAM="$INTERNAL_REPDIR:/home/TDinternal"
+
+CONTAINER_TESTDIR=/home/TDinternal/community
+
+check_assert_scripts="$CONTAINER_TESTDIR/tests/ci/count_assert.py"
+
+ulimit -c unlimited
+cat << EOF
+docker run \
+    -v $REP_MOUNT_PARAM \
+    --rm --ulimit core=-1 taos_test:v1.0 python3 $check_assert_scripts
+EOF
+docker run \
+    -v $REP_MOUNT_PARAM \
+    --rm --ulimit core=-1 taos_test:v1.0 python3 $check_assert_scripts
+
+ret=$?
+exit $ret