Merge pull request #22227 from taosdata/fix/TS-3728
fix: add ASSERT_NOT_CORE macro so that ASSERT logs instead of dumping core in release builds
This commit is contained in:
commit
c26165975d
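For context before the diff: the commit wires a CMake option (ASSERT_NOT_CORE) through a compile definition into the tsAssert global, so release builds can log failed assertions and keep running instead of aborting. Below is a minimal self-contained sketch of that pattern, not the TDengine implementation: tsAssert and ASSERT_NOT_CORE appear in the hunks further down, while assertFail and the exact ASSERT expansion here are hypothetical.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#ifdef ASSERT_NOT_CORE
bool tsAssert = false;   /* release build: log the failure and continue */
#else
bool tsAssert = true;    /* debug build: abort so a core file is produced */
#endif

/* Returns nonzero when the condition fails, so callers can branch on it,
   mirroring the `if (ASSERT(pHandle->msg != NULL))` pattern in the tq hunk below. */
static int assertFail(const char *cond, const char *file, int line) {
  fprintf(stderr, "assertion failed: %s, at %s:%d\n", cond, file, line);
  if (tsAssert) abort();  /* dump core only when assertions are fatal */
  return 1;               /* otherwise report the failure and keep going */
}

#define ASSERT(cond) ((cond) ? 0 : assertFail(#cond, __FILE__, __LINE__))

int main(void) {
  void *p = NULL;
  if (ASSERT(p != NULL)) {
    /* reached only when compiled with -DASSERT_NOT_CORE; otherwise abort() fires */
    fprintf(stderr, "recovered from failed assertion\n");
  }
  return 0;
}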
@@ -1859,8 +1859,8 @@ static int32_t tmqHandleNoPollRsp(tmq_t* tmq, SMqRspWrapper* rspWrapper, bool* p
 static void updateVgInfo(SMqClientVg* pVg, STqOffsetVal* reqOffset, STqOffsetVal* rspOffset, int64_t sver, int64_t ever, int64_t consumerId){
   if (!pVg->seekUpdated) {
     tscDebug("consumer:0x%" PRIx64" local offset is update, since seekupdate not set", consumerId);
-    if(reqOffset->type != 0) pVg->offsetInfo.beginOffset = *reqOffset;
-    if(rspOffset->type != 0) pVg->offsetInfo.endOffset = *rspOffset;
+    pVg->offsetInfo.beginOffset = *reqOffset;
+    pVg->offsetInfo.endOffset = *rspOffset;
   } else {
     tscDebug("consumer:0x%" PRIx64" local offset is NOT update, since seekupdate is set", consumerId);
   }
@@ -1948,7 +1948,7 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
       }
       taosWUnLockLatch(&tmq->lock);
     } else {
-      tscDebug("consumer:0x%" PRIx64 " vgId:%d msg discard since epoch mismatch: msg epoch %d, consumer epoch %d",
+      tscInfo("consumer:0x%" PRIx64 " vgId:%d msg discard since epoch mismatch: msg epoch %d, consumer epoch %d",
               tmq->consumerId, pollRspWrapper->vgId, pDataRsp->head.epoch, consumerEpoch);
       pRspWrapper = tmqFreeRspWrapper(pRspWrapper);
       taosFreeQitem(pollRspWrapper);
@@ -1979,7 +1979,7 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
       taosWUnLockLatch(&tmq->lock);
       return pRsp;
     } else {
-      tscDebug("consumer:0x%" PRIx64 " vgId:%d msg discard since epoch mismatch: msg epoch %d, consumer epoch %d",
+      tscInfo("consumer:0x%" PRIx64 " vgId:%d msg discard since epoch mismatch: msg epoch %d, consumer epoch %d",
              tmq->consumerId, pollRspWrapper->vgId, pollRspWrapper->metaRsp.head.epoch, consumerEpoch);
       pRspWrapper = tmqFreeRspWrapper(pRspWrapper);
       taosFreeQitem(pollRspWrapper);
@@ -2036,7 +2036,7 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
       taosWUnLockLatch(&tmq->lock);
       return pRsp;
     } else {
-      tscDebug("consumer:0x%" PRIx64 " vgId:%d msg discard since epoch mismatch: msg epoch %d, consumer epoch %d",
+      tscInfo("consumer:0x%" PRIx64 " vgId:%d msg discard since epoch mismatch: msg epoch %d, consumer epoch %d",
              tmq->consumerId, pollRspWrapper->vgId, pollRspWrapper->taosxRsp.head.epoch, consumerEpoch);
       pRspWrapper = tmqFreeRspWrapper(pRspWrapper);
       taosFreeQitem(pollRspWrapper);
@@ -7227,6 +7227,9 @@ bool tOffsetEqual(const STqOffsetVal *pLeft, const STqOffsetVal *pRight) {
       return pLeft->uid == pRight->uid && pLeft->ts == pRight->ts;
     } else if (pLeft->type == TMQ_OFFSET__SNAPSHOT_META) {
       return pLeft->uid == pRight->uid;
+    } else {
+      uError("offset type:%d", pLeft->type);
+      ASSERT(0);
     }
   }
   return false;
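Note on the ASSERT(0) added here: with tsAssert defaulting to false under ASSERT_NOT_CORE (see the globals hunk near the end of this diff), the unknown-offset branch presumably reduces to the uError log plus fall-through to `return false`, while debug builds still abort and core. This reading is inferred from the hunks in this commit, not stated in it.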
@@ -420,6 +420,7 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) {
 
       SMqSubscribeObj *pSub = mndAcquireSubscribe(pMnode, pConsumer->cgroup, data->topicName);
       if(pSub == NULL){
+        ASSERT(0);
         continue;
       }
       taosWLockLatch(&pSub->lock);
@@ -515,7 +516,10 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
       char *topic = taosArrayGetP(pConsumer->currentTopics, i);
       SMqSubscribeObj *pSub = mndAcquireSubscribe(pMnode, pConsumer->cgroup, topic);
       // txn guarantees pSub is created
-      if(pSub == NULL) continue;
+      if(pSub == NULL) {
+        ASSERT(0);
+        continue;
+      }
       taosRLockLatch(&pSub->lock);
 
       SMqSubTopicEp topicEp = {0};
@@ -524,6 +528,7 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
       // 2.1 fetch topic schema
       SMqTopicObj *pTopic = mndAcquireTopic(pMnode, topic);
       if(pTopic == NULL) {
+        ASSERT(0);
         taosRUnLockLatch(&pSub->lock);
         mndReleaseSubscribe(pMnode, pSub);
         continue;
@@ -1168,7 +1168,7 @@ static int32_t buildResult(SSDataBlock *pBlock, int32_t* numOfRows, int64_t cons
     pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
     colDataSetVal(pColInfo, *numOfRows, (const char *)consumerIdHex, consumerId == -1);
 
-    mDebug("mnd show subscriptions: topic %s, consumer:0x%" PRIx64 " cgroup %s vgid %d", varDataVal(topic),
+    mInfo("mnd show subscriptions: topic %s, consumer:0x%" PRIx64 " cgroup %s vgid %d", varDataVal(topic),
          consumerId, varDataVal(cgroup), pVgEp->vgId);
 
     // offset
@@ -146,6 +146,20 @@ void tqClose(STQ* pTq) {
     return;
   }
 
+  void* pIter = taosHashIterate(pTq->pPushMgr, NULL);
+  while (pIter) {
+    STqHandle* pHandle = *(STqHandle**)pIter;
+    int32_t    vgId = TD_VID(pTq->pVnode);
+
+    if(pHandle->msg != NULL) {
+      tqPushEmptyDataRsp(pHandle, vgId);
+      rpcFreeCont(pHandle->msg->pCont);
+      taosMemoryFree(pHandle->msg);
+      pHandle->msg = NULL;
+    }
+    pIter = taosHashIterate(pTq->pPushMgr, pIter);
+  }
+
   tqOffsetClose(pTq->pOffsetStore);
   taosHashCleanup(pTq->pHandle);
   taosHashCleanup(pTq->pPushMgr);
@@ -278,6 +292,10 @@ int32_t tqPushEmptyDataRsp(STqHandle* pHandle, int32_t vgId) {
   tqInitDataRsp(&dataRsp, &req);
   dataRsp.blockNum = 0;
+  dataRsp.rspOffset = dataRsp.reqOffset;
+  char buf[TSDB_OFFSET_LEN] = {0};
+  tFormatOffset(buf, TSDB_OFFSET_LEN, &dataRsp.reqOffset);
+  tqInfo("tqPushEmptyDataRsp to consumer:0x%"PRIx64 " vgId:%d, offset:%s, reqId:0x%" PRIx64, req.consumerId, vgId, buf, req.reqId);
 
   tqSendDataRsp(pHandle, pHandle->msg, &req, &dataRsp, TMQ_MSG_TYPE__POLL_DATA_RSP, vgId);
   tDeleteMqDataRsp(&dataRsp);
   return 0;
@@ -515,10 +533,11 @@ int32_t tqProcessPollPush(STQ* pTq, SRpcMsg* pMsg) {
 
     while (pIter) {
       STqHandle* pHandle = *(STqHandle**)pIter;
-      tqDebug("vgId:%d start set submit for pHandle:%p, consumer:0x%" PRIx64, vgId, pHandle, pHandle->consumerId);
+      tqInfo("vgId:%d start set submit for pHandle:%p, consumer:0x%" PRIx64, vgId, pHandle, pHandle->consumerId);
 
       if (ASSERT(pHandle->msg != NULL)) {
         tqError("pHandle->msg should not be null");
+        taosHashCancelIterate(pTq->pPushMgr, pIter);
         break;
       }else{
         SRpcMsg msg = {.msgType = TDMT_VND_TMQ_CONSUME, .pCont = pHandle->msg->pCont, .contLen = pHandle->msg->contLen, .info = pHandle->msg->info};
@@ -849,30 +868,28 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
     taosWLockLatch(&pTq->lock);
 
     if (pHandle->consumerId == req.newConsumerId) {  // do nothing
-      tqInfo("vgId:%d consumer:0x%" PRIx64 " remains, no switch occurs, should not reach here", req.vgId,
-             req.newConsumerId);
+      tqInfo("vgId:%d no switch consumer:0x%" PRIx64 " remains, because redo wal log", req.vgId, req.newConsumerId);
     } else {
-      tqInfo("vgId:%d switch consumer from Id:0x%" PRIx64 " to Id:0x%" PRIx64, req.vgId, pHandle->consumerId,
-             req.newConsumerId);
+      tqInfo("vgId:%d switch consumer from Id:0x%" PRIx64 " to Id:0x%" PRIx64, req.vgId, pHandle->consumerId, req.newConsumerId);
       atomic_store_64(&pHandle->consumerId, req.newConsumerId);
-      // atomic_add_fetch_32(&pHandle->epoch, 1);
-
-      // kill executing task
-      // if(tqIsHandleExec(pHandle)) {
-      //   qTaskInfo_t pTaskInfo = pHandle->execHandle.task;
-      //   if (pTaskInfo != NULL) {
-      //     qKillTask(pTaskInfo, TSDB_CODE_SUCCESS);
-      //   }
-
-      //   if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
-      //     qStreamCloseTsdbReader(pTaskInfo);
-      //   }
-      // }
-      // remove if it has been register in the push manager, and return one empty block to consumer
-      tqUnregisterPushHandle(pTq, pHandle);
-      ret = tqMetaSaveHandle(pTq, req.subKey, pHandle);
-    }
+      // atomic_add_fetch_32(&pHandle->epoch, 1);
+
+      // kill executing task
+      // if(tqIsHandleExec(pHandle)) {
+      //   qTaskInfo_t pTaskInfo = pHandle->execHandle.task;
+      //   if (pTaskInfo != NULL) {
+      //     qKillTask(pTaskInfo, TSDB_CODE_SUCCESS);
+      //   }
+
+      //   if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
+      //     qStreamCloseTsdbReader(pTaskInfo);
+      //   }
+      // }
+      // remove if it has been register in the push manager, and return one empty block to consumer
+      tqUnregisterPushHandle(pTq, pHandle);
+      taosWUnLockLatch(&pTq->lock);
+      ret = tqMetaSaveHandle(pTq, req.subKey, pHandle);
+    }
 
 end:
@@ -78,12 +78,12 @@ int32_t tqRegisterPushHandle(STQ* pTq, void* handle, SRpcMsg* pMsg) {
   memcpy(pHandle->msg->pCont, pMsg->pCont, pMsg->contLen);
   pHandle->msg->contLen = pMsg->contLen;
   int32_t ret = taosHashPut(pTq->pPushMgr, pHandle->subKey, strlen(pHandle->subKey), &pHandle, POINTER_BYTES);
-  tqDebug("vgId:%d data is over, ret:%d, consumerId:0x%" PRIx64 ", register to pHandle:%p, pCont:%p, len:%d", vgId, ret,
+  tqInfo("vgId:%d data is over, ret:%d, consumerId:0x%" PRIx64 ", register to pHandle:%p, pCont:%p, len:%d", vgId, ret,
          pHandle->consumerId, pHandle, pHandle->msg->pCont, pHandle->msg->contLen);
   return 0;
 }
 
-int32_t tqUnregisterPushHandle(STQ* pTq, void *handle) {
+int tqUnregisterPushHandle(STQ* pTq, void *handle) {
   STqHandle *pHandle = (STqHandle*)handle;
   int32_t vgId = TD_VID(pTq->pVnode);
 
@@ -91,7 +91,7 @@ int32_t tqUnregisterPushHandle(STQ* pTq, void *handle) {
     return 0;
   }
   int32_t ret = taosHashRemove(pTq->pPushMgr, pHandle->subKey, strlen(pHandle->subKey));
-  tqDebug("vgId:%d remove pHandle:%p,ret:%d consumer Id:0x%" PRIx64, vgId, pHandle, ret, pHandle->consumerId);
+  tqInfo("vgId:%d remove pHandle:%p,ret:%d consumer Id:0x%" PRIx64, vgId, pHandle, ret, pHandle->consumerId);
 
   if(pHandle->msg != NULL) {
     // tqPushDataRsp(pHandle, vgId);
@@ -317,6 +317,7 @@ int32_t tqExtractDataForMq(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequ
     // the offset value can not be monotonious increase??
     offset = reqOffset;
   } else {
+    uError("req offset type is 0");
     return TSDB_CODE_TMQ_INVALID_MSG;
   }
 
@@ -476,8 +476,8 @@ void vnodeClose(SVnode *pVnode) {
     tsem_wait(&pVnode->canCommit);
     vnodeSyncClose(pVnode);
     vnodeQueryClose(pVnode);
-    walClose(pVnode->pWal);
     tqClose(pVnode->pTq);
+    walClose(pVnode->pWal);
    if (pVnode->pTsdb) tsdbClose(&pVnode->pTsdb);
     smaClose(pVnode->pSma);
     if (pVnode->pMeta) metaClose(&pVnode->pMeta);
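On the reorder above: tqClose now runs before walClose. Presumably this is because the new teardown loop added to tqClose earlier in this diff still replies to and frees pending push-manager messages, work that should happen while the rest of the vnode, including its WAL, is alive; the old order risked touching already-closed state. This rationale is an inference, not stated in the commit.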
@@ -5,6 +5,13 @@ if (DEFINED GRANT_CFG_INCLUDE_DIR)
   add_definitions(-DGRANTS_CFG)
 endif()
 
+IF (${ASSERT_NOT_CORE})
+  ADD_DEFINITIONS(-DASSERT_NOT_CORE)
+  MESSAGE(STATUS "disable assert core")
+ELSE ()
+  MESSAGE(STATUS "enable assert core")
+ENDIF (${ASSERT_NOT_CORE})
+
 target_include_directories(
   util
   PUBLIC "${TD_SOURCE_DIR}/include/util"
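With this gate in place, the flag would presumably be toggled at configure time, e.g. `cmake .. -DASSERT_NOT_CORE=true`, which defines ASSERT_NOT_CORE for this directory's targets and, via the tsAssert default in the next hunk, turns failed ASSERTs into log lines rather than core dumps. The exact invocation is an assumption based on standard CMake cache-variable usage; only the CMake gate above is from the commit.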
@@ -76,7 +76,11 @@ static int32_t tsDaylightActive; /* Currently in daylight saving time. */
 
 bool tsLogEmbedded = 0;
 bool tsAsyncLog = true;
+#ifdef ASSERT_NOT_CORE
+bool tsAssert = false;
+#else
 bool tsAssert = true;
+#endif
 int32_t tsNumOfLogLines = 10000000;
 int32_t tsLogKeepDays = 0;
 LogFp tsLogFp = NULL;
@@ -105,7 +105,7 @@
 ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py
 ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py
 ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py
-#,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py
+,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py
 ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py
 ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqAutoCreateTbl.py
 ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqDnodeRestart.py
@@ -222,9 +222,9 @@ class TDTestCase:
 
         actConsumeTotalRows = resultList[0]
 
-        if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted):
+        if not (actConsumeTotalRows >= 0 and actConsumeTotalRows <= totalRowsInserted):
             tdLog.info("act consume rows: %d"%(actConsumeTotalRows))
-            tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted))
+            tdLog.info("and second consume rows should be between [0 and %d]"%(totalRowsInserted))
             tdLog.exit("%d tmq consume rows error!"%consumerId)
 
         time.sleep(10)