close backend

This commit is contained in:
parent 7815e44655
commit 993ae84398
@@ -141,6 +141,7 @@ int32_t tqInitialize(STQ* pTq) {
 }
 
 void tqClose(STQ* pTq) {
+  qDebug("start to close tq");
   if (pTq == NULL) {
     return;
   }
@@ -166,6 +167,7 @@ void tqClose(STQ* pTq) {
   taosMemoryFree(pTq->path);
   tqMetaClose(pTq);
   streamMetaClose(pTq->pStreamMeta);
+  qDebug("end to close tq");
   taosMemoryFree(pTq);
 }
 
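The tqClose hunks above bracket the teardown with "start to close" / "end to close" debug logs around freeing the handle's members. A minimal standalone sketch of that bracketed-close idiom, using hypothetical names (Resource, resource_close, log_debug, meta_close) rather than the actual TQ types:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-ins for the real handle and logger. */
typedef struct Resource {
  char *path;  /* heap-owned path, freed on close */
  void *meta;  /* sub-object closed before the handle itself */
} Resource;

static void log_debug(const char *msg) { fprintf(stderr, "DEBUG: %s\n", msg); }
static void meta_close(void *meta) { free(meta); }

/* Close path mirrors the shape of tqClose: log entry, tolerate NULL,
 * release members, log completion, then free the handle last. */
static void resource_close(Resource *r) {
  log_debug("start to close resource");
  if (r == NULL) {
    return;
  }
  free(r->path);
  meta_close(r->meta);
  log_debug("end to close resource");
  free(r);
}

int main(void) {
  Resource *r = calloc(1, sizeof(*r));
  r->path = strdup("/tmp/resource");
  r->meta = calloc(1, 16);
  resource_close(r);
  return 0;
}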
@@ -292,7 +294,8 @@ int32_t tqPushEmptyDataRsp(STqHandle* pHandle, int32_t vgId) {
   dataRsp.rspOffset = dataRsp.reqOffset;
   char buf[TSDB_OFFSET_LEN] = {0};
   tFormatOffset(buf, TSDB_OFFSET_LEN, &dataRsp.reqOffset);
-  tqInfo("tqPushEmptyDataRsp to consumer:0x%"PRIx64 " vgId:%d, offset:%s, reqId:0x%" PRIx64, req.consumerId, vgId, buf, req.reqId);
+  tqInfo("tqPushEmptyDataRsp to consumer:0x%" PRIx64 " vgId:%d, offset:%s, reqId:0x%" PRIx64, req.consumerId, vgId, buf,
+         req.reqId);
 
   tqSendDataRsp(pHandle, pHandle->msg, &req, &dataRsp, TMQ_MSG_TYPE__POLL_DATA_RSP, vgId);
   tDeleteMqDataRsp(&dataRsp);
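Besides the line wrapping, several of these log rewrites add a space between the format string and the PRIx64/PRId64 macros ("...:%" PRIx64 instead of "...:%"PRIx64). The macros from <inttypes.h> expand to string literals that are concatenated with the format string; the space is mainly a readability/clang-format consistency fix, but it also keeps the code valid if ever compiled as C++11 or later, where a string literal immediately followed by an identifier is parsed as a user-defined-literal suffix. A small self-contained illustration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  uint64_t consumerId = 0x1234abcdULL;
  int64_t  reqId = 42;

  /* "0x%" PRIx64 concatenates to something like "0x%" "llx", i.e. "0x%llx".
   * Writing "0x%"PRIx64 without the space is accepted by C compilers but is
   * rejected (invalid literal suffix) under C++11 and newer. */
  printf("consumer:0x%" PRIx64 " reqId:0x%" PRIx64 "\n", consumerId, (uint64_t)reqId);
  return 0;
}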
@@ -407,8 +410,8 @@ int32_t tqProcessSeekReq(STQ* pTq, SRpcMsg* pMsg) {
     goto end;
   }
 
-  //if consumer register to push manager, push empty to consumer to change vg status from TMQ_VG_STATUS__WAIT to TMQ_VG_STATUS__IDLE,
-  //otherwise poll data failed after seek.
+  // if consumer register to push manager, push empty to consumer to change vg status from TMQ_VG_STATUS__WAIT to
+  // TMQ_VG_STATUS__IDLE, otherwise poll data failed after seek.
   tqUnregisterPushHandle(pTq, pHandle);
   taosRUnLockLatch(&pTq->lock);
 
@@ -440,15 +443,15 @@ end:
 //
 //  STqHandle* pHandle = taosHashGet(pTq->pHandle, pOffset->subKey, strlen(pOffset->subKey));
 //  if (pHandle == NULL) {
-//    tqError("tmq seek: consumer:0x%" PRIx64 " vgId:%d subkey %s not found", vgOffset.consumerId, vgId, pOffset->subKey);
-//    terrno = TSDB_CODE_INVALID_MSG;
-//    return -1;
+//    tqError("tmq seek: consumer:0x%" PRIx64 " vgId:%d subkey %s not found", vgOffset.consumerId, vgId,
+//    pOffset->subKey); terrno = TSDB_CODE_INVALID_MSG; return -1;
 //  }
 //
 //  // 2. check consumer-vg assignment status
 //  taosRLockLatch(&pTq->lock);
 //  if (pHandle->consumerId != vgOffset.consumerId) {
-//    tqDebug("ERROR tmq seek: consumer:0x%" PRIx64 " vgId:%d, subkey %s, mismatch for saved handle consumer:0x%" PRIx64,
+//    tqDebug("ERROR tmq seek: consumer:0x%" PRIx64 " vgId:%d, subkey %s, mismatch for saved handle consumer:0x%"
+//    PRIx64,
 //            vgOffset.consumerId, vgId, pOffset->subKey, pHandle->consumerId);
 //    terrno = TSDB_CODE_TMQ_CONSUMER_MISMATCH;
 //    taosRUnLockLatch(&pTq->lock);
@@ -743,14 +746,16 @@ int32_t tqProcessVgWalInfoReq(STQ* pTq, SRpcMsg* pMsg) {
       }
 
       dataRsp.rspOffset.version = pOffset->val.version;
-      tqInfo("consumer:0x%" PRIx64 " vgId:%d subkey:%s get assignment from store:%"PRId64, consumerId, vgId, req.subKey, dataRsp.rspOffset.version);
+      tqInfo("consumer:0x%" PRIx64 " vgId:%d subkey:%s get assignment from store:%" PRId64, consumerId, vgId,
+             req.subKey, dataRsp.rspOffset.version);
     } else {
       if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEST) {
        dataRsp.rspOffset.version = sver;  // not consume yet, set the earliest position
      } else if (reqOffset.type == TMQ_OFFSET__RESET_LATEST) {
        dataRsp.rspOffset.version = ever;
      }
-      tqInfo("consumer:0x%" PRIx64 " vgId:%d subkey:%s get assignment from init:%"PRId64, consumerId, vgId, req.subKey, dataRsp.rspOffset.version);
+      tqInfo("consumer:0x%" PRIx64 " vgId:%d subkey:%s get assignment from init:%" PRId64, consumerId, vgId, req.subKey,
+             dataRsp.rspOffset.version);
     }
   } else {
     tqError("consumer:0x%" PRIx64 " vgId:%d subkey:%s invalid offset type:%d", consumerId, vgId, req.subKey,
@@ -883,7 +888,8 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
     if (pHandle->consumerId == req.newConsumerId) {  // do nothing
       tqInfo("vgId:%d no switch consumer:0x%" PRIx64 " remains, because redo wal log", req.vgId, req.newConsumerId);
     } else {
-      tqInfo("vgId:%d switch consumer from Id:0x%" PRIx64 " to Id:0x%" PRIx64, req.vgId, pHandle->consumerId, req.newConsumerId);
+      tqInfo("vgId:%d switch consumer from Id:0x%" PRIx64 " to Id:0x%" PRIx64, req.vgId, pHandle->consumerId,
+             req.newConsumerId);
       atomic_store_64(&pHandle->consumerId, req.newConsumerId);
       //    atomic_add_fetch_32(&pHandle->epoch, 1);
 
@@ -1364,11 +1370,13 @@ int32_t tqProcessTaskTransferStateReq(STQ* pTq, SRpcMsg* pMsg) {
   int32_t code = tDecodeStreamScanHistoryFinishReq(&decoder, &req);
   tDecoderClear(&decoder);
 
-  tqDebug("vgId:%d start to process transfer state msg, from s-task:0x%x", pTq->pStreamMeta->vgId, req.downstreamTaskId);
+  tqDebug("vgId:%d start to process transfer state msg, from s-task:0x%x", pTq->pStreamMeta->vgId,
+          req.downstreamTaskId);
 
   SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, req.downstreamTaskId);
   if (pTask == NULL) {
-    tqError("failed to find task:0x%x, it may have been dropped already. process transfer state failed", req.downstreamTaskId);
+    tqError("failed to find task:0x%x, it may have been dropped already. process transfer state failed",
+            req.downstreamTaskId);
     return -1;
   }
 
@@ -1442,7 +1450,8 @@ int32_t tqProcessTaskScanHistoryFinishRsp(STQ* pTq, SRpcMsg* pMsg) {
   } else {
     tqDebug(
         "s-task:%s scan-history finish rsp received from downstream task:0x%x, all downstream tasks rsp scan-history "
-        "completed msg", pTask->id.idStr, req.downstreamId);
+        "completed msg",
+        pTask->id.idStr, req.downstreamId);
     streamProcessScanHistoryFinishRsp(pTask);
   }
 
@@ -1610,7 +1619,8 @@ int32_t tqProcessTaskResumeImpl(STQ* pTq, SStreamTask* pTask, int64_t sversion,
             vgId, pTask->id.idStr, pTask->chkInfo.currentVer, sversion, pTask->status.schedStatus);
   }
 
-  if (level == TASK_LEVEL__SOURCE && pTask->info.fillHistory && pTask->status.taskStatus == TASK_STATUS__SCAN_HISTORY) {
+  if (level == TASK_LEVEL__SOURCE && pTask->info.fillHistory &&
+      pTask->status.taskStatus == TASK_STATUS__SCAN_HISTORY) {
     streamStartScanHistoryAsync(pTask, igUntreated);
   } else if (level == TASK_LEVEL__SOURCE && (taosQueueItemSize(pTask->inputQueue->queue) == 0)) {
     tqStartStreamTasks(pTq);
@@ -1631,7 +1641,8 @@ int32_t tqProcessTaskResumeReq(STQ* pTq, int64_t sversion, char* msg, int32_t ms
     return code;
   }
 
-  SStreamTask* pHistoryTask = streamMetaAcquireTask(pTq->pStreamMeta, pTask->historyTaskId.streamId, pTask->historyTaskId.taskId);
+  SStreamTask* pHistoryTask =
+      streamMetaAcquireTask(pTq->pStreamMeta, pTask->historyTaskId.streamId, pTask->historyTaskId.taskId);
   if (pHistoryTask) {
     code = tqProcessTaskResumeImpl(pTq, pHistoryTask, sversion, pReq->igUntreated);
   }
@@ -1697,7 +1708,6 @@ int32_t vnodeEnqueueStreamMsg(SVnode* pVnode, SRpcMsg* pMsg) {
     taosFreeQitem(pMsg);
     return 0;
   } else {
-
     tDeleteStreamDispatchReq(&req);
   }
 
@@ -88,6 +88,9 @@ _err:
 
 int tsdbClose(STsdb **pTsdb) {
   if (*pTsdb) {
+    STsdb *pdb = *pTsdb;
+    tsdbDebug("vgId:%d, tsdb is close at %s, days:%d, keep:%d,%d,%d", TD_VID(pdb->pVnode), pdb->path, pdb->keepCfg.days,
+              pdb->keepCfg.keep0, pdb->keepCfg.keep1, pdb->keepCfg.keep2);
     taosThreadRwlockWrlock(&(*pTsdb)->rwLock);
     tsdbMemTableDestroy((*pTsdb)->mem, true);
     (*pTsdb)->mem = NULL;
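The tsdbClose hunk snapshots the handle into a local (pdb) and logs the retention configuration before tearing the instance down under the write lock, with destroyed members set to NULL afterwards. A rough sketch of that destroy-under-write-lock idiom, with a hypothetical Store/MemTable pair standing in for the real tsdb types:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct MemTable { int rows; } MemTable;

typedef struct Store {
  pthread_rwlock_t rwLock;
  MemTable        *mem;
  int              keepDays;
} Store;

static void memTableDestroy(MemTable *t) { free(t); }

/* Mirrors the shape of tsdbClose: log the configuration first, then destroy
 * members while holding the write lock and NULL them out so stale readers
 * fail cleanly instead of touching freed memory. */
static int storeClose(Store **pStore) {
  if (*pStore) {
    Store *s = *pStore;
    fprintf(stderr, "store closing, keepDays:%d\n", s->keepDays);

    pthread_rwlock_wrlock(&s->rwLock);
    memTableDestroy(s->mem);
    s->mem = NULL;
    pthread_rwlock_unlock(&s->rwLock);

    pthread_rwlock_destroy(&s->rwLock);
    free(s);
    *pStore = NULL;
  }
  return 0;
}

int main(void) {
  Store *s = calloc(1, sizeof(*s));
  pthread_rwlock_init(&s->rwLock, NULL);
  s->mem = calloc(1, sizeof(MemTable));
  s->keepDays = 3650;
  return storeClose(&s);
}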
@@ -1175,7 +1175,7 @@ int32_t streamStateOpenBackendCf(void* backend, char* name, char** cfs, int32_t
 }
 int streamStateOpenBackend(void* backend, SStreamState* pState) {
   qInfo("start to open state %p on backend %p 0x%" PRIx64 "-%d", pState, backend, pState->streamId, pState->taskId);
-  taosAcquireRef(streamBackendId, pState->streamBackendRid);
+  //  taosAcquireRef(streamBackendId, pState->streamBackendRid);
   SBackendWrapper* handle = backend;
   SBackendCfWrapper* pBackendCfWrapper = taosMemoryCalloc(1, sizeof(SBackendCfWrapper));
   taosThreadMutexLock(&handle->cfMutex);
@@ -142,6 +142,7 @@ _err:
 }
 
 void streamMetaClose(SStreamMeta* pMeta) {
+  qDebug("start to close stream meta");
   tdbAbort(pMeta->db, pMeta->txn);
   tdbTbClose(pMeta->pTaskDb);
   tdbTbClose(pMeta->pCheckpointDb);
@@ -167,6 +168,7 @@ void streamMetaClose(SStreamMeta* pMeta) {
   taosArrayDestroy(pMeta->checkpointInUse);
 
   taosMemoryFree(pMeta);
+  qDebug("end to close stream meta");
 }
 
 int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask) {
@@ -128,7 +128,6 @@ SStreamState* streamStateOpen(char* path, void* pTask, bool specPath, int32_t sz
   if (uniqueId == NULL) {
     int code = streamStateOpenBackend(pMeta->streamBackend, pState);
     if (code == -1) {
-      taosReleaseRef(streamBackendId, pState->streamBackendRid);
       taosThreadMutexUnlock(&pMeta->backendMutex);
       taosMemoryFree(pState);
       return NULL;
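The two hunks above appear to go together: the taosAcquireRef call in streamStateOpenBackend is commented out, and the matching taosReleaseRef on the open-failure path in streamStateOpen is removed, presumably so the backend's reference count stays balanced. A schematic sketch of that pairing invariant, using a hypothetical Backend/backendAcquire/backendRelease trio rather than the actual taos reference API:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical reference-counted backend. */
typedef struct Backend {
  int refCount;
} Backend;

static void backendAcquire(Backend *b) { b->refCount++; }

static void backendRelease(Backend *b) {
  if (--b->refCount == 0) {
    fprintf(stderr, "last reference dropped, backend destroyed\n");
    free(b);
  }
}

/* The invariant preserved here: acquire and release are removed (or added)
 * as a pair. If open no longer takes a reference, the failure path must not
 * release one, otherwise the count is decremented twice. */
static int stateOpen(Backend *b, bool simulateFailure) {
  // backendAcquire(b);            /* acquire disabled together with ... */
  if (simulateFailure) {
    // backendRelease(b);          /* ... the matching release on failure */
    return -1;
  }
  return 0;
}

int main(void) {
  Backend *b = calloc(1, sizeof(*b));
  backendAcquire(b);               /* owner's reference */
  (void)stateOpen(b, true);        /* failed open must not touch the count */
  backendRelease(b);               /* owner drops its reference; count hits 0 */
  return 0;
}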
@@ -729,7 +728,8 @@ void streamStateFreeVal(void* val) {
 
 int32_t streamStateSessionPut(SStreamState* pState, const SSessionKey* key, const void* value, int32_t vLen) {
 #ifdef USE_ROCKSDB
-  qDebug("===stream===save skey:%" PRId64 ", ekey:%" PRId64 ", groupId:%" PRIu64, key->win.skey,key->win.ekey, key->groupId);
+  qDebug("===stream===save skey:%" PRId64 ", ekey:%" PRId64 ", groupId:%" PRIu64, key->win.skey, key->win.ekey,
+         key->groupId);
   return streamStateSessionPut_rocksdb(pState, key, value, vLen);
 #else
   SStateSessionKey sKey = {.key = *key, .opNum = pState->number};
@@ -763,7 +763,8 @@ int32_t streamStateSessionGet(SStreamState* pState, SSessionKey* key, void** pVa
 
 int32_t streamStateSessionDel(SStreamState* pState, const SSessionKey* key) {
 #ifdef USE_ROCKSDB
-  qDebug("===stream===delete skey:%" PRId64 ", ekey:%" PRId64 ", groupId:%" PRIu64, key->win.skey,key->win.ekey, key->groupId);
+  qDebug("===stream===delete skey:%" PRId64 ", ekey:%" PRId64 ", groupId:%" PRIu64, key->win.skey, key->win.ekey,
+         key->groupId);
   return streamStateSessionDel_rocksdb(pState, key);
 #else
   SStateSessionKey sKey = {.key = *key, .opNum = pState->number};
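streamStateSessionPut/Del above dispatch at compile time: when USE_ROCKSDB is defined the session key is logged and the call is forwarded to the *_rocksdb implementation, otherwise the path under #else runs. A condensed sketch of that pattern, with a hypothetical sessionPut and two stand-in backends:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define USE_ROCKSDB 1  /* toggle to exercise the alternate branch */

typedef struct SessionKey {
  int64_t  skey;
  int64_t  ekey;
  uint64_t groupId;
} SessionKey;

/* Hypothetical backends standing in for the rocksdb and in-memory stores. */
int sessionPutRocksdb(const SessionKey *k) { (void)k; return 0; }
int sessionPutMemory(const SessionKey *k)  { (void)k; return 0; }

/* Compile-time dispatch mirroring streamStateSessionPut: log the key,
 * then forward to whichever backend was compiled in. */
static int sessionPut(const SessionKey *key) {
#ifdef USE_ROCKSDB
  printf("save skey:%" PRId64 ", ekey:%" PRId64 ", groupId:%" PRIu64 "\n",
         key->skey, key->ekey, key->groupId);
  return sessionPutRocksdb(key);
#else
  return sessionPutMemory(key);
#endif
}

int main(void) {
  SessionKey k = {.skey = 1000, .ekey = 2000, .groupId = 7};
  return sessionPut(&k);
}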