commit deab8952e5
@@ -55,17 +55,6 @@ option(
 OFF
 )
 
-IF(${TD_WINDOWS})
-MESSAGE("Not build BDB on Windows")
-ELSE ()
-option(
-BUILD_WITH_BDB
-"If build with BerkleyDB"
-ON
-)
-
-ENDIF ()
-
 option(
 BUILD_WITH_LUCENE
 "If build with lucene"
@@ -63,9 +63,9 @@ if(${BUILD_WITH_UV})
 endif(${BUILD_WITH_UV})
 
 # bdb
-if(${BUILD_WITH_BDB})
-cat("${CMAKE_SUPPORT_DIR}/bdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
-endif(${BUILD_WITH_BDB})
+#if(${BUILD_WITH_BDB})
+#cat("${CMAKE_SUPPORT_DIR}/bdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
+#endif(${BUILD_WITH_BDB})
 
 # sqlite
 if(${BUILD_WITH_SQLITE})
@@ -7,9 +7,9 @@ if(${BUILD_WITH_LUCENE})
 add_subdirectory(lucene)
 endif(${BUILD_WITH_LUCENE})
 
-if(${BUILD_WITH_BDB})
-add_subdirectory(bdb)
-endif(${BUILD_WITH_BDB})
+#if(${BUILD_WITH_BDB})
+#add_subdirectory(bdb)
+#endif(${BUILD_WITH_BDB})
 
 if(${BUILD_WITH_SQLITE})
 add_subdirectory(sqlite)
@@ -2310,6 +2310,7 @@ typedef struct {
 char cgroup[TSDB_CGROUP_LEN];
 
 int64_t currentOffset;
+uint64_t reqId;
 char topic[TSDB_TOPIC_FNAME_LEN];
 } SMqPollReq;
 
@@ -1149,6 +1149,7 @@ SMqPollReq* tmqBuildConsumeReqImpl(tmq_t* tmq, int64_t blockingTime, SMqClientTo
 pReq->consumerId = tmq->consumerId;
 pReq->epoch = tmq->epoch;
 pReq->currentOffset = reqOffset;
+pReq->reqId = generateRequestId();
 
 pReq->head.vgId = htonl(pVg->vgId);
 pReq->head.contLen = htonl(sizeof(SMqPollReq));
@@ -1279,7 +1280,7 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t blockingTime) {
 .len = sizeof(SMqPollReq),
 .handle = NULL,
 };
-sendInfo->requestId = generateRequestId();
+sendInfo->requestId = pReq->reqId;
 sendInfo->requestObjRefId = 0;
 sendInfo->param = pParam;
 sendInfo->fp = tmqPollCb;
@@ -1288,7 +1289,7 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t blockingTime) {
 int64_t transporterId = 0;
 /*printf("send poll\n");*/
 atomic_add_fetch_32(&tmq->waitingRequest, 1);
-tscDebug("consumer %ld send poll: vg %d, epoch %d, req offset %ld", tmq->consumerId, pVg->vgId, tmq->epoch, pVg->currentOffset);
+tscDebug("consumer %ld send poll to %s : vg %d, epoch %d, req offset %ld, reqId %lu", tmq->consumerId, pTopic->topicName, pVg->vgId, tmq->epoch, pVg->currentOffset, pReq->reqId);
 /*printf("send vg %d %ld\n", pVg->vgId, pVg->currentOffset);*/
 asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &pVg->epSet, &transporterId, sendInfo);
 pVg->pollCnt++;
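
Note: the three hunks above thread one request id through the whole poll path. SMqPollReq gains a reqId field, tmqBuildConsumeReqImpl fills it once via generateRequestId(), tmqPollImpl reuses the same id for sendInfo->requestId instead of generating a second one, and the client debug line now prints both the topic and the id, so a single poll can be correlated across client and server logs. A minimal standalone sketch of the generate-once, reuse-everywhere pattern (the generator below is a hypothetical stand-in, not TDengine's actual generateRequestId):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    /* Hypothetical id generator: timestamp in the high bits, a process-wide
     * counter in the low bits, so concurrent polls get distinct ids. */
    static _Atomic uint64_t gSeq;

    static uint64_t genRequestId(void) {
      return ((uint64_t)time(NULL) << 32) | (atomic_fetch_add(&gSeq, 1) & 0xffffffffu);
    }

    typedef struct {
      int64_t  consumerId;
      uint64_t reqId; /* generated once, reused for transport and logs */
    } PollReq;        /* trimmed stand-in for SMqPollReq */

    int main(void) {
      PollReq req = {.consumerId = 1, .reqId = genRequestId()};
      /* the same id goes into the transport layer and every log line */
      printf("consumer %lld send poll, reqId %llu\n",
             (long long)req.consumerId, (unsigned long long)req.reqId);
      return 0;
    }
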
@@ -60,7 +60,7 @@ static int32_t mndProcessResetOffsetReq(SNodeMsg *pMsg);
 static int32_t mndPersistMqSetConnReq(SMnode *pMnode, STrans *pTrans, const SMqTopicObj *pTopic, const char *cgroup,
 const SMqConsumerEp *pConsumerEp);
 
-static int32_t mndPersistRebalanceMsg(SMnode *pMnode, STrans *pTrans, const SMqConsumerEp *pConsumerEp);
+static int32_t mndPersistRebalanceMsg(SMnode *pMnode, STrans *pTrans, const SMqConsumerEp *pConsumerEp, const char* topicName);
 static int32_t mndPersistCancelConnReq(SMnode *pMnode, STrans *pTrans, const SMqConsumerEp *pConsumerEp, const char* oldTopicName);
 
 int32_t mndInitSubscribe(SMnode *pMnode) {
@@ -102,12 +102,13 @@ static SMqSubscribeObj *mndCreateSubscription(SMnode *pMnode, const SMqTopicObj
 return pSub;
 }
 
-static int32_t mndBuildRebalanceMsg(void **pBuf, int32_t *pLen, const SMqConsumerEp *pConsumerEp) {
+static int32_t mndBuildRebalanceMsg(void **pBuf, int32_t *pLen, const SMqConsumerEp *pConsumerEp, const char* topicName) {
 SMqMVRebReq req = {
 .vgId = pConsumerEp->vgId,
 .oldConsumerId = pConsumerEp->oldConsumerId,
 .newConsumerId = pConsumerEp->consumerId,
 };
+req.topic = strdup(topicName);
 
 int32_t tlen = tEncodeSMqMVRebReq(NULL, &req);
 void *buf = taosMemoryMalloc(sizeof(SMsgHead) + tlen);
@@ -122,6 +123,7 @@ static int32_t mndBuildRebalanceMsg(void **pBuf, int32_t *pLen, const SMqConsume
 
 void *abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
 tEncodeSMqMVRebReq(&abuf, &req);
+taosMemoryFree(req.topic);
 
 *pBuf = buf;
 *pLen = tlen;
@@ -129,12 +131,12 @@ static int32_t mndBuildRebalanceMsg(void **pBuf, int32_t *pLen, const SMqConsume
 return 0;
 }
 
-static int32_t mndPersistRebalanceMsg(SMnode *pMnode, STrans *pTrans, const SMqConsumerEp *pConsumerEp) {
+static int32_t mndPersistRebalanceMsg(SMnode *pMnode, STrans *pTrans, const SMqConsumerEp *pConsumerEp, const char* topicName) {
 ASSERT(pConsumerEp->oldConsumerId != -1);
 
 void *buf;
 int32_t tlen;
-if (mndBuildRebalanceMsg(&buf, &tlen, pConsumerEp) < 0) {
+if (mndBuildRebalanceMsg(&buf, &tlen, pConsumerEp, topicName) < 0) {
 return -1;
 }
 
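
Note: mndBuildRebalanceMsg now carries the topic name inside the rebalance request. The pattern in the three hunks above is: copy the string into the request with strdup, run the encoder once with a NULL buffer to compute the length (tEncodeSMqMVRebReq(NULL, &req)), encode for real into the allocated buffer, then free the copy immediately, since after encoding the buffer owns the bytes. A standalone sketch of that size/encode/free sequence (the encoder and struct below are simplified stand-ins):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct {
      int   vgId;
      char *topic; /* heap copy, owned only until encode */
    } RebReq;      /* trimmed stand-in for SMqMVRebReq */

    /* Stand-in encoder: with buf == NULL it only reports the size needed,
     * mirroring the tEncodeSMqMVRebReq(NULL, &req) sizing call. */
    static int encodeRebReq(char *buf, const RebReq *req) {
      int len = (int)(sizeof req->vgId + strlen(req->topic) + 1);
      if (buf != NULL) {
        memcpy(buf, &req->vgId, sizeof req->vgId);
        memcpy(buf + sizeof req->vgId, req->topic, strlen(req->topic) + 1);
      }
      return len;
    }

    int main(void) {
      RebReq req = {.vgId = 2};
      req.topic = strdup("test_topic");      /* private copy for the request */
      int   tlen = encodeRebReq(NULL, &req); /* pass 1: size only */
      char *buf  = malloc((size_t)tlen);
      encodeRebReq(buf, &req);               /* pass 2: serialize */
      free(req.topic);                       /* encoded buffer owns it now */
      printf("encoded %d bytes\n", tlen);
      free(buf);
      return 0;
    }
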
@@ -502,10 +504,10 @@ static int32_t mndProcessDoRebalanceMsg(SNodeMsg *pMsg) {
 pConsumerEp->epoch = 0;
 taosArrayPush(pSubConsumer->vgInfo, pConsumerEp);
 
+char topic[TSDB_TOPIC_FNAME_LEN];
+char cgroup[TSDB_CGROUP_LEN];
+mndSplitSubscribeKey(pSub->key, topic, cgroup);
 if (pConsumerEp->oldConsumerId == -1) {
-char topic[TSDB_TOPIC_FNAME_LEN];
-char cgroup[TSDB_CGROUP_LEN];
-mndSplitSubscribeKey(pSub->key, topic, cgroup);
 SMqTopicObj *pTopic = mndAcquireTopic(pMnode, topic);
 
 mInfo("mq set conn: assign vgroup %d of topic %s to consumer %" PRId64 " cgroup: %s", pConsumerEp->vgId,
@@ -517,7 +519,7 @@ static int32_t mndProcessDoRebalanceMsg(SNodeMsg *pMsg) {
 mInfo("mq rebalance: assign vgroup %d, from consumer %" PRId64 " to consumer %" PRId64 "",
 pConsumerEp->vgId, pConsumerEp->oldConsumerId, pConsumerEp->consumerId);
 
-mndPersistRebalanceMsg(pMnode, pTrans, pConsumerEp);
+mndPersistRebalanceMsg(pMnode, pTrans, pConsumerEp, topic);
 }
 }
 }
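
Note: the topic/cgroup split is hoisted above the if because the rebalance path below now needs topic too, for the extra mndPersistRebalanceMsg argument; previously the names were only split inside the oldConsumerId == -1 branch. For illustration, a hedged sketch of what a subscribe-key splitter does, assuming the key is two names joined by a separator (the real format, argument order, and buffer lengths are defined by mndSplitSubscribeKey in mndSubscribe.c):

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical splitter: text before ':' goes to one buffer, text after
     * it to the other. 193 stands in for the TSDB_*_LEN buffer sizes. */
    static int splitSubscribeKey(const char *key, char *topic, char *cgroup) {
      const char *sep = strchr(key, ':');
      if (sep == NULL) return -1;
      size_t n = (size_t)(sep - key);
      memcpy(cgroup, key, n);
      cgroup[n] = '\0';
      strcpy(topic, sep + 1);
      return 0;
    }

    int main(void) {
      char topic[193], cgroup[193];
      if (splitSubscribeKey("my_group:my_topic", topic, cgroup) == 0) {
        printf("cgroup=%s topic=%s\n", cgroup, topic);
      }
      return 0;
    }
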
@@ -849,7 +851,7 @@ static int32_t mndProcessSubscribeReq(SNodeMsg *pMsg) {
 pConsumerEp->consumerId);
 mndPersistMqSetConnReq(pMnode, pTrans, pTopic, cgroup, pConsumerEp);
 } else {
-mndPersistRebalanceMsg(pMnode, pTrans, pConsumerEp);
+mndPersistRebalanceMsg(pMnode, pTrans, pConsumerEp, newTopicName);
 }
 // to trigger rebalance at once, do not set status active
 /*atomic_store_32(&pConsumer->status, MQ_CONSUMER_STATUS__ACTIVE);*/
@@ -68,7 +68,7 @@ target_link_libraries(
 PUBLIC executor
 PUBLIC scheduler
 PUBLIC tdb
-PUBLIC bdb
+#PUBLIC bdb
 PUBLIC transport
 PUBLIC stream
 )
@@ -316,7 +316,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
 return 0;
 }
 
-vDebug("poll topic %s from consumer %ld (epoch %d)", pTopic->topicName, consumerId, pReq->epoch);
+vDebug("poll topic %s from consumer %ld (epoch %d) %s", pTopic->topicName, consumerId, pReq->epoch, pTopic->topicName);
 
 rsp.reqOffset = pReq->currentOffset;
 rsp.skipLogNum = 0;
@@ -334,12 +334,10 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
 // TODO: no more log, set timer to wait blocking time
 // if data inserted during waiting, launch query and
 // response to user
-vDebug("tmq poll: consumer %ld (epoch %d) vg %d offset %ld, no more log to return", consumerId, pReq->epoch,
-pTq->pVnode->vgId, fetchOffset);
+vDebug("tmq poll: consumer %ld (epoch %d) vg %d offset %ld, no more log to return", consumerId, pReq->epoch, pTq->pVnode->vgId, fetchOffset);
 break;
 }
-vDebug("tmq poll: consumer %ld (epoch %d) iter log, vg %d offset %ld msgType %d", consumerId, pReq->epoch,
-pTq->pVnode->vgId, fetchOffset, pHead->msgType);
+vDebug("tmq poll: consumer %ld (epoch %d) iter log, vg %d offset %ld msgType %d", consumerId, pReq->epoch, pTq->pVnode->vgId, fetchOffset, pHead->msgType);
 /*int8_t pos = fetchOffset % TQ_BUFFER_SIZE;*/
 /*pHead = pTopic->pReadhandle->pHead;*/
 if (pHead->msgType == TDMT_VND_SUBMIT) {
@@ -363,8 +361,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
 }
 
 if (taosArrayGetSize(pRes) == 0) {
-vDebug("tmq poll: consumer %ld (epoch %d) iter log, vg %d skip log %ld since not wanted", consumerId,
-pReq->epoch, pTq->pVnode->vgId, fetchOffset);
+vDebug("tmq poll: consumer %ld (epoch %d) iter log, vg %d skip log %ld since not wanted", consumerId, pReq->epoch, pTq->pVnode->vgId, fetchOffset);
 fetchOffset++;
 rsp.skipLogNum++;
 taosArrayDestroy(pRes);
@@ -393,8 +390,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
 pMsg->pCont = buf;
 pMsg->contLen = tlen;
 pMsg->code = 0;
-vDebug("vg %d offset %ld msgType %d from consumer %ld (epoch %d) actual rsp", pTq->pVnode->vgId, fetchOffset,
-pHead->msgType, consumerId, pReq->epoch);
+vDebug("vg %d offset %ld msgType %d from consumer %ld (epoch %d) actual rsp", pTq->pVnode->vgId, fetchOffset, pHead->msgType, consumerId, pReq->epoch);
 tmsgSendRsp(pMsg);
 taosMemoryFree(pHead);
 return 0;
@@ -425,8 +421,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
 pMsg->contLen = tlen;
 pMsg->code = 0;
 tmsgSendRsp(pMsg);
-vDebug("vg %d offset %ld from consumer %ld (epoch %d) not rsp", pTq->pVnode->vgId, fetchOffset, consumerId,
-pReq->epoch);
+vDebug("vg %d offset %ld from consumer %ld (epoch %d) not rsp", pTq->pVnode->vgId, fetchOffset, consumerId, pReq->epoch);
 /*}*/
 
 return 0;
@@ -437,7 +432,7 @@ int32_t tqProcessRebReq(STQ* pTq, char* msg) {
 terrno = TSDB_CODE_SUCCESS;
 tDecodeSMqMVRebReq(msg, &req);
 
-vDebug("vg %d set from consumer %ld to consumer %ld", req.vgId, req.oldConsumerId, req.newConsumerId);
+vDebug("vg %d set from consumer %ld to consumer %ld", req.vgId, req.oldConsumerId ,req.newConsumerId);
 STqConsumer* pConsumer = tqHandleGet(pTq->tqMeta, req.oldConsumerId);
 ASSERT(pConsumer);
 ASSERT(pConsumer->consumerId == req.oldConsumerId);
@@ -167,8 +167,8 @@ SArray* tqRetrieveDataBlock(STqReadHandle* pHandle) {
 if (!tdSTSRowIterNext(&iter, pColData->info.colId, pColData->info.type, &sVal)) {
 break;
 }
-if (colDataAppend(pColData, curRow, sVal.val, false) < 0) {
-/*if (colDataAppend(pColData, curRow, sVal.val, sVal.valType == TD_VTYPE_NULL) < 0) {*/
+/*if (colDataAppend(pColData, curRow, sVal.val, false) < 0) {*/
+if (colDataAppend(pColData, curRow, sVal.val, sVal.valType == TD_VTYPE_NULL) < 0) {
 taosArrayDestroyEx(pArray, (void (*)(void*))tDeleteSSDataBlock);
 return NULL;
 }
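
Note: this hunk is a behavior fix, and the commented pair records the old and new forms. Passing a hard-coded false told colDataAppend the cell is never NULL, so NULL values in an incoming row were appended as if they held data; deriving the flag from sVal.valType == TD_VTYPE_NULL lets NULLs survive tqRetrieveDataBlock. A simplified standalone sketch of why the caller-supplied flag matters (the column struct and append function are stand-ins):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum { VTYPE_NORM = 0, VTYPE_NULL = 1 }; /* stand-in for TD_VTYPE_* */

    typedef struct {
      int32_t val[8];
      bool    isNull[8]; /* per-row null flags, simplified */
    } Col;

    /* Simplified colDataAppend: only the caller knows whether the cell is
     * NULL, so the flag must be derived from the row, not hard-coded. */
    static void appendCell(Col *c, int row, const int32_t *v, bool isNull) {
      c->isNull[row] = isNull;
      if (!isNull) c->val[row] = *v;
    }

    int main(void) {
      Col     c = {0};
      int32_t v = 42;
      int     vtype = VTYPE_NULL;                 /* this row's cell is NULL */
      appendCell(&c, 0, &v, false);               /* a real value */
      appendCell(&c, 1, &v, vtype == VTYPE_NULL); /* flag derived per cell */
      printf("row0 val=%d null=%d, row1 null=%d\n",
             c.val[0], (int)c.isNull[0], (int)c.isNull[1]);
      return 0;
    }
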
@@ -944,8 +944,8 @@ void transSendRequest(void* shandle, const char* ip, uint32_t port, STransMsg* p
 
 SCliThrdObj* thrd = ((SCliObj*)pTransInst->tcphandle)->pThreadObj[index];
 
-tDebug("send request at thread:%d %p, dst: %s:%d", index, pMsg, ip, port);
-transSendAsync(thrd->asyncPool, &(cliMsg->q));
+tDebug("send request at thread:%d %p, dst: %s:%d, app:%p", index, pMsg, ip, port, pMsg->ahandle);
+ASSERT(transSendAsync(thrd->asyncPool, &(cliMsg->q)) == 0);
 }
 
 void transSendRecv(void* shandle, const char* ip, uint32_t port, STransMsg* pReq, STransMsg* pRsp) {
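
Note: the send call is now wrapped in ASSERT so a failed enqueue is caught in debug builds. One thing worth checking against ASSERT's definition: if it expands to the standard assert(), the entire expression, including the transSendAsync call itself, is compiled out when NDEBUG is defined, and release builds would silently stop sending. Capturing the return value first keeps the side effect in every build; a minimal demonstration with plain assert():

    #include <assert.h>
    #include <stdio.h>

    static int enqueue(int x) {
      printf("enqueued %d\n", x);
      return 0;
    }

    int main(void) {
      /* Risky: under NDEBUG, assert() discards its argument, so this
       * enqueue would never run in a release build. */
      assert(enqueue(1) == 0);

      /* Safe: the call always happens; only the check can be compiled out. */
      int rc = enqueue(2);
      assert(rc == 0);
      (void)rc;
      return 0;
    }
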
@@ -274,17 +274,11 @@ int main(int32_t argc, char *argv[]) {
 
 loop_consume(tmq);
 
-err = tmq_unsubscribe(tmq);
-ASSERT(err == TMQ_RESP_ERR__SUCCESS);
-
-
-#if 0
 err = tmq_unsubscribe(tmq);
 if (err) {
 printf("tmq_unsubscribe() fail, reason: %s\n", tmq_err2str(err));
 exit(-1);
 }
-#endif
 
 return 0;
 }