Merge pull request #24775 from taosdata/opti/TS-4480
opti: improve consume performance when all data is written as "insert into ... using"
commit 980e1b83be
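(Context for the diff below: the workload this PR optimizes is TMQ consumption of WAL batches written entirely through auto-create inserts, i.e. statements of the form below, taken verbatim from the test at the bottom of this diff:

    create table st(ts timestamp, i int) tags(t int)
    insert into t1 using st tags(1) values(now, 1) (now+1s, 2)
    insert into t2 using st tags(2) values(now, 1) (now+1s, 2)

Before this patch the poll loop cut a response every time such a batch auto-created a table; the hunks in extractDataAndRspForDbStbSubscribe near the end of the diff remove that early flush.)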
@@ -31,8 +31,7 @@ static char* buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* sch
   char* string = NULL;
   cJSON* json = cJSON_CreateObject();
   if (json == NULL) {
-    uError("create json object failed")
-    return NULL;
+    uError("create json object failed") return NULL;
   }
   cJSON* type = cJSON_CreateString("create");
   cJSON_AddItemToObject(json, "type", type);
@@ -131,7 +130,8 @@ static char* buildAlterSTableJson(void* alterData, int32_t alterDataLen) {
   cJSON* colType = cJSON_CreateNumber(field->type);
   cJSON_AddItemToObject(json, "colType", colType);

-  if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_VARBINARY || field->type == TSDB_DATA_TYPE_GEOMETRY) {
+  if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_VARBINARY ||
+      field->type == TSDB_DATA_TYPE_GEOMETRY) {
     int32_t length = field->bytes - VARSTR_HEADER_SIZE;
     cJSON* cbytes = cJSON_CreateNumber(length);
     cJSON_AddItemToObject(json, "colLength", cbytes);
@@ -156,7 +156,8 @@ static char* buildAlterSTableJson(void* alterData, int32_t alterDataLen) {
   cJSON_AddItemToObject(json, "colName", colName);
   cJSON* colType = cJSON_CreateNumber(field->type);
   cJSON_AddItemToObject(json, "colType", colType);
-  if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_VARBINARY || field->type == TSDB_DATA_TYPE_GEOMETRY) {
+  if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_VARBINARY ||
+      field->type == TSDB_DATA_TYPE_GEOMETRY) {
     int32_t length = field->bytes - VARSTR_HEADER_SIZE;
     cJSON* cbytes = cJSON_CreateNumber(length);
     cJSON_AddItemToObject(json, "colLength", cbytes);
@@ -469,7 +470,8 @@ static char* processAlterTable(SMqMetaRsp* metaRsp) {
   cJSON* colType = cJSON_CreateNumber(vAlterTbReq.type);
   cJSON_AddItemToObject(json, "colType", colType);

-  if (vAlterTbReq.type == TSDB_DATA_TYPE_BINARY || vAlterTbReq.type == TSDB_DATA_TYPE_VARBINARY || vAlterTbReq.type == TSDB_DATA_TYPE_GEOMETRY) {
+  if (vAlterTbReq.type == TSDB_DATA_TYPE_BINARY || vAlterTbReq.type == TSDB_DATA_TYPE_VARBINARY ||
+      vAlterTbReq.type == TSDB_DATA_TYPE_GEOMETRY) {
    int32_t length = vAlterTbReq.bytes - VARSTR_HEADER_SIZE;
    cJSON* cbytes = cJSON_CreateNumber(length);
    cJSON_AddItemToObject(json, "colLength", cbytes);
@@ -490,7 +492,8 @@ static char* processAlterTable(SMqMetaRsp* metaRsp) {
   cJSON_AddItemToObject(json, "colName", colName);
   cJSON* colType = cJSON_CreateNumber(vAlterTbReq.colModType);
   cJSON_AddItemToObject(json, "colType", colType);
-  if (vAlterTbReq.colModType == TSDB_DATA_TYPE_BINARY || vAlterTbReq.colModType == TSDB_DATA_TYPE_VARBINARY || vAlterTbReq.colModType == TSDB_DATA_TYPE_GEOMETRY) {
+  if (vAlterTbReq.colModType == TSDB_DATA_TYPE_BINARY || vAlterTbReq.colModType == TSDB_DATA_TYPE_VARBINARY ||
+      vAlterTbReq.colModType == TSDB_DATA_TYPE_GEOMETRY) {
    int32_t length = vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE;
    cJSON* cbytes = cJSON_CreateNumber(length);
    cJSON_AddItemToObject(json, "colLength", cbytes);
@@ -731,8 +734,8 @@ static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) {
   pReq.source = TD_REQ_FROM_TAOX;
   pReq.igExists = true;

-  uDebug(LOG_ID_TAG" create stable name:%s suid:%" PRId64 " processSuid:%" PRId64,
-         LOG_ID_VALUE, req.name, req.suid, pReq.suid);
+  uDebug(LOG_ID_TAG " create stable name:%s suid:%" PRId64 " processSuid:%" PRId64, LOG_ID_VALUE, req.name, req.suid,
+         pReq.suid);
   STscObj* pTscObj = pRequest->pTscObj;
   SName tableName;
   tNameExtractFullName(toName(pTscObj->acctId, pRequest->pDb, req.name, &tableName), pReq.name);
@@ -835,8 +838,8 @@ static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) {
   pReq.source = TD_REQ_FROM_TAOX;
   // pReq.suid = processSuid(req.suid, pRequest->pDb);

-  uDebug(LOG_ID_TAG" drop stable name:%s suid:%" PRId64 " new suid:%" PRId64,
-         LOG_ID_VALUE, req.name, req.suid, pReq.suid);
+  uDebug(LOG_ID_TAG " drop stable name:%s suid:%" PRId64 " new suid:%" PRId64, LOG_ID_VALUE, req.name, req.suid,
+         pReq.suid);
   STscObj* pTscObj = pRequest->pTscObj;
   SName tableName = {0};
   tNameExtractFullName(toName(pTscObj->acctId, pRequest->pDb, req.name, &tableName), pReq.name);
@@ -1142,7 +1145,8 @@ static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) {
   tb_uid_t oldSuid = pDropReq->suid;
   pDropReq->suid = pTableMeta->suid;
   taosMemoryFreeClear(pTableMeta);
-  uDebug(LOG_ID_TAG" drop table name:%s suid:%" PRId64 " new suid:%" PRId64, LOG_ID_VALUE, pDropReq->name, oldSuid, pDropReq->suid);
+  uDebug(LOG_ID_TAG " drop table name:%s suid:%" PRId64 " new suid:%" PRId64, LOG_ID_VALUE, pDropReq->name, oldSuid,
+         pDropReq->suid);

   taosArrayPush(pRequest->tableList, &pName);
   SVgroupDropTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId));
@@ -1410,8 +1414,8 @@ int taos_write_raw_block_with_fields_with_reqid(TAOS *taos, int rows, char *pDat
     return terrno;
   }

-  uDebug(LOG_ID_TAG " write raw block with field, rows:%d, pData:%p, tbname:%s, fields:%p, numFields:%d",
-         LOG_ID_VALUE, rows, pData, tbname, fields, numFields);
+  uDebug(LOG_ID_TAG " write raw block with field, rows:%d, pData:%p, tbname:%s, fields:%p, numFields:%d", LOG_ID_VALUE,
+         rows, pData, tbname, fields, numFields);

   pRequest->syncQuery = true;
   if (!pRequest->pDb) {
@@ -252,7 +252,8 @@ typedef struct SSyncCommitInfo {
 static int32_t syncAskEp(tmq_t* tmq);
 static int32_t makeTopicVgroupKey(char* dst, const char* topicName, int32_t vg);
 static int32_t tmqCommitDone(SMqCommitCbParamSet* pParamSet);
-static int32_t doSendCommitMsg(tmq_t* tmq, int32_t vgId, SEpSet* epSet, STqOffsetVal* offset, const char* pTopicName, SMqCommitCbParamSet* pParamSet);
+static int32_t doSendCommitMsg(tmq_t* tmq, int32_t vgId, SEpSet* epSet, STqOffsetVal* offset, const char* pTopicName,
+                               SMqCommitCbParamSet* pParamSet);
 static void commitRspCountDown(SMqCommitCbParamSet* pParamSet, int64_t consumerId, const char* pTopic, int32_t vgId);
 static void askEp(tmq_t* pTmq, void* param, bool sync, bool updateEpset);

@@ -439,7 +440,8 @@ static int32_t tmqCommitCb(void* param, SDataBuf* pBuf, int32_t code) {
   return 0;
 }

-static int32_t doSendCommitMsg(tmq_t* tmq, int32_t vgId, SEpSet* epSet, STqOffsetVal* offset, const char* pTopicName, SMqCommitCbParamSet* pParamSet) {
+static int32_t doSendCommitMsg(tmq_t* tmq, int32_t vgId, SEpSet* epSet, STqOffsetVal* offset, const char* pTopicName,
+                               SMqCommitCbParamSet* pParamSet) {
   SMqVgOffset pOffset = {0};

   pOffset.consumerId = tmq->consumerId;
@@ -527,7 +529,8 @@ static SMqClientTopic* getTopicByName(tmq_t* tmq, const char* pTopicName) {
   return NULL;
 }

-static SMqCommitCbParamSet* prepareCommitCbParamSet(tmq_t* tmq, tmq_commit_cb* pCommitFp, void* userParam, int32_t rspNum){
+static SMqCommitCbParamSet* prepareCommitCbParamSet(tmq_t* tmq, tmq_commit_cb* pCommitFp, void* userParam,
+                                                    int32_t rspNum) {
   SMqCommitCbParamSet* pParamSet = taosMemoryCalloc(1, sizeof(SMqCommitCbParamSet));
   if (pParamSet == NULL) {
     return NULL;
@@ -561,7 +564,8 @@ static int32_t getClientVg(tmq_t* tmq, char* pTopicName, int32_t vgId, SMqClient
   return *pVg == NULL ? TSDB_CODE_TMQ_INVALID_VGID : TSDB_CODE_SUCCESS;
 }

-static int32_t asyncCommitOffset(tmq_t* tmq, char* pTopicName, int32_t vgId, STqOffsetVal* offsetVal, tmq_commit_cb* pCommitFp, void* userParam) {
+static int32_t asyncCommitOffset(tmq_t* tmq, char* pTopicName, int32_t vgId, STqOffsetVal* offsetVal,
+                                 tmq_commit_cb* pCommitFp, void* userParam) {
   tscInfo("consumer:0x%" PRIx64 " do manual commit offset for %s, vgId:%d", tmq->consumerId, pTopicName, vgId);
   taosRLockLatch(&tmq->lock);
   SMqClientVg* pVg = NULL;
@@ -662,11 +666,13 @@ static void asyncCommitAllOffsets(tmq_t* tmq, tmq_commit_cb* pCommitFp, void* us
     SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i);
     int32_t numOfVgroups = taosArrayGetSize(pTopic->vgs);

-    tscInfo("consumer:0x%" PRIx64 " commit offset for topics:%s, numOfVgs:%d", tmq->consumerId, pTopic->topicName, numOfVgroups);
+    tscInfo("consumer:0x%" PRIx64 " commit offset for topics:%s, numOfVgs:%d", tmq->consumerId, pTopic->topicName,
+            numOfVgroups);
     for (int32_t j = 0; j < numOfVgroups; j++) {
       SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j);

-      if (pVg->offsetInfo.endOffset.type > 0 && !tOffsetEqual(&pVg->offsetInfo.endOffset, &pVg->offsetInfo.committedOffset)) {
+      if (pVg->offsetInfo.endOffset.type > 0 &&
+          !tOffsetEqual(&pVg->offsetInfo.endOffset, &pVg->offsetInfo.committedOffset)) {
         char offsetBuf[TSDB_OFFSET_LEN] = {0};
         tFormatOffset(offsetBuf, tListLen(offsetBuf), &pVg->offsetInfo.endOffset);

@@ -675,12 +681,15 @@ static void asyncCommitAllOffsets(tmq_t* tmq, tmq_commit_cb* pCommitFp, void* us

        code = doSendCommitMsg(tmq, pVg->vgId, &pVg->epSet, &pVg->offsetInfo.endOffset, pTopic->topicName, pParamSet);
        if (code != TSDB_CODE_SUCCESS) {
-         tscError("consumer:0x%" PRIx64 " topic:%s on vgId:%d end commit msg failed, send offset:%s committed:%s, code:%s ordinal:%d/%d",
-                  tmq->consumerId, pTopic->topicName, pVg->vgId, offsetBuf, commitBuf, tstrerror(terrno), j + 1, numOfVgroups);
+         tscError("consumer:0x%" PRIx64
+                  " topic:%s on vgId:%d end commit msg failed, send offset:%s committed:%s, code:%s ordinal:%d/%d",
+                  tmq->consumerId, pTopic->topicName, pVg->vgId, offsetBuf, commitBuf, tstrerror(terrno), j + 1,
+                  numOfVgroups);
          continue;
        }

-       tscInfo("consumer:0x%" PRIx64 " topic:%s on vgId:%d send commit msg success, send offset:%s committed:%s, ordinal:%d/%d",
+       tscInfo("consumer:0x%" PRIx64
+               " topic:%s on vgId:%d send commit msg success, send offset:%s committed:%s, ordinal:%d/%d",
                tmq->consumerId, pTopic->topicName, pVg->vgId, offsetBuf, commitBuf, j + 1, numOfVgroups);
        pVg->offsetInfo.committedOffset = pVg->offsetInfo.endOffset;
      } else {
@@ -691,7 +700,8 @@ static void asyncCommitAllOffsets(tmq_t* tmq, tmq_commit_cb* pCommitFp, void* us
   }
   taosRUnLockLatch(&tmq->lock);

-  tscInfo("consumer:0x%" PRIx64 " total commit:%d for %d topics", tmq->consumerId, pParamSet->waitingRspNum - 1, numOfTopics);
+  tscInfo("consumer:0x%" PRIx64 " total commit:%d for %d topics", tmq->consumerId, pParamSet->waitingRspNum - 1,
+          numOfTopics);

   // request is sent
   if (pParamSet->waitingRspNum != 1) {
@@ -1060,7 +1070,8 @@ static void tmqMgmtInit(void) {
   }
 }

-#define SET_ERROR_MSG(MSG) if(errstr!=NULL)snprintf(errstr,errstrLen,MSG);
+#define SET_ERROR_MSG(MSG) \
+  if (errstr != NULL) snprintf(errstr, errstrLen, MSG);
 tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
   if (conf == NULL) {
     SET_ERROR_MSG("configure is null")
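(Note: the reformatted SET_ERROR_MSG macro is behavior-preserving; written out here for illustration, the call in this hunk still expands to a guarded snprintf:

    SET_ERROR_MSG("configure is null")
    // -> if (errstr != NULL) snprintf(errstr, errstrLen, "configure is null");
)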
@@ -1154,7 +1165,8 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
   tFormatOffset(buf, tListLen(buf), &offset);
   tscInfo("consumer:0x%" PRIx64 " is setup, refId:%" PRId64
           ", groupId:%s, snapshot:%d, autoCommit:%d, commitInterval:%dms, offset:%s",
-          pTmq->consumerId, pTmq->refId, pTmq->groupId, pTmq->useSnapshot, pTmq->autoCommit, pTmq->autoCommitInterval, buf);
+          pTmq->consumerId, pTmq->refId, pTmq->groupId, pTmq->useSnapshot, pTmq->autoCommit, pTmq->autoCommitInterval,
+          buf);

   return pTmq;

@@ -1456,7 +1468,8 @@ static void initClientTopicFromRsp(SMqClientTopic* pTopic, SMqSubTopicEp* pTopic
     STqOffsetVal offsetNew = {0};
     offsetNew.type = tmq->resetOffsetCfg;

-    tscInfo("consumer:0x%" PRIx64 ", update topic:%s, new numOfVgs:%d, num:%d, port:%d", tmq->consumerId, pTopic->topicName, vgNumGet, pVgEp->epSet.numOfEps,pVgEp->epSet.eps[pVgEp->epSet.inUse].port);
+    tscInfo("consumer:0x%" PRIx64 ", update topic:%s, new numOfVgs:%d, num:%d, port:%d", tmq->consumerId,
+            pTopic->topicName, vgNumGet, pVgEp->epSet.numOfEps, pVgEp->epSet.eps[pVgEp->epSet.inUse].port);

     SMqClientVg clientVg = {
         .pollCnt = 0,
@@ -1495,8 +1508,8 @@ static bool doUpdateLocalEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp)

   int32_t topicNumGet = taosArrayGetSize(pRsp->topics);
   if (epoch < tmq->epoch || (epoch == tmq->epoch && topicNumGet == 0)) {
-    tscInfo("consumer:0x%" PRIx64 " no update ep epoch from %d to epoch %d, incoming topics:%d",
-            tmq->consumerId, tmq->epoch, epoch, topicNumGet);
+    tscInfo("consumer:0x%" PRIx64 " no update ep epoch from %d to epoch %d, incoming topics:%d", tmq->consumerId,
+            tmq->epoch, epoch, topicNumGet);
     if (atomic_load_8(&tmq->status) == TMQ_CONSUMER_STATUS__RECOVER) {
       atomic_store_8(&tmq->status, TMQ_CONSUMER_STATUS__READY);
     }
@@ -1535,8 +1548,10 @@ static bool doUpdateLocalEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp)
       tscInfo("consumer:0x%" PRIx64 ", epoch:%d vgId:%d vgKey:%s, offset:%s", tmq->consumerId, epoch, pVgCur->vgId,
               vgKey, buf);

-      SVgroupSaveInfo info = {.currentOffset = pVgCur->offsetInfo.endOffset, .seekOffset = pVgCur->offsetInfo.beginOffset,
-                              .commitOffset = pVgCur->offsetInfo.committedOffset, .numOfRows = pVgCur->numOfRows,
+      SVgroupSaveInfo info = {.currentOffset = pVgCur->offsetInfo.endOffset,
+                              .seekOffset = pVgCur->offsetInfo.beginOffset,
+                              .commitOffset = pVgCur->offsetInfo.committedOffset,
+                              .numOfRows = pVgCur->numOfRows,
                               .vgStatus = pVgCur->vgStatus};
       taosHashPut(pVgOffsetHashMap, vgKey, strlen(vgKey), &info, sizeof(SVgroupSaveInfo));
     }
@@ -1709,8 +1724,8 @@ static int32_t doTmqPollImpl(tmq_t* pTmq, SMqClientTopic* pTopic, SMqClientVg* p
   char offsetFormatBuf[TSDB_OFFSET_LEN] = {0};
   tFormatOffset(offsetFormatBuf, tListLen(offsetFormatBuf), &pVg->offsetInfo.endOffset);
   code = asyncSendMsgToServer(pTmq->pTscObj->pAppInfo->pTransporter, &pVg->epSet, &transporterId, sendInfo);
-  tscDebug("consumer:0x%" PRIx64 " send poll to %s vgId:%d, code:%d, epoch %d, req:%s, reqId:0x%" PRIx64, pTmq->consumerId,
-           pTopic->topicName, pVg->vgId, code, pTmq->epoch, offsetFormatBuf, req.reqId);
+  tscDebug("consumer:0x%" PRIx64 " send poll to %s vgId:%d, code:%d, epoch %d, req:%s, reqId:0x%" PRIx64,
+           pTmq->consumerId, pTopic->topicName, pVg->vgId, code, pTmq->epoch, offsetFormatBuf, req.reqId);
   if (code != 0) {
     goto FAIL;
   }
@@ -1751,9 +1766,10 @@ static int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
       continue;
     }

-    if (tmq->replayEnable && taosGetTimestampMs() - pVg->blockReceiveTs < pVg->blockSleepForReplay) {  // less than 10ms
-      tscTrace("consumer:0x%" PRIx64 " epoch %d, vgId:%d idle for %" PRId64 "ms before start next poll when replay", tmq->consumerId,
-               tmq->epoch, pVg->vgId, pVg->blockSleepForReplay);
+    if (tmq->replayEnable &&
+        taosGetTimestampMs() - pVg->blockReceiveTs < pVg->blockSleepForReplay) {  // less than 10ms
+      tscTrace("consumer:0x%" PRIx64 " epoch %d, vgId:%d idle for %" PRId64 "ms before start next poll when replay",
+               tmq->consumerId, tmq->epoch, pVg->vgId, pVg->blockSleepForReplay);
       continue;
     }

@@ -1779,7 +1795,8 @@ end:
   return code;
 }

-static void updateVgInfo(SMqClientVg* pVg, STqOffsetVal* reqOffset, STqOffsetVal* rspOffset, int64_t sver, int64_t ever, int64_t consumerId, bool hasData){
+static void updateVgInfo(SMqClientVg* pVg, STqOffsetVal* reqOffset, STqOffsetVal* rspOffset, int64_t sver, int64_t ever,
+                         int64_t consumerId, bool hasData) {
   if (!pVg->seekUpdated) {
     tscDebug("consumer:0x%" PRIx64 " local offset is update, since seekupdate not set", consumerId);
     if (hasData) pVg->offsetInfo.beginOffset = *reqOffset;
@@ -1820,14 +1837,16 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout) {
       tscDebug("consumer:0x%" PRIx64 " wait for the re-balance, set status to be RECOVER", tmq->consumerId);
     } else if (pRspWrapper->code == TSDB_CODE_TQ_NO_COMMITTED_OFFSET) {
       terrno = pRspWrapper->code;
-      tscError("consumer:0x%" PRIx64 " unexpected rsp from poll, code:%s", tmq->consumerId, tstrerror(pRspWrapper->code));
+      tscError("consumer:0x%" PRIx64 " unexpected rsp from poll, code:%s", tmq->consumerId,
+               tstrerror(pRspWrapper->code));
       taosFreeQitem(pRspWrapper);
       return NULL;
     } else {
       if (pRspWrapper->code == TSDB_CODE_VND_INVALID_VGROUP_ID) {  // for vnode transform
         askEp(tmq, NULL, false, true);
       }
-      tscError("consumer:0x%" PRIx64 " msg from vgId:%d discarded, since %s", tmq->consumerId, pollRspWrapper->vgId, tstrerror(pRspWrapper->code));
+      tscError("consumer:0x%" PRIx64 " msg from vgId:%d discarded, since %s", tmq->consumerId, pollRspWrapper->vgId,
+               tstrerror(pRspWrapper->code));
       taosWLockLatch(&tmq->lock);
       SMqClientVg* pVg = getVgInfo(tmq, pollRspWrapper->topicName, pollRspWrapper->vgId);
       if (pVg) pVg->emptyBlockReceiveTs = taosGetTimestampMs();
@@ -1861,7 +1880,8 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout) {
         pVg->epSet = *pollRspWrapper->pEpset;
       }

-      updateVgInfo(pVg, &pDataRsp->reqOffset, &pDataRsp->rspOffset, pDataRsp->head.walsver, pDataRsp->head.walever, tmq->consumerId, pDataRsp->blockNum != 0);
+      updateVgInfo(pVg, &pDataRsp->reqOffset, &pDataRsp->rspOffset, pDataRsp->head.walsver, pDataRsp->head.walever,
+                   tmq->consumerId, pDataRsp->blockNum != 0);

       char buf[TSDB_OFFSET_LEN] = {0};
       tFormatOffset(buf, TSDB_OFFSET_LEN, &pDataRsp->rspOffset);
@@ -1920,7 +1940,8 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout) {
         return NULL;
       }

-      updateVgInfo(pVg, &pollRspWrapper->metaRsp.rspOffset, &pollRspWrapper->metaRsp.rspOffset, pollRspWrapper->metaRsp.head.walsver, pollRspWrapper->metaRsp.head.walever, tmq->consumerId, true);
+      updateVgInfo(pVg, &pollRspWrapper->metaRsp.rspOffset, &pollRspWrapper->metaRsp.rspOffset,
+                   pollRspWrapper->metaRsp.head.walsver, pollRspWrapper->metaRsp.head.walever, tmq->consumerId, true);
       // build rsp
       SMqMetaRspObj* pRsp = tmqBuildMetaRspFromWrapper(pollRspWrapper);
       taosFreeQitem(pRspWrapper);
@@ -1949,7 +1970,9 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout) {
         return NULL;
       }

-      updateVgInfo(pVg, &pollRspWrapper->taosxRsp.reqOffset, &pollRspWrapper->taosxRsp.rspOffset, pollRspWrapper->taosxRsp.head.walsver, pollRspWrapper->taosxRsp.head.walever, tmq->consumerId, pollRspWrapper->taosxRsp.blockNum != 0);
+      updateVgInfo(pVg, &pollRspWrapper->taosxRsp.reqOffset, &pollRspWrapper->taosxRsp.rspOffset,
+                   pollRspWrapper->taosxRsp.head.walsver, pollRspWrapper->taosxRsp.head.walever, tmq->consumerId,
+                   pollRspWrapper->taosxRsp.blockNum != 0);

       if (pollRspWrapper->taosxRsp.blockNum == 0) {
         tscDebug("consumer:0x%" PRIx64 " taosx empty block received, vgId:%d, vg total:%" PRId64 ", reqId:0x%" PRIx64,
@@ -2316,7 +2339,8 @@ static int32_t checkWalRange(SVgOffsetInfo* offset, int64_t value){
   }

   if (value != -1 && (value < offset->walVerBegin || value > offset->walVerEnd)) {
-    tscError("invalid seek params, offset:%" PRId64 ", valid range:[%" PRId64 ", %" PRId64 "]", value, offset->walVerBegin, offset->walVerEnd);
+    tscError("invalid seek params, offset:%" PRId64 ", valid range:[%" PRId64 ", %" PRId64 "]", value,
+             offset->walVerBegin, offset->walVerEnd);
     return TSDB_CODE_TMQ_VERSION_OUT_OF_RANGE;
   }

@@ -2370,12 +2394,14 @@ int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId,
   tsem_destroy(&pInfo->sem);
   taosMemoryFree(pInfo);

-  tscInfo("consumer:0x%" PRIx64 " sync send commit to vgId:%d, offset:%" PRId64" code:%s", tmq->consumerId, vgId, offset, tstrerror(code));
+  tscInfo("consumer:0x%" PRIx64 " sync send commit to vgId:%d, offset:%" PRId64 " code:%s", tmq->consumerId, vgId,
+          offset, tstrerror(code));

   return code;
 }

-void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param){
+void tmq_commit_offset_async(tmq_t* tmq, const char* pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb* cb,
+                             void* param) {
   int32_t code = 0;
   if (tmq == NULL || pTopicName == NULL) {
     tscError("invalid tmq handle, null");
@@ -2407,7 +2433,8 @@ void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, i

   code = asyncCommitOffset(tmq, tname, vgId, &offsetVal, cb, param);

-  tscInfo("consumer:0x%" PRIx64 " async send commit to vgId:%d, offset:%" PRId64" code:%s", tmq->consumerId, vgId, offset, tstrerror(code));
+  tscInfo("consumer:0x%" PRIx64 " async send commit to vgId:%d, offset:%" PRId64 " code:%s", tmq->consumerId, vgId,
+          offset, tstrerror(code));

end:
  if (code != 0 && cb != NULL) {
@@ -2431,8 +2458,7 @@ int32_t askEpCb(void* param, SDataBuf* pMsg, int32_t code) {

   SMqRspHead* head = pMsg->pData;
   int32_t epoch = atomic_load_32(&tmq->epoch);
-  tscInfo("consumer:0x%" PRIx64 ", recv ep, msg epoch %d, current epoch %d", tmq->consumerId,
-          head->epoch, epoch);
+  tscInfo("consumer:0x%" PRIx64 ", recv ep, msg epoch %d, current epoch %d", tmq->consumerId, head->epoch, epoch);
   if (pParam->sync) {
     SMqAskEpRsp rsp = {0};
     tDecodeSMqAskEpRsp(POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), &rsp);
@@ -2578,10 +2604,12 @@ int32_t tmqCommitDone(SMqCommitCbParamSet* pParamSet) {
 void commitRspCountDown(SMqCommitCbParamSet* pParamSet, int64_t consumerId, const char* pTopic, int32_t vgId) {
   int32_t waitingRspNum = atomic_sub_fetch_32(&pParamSet->waitingRspNum, 1);
   if (waitingRspNum == 0) {
-    tscInfo("consumer:0x%" PRIx64 " topic:%s vgId:%d all commit-rsp received, commit completed", consumerId, pTopic, vgId);
+    tscInfo("consumer:0x%" PRIx64 " topic:%s vgId:%d all commit-rsp received, commit completed", consumerId, pTopic,
+            vgId);
     tmqCommitDone(pParamSet);
   } else {
-    tscInfo("consumer:0x%" PRIx64 " topic:%s vgId:%d commit-rsp received, remain:%d", consumerId, pTopic, vgId, waitingRspNum);
+    tscInfo("consumer:0x%" PRIx64 " topic:%s vgId:%d commit-rsp received, remain:%d", consumerId, pTopic, vgId,
+            waitingRspNum);
   }
 }

@@ -2590,7 +2618,8 @@ SReqResultInfo* tmqGetNextResInfo(TAOS_RES* res, bool convertUcs4) {
   pRspObj->resIter++;

   if (pRspObj->resIter < pRspObj->rsp.blockNum) {
-    SRetrieveTableRspForTmq* pRetrieveTmq = (SRetrieveTableRspForTmq*)taosArrayGetP(pRspObj->rsp.blockData, pRspObj->resIter);
+    SRetrieveTableRspForTmq* pRetrieveTmq =
+        (SRetrieveTableRspForTmq*)taosArrayGetP(pRspObj->rsp.blockData, pRspObj->resIter);
     if (pRspObj->rsp.withSchema) {
       SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(pRspObj->rsp.blockSchema, pRspObj->resIter);
       setResSchemaInfo(&pRspObj->resInfo, pSW->pSchema, pSW->nCols);
@@ -2856,13 +2885,15 @@ int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId){

   SVgOffsetInfo* pOffsetInfo = &pVg->offsetInfo;
   if (isInSnapshotMode(pOffsetInfo->endOffset.type, tmq->useSnapshot)) {
-    tscError("consumer:0x%" PRIx64 " offset type:%d not wal version, committed error", tmq->consumerId, pOffsetInfo->endOffset.type);
+    tscError("consumer:0x%" PRIx64 " offset type:%d not wal version, committed error", tmq->consumerId,
+             pOffsetInfo->endOffset.type);
     taosWUnLockLatch(&tmq->lock);
     return TSDB_CODE_TMQ_SNAPSHOT_ERROR;
   }

   if (isInSnapshotMode(pOffsetInfo->committedOffset.type, tmq->useSnapshot)) {
-    tscError("consumer:0x%" PRIx64 " offset type:%d not wal version, committed error", tmq->consumerId, pOffsetInfo->committedOffset.type);
+    tscError("consumer:0x%" PRIx64 " offset type:%d not wal version, committed error", tmq->consumerId,
+             pOffsetInfo->committedOffset.type);
     taosWUnLockLatch(&tmq->lock);
     return TSDB_CODE_TMQ_SNAPSHOT_ERROR;
   }
@@ -2939,8 +2970,8 @@ int32_t tmq_get_topic_assignment(tmq_t* tmq, const char* pTopicName, tmq_topic_a
       pAssignment->begin = pClientVg->offsetInfo.walVerBegin;
       pAssignment->end = pClientVg->offsetInfo.walVerEnd;
       pAssignment->vgId = pClientVg->vgId;
-      tscInfo("consumer:0x%" PRIx64 " get assignment from local:%d->%" PRId64, tmq->consumerId,
-              pAssignment->vgId, pAssignment->currentOffset);
+      tscInfo("consumer:0x%" PRIx64 " get assignment from local:%d->%" PRId64, tmq->consumerId, pAssignment->vgId,
+              pAssignment->currentOffset);
     }

     if (needFetch) {
@@ -3049,7 +3080,8 @@ int32_t tmq_get_topic_assignment(tmq_t* tmq, const char* pTopicName, tmq_topic_a
       }

       SVgOffsetInfo* pOffsetInfo = &pClientVg->offsetInfo;
-      tscInfo("consumer:0x%" PRIx64 " %s vgId:%d offset is update to:%"PRId64, tmq->consumerId, pTopic->topicName, p->vgId, p->currentOffset);
+      tscInfo("consumer:0x%" PRIx64 " %s vgId:%d offset is update to:%" PRId64, tmq->consumerId, pTopic->topicName,
+              p->vgId, p->currentOffset);

       pOffsetInfo->walVerBegin = p->begin;
       pOffsetInfo->walVerEnd = p->end;
@@ -3087,7 +3119,8 @@ static int32_t tmqSeekCb(void* param, SDataBuf* pMsg, int32_t code) {
   return 0;
 }

-// seek interface have to send msg to server to cancel push handle if needed, because consumer may be in wait status if there is no data to poll
+// seek interface have to send msg to server to cancel push handle if needed, because consumer may be in wait status if
+// there is no data to poll
 int32_t tmq_offset_seek(tmq_t* tmq, const char* pTopicName, int32_t vgId, int64_t offset) {
   if (tmq == NULL || pTopicName == NULL) {
     tscError("invalid tmq handle, null");
@@ -15,13 +15,13 @@

 #define _DEFAULT_SOURCE
 #include "mndConsumer.h"
-#include "mndPrivilege.h"
-#include "mndVgroup.h"
-#include "mndShow.h"
 #include "mndDb.h"
+#include "mndPrivilege.h"
+#include "mndShow.h"
 #include "mndSubscribe.h"
 #include "mndTopic.h"
 #include "mndTrans.h"
+#include "mndVgroup.h"
 #include "tcompare.h"
 #include "tname.h"

@@ -71,7 +71,8 @@ void mndCleanupConsumer(SMnode *pMnode) {}
 void mndDropConsumerFromSdb(SMnode *pMnode, int64_t consumerId, SRpcHandleInfo *info) {
   SMqConsumerClearMsg *pClearMsg = rpcMallocCont(sizeof(SMqConsumerClearMsg));
   if (pClearMsg == NULL) {
-    mError("consumer:0x%"PRIx64" failed to clear consumer due to out of memory. alloc size:%d", consumerId, (int32_t)sizeof(SMqConsumerClearMsg));
+    mError("consumer:0x%" PRIx64 " failed to clear consumer due to out of memory. alloc size:%d", consumerId,
+           (int32_t)sizeof(SMqConsumerClearMsg));
     return;
   }

@@ -88,7 +89,8 @@ void mndDropConsumerFromSdb(SMnode *pMnode, int64_t consumerId, SRpcHandleInfo*
   return;
 }

-static int32_t validateTopics(STrans *pTrans, const SArray *pTopicList, SMnode *pMnode, const char *pUser, bool enableReplay) {
+static int32_t validateTopics(STrans *pTrans, const SArray *pTopicList, SMnode *pMnode, const char *pUser,
+                              bool enableReplay) {
   SMqTopicObj *pTopic = NULL;
   int32_t code = 0;

@@ -503,7 +505,6 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
     pHead->walsver = 0;
     pHead->walever = 0;

-
     void *abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));
     tEncodeSMqAskEpRsp(&abuf, &rsp);

@@ -561,7 +562,6 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) {
   SMqConsumerObj *pConsumerNew = NULL;
   STrans *pTrans = NULL;

-
   SArray *pTopicList = subscribe.topicNames;
   taosArraySort(pTopicList, taosArrayCompareString);
   taosArrayRemoveDuplicate(pTopicList, taosArrayCompareString, freeItem);
@@ -787,9 +787,8 @@ CM_DECODE_OVER:
 }

 static int32_t mndConsumerActionInsert(SSdb *pSdb, SMqConsumerObj *pConsumer) {
-  mInfo("consumer:0x%" PRIx64 " sub insert, cgroup:%s status:%d(%s) epoch:%d",
-        pConsumer->consumerId, pConsumer->cgroup, pConsumer->status, mndConsumerStatusName(pConsumer->status),
-        pConsumer->epoch);
+  mInfo("consumer:0x%" PRIx64 " sub insert, cgroup:%s status:%d(%s) epoch:%d", pConsumer->consumerId, pConsumer->cgroup,
+        pConsumer->status, mndConsumerStatusName(pConsumer->status), pConsumer->epoch);
   pConsumer->subscribeTime = pConsumer->createTime;
   return 0;
 }
@@ -898,9 +897,11 @@ static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer,
     //
     //    int32_t prevStatus = pOldConsumer->status;
     //    pOldConsumer->status = MQ_CONSUMER_STATUS_LOST;
-    //    mInfo("consumer:0x%" PRIx64 " timer update, timer lost. state %s -> %s, reb-time:%" PRId64 ", reb-removed-topics:%d",
-    //          pOldConsumer->consumerId, mndConsumerStatusName(prevStatus), mndConsumerStatusName(pOldConsumer->status),
-    //          pOldConsumer->rebalanceTime, (int)taosArrayGetSize(pOldConsumer->rebRemovedTopics));
+    //    mInfo("consumer:0x%" PRIx64 " timer update, timer lost. state %s -> %s, reb-time:%" PRId64 ",
+    //    reb-removed-topics:%d",
+    //          pOldConsumer->consumerId, mndConsumerStatusName(prevStatus),
+    //          mndConsumerStatusName(pOldConsumer->status), pOldConsumer->rebalanceTime,
+    //          (int)taosArrayGetSize(pOldConsumer->rebRemovedTopics));
   } else if (pNewConsumer->updateType == CONSUMER_UPDATE_REC) {
     int32_t sz = taosArrayGetSize(pOldConsumer->assignedTopics);
     for (int32_t i = 0; i < sz; i++) {
@@ -1086,7 +1087,8 @@ static int32_t mndRetrieveConsumer(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *
     tFormatOffset(buf, TSDB_OFFSET_LEN, &pVal);

     char parasStr[64 + TSDB_OFFSET_LEN + VARSTR_HEADER_SIZE] = {0};
-    sprintf(varDataVal(parasStr), "tbname:%d,commit:%d,interval:%dms,reset:%s", pConsumer->withTbName, pConsumer->autoCommit, pConsumer->autoCommitInterval, buf);
+    sprintf(varDataVal(parasStr), "tbname:%d,commit:%d,interval:%dms,reset:%s", pConsumer->withTbName,
+            pConsumer->autoCommit, pConsumer->autoCommitInterval, buf);
     varDataSetLen(parasStr, strlen(varDataVal(parasStr)));

     pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
@@ -14,8 +14,8 @@
  */

 #include "tq.h"
-#include "vnd.h"
 #include "tqCommon.h"
+#include "vnd.h"

 // 0: not init
 // 1: already inited
@@ -865,9 +865,7 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t nextProcessVer) {
   return 0;
 }

-int32_t tqProcessTaskCheckReq(STQ* pTq, SRpcMsg* pMsg) {
-  return tqStreamTaskProcessCheckReq(pTq->pStreamMeta, pMsg);
-}
+int32_t tqProcessTaskCheckReq(STQ* pTq, SRpcMsg* pMsg) { return tqStreamTaskProcessCheckReq(pTq->pStreamMeta, pMsg); }

 int32_t tqProcessTaskCheckRsp(STQ* pTq, SRpcMsg* pMsg) {
   return tqStreamTaskProcessCheckRsp(pTq->pStreamMeta, pMsg, vnodeIsRoleLeader(pTq->pVnode));
@@ -1195,8 +1193,8 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp)
   streamProcessCheckpointSourceReq(pTask, &req);
   taosThreadMutexUnlock(&pTask->lock);

-  qInfo("s-task:%s (vgId:%d) level:%d receive checkpoint-source msg chkpt:%" PRId64 ", transId:%d",
-        pTask->id.idStr, vgId, pTask->info.taskLevel, req.checkpointId, req.transId);
+  qInfo("s-task:%s (vgId:%d) level:%d receive checkpoint-source msg chkpt:%" PRId64 ", transId:%d", pTask->id.idStr,
+        vgId, pTask->info.taskLevel, req.checkpointId, req.transId);

   code = streamAddCheckpointSourceRspMsg(&req, &pMsg->info, pTask, 1);
   if (code != TSDB_CODE_SUCCESS) {
@@ -207,8 +207,8 @@ int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, uint64_t
     goto END;
   }

-  tqDebug("vgId:%d, consumer:0x%" PRIx64 " taosx get msg ver %" PRId64 ", type: %s, reqId:0x%" PRIx64" 0x%"PRIx64, vgId,
-          pHandle->consumerId, offset, TMSG_INFO(pHandle->pWalReader->pHead->head.msgType), reqId, id);
+  tqDebug("vgId:%d, consumer:0x%" PRIx64 " taosx get msg ver %" PRId64 ", type: %s, reqId:0x%" PRIx64 " 0x%" PRIx64,
+          vgId, pHandle->consumerId, offset, TMSG_INFO(pHandle->pWalReader->pHead->head.msgType), reqId, id);

   if (pHandle->pWalReader->pHead->head.msgType == TDMT_VND_SUBMIT) {
     code = walFetchBody(pHandle->pWalReader);
@@ -322,7 +322,8 @@ int32_t extractMsgFromWal(SWalReader* pReader, void** pItem, int64_t maxVer, con

     void* data = taosMemoryMalloc(len);
     if (data == NULL) {
-      // todo: for all stream in this vnode, keep this offset in the offset files, and wait for a moment, and then retry
+      // todo: for all stream in this vnode, keep this offset in the offset files, and wait for a moment, and then
+      // retry
       code = TSDB_CODE_OUT_OF_MEMORY;
       terrno = code;

@@ -387,8 +388,8 @@ bool tqNextBlockInWal(STqReader* pReader, const char* id, int sourceExcluded) {
     pReader->nextBlk = 0;
     int32_t numOfBlocks = taosArrayGetSize(pReader->submit.aSubmitTbData);
     while (pReader->nextBlk < numOfBlocks) {
-      tqTrace("tq reader next data block %d/%d, len:%d %" PRId64, pReader->nextBlk,
-              numOfBlocks, pReader->msg.msgLen, pReader->msg.ver);
+      tqTrace("tq reader next data block %d/%d, len:%d %" PRId64, pReader->nextBlk, numOfBlocks, pReader->msg.msgLen,
+              pReader->msg.ver);

       SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk);
       if ((pSubmitTbData->source & sourceExcluded) != 0) {
@@ -448,17 +449,11 @@ int32_t tqReaderSetSubmitMsg(STqReader* pReader, void* msgStr, int32_t msgLen, i
   return 0;
 }

-SWalReader* tqGetWalReader(STqReader* pReader) {
-  return pReader->pWalReader;
-}
+SWalReader* tqGetWalReader(STqReader* pReader) { return pReader->pWalReader; }

-SSDataBlock* tqGetResultBlock (STqReader* pReader) {
-  return pReader->pResBlock;
-}
+SSDataBlock* tqGetResultBlock(STqReader* pReader) { return pReader->pResBlock; }

-int64_t tqGetResultBlockTime(STqReader *pReader){
-  return pReader->lastTs;
-}
+int64_t tqGetResultBlockTime(STqReader* pReader) { return pReader->lastTs; }

 bool tqNextBlockImpl(STqReader* pReader, const char* idstr) {
   if (pReader->msg.msgStr == NULL) {
@@ -1027,9 +1022,7 @@ bool tqReaderIsQueriedTable(STqReader* pReader, uint64_t uid) {
   return taosHashGet(pReader->tbIdHash, &uid, sizeof(uint64_t));
 }

-bool tqCurrentBlockConsumed(const STqReader* pReader) {
-  return pReader->msg.msgStr == NULL;
-}
+bool tqCurrentBlockConsumed(const STqReader* pReader) { return pReader->msg.msgStr == NULL; }

 int tqReaderRemoveTbUidList(STqReader* pReader, const SArray* tbUidList) {
   for (int32_t i = 0; i < taosArrayGetSize(tbUidList); i++) {
@@ -1071,9 +1064,11 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
     } else if (pTqHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
       if (isAdd) {
         SArray* list = NULL;
-        int ret = qGetTableList(pTqHandle->execHandle.execTb.suid, pTq->pVnode, pTqHandle->execHandle.execTb.node, &list, pTqHandle->execHandle.task);
+        int ret = qGetTableList(pTqHandle->execHandle.execTb.suid, pTq->pVnode, pTqHandle->execHandle.execTb.node,
+                                &list, pTqHandle->execHandle.task);
         if (ret != TDB_CODE_SUCCESS) {
-          tqError("qGetTableList in tqUpdateTbUidList error:%d handle %s consumer:0x%" PRIx64, ret, pTqHandle->subKey, pTqHandle->consumerId);
+          tqError("qGetTableList in tqUpdateTbUidList error:%d handle %s consumer:0x%" PRIx64, ret, pTqHandle->subKey,
+                  pTqHandle->consumerId);
           taosArrayDestroy(list);
           taosHashCancelIterate(pTq->pHandle, pIter);
           taosWUnLockLatch(&pTq->lock);
@@ -250,7 +250,8 @@ int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, STaosxRsp* pRsp, SMqMeta
   return 0;
 }

-int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SPackedData submit, STaosxRsp* pRsp, int32_t* totalRows, int8_t sourceExcluded) {
+int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SPackedData submit, STaosxRsp* pRsp, int32_t* totalRows,
+                       int8_t sourceExcluded) {
   STqExecHandle* pExec = &pHandle->execHandle;
   SArray* pBlocks = taosArrayInit(0, sizeof(SSDataBlock));
   SArray* pSchemas = taosArrayInit(0, sizeof(void*));
@@ -72,7 +72,8 @@ static int32_t tqInitTaosxRsp(STaosxRsp* pRsp, STqOffsetVal pOffset) {
   return 0;
 }

-static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest, SRpcMsg* pMsg, bool* pBlockReturned) {
+static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest,
+                                     SRpcMsg* pMsg, bool* pBlockReturned) {
   uint64_t consumerId = pRequest->consumerId;
   STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, pRequest->subKey);
   int32_t vgId = TD_VID(pTq->pVnode);
@@ -218,7 +219,9 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle,

     if (tqFetchLog(pTq, pHandle, &fetchVer, pRequest->reqId) < 0) {
       tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer);
-      code = tqSendDataRsp(pHandle, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__POLL_DATA_RSP, vgId);
+      code = tqSendDataRsp(
+          pHandle, pMsg, pRequest, (SMqDataRsp*)&taosxRsp,
+          taosxRsp.createTableNum > 0 ? TMQ_MSG_TYPE__POLL_DATA_META_RSP : TMQ_MSG_TYPE__POLL_DATA_RSP, vgId);
       goto end;
     }

@@ -230,7 +233,9 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle,
     if (pHead->msgType != TDMT_VND_SUBMIT) {
       if (totalRows > 0) {
         tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer);
-        code = tqSendDataRsp(pHandle, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__POLL_DATA_RSP, vgId);
+        code = tqSendDataRsp(
+            pHandle, pMsg, pRequest, (SMqDataRsp*)&taosxRsp,
+            taosxRsp.createTableNum > 0 ? TMQ_MSG_TYPE__POLL_DATA_META_RSP : TMQ_MSG_TYPE__POLL_DATA_RSP, vgId);
         goto end;
       }

@@ -257,9 +262,11 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle,
         goto end;
       }

-      if (totalRows >= 4096 || taosxRsp.createTableNum > 0 || (taosGetTimestampMs() - st > 1000)) {
+      if (totalRows >= 4096 || (taosGetTimestampMs() - st > 1000)) {
        tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer + 1);
-        code = tqSendDataRsp(pHandle, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, taosxRsp.createTableNum > 0 ? TMQ_MSG_TYPE__POLL_DATA_META_RSP : TMQ_MSG_TYPE__POLL_DATA_RSP, vgId);
+        code = tqSendDataRsp(
+            pHandle, pMsg, pRequest, (SMqDataRsp*)&taosxRsp,
+            taosxRsp.createTableNum > 0 ? TMQ_MSG_TYPE__POLL_DATA_META_RSP : TMQ_MSG_TYPE__POLL_DATA_RSP, vgId);
        goto end;
      } else {
        fetchVer++;
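(The three hunks above in extractDataAndRspForDbStbSubscribe are the functional core of the PR: taosxRsp.createTableNum no longer appears in the batch-cut condition, and every reply instead derives its message type from one shared expression. A minimal sketch of that selection, using the names from the diff:

    // If the scanned batch auto-created tables ("insert into ... using"),
    // the poll response must carry meta; otherwise plain data is enough.
    int32_t rspType = taosxRsp.createTableNum > 0 ? TMQ_MSG_TYPE__POLL_DATA_META_RSP
                                                  : TMQ_MSG_TYPE__POLL_DATA_RSP;
    code = tqSendDataRsp(pHandle, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, rspType, vgId);

A run of auto-create inserts is now accumulated until 4096 rows or 1000 ms elapse, instead of being flushed once per created table.)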
@@ -311,44 +311,57 @@ class TDTestCase:

         return

-    def consumeExcluded(self):
-        tdSql.execute(f'create topic topic_excluded as database db_taosx')
+    def consumeTest(self):
+        tdSql.execute(f'create database if not exists d1 vgroups 1')
+        tdSql.execute(f'use d1')
+        tdSql.execute(f'create table st(ts timestamp, i int) tags(t int)')
+        tdSql.execute(f'insert into t1 using st tags(1) values(now, 1) (now+1s, 2)')
+        tdSql.execute(f'insert into t2 using st tags(2) values(now, 1) (now+1s, 2)')
+        tdSql.execute(f'insert into t3 using st tags(3) values(now, 1) (now+1s, 2)')
+        tdSql.execute(f'insert into t1 using st tags(1) values(now+5s, 11) (now+10s, 12)')
+
+        tdSql.query("select * from st")
+        tdSql.checkRows(8)
+
+        tdSql.execute(f'create topic topic_excluded with meta as database d1')
         consumer_dict = {
             "group.id": "g1",
             "td.connect.user": "root",
             "td.connect.pass": "taosdata",
             "auto.offset.reset": "earliest",
             "msg.consume.excluded": 1
         }
         consumer = Consumer(consumer_dict)

         tdLog.debug("test subscribe topic created by other user")
         exceptOccured = False
         try:
             consumer.subscribe(["topic_excluded"])
         except TmqError:
             exceptOccured = True

         if exceptOccured:
             tdLog.exit(f"subscribe error")

+        index = 0
         try:
             while True:
                 res = consumer.poll(1)
                 if not res:
+                    if index != 1:
+                        tdLog.exit("consume error")
                     break
                 err = res.error()
                 if err is not None:
                     raise err
                 val = res.value()

                 if val is None:
                     continue
+                cnt = 0;
                 for block in val:
                     print(block.fetchall())
+                    cnt += len(block.fetchall())
+
+                if cnt != 8:
+                    tdLog.exit("consume error")
+
+                index += 1
         finally:
             consumer.close()

     def run(self):
+        self.consumeTest()
+
         tdSql.prepare()
         self.checkWal1VgroupOnlyMeta()
@@ -362,7 +375,6 @@ class TDTestCase:
         self.checkSnapshotMultiVgroups()

         self.checkWalMultiVgroupsWithDropTable()
-        # self.consumeExcluded()

         self.checkSnapshotMultiVgroupsWithDropTable()
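(The new consumeTest pins this behavior down: with one vgroup and eight rows written via "insert into ... using", the consumer must see exactly one non-empty poll result (index != 1 fails the test) carrying all eight rows (cnt != 8 fails it), i.e. the create-table events alone no longer split the response.)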