From 077745390dc75dfc19d311e92f0c6e29a115e403 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 3 Aug 2024 16:34:26 +0800 Subject: [PATCH 001/181] fix(stream): add more check in tmr. --- source/libs/stream/src/streamCheckStatus.c | 7 ------- source/libs/stream/src/streamCheckpoint.c | 22 ++++++++++++++++++++++ source/libs/stream/src/streamDispatch.c | 13 ++++++++++++- 3 files changed, 34 insertions(+), 8 deletions(-) diff --git a/source/libs/stream/src/streamCheckStatus.c b/source/libs/stream/src/streamCheckStatus.c index c9ba6ffcfe..b7661e72d4 100644 --- a/source/libs/stream/src/streamCheckStatus.c +++ b/source/libs/stream/src/streamCheckStatus.c @@ -74,13 +74,6 @@ int32_t streamTaskCheckStatus(SStreamTask* pTask, int32_t upstreamTaskId, int32_ } if (pInfo->stage != stage) { - streamMutexLock(&pTask->lock); - ETaskStatus status = streamTaskGetStatus(pTask).state; - if (status == TASK_STATUS__CK) { - streamTaskSetFailedCheckpointId(pTask); - } - streamMutexUnlock(&pTask->lock); - return TASK_UPSTREAM_NEW_STAGE; } else if (pTask->status.downstreamReady != 1) { stDebug("s-task:%s vgId:%d leader:%d, downstream not ready", id, vgId, (pTask->pMeta->role == NODE_ROLE_LEADER)); diff --git a/source/libs/stream/src/streamCheckpoint.c b/source/libs/stream/src/streamCheckpoint.c index f817447099..741e3cc882 100644 --- a/source/libs/stream/src/streamCheckpoint.c +++ b/source/libs/stream/src/streamCheckpoint.c @@ -838,6 +838,28 @@ void checkpointTriggerMonitorFn(void* param, void* tmrId) { return; } + if ((pTmrInfo->launchChkptId != pActiveInfo->activeId) || (pActiveInfo->activeId == 0)) { + streamMutexUnlock(&pActiveInfo->lock); + int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); + stWarn("s-task:%s vgId:%d checkpoint-trigger retrieve by previous checkpoint procedure, checkpointId:%" PRId64 + ", quit, ref:%d", + id, vgId, pTmrInfo->launchChkptId, ref); + + streamMetaReleaseTask(pTask->pMeta, pTask); + return; + } + + // active checkpoint info is cleared for now + if ((pActiveInfo->activeId == 0) || (pActiveInfo->transId == 0) || (pTask->chkInfo.startTs == 0)) { + streamMutexUnlock(&pActiveInfo->lock); + int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); + stWarn("s-task:%s vgId:%d active checkpoint may be cleared, quit from retrieve checkpoint-trigger send tmr, ref:%d", + id, vgId, ref); + + streamMetaReleaseTask(pTask->pMeta, pTask); + return; + } + for (int32_t i = 0; i < taosArrayGetSize(pList); ++i) { SStreamUpstreamEpInfo* pInfo = taosArrayGetP(pList, i); diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 255afb44f9..9e07059e53 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -819,8 +819,19 @@ static void checkpointReadyMsgSendMonitorFn(void* param, void* tmrId) { SArray* pList = pActiveInfo->pReadyMsgList; int32_t num = taosArrayGetSize(pList); + if (pTmrInfo->launchChkptId != pActiveInfo->activeId) { + streamMutexUnlock(&pActiveInfo->lock); + int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); + stWarn("s-task:%s vgId:%d ready-msg send tmr launched by previous checkpoint procedure, checkpointId:%" PRId64 + ", quit, ref:%d", + id, vgId, pTmrInfo->launchChkptId, ref); + + streamMetaReleaseTask(pTask->pMeta, pTask); + return; + } + // active checkpoint info is cleared for now - if ((pActiveInfo->activeId == 0) && (pActiveInfo->transId == 0) && (num == 0) && (pTask->chkInfo.startTs == 0)) { + if ((pActiveInfo->activeId == 0) || (pActiveInfo->transId == 0) || (num == 0) || 
(pTask->chkInfo.startTs == 0)) { streamMutexUnlock(&pActiveInfo->lock); int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); stWarn("s-task:%s vgId:%d active checkpoint may be cleared, quit from readyMsg send tmr, ref:%d", id, vgId, ref); From 6a6ab9ff6a0b57426d2ab290192df0a5a5b9f02e Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sun, 4 Aug 2024 11:37:23 +0800 Subject: [PATCH 002/181] fix(stream): add check for checkpointId in retrieve-checkpoint id msg. --- source/dnode/vnode/src/tqCommon/tqCommon.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/tqCommon/tqCommon.c b/source/dnode/vnode/src/tqCommon/tqCommon.c index b56c474ed5..11d38dde87 100644 --- a/source/dnode/vnode/src/tqCommon/tqCommon.c +++ b/source/dnode/vnode/src/tqCommon/tqCommon.c @@ -989,7 +989,12 @@ int32_t tqStreamTaskProcessRetrieveTriggerReq(SStreamMeta* pMeta, SRpcMsg* pMsg) int64_t checkpointId = 0; streamTaskGetActiveCheckpointInfo(pTask, &transId, &checkpointId); - ASSERT(checkpointId == pReq->checkpointId); + if (checkpointId != pReq->checkpointId) { + tqError("s-task:%s invalid checkpoint-trigger retrieve msg from %x, current checkpointId:%"PRId64" req:%"PRId64, + pTask->id.idStr, pReq->downstreamTaskId, checkpointId, pReq->checkpointId); + streamMetaReleaseTask(pMeta, pTask); + return TSDB_CODE_INVALID_MSG; + } if (streamTaskAlreadySendTrigger(pTask, pReq->downstreamNodeId)) { // re-send the lost checkpoint-trigger msg to downstream task From d5b990e4ed2e77eaf23b2a865b6716d4b901799a Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Thu, 8 Aug 2024 16:38:11 +0800 Subject: [PATCH 003/181] fix:[TS-5156]add user in show consumers --- include/common/tmsg.h | 3 +++ source/client/src/clientTmq.c | 3 +++ source/dnode/mnode/impl/inc/mndDef.h | 1 + source/dnode/mnode/impl/src/mndConsumer.c | 8 ++++++++ source/dnode/mnode/impl/src/mndDef.c | 2 ++ 5 files changed, 17 insertions(+) diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 70cf9c8b58..63fe4271d9 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -2837,6 +2837,7 @@ typedef struct { int64_t consumerId; char cgroup[TSDB_CGROUP_LEN]; char clientId[TSDB_CLIENT_ID_LEN]; + char user[TSDB_USER_LEN]; SArray* topicNames; // SArray int8_t withTbName; @@ -2870,6 +2871,7 @@ static FORCE_INLINE int32_t tSerializeSCMSubscribeReq(void** buf, const SCMSubsc tlen += taosEncodeFixedI8(buf, pReq->enableBatchMeta); tlen += taosEncodeFixedI32(buf, pReq->sessionTimeoutMs); tlen += taosEncodeFixedI32(buf, pReq->maxPollIntervalMs); + tlen += taosEncodeString(buf, pReq->user); return tlen; } @@ -2904,6 +2906,7 @@ static FORCE_INLINE int32_t tDeserializeSCMSubscribeReq(void* buf, SCMSubscribeR if ((char*)buf - (char*)start < len) { buf = taosDecodeFixedI32(buf, &pReq->sessionTimeoutMs); buf = taosDecodeFixedI32(buf, &pReq->maxPollIntervalMs); + buf = taosDecodeStringTo(buf, pReq->user); } else { pReq->sessionTimeoutMs = DEFAULT_SESSION_TIMEOUT; pReq->maxPollIntervalMs = DEFAULT_MAX_POLL_INTERVAL; diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index 197a65add8..61037c2e68 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -82,6 +82,7 @@ struct tmq_t { int64_t refId; char groupId[TSDB_CGROUP_LEN]; char clientId[TSDB_CLIENT_ID_LEN]; + char user[TSDB_USER_LEN]; int8_t withTbName; int8_t useSnapshot; int8_t autoCommit; @@ -1265,6 +1266,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { pTmq->replayEnable = conf->replayEnable; 
pTmq->sourceExcluded = conf->sourceExcluded; pTmq->enableBatchMeta = conf->enableBatchMeta; + tstrncpy(pTmq->user, user, TSDB_USER_LEN); if (conf->replayEnable) { pTmq->autoCommit = false; } @@ -1332,6 +1334,7 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) { req.consumerId = tmq->consumerId; tstrncpy(req.clientId, tmq->clientId, TSDB_CLIENT_ID_LEN); tstrncpy(req.cgroup, tmq->groupId, TSDB_CGROUP_LEN); + tstrncpy(req.user, tmq->user, TSDB_USER_LEN); req.topicNames = taosArrayInit(sz, sizeof(void*)); if (req.topicNames == NULL) { diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h index 62e77867f6..0505f604a2 100644 --- a/source/dnode/mnode/impl/inc/mndDef.h +++ b/source/dnode/mnode/impl/inc/mndDef.h @@ -597,6 +597,7 @@ typedef struct { int64_t consumerId; char cgroup[TSDB_CGROUP_LEN]; char clientId[TSDB_CLIENT_ID_LEN]; + char user[TSDB_USER_LEN]; int8_t updateType; // used only for update int32_t epoch; int32_t status; diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c index 6116d2da19..5f3f794ef9 100644 --- a/source/dnode/mnode/impl/src/mndConsumer.c +++ b/source/dnode/mnode/impl/src/mndConsumer.c @@ -903,6 +903,14 @@ static int32_t mndRetrieveConsumer(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock * MND_TMQ_NULL_CHECK(pColInfo); MND_TMQ_RETURN_CHECK(colDataSetVal(pColInfo, numOfRows, (const char *)clientId, false)); + // user + char user[TSDB_USER_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_TO_VARSTR(user, pConsumer->user); + + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + MND_TMQ_NULL_CHECK(pColInfo); + MND_TMQ_RETURN_CHECK(colDataSetVal(pColInfo, numOfRows, (const char *)user, false)); + // status const char *pStatusName = mndConsumerStatusName(pConsumer->status); status = taosMemoryCalloc(1, pShow->pMeta->pSchemas[cols].bytes); diff --git a/source/dnode/mnode/impl/src/mndDef.c b/source/dnode/mnode/impl/src/mndDef.c index 695bf4d30d..c7de16d824 100644 --- a/source/dnode/mnode/impl/src/mndDef.c +++ b/source/dnode/mnode/impl/src/mndDef.c @@ -429,6 +429,7 @@ int32_t tEncodeSMqConsumerObj(void **buf, const SMqConsumerObj *pConsumer) { tlen += taosEncodeFixedI32(buf, pConsumer->resetOffsetCfg); tlen += taosEncodeFixedI32(buf, pConsumer->maxPollIntervalMs); tlen += taosEncodeFixedI32(buf, pConsumer->sessionTimeoutMs); + tlen += taosEncodeString(buf, pConsumer->user); return tlen; } @@ -503,6 +504,7 @@ void *tDecodeSMqConsumerObj(const void *buf, SMqConsumerObj *pConsumer, int8_t s if (sver > 2){ buf = taosDecodeFixedI32(buf, &pConsumer->maxPollIntervalMs); buf = taosDecodeFixedI32(buf, &pConsumer->sessionTimeoutMs); + buf = taosDecodeStringTo(buf, pConsumer->user); } else{ pConsumer->maxPollIntervalMs = DEFAULT_MAX_POLL_INTERVAL; pConsumer->sessionTimeoutMs = DEFAULT_SESSION_TIMEOUT; From 9e018e8e58d6ce05db6bb3443ae487017e6b9fa7 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Fri, 9 Aug 2024 14:28:58 +0800 Subject: [PATCH 004/181] fix:[TS-5156]add user/fqdn in show consumers --- include/common/tmsg.h | 3 +++ source/client/src/clientTmq.c | 5 ++++ source/common/src/systable.c | 7 +++-- source/dnode/mnode/impl/inc/mndDef.h | 1 + source/dnode/mnode/impl/src/mndConsumer.c | 8 ++++++ source/dnode/mnode/impl/src/mndDef.c | 4 +++ source/dnode/mnode/impl/src/mndSubscribe.c | 31 ++++++++++++++++++---- 7 files changed, 52 insertions(+), 7 deletions(-) diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 63fe4271d9..736267c3b9 100644 --- a/include/common/tmsg.h +++ 
b/include/common/tmsg.h @@ -2838,6 +2838,7 @@ typedef struct { char cgroup[TSDB_CGROUP_LEN]; char clientId[TSDB_CLIENT_ID_LEN]; char user[TSDB_USER_LEN]; + char fqdn[TSDB_FQDN_LEN]; SArray* topicNames; // SArray int8_t withTbName; @@ -2872,6 +2873,7 @@ static FORCE_INLINE int32_t tSerializeSCMSubscribeReq(void** buf, const SCMSubsc tlen += taosEncodeFixedI32(buf, pReq->sessionTimeoutMs); tlen += taosEncodeFixedI32(buf, pReq->maxPollIntervalMs); tlen += taosEncodeString(buf, pReq->user); + tlen += taosEncodeString(buf, pReq->fqdn); return tlen; } @@ -2907,6 +2909,7 @@ static FORCE_INLINE int32_t tDeserializeSCMSubscribeReq(void* buf, SCMSubscribeR buf = taosDecodeFixedI32(buf, &pReq->sessionTimeoutMs); buf = taosDecodeFixedI32(buf, &pReq->maxPollIntervalMs); buf = taosDecodeStringTo(buf, pReq->user); + buf = taosDecodeStringTo(buf, pReq->fqdn); } else { pReq->sessionTimeoutMs = DEFAULT_SESSION_TIMEOUT; pReq->maxPollIntervalMs = DEFAULT_MAX_POLL_INTERVAL; diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index 61037c2e68..a69af05900 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -83,6 +83,7 @@ struct tmq_t { char groupId[TSDB_CGROUP_LEN]; char clientId[TSDB_CLIENT_ID_LEN]; char user[TSDB_USER_LEN]; + char fqdn[TSDB_FQDN_LEN]; int8_t withTbName; int8_t useSnapshot; int8_t autoCommit; @@ -1267,6 +1268,9 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { pTmq->sourceExcluded = conf->sourceExcluded; pTmq->enableBatchMeta = conf->enableBatchMeta; tstrncpy(pTmq->user, user, TSDB_USER_LEN); + if (taosGetFqdn(pTmq->fqdn) != 0) { + (void)strcpy(pTmq->fqdn, "localhost"); + } if (conf->replayEnable) { pTmq->autoCommit = false; } @@ -1335,6 +1339,7 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) { tstrncpy(req.clientId, tmq->clientId, TSDB_CLIENT_ID_LEN); tstrncpy(req.cgroup, tmq->groupId, TSDB_CGROUP_LEN); tstrncpy(req.user, tmq->user, TSDB_USER_LEN); + tstrncpy(req.fqdn, tmq->fqdn, TSDB_FQDN_LEN); req.topicNames = taosArrayInit(sz, sizeof(void*)); if (req.topicNames == NULL) { diff --git a/source/common/src/systable.c b/source/common/src/systable.c index 2d69a687a6..3f27ab2b2b 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -344,7 +344,9 @@ static const SSysDbTableSchema subscriptionSchema[] = { {.name = "topic_name", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "consumer_group", .bytes = TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, - {.name = "consumer_id", .bytes = 32, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "consumer_id", .bytes = TSDB_CLIENT_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "user", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "fqdn", .bytes = TSDB_FQDN_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "offset", .bytes = TSDB_OFFSET_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "rows", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, }; @@ -480,11 +482,12 @@ static const SSysDbTableSchema connectionsSchema[] = { {.name = "last_access", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, }; - static const SSysDbTableSchema consumerSchema[] = 
{ {.name = "consumer_id", .bytes = TSDB_CONSUMER_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "consumer_group", .bytes = TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "client_id", .bytes = TSDB_CLIENT_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "user", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, + {.name = "fqdn", .bytes = TSDB_FQDN_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "topics", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, /*{.name = "end_point", .bytes = TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},*/ diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h index 0505f604a2..99e59662ac 100644 --- a/source/dnode/mnode/impl/inc/mndDef.h +++ b/source/dnode/mnode/impl/inc/mndDef.h @@ -598,6 +598,7 @@ typedef struct { char cgroup[TSDB_CGROUP_LEN]; char clientId[TSDB_CLIENT_ID_LEN]; char user[TSDB_USER_LEN]; + char fqdn[TSDB_FQDN_LEN]; int8_t updateType; // used only for update int32_t epoch; int32_t status; diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c index 5f3f794ef9..37eb899ed5 100644 --- a/source/dnode/mnode/impl/src/mndConsumer.c +++ b/source/dnode/mnode/impl/src/mndConsumer.c @@ -911,6 +911,14 @@ static int32_t mndRetrieveConsumer(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock * MND_TMQ_NULL_CHECK(pColInfo); MND_TMQ_RETURN_CHECK(colDataSetVal(pColInfo, numOfRows, (const char *)user, false)); + // fqdn + char fqdn[TSDB_FQDN_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_TO_VARSTR(fqdn, pConsumer->fqdn); + + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + MND_TMQ_NULL_CHECK(pColInfo); + MND_TMQ_RETURN_CHECK(colDataSetVal(pColInfo, numOfRows, (const char *)fqdn, false)); + // status const char *pStatusName = mndConsumerStatusName(pConsumer->status); status = taosMemoryCalloc(1, pShow->pMeta->pSchemas[cols].bytes); diff --git a/source/dnode/mnode/impl/src/mndDef.c b/source/dnode/mnode/impl/src/mndDef.c index c7de16d824..c604e58588 100644 --- a/source/dnode/mnode/impl/src/mndDef.c +++ b/source/dnode/mnode/impl/src/mndDef.c @@ -325,6 +325,8 @@ int32_t tNewSMqConsumerObj(int64_t consumerId, char *cgroup, int8_t updateType, pConsumer->resetOffsetCfg = subscribe->resetOffsetCfg; pConsumer->maxPollIntervalMs = subscribe->maxPollIntervalMs; pConsumer->sessionTimeoutMs = subscribe->sessionTimeoutMs; + tstrncpy(pConsumer->user, subscribe->user, TSDB_USER_LEN); + tstrncpy(pConsumer->fqdn, subscribe->fqdn, TSDB_FQDN_LEN); pConsumer->rebNewTopics = taosArrayDup(subscribe->topicNames, topicNameDup); if (pConsumer->rebNewTopics == NULL){ @@ -430,6 +432,7 @@ int32_t tEncodeSMqConsumerObj(void **buf, const SMqConsumerObj *pConsumer) { tlen += taosEncodeFixedI32(buf, pConsumer->maxPollIntervalMs); tlen += taosEncodeFixedI32(buf, pConsumer->sessionTimeoutMs); tlen += taosEncodeString(buf, pConsumer->user); + tlen += taosEncodeString(buf, pConsumer->fqdn); return tlen; } @@ -505,6 +508,7 @@ void *tDecodeSMqConsumerObj(const void *buf, SMqConsumerObj *pConsumer, int8_t s buf = taosDecodeFixedI32(buf, &pConsumer->maxPollIntervalMs); buf = taosDecodeFixedI32(buf, &pConsumer->sessionTimeoutMs); 
buf = taosDecodeStringTo(buf, pConsumer->user); + buf = taosDecodeStringTo(buf, pConsumer->fqdn); } else{ pConsumer->maxPollIntervalMs = DEFAULT_MAX_POLL_INTERVAL; pConsumer->sessionTimeoutMs = DEFAULT_SESSION_TIMEOUT; diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index bff313dbaf..db5bb2eacd 100644 --- a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -1330,8 +1330,8 @@ END: TAOS_RETURN(code); } -static int32_t buildResult(SSDataBlock *pBlock, int32_t *numOfRows, int64_t consumerId, const char *topic, - const char *cgroup, SArray *vgs, SArray *offsetRows) { +static int32_t buildResult(SSDataBlock *pBlock, int32_t *numOfRows, int64_t consumerId, const char* user, const char* fqdn, + const char *topic, const char *cgroup, SArray *vgs, SArray *offsetRows) { int32_t code = 0; int32_t sz = taosArrayGetSize(vgs); for (int32_t j = 0; j < sz; j++) { @@ -1355,7 +1355,7 @@ static int32_t buildResult(SSDataBlock *pBlock, int32_t *numOfRows, int64_t cons MND_TMQ_RETURN_CHECK(colDataSetVal(pColInfo, *numOfRows, (const char *)&pVgEp->vgId, false)); // consumer id - char consumerIdHex[32] = {0}; + char consumerIdHex[TSDB_CONSUMER_ID_LEN] = {0}; (void)sprintf(varDataVal(consumerIdHex), "0x%" PRIx64, consumerId); varDataSetLen(consumerIdHex, strlen(varDataVal(consumerIdHex))); @@ -1363,6 +1363,18 @@ static int32_t buildResult(SSDataBlock *pBlock, int32_t *numOfRows, int64_t cons MND_TMQ_NULL_CHECK(pColInfo); MND_TMQ_RETURN_CHECK(colDataSetVal(pColInfo, *numOfRows, (const char *)consumerIdHex, consumerId == -1)); + char userStr[TSDB_USER_LEN + VARSTR_HEADER_SIZE] = {0}; + if (user) STR_TO_VARSTR(userStr, user); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + MND_TMQ_NULL_CHECK(pColInfo); + MND_TMQ_RETURN_CHECK(colDataSetVal(pColInfo, *numOfRows, userStr, user == NULL)); + + char fqdnStr[TSDB_FQDN_LEN + VARSTR_HEADER_SIZE] = {0}; + if (fqdn) STR_TO_VARSTR(fqdnStr, fqdn); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + MND_TMQ_NULL_CHECK(pColInfo); + MND_TMQ_RETURN_CHECK(colDataSetVal(pColInfo, *numOfRows, fqdnStr, fqdn == NULL)); + mInfo("mnd show subscriptions: topic %s, consumer:0x%" PRIx64 " cgroup %s vgid %d", varDataVal(topic), consumerId, varDataVal(cgroup), pVgEp->vgId); @@ -1435,16 +1447,25 @@ int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock SMqConsumerEp *pConsumerEp = NULL; void *pIter = NULL; + while (1) { pIter = taosHashIterate(pSub->consumerHash, pIter); if (pIter == NULL) break; pConsumerEp = (SMqConsumerEp *)pIter; - MND_TMQ_RETURN_CHECK(buildResult(pBlock, &numOfRows, pConsumerEp->consumerId, topic, cgroup, pConsumerEp->vgs, + char *user = NULL; + char *fqdn = NULL; + SMqConsumerObj *pConsumer = sdbAcquire(pSdb, SDB_CONSUMER, &pConsumerEp->consumerId); + if (pConsumer != NULL) { + user = pConsumer->user; + fqdn = pConsumer->fqdn; + sdbRelease(pSdb, pConsumer); + } + MND_TMQ_RETURN_CHECK(buildResult(pBlock, &numOfRows, pConsumerEp->consumerId, user, fqdn, topic, cgroup, pConsumerEp->vgs, pConsumerEp->offsetRows)); } - MND_TMQ_RETURN_CHECK(buildResult(pBlock, &numOfRows, -1, topic, cgroup, pSub->unassignedVgs, pSub->offsetRows)); + MND_TMQ_RETURN_CHECK(buildResult(pBlock, &numOfRows, -1, NULL, NULL, topic, cgroup, pSub->unassignedVgs, pSub->offsetRows)); pBlock->info.rows = numOfRows; From 78f991b2c9db9aac551a4b0cbde767f6990da936 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Fri, 9 Aug 2024 16:56:35 +0800 Subject: [PATCH 005/181] 
fix:[TS-5156]case error --- tests/system-test/0-others/information_schema.py | 4 ++-- tests/system-test/7-tmq/tmq_primary_key.py | 10 +++++----- tests/system-test/7-tmq/tmq_taosx.py | 6 +++--- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/system-test/0-others/information_schema.py b/tests/system-test/0-others/information_schema.py index d7a5540544..616cd034ab 100644 --- a/tests/system-test/0-others/information_schema.py +++ b/tests/system-test/0-others/information_schema.py @@ -222,10 +222,10 @@ class TDTestCase: tdSql.query("select * from information_schema.ins_columns where db_name ='information_schema'") tdLog.info(len(tdSql.queryResult)) - tdSql.checkEqual(True, len(tdSql.queryResult) in range(261, 269)) + tdSql.checkEqual(True, len(tdSql.queryResult) in range(261, 271)) tdSql.query("select * from information_schema.ins_columns where db_name ='performance_schema'") - tdSql.checkEqual(54, len(tdSql.queryResult)) + tdSql.checkEqual(56, len(tdSql.queryResult)) def ins_dnodes_check(self): tdSql.execute('drop database if exists db2') diff --git a/tests/system-test/7-tmq/tmq_primary_key.py b/tests/system-test/7-tmq/tmq_primary_key.py index 80888ddbe6..13d6bd565d 100644 --- a/tests/system-test/7-tmq/tmq_primary_key.py +++ b/tests/system-test/7-tmq/tmq_primary_key.py @@ -85,7 +85,7 @@ class TDTestCase: time.sleep(4) # wait for heart beat tdSql.query(f'show subscriptions;') - sub = tdSql.getData(0, 4); + sub = tdSql.getData(0, 6); print(sub) if not sub.startswith("tsdb"): tdLog.exit(f"show subscriptions error") @@ -196,7 +196,7 @@ class TDTestCase: time.sleep(4) # wait for heart beat tdSql.query(f'show subscriptions;') - sub = tdSql.getData(0, 4); + sub = tdSql.getData(0, 6); print(sub) if not sub.startswith("tsdb"): tdLog.exit(f"show subscriptions error") @@ -306,7 +306,7 @@ class TDTestCase: time.sleep(4) # wait for heart beat tdSql.query(f'show subscriptions;') - sub = tdSql.getData(0, 4); + sub = tdSql.getData(0, 6); print(sub) if not sub.startswith("tsdb"): tdLog.exit(f"show subscriptions error") @@ -416,7 +416,7 @@ class TDTestCase: time.sleep(4) # wait for heart beat tdSql.query(f'show subscriptions;') - sub = tdSql.getData(0, 4); + sub = tdSql.getData(0, 6); print(sub) if not sub.startswith("tsdb"): tdLog.exit(f"show subscriptions error") @@ -517,7 +517,7 @@ class TDTestCase: consumer.close() tdSql.query(f'show subscriptions;') - sub = tdSql.getData(0, 4); + sub = tdSql.getData(0, 6); print(sub) if not sub.startswith("tsdb"): tdLog.exit(f"show subscriptions error") diff --git a/tests/system-test/7-tmq/tmq_taosx.py b/tests/system-test/7-tmq/tmq_taosx.py index d0e682cffb..4e90aefe7c 100644 --- a/tests/system-test/7-tmq/tmq_taosx.py +++ b/tests/system-test/7-tmq/tmq_taosx.py @@ -598,12 +598,12 @@ class TDTestCase: tdSql.query(f'show consumers') tdSql.checkRows(1) tdSql.checkData(0, 1, 'g1') - tdSql.checkData(0, 4, 't2') + tdSql.checkData(0, 6, 't2') tdSql.execute(f'drop consumer group g1 on t1') tdSql.query(f'show consumers') tdSql.checkRows(1) tdSql.checkData(0, 1, 'g1') - tdSql.checkData(0, 4, 't2') + tdSql.checkData(0, 6, 't2') tdSql.query(f'show subscriptions') tdSql.checkRows(1) @@ -641,7 +641,7 @@ class TDTestCase: tdSql.query(f'show consumers') tdSql.checkRows(1) tdSql.checkData(0, 1, 'g1') - tdSql.checkData(0, 4, 't2') + tdSql.checkData(0, 6, 't2') tdSql.execute(f'insert into t4 using st tags(3) values(now, 1)') try: From c68c1e320123c83136aba1408bc43e0842f991c3 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Mon, 12 Aug 2024 19:56:11 +0800 Subject: [PATCH 
006/181] test:set debug in address sanitizer model --- tests/ci/Dockerfile | 24 +++++++++++++----------- tests/parallel_test/container_build.sh | 12 +++++++----- 2 files changed, 20 insertions(+), 16 deletions(-) diff --git a/tests/ci/Dockerfile b/tests/ci/Dockerfile index 213570dfb2..8381f1bb57 100644 --- a/tests/ci/Dockerfile +++ b/tests/ci/Dockerfile @@ -1,9 +1,14 @@ FROM python:3.8 -RUN pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple -RUN pip3 install pandas psutil fabric2 requests faker simplejson toml pexpect tzlocal distro -RUN apt-get update -RUN apt-get install -y psmisc sudo tree libgeos-dev libjansson-dev libsnappy-dev liblzma-dev libz-dev zlib1g pkg-config build-essential valgrind \ - vim libjemalloc-dev openssh-server screen sshpass net-tools dirmngr gnupg apt-transport-https ca-certificates software-properties-common r-base iputils-ping +COPY sources.list /etc/apt/ +COPY id_ecdsa /root/.ssh/id_ecdsa +RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 3B4FE6ACC0B21F32 && apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 871920D1991BC93C +RUN apt-get update +RUN apt-get install -y locales psmisc sudo tree libgeos-dev libgflags2.2 libgflags-dev libgoogle-glog-dev libjansson-dev libsnappy-dev liblzma-dev libz-dev zlib1g pkg-config build-essential valgrind rsync vim libjemalloc-dev openssh-server screen sshpass net-tools dirmngr gnupg apt-transport-https ca-certificates software-properties-common r-base iputils-ping clang-tools-16 +RUN sed -i 's/# en_US.UTF-8/en_US.UTF-8/' /etc/locale.gen && locale-gen +RUN pip3 config set global.index-url http://admin:123456@192.168.0.212:3141/admin/dev/+simple/ +RUN pip3 config set global.trusted-host 192.168.0.212 +RUN pip3 install taospy==2.7.15 taos-ws-py==0.3.1 pandas psutil fabric2 requests faker simplejson toml pexpect tzlocal distro decorator loguru hyperloglog +ENV LANG=en_US.UTF-8 LANGUAGE=en_US.UTF-8 LC_ALL=en_US.UTF-8 RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E298A3A825C0D65DFD57CBB651716619E084DAB9 RUN add-apt-repository 'deb https://cloud.r-project.org/bin/linux/ubuntu focal-cran40/' RUN apt install -y r-base @@ -17,17 +22,16 @@ RUN apt-get install wget -y \ && apt-get update && apt-get install -y dotnet-sdk-5.0 && apt-get install -y dotnet-sdk-6.0 ADD node-v12.20.0-linux-x64.tar.gz /usr/local/ RUN sh -c "rm -f /etc/localtime;ln -s /usr/share/zoneinfo/Asia/Shanghai /etc/localtime;echo \"Asia/Shanghai\" >/etc/timezone" -COPY id_rsa /root/.ssh/id_rsa COPY .m2 /root/.m2 COPY .nuget /root/.nuget COPY .dotnet /root/.dotnet -COPY .cargo /root/.cargo COPY go /root/go ADD cmake-3.21.5-linux-x86_64.tar.gz /usr/local/ RUN echo " export RUSTUP_DIST_SERVER=\"https://rsproxy.cn\" " >> /root/.bashrc RUN echo " export RUSTUP_UPDATE_ROOT=\"https://rsproxy.cn/rustup\" " >> /root/.bashrc RUN curl https://sh.rustup.rs -o /tmp/rustup-init.sh RUN sh /tmp/rustup-init.sh -y +COPY .cargo/config /root/.cargo/config ENV PATH /usr/local/go/bin:/usr/local/node-v12.20.0-linux-x64/bin:/usr/local/apache-maven-3.8.4/bin:/usr/local/jdk1.8.0_144/bin:/usr/local/cmake-3.21.5-linux-x86_64/bin:/root/.cargo/bin:$PATH ENV JAVA_HOME /usr/local/jdk1.8.0_144 RUN go env -w GOPROXY=https://goproxy.cn @@ -39,10 +43,8 @@ RUN R CMD javareconf JAVA_HOME=${JAVA_HOME} JAVA=${JAVA_HOME}/bin/java JAVAC=${J RUN echo "install.packages(\"RJDBC\", repos=\"http://cran.us.r-project.org\")"|R --no-save COPY .gitconfig /root/.gitconfig RUN mkdir -p /run/sshd -COPY id_rsa.pub /root/.ssh/id_rsa.pub -COPY id_rsa.pub 
/root/.ssh/authorized_keys +COPY id_ecdsa.pub /root/.ssh/id_ecdsa.pub +COPY id_ecdsa.pub /root/.ssh/authorized_keys RUN pip3 uninstall -y taostest COPY repository/TDinternal /home/TDinternal -COPY repository/taos-connector-python /home/taos-connector-python -RUN sh -c "cd /home/taos-connector-python; pip3 install ." COPY setup.sh /home/setup.sh diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index 26cabad107..95f7693742 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -95,11 +95,13 @@ if [[ -d ${WORKDIR}/debugSan ]] ;then rm -rf ${WORKDIR}/debugSan fi -if [ "$(uname -m)" = "aarch64" ] ;then - CMAKE_BUILD_TYPE="-DCMAKE_BUILD_TYPE=Debug" -else - CMAKE_BUILD_TYPE="-DCMAKE_BUILD_TYPE=Release" -fi +CMAKE_BUILD_TYPE="-DCMAKE_BUILD_TYPE=Release" + +# if [ "$(uname -m)" = "aarch64" ] ;then +# CMAKE_BUILD_TYPE="-DCMAKE_BUILD_TYPE=Debug" +# else +# CMAKE_BUILD_TYPE="-DCMAKE_BUILD_TYPE=Release" +# fi mv ${REP_REAL_PATH}/debug ${WORKDIR}/debugNoSan date From 75543d952c861469019d63bb5b893778c750c794 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Tue, 13 Aug 2024 09:18:29 +0800 Subject: [PATCH 007/181] test:set debug in address sanitizer model --- tests/parallel_test/container_build.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index 95f7693742..30690fb335 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -83,7 +83,7 @@ docker run \ -v ${REP_REAL_PATH}/community/contrib/xml2/:${REP_DIR}/community/contrib/xml2 \ -v ${REP_REAL_PATH}/community/contrib/zlib/:${REP_DIR}/community/contrib/zlib \ -v ${REP_REAL_PATH}/community/contrib/zstd/:${REP_DIR}/community/contrib/zstd \ - --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_TAOSX=false -DJEMALLOC_ENABLED=0 -DCMAKE_EXPORT_COMPILE_COMMANDS=1 ;make -j 10|| exit 1" + --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. 
-DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_TAOSX=false -DCMAKE_BUILD_TYPE=Release -DJEMALLOC_ENABLED=0 -DCMAKE_EXPORT_COMPILE_COMMANDS=1 ;make -j 10|| exit 1" # -v ${REP_REAL_PATH}/community/contrib/jemalloc/:${REP_DIR}/community/contrib/jemalloc \ if [[ -d ${WORKDIR}/debugNoSan ]] ;then @@ -95,7 +95,7 @@ if [[ -d ${WORKDIR}/debugSan ]] ;then rm -rf ${WORKDIR}/debugSan fi -CMAKE_BUILD_TYPE="-DCMAKE_BUILD_TYPE=Release" +CMAKE_BUILD_TYPE="-DCMAKE_BUILD_TYPE=Debug" # if [ "$(uname -m)" = "aarch64" ] ;then # CMAKE_BUILD_TYPE="-DCMAKE_BUILD_TYPE=Debug" From 23b54e1df48a8586aa5238a95494083ed3ad7ee3 Mon Sep 17 00:00:00 2001 From: menshibin Date: Tue, 13 Aug 2024 10:39:43 +0800 Subject: [PATCH 008/181] modify alert sql --- docs/examples/python/tmq_native.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/examples/python/tmq_native.py b/docs/examples/python/tmq_native.py index 64e447384c..a913d945c9 100644 --- a/docs/examples/python/tmq_native.py +++ b/docs/examples/python/tmq_native.py @@ -163,7 +163,7 @@ if __name__ == "__main__": seek_offset(consumer) commit_offset(consumer) consumer.unsubscribe() - print("Consumer unsubscribed successfully."); + print("Consumer unsubscribed successfully.") except Exception as err: print(f"Failed to stmt consumer. ErrMessage:{err}") finally: From 933f34bc38aaf2e97cb275f29a2813fa529e8810 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Tue, 13 Aug 2024 15:11:46 +0800 Subject: [PATCH 009/181] test:commit cunit_test of taos-tools in unit-test --- tests/unit-test/test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit-test/test.sh b/tests/unit-test/test.sh index 71f5189551..21461bc6a5 100755 --- a/tests/unit-test/test.sh +++ b/tests/unit-test/test.sh @@ -40,7 +40,7 @@ pgrep taosd || taosd >> /dev/null 2>&1 & sleep 10 -ctest -j8 +ctest -E "cunit_test" -j8 ret=$? 
exit $ret From 7bf320607630e65cbd1fd7b9668f45576d4de70d Mon Sep 17 00:00:00 2001 From: dmchen Date: Tue, 13 Aug 2024 08:29:09 +0000 Subject: [PATCH 010/181] fix/TS-5262-conflict-return-error --- source/dnode/mnode/impl/src/mndSync.c | 9 ++++++--- source/dnode/mnode/impl/src/mndTrans.c | 5 ++++- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndSync.c b/source/dnode/mnode/impl/src/mndSync.c index 89f3c6e253..282ae677fe 100644 --- a/source/dnode/mnode/impl/src/mndSync.c +++ b/source/dnode/mnode/impl/src/mndSync.c @@ -183,7 +183,7 @@ int32_t mndProcessWriteMsg(SMnode *pMnode, SRpcMsg *pMsg, SFsmCbMeta *pMeta) { code = mndTransValidate(pMnode, pRaw); if (code != 0) { mError("trans:%d, failed to validate requested trans since %s", transId, terrstr()); - code = 0; + // code = 0; pMeta->code = code; goto _OUT; } @@ -191,7 +191,7 @@ int32_t mndProcessWriteMsg(SMnode *pMnode, SRpcMsg *pMsg, SFsmCbMeta *pMeta) { code = sdbWriteWithoutFree(pMnode->pSdb, pRaw); if (code != 0) { mError("trans:%d, failed to write to sdb since %s", transId, terrstr()); - code = 0; + // code = 0; pMeta->code = code; goto _OUT; } @@ -206,7 +206,10 @@ int32_t mndProcessWriteMsg(SMnode *pMnode, SRpcMsg *pMsg, SFsmCbMeta *pMeta) { if (pTrans->stage == TRN_STAGE_PREPARE) { bool continueExec = mndTransPerformPrepareStage(pMnode, pTrans, false); - if (!continueExec) goto _OUT; + if (!continueExec) { + if (terrno != 0) code = terrno; + goto _OUT; + } } mndTransRefresh(pMnode, pTrans); diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c index 7f8d63c8e0..ab236007a1 100644 --- a/source/dnode/mnode/impl/src/mndTrans.c +++ b/source/dnode/mnode/impl/src/mndTrans.c @@ -1569,6 +1569,7 @@ static int32_t mndTransExecuteUndoActionsSerial(SMnode *pMnode, STrans *pTrans, bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans, bool topHalf) { bool continueExec = true; int32_t code = 0; + terrno = 0; int32_t numOfActions = taosArrayGetSize(pTrans->prepareActions); if (numOfActions == 0) goto _OVER; @@ -1579,7 +1580,9 @@ bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans, bool topHalf) { STransAction *pAction = taosArrayGet(pTrans->prepareActions, action); code = mndTransExecSingleAction(pMnode, pTrans, pAction, topHalf); if (code != 0) { - mError("trans:%d, failed to execute prepare action:%d, numOfActions:%d", pTrans->id, action, numOfActions); + terrno = code; + mError("trans:%d, failed to execute prepare action:%d, numOfActions:%d, since %s", pTrans->id, action, + numOfActions, tstrerror(code)); return false; } } From 8358b9736b96e4824ce5b3e736d2d9e6966cc376 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Tue, 13 Aug 2024 16:52:41 +0800 Subject: [PATCH 011/181] test:add release building tests and modify debug in unit-test --- tests/parallel_test/container_build.sh | 45 ++++++++++++++++++++++++-- 1 file changed, 43 insertions(+), 2 deletions(-) diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index 30690fb335..d99fbafcbd 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -83,7 +83,7 @@ docker run \ -v ${REP_REAL_PATH}/community/contrib/xml2/:${REP_DIR}/community/contrib/xml2 \ -v ${REP_REAL_PATH}/community/contrib/zlib/:${REP_DIR}/community/contrib/zlib \ -v ${REP_REAL_PATH}/community/contrib/zstd/:${REP_DIR}/community/contrib/zstd \ - --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm 
-rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_TAOSX=false -DCMAKE_BUILD_TYPE=Release -DJEMALLOC_ENABLED=0 -DCMAKE_EXPORT_COMPILE_COMMANDS=1 ;make -j 10|| exit 1" + --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_TAOSX=false -DJEMALLOC_ENABLED=0 -DCMAKE_EXPORT_COMPILE_COMMANDS=1 ;make -j|| exit 1" # -v ${REP_REAL_PATH}/community/contrib/jemalloc/:${REP_DIR}/community/contrib/jemalloc \ if [[ -d ${WORKDIR}/debugNoSan ]] ;then @@ -94,6 +94,10 @@ if [[ -d ${WORKDIR}/debugSan ]] ;then echo "delete ${WORKDIR}/debugSan" rm -rf ${WORKDIR}/debugSan fi +if [[ -d ${WORKDIR}/debugRelease ]] ;then + echo "delete ${WORKDIR}/debugRelease" + rm -rf ${WORKDIR}/debugRelease +fi CMAKE_BUILD_TYPE="-DCMAKE_BUILD_TYPE=Debug" @@ -135,10 +139,47 @@ docker run \ -v ${REP_REAL_PATH}/community/contrib/xml2/:${REP_DIR}/community/contrib/xml2 \ -v ${REP_REAL_PATH}/community/contrib/zlib/:${REP_DIR}/community/contrib/zlib \ -v ${REP_REAL_PATH}/community/contrib/zstd/:${REP_DIR}/community/contrib/zstd \ - --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=false -DWEBSOCKET=true -DBUILD_SANITIZER=1 -DTOOLS_SANITIZE=true $CMAKE_BUILD_TYPE -DTOOLS_BUILD_TYPE=Debug -DBUILD_TAOSX=false -DJEMALLOC_ENABLED=0;make -j 10|| exit 1 " + --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. 
-DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=false -DWEBSOCKET=true -DBUILD_SANITIZER=1 -DTOOLS_SANITIZE=true $CMAKE_BUILD_TYPE -DTOOLS_BUILD_TYPE=Debug -DBUILD_TAOSX=false -DJEMALLOC_ENABLED=0;make -j|| exit 1 " mv ${REP_REAL_PATH}/debug ${WORKDIR}/debugSan +date +docker run \ + -v $REP_MOUNT_PARAM \ + -v /root/.cargo/registry:/root/.cargo/registry \ + -v /root/.cargo/git:/root/.cargo/git \ + -v /root/go/pkg/mod:/root/go/pkg/mod \ + -v /root/.cache/go-build:/root/.cache/go-build \ + -v /root/.cos-local.1:/root/.cos-local.2 \ + -v ${REP_REAL_PATH}/enterprise/contrib/grant-lib:${REP_DIR}/enterprise/contrib/grant-lib \ + -v ${REP_REAL_PATH}/community/tools/taosadapter:${REP_DIR}/community/tools/taosadapter \ + -v ${REP_REAL_PATH}/community/tools/taos-tools:${REP_DIR}/community/tools/taos-tools \ + -v ${REP_REAL_PATH}/community/tools/taosws-rs:${REP_DIR}/community/tools/taosws-rs \ + -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ + -v ${REP_REAL_PATH}/community/contrib/apr/:${REP_DIR}/community/contrib/apr \ + -v ${REP_REAL_PATH}/community/contrib/apr-util/:${REP_DIR}/community/contrib/apr-util \ + -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ + -v ${REP_REAL_PATH}/community/contrib/cpp-stub/:${REP_DIR}/community/contrib/cpp-stub \ + -v ${REP_REAL_PATH}/community/contrib/curl/:${REP_DIR}/community/contrib/curl \ + -v ${REP_REAL_PATH}/community/contrib/curl2/:${REP_DIR}/community/contrib/curl2 \ + -v ${REP_REAL_PATH}/community/contrib/geos/:${REP_DIR}/community/contrib/geos \ + -v ${REP_REAL_PATH}/community/contrib/googletest/:${REP_DIR}/community/contrib/googletest \ + -v ${REP_REAL_PATH}/community/contrib/libs3/:${REP_DIR}/community/contrib/libs3 \ + -v ${REP_REAL_PATH}/community/contrib/libuv/:${REP_DIR}/community/contrib/libuv \ + -v ${REP_REAL_PATH}/community/contrib/lz4/:${REP_DIR}/community/contrib/lz4 \ + -v ${REP_REAL_PATH}/community/contrib/lzma2/:${REP_DIR}/community/contrib/lzma2 \ + -v ${REP_REAL_PATH}/community/contrib/mxml/:${REP_DIR}/community/contrib/mxml \ + -v ${REP_REAL_PATH}/community/contrib/openssl/:${REP_DIR}/community/contrib/openssl \ + -v ${REP_REAL_PATH}/community/contrib/pcre2/:${REP_DIR}/community/contrib/pcre2 \ + -v ${REP_REAL_PATH}/community/contrib/xml2/:${REP_DIR}/community/contrib/xml2 \ + -v ${REP_REAL_PATH}/community/contrib/zlib/:${REP_DIR}/community/contrib/zlib \ + -v ${REP_REAL_PATH}/community/contrib/zstd/:${REP_DIR}/community/contrib/zstd \ + --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=false -DWEBSOCKET=true -DCMAKE_BUILD_TYPE=Release -DBUILD_TAOSX=false -DJEMALLOC_ENABLED=0;make -j || exit 1 " + +mv ${REP_REAL_PATH}/debug ${WORKDIR}/debugRelease + + + ret=$? 
exit $ret From 413a57f4cbf1863fdd0fe5d651624cf80e7a3c47 Mon Sep 17 00:00:00 2001 From: kailixu Date: Tue, 13 Aug 2024 18:55:48 +0800 Subject: [PATCH 012/181] fix(query): retry when column dropped in latest schema --- source/dnode/vnode/src/tsdb/tsdbDataFileRW.c | 4 +++- source/dnode/vnode/src/tsdb/tsdbSttFileRW.c | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbDataFileRW.c b/source/dnode/vnode/src/tsdb/tsdbDataFileRW.c index 2665cc1aaf..e6720466ce 100644 --- a/source/dnode/vnode/src/tsdb/tsdbDataFileRW.c +++ b/source/dnode/vnode/src/tsdb/tsdbDataFileRW.c @@ -463,7 +463,9 @@ int32_t tsdbDataFileReadBlockDataByColumn(SDataFileReader *reader, const SBrinRe if (cid < blockCol.cid) { const STColumn *tcol = tTSchemaSearchColumn(pTSchema, cid); - ASSERT(tcol); + if (tcol == NULL) { + TAOS_CHECK_GOTO(TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER, &lino, _exit); + } SBlockCol none = { .cid = cid, .type = tcol->type, diff --git a/source/dnode/vnode/src/tsdb/tsdbSttFileRW.c b/source/dnode/vnode/src/tsdb/tsdbSttFileRW.c index 6e24311017..3d30ea0d93 100644 --- a/source/dnode/vnode/src/tsdb/tsdbSttFileRW.c +++ b/source/dnode/vnode/src/tsdb/tsdbSttFileRW.c @@ -309,7 +309,9 @@ int32_t tsdbSttFileReadBlockDataByColumn(SSttFileReader *reader, const SSttBlk * if (cid < blockCol.cid) { const STColumn *tcol = tTSchemaSearchColumn(pTSchema, cid); - ASSERT(tcol); + if (tcol == NULL) { + TAOS_CHECK_GOTO(TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER, &lino, _exit); + } SBlockCol none = { .cid = cid, .type = tcol->type, From 61f0aec698e5b0f62a6f573a69ea209a944abf1a Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Wed, 14 Aug 2024 10:05:03 +0800 Subject: [PATCH 013/181] fix: response message memory leak --- source/client/src/clientMsgHandler.c | 11 ++++++++++- source/client/src/clientTmq.c | 28 ++++++++++++++++++++++------ 2 files changed, 32 insertions(+), 7 deletions(-) diff --git a/source/client/src/clientMsgHandler.c b/source/client/src/clientMsgHandler.c index cc1ed7f3fa..4dea9c17b0 100644 --- a/source/client/src/clientMsgHandler.c +++ b/source/client/src/clientMsgHandler.c @@ -297,6 +297,9 @@ int32_t processUseDbRsp(void* param, SDataBuf* pMsg, int32_t code) { } if (strlen(usedbRsp.db) == 0) { + taosMemoryFree(pMsg->pData); + taosMemoryFree(pMsg->pEpSet); + if (usedbRsp.errCode != 0) { return usedbRsp.errCode; } else { @@ -366,9 +369,15 @@ int32_t processUseDbRsp(void* param, SDataBuf* pMsg, int32_t code) { } int32_t processCreateSTableRsp(void* param, SDataBuf* pMsg, int32_t code) { - if (pMsg == NULL || param == NULL) { + if (pMsg == NULL) { return TSDB_CODE_TSC_INVALID_INPUT; } + if (param == NULL) { + taosMemoryFree(pMsg->pEpSet); + taosMemoryFree(pMsg->pData); + return TSDB_CODE_TSC_INVALID_INPUT; + } + SRequestObj* pRequest = param; if (code != TSDB_CODE_SUCCESS) { diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index 8f35a2fad1..73d21c0e22 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -823,15 +823,17 @@ void tmqAssignDelayedCommitTask(void* param, void* tmrId) { int32_t tmqHbCb(void* param, SDataBuf* pMsg, int32_t code) { if (code != 0){ - return code; + goto _return; } if (pMsg == NULL || param == NULL) { - return TSDB_CODE_INVALID_PARA; + code = TSDB_CODE_INVALID_PARA; + goto _return; } + SMqHbRsp rsp = {0}; code = tDeserializeSMqHbRsp(pMsg->pData, pMsg->len, &rsp); if (code != 0) { - return code; + goto _return; } int64_t refId = (int64_t)param; @@ -854,10 +856,14 @@ int32_t tmqHbCb(void* param, 
SDataBuf* pMsg, int32_t code) { taosWUnLockLatch(&tmq->lock); (void)taosReleaseRef(tmqMgmt.rsetId, refId); } + tDestroySMqHbRsp(&rsp); + +_return: + taosMemoryFree(pMsg->pData); taosMemoryFree(pMsg->pEpSet); - return 0; + return code; } void tmqSendHbReq(void* param, void* tmrId) { @@ -1504,7 +1510,12 @@ static void setVgIdle(tmq_t* tmq, char* topicName, int32_t vgId) { int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) { tmq_t* tmq = NULL; SMqPollCbParam* pParam = (SMqPollCbParam*)param; - if (pParam == NULL || pMsg == NULL) { + if (pMsg == NULL) { + return TSDB_CODE_TSC_INTERNAL_ERROR; + } + if (pParam == NULL) { + taosMemoryFreeClear(pMsg->pData); + taosMemoryFreeClear(pMsg->pEpSet); return TSDB_CODE_TSC_INTERNAL_ERROR; } int64_t refId = pParam->refId; @@ -1512,6 +1523,8 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) { uint64_t requestId = pParam->requestId; tmq = taosAcquireRef(tmqMgmt.rsetId, refId); if (tmq == NULL) { + taosMemoryFreeClear(pMsg->pData); + taosMemoryFreeClear(pMsg->pEpSet); return TSDB_CODE_TMQ_CONSUMER_CLOSED; } @@ -2809,7 +2822,10 @@ end: } int32_t askEpCb(void* param, SDataBuf* pMsg, int32_t code) { - if (param == NULL) return code; + if (param == NULL) { + goto FAIL; + } + SMqAskEpCbParam* pParam = (SMqAskEpCbParam*)param; tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, pParam->refId); if (tmq == NULL) { From ae6b08d13731db48b82e251ad1115ab4e649c4f3 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Wed, 14 Aug 2024 10:32:49 +0800 Subject: [PATCH 014/181] test:add release building tests and modify debug in unit-test --- tests/parallel_test/container_build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index d99fbafcbd..862aef65f4 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -139,7 +139,7 @@ docker run \ -v ${REP_REAL_PATH}/community/contrib/xml2/:${REP_DIR}/community/contrib/xml2 \ -v ${REP_REAL_PATH}/community/contrib/zlib/:${REP_DIR}/community/contrib/zlib \ -v ${REP_REAL_PATH}/community/contrib/zstd/:${REP_DIR}/community/contrib/zstd \ - --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=false -DWEBSOCKET=true -DBUILD_SANITIZER=1 -DTOOLS_SANITIZE=true $CMAKE_BUILD_TYPE -DTOOLS_BUILD_TYPE=Debug -DBUILD_TAOSX=false -DJEMALLOC_ENABLED=0;make -j|| exit 1 " + --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. 
-DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=false -DWEBSOCKET=true -DBUILD_SANITIZER=1 -DTOOLS_SANITIZE=true -DCMAKE_BUILD_TYPE=RelWithDebInfo -DTOOLS_BUILD_TYPE=Debug -DBUILD_TAOSX=false -DJEMALLOC_ENABLED=0;make -j|| exit 1 " mv ${REP_REAL_PATH}/debug ${WORKDIR}/debugSan From fe07324d72bf289d27764e9d664e988817f9f2bc Mon Sep 17 00:00:00 2001 From: kailixu Date: Wed, 14 Aug 2024 12:42:46 +0800 Subject: [PATCH 015/181] fix(tsdb): return error code when loading data --- source/dnode/vnode/src/tsdb/tsdbMergeTree.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c index e55ede560e..e9761d8c87 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c +++ b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c @@ -1004,6 +1004,10 @@ int32_t tMergeTreeOpen2(SMergeTree *pMTree, SMergeTreeConf *pConf, SSttDataInfoF pSttDataInfo->numOfRows += numOfRows; } } else { + if(terrno != TSDB_CODE_SUCCESS) { + code = terrno; + goto _end; + } if (!pMTree->ignoreEarlierTs) { pMTree->ignoreEarlierTs = pIter->ignoreEarlierTs; } From cec7643ea825920081f491e66be52f4ae3145e5d Mon Sep 17 00:00:00 2001 From: kailixu Date: Wed, 14 Aug 2024 13:57:33 +0800 Subject: [PATCH 016/181] enh: code optimization --- source/dnode/vnode/src/tsdb/tsdbDataFileRW.c | 4 +--- source/dnode/vnode/src/tsdb/tsdbMergeTree.c | 5 +---- source/dnode/vnode/src/tsdb/tsdbSttFileRW.c | 4 +--- 3 files changed, 3 insertions(+), 10 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbDataFileRW.c b/source/dnode/vnode/src/tsdb/tsdbDataFileRW.c index e6720466ce..a9c7ea3961 100644 --- a/source/dnode/vnode/src/tsdb/tsdbDataFileRW.c +++ b/source/dnode/vnode/src/tsdb/tsdbDataFileRW.c @@ -463,9 +463,7 @@ int32_t tsdbDataFileReadBlockDataByColumn(SDataFileReader *reader, const SBrinRe if (cid < blockCol.cid) { const STColumn *tcol = tTSchemaSearchColumn(pTSchema, cid); - if (tcol == NULL) { - TAOS_CHECK_GOTO(TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER, &lino, _exit); - } + TSDB_CHECK_NULL(tcol, code,lino,_exit,TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER); SBlockCol none = { .cid = cid, .type = tcol->type, diff --git a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c index e9761d8c87..9d99c51587 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c +++ b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c @@ -1004,10 +1004,7 @@ int32_t tMergeTreeOpen2(SMergeTree *pMTree, SMergeTreeConf *pConf, SSttDataInfoF pSttDataInfo->numOfRows += numOfRows; } } else { - if(terrno != TSDB_CODE_SUCCESS) { - code = terrno; - goto _end; - } + TAOS_CHECK_GOTO(terrno, NULL, _end); if (!pMTree->ignoreEarlierTs) { pMTree->ignoreEarlierTs = pIter->ignoreEarlierTs; } diff --git a/source/dnode/vnode/src/tsdb/tsdbSttFileRW.c b/source/dnode/vnode/src/tsdb/tsdbSttFileRW.c index 3d30ea0d93..d3990c645c 100644 --- a/source/dnode/vnode/src/tsdb/tsdbSttFileRW.c +++ b/source/dnode/vnode/src/tsdb/tsdbSttFileRW.c @@ -309,9 +309,7 @@ int32_t tsdbSttFileReadBlockDataByColumn(SSttFileReader *reader, const SSttBlk * if (cid < blockCol.cid) { const STColumn *tcol = tTSchemaSearchColumn(pTSchema, cid); - if (tcol == NULL) { - TAOS_CHECK_GOTO(TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER, &lino, _exit); - } + TSDB_CHECK_NULL(tcol, code,lino,_exit,TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER); SBlockCol none = { .cid = cid, .type = tcol->type, From f6ad63ac14516b7a904f9746744bc8980457f830 Mon Sep 17 00:00:00 2001 From: kailixu Date: Wed, 14 Aug 2024 13:59:33 +0800 Subject: [PATCH 017/181] enh: 
code optimization --- source/dnode/vnode/src/tsdb/tsdbDataFileRW.c | 2 +- source/dnode/vnode/src/tsdb/tsdbSttFileRW.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbDataFileRW.c b/source/dnode/vnode/src/tsdb/tsdbDataFileRW.c index a9c7ea3961..7e7ea59a5b 100644 --- a/source/dnode/vnode/src/tsdb/tsdbDataFileRW.c +++ b/source/dnode/vnode/src/tsdb/tsdbDataFileRW.c @@ -463,7 +463,7 @@ int32_t tsdbDataFileReadBlockDataByColumn(SDataFileReader *reader, const SBrinRe if (cid < blockCol.cid) { const STColumn *tcol = tTSchemaSearchColumn(pTSchema, cid); - TSDB_CHECK_NULL(tcol, code,lino,_exit,TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER); + TSDB_CHECK_NULL(tcol, code, lino, _exit, TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER); SBlockCol none = { .cid = cid, .type = tcol->type, diff --git a/source/dnode/vnode/src/tsdb/tsdbSttFileRW.c b/source/dnode/vnode/src/tsdb/tsdbSttFileRW.c index d3990c645c..e3d7f9d45f 100644 --- a/source/dnode/vnode/src/tsdb/tsdbSttFileRW.c +++ b/source/dnode/vnode/src/tsdb/tsdbSttFileRW.c @@ -309,7 +309,7 @@ int32_t tsdbSttFileReadBlockDataByColumn(SSttFileReader *reader, const SSttBlk * if (cid < blockCol.cid) { const STColumn *tcol = tTSchemaSearchColumn(pTSchema, cid); - TSDB_CHECK_NULL(tcol, code,lino,_exit,TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER); + TSDB_CHECK_NULL(tcol, code, lino, _exit, TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER); SBlockCol none = { .cid = cid, .type = tcol->type, From 73265490f0c194023b950714789180e99017a753 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Wed, 14 Aug 2024 14:11:21 +0800 Subject: [PATCH 018/181] tetst: test for checking return value --- source/common/src/cos.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/common/src/cos.c b/source/common/src/cos.c index 335c654acd..bc92527108 100644 --- a/source/common/src/cos.c +++ b/source/common/src/cos.c @@ -54,6 +54,8 @@ int32_t s3Begin() { void s3End() { S3_deinitialize(); } + + int32_t s3Init() { TAOS_RETURN(TSDB_CODE_SUCCESS); /*s3Begin();*/ } void s3CleanUp() { /*s3End();*/ From e08d7240efe4986bb4ba8895dc3adf83aef07029 Mon Sep 17 00:00:00 2001 From: t_max <1172915550@qq.com> Date: Wed, 14 Aug 2024 14:14:17 +0800 Subject: [PATCH 019/181] fix: check transactions and reset query cache before running the example --- tests/docs-examples-test/csharp.sh | 65 ++++++++++++++++++++++++------ tests/docs-examples-test/go.sh | 30 ++++++++++++++ 2 files changed, 82 insertions(+), 13 deletions(-) diff --git a/tests/docs-examples-test/csharp.sh b/tests/docs-examples-test/csharp.sh index 0805b425b4..497cb074d9 100644 --- a/tests/docs-examples-test/csharp.sh +++ b/tests/docs-examples-test/csharp.sh @@ -2,6 +2,32 @@ set -e +check_transactions() { + for i in {1..30} + do + output=$(taos -s "show transactions;") + if [[ $output == *"Query OK, 0 row(s)"* ]]; then + echo "Success: No transactions are in progress." + return 0 + fi + sleep 1 + done + + echo "Error: Transactions are still in progress after 30 attempts." + return 1 +} + +reset_cache() { + response=$(curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql' --data 'reset query cache') + + if [[ $response == \{\"code\":0* ]]; then + echo "Success: Query cache reset successfully." + else + echo "Error: Failed to reset query cache. 
Response: $response" + return 1 + fi +} + pgrep taosd || taosd >> /dev/null 2>&1 & pgrep taosadapter || taosadapter >> /dev/null 2>&1 & cd ../../docs/examples/csharp @@ -10,56 +36,69 @@ dotnet run --project connect/connect.csproj dotnet run --project wsConnect/wsConnect.csproj taos -s "drop database if exists test" -sleep 1 +check_transactions || exit 1 +reset_cache || exit 1 dotnet run --project influxdbLine/influxdbline.csproj taos -s "drop database if exists test" -sleep 1 +check_transactions || exit 1 +reset_cache || exit 1 dotnet run --project optsTelnet/optstelnet.csproj taos -s "drop database if exists test" -sleep 1 +check_transactions || exit 1 +reset_cache || exit 1 dotnet run --project optsJSON/optsJSON.csproj # query taos -s "drop database if exists power" -sleep 1 +check_transactions || exit 1 +reset_cache || exit 1 dotnet run --project wsInsert/wsInsert.csproj dotnet run --project wsQuery/wsQuery.csproj taos -s "drop database if exists power" -sleep 1 +check_transactions || exit 1 +reset_cache || exit 1 dotnet run --project sqlInsert/sqlinsert.csproj dotnet run --project query/query.csproj # stmt taos -s "drop database if exists power" -sleep 1 +check_transactions || exit 1 +reset_cache || exit 1 dotnet run --project wsStmt/wsStmt.csproj taos -s "drop database if exists power" -sleep 1 +check_transactions || exit 1 +reset_cache || exit 1 dotnet run --project stmtInsert/stmtinsert.csproj # schemaless taos -s "drop database if exists power" -sleep 1 +check_transactions || exit 1 +reset_cache || exit 1 dotnet run --project wssml/wssml.csproj taos -s "drop database if exists power" -sleep 1 +check_transactions || exit 1 +reset_cache || exit 1 dotnet run --project nativesml/nativesml.csproj # subscribe taos -s "drop topic if exists topic_meters" -sleep 1 +check_transactions || exit 1 +reset_cache || exit 1 taos -s "drop database if exists power" -sleep 1 +check_transactions || exit 1 +reset_cache || exit 1 dotnet run --project wssubscribe/wssubscribe.csproj taos -s "drop topic if exists topic_meters" -sleep 1 +check_transactions || exit 1 +reset_cache || exit 1 taos -s "drop database if exists power" -sleep 1 +check_transactions || exit 1 +reset_cache || exit 1 dotnet run --project subscribe/subscribe.csproj diff --git a/tests/docs-examples-test/go.sh b/tests/docs-examples-test/go.sh index ea19d3212a..606265435d 100644 --- a/tests/docs-examples-test/go.sh +++ b/tests/docs-examples-test/go.sh @@ -17,6 +17,17 @@ check_transactions() { return 1 } +reset_cache() { + response=$(curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql' --data 'reset query cache') + + if [[ $response == \{\"code\":0* ]]; then + echo "Success: Query cache reset successfully." + else + echo "Error: Failed to reset query cache. 
Response: $response" + return 1 + fi +} + taosd >>/dev/null 2>&1 & taosadapter >>/dev/null 2>&1 & sleep 1 @@ -31,64 +42,83 @@ go run ./connect/connpool/main.go go run ./connect/wsexample/main.go taos -s "drop database if exists power" +check_transactions || exit 1 +reset_cache || exit 1 go run ./sqlquery/main.go taos -s "drop database if exists power" check_transactions || exit 1 +reset_cache || exit 1 go run ./queryreqid/main.go taos -s "drop database if exists power" check_transactions || exit 1 +reset_cache || exit 1 go run ./stmt/native/main.go taos -s "drop database if exists power" check_transactions || exit 1 +reset_cache || exit 1 go run ./stmt/ws/main.go taos -s "drop database if exists power" check_transactions || exit 1 +reset_cache || exit 1 sleep 3 go run ./schemaless/native/main.go taos -s "drop database if exists power" check_transactions || exit 1 +reset_cache || exit 1 go run ./schemaless/ws/main.go taos -s "drop topic if exists topic_meters" check_transactions || exit 1 +reset_cache || exit 1 taos -s "drop database if exists power" check_transactions || exit 1 +reset_cache || exit 1 go run ./tmq/native/main.go taos -s "drop topic if exists topic_meters" check_transactions || exit 1 +reset_cache || exit 1 taos -s "drop database if exists power" check_transactions || exit 1 +reset_cache || exit 1 go run ./tmq/ws/main.go taos -s "drop database if exists test" check_transactions || exit 1 +reset_cache || exit 1 go run ./insert/json/main.go taos -s "drop database if exists test" check_transactions || exit 1 +reset_cache || exit 1 go run ./insert/line/main.go taos -s "drop topic if exists topic_meters" check_transactions || exit 1 +reset_cache || exit 1 taos -s "drop database if exists power" check_transactions || exit 1 +reset_cache || exit 1 go run ./insert/sql/main.go taos -s "drop database if exists power" check_transactions || exit 1 +reset_cache || exit 1 go run ./insert/stmt/main.go taos -s "drop database if exists test" check_transactions || exit 1 +reset_cache || exit 1 go run ./insert/telnet/main.go go run ./query/sync/main.go taos -s "drop topic if exists example_tmq_topic" check_transactions || exit 1 +reset_cache || exit 1 taos -s "drop database if exists example_tmq" check_transactions || exit 1 +reset_cache || exit 1 go run ./sub/main.go From 9b490d7a76cabbc2745a68d68f6e7281397812ab Mon Sep 17 00:00:00 2001 From: sima Date: Wed, 14 Aug 2024 14:37:32 +0800 Subject: [PATCH 020/181] fix:[TD-31439] Reset groupNum and unitNum when malloc failed. 
--- source/libs/scalar/src/filter.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c index e9efbc638d..17dfd0aa75 100644 --- a/source/libs/scalar/src/filter.c +++ b/source/libs/scalar/src/filter.c @@ -2769,6 +2769,7 @@ int32_t filterConvertGroupFromArray(SFilterInfo *info, SArray *group) { if (info->groupNum > 0) { info->groups = taosMemoryCalloc(info->groupNum, sizeof(*info->groups)); if (info->groups == NULL) { + info->groupNum = 0; FLT_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); } } @@ -2780,6 +2781,7 @@ int32_t filterConvertGroupFromArray(SFilterInfo *info, SArray *group) { } pg->unitFlags = taosMemoryCalloc(pg->unitNum, sizeof(*pg->unitFlags)); if (pg->unitFlags == NULL) { + pg->unitNum = 0; FLT_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); } info->groups[i] = *pg; From 87f50f01e4049f10b47eba0b960f7802f18dbc9f Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Wed, 14 Aug 2024 14:39:24 +0800 Subject: [PATCH 021/181] tetst: test for checking return value --- tests/ci/scan_file_path.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/scan_file_path.py b/tests/ci/scan_file_path.py index aff94158b8..ab3c08cc2d 100644 --- a/tests/ci/scan_file_path.py +++ b/tests/ci/scan_file_path.py @@ -189,7 +189,7 @@ if __name__ == "__main__": try: stdout, stderr = command_executor.execute(cmd) #if "error" in stderr: - # print(stderr) + print(stderr) lines = stdout.split("\n") if lines[-2].endswith("matches.") or lines[-2].endswith("match."): match_num = int(lines[-2].split(" ")[0]) From 19dcc5bddd3c55cd2d9765dbdfeacd85c41a4ec1 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Wed, 14 Aug 2024 14:50:57 +0800 Subject: [PATCH 022/181] fix: error log issue --- source/client/src/clientTmq.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index 73d21c0e22..929debf16d 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -1509,6 +1509,10 @@ static void setVgIdle(tmq_t* tmq, char* topicName, int32_t vgId) { int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) { tmq_t* tmq = NULL; + SMqPollRspWrapper* pRspWrapper = NULL; + int8_t rspType = 0; + int32_t vgId = 0; + uint64_t requestId = 0; SMqPollCbParam* pParam = (SMqPollCbParam*)param; if (pMsg == NULL) { return TSDB_CODE_TSC_INTERNAL_ERROR; @@ -1519,8 +1523,8 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) { return TSDB_CODE_TSC_INTERNAL_ERROR; } int64_t refId = pParam->refId; - int32_t vgId = pParam->vgId; - uint64_t requestId = pParam->requestId; + vgId = pParam->vgId; + requestId = pParam->requestId; tmq = taosAcquireRef(tmqMgmt.rsetId, refId); if (tmq == NULL) { taosMemoryFreeClear(pMsg->pData); @@ -1528,7 +1532,6 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) { return TSDB_CODE_TMQ_CONSUMER_CLOSED; } - SMqPollRspWrapper* pRspWrapper = NULL; int32_t ret = taosAllocateQitem(sizeof(SMqPollRspWrapper), DEF_QITEM, 0, (void**)&pRspWrapper); if (ret) { code = ret; @@ -1559,7 +1562,7 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) { ASSERT(msgEpoch == clientEpoch); // handle meta rsp - int8_t rspType = ((SMqRspHead*)pMsg->pData)->mqMsgType; + rspType = ((SMqRspHead*)pMsg->pData)->mqMsgType; pRspWrapper->tmqRspType = rspType; pRspWrapper->reqId = requestId; pRspWrapper->pEpset = pMsg->pEpSet; @@ -1627,7 +1630,7 @@ END: } int32_t total = taosQueueItemSize(tmq->mqueue); tscDebug("consumer:0x%" PRIx64 " put poll res into 
mqueue, type:%d, vgId:%d, total in queue:%d, reqId:0x%" PRIx64, - tmq->consumerId, rspType, vgId, total, requestId); + tmq ? tmq->consumerId : 0, rspType, vgId, total, requestId); if (tmq) (void)tsem2_post(&tmq->rspSem); if (pMsg) taosMemoryFreeClear(pMsg->pData); From 109418e57a3f299cb0646d1fe85d59ec04f8b2ce Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Wed, 14 Aug 2024 15:07:39 +0800 Subject: [PATCH 023/181] tetst: test for checking return value --- tests/ci/scan_file_path.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/ci/scan_file_path.py b/tests/ci/scan_file_path.py index ab3c08cc2d..81862363a0 100644 --- a/tests/ci/scan_file_path.py +++ b/tests/ci/scan_file_path.py @@ -122,7 +122,7 @@ def scan_files_path(source_file_path): for file in files: if any(item in root for item in scan_dir_list): file_path = os.path.join(root, file) - if (file_path.endswith(".c") or file_name.endswith(".h") or file_path.endswith(".cpp")) and all(item not in file_path for item in scan_skip_file_list): + if (file_path.endswith(".c") or file_path.endswith(".h") or file_path.endswith(".cpp")) and all(item not in file_path for item in scan_skip_file_list): all_file_path.append(file_path) logger.info("Found %s files" % len(all_file_path)) @@ -184,7 +184,7 @@ if __name__ == "__main__": # os.makedirs(scan_result_path) for file in all_file_path: - cmd = f"clang-query-10 -p {compile_commands_path} {file} -f {clang_scan_rules_path}" + cmd = f"clang-query-16 -p {compile_commands_path} {file} -f {clang_scan_rules_path}" logger.debug(f"cmd:{cmd}") try: stdout, stderr = command_executor.execute(cmd) From ca7b1b21c77adca64032635750a3c9d9426e0fd2 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Wed, 14 Aug 2024 15:10:14 +0800 Subject: [PATCH 024/181] test:add release building tests and modify debug in unit-test --- tests/parallel_test/container_build.sh | 66 +++++++++++++------------- 1 file changed, 32 insertions(+), 34 deletions(-) diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index 862aef65f4..923a65ec3a 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -99,8 +99,6 @@ if [[ -d ${WORKDIR}/debugRelease ]] ;then rm -rf ${WORKDIR}/debugRelease fi -CMAKE_BUILD_TYPE="-DCMAKE_BUILD_TYPE=Debug" - # if [ "$(uname -m)" = "aarch64" ] ;then # CMAKE_BUILD_TYPE="-DCMAKE_BUILD_TYPE=Debug" # else @@ -144,39 +142,39 @@ docker run \ mv ${REP_REAL_PATH}/debug ${WORKDIR}/debugSan date -docker run \ - -v $REP_MOUNT_PARAM \ - -v /root/.cargo/registry:/root/.cargo/registry \ - -v /root/.cargo/git:/root/.cargo/git \ - -v /root/go/pkg/mod:/root/go/pkg/mod \ - -v /root/.cache/go-build:/root/.cache/go-build \ - -v /root/.cos-local.1:/root/.cos-local.2 \ - -v ${REP_REAL_PATH}/enterprise/contrib/grant-lib:${REP_DIR}/enterprise/contrib/grant-lib \ - -v ${REP_REAL_PATH}/community/tools/taosadapter:${REP_DIR}/community/tools/taosadapter \ - -v ${REP_REAL_PATH}/community/tools/taos-tools:${REP_DIR}/community/tools/taos-tools \ - -v ${REP_REAL_PATH}/community/tools/taosws-rs:${REP_DIR}/community/tools/taosws-rs \ - -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ - -v ${REP_REAL_PATH}/community/contrib/apr/:${REP_DIR}/community/contrib/apr \ - -v ${REP_REAL_PATH}/community/contrib/apr-util/:${REP_DIR}/community/contrib/apr-util \ - -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ - -v 
${REP_REAL_PATH}/community/contrib/cpp-stub/:${REP_DIR}/community/contrib/cpp-stub \ - -v ${REP_REAL_PATH}/community/contrib/curl/:${REP_DIR}/community/contrib/curl \ - -v ${REP_REAL_PATH}/community/contrib/curl2/:${REP_DIR}/community/contrib/curl2 \ - -v ${REP_REAL_PATH}/community/contrib/geos/:${REP_DIR}/community/contrib/geos \ - -v ${REP_REAL_PATH}/community/contrib/googletest/:${REP_DIR}/community/contrib/googletest \ - -v ${REP_REAL_PATH}/community/contrib/libs3/:${REP_DIR}/community/contrib/libs3 \ - -v ${REP_REAL_PATH}/community/contrib/libuv/:${REP_DIR}/community/contrib/libuv \ - -v ${REP_REAL_PATH}/community/contrib/lz4/:${REP_DIR}/community/contrib/lz4 \ - -v ${REP_REAL_PATH}/community/contrib/lzma2/:${REP_DIR}/community/contrib/lzma2 \ - -v ${REP_REAL_PATH}/community/contrib/mxml/:${REP_DIR}/community/contrib/mxml \ - -v ${REP_REAL_PATH}/community/contrib/openssl/:${REP_DIR}/community/contrib/openssl \ - -v ${REP_REAL_PATH}/community/contrib/pcre2/:${REP_DIR}/community/contrib/pcre2 \ - -v ${REP_REAL_PATH}/community/contrib/xml2/:${REP_DIR}/community/contrib/xml2 \ - -v ${REP_REAL_PATH}/community/contrib/zlib/:${REP_DIR}/community/contrib/zlib \ - -v ${REP_REAL_PATH}/community/contrib/zstd/:${REP_DIR}/community/contrib/zstd \ - --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=false -DWEBSOCKET=true -DCMAKE_BUILD_TYPE=Release -DBUILD_TAOSX=false -DJEMALLOC_ENABLED=0;make -j || exit 1 " +# docker run \ +# -v $REP_MOUNT_PARAM \ +# -v /root/.cargo/registry:/root/.cargo/registry \ +# -v /root/.cargo/git:/root/.cargo/git \ +# -v /root/go/pkg/mod:/root/go/pkg/mod \ +# -v /root/.cache/go-build:/root/.cache/go-build \ +# -v /root/.cos-local.1:/root/.cos-local.2 \ +# -v ${REP_REAL_PATH}/enterprise/contrib/grant-lib:${REP_DIR}/enterprise/contrib/grant-lib \ +# -v ${REP_REAL_PATH}/community/tools/taosadapter:${REP_DIR}/community/tools/taosadapter \ +# -v ${REP_REAL_PATH}/community/tools/taos-tools:${REP_DIR}/community/tools/taos-tools \ +# -v ${REP_REAL_PATH}/community/tools/taosws-rs:${REP_DIR}/community/tools/taosws-rs \ +# -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ +# -v ${REP_REAL_PATH}/community/contrib/apr/:${REP_DIR}/community/contrib/apr \ +# -v ${REP_REAL_PATH}/community/contrib/apr-util/:${REP_DIR}/community/contrib/apr-util \ +# -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ +# -v ${REP_REAL_PATH}/community/contrib/cpp-stub/:${REP_DIR}/community/contrib/cpp-stub \ +# -v ${REP_REAL_PATH}/community/contrib/curl/:${REP_DIR}/community/contrib/curl \ +# -v ${REP_REAL_PATH}/community/contrib/curl2/:${REP_DIR}/community/contrib/curl2 \ +# -v ${REP_REAL_PATH}/community/contrib/geos/:${REP_DIR}/community/contrib/geos \ +# -v ${REP_REAL_PATH}/community/contrib/googletest/:${REP_DIR}/community/contrib/googletest \ +# -v ${REP_REAL_PATH}/community/contrib/libs3/:${REP_DIR}/community/contrib/libs3 \ +# -v ${REP_REAL_PATH}/community/contrib/libuv/:${REP_DIR}/community/contrib/libuv \ +# -v ${REP_REAL_PATH}/community/contrib/lz4/:${REP_DIR}/community/contrib/lz4 \ +# -v ${REP_REAL_PATH}/community/contrib/lzma2/:${REP_DIR}/community/contrib/lzma2 \ +# -v ${REP_REAL_PATH}/community/contrib/mxml/:${REP_DIR}/community/contrib/mxml \ +# -v ${REP_REAL_PATH}/community/contrib/openssl/:${REP_DIR}/community/contrib/openssl \ +# -v 
${REP_REAL_PATH}/community/contrib/pcre2/:${REP_DIR}/community/contrib/pcre2 \ +# -v ${REP_REAL_PATH}/community/contrib/xml2/:${REP_DIR}/community/contrib/xml2 \ +# -v ${REP_REAL_PATH}/community/contrib/zlib/:${REP_DIR}/community/contrib/zlib \ +# -v ${REP_REAL_PATH}/community/contrib/zstd/:${REP_DIR}/community/contrib/zstd \ +# --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=false -DWEBSOCKET=true -DCMAKE_BUILD_TYPE=Release -DBUILD_TAOSX=false -DJEMALLOC_ENABLED=0;make -j || exit 1 " -mv ${REP_REAL_PATH}/debug ${WORKDIR}/debugRelease +# mv ${REP_REAL_PATH}/debug ${WORKDIR}/debugRelease From e405d934e88399f121a158480ab3eaf483282da9 Mon Sep 17 00:00:00 2001 From: sima Date: Wed, 14 Aug 2024 16:04:34 +0800 Subject: [PATCH 025/181] fix:[TD-31443] Reset field nums in SFilterFields when malloc failed. --- source/libs/scalar/src/filter.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c index 40fcdab7ad..a9c9399dcb 100644 --- a/source/libs/scalar/src/filter.c +++ b/source/libs/scalar/src/filter.c @@ -1066,6 +1066,7 @@ int32_t filterAddField(SFilterInfo *info, void *desc, void **data, int32_t type, info->fields[type].fields = taosMemoryRealloc(info->fields[type].fields, info->fields[type].size * sizeof(SFilterField)); if (info->fields[type].fields == NULL) { + *num = 0; fltError("taosMemoryRealloc failed, size:%d", (int32_t)(info->fields[type].size * sizeof(SFilterField))); FLT_ERR_RET(terrno); } From 9a7ffe38acdef732bb4a36acd012f07d97ef5930 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao> Date: Wed, 14 Aug 2024 16:11:48 +0800 Subject: [PATCH 026/181] fix issue --- source/libs/executor/src/scanoperator.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 4e25015af5..a3cbe302d3 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -536,9 +536,10 @@ static int32_t createTableCacheVal(const SMetaReader* pMetaReader, STableCachedV int32_t lino = 0; STableCachedVal* pVal = taosMemoryMalloc(sizeof(STableCachedVal)); QUERY_CHECK_NULL(pVal, code, lino, _end, terrno); + + pVal->pTags = NULL; pVal->pName = taosStrdup(pMetaReader->me.name); QUERY_CHECK_NULL(pVal->pName, code, lino, _end, terrno); - pVal->pTags = NULL; // only child table has tag value if (pMetaReader->me.type == TSDB_CHILD_TABLE) { From 6c917646b2aa30182772e0ddd6bae4376ffb7995 Mon Sep 17 00:00:00 2001 From: kailixu Date: Wed, 14 Aug 2024 16:15:37 +0800 Subject: [PATCH 027/181] fix(tsdb/cache): return error code --- source/dnode/vnode/src/tsdb/tsdbCache.c | 44 +++++++++++-------------- 1 file changed, 19 insertions(+), 25 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index 1216f0da81..46a498409b 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -1368,7 +1368,7 @@ static int32_t mergeLastRowCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SArray *remainCols, SCacheRowsReader *pr, int8_t ltype) { - int32_t code = 0; + int32_t code = 0, lino = 0; rocksdb_writebatch_t *wb = NULL; SArray *pTmpColArray = NULL; @@ -1413,9 +1413,7 @@ static int32_t tsdbCacheLoadFromRaw(STsdb 
*pTsdb, tb_uid_t uid, SArray *pLastArr if (NULL == lastTmpIndexArray) { lastTmpIndexArray = taosArrayInit(num_keys, sizeof(int32_t)); if (!lastTmpIndexArray) { - taosArrayDestroy(lastrowTmpIndexArray); - - TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY); + TAOS_CHECK_EXIT(TSDB_CODE_OUT_OF_MEMORY); } } (void)taosArrayPush(lastTmpIndexArray, &(i)); @@ -1426,9 +1424,7 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr if (NULL == lastrowTmpIndexArray) { lastrowTmpIndexArray = taosArrayInit(num_keys, sizeof(int32_t)); if (!lastrowTmpIndexArray) { - taosArrayDestroy(lastTmpIndexArray); - - TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY); + TAOS_CHECK_EXIT(TSDB_CODE_OUT_OF_MEMORY); } } (void)taosArrayPush(lastrowTmpIndexArray, &(i)); @@ -1440,13 +1436,11 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr pTmpColArray = taosArrayInit(lastIndex + lastrowIndex, sizeof(SLastCol)); if (!pTmpColArray) { - taosArrayDestroy(lastrowTmpIndexArray); - taosArrayDestroy(lastTmpIndexArray); - TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY); + TAOS_CHECK_EXIT(TSDB_CODE_OUT_OF_MEMORY); } if (lastTmpIndexArray != NULL) { - (void)mergeLastCid(uid, pTsdb, &lastTmpColArray, pr, lastColIds, lastIndex, lastSlotIds); + TAOS_CHECK_EXIT(mergeLastCid(uid, pTsdb, &lastTmpColArray, pr, lastColIds, lastIndex, lastSlotIds)); for (int i = 0; i < taosArrayGetSize(lastTmpColArray); i++) { (void)taosArrayInsert(pTmpColArray, *(int32_t *)taosArrayGet(lastTmpIndexArray, i), taosArrayGet(lastTmpColArray, i)); @@ -1454,7 +1448,7 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr } if (lastrowTmpIndexArray != NULL) { - (void)mergeLastRowCid(uid, pTsdb, &lastrowTmpColArray, pr, lastrowColIds, lastrowIndex, lastrowSlotIds); + TAOS_CHECK_EXIT(mergeLastRowCid(uid, pTsdb, &lastrowTmpColArray, pr, lastrowColIds, lastrowIndex, lastrowSlotIds)); for (int i = 0; i < taosArrayGetSize(lastrowTmpColArray); i++) { (void)taosArrayInsert(pTmpColArray, *(int32_t *)taosArrayGet(lastrowTmpIndexArray, i), taosArrayGet(lastrowTmpColArray, i)); @@ -1475,7 +1469,7 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr .colVal = COL_VAL_NONE(idxKey->key.cid, pr->pSchema->columns[slotIds[i]].type)}; if (!pLastCol) { pLastCol = &noneCol; - TAOS_CHECK_RETURN(reallocVarData(&pLastCol->colVal)); + TAOS_CHECK_EXIT(reallocVarData(&pLastCol->colVal)); } taosArraySet(pLastArray, idxKey->idx, pLastCol); @@ -1490,12 +1484,7 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr SLastCol *pTmpLastCol = taosMemoryCalloc(1, sizeof(SLastCol)); if (!pTmpLastCol) { - taosMemoryFree(slotIds); - taosMemoryFree(lastColIds); - taosMemoryFree(lastSlotIds); - taosMemoryFree(lastrowColIds); - taosMemoryFree(lastrowSlotIds); - TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY); + TAOS_CHECK_EXIT(TSDB_CODE_OUT_OF_MEMORY); } *pTmpLastCol = *pLastCol; pLastCol = pTmpLastCol; @@ -1504,12 +1493,12 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr for (int8_t i = 0; i < pLastCol->rowKey.numOfPKs; i++) { SValue *pValue = &pLastCol->rowKey.pks[i]; if (IS_VAR_DATA_TYPE(pValue->type)) { - TAOS_CHECK_RETURN(reallocVarDataVal(pValue)); + TAOS_CHECK_EXIT(reallocVarDataVal(pValue)); charge += pValue->nData; } } if (IS_VAR_DATA_TYPE(pLastCol->colVal.value.type)) { - TAOS_CHECK_RETURN(reallocVarData(&pLastCol->colVal)); + TAOS_CHECK_EXIT(reallocVarData(&pLastCol->colVal)); charge += pLastCol->colVal.value.nData; } @@ -1538,6 +1527,7 @@ static int32_t 
tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr rocksMayWrite(pTsdb, false, true, false); } +_exit: taosArrayDestroy(lastrowTmpIndexArray); taosArrayDestroy(lastrowTmpColArray); taosArrayDestroy(lastTmpIndexArray); @@ -3015,11 +3005,13 @@ static int32_t mergeLastCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SC // inverse iterator CacheNextRowIter iter = {0}; - (void)nextRowIterOpen(&iter, uid, pTsdb, pTSchema, pr->info.suid, pr->pLDataIterArray, pr->pReadSnap, pr->lastTs, pr); + code = + nextRowIterOpen(&iter, uid, pTsdb, pTSchema, pr->info.suid, pr->pLDataIterArray, pr->pReadSnap, pr->lastTs, pr); + TAOS_CHECK_GOTO(code, &lino, _err); do { TSDBROW *pRow = NULL; - (void)nextRowIterGet(&iter, &pRow, &ignoreEarlierTs, true, TARRAY_DATA(aColArray), TARRAY_SIZE(aColArray)); + code = nextRowIterGet(&iter, &pRow, &ignoreEarlierTs, true, TARRAY_DATA(aColArray), TARRAY_SIZE(aColArray)); if (!pRow) { break; @@ -3199,11 +3191,13 @@ static int32_t mergeLastRowCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, // inverse iterator CacheNextRowIter iter = {0}; - (void)nextRowIterOpen(&iter, uid, pTsdb, pTSchema, pr->info.suid, pr->pLDataIterArray, pr->pReadSnap, pr->lastTs, pr); + code = + nextRowIterOpen(&iter, uid, pTsdb, pTSchema, pr->info.suid, pr->pLDataIterArray, pr->pReadSnap, pr->lastTs, pr); + TAOS_CHECK_GOTO(code, &lino, _err); do { TSDBROW *pRow = NULL; - (void)nextRowIterGet(&iter, &pRow, &ignoreEarlierTs, false, TARRAY_DATA(aColArray), TARRAY_SIZE(aColArray)); + code = nextRowIterGet(&iter, &pRow, &ignoreEarlierTs, false, TARRAY_DATA(aColArray), TARRAY_SIZE(aColArray)); if (!pRow) { break; From daf8ed7e06011ac27d26aae70b5be3b1e9e4afa1 Mon Sep 17 00:00:00 2001 From: wangjiaming0909 <604227650@qq.com> Date: Wed, 14 Aug 2024 16:23:08 +0800 Subject: [PATCH 028/181] fix db tsma version not updated by heart beat --- include/common/tmsg.h | 2 + include/libs/catalog/catalog.h | 2 + source/client/src/clientHb.c | 2 + source/common/src/tmsg.c | 7 +- source/dnode/mnode/impl/src/mndDb.c | 2 + source/libs/catalog/inc/catalogInt.h | 4 ++ source/libs/catalog/src/catalog.c | 12 ++++ source/libs/catalog/src/ctgCache.c | 102 +++++++++++++++++++++------ 8 files changed, 110 insertions(+), 23 deletions(-) diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 70cf9c8b58..1d926ba42c 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -1547,6 +1547,8 @@ typedef struct { SDbCfgRsp* cfgRsp; STableTSMAInfoRsp* pTsmaRsp; int32_t dbTsmaVersion; + char db[TSDB_DB_FNAME_LEN]; + int64_t dbId; } SDbHbRsp; typedef struct { diff --git a/include/libs/catalog/catalog.h b/include/libs/catalog/catalog.h index 3f1cf74cfa..11ed7c7da6 100644 --- a/include/libs/catalog/catalog.h +++ b/include/libs/catalog/catalog.h @@ -415,6 +415,8 @@ int32_t catalogGetTableTsmas(SCatalog* pCtg, SRequestConnInfo* pConn, const SNam int32_t catalogGetTsma(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTsmaName, STableTSMAInfo** pTsma); +int32_t catalogAsyncUpdateDbTsmaVersion(SCatalog* pCtg, int32_t tsmaVersion, const char* dbFName, int64_t dbId); + /** * Destroy catalog and relase all resources */ diff --git a/source/client/src/clientHb.c b/source/client/src/clientHb.c index 415c2d6685..70a519d8ae 100644 --- a/source/client/src/clientHb.c +++ b/source/client/src/clientHb.c @@ -269,6 +269,8 @@ static int32_t hbProcessDBInfoRsp(void *value, int32_t valueLen, struct SCatalog TSC_ERR_JRET(catalogAsyncUpdateTSMA(pCatalog, &pTsma, rsp->dbTsmaVersion)); } taosArrayClear(rsp->pTsmaRsp->pTsmas); + 
} else { + TSC_ERR_JRET(catalogAsyncUpdateDbTsmaVersion(pCatalog, rsp->dbTsmaVersion, rsp->db, rsp->dbId)); } } } diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 740e517e35..4dc59bf6fe 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -3824,7 +3824,8 @@ int32_t tSerializeSDbHbRspImp(SEncoder *pEncoder, const SDbHbRsp *pRsp) { if (tEncodeI8(pEncoder, 0) < 0) return -1; } if (tEncodeI32(pEncoder, pRsp->dbTsmaVersion) < 0) return -1; - + if (tEncodeCStr(pEncoder, pRsp->db) < 0) return -1; + if (tEncodeI64(pEncoder, pRsp->dbId) < 0) return -1; return 0; } @@ -3915,6 +3916,10 @@ int32_t tDeserializeSDbHbRspImp(SDecoder *decoder, SDbHbRsp *pRsp) { if (!tDecodeIsEnd(decoder)) { if (tDecodeI32(decoder, &pRsp->dbTsmaVersion) < 0) return -1; } + if (!tDecodeIsEnd(decoder)) { + if (tDecodeCStrTo(decoder, pRsp->db) < 0) return -1; + if (tDecodeI64(decoder, &pRsp->dbId) < 0) return -1; + } return 0; } diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c index dd3f89c9d0..fe5c12419c 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ b/source/dnode/mnode/impl/src/mndDb.c @@ -1843,6 +1843,8 @@ int32_t mndValidateDbInfo(SMnode *pMnode, SDbCacheInfo *pDbs, int32_t numOfDbs, pDbCacheInfo->tsmaVersion = htonl(pDbCacheInfo->tsmaVersion); SDbHbRsp rsp = {0}; + (void)memcpy(rsp.db, pDbCacheInfo->dbFName, TSDB_DB_FNAME_LEN); + rsp.dbId = pDbCacheInfo->dbId; if ((0 == strcasecmp(pDbCacheInfo->dbFName, TSDB_INFORMATION_SCHEMA_DB) || (0 == strcasecmp(pDbCacheInfo->dbFName, TSDB_PERFORMANCE_SCHEMA_DB)))) { diff --git a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h index f70cfff71d..f3b1852ce1 100644 --- a/source/libs/catalog/inc/catalogInt.h +++ b/source/libs/catalog/inc/catalogInt.h @@ -108,6 +108,7 @@ enum { CTG_OP_UPDATE_TB_TSMA, CTG_OP_DROP_TB_TSMA, CTG_OP_CLEAR_CACHE, + CTG_OP_UPDATE_DB_TSMA_VERSION, CTG_OP_MAX }; @@ -603,6 +604,7 @@ typedef struct SCtgUpdateTbTSMAMsg { STableTSMAInfo* pTsma; int32_t dbTsmaVersion; uint64_t dbId; + char dbFName[TSDB_DB_FNAME_LEN]; } SCtgUpdateTbTSMAMsg; typedef struct SCtgDropTbTSMAMsg { @@ -1167,6 +1169,8 @@ int32_t ctgGetStreamProgressFromVnode(SCatalog* pCtg, SRequestConnInfo* pConn, void* bInput); int32_t ctgAddTSMAFetch(SArray** pFetchs, int32_t dbIdx, int32_t tbIdx, int32_t* fetchIdx, int32_t resIdx, int32_t flag, CTG_TSMA_FETCH_TYPE fetchType, const SName* sourceTbName); +int32_t ctgOpUpdateDbTsmaVersion(SCtgCacheOperation* pOper); +int32_t ctgUpdateDbTsmaVersionEnqueue(SCatalog* pCtg, int32_t tsmaVersion, const char* dbFName, int64_t dbId, bool syncOper); void ctgFreeTask(SCtgTask* pTask, bool freeRes); extern SCatalogMgmt gCtgMgmt; diff --git a/source/libs/catalog/src/catalog.c b/source/libs/catalog/src/catalog.c index 27a7ce1022..d4c79a6c8d 100644 --- a/source/libs/catalog/src/catalog.c +++ b/source/libs/catalog/src/catalog.c @@ -1933,6 +1933,18 @@ _return: CTG_API_LEAVE(code); } +int32_t catalogAsyncUpdateDbTsmaVersion(SCatalog* pCtg, int32_t tsmaVersion, const char* dbFName, int64_t dbId) { + CTG_API_ENTER(); + if (!pCtg || !dbFName) { + CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT); + } + int32_t code = 0; + CTG_ERR_JRET(ctgUpdateDbTsmaVersionEnqueue(pCtg, tsmaVersion, dbFName, dbId, false)); + +_return: + CTG_API_LEAVE(code); +} + int32_t catalogClearCache(void) { CTG_API_ENTER_NOLOCK(); diff --git a/source/libs/catalog/src/ctgCache.c b/source/libs/catalog/src/ctgCache.c index 8e5aba26af..689bf900e2 100644 --- a/source/libs/catalog/src/ctgCache.c +++ 
b/source/libs/catalog/src/ctgCache.c @@ -34,7 +34,8 @@ SCtgOperation gCtgCacheOperation[CTG_OP_MAX] = {{CTG_OP_UPDATE_VGROUP, "update v {CTG_OP_DROP_VIEW_META, "drop viewMeta", ctgOpDropViewMeta}, {CTG_OP_UPDATE_TB_TSMA, "update tbTSMA", ctgOpUpdateTbTSMA}, {CTG_OP_DROP_TB_TSMA, "drop tbTSMA", ctgOpDropTbTSMA}, - {CTG_OP_CLEAR_CACHE, "clear cache", ctgOpClearCache}}; + {CTG_OP_CLEAR_CACHE, "clear cache", ctgOpClearCache}, + {CTG_OP_UPDATE_DB_TSMA_VERSION, "update dbTsmaVersion", ctgOpUpdateDbTsmaVersion}}; SCtgCacheItemInfo gCtgStatItem[CTG_CI_MAX_VALUE] = { {"Cluster ", CTG_CI_FLAG_LEVEL_GLOBAL}, //CTG_CI_CLUSTER @@ -1628,6 +1629,41 @@ _return: CTG_RET(code); } +int32_t ctgUpdateDbTsmaVersionEnqueue(SCatalog* pCtg, int32_t tsmaVersion, const char* dbFName, int64_t dbId, bool syncOp) { + int32_t code = 0; + SCtgCacheOperation *op = taosMemoryCalloc(1, sizeof(SCtgCacheOperation)); + if (NULL == op) { + ctgError("malloc %d failed", (int32_t)sizeof(SCtgCacheOperation)); + CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + + op->opId = CTG_OP_UPDATE_DB_TSMA_VERSION; + op->syncOp = syncOp; + + SCtgUpdateTbTSMAMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateTbTSMAMsg)); + if (NULL == msg) { + ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateTbTSMAMsg)); + taosMemoryFree(op); + CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY); + } + + msg->pCtg = pCtg; + msg->pTsma = NULL; + msg->dbTsmaVersion = tsmaVersion; + msg->dbId = dbId; + memcpy(msg->dbFName, dbFName, TSDB_DB_FNAME_LEN); + + op->data = msg; + + CTG_ERR_JRET(ctgEnqueue(pCtg, op)); + + return TSDB_CODE_SUCCESS; + +_return: + + CTG_RET(code); +} + int32_t ctgAddNewDBCache(SCatalog *pCtg, const char *dbFName, uint64_t dbId) { int32_t code = 0; @@ -3010,6 +3046,32 @@ _return: CTG_RET(code); } +static int32_t ctgOpUpdateDbRentForTsmaVersion(SCtgDBCache* pDbCache, SCtgUpdateTbTSMAMsg* pMsg) { + int32_t code = TSDB_CODE_SUCCESS; + if (pDbCache && pMsg->dbTsmaVersion > 0) { + pDbCache->tsmaVersion = pMsg->dbTsmaVersion; + SDbCacheInfo cacheInfo = {0}; + cacheInfo.dbId = pDbCache->dbId; + + if (pDbCache->cfgCache.cfgInfo) { + cacheInfo.cfgVersion = pDbCache->cfgCache.cfgInfo->cfgVersion; + tstrncpy(cacheInfo.dbFName, pDbCache->cfgCache.cfgInfo->db, TSDB_DB_FNAME_LEN); + } + + if (pDbCache->vgCache.vgInfo) { + cacheInfo.vgVersion = pDbCache->vgCache.vgInfo->vgVersion; + cacheInfo.numOfTable = pDbCache->vgCache.vgInfo->numOfTable; + cacheInfo.stateTs = pDbCache->vgCache.vgInfo->stateTs; + } + + cacheInfo.tsmaVersion = pDbCache->tsmaVersion; + CTG_ERR_JRET(ctgMetaRentUpdate(&pMsg->pCtg->dbRent, &cacheInfo, cacheInfo.dbId, sizeof(SDbCacheInfo), + ctgDbCacheInfoSortCompare, ctgDbCacheInfoSearchCompare)); + } +_return: + CTG_RET(code); +} + int32_t ctgOpUpdateTbTSMA(SCtgCacheOperation *operation) { int32_t code = 0; SCtgUpdateTbTSMAMsg *msg = operation->data; @@ -3023,27 +3085,7 @@ int32_t ctgOpUpdateTbTSMA(SCtgCacheOperation *operation) { CTG_ERR_JRET(ctgGetAddDBCache(pCtg, pTsmaInfo->dbFName, pTsmaInfo->dbId, &dbCache)); CTG_ERR_JRET(ctgWriteTbTSMAToCache(pCtg, dbCache, pTsmaInfo->dbFName, pTsmaInfo->tb, &pTsmaInfo)); - - if (dbCache && msg->dbTsmaVersion > 0) { - dbCache->tsmaVersion = msg->dbTsmaVersion; - SDbCacheInfo cacheInfo = {0}; - cacheInfo.dbId = dbCache->dbId; - - if (dbCache->cfgCache.cfgInfo) { - cacheInfo.cfgVersion = dbCache->cfgCache.cfgInfo->cfgVersion; - tstrncpy(cacheInfo.dbFName, dbCache->cfgCache.cfgInfo->db, TSDB_DB_FNAME_LEN); - } - - if (dbCache->vgCache.vgInfo) { - cacheInfo.vgVersion = dbCache->vgCache.vgInfo->vgVersion; - 
cacheInfo.numOfTable = dbCache->vgCache.vgInfo->numOfTable; - cacheInfo.stateTs = dbCache->vgCache.vgInfo->stateTs; - } - - cacheInfo.tsmaVersion = dbCache->tsmaVersion; - CTG_ERR_JRET(ctgMetaRentUpdate(&msg->pCtg->dbRent, &cacheInfo, cacheInfo.dbId, sizeof(SDbCacheInfo), - ctgDbCacheInfoSortCompare, ctgDbCacheInfoSearchCompare)); - } + CTG_ERR_JRET(ctgOpUpdateDbRentForTsmaVersion(dbCache, msg)); _return: @@ -3057,6 +3099,22 @@ _return: CTG_RET(code); } +int32_t ctgOpUpdateDbTsmaVersion(SCtgCacheOperation *pOper) { + int32_t code = 0; + SCtgUpdateTbTSMAMsg *pMsg = pOper->data; + SCatalog *pCtg = pMsg->pCtg; + SCtgDBCache *pDbCache = NULL; + + if (pCtg->stopUpdate) goto _return; + + CTG_ERR_JRET(ctgGetAddDBCache(pCtg, pMsg->dbFName, pMsg->dbId, &pDbCache)); + CTG_ERR_JRET(ctgOpUpdateDbRentForTsmaVersion(pDbCache, pMsg)); + +_return: + taosMemoryFreeClear(pMsg); + CTG_RET(code); +} + void ctgFreeCacheOperationData(SCtgCacheOperation *op) { if (NULL == op || NULL == op->data) { From 1397fbe2a24122c6f93b406829a246461251a651 Mon Sep 17 00:00:00 2001 From: sheyanjie-qq <249478495@qq.com> Date: Wed, 14 Aug 2024 16:25:22 +0800 Subject: [PATCH 029/181] modify connector sample code log --- docs/examples/csharp/nativesml/Program.cs | 6 +- docs/examples/csharp/stmtInsert/Program.cs | 6 +- docs/examples/csharp/subscribe/Program.cs | 24 +- docs/examples/csharp/wsInsert/Program.cs | 22 +- docs/examples/csharp/wsStmt/Program.cs | 6 +- docs/examples/csharp/wssml/Program.cs | 8 +- docs/examples/csharp/wssubscribe/Program.cs | 24 +- docs/examples/go/queryreqid/main.go | 6 +- docs/examples/go/schemaless/native/main.go | 10 +- docs/examples/go/schemaless/ws/main.go | 8 +- docs/examples/go/sqlquery/main.go | 21 +- docs/examples/go/stmt/native/main.go | 16 +- docs/examples/go/stmt/ws/main.go | 20 +- docs/examples/go/tmq/native/main.go | 24 +- docs/examples/go/tmq/ws/main.go | 24 +- docs/examples/java/pom.xml | 18 + .../com/taos/example/ConsumerLoopFull.java | 384 ++++++++++++++++++ .../main/java/com/taos/example/DruidDemo.java | 36 ++ .../java/com/taos/example/GeometryDemo.java | 190 +++++++++ .../java/com/taos/example/HikariDemo.java | 37 ++ .../com/taos/example/JNIConnectExample.java | 48 +-- .../java/com/taos/example/JdbcBasicDemo.java | 125 ++++++ .../com/taos/example/JdbcCreatDBDemo.java | 53 +++ .../com/taos/example/JdbcInsertDataDemo.java | 55 +++ .../java/com/taos/example/JdbcQueryDemo.java | 55 +++ .../java/com/taos/example/JdbcReqIdDemo.java | 64 +++ .../example/ParameterBindingBasicDemo.java | 90 ++++ .../example/ParameterBindingFullDemo.java | 325 +++++++++++++++ .../com/taos/example/RESTConnectExample.java | 32 +- .../com/taos/example/SchemalessJniTest.java | 47 +++ .../com/taos/example/SchemalessWsTest.java | 47 +++ .../src/main/java/com/taos/example/Util.java | 25 ++ .../com/taos/example/WSConnectExample.java | 16 +- .../example/WSParameterBindingBasicDemo.java | 67 +++ .../example/WSParameterBindingFullDemo.java | 180 ++++++++ .../com/taos/example/WsConsumerLoopFull.java | 384 ++++++++++++++++++ .../node/websocketexample/line_example.js | 4 +- .../node/websocketexample/sql_example.js | 51 +-- .../node/websocketexample/stmt_example.js | 4 +- .../node/websocketexample/tmq_example.js | 17 +- .../node/websocketexample/tmq_seek_example.js | 4 +- docs/examples/python/connect_example.py | 4 +- docs/examples/python/connect_rest_example.py | 2 +- .../python/connect_websocket_examples.py | 2 +- docs/examples/python/create_db_native.py | 2 +- docs/examples/python/create_db_rest.py | 2 +- 
docs/examples/python/create_db_ws.py | 2 +- docs/examples/python/insert_native.py | 2 +- docs/examples/python/insert_rest.py | 2 +- docs/examples/python/insert_ws.py | 2 +- docs/examples/python/query_native.py | 8 +- docs/examples/python/query_rest.py | 6 +- docs/examples/python/query_ws.py | 6 +- docs/examples/python/reqid_native.py | 2 +- docs/examples/python/reqid_rest.py | 2 +- docs/examples/python/reqid_ws.py | 2 +- docs/examples/python/schemaless_native.py | 2 +- docs/examples/python/schemaless_ws.py | 4 +- docs/examples/python/stmt_native.py | 2 +- docs/examples/python/stmt_ws.py | 2 +- docs/examples/python/tmq_native.py | 11 +- docs/examples/python/tmq_websocket_example.py | 13 +- .../rust/nativeexample/examples/connect.rs | 2 +- .../rust/nativeexample/examples/createdb.rs | 4 +- .../rust/nativeexample/examples/insert.rs | 2 +- .../rust/nativeexample/examples/query.rs | 10 +- .../rust/nativeexample/examples/schemaless.rs | 6 +- .../rust/nativeexample/examples/stmt.rs | 26 +- .../rust/nativeexample/examples/tmq.rs | 16 +- .../rust/restexample/examples/connect.rs | 2 +- .../rust/restexample/examples/createdb.rs | 4 +- .../rust/restexample/examples/insert.rs | 2 +- .../rust/restexample/examples/query.rs | 10 +- .../rust/restexample/examples/schemaless.rs | 15 +- .../rust/restexample/examples/stmt.rs | 26 +- .../examples/rust/restexample/examples/tmq.rs | 10 +- docs/zh/08-develop/01-connect/index.md | 4 +- docs/zh/08-develop/02-sql.md | 8 +- docs/zh/08-develop/04-schemaless.md | 4 +- docs/zh/08-develop/05-stmt.md | 4 +- docs/zh/08-develop/07-tmq.md | 28 +- docs/zh/14-reference/05-connector/14-java.mdx | 2 +- .../taosdata/example/ConsumerLoopFull.java | 99 +++-- .../com/taosdata/example/JdbcCreatDBDemo.java | 14 +- .../taosdata/example/JdbcInsertDataDemo.java | 13 +- .../com/taosdata/example/JdbcQueryDemo.java | 22 +- .../com/taosdata/example/JdbcReqIdDemo.java | 13 +- .../example/ParameterBindingBasicDemo.java | 13 +- .../taosdata/example/SchemalessJniTest.java | 13 +- .../taosdata/example/SchemalessWsTest.java | 13 +- .../example/WSParameterBindingBasicDemo.java | 13 +- .../taosdata/example/WsConsumerLoopFull.java | 108 +++-- .../java/com/taosdata/example/DruidDemo.java | 48 +-- .../java/com/taosdata/example/HikariDemo.java | 48 +-- 94 files changed, 2774 insertions(+), 491 deletions(-) create mode 100644 docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java create mode 100644 docs/examples/java/src/main/java/com/taos/example/DruidDemo.java create mode 100644 docs/examples/java/src/main/java/com/taos/example/GeometryDemo.java create mode 100644 docs/examples/java/src/main/java/com/taos/example/HikariDemo.java create mode 100644 docs/examples/java/src/main/java/com/taos/example/JdbcBasicDemo.java create mode 100644 docs/examples/java/src/main/java/com/taos/example/JdbcCreatDBDemo.java create mode 100644 docs/examples/java/src/main/java/com/taos/example/JdbcInsertDataDemo.java create mode 100644 docs/examples/java/src/main/java/com/taos/example/JdbcQueryDemo.java create mode 100644 docs/examples/java/src/main/java/com/taos/example/JdbcReqIdDemo.java create mode 100644 docs/examples/java/src/main/java/com/taos/example/ParameterBindingBasicDemo.java create mode 100644 docs/examples/java/src/main/java/com/taos/example/ParameterBindingFullDemo.java create mode 100644 docs/examples/java/src/main/java/com/taos/example/SchemalessJniTest.java create mode 100644 docs/examples/java/src/main/java/com/taos/example/SchemalessWsTest.java create mode 100644 
docs/examples/java/src/main/java/com/taos/example/Util.java create mode 100644 docs/examples/java/src/main/java/com/taos/example/WSParameterBindingBasicDemo.java create mode 100644 docs/examples/java/src/main/java/com/taos/example/WSParameterBindingFullDemo.java create mode 100644 docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java diff --git a/docs/examples/csharp/nativesml/Program.cs b/docs/examples/csharp/nativesml/Program.cs index cfee07eee0..047222c4f4 100644 --- a/docs/examples/csharp/nativesml/Program.cs +++ b/docs/examples/csharp/nativesml/Program.cs @@ -42,16 +42,16 @@ namespace TDengineExample catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to insert data with schemaless; ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine("Failed to insert data with schemaless, ErrCode: " + e.Code + ", ErrMessage: " + e.Error); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to insert data with schemaless; Err:" + e.Message); + Console.WriteLine("Failed to insert data with schemaless, ErrMessage:" + e.Message); throw; } } // ANCHOR_END: main } -} \ No newline at end of file +} diff --git a/docs/examples/csharp/stmtInsert/Program.cs b/docs/examples/csharp/stmtInsert/Program.cs index 60d1a58e55..b317da2fc1 100644 --- a/docs/examples/csharp/stmtInsert/Program.cs +++ b/docs/examples/csharp/stmtInsert/Program.cs @@ -62,16 +62,16 @@ namespace TDengineExample catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to insert to table meters using stmt, url: " + connectionString + "; ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine("Failed to insert to table meters using stmt, ErrCode: " + e.Code + ", ErrMessage: " + e.Error); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to insert to table meters using stmt, url: " + connectionString + "; ErrMessage: " + e.Message); + Console.WriteLine("Failed to insert to table meters using stmt, ErrMessage: " + e.Message); throw; } } // ANCHOR_END: main } -} \ No newline at end of file +} diff --git a/docs/examples/csharp/subscribe/Program.cs b/docs/examples/csharp/subscribe/Program.cs index fa61435b03..4138194800 100644 --- a/docs/examples/csharp/subscribe/Program.cs +++ b/docs/examples/csharp/subscribe/Program.cs @@ -91,14 +91,14 @@ namespace TMQExample catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to create native consumer, host : " + host + "; ErrCode:" + e.Code + - "; ErrMessage: " + e.Error); + Console.WriteLine("Failed to create native consumer, host: " + host + ", ErrCode:" + e.Code + + ", ErrMessage: " + e.Error); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to create native consumer, host : " + host + "; ErrMessage: " + e.Message); + Console.WriteLine("Failed to create native consumer, host: " + host + ", ErrMessage: " + e.Message); throw; } @@ -133,13 +133,13 @@ namespace TMQExample catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to poll data; ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine("Failed to poll data, ErrCode: " + e.Code + ", ErrMessage: " + e.Error); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to poll data; ErrMessage:" + e.Message); + Console.WriteLine("Failed to poll data, ErrMessage: " + e.Message); throw; } // ANCHOR_END: subscribe @@ -163,13 +163,13 @@ namespace TMQExample catch 
(TDengineError e) { // handle TDengine error - Console.WriteLine("Seek example failed; ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine("Failed to execute seek example, ErrCode: " + e.Code + ", ErrMessage: " + e.Error); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Seek example failed; ErrMessage: " + e.Message); + Console.WriteLine("Failed to execute seek example, ErrMessage: " + e.Message); throw; } // ANCHOR_END: seek @@ -197,13 +197,13 @@ namespace TMQExample catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to execute consumer functions. ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine("Failed to execute commit example, ErrCode:" + e.Code + ", ErrMessage: " + e.Error); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to execute consumer functions. ErrMessage:" + e.Message); + Console.WriteLine("Failed to execute commit example, ErrMessage: " + e.Message); throw; } } @@ -221,13 +221,13 @@ namespace TMQExample catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to unsubscribe consumer. ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine("Failed to unsubscribe consumer, ErrCode: " + e.Code + ", ErrMessage: " + e.Error); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to unsubscribe consumer. Err: " + e.Message); + Console.WriteLine("Failed to unsubscribe consumer, ErrMessage: " + e.Message); throw; } finally @@ -239,4 +239,4 @@ namespace TMQExample // ANCHOR_END: close } } -} \ No newline at end of file +} diff --git a/docs/examples/csharp/wsInsert/Program.cs b/docs/examples/csharp/wsInsert/Program.cs index cdddf27f42..36b884a522 100644 --- a/docs/examples/csharp/wsInsert/Program.cs +++ b/docs/examples/csharp/wsInsert/Program.cs @@ -52,13 +52,13 @@ namespace Examples catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to create db and table,url:" + connectionString +"; ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine("Failed to create database power or stable meters, ErrCode: " + e.Code + ", ErrMessage: " + e.Error); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to create db and table, url:" + connectionString + "; ErrMessage: " + e.Message); + Console.WriteLine("Failed to create database power or stable meters, ErrMessage: " + e.Message); throw; } // ANCHOR_END: create_db_and_table @@ -85,13 +85,13 @@ namespace Examples catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to insert data to power.meters, url:" + connectionString + "; ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine("Failed to insert data to power.meters, ErrCode: " + e.Code + ", ErrMessage: " + e.Error); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to insert data to power.meters, url:" + connectionString + "; ErrMessage: " + e.Message); + Console.WriteLine("Failed to insert data to power.meters, ErrMessage: " + e.Message); throw; } // ANCHOR_END: insert_data @@ -100,10 +100,10 @@ namespace Examples private static void QueryData(ITDengineClient client,string connectionString) { // ANCHOR: select_data + // query data, make sure the database and table are created before + var query = "SELECT ts, current, location FROM power.meters limit 100"; try { - // query data, make sure the database and table are created before - var query = 
"SELECT ts, current, location FROM power.meters limit 100"; using (var rows = client.Query(query)) { while (rows.Read()) @@ -119,13 +119,13 @@ namespace Examples catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to query data from power.meters, url:" + connectionString + "; ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine("Failed to query data from power.meters, sql: " + query + ", ErrCode: " + e.Code + ", ErrMessage: " + e.Error); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to query data from power.meters, url:" + connectionString + "; ErrMessage: " + e.Message); + Console.WriteLine("Failed to query data from power.meters, sql: " + query + ", ErrMessage: " + e.Message); throw; } // ANCHOR_END: select_data @@ -155,16 +155,16 @@ namespace Examples catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to execute sql with reqId: " + reqId + ", url:" + connectionString + "; ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine("Failed to execute sql with reqId: " + reqId + ", ErrCode: " + e.Code + ", ErrMessage: " + e.Error); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to execute sql with reqId: " + reqId + ", url:" + connectionString + "; ErrMessage: " + e.Message); + Console.WriteLine("Failed to execute sql with reqId: " + reqId + ", ErrMessage: " + e.Message); throw; } // ANCHOR_END: query_id } } -} \ No newline at end of file +} diff --git a/docs/examples/csharp/wsStmt/Program.cs b/docs/examples/csharp/wsStmt/Program.cs index 394165f109..fa0365f418 100644 --- a/docs/examples/csharp/wsStmt/Program.cs +++ b/docs/examples/csharp/wsStmt/Program.cs @@ -63,16 +63,16 @@ namespace Examples catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to insert to table meters using stmt, url: " + connectionString + "; ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine("Failed to insert to table meters using stmt, ErrCode: " + e.Code + ", ErrMessage: " + e.Error); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to insert to table meters using stmt, url: " + connectionString + "; ErrMessage: " + e.Message); + Console.WriteLine("Failed to insert to table meters using stmt, ErrMessage: " + e.Message); throw; } } // ANCHOR_END: main } -} \ No newline at end of file +} diff --git a/docs/examples/csharp/wssml/Program.cs b/docs/examples/csharp/wssml/Program.cs index 94ee86919b..3bb5eea70c 100644 --- a/docs/examples/csharp/wssml/Program.cs +++ b/docs/examples/csharp/wssml/Program.cs @@ -44,17 +44,17 @@ namespace TDengineExample catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to insert data with schemaless, host:" + host + "; ErrCode:" + e.Code + - "; ErrMessage: " + e.Error); + Console.WriteLine("Failed to insert data with schemaless, ErrCode: " + e.Code + + ", ErrMessage: " + e.Error); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to insert data with schemaless, host:" + host + "; ErrMessage: " + e.Message); + Console.WriteLine("Failed to insert data with schemaless, ErrMessage: " + e.Message); throw; } } // ANCHOR_END: main } -} \ No newline at end of file +} diff --git a/docs/examples/csharp/wssubscribe/Program.cs b/docs/examples/csharp/wssubscribe/Program.cs index 9974c87906..21abe10847 100644 --- a/docs/examples/csharp/wssubscribe/Program.cs +++ 
b/docs/examples/csharp/wssubscribe/Program.cs @@ -96,14 +96,14 @@ namespace TMQExample catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to create websocket consumer, host : " + host + "; ErrCode:" + e.Code + - "; ErrMessage: " + e.Error); + Console.WriteLine("Failed to create websocket consumer, host: " + host + ", ErrCode: " + e.Code + + ", ErrMessage: " + e.Error); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to create websocket consumer, host : " + host + "; ErrMessage: " + e.Message); + Console.WriteLine("Failed to create websocket consumer, host: " + host + ", ErrMessage: " + e.Message); throw; } @@ -138,13 +138,13 @@ namespace TMQExample catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to poll data; ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine("Failed to poll data, ErrCode: " + e.Code + ", ErrMessage: " + e.Error); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to poll data; ErrMessage:" + e.Message); + Console.WriteLine("Failed to poll data, ErrMessage: " + e.Message); throw; } // ANCHOR_END: subscribe @@ -168,13 +168,13 @@ namespace TMQExample catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Seek example failed; ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine("Failed to execute seek example, ErrCode: " + e.Code + ", ErrMessage: " + e.Error); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Seek example failed; ErrMessage: " + e.Message); + Console.WriteLine("Failed to execute seek example, ErrMessage: " + e.Message); throw; } // ANCHOR_END: seek @@ -202,13 +202,13 @@ namespace TMQExample catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to execute consumer functions. ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine("Failed to execute commit example, ErrCode: " + e.Code + ", ErrMessage: " + e.Error); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to execute consumer functions. ErrMessage:" + e.Message); + Console.WriteLine("Failed to execute commit example, ErrMessage: " + e.Message); throw; } } @@ -226,13 +226,13 @@ namespace TMQExample catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to unsubscribe consumer. ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine("Failed to unsubscribe consumer, ErrCode :" + e.Code + ", ErrMessage: " + e.Error); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to unsubscribe consumer. 
Err: " + e.Message); + Console.WriteLine("Failed to unsubscribe consumer, ErrMessage: " + e.Message); throw; } finally @@ -244,4 +244,4 @@ namespace TMQExample // ANCHOR_END: close } } -} \ No newline at end of file +} diff --git a/docs/examples/go/queryreqid/main.go b/docs/examples/go/queryreqid/main.go index 045f0f8901..39d1d6bd5e 100644 --- a/docs/examples/go/queryreqid/main.go +++ b/docs/examples/go/queryreqid/main.go @@ -45,14 +45,14 @@ func main() { func initEnv(conn *sql.DB) { _, err := conn.Exec("CREATE DATABASE IF NOT EXISTS power") if err != nil { - log.Fatal("Create database error: ", err) + log.Fatal("Create database power error: ", err) } _, err = conn.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))") if err != nil { - log.Fatal("Create table error: ", err) + log.Fatal("Create stable meters error: ", err) } _, err = conn.Exec("INSERT INTO power.d1001 USING power.meters TAGS (2, 'California.SanFrancisco') VALUES (NOW , 10.2, 219, 0.32)") if err != nil { - log.Fatal("Insert data error: ", err) + log.Fatal("Insert data to power.meters error: ", err) } } diff --git a/docs/examples/go/schemaless/native/main.go b/docs/examples/go/schemaless/native/main.go index 6caa8bb4cb..36cdee70ac 100644 --- a/docs/examples/go/schemaless/native/main.go +++ b/docs/examples/go/schemaless/native/main.go @@ -20,26 +20,26 @@ func main() { defer conn.Close() _, err = conn.Exec("CREATE DATABASE IF NOT EXISTS power") if err != nil { - log.Fatalln("Failed to create db host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to create database power, ErrMessage: " + err.Error()) } _, err = conn.Exec("USE power") if err != nil { - log.Fatalln("Failed to use db host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to use database power, ErrMessage: " + err.Error()) } // insert influxdb line protocol err = conn.InfluxDBInsertLines([]string{lineDemo}, "ms") if err != nil { - log.Fatalln("Failed to insert data with schemaless, host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to insert data with schemaless, data:" + lineDemo + ", ErrMessage: " + err.Error()) } // insert opentsdb telnet protocol err = conn.OpenTSDBInsertTelnetLines([]string{telnetDemo}) if err != nil { - log.Fatalln("Failed to insert data with schemaless, host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to insert data with schemaless, data:" + telnetDemo + ", ErrMessage: " + err.Error()) } // insert opentsdb json protocol err = conn.OpenTSDBInsertJsonPayload(jsonDemo) if err != nil { - log.Fatalln("Failed to insert data with schemaless, host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to insert data with schemaless, data:" + jsonDemo + ", ErrMessage: " + err.Error()) } fmt.Println("Inserted data with schemaless successfully.") } diff --git a/docs/examples/go/schemaless/ws/main.go b/docs/examples/go/schemaless/ws/main.go index 8507a70811..df1a14ee09 100644 --- a/docs/examples/go/schemaless/ws/main.go +++ b/docs/examples/go/schemaless/ws/main.go @@ -25,7 +25,7 @@ func main() { defer db.Close() _, err = db.Exec("CREATE DATABASE IF NOT EXISTS power") if err != nil { - log.Fatalln("Failed to create db host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to create database power, ErrMessage: " + err.Error()) } s, err := schemaless.NewSchemaless(schemaless.NewConfig("ws://localhost:6041", 1, schemaless.SetDb("power"), @@ -40,17 +40,17 @@ func 
main() { // insert influxdb line protocol err = s.Insert(lineDemo, schemaless.InfluxDBLineProtocol, "ms", 0, common.GetReqID()) if err != nil { - log.Fatalln("Failed to insert data with schemaless, host:" + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to insert data with schemaless, data:" + lineDemo + ", ErrMessage: " + err.Error()) } // insert opentsdb telnet line protocol err = s.Insert(telnetDemo, schemaless.OpenTSDBTelnetLineProtocol, "ms", 0, common.GetReqID()) if err != nil { - log.Fatalln("Failed to insert data with schemaless, host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to insert data with schemaless, data: " + telnetDemo + ", ErrMessage: " + err.Error()) } // insert opentsdb json format protocol err = s.Insert(jsonDemo, schemaless.OpenTSDBJsonFormatProtocol, "s", 0, common.GetReqID()) if err != nil { - log.Fatalln("Failed to insert data with schemaless, host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to insert data with schemaless, data: " + jsonDemo + ", ErrMessage: " + err.Error()) } fmt.Println("Inserted data with schemaless successfully.") } diff --git a/docs/examples/go/sqlquery/main.go b/docs/examples/go/sqlquery/main.go index 1301c79325..f0e0f1c97e 100644 --- a/docs/examples/go/sqlquery/main.go +++ b/docs/examples/go/sqlquery/main.go @@ -13,29 +13,29 @@ func main() { var taosDSN = "root:taosdata@tcp(localhost:6030)/" db, err := sql.Open("taosSql", taosDSN) if err != nil { - log.Fatalln("Failed to connect to " + taosDSN + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to connect to " + taosDSN + ", ErrMessage: " + err.Error()) } defer db.Close() // ANCHOR: create_db_and_table // create database res, err := db.Exec("CREATE DATABASE IF NOT EXISTS power") if err != nil { - log.Fatalln("Failed to create db, url:" + taosDSN + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to create database power, ErrMessage: " + err.Error()) } rowsAffected, err := res.RowsAffected() if err != nil { - log.Fatalln("Failed to get create db rowsAffected, url:" + taosDSN + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to get create database rowsAffected, ErrMessage: " + err.Error()) } // you can check rowsAffected here fmt.Println("Create database power successfully, rowsAffected: ", rowsAffected) // create table res, err = db.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))") if err != nil { - log.Fatalln("Failed to create db and table, url:" + taosDSN + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to create stable meters, ErrMessage: " + err.Error()) } rowsAffected, err = res.RowsAffected() if err != nil { - log.Fatalln("Failed to get create db rowsAffected, url:" + taosDSN + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to get create stable rowsAffected, ErrMessage: " + err.Error()) } // you can check rowsAffected here fmt.Println("Create stable power.meters successfully, rowsAffected:", rowsAffected) @@ -53,20 +53,21 @@ func main() { "(NOW + 1a, 10.30000, 218, 0.25000) " res, err = db.Exec(insertQuery) if err != nil { - log.Fatal("Failed to insert data to power.meters, url:" + taosDSN + "; ErrMessage: " + err.Error()) + log.Fatal("Failed to insert data to power.meters, ErrMessage: " + err.Error()) } rowsAffected, err = res.RowsAffected() if err != nil { - log.Fatal("Failed to get insert rowsAffected, url:" + taosDSN + "; ErrMessage: " + err.Error()) + log.Fatal("Failed to get insert rowsAffected, 
ErrMessage: " + err.Error()) } // you can check affectedRows here fmt.Printf("Successfully inserted %d rows to power.meters.\n", rowsAffected) // ANCHOR_END: insert_data // ANCHOR: select_data // query data, make sure the database and table are created before - rows, err := db.Query("SELECT ts, current, location FROM power.meters limit 100") + sql := "SELECT ts, current, location FROM power.meters limit 100" + rows, err := db.Query(sql) if err != nil { - log.Fatal("Failed to query data from power.meters, url:" + taosDSN + "; ErrMessage: " + err.Error()) + log.Fatal("Failed to query data from power.meters, ErrMessage: " + err.Error()) } for rows.Next() { var ( @@ -76,7 +77,7 @@ func main() { ) err = rows.Scan(&ts, ¤t, &location) if err != nil { - log.Fatal("Failed to scan data, url:" + taosDSN + "; ErrMessage: " + err.Error()) + log.Fatal("Failed to scan data, sql:" + sql + ", ErrMessage: " + err.Error()) } // you can check data here fmt.Printf("ts: %s, current: %f, location: %s\n", ts, current, location) diff --git a/docs/examples/go/stmt/native/main.go b/docs/examples/go/stmt/native/main.go index 46db6eee46..c61cdba57e 100644 --- a/docs/examples/go/stmt/native/main.go +++ b/docs/examples/go/stmt/native/main.go @@ -23,22 +23,22 @@ func main() { // prepare database and table _, err = db.Exec("CREATE DATABASE IF NOT EXISTS power") if err != nil { - log.Fatalln("Failed to create db, host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to create database power, ErrMessage: " + err.Error()) } _, err = db.Exec("USE power") if err != nil { - log.Fatalln("Failed to use db, host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to use database power, ErrMessage: " + err.Error()) } _, err = db.Exec("CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))") if err != nil { - log.Fatalln("Failed to create table, host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to create stable meters, ErrMessage: " + err.Error()) } // prepare statement sql := "INSERT INTO ? USING meters TAGS(?,?) 
VALUES (?,?,?,?)" stmt := db.Stmt() err = stmt.Prepare(sql) if err != nil { - log.Fatalln("Failed to prepare sql, host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to prepare sql, sql: " + sql + ", ErrMessage: " + err.Error()) } for i := 1; i <= numOfSubTable; i++ { tableName := fmt.Sprintf("d_bind_%d", i) @@ -46,7 +46,7 @@ func main() { // set tableName and tags err = stmt.SetTableNameWithTags(tableName, tags) if err != nil { - log.Fatalln("Failed to set table name and tags, host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to set table name and tags, tableName: " + tableName + "; ErrMessage: " + err.Error()) } // bind column data current := time.Now() @@ -58,18 +58,18 @@ func main() { AddFloat(rand.Float32()) err = stmt.BindRow(row) if err != nil { - log.Fatalln("Failed to bind params, host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to bind params, ErrMessage: " + err.Error()) } } // add batch err = stmt.AddBatch() if err != nil { - log.Fatalln("Failed to add batch, host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to add batch, ErrMessage: " + err.Error()) } // execute batch err = stmt.Execute() if err != nil { - log.Fatalln("Failed to exec, host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to exec, ErrMessage: " + err.Error()) } // get affected rows affected := stmt.GetAffectedRows() diff --git a/docs/examples/go/stmt/ws/main.go b/docs/examples/go/stmt/ws/main.go index ae224c704f..289a2f1d57 100644 --- a/docs/examples/go/stmt/ws/main.go +++ b/docs/examples/go/stmt/ws/main.go @@ -27,11 +27,11 @@ func main() { // prepare database and table _, err = db.Exec("CREATE DATABASE IF NOT EXISTS power") if err != nil { - log.Fatalln("Failed to create db, url: " + taosDSN + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to create database power, ErrMessage: " + err.Error()) } _, err = db.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))") if err != nil { - log.Fatalln("Failed to create table, url: " + taosDSN + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to create stable power.meters, ErrMessage: " + err.Error()) } config := stmt.NewConfig(fmt.Sprintf("ws://%s:6041", host), 0) @@ -49,11 +49,11 @@ func main() { sql := "INSERT INTO ? USING meters TAGS(?,?) 
VALUES (?,?,?,?)" stmt, err := connector.Init() if err != nil { - log.Fatalln("Failed to init stmt, url: " + taosDSN + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to init stmt, sql: " + sql + ", ErrMessage: " + err.Error()) } err = stmt.Prepare(sql) if err != nil { - log.Fatal("Failed to prepare sql, url: " + taosDSN + "; ErrMessage: " + err.Error()) + log.Fatal("Failed to prepare sql, sql: " + sql + ", ErrMessage: " + err.Error()) } for i := 1; i <= numOfSubTable; i++ { tableName := fmt.Sprintf("d_bind_%d", i) @@ -63,12 +63,12 @@ func main() { // set tableName err = stmt.SetTableName(tableName) if err != nil { - log.Fatal("Failed to set table name, url: " + taosDSN + "; ErrMessage: " + err.Error()) + log.Fatal("Failed to set table name, tableName: " + tableName + "; ErrMessage: " + err.Error()) } // set tags err = stmt.SetTags(tags, tagsType) if err != nil { - log.Fatal("Failed to set tags, url: " + taosDSN + "; ErrMessage: " + err.Error()) + log.Fatal("Failed to set tags, ErrMessage: " + err.Error()) } // bind column data current := time.Now() @@ -80,18 +80,18 @@ func main() { columnData[3] = param.NewParam(1).AddFloat(rand.Float32()) err = stmt.BindParam(columnData, columnType) if err != nil { - log.Fatal("Failed to bind params, url: " + taosDSN + "; ErrMessage: " + err.Error()) + log.Fatal("Failed to bind params, ErrMessage: " + err.Error()) } } // add batch err = stmt.AddBatch() if err != nil { - log.Fatal("Failed to add batch, url: " + taosDSN + "; ErrMessage: " + err.Error()) + log.Fatal("Failed to add batch, ErrMessage: " + err.Error()) } // execute batch err = stmt.Exec() if err != nil { - log.Fatal("Failed to exec, url: " + taosDSN + "; ErrMessage: " + err.Error()) + log.Fatal("Failed to exec, ErrMessage: " + err.Error()) } // get affected rows affected := stmt.GetAffectedRows() @@ -100,6 +100,6 @@ func main() { } err = stmt.Close() if err != nil { - log.Fatal("Failed to close stmt, url: " + taosDSN + "; ErrMessage: " + err.Error()) + log.Fatal("Failed to close stmt, ErrMessage: " + err.Error()) } } diff --git a/docs/examples/go/tmq/native/main.go b/docs/examples/go/tmq/native/main.go index 4bf2fb4a60..638a07d235 100644 --- a/docs/examples/go/tmq/native/main.go +++ b/docs/examples/go/tmq/native/main.go @@ -48,7 +48,7 @@ func main() { // ANCHOR: subscribe err = consumer.Subscribe("topic_meters", nil) if err != nil { - log.Fatalln("Failed to subscribe, host : " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to subscribe topic_meters, ErrMessage: " + err.Error()) } log.Println("Subscribe topics successfully") for i := 0; i < 50; i++ { @@ -62,13 +62,13 @@ func main() { // commit offset _, err = consumer.CommitOffsets([]tmqcommon.TopicPartition{e.TopicPartition}) if err != nil { - log.Fatalln("Failed to commit offset, host : " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to commit offset, ErrMessage: " + err.Error()) } log.Println("Commit offset manually successfully.") // ANCHOR_END: commit_offset case tmqcommon.Error: fmt.Printf("%% Error: %v: %v\n", e.Code(), e) - log.Fatalln("Failed to poll data, host : " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to poll data, ErrMessage: " + err.Error()) } } } @@ -77,7 +77,7 @@ func main() { // get assignment partitions, err := consumer.Assignment() if err != nil { - log.Fatal("Failed to get assignment; ErrMessage: " + err.Error()) + log.Fatal("Failed to get assignment, ErrMessage: " + err.Error()) } fmt.Println("Now assignment:", partitions) for i := 0; i < len(partitions); i++ { 
@@ -88,7 +88,7 @@ func main() { Offset: 0, }, 0) if err != nil { - log.Fatalln("Seek example failed; ErrMessage: " + err.Error()) + log.Fatalln("Failed to execute seek example, ErrMessage: " + err.Error()) } } fmt.Println("Assignment seek to beginning successfully") @@ -97,13 +97,15 @@ func main() { // unsubscribe err = consumer.Unsubscribe() if err != nil { - log.Fatal("Failed to unsubscribe consumer. ErrMessage: " + err.Error()) + log.Fatal("Failed to unsubscribe consumer, ErrMessage: " + err.Error()) } + fmt.Println("Consumer unsubscribed successfully.") // close consumer err = consumer.Close() if err != nil { - log.Fatal("Failed to close consumer. ErrMessage: " + err.Error()) + log.Fatal("Failed to close consumer, ErrMessage: " + err.Error()) } + fmt.Println("Consumer closed successfully.") // ANCHOR_END: close <-done } @@ -111,22 +113,22 @@ func main() { func initEnv(conn *sql.DB) { _, err := conn.Exec("CREATE DATABASE IF NOT EXISTS power") if err != nil { - log.Fatal("Failed to create database. ErrMessage: " + err.Error()) + log.Fatal("Failed to create database, ErrMessage: " + err.Error()) } _, err = conn.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))") if err != nil { - log.Fatal("Failed to create stable. ErrMessage: " + err.Error()) + log.Fatal("Failed to create stable, ErrMessage: " + err.Error()) } _, err = conn.Exec("CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage, phase, groupid, location FROM power.meters") if err != nil { - log.Fatal("Failed to create topic. ErrMessage: " + err.Error()) + log.Fatal("Failed to create topic, ErrMessage: " + err.Error()) } go func() { for i := 0; i < 10; i++ { time.Sleep(time.Second) _, err = conn.Exec("INSERT INTO power.d1001 USING power.meters TAGS (2, 'California.SanFrancisco') VALUES (NOW , 10.2, 219, 0.32)") if err != nil { - log.Fatal("Failed to insert data. 
ErrMessage: " + err.Error()) + log.Fatal("Failed to insert data, ErrMessage: " + err.Error()) } } done <- struct{}{} diff --git a/docs/examples/go/tmq/ws/main.go b/docs/examples/go/tmq/ws/main.go index c6be0326b5..70ea3af0b3 100644 --- a/docs/examples/go/tmq/ws/main.go +++ b/docs/examples/go/tmq/ws/main.go @@ -53,7 +53,7 @@ func main() { // ANCHOR: subscribe err = consumer.Subscribe("topic_meters", nil) if err != nil { - log.Fatalln("Failed to subscribe, host : " + wsUrl + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to subscribe topic_meters, ErrMessage: " + err.Error()) } log.Println("Subscribe topics successfully") for i := 0; i < 50; i++ { @@ -67,13 +67,13 @@ func main() { // commit offset _, err = consumer.CommitOffsets([]tmqcommon.TopicPartition{e.TopicPartition}) if err != nil { - log.Fatalln("Failed to commit offset, host : " + wsUrl + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to commit offset, ErrMessage: " + err.Error()) } log.Println("Commit offset manually successfully.") // ANCHOR_END: commit_offset case tmqcommon.Error: fmt.Printf("%% Error: %v: %v\n", e.Code(), e) - log.Fatalln("Failed to poll data, host : " + wsUrl + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to poll data, ErrMessage: " + err.Error()) } } } @@ -82,7 +82,7 @@ func main() { // get assignment partitions, err := consumer.Assignment() if err != nil { - log.Fatal("Failed to get assignment; ErrMessage: " + err.Error()) + log.Fatal("Failed to get assignment, ErrMessage: " + err.Error()) } fmt.Println("Now assignment:", partitions) for i := 0; i < len(partitions); i++ { @@ -93,7 +93,7 @@ func main() { Offset: 0, }, 0) if err != nil { - log.Fatalln("Seek example failed; ErrMessage: " + err.Error()) + log.Fatalln("Failed to execute seek example, ErrMessage: " + err.Error()) } } fmt.Println("Assignment seek to beginning successfully") @@ -102,13 +102,15 @@ func main() { // unsubscribe err = consumer.Unsubscribe() if err != nil { - log.Fatal("Failed to unsubscribe consumer. ErrMessage: " + err.Error()) + log.Fatal("Failed to unsubscribe consumer, ErrMessage: " + err.Error()) } + fmt.Println("Consumer unsubscribed successfully.") // close consumer err = consumer.Close() if err != nil { - log.Fatal("Failed to close consumer. ErrMessage: " + err.Error()) + log.Fatal("Failed to close consumer, ErrMessage: " + err.Error()) } + fmt.Println("Consumer closed successfully.") // ANCHOR_END: close <-done } @@ -116,22 +118,22 @@ func main() { func initEnv(conn *sql.DB) { _, err := conn.Exec("CREATE DATABASE IF NOT EXISTS power") if err != nil { - log.Fatal("Failed to create database. ErrMessage: " + err.Error()) + log.Fatal("Failed to create database, ErrMessage: " + err.Error()) } _, err = conn.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))") if err != nil { - log.Fatal("Failed to create stable. ErrMessage: " + err.Error()) + log.Fatal("Failed to create stable, ErrMessage: " + err.Error()) } _, err = conn.Exec("CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage, phase, groupid, location FROM power.meters") if err != nil { - log.Fatal("Failed to create topic. 
ErrMessage: " + err.Error()) + log.Fatal("Failed to create topic, ErrMessage: " + err.Error()) } go func() { for i := 0; i < 10; i++ { time.Sleep(time.Second) _, err = conn.Exec("INSERT INTO power.d1001 USING power.meters TAGS (2, 'California.SanFrancisco') VALUES (NOW , 10.2, 219, 0.32)") if err != nil { - log.Fatal("Failed to insert data. ErrMessage: " + err.Error()) + log.Fatal("Failed to insert data, ErrMessage: " + err.Error()) } } done <- struct{}{} diff --git a/docs/examples/java/pom.xml b/docs/examples/java/pom.xml index e7969bd4eb..35fe5f280c 100644 --- a/docs/examples/java/pom.xml +++ b/docs/examples/java/pom.xml @@ -25,6 +25,24 @@ 3.3.0 + + + + com.alibaba + druid + 1.1.17 + + + + com.zaxxer + HikariCP + 3.2.0 + + + org.locationtech.jts + jts-core + 1.19.0 + junit junit diff --git a/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java b/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java new file mode 100644 index 0000000000..62dac019d7 --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java @@ -0,0 +1,384 @@ +package com.taos.example; + +import com.alibaba.fastjson.JSON; +import com.taosdata.jdbc.TSDBDriver; +import com.taosdata.jdbc.tmq.*; + +import java.sql.*; +import java.time.Duration; +import java.util.Collections; +import java.util.List; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +// ANCHOR: consumer_demo +public class ConsumerLoopFull { + static private Connection connection; + static private Statement statement; + static private volatile boolean stopThread = false; + + public static TaosConsumer getConsumer() throws Exception { +// ANCHOR: create_consumer + Properties config = new Properties(); + config.setProperty("td.connect.type", "jni"); + config.setProperty("bootstrap.servers", "localhost:6030"); + config.setProperty("auto.offset.reset", "latest"); + config.setProperty("msg.with.table.name", "true"); + config.setProperty("enable.auto.commit", "true"); + config.setProperty("auto.commit.interval.ms", "1000"); + config.setProperty("group.id", "group1"); + config.setProperty("client.id", "1"); + config.setProperty("td.connect.user", "root"); + config.setProperty("td.connect.pass", "taosdata"); + config.setProperty("value.deserializer", "com.taos.example.ConsumerLoopFull$ResultDeserializer"); + config.setProperty("value.deserializer.encoding", "UTF-8"); + + try { + TaosConsumer consumer= new TaosConsumer<>(config); + System.out.printf("Create consumer successfully, host: %s, groupId: %s, clientId: %s%n", + config.getProperty("bootstrap.servers"), + config.getProperty("group.id"), + config.getProperty("client.id")); + return consumer; + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to create native consumer, host: %s, %sErrMessage: %s%n", + config.getProperty("bootstrap.servers"), + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); + throw ex; + } +// ANCHOR_END: create_consumer + } + + public static void pollExample(TaosConsumer consumer) throws SQLException { +// ANCHOR: poll_data_code_piece + try { + List topics = Collections.singletonList("topic_meters"); + + // subscribe to the topics + consumer.subscribe(topics); + System.out.println("Subscribe topics successfully."); + for (int i = 0; i < 50; i++) { + // poll data + ConsumerRecords records = consumer.poll(Duration.ofMillis(100)); + for (ConsumerRecord record : records) { + ResultBean bean = record.value(); + // process the data here + System.out.println("data: " + JSON.toJSONString(bean)); + } + } + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to poll data, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; + } +// ANCHOR_END: poll_data_code_piece + } + + public static void seekExample(TaosConsumer consumer) throws SQLException { +// ANCHOR: consumer_seek + try { + List topics = Collections.singletonList("topic_meters"); + + // subscribe to the topics + consumer.subscribe(topics); + System.out.println("Subscribe topics successfully."); + Set assignment = consumer.assignment(); + System.out.println("Now assignment: " + JSON.toJSONString(assignment)); + + ConsumerRecords records = ConsumerRecords.emptyRecord(); + // make sure we have got some data + while (records.isEmpty()) { + records = consumer.poll(Duration.ofMillis(100)); + } + + consumer.seekToBeginning(assignment); + System.out.println("Assignment seek to beginning successfully."); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to execute seek example, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; + } +// ANCHOR_END: consumer_seek + } + + + public static void commitExample(TaosConsumer consumer) throws SQLException { +// ANCHOR: commit_code_piece + try { + List topics = Collections.singletonList("topic_meters"); + + consumer.subscribe(topics); + for (int i = 0; i < 50; i++) { + ConsumerRecords records = consumer.poll(Duration.ofMillis(100)); + for (ConsumerRecord record : records) { + ResultBean bean = record.value(); + // process your data here + System.out.println("data: " + JSON.toJSONString(bean)); + } + if (!records.isEmpty()) { + // after processing the data, commit the offset manually + consumer.commitSync(); + System.out.println("Commit offset manually successfully."); + } + } + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to execute commit example, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
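+            // Rethrow after logging so a failed manual commit is reported by the caller rather than ignored.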
+ ex.printStackTrace(); + throw ex; + } +// ANCHOR_END: commit_code_piece + } + + public static void unsubscribeExample(TaosConsumer consumer) throws SQLException { + List topics = Collections.singletonList("topic_meters"); + consumer.subscribe(topics); +// ANCHOR: unsubscribe_data_code_piece + try { + // unsubscribe the consumer + consumer.unsubscribe(); + System.out.println("Consumer unsubscribed successfully."); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to unsubscribe consumer, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; + } + finally { + // close the consumer + consumer.close(); + System.out.println("Consumer closed successfully."); + } +// ANCHOR_END: unsubscribe_data_code_piece + } + + public static class ResultDeserializer extends ReferenceDeserializer { + + } + + // use this class to define the data structure of the result record + public static class ResultBean { + private Timestamp ts; + private double current; + private int voltage; + private double phase; + private int groupid; + private String location; + + public Timestamp getTs() { + return ts; + } + + public void setTs(Timestamp ts) { + this.ts = ts; + } + + public double getCurrent() { + return current; + } + + public void setCurrent(double current) { + this.current = current; + } + + public int getVoltage() { + return voltage; + } + + public void setVoltage(int voltage) { + this.voltage = voltage; + } + + public double getPhase() { + return phase; + } + + public void setPhase(double phase) { + this.phase = phase; + } + + public int getGroupid() { + return groupid; + } + + public void setGroupid(int groupid) { + this.groupid = groupid; + } + + public String getLocation() { + return location; + } + + public void setLocation(String location) { + this.location = location; + } + } + + public static void prepareData() throws SQLException, InterruptedException { + try { + int i = 0; + while (!stopThread) { + String insertQuery = "INSERT INTO power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') VALUES (NOW + " + i + "a, 10.30000, 219, 0.31000) "; + int affectedRows = statement.executeUpdate(insertQuery); + assert affectedRows == 1; + i++; + Thread.sleep(1); + } + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to insert data to power.meters, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; + } + } + + public static void prepareMeta() throws SQLException { + try { + statement.executeUpdate("CREATE DATABASE IF NOT EXISTS power"); + statement.executeUpdate("USE power"); + statement.executeUpdate("CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); + statement.executeUpdate("CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage, phase, groupid, location FROM meters"); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to create db and table, %sErrMessage: %s%n", + ex instanceof SQLException ? 
"ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; + } + } + + public static void initConnection() throws SQLException { + String url = "jdbc:TAOS://localhost:6030?user=root&password=taosdata"; + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "C"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + + try { + connection = DriverManager.getConnection(url, properties); + } catch (SQLException ex) { + System.out.println("Failed to create connection, url:" + url + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); + throw new SQLException("Failed to create connection", ex); + } + try { + statement = connection.createStatement(); + } catch (SQLException ex) { + System.out.println("Failed to create statement, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); + throw new SQLException("Failed to create statement", ex); + } + System.out.println("Connection created successfully."); + } + + public static void closeConnection() throws SQLException { + try { + if (statement != null) { + statement.close(); + } + } catch (SQLException ex) { + System.out.println("Failed to close statement, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); + throw new SQLException("Failed to close statement", ex); + } + + try { + if (connection != null) { + connection.close(); + } + } catch (SQLException ex) { + System.out.println("Failed to close connection, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); + throw new SQLException("Failed to close connection", ex); + } + System.out.println("Connection closed Successfully."); + } + + + public static void main(String[] args) throws SQLException, InterruptedException { + initConnection(); + prepareMeta(); + + // create a single thread executor + ExecutorService executor = Executors.newSingleThreadExecutor(); + + // submit a task + executor.submit(() -> { + try { + prepareData(); + } catch (SQLException ex) { + System.out.println("Failed to prepare data, ErrCode:" + ex.getErrorCode() + ", ErrMessage: " + ex.getMessage()); + return; + } catch (Exception ex) { + System.out.println("Failed to prepare data, ErrMessage: " + ex.getMessage()); + return; + } + System.out.println("pollDataExample executed successfully."); + }); + + try { + TaosConsumer consumer = getConsumer(); + + pollExample(consumer); + System.out.println("pollExample executed successfully."); + consumer.unsubscribe(); + + seekExample(consumer); + System.out.println("seekExample executed successfully."); + consumer.unsubscribe(); + + commitExample(consumer); + System.out.println("commitExample executed successfully."); + consumer.unsubscribe(); + + unsubscribeExample(consumer); + System.out.println("unsubscribeExample executed successfully"); + } catch (SQLException ex) { + System.out.println("Failed to poll data from topic_meters, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); + return; + } catch (Exception ex) { + System.out.println("Failed to poll data from topic_meters, ErrMessage: " + ex.getMessage()); + return; + } + + stopThread = true; + // close the executor, which will make the executor reject new tasks + executor.shutdown(); + + try { + // wait for the executor to terminate + boolean result = executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); + assert result; + } catch 
(InterruptedException e) { + Thread.currentThread().interrupt(); + } catch (Exception e) { + e.printStackTrace(); + System.out.println("Wait executor termination failed."); + } + + closeConnection(); + System.out.println("program end."); + } +} +// ANCHOR_END: consumer_demo diff --git a/docs/examples/java/src/main/java/com/taos/example/DruidDemo.java b/docs/examples/java/src/main/java/com/taos/example/DruidDemo.java new file mode 100644 index 0000000000..a366efd419 --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/DruidDemo.java @@ -0,0 +1,36 @@ +package com.taosdata.example; + +import com.alibaba.druid.pool.DruidDataSource; + +import java.sql.Connection; +import java.sql.Statement; + +public class DruidDemo { + // ANCHOR: connection_pool + public static void main(String[] args) throws Exception { + String url = "jdbc:TAOS://127.0.0.1:6030/log"; + + DruidDataSource dataSource = new DruidDataSource(); + // jdbc properties + dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver"); + dataSource.setUrl(url); + dataSource.setUsername("root"); + dataSource.setPassword("taosdata"); + // pool configurations + dataSource.setInitialSize(10); + dataSource.setMinIdle(10); + dataSource.setMaxActive(10); + dataSource.setMaxWait(30000); + dataSource.setValidationQuery("SELECT SERVER_VERSION()"); + + Connection connection = dataSource.getConnection(); // get connection + Statement statement = connection.createStatement(); // get statement + // query or insert + // ... + + statement.close(); + connection.close(); // put back to connection pool + dataSource.close(); + } + // ANCHOR_END: connection_pool +} diff --git a/docs/examples/java/src/main/java/com/taos/example/GeometryDemo.java b/docs/examples/java/src/main/java/com/taos/example/GeometryDemo.java new file mode 100644 index 0000000000..036125e7ea --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/GeometryDemo.java @@ -0,0 +1,190 @@ +package com.taos.example; + +import com.taosdata.jdbc.TSDBPreparedStatement; +import org.locationtech.jts.geom.*; +import org.locationtech.jts.io.ByteOrderValues; +import org.locationtech.jts.io.ParseException; +import org.locationtech.jts.io.WKBReader; +import org.locationtech.jts.io.WKBWriter; + +import java.sql.*; +import java.util.ArrayList; +import java.util.Properties; + +public class GeometryDemo { + private static String host = "localhost"; + private static final String dbName = "test"; + private static final String tbName = "weather"; + private static final String user = "root"; + private static final String password = "taosdata"; + + private Connection connection; + + public static void main(String[] args) throws SQLException { + for (int i = 0; i < args.length; i++) { + if ("-host".equalsIgnoreCase(args[i]) && i < args.length - 1) + host = args[++i]; + } + if (host == null) { + printHelp(); + } + GeometryDemo demo = new GeometryDemo(); + demo.init(); + demo.createDatabase(); + demo.useDatabase(); + demo.dropTable(); + demo.createTable(); + + demo.insert(); + demo.stmtInsert(); + demo.select(); + + demo.dropTable(); + demo.close(); + } + + private void init() { + final String url = "jdbc:TAOS://" + host + ":6030/?user=" + user + "&password=" + password; + // get connection + try { + Properties properties = new Properties(); + properties.setProperty("charset", "UTF-8"); + properties.setProperty("locale", "en_US.UTF-8"); + properties.setProperty("timezone", "UTC-8"); + System.out.println("get connection starting..."); + connection = DriverManager.getConnection(url, 
properties); + if (connection != null) + System.out.println("[ OK ] Connection established."); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + private void createDatabase() { + String sql = "create database if not exists " + dbName; + execute(sql); + } + + private void useDatabase() { + String sql = "use " + dbName; + execute(sql); + } + + private void dropTable() { + final String sql = "drop table if exists " + dbName + "." + tbName + ""; + execute(sql); + } + + private void createTable() { + final String sql = "create table if not exists " + dbName + "." + tbName + " (ts timestamp, temperature float, humidity int, location geometry(50))"; + execute(sql); + } + + private void insert() { + final String sql = "insert into " + dbName + "." + tbName + " (ts, temperature, humidity, location) values(now, 20.5, 34, 'POINT(1 2)')"; + execute(sql); + } + + private void stmtInsert() throws SQLException { + TSDBPreparedStatement preparedStatement = (TSDBPreparedStatement) connection.prepareStatement("insert into " + dbName + "." + tbName + " values (?, ?, ?, ?)"); + + long current = System.currentTimeMillis(); + ArrayList tsList = new ArrayList<>(); + tsList.add(current); + tsList.add(current + 1); + preparedStatement.setTimestamp(0, tsList); + ArrayList tempList = new ArrayList<>(); + tempList.add(20.1F); + tempList.add(21.2F); + preparedStatement.setFloat(1, tempList); + ArrayList humList = new ArrayList<>(); + humList.add(30); + humList.add(31); + preparedStatement.setInt(2, humList); + + + ArrayList list = new ArrayList<>(); + GeometryFactory gf = new GeometryFactory(); + Point p1 = gf.createPoint(new Coordinate(1,2)); + p1.setSRID(1234); + + // NOTE: TDengine current version only support 2D dimension and little endian byte order + WKBWriter w = new WKBWriter(2, ByteOrderValues.LITTLE_ENDIAN, true); + byte[] wkb = w.write(p1); + list.add(wkb); + + Coordinate[] coordinates = { new Coordinate(10, 20), + new Coordinate(30, 40)}; + LineString lineString = gf.createLineString(coordinates); + lineString.setSRID(2345); + byte[] wkb2 = w.write(lineString); + list.add(wkb2); + + preparedStatement.setGeometry(3, list, 50); + + preparedStatement.columnDataAddBatch(); + preparedStatement.columnDataExecuteBatch(); + } + + private void select() { + final String sql = "select * from " + dbName + "." + tbName; + executeQuery(sql); + } + + private void close() { + try { + if (connection != null) { + this.connection.close(); + System.out.println("connection closed."); + } + } catch (SQLException e) { + e.printStackTrace(); + } + } + + private void executeQuery(String sql) { + long start = System.currentTimeMillis(); + try (Statement statement = connection.createStatement()) { + ResultSet resultSet = statement.executeQuery(sql); + long end = System.currentTimeMillis(); + printSql(sql, true, (end - start)); + + while (resultSet.next()){ + byte[] result1 = resultSet.getBytes(4); + WKBReader reader = new WKBReader(); + Geometry g1 = reader.read(result1); + System.out.println("GEO OBJ: " + g1 + ", SRID: " + g1.getSRID()); + } + + } catch (SQLException e) { + long end = System.currentTimeMillis(); + printSql(sql, false, (end - start)); + e.printStackTrace(); + } catch (ParseException e) { + throw new RuntimeException(e); + } + } + + private void printSql(String sql, boolean succeed, long cost) { + System.out.println("[ " + (succeed ? 
"OK" : "ERROR!") + " ] time cost: " + cost + " ms, execute statement ====> " + sql); + } + + private void execute(String sql) { + long start = System.currentTimeMillis(); + try (Statement statement = connection.createStatement()) { + boolean execute = statement.execute(sql); + long end = System.currentTimeMillis(); + printSql(sql, true, (end - start)); + } catch (SQLException e) { + long end = System.currentTimeMillis(); + printSql(sql, false, (end - start)); + e.printStackTrace(); + } + } + + private static void printHelp() { + System.out.println("Usage: java -jar JDBCDemo.jar -host "); + System.exit(0); + } + +} diff --git a/docs/examples/java/src/main/java/com/taos/example/HikariDemo.java b/docs/examples/java/src/main/java/com/taos/example/HikariDemo.java new file mode 100644 index 0000000000..50b20fdb0c --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/HikariDemo.java @@ -0,0 +1,37 @@ +package com.taosdata.example; + +import com.zaxxer.hikari.HikariConfig; +import com.zaxxer.hikari.HikariDataSource; + +import java.sql.Connection; +import java.sql.Statement; + +public class HikariDemo { + // ANCHOR: connection_pool + public static void main(String[] args) throws Exception { + HikariConfig config = new HikariConfig(); + // jdbc properties + config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log"); + config.setUsername("root"); + config.setPassword("taosdata"); + // connection pool configurations + config.setMinimumIdle(10); // minimum number of idle connection + config.setMaximumPoolSize(10); // maximum number of connection in the pool + config.setConnectionTimeout(30000); // maximum wait milliseconds for get connection from pool + config.setMaxLifetime(0); // maximum life time for each connection + config.setIdleTimeout(0); // max idle time for recycle idle connection + config.setConnectionTestQuery("SELECT SERVER_VERSION()"); // validation query + + HikariDataSource dataSource = new HikariDataSource(config); // create datasource + + Connection connection = dataSource.getConnection(); // get connection + Statement statement = connection.createStatement(); // get statement + + // query or insert + // ... + statement.close(); + connection.close(); // put back to connection pool + dataSource.close(); + } + // ANCHOR_END: connection_pool +} diff --git a/docs/examples/java/src/main/java/com/taos/example/JNIConnectExample.java b/docs/examples/java/src/main/java/com/taos/example/JNIConnectExample.java index 42ac7bde85..4950654cf6 100644 --- a/docs/examples/java/src/main/java/com/taos/example/JNIConnectExample.java +++ b/docs/examples/java/src/main/java/com/taos/example/JNIConnectExample.java @@ -8,31 +8,33 @@ import java.util.Properties; import com.taosdata.jdbc.TSDBDriver; public class JNIConnectExample { -// ANCHOR: main -public static void main(String[] args) throws SQLException { - // use - // String jdbcUrl = "jdbc:TAOS://localhost:6030/dbName?user=root&password=taosdata"; - // if you want to connect a specified database named "dbName". 
- String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata"; - Properties connProps = new Properties(); - connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); - connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); - connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + // ANCHOR: main + public static void main(String[] args) throws Exception { + // use + // String jdbcUrl = + // "jdbc:TAOS://localhost:6030/dbName?user=root&password=taosdata"; + // if you want to connect a specified database named "dbName". + String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata"; + Properties connProps = new Properties(); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); - try (Connection conn = DriverManager.getConnection(jdbcUrl, connProps)) { - System.out.println("Connected to " + jdbcUrl + " successfully."); + try (Connection conn = DriverManager.getConnection(jdbcUrl, connProps)) { + System.out.println("Connected to " + jdbcUrl + " successfully."); - // you can use the connection for execute SQL here + // you can use the connection for execute SQL here - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to connect to " + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw ex; - } catch (Exception ex){ - System.out.println("Failed to connect to " + jdbcUrl + "; ErrMessage: " + ex.getMessage()); - throw ex; + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to connect to %s, %sErrMessage: %s%n", + jdbcUrl, + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
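+            // Rethrow so the connection example fails fast instead of continuing without a usable connection.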
+ ex.printStackTrace(); + throw ex; + } } + // ANCHOR_END: main } -// ANCHOR_END: main -} - diff --git a/docs/examples/java/src/main/java/com/taos/example/JdbcBasicDemo.java b/docs/examples/java/src/main/java/com/taos/example/JdbcBasicDemo.java new file mode 100644 index 0000000000..f9b30f52d9 --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/JdbcBasicDemo.java @@ -0,0 +1,125 @@ +package com.taos.example; + +import com.taosdata.jdbc.AbstractStatement; + +import java.sql.*; +import java.util.Properties; + +public class JdbcBasicDemo { + private static final String host = "localhost"; + private static final String dbName = "test"; + private static final String tbName = "weather"; + private static final String user = "root"; + private static final String password = "taosdata"; + + + public static void main(String[] args) throws SQLException { + + final String url = "jdbc:TAOS://" + host + ":6030/?user=" + user + "&password=" + password; + +// get connection + Properties properties = new Properties(); + properties.setProperty("charset", "UTF-8"); + properties.setProperty("locale", "en_US.UTF-8"); + properties.setProperty("timezone", "UTC-8"); + System.out.println("get connection starting..."); + try (Connection connection = DriverManager.getConnection(url, properties)) { + + + if (connection != null) { + System.out.println("[ OK ] Connection established."); + } else { + System.out.println("[ ERR ] Connection can not be established."); + return; + } + + Statement stmt = connection.createStatement(); + +// ANCHOR: create_db_and_table +// create database + stmt.executeUpdate("CREATE DATABASE IF NOT EXISTS power"); + +// use database + stmt.executeUpdate("USE power"); + +// create table + stmt.executeUpdate("CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); +// ANCHOR_END: create_db_and_table + +// ANCHOR: insert_data +// insert data + String insertQuery = "INSERT INTO " + + "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " + + "VALUES " + + "(NOW + 1a, 10.30000, 219, 0.31000) " + + "(NOW + 2a, 12.60000, 218, 0.33000) " + + "(NOW + 3a, 12.30000, 221, 0.31000) " + + "power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " + + "VALUES " + + "(NOW + 1a, 10.30000, 218, 0.25000) "; + int affectedRows = stmt.executeUpdate(insertQuery); + System.out.println("insert " + affectedRows + " rows."); +// ANCHOR_END: insert_data + + +// ANCHOR: query_data +// query data + ResultSet resultSet = stmt.executeQuery("SELECT * FROM meters"); + + Timestamp ts; + float current; + String location; + while (resultSet.next()) { + ts = resultSet.getTimestamp(1); + current = resultSet.getFloat(2); + location = resultSet.getString("location"); + + System.out.printf("%s, %f, %s\n", ts, current, location); + } +// ANCHOR_END: query_data + +// ANCHOR: with_reqid + AbstractStatement aStmt = (AbstractStatement) connection.createStatement(); + aStmt.execute("CREATE DATABASE IF NOT EXISTS power", 1L); + aStmt.executeUpdate("USE power", 2L); + try (ResultSet rs = aStmt.executeQuery("SELECT * FROM meters limit 1", 3L)) { + while (rs.next()) { + Timestamp timestamp = rs.getTimestamp(1); + System.out.println("timestamp = " + timestamp); + } + } + aStmt.close(); +// ANCHOR_END: with_reqid + + + String sql = "SELECT * FROM meters limit 2;"; + +// ANCHOR: jdbc_exception + try (Statement statement = connection.createStatement(); + // executeQuery + ResultSet tempResultSet = statement.executeQuery(sql)) { + + 
// print result + printResult(tempResultSet); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to execute statement, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; + } +// ANCHOR_END: jdbc_exception + } catch (SQLException ex) { + // handle any errors, please refer to the JDBC specifications for detailed exceptions info + System.out.println("Error Code: " + ex.getErrorCode()); + System.out.println("Message: " + ex.getMessage()); + } + } + + private static void printResult(ResultSet resultSet) throws SQLException { + Util.printResult(resultSet); + } + +} diff --git a/docs/examples/java/src/main/java/com/taos/example/JdbcCreatDBDemo.java b/docs/examples/java/src/main/java/com/taos/example/JdbcCreatDBDemo.java new file mode 100644 index 0000000000..0293f15b53 --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/JdbcCreatDBDemo.java @@ -0,0 +1,53 @@ +package com.taos.example; + +import java.sql.*; +import java.util.Properties; + +public class JdbcCreatDBDemo { + private static final String host = "localhost"; + private static final String dbName = "test"; + private static final String tbName = "weather"; + private static final String user = "root"; + private static final String password = "taosdata"; + + + public static void main(String[] args) throws SQLException { + + final String jdbcUrl = "jdbc:TAOS://" + host + ":6030/?user=" + user + "&password=" + password; + +// get connection + Properties properties = new Properties(); + properties.setProperty("charset", "UTF-8"); + properties.setProperty("locale", "en_US.UTF-8"); + properties.setProperty("timezone", "UTC-8"); + System.out.println("get connection starting..."); +// ANCHOR: create_db_and_table + try (Connection connection = DriverManager.getConnection(jdbcUrl, properties); + Statement stmt = connection.createStatement()) { + + // create database + int rowsAffected = stmt.executeUpdate("CREATE DATABASE IF NOT EXISTS power"); + // you can check rowsAffected here + System.out.println("Create database power successfully, rowsAffected: " + rowsAffected); + // create table + rowsAffected = stmt.executeUpdate("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); + // you can check rowsAffected here + System.out.println("Create stable power.meters successfully, rowsAffected: " + rowsAffected); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to create database power or stable meters, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
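+            // Rethrow so callers do not proceed against a database or stable that may not have been created.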
+ ex.printStackTrace(); + throw ex; + } +// ANCHOR_END: create_db_and_table + + } + + private static void printResult(ResultSet resultSet) throws SQLException { + Util.printResult(resultSet); + } + +} diff --git a/docs/examples/java/src/main/java/com/taos/example/JdbcInsertDataDemo.java b/docs/examples/java/src/main/java/com/taos/example/JdbcInsertDataDemo.java new file mode 100644 index 0000000000..f19017193c --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/JdbcInsertDataDemo.java @@ -0,0 +1,55 @@ +package com.taos.example; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Properties; + +public class JdbcInsertDataDemo { + private static final String host = "localhost"; + private static final String dbName = "test"; + private static final String tbName = "weather"; + private static final String user = "root"; + private static final String password = "taosdata"; + + + public static void main(String[] args) throws SQLException { + + final String jdbcUrl = "jdbc:TAOS://" + host + ":6030/?user=" + user + "&password=" + password; + +// get connection + Properties properties = new Properties(); + properties.setProperty("charset", "UTF-8"); + properties.setProperty("locale", "en_US.UTF-8"); + properties.setProperty("timezone", "UTC-8"); + System.out.println("get connection starting..."); +// ANCHOR: insert_data + try (Connection connection = DriverManager.getConnection(jdbcUrl, properties); + Statement stmt = connection.createStatement()) { + + // insert data, please make sure the database and table are created before + String insertQuery = "INSERT INTO " + + "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " + + "VALUES " + + "(NOW + 1a, 10.30000, 219, 0.31000) " + + "(NOW + 2a, 12.60000, 218, 0.33000) " + + "(NOW + 3a, 12.30000, 221, 0.31000) " + + "power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " + + "VALUES " + + "(NOW + 1a, 10.30000, 218, 0.25000) "; + int affectedRows = stmt.executeUpdate(insertQuery); + // you can check affectedRows here + System.out.println("Successfully inserted " + affectedRows + " rows to power.meters."); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to insert data to power.meters, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); + throw ex; + } +// ANCHOR_END: insert_data + } +} diff --git a/docs/examples/java/src/main/java/com/taos/example/JdbcQueryDemo.java b/docs/examples/java/src/main/java/com/taos/example/JdbcQueryDemo.java new file mode 100644 index 0000000000..4b28a7de28 --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/JdbcQueryDemo.java @@ -0,0 +1,55 @@ +package com.taos.example; + +import java.sql.*; +import java.util.Properties; + +public class JdbcQueryDemo { + private static final String host = "localhost"; + private static final String dbName = "test"; + private static final String tbName = "weather"; + private static final String user = "root"; + private static final String password = "taosdata"; + + + public static void main(String[] args) throws SQLException { + + final String jdbcUrl = "jdbc:TAOS://" + host + ":6030/?user=" + user + "&password=" + password; + +// get connection + Properties properties = new Properties(); + properties.setProperty("charset", "UTF-8"); + properties.setProperty("locale", "en_US.UTF-8"); + properties.setProperty("timezone", "UTC-8"); + System.out.println("get connection starting..."); +// ANCHOR: query_data + String sql = "SELECT ts, current, location FROM power.meters limit 100"; + try (Connection connection = DriverManager.getConnection(jdbcUrl, properties); + Statement stmt = connection.createStatement(); + // query data, make sure the database and table are created before + ResultSet resultSet = stmt.executeQuery(sql)) { + + Timestamp ts; + float current; + String location; + while (resultSet.next()) { + ts = resultSet.getTimestamp(1); + current = resultSet.getFloat(2); + // we recommend using the column name to get the value + location = resultSet.getString("location"); + + // you can check data here + System.out.printf("ts: %s, current: %f, location: %s %n", ts, current, location); + } + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to query data from power.meters, sql: %s, %sErrMessage: %s%n", + sql, + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
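+            // Rethrow so a failed query (the SQL text is included in the message above) is surfaced to the caller.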
+ ex.printStackTrace(); + throw ex; + } +// ANCHOR_END: query_data + } +} diff --git a/docs/examples/java/src/main/java/com/taos/example/JdbcReqIdDemo.java b/docs/examples/java/src/main/java/com/taos/example/JdbcReqIdDemo.java new file mode 100644 index 0000000000..ee0ff45962 --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/JdbcReqIdDemo.java @@ -0,0 +1,64 @@ +package com.taos.example; + +import com.taosdata.jdbc.AbstractStatement; + +import java.sql.*; +import java.util.Properties; + +public class JdbcReqIdDemo { + private static final String host = "localhost"; + private static final String dbName = "test"; + private static final String tbName = "weather"; + private static final String user = "root"; + private static final String password = "taosdata"; + + + public static void main(String[] args) throws SQLException { + + final String jdbcUrl = "jdbc:TAOS://" + host + ":6030/?user=" + user + "&password=" + password; + +// get connection + Properties properties = new Properties(); + properties.setProperty("charset", "UTF-8"); + properties.setProperty("locale", "en_US.UTF-8"); + properties.setProperty("timezone", "UTC-8"); + System.out.println("get connection starting..."); + +// ANCHOR: with_reqid + long reqId = 3L; + try (Connection connection = DriverManager.getConnection(jdbcUrl, properties); + // Create a statement that allows specifying a request ID + AbstractStatement aStmt = (AbstractStatement) connection.createStatement()) { + + try (ResultSet resultSet = aStmt.executeQuery("SELECT ts, current, location FROM power.meters limit 1", reqId)) { + Timestamp ts; + float current; + String location; + while (resultSet.next()) { + ts = resultSet.getTimestamp(1); + current = resultSet.getFloat(2); + // we recommend using the column name to get the value + location = resultSet.getString("location"); + + // you can check data here + System.out.printf("ts: %s, current: %f, location: %s %n", ts, current, location); + + } + } + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to execute sql with reqId: %s, %sErrMessage: %s%n", reqId, + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); + throw ex; + } +// ANCHOR_END: with_reqid + } + + private static void printResult(ResultSet resultSet) throws SQLException { + Util.printResult(resultSet); + } + +} diff --git a/docs/examples/java/src/main/java/com/taos/example/ParameterBindingBasicDemo.java b/docs/examples/java/src/main/java/com/taos/example/ParameterBindingBasicDemo.java new file mode 100644 index 0000000000..8a8019e54e --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/ParameterBindingBasicDemo.java @@ -0,0 +1,90 @@ +package com.taos.example; + +import com.taosdata.jdbc.TSDBPreparedStatement; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Random; + +// ANCHOR: para_bind +public class ParameterBindingBasicDemo { + + // modify host to your own + private static final String host = "127.0.0.1"; + private static final Random random = new Random(System.currentTimeMillis()); + private static final int numOfSubTable = 10, numOfRow = 10; + + public static void main(String[] args) throws SQLException { + + String jdbcUrl = "jdbc:TAOS://" + host + ":6030/"; + try (Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata")) { + + init(conn); + + String sql = "INSERT INTO ? USING power.meters TAGS(?,?) VALUES (?,?,?,?)"; + + try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) { + + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("d_bind_" + i); + + // set tags + pstmt.setTagInt(0, i); + pstmt.setTagString(1, "location_" + i); + + // set column ts + ArrayList tsList = new ArrayList<>(); + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) + tsList.add(current + j); + pstmt.setTimestamp(0, tsList); + + // set column current + ArrayList currentList = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) + currentList.add(random.nextFloat() * 30); + pstmt.setFloat(1, currentList); + + // set column voltage + ArrayList voltageList = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) + voltageList.add(random.nextInt(300)); + pstmt.setInt(2, voltageList); + + // set column phase + ArrayList phaseList = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) + phaseList.add(random.nextFloat()); + pstmt.setFloat(3, phaseList); + // add column + pstmt.columnDataAddBatch(); + } + // execute column + pstmt.columnDataExecuteBatch(); + // you can check exeResult here + System.out.println("Successfully inserted " + (numOfSubTable * numOfRow) + " rows to power.meters."); + } + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to insert to table meters using stmt, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
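+            // Rethrow so a failed parameter-binding batch is not mistaken for a successful insert.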
+ ex.printStackTrace(); + throw ex; + } + } + + private static void init(Connection conn) throws SQLException { + try (Statement stmt = conn.createStatement()) { + stmt.execute("CREATE DATABASE IF NOT EXISTS power"); + stmt.execute("USE power"); + stmt.execute("CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); + } + } +} +// ANCHOR_END: para_bind diff --git a/docs/examples/java/src/main/java/com/taos/example/ParameterBindingFullDemo.java b/docs/examples/java/src/main/java/com/taos/example/ParameterBindingFullDemo.java new file mode 100644 index 0000000000..5eb0cf0a61 --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/ParameterBindingFullDemo.java @@ -0,0 +1,325 @@ +package com.taos.example; + +import com.taosdata.jdbc.TSDBPreparedStatement; +import com.taosdata.jdbc.utils.StringUtils; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; + +// ANCHOR: para_bind +public class ParameterBindingFullDemo { + + private static final String host = "127.0.0.1"; + private static final Random random = new Random(System.currentTimeMillis()); + private static final int BINARY_COLUMN_SIZE = 50; + private static final String[] schemaList = { + "create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)", + "create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)", + "create table stable3(ts timestamp, f1 bool) tags(t1 bool)", + "create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))", + "create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))", + "create table stable6(ts timestamp, f1 varbinary(" + BINARY_COLUMN_SIZE + ")) tags(t1 varbinary(" + BINARY_COLUMN_SIZE + "))", + "create table stable7(ts timestamp, f1 geometry(" + BINARY_COLUMN_SIZE + ")) tags(t1 geometry(" + BINARY_COLUMN_SIZE + "))", + }; + private static final int numOfSubTable = 10, numOfRow = 10; + + public static void main(String[] args) throws SQLException { + + String jdbcUrl = "jdbc:TAOS://" + host + ":6030/"; + try (Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata")) { + + init(conn); + + bindInteger(conn); + bindFloat(conn); + bindBoolean(conn); + bindBytes(conn); + bindString(conn); + bindVarbinary(conn); + bindGeometry(conn); + + clean(conn); + } catch (SQLException ex) { + // handle any errors, please refer to the JDBC specifications for detailed exceptions info + System.out.println("Failed to insert to table meters using stmt, url: " + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); + throw ex; + } catch (Exception ex){ + System.out.println("Failed to insert to table meters using stmt, url: " + jdbcUrl + "; ErrMessage: " + ex.getMessage()); + throw ex; + } + } + + private static void init(Connection conn) throws SQLException { + clean(conn); + try (Statement stmt = conn.createStatement()) { + stmt.execute("create database if not exists test_parabind"); + stmt.execute("use test_parabind"); + for (int i = 0; i < schemaList.length; i++) { + stmt.execute(schemaList[i]); + } + } + } + private static void clean(Connection conn) throws SQLException { + try (Statement stmt = conn.createStatement()) { + stmt.execute("drop 
database if exists test_parabind"); + } + } + + private static void bindInteger(Connection conn) throws SQLException { + String sql = "insert into ? using stable1 tags(?,?,?,?) values(?,?,?,?,?)"; + + try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) { + + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("t1_" + i); + // set tags + pstmt.setTagByte(0, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE)))); + pstmt.setTagShort(1, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE)))); + pstmt.setTagInt(2, random.nextInt(Integer.MAX_VALUE)); + pstmt.setTagLong(3, random.nextLong()); + // set columns + ArrayList tsList = new ArrayList<>(); + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) + tsList.add(current + j); + pstmt.setTimestamp(0, tsList); + + ArrayList f1List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) + f1List.add(Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE)))); + pstmt.setByte(1, f1List); + + ArrayList f2List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) + f2List.add(Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE)))); + pstmt.setShort(2, f2List); + + ArrayList f3List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) + f3List.add(random.nextInt(Integer.MAX_VALUE)); + pstmt.setInt(3, f3List); + + ArrayList f4List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) + f4List.add(random.nextLong()); + pstmt.setLong(4, f4List); + + // add column + pstmt.columnDataAddBatch(); + } + // execute column + pstmt.columnDataExecuteBatch(); + } + } + + private static void bindFloat(Connection conn) throws SQLException { + String sql = "insert into ? using stable2 tags(?,?) values(?,?,?)"; + + TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class); + + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("t2_" + i); + // set tags + pstmt.setTagFloat(0, random.nextFloat()); + pstmt.setTagDouble(1, random.nextDouble()); + // set columns + ArrayList tsList = new ArrayList<>(); + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) + tsList.add(current + j); + pstmt.setTimestamp(0, tsList); + + ArrayList f1List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) + f1List.add(random.nextFloat()); + pstmt.setFloat(1, f1List); + + ArrayList f2List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) + f2List.add(random.nextDouble()); + pstmt.setDouble(2, f2List); + + // add column + pstmt.columnDataAddBatch(); + } + // execute + pstmt.columnDataExecuteBatch(); + // close if no try-with-catch statement is used + pstmt.close(); + } + + private static void bindBoolean(Connection conn) throws SQLException { + String sql = "insert into ? using stable3 tags(?) 
values(?,?)";
+
+        try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
+            for (int i = 1; i <= numOfSubTable; i++) {
+                // set table name
+                pstmt.setTableName("t3_" + i);
+                // set tags
+                pstmt.setTagBoolean(0, random.nextBoolean());
+                // set columns
+                ArrayList<Long> tsList = new ArrayList<>();
+                long current = System.currentTimeMillis();
+                for (int j = 0; j < numOfRow; j++)
+                    tsList.add(current + j);
+                pstmt.setTimestamp(0, tsList);
+
+                ArrayList<Boolean> f1List = new ArrayList<>();
+                for (int j = 0; j < numOfRow; j++)
+                    f1List.add(random.nextBoolean());
+                pstmt.setBoolean(1, f1List);
+
+                // add column
+                pstmt.columnDataAddBatch();
+            }
+            // execute
+            pstmt.columnDataExecuteBatch();
+        }
+    }
+
+    private static void bindBytes(Connection conn) throws SQLException {
+        String sql = "insert into ? using stable4 tags(?) values(?,?)";
+
+        try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
+
+            for (int i = 1; i <= numOfSubTable; i++) {
+                // set table name
+                pstmt.setTableName("t4_" + i);
+                // set tags
+                pstmt.setTagString(0, new String("abc"));
+
+                // set columns
+                ArrayList<Long> tsList = new ArrayList<>();
+                long current = System.currentTimeMillis();
+                for (int j = 0; j < numOfRow; j++)
+                    tsList.add(current + j);
+                pstmt.setTimestamp(0, tsList);
+
+                ArrayList<String> f1List = new ArrayList<>();
+                for (int j = 0; j < numOfRow; j++) {
+                    f1List.add(new String("abc"));
+                }
+                pstmt.setString(1, f1List, BINARY_COLUMN_SIZE);
+
+                // add column
+                pstmt.columnDataAddBatch();
+            }
+            // execute
+            pstmt.columnDataExecuteBatch();
+        }
+    }
+
+    private static void bindString(Connection conn) throws SQLException {
+        String sql = "insert into ? using stable5 tags(?) values(?,?)";
+
+        try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
+
+            for (int i = 1; i <= numOfSubTable; i++) {
+                // set table name
+                pstmt.setTableName("t5_" + i);
+                // set tags
+                pstmt.setTagNString(0, "California.SanFrancisco");
+
+                // set columns
+                ArrayList<Long> tsList = new ArrayList<>();
+                long current = System.currentTimeMillis();
+                for (int j = 0; j < numOfRow; j++)
+                    tsList.add(current + j);
+                pstmt.setTimestamp(0, tsList);
+
+                ArrayList<String> f1List = new ArrayList<>();
+                for (int j = 0; j < numOfRow; j++) {
+                    f1List.add("California.LosAngeles");
+                }
+                pstmt.setNString(1, f1List, BINARY_COLUMN_SIZE);
+
+                // add column
+                pstmt.columnDataAddBatch();
+            }
+            // execute
+            pstmt.columnDataExecuteBatch();
+        }
+    }
+
+    private static void bindVarbinary(Connection conn) throws SQLException {
+        String sql = "insert into ? using stable6 tags(?) 
values(?,?)";
+
+        try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
+
+            for (int i = 1; i <= numOfSubTable; i++) {
+                // set table name
+                pstmt.setTableName("t6_" + i);
+                // set tags
+                byte[] bTag = new byte[]{0,2,3,4,5};
+                bTag[0] = (byte) i;
+                pstmt.setTagVarbinary(0, bTag);
+
+                // set columns
+                ArrayList<Long> tsList = new ArrayList<>();
+                long current = System.currentTimeMillis();
+                for (int j = 0; j < numOfRow; j++)
+                    tsList.add(current + j);
+                pstmt.setTimestamp(0, tsList);
+
+                ArrayList<byte[]> f1List = new ArrayList<>();
+                for (int j = 0; j < numOfRow; j++) {
+                    byte[] v = new byte[]{0,2,3,4,5,6};
+                    v[0] = (byte)j;
+                    f1List.add(v);
+                }
+                pstmt.setVarbinary(1, f1List, BINARY_COLUMN_SIZE);
+
+                // add column
+                pstmt.columnDataAddBatch();
+            }
+            // execute
+            pstmt.columnDataExecuteBatch();
+        }
+    }
+
+    private static void bindGeometry(Connection conn) throws SQLException {
+        String sql = "insert into ? using stable7 tags(?) values(?,?)";
+
+        try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
+
+            byte[] g1 = StringUtils.hexToBytes("0101000000000000000000F03F0000000000000040");
+            byte[] g2 = StringUtils.hexToBytes("0102000020E610000002000000000000000000F03F000000000000004000000000000008400000000000001040");
+            List<byte[]> listGeo = new ArrayList<>();
+            listGeo.add(g1);
+            listGeo.add(g2);
+
+            for (int i = 1; i <= 2; i++) {
+                // set table name
+                pstmt.setTableName("t7_" + i);
+                // set tags
+                pstmt.setTagGeometry(0, listGeo.get(i - 1));
+
+                // set columns
+                ArrayList<Long> tsList = new ArrayList<>();
+                long current = System.currentTimeMillis();
+                for (int j = 0; j < numOfRow; j++)
+                    tsList.add(current + j);
+                pstmt.setTimestamp(0, tsList);
+
+                ArrayList<byte[]> f1List = new ArrayList<>();
+                for (int j = 0; j < numOfRow; j++) {
+                    f1List.add(listGeo.get(i - 1));
+                }
+                pstmt.setGeometry(1, f1List, BINARY_COLUMN_SIZE);
+
+                // add column
+                pstmt.columnDataAddBatch();
+            }
+            // execute
+            pstmt.columnDataExecuteBatch();
+        }
+    }
+}
+// ANCHOR_END: para_bind
diff --git a/docs/examples/java/src/main/java/com/taos/example/RESTConnectExample.java b/docs/examples/java/src/main/java/com/taos/example/RESTConnectExample.java
index b1ec31ee86..441d478b07 100644
--- a/docs/examples/java/src/main/java/com/taos/example/RESTConnectExample.java
+++ b/docs/examples/java/src/main/java/com/taos/example/RESTConnectExample.java
@@ -5,22 +5,24 @@ import java.sql.DriverManager;
 import java.sql.SQLException;
 
 public class RESTConnectExample {
-// ANCHOR: main
-public static void main(String[] args) throws SQLException {
-    String jdbcUrl = "jdbc:TAOS-RS://localhost:6041?user=root&password=taosdata";
-    try (Connection conn = DriverManager.getConnection(jdbcUrl)){
-        System.out.println("Connected to " + jdbcUrl + " successfully.");
+    // ANCHOR: main
+    public static void main(String[] args) throws Exception {
+        String jdbcUrl = "jdbc:TAOS-RS://localhost:6041?user=root&password=taosdata";
+        try (Connection conn = DriverManager.getConnection(jdbcUrl)) {
+            System.out.println("Connected to " + jdbcUrl + " successfully.");
 
-        // you can use the connection for execute SQL here
+            // you can use the connection for execute SQL here
 
-    } catch (SQLException ex) {
-        // handle any errors, please refer to the JDBC specifications for detailed exceptions info
-        System.out.println("Failed to connect to " + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage());
-        throw ex;
-    } catch (Exception ex){
-        System.out.println("Failed to connect to " + jdbcUrl + "; 
ErrMessage: " + ex.getMessage()); - throw ex; + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to connect to %s, %sErrMessage: %s%n", + jdbcUrl, + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; + } } + // ANCHOR_END: main } -// ANCHOR_END: main -} \ No newline at end of file diff --git a/docs/examples/java/src/main/java/com/taos/example/SchemalessJniTest.java b/docs/examples/java/src/main/java/com/taos/example/SchemalessJniTest.java new file mode 100644 index 0000000000..818dda90d8 --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/SchemalessJniTest.java @@ -0,0 +1,47 @@ +package com.taos.example; + +import com.taosdata.jdbc.AbstractConnection; +import com.taosdata.jdbc.enums.SchemalessProtocolType; +import com.taosdata.jdbc.enums.SchemalessTimestampType; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.sql.Statement; + +// ANCHOR: schemaless +public class SchemalessJniTest { + private static final String host = "127.0.0.1"; + private static final String lineDemo = "meters,groupid=2,location=California.SanFrancisco current=10.3000002f64,voltage=219i32,phase=0.31f64 1626006833639"; + private static final String telnetDemo = "metric_telnet 1707095283260 4 host=host0 interface=eth0"; + private static final String jsonDemo = "{\"metric\": \"metric_json\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}"; + + public static void main(String[] args) throws SQLException { + final String jdbcUrl = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; + try (Connection connection = DriverManager.getConnection(jdbcUrl)) { + init(connection); + AbstractConnection conn = connection.unwrap(AbstractConnection.class); + + conn.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.MILLI_SECONDS); + conn.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS); + conn.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.NOT_CONFIGURED); + System.out.println("Inserted data with schemaless successfully."); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to insert data with schemaless, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); + throw ex; + } + } + + private static void init(Connection connection) throws SQLException { + try (Statement stmt = connection.createStatement()) { + stmt.execute("CREATE DATABASE IF NOT EXISTS power"); + stmt.execute("USE power"); + } + } +} +// ANCHOR_END: schemaless diff --git a/docs/examples/java/src/main/java/com/taos/example/SchemalessWsTest.java b/docs/examples/java/src/main/java/com/taos/example/SchemalessWsTest.java new file mode 100644 index 0000000000..08f66c2227 --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/SchemalessWsTest.java @@ -0,0 +1,47 @@ +package com.taos.example; + +import com.taosdata.jdbc.AbstractConnection; +import com.taosdata.jdbc.enums.SchemalessProtocolType; +import com.taosdata.jdbc.enums.SchemalessTimestampType; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.sql.Statement; + +// ANCHOR: schemaless +public class SchemalessWsTest { + private static final String host = "127.0.0.1"; + private static final String lineDemo = "meters,groupid=2,location=California.SanFrancisco current=10.3000002f64,voltage=219i32,phase=0.31f64 1626006833639"; + private static final String telnetDemo = "metric_telnet 1707095283260 4 host=host0 interface=eth0"; + private static final String jsonDemo = "{\"metric\": \"metric_json\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}"; + + public static void main(String[] args) throws SQLException { + final String url = "jdbc:TAOS-RS://" + host + ":6041?user=root&password=taosdata&batchfetch=true"; + try(Connection connection = DriverManager.getConnection(url)){ + init(connection); + AbstractConnection conn = connection.unwrap(AbstractConnection.class); + + conn.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.MILLI_SECONDS); + conn.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS); + conn.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS); + System.out.println("Inserted data with schemaless successfully."); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to insert data with schemaless, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); + throw ex; + } + } + + private static void init(Connection connection) throws SQLException { + try (Statement stmt = connection.createStatement()) { + stmt.execute("CREATE DATABASE IF NOT EXISTS power"); + stmt.execute("USE power"); + } + } +} +// ANCHOR_END: schemaless diff --git a/docs/examples/java/src/main/java/com/taos/example/Util.java b/docs/examples/java/src/main/java/com/taos/example/Util.java new file mode 100644 index 0000000000..79a1d8628b --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/Util.java @@ -0,0 +1,25 @@ +package com.taos.example; + +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; + +public class Util { + public static void printResult(ResultSet resultSet) throws SQLException { + ResultSetMetaData metaData = resultSet.getMetaData(); + for (int i = 1; i <= metaData.getColumnCount(); i++) { + String columnLabel = metaData.getColumnLabel(i); + System.out.printf(" %s |", columnLabel); + } + System.out.println(); + System.out.println("-------------------------------------------------------------"); + while (resultSet.next()) { + for (int i = 1; i <= metaData.getColumnCount(); i++) { + String value = resultSet.getString(i); + System.out.printf("%s, ", value); + } + System.out.println(); + } + } + +} diff --git a/docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java b/docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java index 21f184b45a..afe74ace83 100644 --- a/docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java +++ b/docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java @@ -9,7 +9,7 @@ import java.util.Properties; public class WSConnectExample { // ANCHOR: main - public static void main(String[] args) throws SQLException { + public static void main(String[] args) throws Exception { // use // String jdbcUrl = // "jdbc:TAOS-RS://localhost:6041/dbName?user=root&password=taosdata&batchfetch=true"; @@ -25,14 +25,14 @@ public class WSConnectExample { // you can use the connection for execute SQL here - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed - // exceptions info - System.out.println("Failed to connect to " + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " - + ex.getMessage()); - throw ex; } catch (Exception ex) { - System.out.println("Failed to connect to " + jdbcUrl + "; ErrMessage: " + ex.getMessage()); + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to connect to %s, %sErrMessage: %s%n", + jdbcUrl, + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); throw ex; } } diff --git a/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingBasicDemo.java b/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingBasicDemo.java new file mode 100644 index 0000000000..eab8df06b9 --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingBasicDemo.java @@ -0,0 +1,67 @@ +package com.taos.example; + +import com.taosdata.jdbc.ws.TSWSPreparedStatement; + +import java.sql.*; +import java.util.Random; + +// ANCHOR: para_bind +public class WSParameterBindingBasicDemo { + + // modify host to your own + private static final String host = "127.0.0.1"; + private static final Random random = new Random(System.currentTimeMillis()); + private static final int numOfSubTable = 10, numOfRow = 10; + + public static void main(String[] args) throws SQLException { + + String jdbcUrl = "jdbc:TAOS-RS://" + host + ":6041/?batchfetch=true"; + try (Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata")) { + init(conn); + + String sql = "INSERT INTO ? USING power.meters TAGS(?,?) VALUES (?,?,?,?)"; + + try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) { + + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("d_bind_" + i); + + // set tags + pstmt.setTagInt(0, i); + pstmt.setTagString(1, "location_" + i); + + // set columns + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) { + pstmt.setTimestamp(1, new Timestamp(current + j)); + pstmt.setFloat(2, random.nextFloat() * 30); + pstmt.setInt(3, random.nextInt(300)); + pstmt.setFloat(4, random.nextFloat()); + pstmt.addBatch(); + } + int [] exeResult = pstmt.executeBatch(); + // you can check exeResult here + System.out.println("Successfully inserted " + exeResult.length + " rows to power.meters."); + } + } + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to insert to table meters using stmt, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); + throw ex; + } + } + + private static void init(Connection conn) throws SQLException { + try (Statement stmt = conn.createStatement()) { + stmt.execute("CREATE DATABASE IF NOT EXISTS power"); + stmt.execute("USE power"); + stmt.execute("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); + } + } +} +// ANCHOR_END: para_bind diff --git a/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingFullDemo.java b/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingFullDemo.java new file mode 100644 index 0000000000..ec94f2ded6 --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingFullDemo.java @@ -0,0 +1,180 @@ +package com.taos.example; + +import com.taosdata.jdbc.ws.TSWSPreparedStatement; + +import java.sql.*; +import java.util.Random; + +// ANCHOR: para_bind +public class WSParameterBindingFullDemo { + private static final String host = "127.0.0.1"; + private static final Random random = new Random(System.currentTimeMillis()); + private static final int BINARY_COLUMN_SIZE = 30; + private static final String[] schemaList = { + "create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)", + "create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)", + "create table stable3(ts timestamp, f1 bool) tags(t1 bool)", + "create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))", + "create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))" + }; + private static final int numOfSubTable = 10, numOfRow = 10; + + public static void main(String[] args) throws SQLException { + + String jdbcUrl = "jdbc:TAOS-RS://" + host + ":6041/?batchfetch=true"; + + try (Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata")) { + + init(conn); + + bindInteger(conn); + + bindFloat(conn); + + bindBoolean(conn); + + bindBytes(conn); + + bindString(conn); + + } catch (SQLException ex) { + // handle any errors, please refer to the JDBC specifications for detailed exceptions info + System.out.println("Failed to insert to table meters using stmt, url: " + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); + throw ex; + } catch (Exception ex){ + System.out.println("Failed to insert to table meters using stmt, url: " + jdbcUrl + "; ErrMessage: " + ex.getMessage()); + throw ex; + } + } + + private static void init(Connection conn) throws SQLException { + try (Statement stmt = conn.createStatement()) { + stmt.execute("drop database if exists test_ws_parabind"); + stmt.execute("create database if not exists test_ws_parabind"); + stmt.execute("use test_ws_parabind"); + for (int i = 0; i < schemaList.length; i++) { + stmt.execute(schemaList[i]); + } + } + } + + private static void bindInteger(Connection conn) throws SQLException { + String sql = "insert into ? using stable1 tags(?,?,?,?) 
values(?,?,?,?,?)"; + + try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) { + + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("t1_" + i); + // set tags + pstmt.setTagByte(1, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE)))); + pstmt.setTagShort(2, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE)))); + pstmt.setTagInt(3, random.nextInt(Integer.MAX_VALUE)); + pstmt.setTagLong(4, random.nextLong()); + // set columns + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) { + pstmt.setTimestamp(1, new Timestamp(current + j)); + pstmt.setByte(2, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE)))); + pstmt.setShort(3, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE)))); + pstmt.setInt(4, random.nextInt(Integer.MAX_VALUE)); + pstmt.setLong(5, random.nextLong()); + pstmt.addBatch(); + } + pstmt.executeBatch(); + } + } + } + + private static void bindFloat(Connection conn) throws SQLException { + String sql = "insert into ? using stable2 tags(?,?) values(?,?,?)"; + + try(TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) { + + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("t2_" + i); + // set tags + pstmt.setTagFloat(1, random.nextFloat()); + pstmt.setTagDouble(2, random.nextDouble()); + // set columns + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) { + pstmt.setTimestamp(1, new Timestamp(current + j)); + pstmt.setFloat(2, random.nextFloat()); + pstmt.setDouble(3, random.nextDouble()); + pstmt.addBatch(); + } + pstmt.executeBatch(); + } + } + } + + private static void bindBoolean(Connection conn) throws SQLException { + String sql = "insert into ? using stable3 tags(?) values(?,?)"; + + try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) { + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("t3_" + i); + // set tags + pstmt.setTagBoolean(1, random.nextBoolean()); + // set columns + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) { + pstmt.setTimestamp(1, new Timestamp(current + j)); + pstmt.setBoolean(2, random.nextBoolean()); + pstmt.addBatch(); + } + pstmt.executeBatch(); + } + } + } + + private static void bindBytes(Connection conn) throws SQLException { + String sql = "insert into ? using stable4 tags(?) values(?,?)"; + + try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) { + + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("t4_" + i); + // set tags + pstmt.setTagString(1, new String("abc")); + + // set columns + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) { + pstmt.setTimestamp(1, new Timestamp(current + j)); + pstmt.setString(2, "abc"); + pstmt.addBatch(); + } + pstmt.executeBatch(); + } + } + } + + private static void bindString(Connection conn) throws SQLException { + String sql = "insert into ? using stable5 tags(?) 
values(?,?)";
+
+        try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
+
+            for (int i = 1; i <= numOfSubTable; i++) {
+                // set table name
+                pstmt.setTableName("t5_" + i);
+                // set tags
+                pstmt.setTagNString(1, "California.SanFrancisco");
+
+                // set columns
+                long current = System.currentTimeMillis();
+                for (int j = 0; j < numOfRow; j++) {
+                    pstmt.setTimestamp(1, new Timestamp(current + j));
+                    pstmt.setNString(2, "California.SanFrancisco");
+                    pstmt.addBatch();
+                }
+                pstmt.executeBatch();
+            }
+        }
+    }
+}
+// ANCHOR_END: para_bind
diff --git a/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java b/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java
new file mode 100644
index 0000000000..66c37f172e
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java
@@ -0,0 +1,384 @@
+package com.taos.example;
+
+import com.alibaba.fastjson.JSON;
+import com.taosdata.jdbc.TSDBDriver;
+import com.taosdata.jdbc.tmq.*;
+
+import java.sql.*;
+import java.time.Duration;
+import java.util.Collections;
+import java.util.List;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+// ANCHOR: consumer_demo
+public class WsConsumerLoopFull {
+    static private Connection connection;
+    static private Statement statement;
+    static private volatile boolean stopThread = false;
+
+    public static TaosConsumer getConsumer() throws Exception {
+// ANCHOR: create_consumer
+        Properties config = new Properties();
+        config.setProperty("td.connect.type", "ws");
+        config.setProperty("bootstrap.servers", "localhost:6041");
+        config.setProperty("auto.offset.reset", "latest");
+        config.setProperty("msg.with.table.name", "true");
+        config.setProperty("enable.auto.commit", "true");
+        config.setProperty("auto.commit.interval.ms", "1000");
+        config.setProperty("group.id", "group1");
+        config.setProperty("client.id", "1");
+        config.setProperty("td.connect.user", "root");
+        config.setProperty("td.connect.pass", "taosdata");
+        config.setProperty("value.deserializer", "com.taos.example.WsConsumerLoopFull$ResultDeserializer");
+        config.setProperty("value.deserializer.encoding", "UTF-8");
+
+        try {
+            TaosConsumer consumer= new TaosConsumer<>(config);
+            System.out.printf("Create consumer successfully, host: %s, groupId: %s, clientId: %s%n",
+                    config.getProperty("bootstrap.servers"),
+                    config.getProperty("group.id"),
+                    config.getProperty("client.id"));
+            return consumer;
+        } catch (Exception ex) {
+            // please refer to the JDBC specifications for detailed exceptions info
+            System.out.printf("Failed to create websocket consumer, host: %s, %sErrMessage: %s%n",
+                    config.getProperty("bootstrap.servers"),
+                    ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "",
+                    ex.getMessage());
+            // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); + throw ex; + } +// ANCHOR_END: create_consumer + } + + public static void pollExample(TaosConsumer consumer) throws SQLException { +// ANCHOR: poll_data_code_piece + try { + List topics = Collections.singletonList("topic_meters"); + + // subscribe to the topics + consumer.subscribe(topics); + System.out.println("Subscribe topics successfully."); + for (int i = 0; i < 50; i++) { + // poll data + ConsumerRecords records = consumer.poll(Duration.ofMillis(100)); + for (ConsumerRecord record : records) { + ResultBean bean = record.value(); + // process the data here + System.out.println("data: " + JSON.toJSONString(bean)); + } + } + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to poll data, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; + } +// ANCHOR_END: poll_data_code_piece + } + + public static void seekExample(TaosConsumer consumer) throws SQLException { +// ANCHOR: consumer_seek + try { + List topics = Collections.singletonList("topic_meters"); + + // subscribe to the topics + consumer.subscribe(topics); + System.out.println("Subscribe topics successfully."); + Set assignment = consumer.assignment(); + System.out.println("Now assignment: " + JSON.toJSONString(assignment)); + + ConsumerRecords records = ConsumerRecords.emptyRecord(); + // make sure we have got some data + while (records.isEmpty()) { + records = consumer.poll(Duration.ofMillis(100)); + } + + consumer.seekToBeginning(assignment); + System.out.println("Assignment seek to beginning successfully."); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to execute seek example, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; + } +// ANCHOR_END: consumer_seek + } + + + public static void commitExample(TaosConsumer consumer) throws SQLException { +// ANCHOR: commit_code_piece + try { + List topics = Collections.singletonList("topic_meters"); + + consumer.subscribe(topics); + for (int i = 0; i < 50; i++) { + ConsumerRecords records = consumer.poll(Duration.ofMillis(100)); + for (ConsumerRecord record : records) { + ResultBean bean = record.value(); + // process your data here + System.out.println("data: " + JSON.toJSONString(bean)); + } + if (!records.isEmpty()) { + // after processing the data, commit the offset manually + consumer.commitSync(); + System.out.println("Commit offset manually successfully."); + } + } + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to execute commit example, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); + throw ex; + } +// ANCHOR_END: commit_code_piece + } + + public static void unsubscribeExample(TaosConsumer consumer) throws SQLException { + List topics = Collections.singletonList("topic_meters"); + consumer.subscribe(topics); +// ANCHOR: unsubscribe_data_code_piece + try { + // unsubscribe the consumer + consumer.unsubscribe(); + System.out.println("Consumer unsubscribed successfully."); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to unsubscribe consumer, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; + } + finally { + // close the consumer + consumer.close(); + System.out.println("Consumer closed successfully."); + } +// ANCHOR_END: unsubscribe_data_code_piece + } + + public static class ResultDeserializer extends ReferenceDeserializer { + + } + + // use this class to define the data structure of the result record + public static class ResultBean { + private Timestamp ts; + private double current; + private int voltage; + private double phase; + private int groupid; + private String location; + + public Timestamp getTs() { + return ts; + } + + public void setTs(Timestamp ts) { + this.ts = ts; + } + + public double getCurrent() { + return current; + } + + public void setCurrent(double current) { + this.current = current; + } + + public int getVoltage() { + return voltage; + } + + public void setVoltage(int voltage) { + this.voltage = voltage; + } + + public double getPhase() { + return phase; + } + + public void setPhase(double phase) { + this.phase = phase; + } + + public int getGroupid() { + return groupid; + } + + public void setGroupid(int groupid) { + this.groupid = groupid; + } + + public String getLocation() { + return location; + } + + public void setLocation(String location) { + this.location = location; + } + } + + public static void prepareData() throws SQLException, InterruptedException { + try { + int i = 0; + while (!stopThread) { + String insertQuery = "INSERT INTO power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') VALUES (NOW + " + i + "a, 10.30000, 219, 0.31000) "; + int affectedRows = statement.executeUpdate(insertQuery); + assert affectedRows == 1; + i++; + Thread.sleep(1); + } + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to insert data to power.meters, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; + } + } + + public static void prepareMeta() throws SQLException { + try { + statement.executeUpdate("CREATE DATABASE IF NOT EXISTS power"); + statement.executeUpdate("USE power"); + statement.executeUpdate("CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); + statement.executeUpdate("CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage, phase, groupid, location FROM meters"); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to create db and table, %sErrMessage: %s%n", + ex instanceof SQLException ? 
"ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; + } + } + + public static void initConnection() throws SQLException { + String url = "jdbc:TAOS://localhost:6030?user=root&password=taosdata"; + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "C"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + + try { + connection = DriverManager.getConnection(url, properties); + } catch (SQLException ex) { + System.out.println("Failed to create connection, url:" + url + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); + throw new SQLException("Failed to create connection", ex); + } + try { + statement = connection.createStatement(); + } catch (SQLException ex) { + System.out.println("Failed to create statement, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); + throw new SQLException("Failed to create statement", ex); + } + System.out.println("Connection created successfully."); + } + + public static void closeConnection() throws SQLException { + try { + if (statement != null) { + statement.close(); + } + } catch (SQLException ex) { + System.out.println("Failed to close statement, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); + throw new SQLException("Failed to close statement", ex); + } + + try { + if (connection != null) { + connection.close(); + } + } catch (SQLException ex) { + System.out.println("Failed to close connection, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); + throw new SQLException("Failed to close connection", ex); + } + System.out.println("Connection closed Successfully."); + } + + + public static void main(String[] args) throws SQLException, InterruptedException { + initConnection(); + prepareMeta(); + + // create a single thread executor + ExecutorService executor = Executors.newSingleThreadExecutor(); + + // submit a task + executor.submit(() -> { + try { + prepareData(); + } catch (SQLException ex) { + System.out.println("Failed to prepare data, ErrCode:" + ex.getErrorCode() + ", ErrMessage: " + ex.getMessage()); + return; + } catch (Exception ex) { + System.out.println("Failed to prepare data, ErrMessage: " + ex.getMessage()); + return; + } + System.out.println("pollDataExample executed successfully."); + }); + + try { + TaosConsumer consumer = getConsumer(); + + pollExample(consumer); + System.out.println("pollExample executed successfully."); + consumer.unsubscribe(); + + seekExample(consumer); + System.out.println("seekExample executed successfully."); + consumer.unsubscribe(); + + commitExample(consumer); + System.out.println("commitExample executed successfully."); + consumer.unsubscribe(); + + unsubscribeExample(consumer); + System.out.println("unsubscribeExample executed successfully"); + } catch (SQLException ex) { + System.out.println("Failed to poll data from topic_meters, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); + return; + } catch (Exception ex) { + System.out.println("Failed to poll data from topic_meters, ErrMessage: " + ex.getMessage()); + return; + } + + stopThread = true; + // close the executor, which will make the executor reject new tasks + executor.shutdown(); + + try { + // wait for the executor to terminate + boolean result = executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); + assert result; + } catch 
(InterruptedException e) { + Thread.currentThread().interrupt(); + } catch (Exception e) { + e.printStackTrace(); + System.out.println("Wait executor termination failed."); + } + + closeConnection(); + System.out.println("program end."); + } +} +// ANCHOR_END: consumer_demo diff --git a/docs/examples/node/websocketexample/line_example.js b/docs/examples/node/websocketexample/line_example.js index 7e76db2677..4fc5042f5a 100644 --- a/docs/examples/node/websocketexample/line_example.js +++ b/docs/examples/node/websocketexample/line_example.js @@ -28,7 +28,7 @@ async function test() { console.log("Inserted data with schemaless successfully.") } catch (err) { - console.error("Failed to insert data with schemaless, url:"+ dsn +", ErrCode:" + err.code + "; ErrMessage: " + err.message); + console.error("Failed to insert data with schemaless, ErrCode: " + err.code + ", ErrMessage: " + err.message); } finally { if (wsRows) { @@ -40,4 +40,4 @@ async function test() { taos.destroy(); } } -test() \ No newline at end of file +test() diff --git a/docs/examples/node/websocketexample/sql_example.js b/docs/examples/node/websocketexample/sql_example.js index e36bd037ed..8ef4dcb831 100644 --- a/docs/examples/node/websocketexample/sql_example.js +++ b/docs/examples/node/websocketexample/sql_example.js @@ -9,11 +9,11 @@ async function createConnect() { conf.setUser('root'); conf.setPwd('taosdata'); conf.setDb('power'); - conn = await taos.sqlConnect(conf); + conn = await taos.sqlConnect(conf); console.log("Connected to " + dsn + " successfully."); - return conn; + return conn; } catch (err) { - console.log("Failed to connect to " + dns + "; ErrCode:" + err.code + "; ErrMessage: " + err.message); + console.log("Failed to connect to " + dns + ", ErrCode: " + err.code + ", ErrMessage: " + err.message); throw err; } @@ -29,13 +29,13 @@ async function createDbAndTable() { await wsSql.exec('CREATE DATABASE IF NOT EXISTS power'); console.log("Create database power successfully."); // create table - await wsSql.exec('CREATE STABLE IF NOT EXISTS power.meters ' + - '(_ts timestamp, current float, voltage int, phase float) ' + - 'TAGS (location binary(64), groupId int);'); + await wsSql.exec('CREATE STABLE IF NOT EXISTS power.meters ' + + '(_ts timestamp, current float, voltage int, phase float) ' + + 'TAGS (location binary(64), groupId int);'); console.log("Create stable power.meters successfully"); } catch (err) { - console.error("Failed to create db and table, url:" + dns + "; ErrCode:" + err.code + "; ErrMessage: " + err.message); + console.error("Failed to create database power or stable meters, ErrCode: " + err.code + ", ErrMessage: " + err.message); } finally { if (wsSql) { await wsSql.close(); @@ -51,18 +51,18 @@ async function insertData() { try { wsSql = await createConnect(); let insertQuery = "INSERT INTO " + - "power.d1001 USING power.meters (location, groupId) TAGS('California.SanFrancisco', 2) " + - "VALUES " + - "(NOW + 1a, 10.30000, 219, 0.31000) " + - "(NOW + 2a, 12.60000, 218, 0.33000) " + - "(NOW + 3a, 12.30000, 221, 0.31000) " + - "power.d1002 USING power.meters TAGS('California.SanFrancisco', 3) " + - "VALUES " + - "(NOW + 1a, 10.30000, 218, 0.25000) "; + "power.d1001 USING power.meters (location, groupId) TAGS('California.SanFrancisco', 2) " + + "VALUES " + + "(NOW + 1a, 10.30000, 219, 0.31000) " + + "(NOW + 2a, 12.60000, 218, 0.33000) " + + "(NOW + 3a, 12.30000, 221, 0.31000) " + + "power.d1002 USING power.meters TAGS('California.SanFrancisco', 3) " + + "VALUES " + + "(NOW + 1a, 10.30000, 218, 
0.25000) "; taosResult = await wsSql.exec(insertQuery); console.log("Successfully inserted " + taosResult.getAffectRows() + " rows to power.meters."); } catch (err) { - console.error("Failed to insert data to power.meters, url:" + dsn + "; ErrCode:" + err.code + "; ErrMessage: " + err.message); + console.error("Failed to insert data to power.meters, ErrCode: " + err.code + ", ErrMessage: " + err.message); } finally { if (wsSql) { await wsSql.close(); @@ -75,21 +75,22 @@ async function insertData() { async function queryData() { let wsRows = null; let wsSql = null; + let sql = 'SELECT ts, current, location FROM power.meters limit 100'; try { wsSql = await createConnect(); - wsRows = await wsSql.query('SELECT ts, current, location FROM power.meters limit 100'); + wsRows = await wsSql.query(sql); while (await wsRows.next()) { let row = wsRows.getData(); - console.log('ts: ' + row[0] + ', current: ' + row[1] + ', location: ' + row[2]); + console.log('ts: ' + row[0] + ', current: ' + row[1] + ', location: ' + row[2]); } } catch (err) { - console.error("Failed to query data from power.meters, url:" + dsn + " ; ErrCode:" + err.code + "; ErrMessage: " + err.message); + console.error("Failed to query data from power.meters, sql: " + sql + ", ErrCode:" + err.code + ", ErrMessage: " + err.message); } finally { if (wsRows) { await wsRows.close(); - } + } if (wsSql) { await wsSql.close(); } @@ -107,16 +108,16 @@ async function sqlWithReqid() { wsRows = await wsSql.query('SELECT ts, current, location FROM power.meters limit 100', reqId); while (await wsRows.next()) { let row = wsRows.getData(); - console.log('ts: ' + row[0] + ', current: ' + row[1] + ', location: ' + row[2]); + console.log('ts: ' + row[0] + ', current: ' + row[1] + ', location: ' + row[2]); } } catch (err) { - console.error("Failed to execute sql with reqId: " + reqId + ", ErrCode:" + err.code + "; ErrMessage: " + err.message); + console.error("Failed to execute sql with reqId: " + reqId + ", ErrCode: " + err.code + ", ErrMessage: " + err.message); } finally { if (wsRows) { await wsRows.close(); - } + } if (wsSql) { await wsSql.close(); } @@ -129,7 +130,7 @@ async function test() { await insertData(); await queryData(); await sqlWithReqid(); - taos.destroy(); + taos.destroy(); } -test() \ No newline at end of file +test() diff --git a/docs/examples/node/websocketexample/stmt_example.js b/docs/examples/node/websocketexample/stmt_example.js index d159de6ef8..6ca4959c48 100644 --- a/docs/examples/node/websocketexample/stmt_example.js +++ b/docs/examples/node/websocketexample/stmt_example.js @@ -3,7 +3,7 @@ const taos = require("@tdengine/websocket"); let db = 'power'; let stable = 'meters'; let numOfSubTable = 10; -let numOfRow = 10; +let numOfRow = 10; let dsn = 'ws://localhost:6041' function getRandomInt(min, max) { min = Math.ceil(min); @@ -59,7 +59,7 @@ async function prepare() { } } catch (err) { - console.error("Failed to insert to table meters using stmt, url:" + dsn + "ErrCode:" + err.code + "; ErrMessage: " + err.message); + console.error("Failed to insert to table meters using stmt, ErrCode: " + err.code + ", ErrMessage: " + err.message); } finally { if (stmt) { diff --git a/docs/examples/node/websocketexample/tmq_example.js b/docs/examples/node/websocketexample/tmq_example.js index ff676fa972..b0e52360c9 100644 --- a/docs/examples/node/websocketexample/tmq_example.js +++ b/docs/examples/node/websocketexample/tmq_example.js @@ -23,11 +23,11 @@ async function createConsumer() { conn = await taos.tmqConnect(configMap); 
console.log(`Create consumer successfully, host: ${url}, groupId: ${groupId}, clientId: ${clientId}`) return conn; - }catch (err) { - console.log("Failed to create websocket consumer, ErrCode:" + err.code + "; ErrMessage: " + err.message); + } catch (err) { + console.log("Failed to create websocket consumer, ErrCode:" + err.code + ", ErrMessage: " + err.message); throw err; } - + } // ANCHOR_END: create_consumer @@ -38,7 +38,7 @@ async function prepare() { conf.setDb('power'); const createDB = `CREATE DATABASE IF NOT EXISTS ${db}`; const createStable = `CREATE STABLE IF NOT EXISTS ${db}.${stable} (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);`; - + let wsSql = await taos.sqlConnect(conf); await wsSql.exec(createDB); await wsSql.exec(createStable); @@ -64,9 +64,9 @@ async function subscribe(consumer) { } consumer.commit(); console.log("Commit offset manually successfully."); - } + } } catch (err) { - console.error("Failed to poll data; ErrCode:" + err.code + "; ErrMessage: " + err.message); + console.error("Failed to poll data, ErrCode: " + err.code + ", ErrMessage: " + err.message); throw err; } // ANCHOR_END: commit @@ -78,16 +78,17 @@ async function test() { try { await prepare(); let consumer = await createConsumer() - await subscribe(consumer) + await subscribe(consumer) await consumer.unsubscribe(); console.log("Consumer unsubscribed successfully."); } catch (err) { - console.error("Failed to unsubscribe consume, ErrCode:" + err.code + "; ErrMessage: " + err.message); + console.error("Failed to unsubscribe consumer, ErrCode: " + err.code + ", ErrMessage: " + err.message); } finally { if (consumer) { await consumer.close(); + console.log("Consumer closed successfully."); } taos.destroy(); } diff --git a/docs/examples/node/websocketexample/tmq_seek_example.js b/docs/examples/node/websocketexample/tmq_seek_example.js index da22217e14..0dea7e1671 100644 --- a/docs/examples/node/websocketexample/tmq_seek_example.js +++ b/docs/examples/node/websocketexample/tmq_seek_example.js @@ -59,7 +59,7 @@ async function subscribe(consumer) { } } } catch (err) { - console.error("Failed to poll data; ErrCode:" + err.code + "; ErrMessage: " + err.message); + console.error("Failed to poll data, ErrCode: " + err.code + ", ErrMessage: " + err.message); throw err; } @@ -83,7 +83,7 @@ async function test() { console.log("Assignment seek to beginning successfully"); } catch (err) { - console.error("Seek example failed, ErrCode:" + err.code + "; ErrMessage: " + err.message); + console.error("Failed to execute seek example, ErrCode: " + err.code + ", ErrMessage: " + err.message); } finally { if (consumer) { diff --git a/docs/examples/python/connect_example.py b/docs/examples/python/connect_example.py index 1f01a04e4c..ce8b306024 100644 --- a/docs/examples/python/connect_example.py +++ b/docs/examples/python/connect_example.py @@ -14,11 +14,11 @@ def create_connection(): ) print(f"Connected to {host}:{port} successfully."); except Exception as err: - print(f"Failed to connect to {host}:{port} ; ErrMessage:{err}") + print(f"Failed to connect to {host}:{port} , ErrMessage:{err}") finally: if conn: conn.close() if __name__ == "__main__": - create_connection() \ No newline at end of file + create_connection() diff --git a/docs/examples/python/connect_rest_example.py b/docs/examples/python/connect_rest_example.py index bd65ce7970..02daa60b2e 100644 --- a/docs/examples/python/connect_rest_example.py +++ b/docs/examples/python/connect_rest_example.py @@ -12,7 +12,7 @@ 
def create_connection(): print(f"Connected to {url} successfully."); except Exception as err: - print(f"Failed to connect to {url} ; ErrMessage:{err}") + print(f"Failed to connect to {url} , ErrMessage:{err}") finally: if conn: conn.close() diff --git a/docs/examples/python/connect_websocket_examples.py b/docs/examples/python/connect_websocket_examples.py index 79e96dd09c..56d208f5db 100644 --- a/docs/examples/python/connect_websocket_examples.py +++ b/docs/examples/python/connect_websocket_examples.py @@ -14,7 +14,7 @@ def create_connection(): ) print(f"Connected to {host}:{port} successfully."); except Exception as err: - print(f"Failed to connect to {host}:{port} ; ErrMessage:{err}") + print(f"Failed to connect to {host}:{port} , ErrMessage:{err}") return conn # ANCHOR_END: connect diff --git a/docs/examples/python/create_db_native.py b/docs/examples/python/create_db_native.py index 1a98a242f2..34dabfabe2 100644 --- a/docs/examples/python/create_db_native.py +++ b/docs/examples/python/create_db_native.py @@ -20,7 +20,7 @@ try: print(f"Create stable power.meters successfully, rowsAffected: {rowsAffected}"); except Exception as err: - print(f"Failed to create db and table, db addr:{host}:{port} ; ErrMessage:{err}") + print(f"Failed to create database power or stable meters, ErrMessage:{err}") finally: if conn: conn.close() diff --git a/docs/examples/python/create_db_rest.py b/docs/examples/python/create_db_rest.py index 2387b80195..4b98c991a3 100644 --- a/docs/examples/python/create_db_rest.py +++ b/docs/examples/python/create_db_rest.py @@ -19,7 +19,7 @@ try: print(f"Create stable power.meters successfully, rowsAffected: {rowsAffected}"); except Exception as err: - print(f"Failed to create db and table, url:{url} ; ErrMessage:{err}") + print(f"Failed to create database power or stable meters, ErrMessage:{err}") finally: if conn: conn.close() diff --git a/docs/examples/python/create_db_ws.py b/docs/examples/python/create_db_ws.py index 7749ba617f..ddbacb4b1f 100644 --- a/docs/examples/python/create_db_ws.py +++ b/docs/examples/python/create_db_ws.py @@ -20,7 +20,7 @@ try: print(f"Create stable power.meters successfully, rowsAffected: {rowsAffected}"); except Exception as err: - print(f"Failed to create db and table, db addrr:{host}:{port} ; ErrMessage:{err}") + print(f"Failed to create database power or stable meters, ErrMessage:{err}") finally: if conn: conn.close() diff --git a/docs/examples/python/insert_native.py b/docs/examples/python/insert_native.py index 5f1b6fbc21..ad7a8b85c2 100644 --- a/docs/examples/python/insert_native.py +++ b/docs/examples/python/insert_native.py @@ -21,7 +21,7 @@ try: print(f"Successfully inserted {affectedRows} rows to power.meters.") except Exception as err: - print(f"Failed to insert data to power.meters, db addr:{host}:{port} ; ErrMessage:{err}") + print(f"Failed to insert data to power.meters, ErrMessage:{err}") finally: if conn: conn.close() diff --git a/docs/examples/python/insert_rest.py b/docs/examples/python/insert_rest.py index d8e07323a9..41fd70857f 100644 --- a/docs/examples/python/insert_rest.py +++ b/docs/examples/python/insert_rest.py @@ -20,7 +20,7 @@ try: print(f"Successfully inserted {affectedRows} rows to power.meters.") except Exception as err: - print(f"Failed to insert data to power.meters, url:{url} ; ErrMessage:{err}") + print(f"Failed to insert data to power.meters, ErrMessage:{err}") finally: if conn: conn.close() diff --git a/docs/examples/python/insert_ws.py b/docs/examples/python/insert_ws.py index 0e1f74e2cf..9c03b4857a 
100644 --- a/docs/examples/python/insert_ws.py +++ b/docs/examples/python/insert_ws.py @@ -21,7 +21,7 @@ try: print(f"Successfully inserted {affectedRows} rows to power.meters.") except Exception as err: - print(f"Failed to insert data to power.meters, db addr:{host}:{port} ; ErrMessage:{err}") + print(f"Failed to insert data to power.meters, ErrMessage:{err}") finally: if conn: conn.close() diff --git a/docs/examples/python/query_native.py b/docs/examples/python/query_native.py index df19819de7..072807986e 100644 --- a/docs/examples/python/query_native.py +++ b/docs/examples/python/query_native.py @@ -7,15 +7,15 @@ try: port=port, user="root", password="taosdata") - - result = conn.query("SELECT ts, current, location FROM power.meters limit 100") + sql = "SELECT ts, current, location FROM power.meters limit 100" + result = conn.query(sql) # Get data from result as list of tuple data = result.fetch_all() for row in data: print(f"ts: {row[0]}, current: {row[1]}, location: {row[2]}") except Exception as err: - print(f"Failed to query data from power.meters, db addr:{host}:{port} ; ErrMessage:{err}") + print(f"Failed to query data from power.meters, sql: {sql}, ErrMessage:{err}") finally: if conn: - conn.close() \ No newline at end of file + conn.close() diff --git a/docs/examples/python/query_rest.py b/docs/examples/python/query_rest.py index de16ace8be..85a70fd382 100644 --- a/docs/examples/python/query_rest.py +++ b/docs/examples/python/query_rest.py @@ -7,11 +7,11 @@ try: user="root", password="taosdata", timeout=30) - - result = client.sql(f"SELECT ts, current, location FROM power.meters limit 100") + sql = f"SELECT ts, current, location FROM power.meters limit 100" + result = client.sql(sql) if result["data"]: for row in result["data"]: print(f"ts: {row[0]}, current: {row[1]}, location: {row[2]}") except Exception as err: - print(f"Failed to query data from power.meters, url:{url} ; ErrMessage:{err}") + print(f"Failed to query data from power.meters, sql: {sql}, ErrMessage:{err}") diff --git a/docs/examples/python/query_ws.py b/docs/examples/python/query_ws.py index 8d177b167a..afab438ad9 100644 --- a/docs/examples/python/query_ws.py +++ b/docs/examples/python/query_ws.py @@ -8,13 +8,13 @@ try: password="taosdata", host=host, port=port) - - result = conn.query("SELECT ts, current, location FROM power.meters limit 100") + sql = "SELECT ts, current, location FROM power.meters limit 100" + result = conn.query(sql) for row in result: print(f"ts: {row[0]}, current: {row[1]}, location: {row[2]}") except Exception as err: - print(f"Failed to query data from power.meters, db addr:{host}:{port} ; ErrMessage:{err}") + print(f"Failed to query data from power.meters, sql: {sql}, ErrMessage:{err}") finally: if conn: conn.close() diff --git a/docs/examples/python/reqid_native.py b/docs/examples/python/reqid_native.py index 8f5bb3538c..7f16093835 100644 --- a/docs/examples/python/reqid_native.py +++ b/docs/examples/python/reqid_native.py @@ -17,7 +17,7 @@ try: print(f"ts: {row[0]}, current: {row[1]}, location: {row[2]}") except Exception as err: - print(f"Failed to execute sql with reqId:{reqId}, db addr:{host}:{port} ; ErrMessage:{err}") + print(f"Failed to execute sql with reqId:{reqId}, ErrMessage:{err}") finally: if conn: diff --git a/docs/examples/python/reqid_rest.py b/docs/examples/python/reqid_rest.py index 76ba735234..570e671092 100644 --- a/docs/examples/python/reqid_rest.py +++ b/docs/examples/python/reqid_rest.py @@ -15,4 +15,4 @@ try: print(f"ts: {row[0]}, current: {row[1]}, location: 
{row[2]}") except Exception as err: - print(f"Failed to execute sql with reqId:{reqId}, url:{url} ; ErrMessage:{err}") + print(f"Failed to execute sql with reqId:{reqId}, ErrMessage:{err}") diff --git a/docs/examples/python/reqid_ws.py b/docs/examples/python/reqid_ws.py index 691dde710f..7c74104169 100644 --- a/docs/examples/python/reqid_ws.py +++ b/docs/examples/python/reqid_ws.py @@ -18,7 +18,7 @@ try: print(f"ts: {row[0]}, current: {row[1]}, location: {row[2]}") except Exception as err: - print(f"Failed to execute sql with reqId:{reqId}, db addr:{host}:{port} ; ErrMessage:{err}") + print(f"Failed to execute sql with reqId:{reqId}, ErrMessage:{err}") finally: if conn: conn.close() diff --git a/docs/examples/python/schemaless_native.py b/docs/examples/python/schemaless_native.py index 0cb4d1c94d..96d8f3177f 100644 --- a/docs/examples/python/schemaless_native.py +++ b/docs/examples/python/schemaless_native.py @@ -34,7 +34,7 @@ try: ) print("Inserted data with schemaless successfully."); except Exception as err: - print(f"Failed to insert data with schemaless, addr: {host}:{port} ErrMessage:{err}") + print(f"Failed to insert data with schemaless, ErrMessage:{err}") finally: if conn: conn.close() diff --git a/docs/examples/python/schemaless_ws.py b/docs/examples/python/schemaless_ws.py index f03f78b10f..3c41d1768b 100644 --- a/docs/examples/python/schemaless_ws.py +++ b/docs/examples/python/schemaless_ws.py @@ -66,7 +66,7 @@ def schemaless_insert(): ) print("Inserted data with schemaless successfully."); except Exception as err: - print(f"Failed to insert data with schemaless, addr: {host}:{port} ErrMessage:{err}") + print(f"Failed to insert data with schemaless, ErrMessage:{err}") raise err finally: @@ -78,4 +78,4 @@ if __name__ == "__main__": prepare() schemaless_insert except Exception as err: - print(f"Failed to insert data with schemaless, err:{err}") \ No newline at end of file + print(f"Failed to insert data with schemaless, err:{err}") diff --git a/docs/examples/python/stmt_native.py b/docs/examples/python/stmt_native.py index ea0cf9c626..16975d6895 100644 --- a/docs/examples/python/stmt_native.py +++ b/docs/examples/python/stmt_native.py @@ -56,7 +56,7 @@ try: print(f"Successfully inserted to power.meters.") except Exception as err: - print(f"Failed to insert to table meters using stmt, addr:{host}:{port} ; ErrMessage:{err}") + print(f"Failed to insert to table meters using stmt, ErrMessage:{err}") finally: if stmt: stmt.close() diff --git a/docs/examples/python/stmt_ws.py b/docs/examples/python/stmt_ws.py index 8d992b32e6..74f071fde1 100644 --- a/docs/examples/python/stmt_ws.py +++ b/docs/examples/python/stmt_ws.py @@ -61,7 +61,7 @@ try: print(f"Successfully inserted to power.meters.") except Exception as err: - print(f"Failed to insert to table meters using stmt, addr:{host}:{port} ; ErrMessage:{err}") + print(f"Failed to insert to table meters using stmt, ErrMessage:{err}") finally: if stmt: stmt.close() diff --git a/docs/examples/python/tmq_native.py b/docs/examples/python/tmq_native.py index 64e447384c..bed173c506 100644 --- a/docs/examples/python/tmq_native.py +++ b/docs/examples/python/tmq_native.py @@ -70,7 +70,7 @@ def create_consumer(): print(f"Create consumer successfully, host: {host}:{port}, groupId: {groupId}, clientId: {clientId}"); return consumer except Exception as err: - print(f"Failed to create native consumer, host: {host}:{port} ; ErrMessage:{err}"); + print(f"Failed to create native consumer, host: {host}:{port}, ErrMessage:{err}"); raise err # ANCHOR_END: 
create_consumer @@ -123,7 +123,7 @@ def commit_offset(consumer): print("Commit offset manually successfully."); except Exception as err: - print(f"Failed to poll data, ErrMessage:{err}") + print(f"Failed to execute commit example, ErrMessage:{err}") raise err # ANCHOR_END: commit_offset @@ -138,7 +138,7 @@ def seek_offset(consumer): consumer.seek(partition) print(f"Assignment seek to beginning successfully"); except Exception as err: - print(f"Seek example failed; ErrMessage:{err}") + print(f"Failed to execute seek example, ErrMessage:{err}") raise err # ANCHOR_END: assignment @@ -151,7 +151,8 @@ def unsubscribe(consumer): print(f"Failed to unsubscribe consumer. ErrMessage:{err}") finally: if consumer: - consumer.close() + consumer.close() + print("Consumer closed successfully."); # ANCHOR_END: unsubscribe if __name__ == "__main__": @@ -165,7 +166,7 @@ if __name__ == "__main__": consumer.unsubscribe() print("Consumer unsubscribed successfully."); except Exception as err: - print(f"Failed to stmt consumer. ErrMessage:{err}") + print(f"Failed to execute consumer example, ErrMessage:{err}") finally: consumer.unsubscribe() diff --git a/docs/examples/python/tmq_websocket_example.py b/docs/examples/python/tmq_websocket_example.py index aa9cbf6545..15441fbb41 100644 --- a/docs/examples/python/tmq_websocket_example.py +++ b/docs/examples/python/tmq_websocket_example.py @@ -78,7 +78,7 @@ def create_consumer(): print(f"Create consumer successfully, host: {host}:{port}, groupId: {groupId}, clientId: {clientId}"); return consumer; except Exception as err: - print(f"Failed to create websocket consumer, host: {host}:{port} ; ErrMessage:{err}"); + print(f"Failed to create websocket consumer, host: {host}:{port}, ErrMessage:{err}"); raise err @@ -98,7 +98,7 @@ def seek_offset(consumer): print("Assignment seek to beginning successfully"); except Exception as err: - print(f"Seek example failed; ErrMessage:{err}") + print(f"Failed to execute seek example, ErrMessage:{err}") raise err # ANCHOR_END: assignment @@ -137,7 +137,7 @@ def commit_offset(consumer): print("Commit offset manually successfully."); except Exception as err: - print(f"Failed to poll data, ErrMessage:{err}") + print(f"Failed to execute commit example, ErrMessage:{err}") raise err @@ -153,7 +153,8 @@ def unsubscribe(consumer): print(f"Failed to unsubscribe consumer. ErrMessage:{err}") finally: if consumer: - consumer.close() + consumer.close() + print("Consumer closed successfully."); # ANCHOR_END: unsubscribe @@ -166,6 +167,6 @@ if __name__ == "__main__": seek_offset(consumer) commit_offset(consumer) except Exception as err: - print(f"Failed to stmt consumer. 
ErrorMessage:{err}") + print(f"Failed to execute consumer example, ErrorMessage:{err}") finally: - unsubscribe(consumer); \ No newline at end of file + unsubscribe(consumer); diff --git a/docs/examples/rust/nativeexample/examples/connect.rs b/docs/examples/rust/nativeexample/examples/connect.rs index 7da09ae7ec..ad2533d4c3 100644 --- a/docs/examples/rust/nativeexample/examples/connect.rs +++ b/docs/examples/rust/nativeexample/examples/connect.rs @@ -10,7 +10,7 @@ async fn main() -> anyhow::Result<()> { Ok(()) } Err(err) => { - eprintln!("Failed to connect to {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to connect to {}, ErrMessage: {}", dsn, err); return Err(err.into()); } } diff --git a/docs/examples/rust/nativeexample/examples/createdb.rs b/docs/examples/rust/nativeexample/examples/createdb.rs index d3b2f5178b..292f13de8f 100644 --- a/docs/examples/rust/nativeexample/examples/createdb.rs +++ b/docs/examples/rust/nativeexample/examples/createdb.rs @@ -13,7 +13,7 @@ async fn main() -> anyhow::Result<()> { ]).await { Ok(afffected_rows) => println!("Create database power successfully, rowsAffected: {}", afffected_rows), Err(err) => { - eprintln!("Failed to create database power; ErrMessage: {}", err); + eprintln!("Failed to create database power, ErrMessage: {}", err); return Err(err.into()); } } @@ -25,7 +25,7 @@ async fn main() -> anyhow::Result<()> { ]).await { Ok(afffected_rows) => println!("Create stable power.meters successfully, rowsAffected: {}", afffected_rows), Err(err) => { - eprintln!("Failed to create stable power.meters; ErrMessage: {}", err); + eprintln!("Failed to create stable power.meters, ErrMessage: {}", err); return Err(err.into()); } } diff --git a/docs/examples/rust/nativeexample/examples/insert.rs b/docs/examples/rust/nativeexample/examples/insert.rs index d551da436e..e78381fc61 100644 --- a/docs/examples/rust/nativeexample/examples/insert.rs +++ b/docs/examples/rust/nativeexample/examples/insert.rs @@ -20,7 +20,7 @@ async fn main() -> anyhow::Result<()> { (NOW + 1a, 10.30000, 218, 0.25000) "#).await{ Ok(affected_rows) => println!("Successfully inserted {} rows to power.meters.", affected_rows), Err(err) => { - eprintln!("Failed to insert data to power.meters, dsn: {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to insert data to power.meters, ErrMessage: {}", err); return Err(err.into()); } } diff --git a/docs/examples/rust/nativeexample/examples/query.rs b/docs/examples/rust/nativeexample/examples/query.rs index 6b6fde6a31..e4f7c45521 100644 --- a/docs/examples/rust/nativeexample/examples/query.rs +++ b/docs/examples/rust/nativeexample/examples/query.rs @@ -11,7 +11,8 @@ async fn main() -> anyhow::Result<()> { // ANCHOR: query_data // query data, make sure the database and table are created before - match taos.query("SELECT ts, current, location FROM power.meters limit 100").await{ + let sql = "SELECT ts, current, location FROM power.meters limit 100"; + match taos.query(sql).await{ Ok(mut result) => { for field in result.fields() { println!("got field: {}", field.name()); @@ -30,7 +31,7 @@ async fn main() -> anyhow::Result<()> { } } Err(err) => { - eprintln!("Failed to query data from power.meters, dsn: {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to query data from power.meters, sql: {}, ErrMessage: {}", sql, err); return Err(err.into()); } } @@ -51,6 +52,7 @@ async fn main() -> anyhow::Result<()> { location: String, } + let sql = "SELECT ts, current, location FROM power.meters limit 100"; match taos.query("SELECT ts, current, location FROM 
power.meters limit 100").await { Ok(mut query) => { match query.deserialize::().try_collect::>().await { @@ -64,7 +66,7 @@ async fn main() -> anyhow::Result<()> { } } Err(err) => { - eprintln!("Failed to query data from power.meters, url: {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to query data from power.meters, sql: {}, ErrMessage: {}", sql, err); return Err(err.into()); } } @@ -92,7 +94,7 @@ async fn main() -> anyhow::Result<()> { } } Err(err) => { - eprintln!("Failed to execute sql with reqId: {}, dsn: {}; ErrMessage: {}", req_id, dsn, err); + eprintln!("Failed to execute sql with reqId: {}, ErrMessage: {}", req_id, err); return Err(err.into()); } } diff --git a/docs/examples/rust/nativeexample/examples/schemaless.rs b/docs/examples/rust/nativeexample/examples/schemaless.rs index 88e9ef4584..75f806f54c 100644 --- a/docs/examples/rust/nativeexample/examples/schemaless.rs +++ b/docs/examples/rust/nativeexample/examples/schemaless.rs @@ -43,7 +43,7 @@ async fn main() -> anyhow::Result<()> { match client.put(&sml_data).await{ Ok(_) => {}, Err(err) => { - eprintln!("Failed to insert data with schemaless, host: {}; ErrMessage: {}", host, err); + eprintln!("Failed to insert data with schemaless, data:{:?}, ErrMessage: {}", data, err); return Err(err.into()); } } @@ -65,7 +65,7 @@ async fn main() -> anyhow::Result<()> { match client.put(&sml_data).await{ Ok(_) => {}, Err(err) => { - eprintln!("Failed to insert data with schemaless, host: {}; ErrMessage: {}", host, err); + eprintln!("Failed to insert data with schemaless, data:{:?}, ErrMessage: {}", data, err); return Err(err.into()); } } @@ -96,7 +96,7 @@ async fn main() -> anyhow::Result<()> { match client.put(&sml_data).await{ Ok(_) => {}, Err(err) => { - eprintln!("Failed to insert data with schemaless, host: {}; ErrMessage: {}", host, err); + eprintln!("Failed to insert data with schemaless, data:{:?}, ErrMessage: {}", data, err); return Err(err.into()); } } diff --git a/docs/examples/rust/nativeexample/examples/stmt.rs b/docs/examples/rust/nativeexample/examples/stmt.rs index de35ae2972..0091283a6b 100644 --- a/docs/examples/rust/nativeexample/examples/stmt.rs +++ b/docs/examples/rust/nativeexample/examples/stmt.rs @@ -20,7 +20,13 @@ async fn main() -> anyhow::Result<()> { let tags = vec![Value::Int(i as i32), Value::VarChar(format!("location_{}", i).into())]; // set table name and tags for the prepared statement. - stmt.set_tbname_tags(&table_name, &tags).await?; + match stmt.set_tbname_tags(&table_name, &tags).await{ + Ok(_) => {}, + Err(err) => { + eprintln!("Failed to set table name and tags, table_name:{}, tags:{:?}, ErrMessage: {}", table_name, tags, err); + return Err(err.into()); + } + } for j in 0..NUM_ROWS { let values = vec![ ColumnView::from_millis_timestamp(vec![1648432611249 + j as i64]), @@ -29,17 +35,29 @@ async fn main() -> anyhow::Result<()> { ColumnView::from_floats(vec![0.31 + j as f32]), ]; // bind values to the prepared statement. - stmt.bind(&values).await?; + match stmt.bind(&values).await{ + Ok(_) => {}, + Err(err) => { + eprintln!("Failed to bind values, values:{:?}, ErrMessage: {}", values, err); + return Err(err.into()); + } + } } - stmt.add_batch().await?; + match stmt.add_batch().await{ + Ok(_) => {}, + Err(err) => { + eprintln!("Failed to add batch, ErrMessage: {}", err); + return Err(err.into()); + } + } } // execute. 
match stmt.execute().await{ Ok(affected_rows) => println!("Successfully inserted {} rows to power.meters.", affected_rows), Err(err) => { - eprintln!("Failed to insert to table meters using stmt, dsn: {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to insert to table meters using stmt, ErrMessage: {}", err); return Err(err.into()); } } diff --git a/docs/examples/rust/nativeexample/examples/tmq.rs b/docs/examples/rust/nativeexample/examples/tmq.rs index 7f3bd416d1..f312bee0f2 100644 --- a/docs/examples/rust/nativeexample/examples/tmq.rs +++ b/docs/examples/rust/nativeexample/examples/tmq.rs @@ -53,7 +53,7 @@ async fn main() -> anyhow::Result<()> { consumer } Err(err) => { - eprintln!("Failed to create consumer, dsn: {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to create websocket consumer, dsn: {}, ErrMessage: {}", dsn, err); return Err(err.into()); } }; @@ -63,7 +63,7 @@ async fn main() -> anyhow::Result<()> { match consumer.subscribe(["topic_meters"]).await{ Ok(_) => println!("Subscribe topics successfully."), Err(err) => { - eprintln!("Failed to subscribe topic_meters, dsn: {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to subscribe topic_meters, ErrMessage: {}", err); return Err(err.into()); } } @@ -100,7 +100,7 @@ async fn main() -> anyhow::Result<()> { Ok(()) }) .await.map_err(|e| { - eprintln!("Failed to execute consumer functions. ErrMessage: {:?}", e); + eprintln!("Failed to poll data; ErrMessage: {:?}", e); e })?; @@ -125,14 +125,14 @@ async fn main() -> anyhow::Result<()> { match consumer.commit(offset).await{ Ok(_) => println!("Commit offset manually successfully."), Err(err) => { - eprintln!("Failed to commit offset manually, dsn: {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to commit offset manually, ErrMessage: {}", err); return Err(err.into()); } } Ok(()) }) .await.map_err(|e| { - eprintln!("Failed to execute consumer functions. 
ErrMessage: {:?}", e); + eprintln!("Failed to poll data, ErrMessage: {:?}", e); e })?; // ANCHOR_END: consumer_commit_manually @@ -140,7 +140,7 @@ async fn main() -> anyhow::Result<()> { // ANCHOR: seek_offset let assignments = consumer.assignments().await.unwrap(); - println!("Now assignments: {:?}", assignments); + println!("assignments: {:?}", assignments); // seek offset for topic_vec_assignment in assignments { @@ -163,14 +163,14 @@ async fn main() -> anyhow::Result<()> { match consumer.offset_seek(topic, vgroup_id, begin).await{ Ok(_) => (), Err(err) => { - eprintln!("Seek example failed; ErrMessage: {}", err); + eprintln!("Failed to seek offset, ErrMessage: {}", err); return Err(err.into()); } } } let topic_assignment = consumer.topic_assignment(topic).await; - println!("Topic assignment: {:?}", topic_assignment); + println!("topic assignment: {:?}", topic_assignment); } println!("Assignment seek to beginning successfully."); // after seek offset diff --git a/docs/examples/rust/restexample/examples/connect.rs b/docs/examples/rust/restexample/examples/connect.rs index 535e265a97..7051572296 100644 --- a/docs/examples/rust/restexample/examples/connect.rs +++ b/docs/examples/rust/restexample/examples/connect.rs @@ -10,7 +10,7 @@ async fn main() -> anyhow::Result<()> { Ok(()) } Err(err) => { - eprintln!("Failed to connect to {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to connect to {}, ErrMessage: {}", dsn, err); return Err(err.into()); } } diff --git a/docs/examples/rust/restexample/examples/createdb.rs b/docs/examples/rust/restexample/examples/createdb.rs index e061f1d835..896868ae15 100644 --- a/docs/examples/rust/restexample/examples/createdb.rs +++ b/docs/examples/rust/restexample/examples/createdb.rs @@ -13,7 +13,7 @@ async fn main() -> anyhow::Result<()> { ]).await { Ok(afffected_rows) => println!("Create database power successfully, rowsAffected: {}", afffected_rows), Err(err) => { - eprintln!("Failed to create database power; ErrMessage: {}", err); + eprintln!("Failed to create database power, ErrMessage: {}", err); return Err(err.into()); } } @@ -25,7 +25,7 @@ async fn main() -> anyhow::Result<()> { ]).await { Ok(afffected_rows) => println!("Create stable power.meters successfully, rowsAffected: {}", afffected_rows), Err(err) => { - eprintln!("Failed to create stable power.meters; ErrMessage: {}", err); + eprintln!("Failed to create stable power.meters, ErrMessage: {}", err); return Err(err.into()); } } diff --git a/docs/examples/rust/restexample/examples/insert.rs b/docs/examples/rust/restexample/examples/insert.rs index 62522575ee..537e531501 100644 --- a/docs/examples/rust/restexample/examples/insert.rs +++ b/docs/examples/rust/restexample/examples/insert.rs @@ -20,7 +20,7 @@ async fn main() -> anyhow::Result<()> { (NOW + 1a, 10.30000, 218, 0.25000) "#).await{ Ok(affected_rows) => println!("Successfully inserted {} rows to power.meters.", affected_rows), Err(err) => { - eprintln!("Failed to insert data to power.meters, dsn: {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to insert data to power.meters, ErrMessage: {}", dsn, err); return Err(err.into()); } } diff --git a/docs/examples/rust/restexample/examples/query.rs b/docs/examples/rust/restexample/examples/query.rs index 4e69f9fc97..4366efd265 100644 --- a/docs/examples/rust/restexample/examples/query.rs +++ b/docs/examples/rust/restexample/examples/query.rs @@ -11,7 +11,8 @@ async fn main() -> anyhow::Result<()> { // ANCHOR: query_data // query data, make sure the database and table are created before - 
match taos.query("SELECT ts, current, location FROM power.meters limit 100").await{ + let sql = "SELECT ts, current, location FROM power.meters limit 100"; + match taos.query(sql).await{ Ok(mut result) => { for field in result.fields() { println!("got field: {}", field.name()); @@ -30,7 +31,7 @@ async fn main() -> anyhow::Result<()> { } } Err(err) => { - eprintln!("Failed to query data from power.meters, dsn: {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to query data from power.meters, sql: {}, ErrMessage: {}", sql, err); return Err(err.into()); } } @@ -51,6 +52,7 @@ async fn main() -> anyhow::Result<()> { location: String, } + let sql = "SELECT ts, current, location FROM power.meters limit 100"; match taos.query("SELECT ts, current, location FROM power.meters limit 100").await { Ok(mut query) => { match query.deserialize::().try_collect::>().await { @@ -64,7 +66,7 @@ async fn main() -> anyhow::Result<()> { } } Err(err) => { - eprintln!("Failed to query data from power.meters, url: {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to query data from power.meters, sql: {}, ErrMessage: {}", sql, err); return Err(err.into()); } } @@ -92,7 +94,7 @@ async fn main() -> anyhow::Result<()> { } } Err(err) => { - eprintln!("Failed to execute sql with reqId: {}, dsn: {}; ErrMessage: {}", req_id, dsn, err); + eprintln!("Failed to execute sql with reqId: {}, ErrMessage: {}", req_id, err); return Err(err.into()); } } diff --git a/docs/examples/rust/restexample/examples/schemaless.rs b/docs/examples/rust/restexample/examples/schemaless.rs index f629d66663..fe666cca6f 100644 --- a/docs/examples/rust/restexample/examples/schemaless.rs +++ b/docs/examples/rust/restexample/examples/schemaless.rs @@ -17,6 +17,15 @@ async fn main() -> anyhow::Result<()> { let client = TaosBuilder::from_dsn(dsn)?.build().await?; + let db = "power"; + + client + .exec(format!("create database if not exists {db}")) + .await?; + + // should specify database before insert + client.exec(format!("use {db}")).await?; + // SchemalessProtocol::Line let data = [ "meters,groupid=2,location=California.SanFrancisco current=10.3000002f64,voltage=219i32,phase=0.31f64 1626006833639", @@ -34,7 +43,7 @@ async fn main() -> anyhow::Result<()> { match client.put(&sml_data).await{ Ok(_) => {}, Err(err) => { - eprintln!("Failed to insert data with schemaless, host: {}; ErrMessage: {}", host, err); + eprintln!("Failed to insert data with schemaless, data:{:?}, ErrMessage: {}", data, err); return Err(err.into()); } } @@ -56,7 +65,7 @@ async fn main() -> anyhow::Result<()> { match client.put(&sml_data).await{ Ok(_) => {}, Err(err) => { - eprintln!("Failed to insert data with schemaless, host: {}; ErrMessage: {}", host, err); + eprintln!("Failed to insert data with schemaless, data:{:?}, ErrMessage: {}", data, err); return Err(err.into()); } } @@ -87,7 +96,7 @@ async fn main() -> anyhow::Result<()> { match client.put(&sml_data).await{ Ok(_) => {}, Err(err) => { - eprintln!("Failed to insert data with schemaless, host: {}; ErrMessage: {}", host, err); + eprintln!("Failed to insert data with schemaless, data:{:?}, ErrMessage: {}", data, err); return Err(err.into()); } } diff --git a/docs/examples/rust/restexample/examples/stmt.rs b/docs/examples/rust/restexample/examples/stmt.rs index 636b6780cd..4c3d5fb759 100644 --- a/docs/examples/rust/restexample/examples/stmt.rs +++ b/docs/examples/rust/restexample/examples/stmt.rs @@ -20,7 +20,13 @@ async fn main() -> anyhow::Result<()> { let tags = vec![Value::Int(i as i32), 
Value::VarChar(format!("location_{}", i).into())]; // set table name and tags for the prepared statement. - stmt.set_tbname_tags(&table_name, &tags).await?; + match stmt.set_tbname_tags(&table_name, &tags).await{ + Ok(_) => {}, + Err(err) => { + eprintln!("Failed to set table name and tags, table_name:{}, tags:{:?}, ErrMessage: {}", table_name, tags, err); + return Err(err.into()); + } + } for j in 0..NUM_ROWS { let values = vec![ ColumnView::from_millis_timestamp(vec![1648432611249 + j as i64]), @@ -29,17 +35,29 @@ async fn main() -> anyhow::Result<()> { ColumnView::from_floats(vec![0.31 + j as f32]), ]; // bind values to the prepared statement. - stmt.bind(&values).await?; + match stmt.bind(&values).await{ + Ok(_) => {}, + Err(err) => { + eprintln!("Failed to bind values, values:{:?}, ErrMessage: {}", values, err); + return Err(err.into()); + } + } } - stmt.add_batch().await?; + match stmt.add_batch().await{ + Ok(_) => {}, + Err(err) => { + eprintln!("Failed to add batch, ErrMessage: {}", err); + return Err(err.into()); + } + } } // execute. match stmt.execute().await{ Ok(affected_rows) => println!("Successfully inserted {} rows to power.meters.", affected_rows), Err(err) => { - eprintln!("Failed to insert to table meters using stmt, dsn: {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to insert to table meters using stmt, ErrMessage: {}", err); return Err(err.into()); } } diff --git a/docs/examples/rust/restexample/examples/tmq.rs b/docs/examples/rust/restexample/examples/tmq.rs index 2abc0a36da..0a0214d258 100644 --- a/docs/examples/rust/restexample/examples/tmq.rs +++ b/docs/examples/rust/restexample/examples/tmq.rs @@ -53,7 +53,7 @@ async fn main() -> anyhow::Result<()> { consumer } Err(err) => { - eprintln!("Failed to create consumer, dsn: {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to create websocket consumer, dsn: {}, ErrMessage: {}", dsn, err); return Err(err.into()); } }; @@ -63,7 +63,7 @@ async fn main() -> anyhow::Result<()> { match consumer.subscribe(["topic_meters"]).await{ Ok(_) => println!("Subscribe topics successfully."), Err(err) => { - eprintln!("Failed to subscribe topic_meters, dsn: {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to subscribe topic_meters, ErrMessage: {}", err); return Err(err.into()); } } @@ -125,14 +125,14 @@ async fn main() -> anyhow::Result<()> { match consumer.commit(offset).await{ Ok(_) => println!("Commit offset manually successfully."), Err(err) => { - eprintln!("Failed to commit offset manually, dsn: {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to commit offset manually, ErrMessage: {}", err); return Err(err.into()); } } Ok(()) }) .await.map_err(|e| { - eprintln!("Failed to execute consumer functions. 
ErrMessage: {:?}", e); + eprintln!("Failed to poll data, ErrMessage: {:?}", e); e })?; // ANCHOR_END: consumer_commit_manually @@ -163,7 +163,7 @@ async fn main() -> anyhow::Result<()> { match consumer.offset_seek(topic, vgroup_id, begin).await{ Ok(_) => (), Err(err) => { - eprintln!("seek example failed; ErrMessage: {}", err); + eprintln!("Failed to seek offset, ErrMessage: {}", err); return Err(err.into()); } } diff --git a/docs/zh/08-develop/01-connect/index.md b/docs/zh/08-develop/01-connect/index.md index 755a9e7f74..280e31b2a6 100644 --- a/docs/zh/08-develop/01-connect/index.md +++ b/docs/zh/08-develop/01-connect/index.md @@ -544,7 +544,7 @@ C/C++ 语言连接器使用 `taos_connect()` 函数用于建立与 TDengine 数 使用示例如下: ```java -{{#include examples/JDBC/connectionPools/src/main/java/com/taosdata/example/HikariDemo.java:connection_pool}} +{{#include docs/examples/java/src/main/java/com/taos/example/HikariDemo.java:connection_pool}} ``` > 通过 HikariDataSource.getConnection() 获取连接后,使用完成后需要调用 close() 方法,实际上它并不会关闭连接,只是放回连接池中。 @@ -555,7 +555,7 @@ C/C++ 语言连接器使用 `taos_connect()` 函数用于建立与 TDengine 数 使用示例如下: ```java -{{#include examples/JDBC/connectionPools/src/main/java/com/taosdata/example/DruidDemo.java:connection_pool}} +{{#include docs/examples/java/src/main/java/com/taos/example/DruidDemo.java:connection_pool}} ``` > 更多 druid 使用问题请查看[官方说明](https://github.com/alibaba/druid)。 diff --git a/docs/zh/08-develop/02-sql.md b/docs/zh/08-develop/02-sql.md index 7e42e38949..be44458c5b 100644 --- a/docs/zh/08-develop/02-sql.md +++ b/docs/zh/08-develop/02-sql.md @@ -27,7 +27,7 @@ REST API:直接调用 `taosadapter` 提供的 REST API 接口,进行数据 ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcCreatDBDemo.java:create_db_and_table}} +{{#include docs/examples/java/src/main/java/com/taos/example/JdbcCreatDBDemo.java:create_db_and_table}} ``` @@ -98,7 +98,7 @@ curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql/power' \ ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcInsertDataDemo.java:insert_data}} +{{#include docs/examples/java/src/main/java/com/taos/example/JdbcInsertDataDemo.java:insert_data}} ``` **Note** @@ -170,7 +170,7 @@ curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql' \ ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcQueryDemo.java:query_data}} +{{#include docs/examples/java/src/main/java/com/taos/example/JdbcQueryDemo.java:query_data}} ``` **Note** 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。 @@ -251,7 +251,7 @@ reqId 可用于请求链路追踪,reqId 就像分布式系统中的 traceId ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcReqIdDemo.java:with_reqid}} +{{#include docs/examples/java/src/main/java/com/taos/example/JdbcReqIdDemo.java:with_reqid}} ``` diff --git a/docs/zh/08-develop/04-schemaless.md b/docs/zh/08-develop/04-schemaless.md index 06dec726e9..130d012e8b 100644 --- a/docs/zh/08-develop/04-schemaless.md +++ b/docs/zh/08-develop/04-schemaless.md @@ -165,7 +165,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c6="passit" 1626006833640000000 ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessWsTest.java:schemaless}} +{{#include docs/examples/java/src/main/java/com/taos/example/SchemalessWsTest.java:schemaless}} ``` @@ -216,7 +216,7 @@ writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessJniTest.java:schemaless}} +{{#include 
docs/examples/java/src/main/java/com/taos/example/SchemalessJniTest.java:schemaless}} ``` 执行带有 reqId 的无模式写入,最后一个参数 reqId 可用于请求链路追踪。 diff --git a/docs/zh/08-develop/05-stmt.md b/docs/zh/08-develop/05-stmt.md index 4b8dbbce9b..9dfd9f56e3 100644 --- a/docs/zh/08-develop/05-stmt.md +++ b/docs/zh/08-develop/05-stmt.md @@ -27,7 +27,7 @@ import TabItem from "@theme/TabItem"; ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WSParameterBindingBasicDemo.java:para_bind}} +{{#include docs/examples/java/src/main/java/com/taos/example/WSParameterBindingBasicDemo.java:para_bind}} ``` @@ -76,7 +76,7 @@ import TabItem from "@theme/TabItem"; ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ParameterBindingBasicDemo.java:para_bind}} +{{#include docs/examples/java/src/main/java/com/taos/example/ParameterBindingBasicDemo.java:para_bind}} ``` 这是一个[更详细的参数绑定示例](https://github.com/taosdata/TDengine/blob/main/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ParameterBindingFullDemo.java) diff --git a/docs/zh/08-develop/07-tmq.md b/docs/zh/08-develop/07-tmq.md index 0711ab9f28..aedc59d05b 100644 --- a/docs/zh/08-develop/07-tmq.md +++ b/docs/zh/08-develop/07-tmq.md @@ -109,7 +109,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java:create_consumer}} +{{#include docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java:create_consumer}} ``` @@ -168,7 +168,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerLoopFull.java:create_consumer}} +{{#include docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java:create_consumer}} ``` @@ -229,7 +229,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java:poll_data_code_piece}} +{{#include docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java:poll_data_code_piece}} ``` - `subscribe` 方法的参数含义为:订阅的主题列表(即名称),支持同时订阅多个主题。 @@ -293,7 +293,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java:poll_data_code_piece}} +{{#include docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java:poll_data_code_piece}} ``` - `subscribe` 方法的参数含义为:订阅的主题列表(即名称),支持同时订阅多个主题。 @@ -376,7 +376,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java:consumer_seek}} +{{#include docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java:consumer_seek}} ``` 1. 使用 consumer.poll 方法轮询数据,直到获取到数据为止。 2. 对于轮询到的第一批数据,打印第一条数据的内容,并获取当前消费者的分区分配信息。 @@ -438,7 +438,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java:consumer_seek}} +{{#include docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java:consumer_seek}} ``` 1. 使用 consumer.poll 方法轮询数据,直到获取到数据为止。 2. 
对于轮询到的第一批数据,打印第一条数据的内容,并获取当前消费者的分区分配信息。 @@ -513,7 +513,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java:commit_code_piece}} +{{#include docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java:commit_code_piece}} ``` @@ -565,7 +565,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java:commit_code_piece}} +{{#include docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java:commit_code_piece}} ``` @@ -621,7 +621,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java:unsubscribe_data_code_piece}} +{{#include docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java:unsubscribe_data_code_piece}} ``` @@ -644,7 +644,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 {{#include docs/examples/rust/restexample/examples/tmq.rs:unsubscribe}} ``` -**注意**:消费者取消订阅后无法重用,如果想订阅新的 `topic`, 请重新创建消费者。 +**注意**:消费者取消订阅后已经关闭,无法重用,如果想订阅新的 `topic`, 请重新创建消费者。 @@ -672,7 +672,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java:unsubscribe_data_code_piece}} +{{#include docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java:unsubscribe_data_code_piece}} ``` @@ -694,7 +694,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 {{#include docs/examples/rust/restexample/examples/tmq.rs:unsubscribe}} ``` -**注意**:消费者取消订阅后无法重用,如果想订阅新的 `topic`, 请重新创建消费者。 +**注意**:消费者取消订阅后已经关闭,无法重用,如果想订阅新的 `topic`, 请重新创建消费者。 不支持 @@ -723,7 +723,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请
完整代码示例 ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java:consumer_demo}} +{{#include docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java:consumer_demo}} ``` **注意**:这里的 value.deserializer 配置参数值应该根据测试环境的包路径做相应的调整。 @@ -788,7 +788,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请
完整代码示例 ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerLoopFull.java:consumer_demo}} +{{#include docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java:consumer_demo}} ``` **注意**:这里的 value.deserializer 配置参数值应该根据测试环境的包路径做相应的调整。 diff --git a/docs/zh/14-reference/05-connector/14-java.mdx b/docs/zh/14-reference/05-connector/14-java.mdx index 0dc85feb13..f355aea621 100644 --- a/docs/zh/14-reference/05-connector/14-java.mdx +++ b/docs/zh/14-reference/05-connector/14-java.mdx @@ -60,7 +60,7 @@ REST 连接支持所有能运行 Java 的平台。 在报错后,通过 SQLException 可以获取到错误的信息和错误码: ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcBasicDemo.java:jdbc_exception}} +{{#include docs/examples/java/src/main/java/com/taos/example/JdbcBasicDemo.java:jdbc_exception}} ``` JDBC 连接器可能报错的错误码包括 4 种: diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerLoopFull.java b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerLoopFull.java index 34629fb8d2..ce9af5ecdc 100644 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerLoopFull.java +++ b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerLoopFull.java @@ -20,7 +20,7 @@ public class ConsumerLoopFull { static private Statement statement; static private volatile boolean stopThread = false; - public static TaosConsumer getConsumer() throws SQLException { + public static TaosConsumer getConsumer() throws Exception { // ANCHOR: create_consumer Properties config = new Properties(); config.setProperty("td.connect.type", "jni"); @@ -38,16 +38,20 @@ public class ConsumerLoopFull { try { TaosConsumer consumer= new TaosConsumer<>(config); - System.out.println("Create consumer successfully, host: " + config.getProperty("bootstrap.servers") + ", groupId: " + config.getProperty("group.id") + ", clientId: " + config.getProperty("client.id")); + System.out.printf("Create consumer successfully, host: %s, groupId: %s, clientId: %s%n", + config.getProperty("bootstrap.servers"), + config.getProperty("group.id"), + config.getProperty("client.id")); return consumer; - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to create native consumer, host: " + config.getProperty("bootstrap.servers") + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to create consumer", ex); } catch (Exception ex) { - System.out.println("Failed to create native consumer, host: " + config.getProperty("bootstrap.servers") - + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to create consumer", ex); + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to create native consumer, host: %s, %sErrMessage: %s%n", + config.getProperty("bootstrap.servers"), + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); + throw ex; } // ANCHOR_END: create_consumer } @@ -69,14 +73,14 @@ public class ConsumerLoopFull { System.out.println("data: " + JSON.toJSONString(bean)); } } - - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to poll data, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to poll data", ex); } catch (Exception ex) { - System.out.println("Failed to poll data, ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to poll data", ex); + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to poll data, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; } // ANCHOR_END: poll_data_code_piece } @@ -100,13 +104,14 @@ public class ConsumerLoopFull { consumer.seekToBeginning(assignment); System.out.println("Assignment seek to beginning successfully."); - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Seek example failed; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("seek example failed", ex); } catch (Exception ex) { - System.out.println("Seek example failed; ErrMessage: " + ex.getMessage()); - throw new SQLException("seek example failed", ex); + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to execute seek example, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; } // ANCHOR_END: consumer_seek } @@ -131,13 +136,14 @@ public class ConsumerLoopFull { System.out.println("Commit offset manually successfully."); } } - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to execute consumer functions. ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to execute consumer functions", ex); } catch (Exception ex) { - System.out.println("Failed to execute consumer functions. ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to execute consumer functions", ex); + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to execute commit example, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; } // ANCHOR_END: commit_code_piece } @@ -150,13 +156,14 @@ public class ConsumerLoopFull { // unsubscribe the consumer consumer.unsubscribe(); System.out.println("Consumer unsubscribed successfully."); - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to unsubscribe consumer. 
ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to unsubscribe consumer", ex); } catch (Exception ex) { - System.out.println("Failed to unsubscribe consumer. ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to unsubscribe consumer", ex); + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to unsubscribe consumer, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; } finally { // close the consumer @@ -238,9 +245,14 @@ public class ConsumerLoopFull { i++; Thread.sleep(1); } - } catch (SQLException ex) { - System.out.println("Failed to insert data to power.meters, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to insert data to power.meters", ex); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to insert data to power.meters, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; } } @@ -250,9 +262,14 @@ public class ConsumerLoopFull { statement.executeUpdate("USE power"); statement.executeUpdate("CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); statement.executeUpdate("CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage, phase, groupid, location FROM meters"); - } catch (SQLException ex) { - System.out.println("Failed to create db and table, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to create db and table", ex); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to create db and table, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); + throw ex; } } @@ -311,7 +328,7 @@ public class ConsumerLoopFull { try { prepareData(); } catch (SQLException ex) { - System.out.println("Failed to prepare data, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); + System.out.println("Failed to prepare data, ErrCode:" + ex.getErrorCode() + ", ErrMessage: " + ex.getMessage()); return; } catch (Exception ex) { System.out.println("Failed to prepare data, ErrMessage: " + ex.getMessage()); diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcCreatDBDemo.java b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcCreatDBDemo.java index ca50c4ca77..28d7d2d67b 100644 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcCreatDBDemo.java +++ b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcCreatDBDemo.java @@ -35,13 +35,13 @@ public class JdbcCreatDBDemo { rowsAffected = stmt.executeUpdate("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); // you can check rowsAffected here System.out.println("Create stable power.meters successfully, rowsAffected: " + rowsAffected); - - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to create db and table, url:" + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw ex; - } catch (Exception ex){ - System.out.println("Failed to create db and table, url:" + jdbcUrl + "; ErrMessage: " + ex.getMessage()); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to create database power or stable meters, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); throw ex; } // ANCHOR_END: create_db_and_table diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcInsertDataDemo.java b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcInsertDataDemo.java index 7e60c10bf7..08798b755c 100644 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcInsertDataDemo.java +++ b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcInsertDataDemo.java @@ -40,12 +40,13 @@ public class JdbcInsertDataDemo { int affectedRows = stmt.executeUpdate(insertQuery); // you can check affectedRows here System.out.println("Successfully inserted " + affectedRows + " rows to power.meters."); - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to insert data to power.meters, url:" + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw ex; - } catch (Exception ex){ - System.out.println("Failed to insert data to power.meters, url:" + jdbcUrl + "; ErrMessage: " + ex.getMessage()); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to insert data to power.meters, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); throw ex; } // ANCHOR_END: insert_data diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcQueryDemo.java b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcQueryDemo.java index ea6d8346c6..768ba8929c 100644 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcQueryDemo.java +++ b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcQueryDemo.java @@ -24,10 +24,11 @@ public class JdbcQueryDemo { properties.setProperty("timezone", "UTC-8"); System.out.println("get connection starting..."); // ANCHOR: query_data + String sql = "SELECT ts, current, location FROM power.meters limit 100"; try (Connection connection = DriverManager.getConnection(jdbcUrl, properties); Statement stmt = connection.createStatement(); // query data, make sure the database and table are created before - ResultSet resultSet = stmt.executeQuery("SELECT ts, current, location FROM power.meters limit 100")) { + ResultSet resultSet = stmt.executeQuery(sql)) { Timestamp ts; float current; @@ -41,19 +42,16 @@ public class JdbcQueryDemo { // you can check data here System.out.printf("ts: %s, current: %f, location: %s %n", ts, current, location); } - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to query data from power.meters, url:" + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw ex; - } catch (Exception ex){ - System.out.println("Failed to query data from power.meters, url:" + jdbcUrl + "; ErrMessage: " + ex.getMessage()); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to query data from power.meters, sql: %s, %sErrMessage: %s%n", + sql, + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); throw ex; } // ANCHOR_END: query_data } - - private static void printResult(ResultSet resultSet) throws SQLException { - Util.printResult(resultSet); - } - } diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcReqIdDemo.java b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcReqIdDemo.java index 7dd99c58dc..dd4b549bc5 100644 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcReqIdDemo.java +++ b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcReqIdDemo.java @@ -45,12 +45,13 @@ public class JdbcReqIdDemo { } } - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to execute sql with reqId: " + reqId + ", url:" + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw ex; - } catch (Exception ex){ - System.out.println("Failed to execute sql with reqId: " + reqId + ", url:" + jdbcUrl + "; ErrMessage: " + ex.getMessage()); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to execute sql with reqId: %s, %sErrMessage: %s%n", reqId, + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); throw ex; } // ANCHOR_END: with_reqid diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ParameterBindingBasicDemo.java b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ParameterBindingBasicDemo.java index ab93e324ea..b5732f0e33 100644 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ParameterBindingBasicDemo.java +++ b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ParameterBindingBasicDemo.java @@ -68,12 +68,13 @@ public class ParameterBindingBasicDemo { // you can check exeResult here System.out.println("Successfully inserted " + (numOfSubTable * numOfRow) + " rows to power.meters."); } - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to insert to table meters using stmt, url: " + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw ex; - } catch (Exception ex){ - System.out.println("Failed to insert to table meters using stmt, url: " + jdbcUrl + "; ErrMessage: " + ex.getMessage()); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to insert to table meters using stmt, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); throw ex; } } diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessJniTest.java b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessJniTest.java index d1f6eb3cea..5b1ce51be6 100644 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessJniTest.java +++ b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessJniTest.java @@ -26,12 +26,13 @@ public class SchemalessJniTest { conn.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS); conn.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.NOT_CONFIGURED); System.out.println("Inserted data with schemaless successfully."); - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to insert data with schemaless, host:" + host + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw ex; - } catch (Exception ex){ - System.out.println("Failed to insert data with schemaless, host:" + host + "; ErrMessage: " + ex.getMessage()); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to insert data with schemaless, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); throw ex; } } diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessWsTest.java b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessWsTest.java index 6432c8100e..0f15e70224 100644 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessWsTest.java +++ b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessWsTest.java @@ -26,12 +26,13 @@ public class SchemalessWsTest { conn.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS); conn.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS); System.out.println("Inserted data with schemaless successfully."); - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to insert data with schemaless, host:" + host + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw ex; - } catch (Exception ex){ - System.out.println("Failed to insert data with schemaless, host:" + host + "; ErrMessage: " + ex.getMessage()); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to insert data with schemaless, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); throw ex; } } diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WSParameterBindingBasicDemo.java b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WSParameterBindingBasicDemo.java index 29013014b9..792ee4ed2d 100644 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WSParameterBindingBasicDemo.java +++ b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WSParameterBindingBasicDemo.java @@ -47,12 +47,13 @@ public class WSParameterBindingBasicDemo { System.out.println("Successfully inserted " + exeResult.length + " rows to power.meters."); } } - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to insert to table meters using stmt, url: " + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw ex; - } catch (Exception ex){ - System.out.println("Failed to insert to table meters using stmt, url: " + jdbcUrl + "; ErrMessage: " + ex.getMessage()); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to insert to table meters using stmt, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); throw ex; } } diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java index 241ab2df76..17380023cd 100644 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java +++ b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java @@ -6,11 +6,13 @@ import com.taosdata.jdbc.tmq.*; import java.sql.*; import java.time.Duration; -import java.util.*; +import java.util.Collections; +import java.util.List; +import java.util.Properties; +import java.util.Set; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; -import java.util.function.Consumer; // ANCHOR: consumer_demo public class WsConsumerLoopFull { @@ -18,7 +20,7 @@ public class WsConsumerLoopFull { static private Statement statement; static private volatile boolean stopThread = false; - public static TaosConsumer getConsumer() throws SQLException { + public static TaosConsumer getConsumer() throws Exception { // ANCHOR: create_consumer Properties config = new Properties(); config.setProperty("td.connect.type", "ws"); @@ -36,16 +38,20 @@ public class WsConsumerLoopFull { try { TaosConsumer consumer= new TaosConsumer<>(config); - System.out.println("Create consumer successfully, host: " + config.getProperty("bootstrap.servers") + ", groupId: " + config.getProperty("group.id") + ", clientId: " + config.getProperty("client.id")); + System.out.printf("Create consumer successfully, host: %s, groupId: %s, clientId: %s%n", + config.getProperty("bootstrap.servers"), + config.getProperty("group.id"), + config.getProperty("client.id")); return consumer; - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to create websocket consumer, host: " + config.getProperty("bootstrap.servers") + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to create consumer", ex); } catch (Exception ex) { - System.out.println("Failed to create websocket consumer, host: " + config.getProperty("bootstrap.servers") - + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to create consumer", ex); + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to create websocket consumer, host: %s, %sErrMessage: %s%n", + config.getProperty("bootstrap.servers"), + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); + throw ex; } // ANCHOR_END: create_consumer } @@ -67,14 +73,14 @@ public class WsConsumerLoopFull { System.out.println("data: " + JSON.toJSONString(bean)); } } - - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to poll data, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to poll data", ex); } catch (Exception ex) { - System.out.println("Failed to poll data, ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to poll data", ex); + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to poll data, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; } // ANCHOR_END: poll_data_code_piece } @@ -98,13 +104,14 @@ public class WsConsumerLoopFull { consumer.seekToBeginning(assignment); System.out.println("Assignment seek to beginning successfully."); - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Seek example failed; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("seek example failed", ex); } catch (Exception ex) { - System.out.println("Seek example failed; ErrMessage: " + ex.getMessage()); - throw new SQLException("seek example failed", ex); + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to execute seek example, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; } // ANCHOR_END: consumer_seek } @@ -129,13 +136,14 @@ public class WsConsumerLoopFull { System.out.println("Commit offset manually successfully."); } } - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to execute consumer functions. ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to execute consumer functions", ex); } catch (Exception ex) { - System.out.println("Failed to execute consumer functions. ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to execute consumer functions", ex); + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to execute commit example, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; } // ANCHOR_END: commit_code_piece } @@ -148,13 +156,14 @@ public class WsConsumerLoopFull { // unsubscribe the consumer consumer.unsubscribe(); System.out.println("Consumer unsubscribed successfully."); - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to unsubscribe consumer. 
ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to unsubscribe consumer", ex); } catch (Exception ex) { - System.out.println("Failed to unsubscribe consumer. ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to unsubscribe consumer", ex); + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to unsubscribe consumer, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; } finally { // close the consumer @@ -236,9 +245,14 @@ public class WsConsumerLoopFull { i++; Thread.sleep(1); } - } catch (SQLException ex) { - System.out.println("Failed to insert data to power.meters, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to insert data to power.meters", ex); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to insert data to power.meters, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; } } @@ -248,9 +262,14 @@ public class WsConsumerLoopFull { statement.executeUpdate("USE power"); statement.executeUpdate("CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); statement.executeUpdate("CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage, phase, groupid, location FROM meters"); - } catch (SQLException ex) { - System.out.println("Failed to create db and table, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to create db and table", ex); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to create db and table, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); + throw ex; } } @@ -309,7 +328,7 @@ public class WsConsumerLoopFull { try { prepareData(); } catch (SQLException ex) { - System.out.println("Failed to prepare data, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); + System.out.println("Failed to prepare data, ErrCode:" + ex.getErrorCode() + ", ErrMessage: " + ex.getMessage()); return; } catch (Exception ex) { System.out.println("Failed to prepare data, ErrMessage: " + ex.getMessage()); @@ -334,8 +353,7 @@ public class WsConsumerLoopFull { consumer.unsubscribe(); unsubscribeExample(consumer); - System.out.println("unsubscribeExample executed successfully."); - + System.out.println("unsubscribeExample executed successfully"); } catch (SQLException ex) { System.out.println("Failed to poll data from topic_meters, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); return; diff --git a/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/DruidDemo.java b/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/DruidDemo.java index dd96f5c632..a366efd419 100644 --- a/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/DruidDemo.java +++ b/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/DruidDemo.java @@ -2,35 +2,35 @@ package com.taosdata.example; import com.alibaba.druid.pool.DruidDataSource; -import javax.sql.DataSource; import java.sql.Connection; import java.sql.Statement; public class DruidDemo { -// ANCHOR: connection_pool -public static void main(String[] args) throws Exception { - String url = "jdbc:TAOS://127.0.0.1:6030/log"; + // ANCHOR: connection_pool + public static void main(String[] args) throws Exception { + String url = "jdbc:TAOS://127.0.0.1:6030/log"; - DruidDataSource dataSource = new DruidDataSource(); - // jdbc properties - dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver"); - dataSource.setUrl(url); - dataSource.setUsername("root"); - dataSource.setPassword("taosdata"); - // pool configurations - dataSource.setInitialSize(10); - dataSource.setMinIdle(10); - dataSource.setMaxActive(10); - dataSource.setMaxWait(30000); - dataSource.setValidationQuery("SELECT SERVER_VERSION()"); + DruidDataSource dataSource = new DruidDataSource(); + // jdbc properties + dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver"); + dataSource.setUrl(url); + dataSource.setUsername("root"); + dataSource.setPassword("taosdata"); + // pool configurations + dataSource.setInitialSize(10); + dataSource.setMinIdle(10); + dataSource.setMaxActive(10); + dataSource.setMaxWait(30000); + dataSource.setValidationQuery("SELECT SERVER_VERSION()"); - Connection connection = dataSource.getConnection(); // get connection - Statement statement = connection.createStatement(); // get statement - //query or insert - // ... + Connection connection = dataSource.getConnection(); // get connection + Statement statement = connection.createStatement(); // get statement + // query or insert + // ... 
- statement.close(); - connection.close(); // put back to connection pool -} -// ANCHOR_END: connection_pool + statement.close(); + connection.close(); // put back to connection pool + dataSource.close(); + } + // ANCHOR_END: connection_pool } diff --git a/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/HikariDemo.java b/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/HikariDemo.java index 4480cbc7c4..50b20fdb0c 100644 --- a/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/HikariDemo.java +++ b/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/HikariDemo.java @@ -4,34 +4,34 @@ import com.zaxxer.hikari.HikariConfig; import com.zaxxer.hikari.HikariDataSource; import java.sql.Connection; -import java.sql.SQLException; import java.sql.Statement; public class HikariDemo { -// ANCHOR: connection_pool -public static void main(String[] args) throws SQLException { - HikariConfig config = new HikariConfig(); - // jdbc properties - config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log"); - config.setUsername("root"); - config.setPassword("taosdata"); - // connection pool configurations - config.setMinimumIdle(10); //minimum number of idle connection - config.setMaximumPoolSize(10); //maximum number of connection in the pool - config.setConnectionTimeout(30000); //maximum wait milliseconds for get connection from pool - config.setMaxLifetime(0); // maximum life time for each connection - config.setIdleTimeout(0); // max idle time for recycle idle connection - config.setConnectionTestQuery("SELECT SERVER_VERSION()"); //validation query + // ANCHOR: connection_pool + public static void main(String[] args) throws Exception { + HikariConfig config = new HikariConfig(); + // jdbc properties + config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log"); + config.setUsername("root"); + config.setPassword("taosdata"); + // connection pool configurations + config.setMinimumIdle(10); // minimum number of idle connection + config.setMaximumPoolSize(10); // maximum number of connection in the pool + config.setConnectionTimeout(30000); // maximum wait milliseconds for get connection from pool + config.setMaxLifetime(0); // maximum life time for each connection + config.setIdleTimeout(0); // max idle time for recycle idle connection + config.setConnectionTestQuery("SELECT SERVER_VERSION()"); // validation query - HikariDataSource ds = new HikariDataSource(config); //create datasource + HikariDataSource dataSource = new HikariDataSource(config); // create datasource - Connection connection = ds.getConnection(); // get connection - Statement statement = connection.createStatement(); // get statement + Connection connection = dataSource.getConnection(); // get connection + Statement statement = connection.createStatement(); // get statement - //query or insert - // ... - statement.close(); - connection.close(); // put back to connection pool -} -// ANCHOR_END: connection_pool + // query or insert + // ... 
+ statement.close(); + connection.close(); // put back to connection pool + dataSource.close(); + } + // ANCHOR_END: connection_pool } From d1bcc1ddddd1ae093c00afed36bf2baf53292cb0 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Wed, 14 Aug 2024 16:25:34 +0800 Subject: [PATCH 030/181] tetst: test for checking return value --- tests/parallel_test/run_scan_container.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/parallel_test/run_scan_container.sh b/tests/parallel_test/run_scan_container.sh index d16d1c3017..9f190f5f13 100755 --- a/tests/parallel_test/run_scan_container.sh +++ b/tests/parallel_test/run_scan_container.sh @@ -79,6 +79,7 @@ scan_scripts="$CONTAINER_TESTDIR/tests/ci/scan_file_path.py" ulimit -c unlimited cat << EOF docker run \ + -v /root/.cos-local.1:/root/.cos-local.2 \ -v $REP_MOUNT_PARAM \ -v $REP_MOUNT_DEBUG \ -v $scan_changefile_temp_path:$docker_can_changefile_temp_path \ @@ -86,6 +87,7 @@ docker run \ --rm --ulimit core=-1 taos_test:v1.0 python3 $scan_scripts -b "${branch_name_id}" -f "${scan_file_name}" -w ${web_server} EOF docker run \ + -v /root/.cos-local.1:/root/.cos-local.2 \ -v $REP_MOUNT_PARAM \ -v $REP_MOUNT_DEBUG \ -v $scan_changefile_temp_path:$docker_can_changefile_temp_path \ From ab45ab1459801a068b8d339d777c3656ccd343d2 Mon Sep 17 00:00:00 2001 From: kailixu Date: Wed, 14 Aug 2024 16:40:12 +0800 Subject: [PATCH 031/181] fix(tsdb/read): memory leak --- source/dnode/vnode/src/tsdb/tsdbRead2.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index 6783eb2cbd..31101165fd 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -2440,20 +2440,23 @@ static bool initSttBlockReader(SSttBlockReader* pSttBlockReader, STableBlockScan int32_t code = tMergeTreeOpen2(&pSttBlockReader->mergeTree, &conf, &info); if (code != TSDB_CODE_SUCCESS) { + taosArrayDestroy(info.pKeyRangeList); pReader->code = code; return false; } code = initMemDataIterator(pScanInfo, pReader); if (code != TSDB_CODE_SUCCESS) { + taosArrayDestroy(info.pKeyRangeList); pReader->code = code; return false; } code = initDelSkylineIterator(pScanInfo, pReader->info.order, &pReader->cost); if (code != TSDB_CODE_SUCCESS) { + taosArrayDestroy(info.pKeyRangeList); pReader->code = code; - return code; + return false; } if (conf.rspRows) { From 8aa9fb82d3ad851f1e6507a1458738f594fafe28 Mon Sep 17 00:00:00 2001 From: sheyanjie-qq <249478495@qq.com> Date: Wed, 14 Aug 2024 16:40:19 +0800 Subject: [PATCH 032/181] update c sample code --- docs/examples/c/create_db_demo.c | 6 +- docs/examples/c/insert_data_demo.c | 86 ++++++------- docs/examples/c/query_data_demo.c | 95 +++++++------- docs/examples/c/sml_insert_demo.c | 197 +++++++++++++++-------------- docs/examples/c/tmq_demo.c | 79 ++++++------ docs/examples/c/with_reqid_demo.c | 98 +++++++------- 6 files changed, 282 insertions(+), 279 deletions(-) diff --git a/docs/examples/c/create_db_demo.c b/docs/examples/c/create_db_demo.c index 45d4a17c5c..7ac54041d8 100644 --- a/docs/examples/c/create_db_demo.c +++ b/docs/examples/c/create_db_demo.c @@ -43,8 +43,7 @@ static int DemoCreateDB() { TAOS_RES *result = taos_query(taos, "CREATE DATABASE IF NOT EXISTS power"); code = taos_errno(result); if (code != 0) { - printf("Failed to create database power, Server: %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, code, - taos_errstr(result)); + printf("Failed to create database power, ErrCode: 0x%x, ErrMessage: 
%s.\n", code, taos_errstr(result)); taos_close(taos); taos_cleanup(); return -1; @@ -59,8 +58,7 @@ static int DemoCreateDB() { result = taos_query(taos, sql); code = taos_errno(result); if (code != 0) { - printf("Failed to create stable power.meters, Server: %s:%hu, ErrCode: 0x%x, ErrMessage: %s\n.", host, port, code, - taos_errstr(result)); + printf("Failed to create stable power.meters, ErrCode: 0x%x, ErrMessage: %s\n.", code, taos_errstr(result)); taos_close(taos); taos_cleanup(); return -1; diff --git a/docs/examples/c/insert_data_demo.c b/docs/examples/c/insert_data_demo.c index 3d655edca3..edc6b0b750 100644 --- a/docs/examples/c/insert_data_demo.c +++ b/docs/examples/c/insert_data_demo.c @@ -23,52 +23,52 @@ #include "taos.h" static int DemoInsertData() { -// ANCHOR: insert_data -const char *host = "localhost"; -const char *user = "root"; -const char *password = "taosdata"; -uint16_t port = 6030; -int code = 0; + // ANCHOR: insert_data + const char *host = "localhost"; + const char *user = "root"; + const char *password = "taosdata"; + uint16_t port = 6030; + int code = 0; -// connect -TAOS *taos = taos_connect(host, user, password, NULL, port); -if (taos == NULL) { - printf("Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); - taos_cleanup(); - return -1; -} + // connect + TAOS *taos = taos_connect(host, user, password, NULL, port); + if (taos == NULL) { + printf("Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), + taos_errstr(NULL)); + taos_cleanup(); + return -1; + } -// insert data, please make sure the database and table are already created -const char* sql = "INSERT INTO " \ - "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " \ - "VALUES " \ - "(NOW + 1a, 10.30000, 219, 0.31000) " \ - "(NOW + 2a, 12.60000, 218, 0.33000) " \ - "(NOW + 3a, 12.30000, 221, 0.31000) " \ - "power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " \ - "VALUES " \ - "(NOW + 1a, 10.30000, 218, 0.25000) "; -TAOS_RES *result = taos_query(taos, sql); -code = taos_errno(result); -if (code != 0) { - printf("Failed to insert data to power.meters, Server: %s:%hu, ErrCode: 0x%x, ErrMessage: %s\n.", host, port, code, taos_errstr(result)); + // insert data, please make sure the database and table are already created + const char *sql = + "INSERT INTO " + "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " + "VALUES " + "(NOW + 1a, 10.30000, 219, 0.31000) " + "(NOW + 2a, 12.60000, 218, 0.33000) " + "(NOW + 3a, 12.30000, 221, 0.31000) " + "power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " + "VALUES " + "(NOW + 1a, 10.30000, 218, 0.25000) "; + TAOS_RES *result = taos_query(taos, sql); + code = taos_errno(result); + if (code != 0) { + printf("Failed to insert data to power.meters, ErrCode: 0x%x, ErrMessage: %s\n.", code, taos_errstr(result)); + taos_close(taos); + taos_cleanup(); + return -1; + } + taos_free_result(result); + + // you can check affectedRows here + int rows = taos_affected_rows(result); + printf("Successfully inserted %d rows into power.meters.\n", rows); + + // close & clean taos_close(taos); taos_cleanup(); - return -1; -} -taos_free_result(result); - -// you can check affectedRows here -int rows = taos_affected_rows(result); -printf("Successfully inserted %d rows into power.meters.\n", rows); - -// close & clean -taos_close(taos); -taos_cleanup(); -return 0; -// ANCHOR_END: insert_data + return 0; + // ANCHOR_END: insert_data } -int 
main(int argc, char *argv[]) { - return DemoInsertData(); -} +int main(int argc, char *argv[]) { return DemoInsertData(); } diff --git a/docs/examples/c/query_data_demo.c b/docs/examples/c/query_data_demo.c index e58c467ccf..f2c6662d9e 100644 --- a/docs/examples/c/query_data_demo.c +++ b/docs/examples/c/query_data_demo.c @@ -22,61 +22,58 @@ #include #include "taos.h" - static int DemoQueryData() { -// ANCHOR: query_data -const char *host = "localhost"; -const char *user = "root"; -const char *password = "taosdata"; -uint16_t port = 6030; -int code = 0; + // ANCHOR: query_data + const char *host = "localhost"; + const char *user = "root"; + const char *password = "taosdata"; + uint16_t port = 6030; + int code = 0; -// connect -TAOS *taos = taos_connect(host, user, password, NULL, port); -if (taos == NULL) { - printf("Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); - taos_cleanup(); - return -1; -} + // connect + TAOS *taos = taos_connect(host, user, password, NULL, port); + if (taos == NULL) { + printf("Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), + taos_errstr(NULL)); + taos_cleanup(); + return -1; + } + // query data, please make sure the database and table are already created + const char *sql = "SELECT ts, current, location FROM power.meters limit 100"; + TAOS_RES *result = taos_query(taos, sql); + code = taos_errno(result); + if (code != 0) { + printf("Failed to query data from power.meters, sql: %s, ErrCode: 0x%x, ErrMessage: %s\n.", sql, code, + taos_errstr(result)); + taos_close(taos); + taos_cleanup(); + return -1; + } + TAOS_ROW row = NULL; + int rows = 0; + int num_fields = taos_field_count(result); + TAOS_FIELD *fields = taos_fetch_fields(result); -// query data, please make sure the database and table are already created -const char* sql = "SELECT ts, current, location FROM power.meters limit 100"; -TAOS_RES *result = taos_query(taos, sql); -code = taos_errno(result); -if (code != 0) { - printf("Failed to query data from power.meters, Server: %s:%hu, ErrCode: 0x%x, ErrMessage: %s\n.", host, port, code, taos_errstr(result)); + printf("fields: %d\n", num_fields); + printf("sql: %s, result:\n", sql); + + // fetch the records row by row + while ((row = taos_fetch_row(result))) { + char temp[1024] = {0}; + rows++; + taos_print_row(temp, row, fields, num_fields); + printf("%s\n", temp); + } + printf("total rows: %d\n", rows); + taos_free_result(result); + + // close & clean taos_close(taos); taos_cleanup(); - return -1; + return 0; + // ANCHOR_END: query_data } -TAOS_ROW row = NULL; -int rows = 0; -int num_fields = taos_field_count(result); -TAOS_FIELD *fields = taos_fetch_fields(result); - -printf("fields: %d\n", num_fields); -printf("sql: %s, result:\n", sql); - -// fetch the records row by row -while ((row = taos_fetch_row(result))) { - char temp[1024] = {0}; - rows++; - taos_print_row(temp, row, fields, num_fields); - printf("%s\n", temp); -} -printf("total rows: %d\n", rows); -taos_free_result(result); - -// close & clean -taos_close(taos); -taos_cleanup(); -return 0; -// ANCHOR_END: query_data -} - -int main(int argc, char *argv[]) { - return DemoQueryData(); -} +int main(int argc, char *argv[]) { return DemoQueryData(); } diff --git a/docs/examples/c/sml_insert_demo.c b/docs/examples/c/sml_insert_demo.c index 807fd10501..1107734c78 100644 --- a/docs/examples/c/sml_insert_demo.c +++ b/docs/examples/c/sml_insert_demo.c @@ -21,114 +21,119 @@ #include #include 
"taos.h" - static int DemoSmlInsert() { -// ANCHOR: schemaless -const char *host = "localhost"; -const char *user = "root"; -const char *password = "taosdata"; -uint16_t port = 6030; -int code = 0; + // ANCHOR: schemaless + const char *host = "localhost"; + const char *user = "root"; + const char *password = "taosdata"; + uint16_t port = 6030; + int code = 0; -// connect -TAOS *taos = taos_connect(host, user, password, NULL, port); -if (taos == NULL) { - printf("Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); - taos_cleanup(); - return -1; -} + // connect + TAOS *taos = taos_connect(host, user, password, NULL, port); + if (taos == NULL) { + printf("Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), + taos_errstr(NULL)); + taos_cleanup(); + return -1; + } -// create database -TAOS_RES *result = taos_query(taos, "CREATE DATABASE IF NOT EXISTS power"); -code = taos_errno(result); -if (code != 0) { - printf("Failed to create database power, Server: %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, code, taos_errstr(result)); - taos_close(taos); - taos_cleanup(); - return -1; -} -taos_free_result(result); -printf("Create database power successfully.\n"); + // create database + TAOS_RES *result = taos_query(taos, "CREATE DATABASE IF NOT EXISTS power"); + code = taos_errno(result); + if (code != 0) { + printf("Failed to create database power, ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_errstr(result)); + taos_close(taos); + taos_cleanup(); + return -1; + } + taos_free_result(result); + printf("Create database power successfully.\n"); -// use database -result = taos_query(taos, "USE power"); -code = taos_errno(result); -if (code != 0) { - printf("Failed to execute use power, Server: %s:%hu, ErrCode: 0x%x, ErrMessage: %s\n.", host, port, code, taos_errstr(result)); - taos_close(taos); - taos_cleanup(); - return -1; -} -taos_free_result(result); + // use database + result = taos_query(taos, "USE power"); + code = taos_errno(result); + if (code != 0) { + printf("Failed to execute use power, ErrCode: 0x%x, ErrMessage: %s\n.", code, taos_errstr(result)); + taos_close(taos); + taos_cleanup(); + return -1; + } + taos_free_result(result); -// schemaless demo data -char * line_demo = "meters,groupid=2,location=California.SanFrancisco current=10.3000002f64,voltage=219i32,phase=0.31f64 1626006833639"; -char * telnet_demo = "metric_telnet 1707095283260 4 host=host0 interface=eth0"; -char * json_demo = "{\"metric\": \"metric_json\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}"; + // schemaless demo data + char *line_demo = + "meters,groupid=2,location=California.SanFrancisco current=10.3000002f64,voltage=219i32,phase=0.31f64 " + "1626006833639"; + char *telnet_demo = "metric_telnet 1707095283260 4 host=host0 interface=eth0"; + char *json_demo = + "{\"metric\": \"metric_json\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, " + "\"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}"; -// influxdb line protocol -char *lines[] = {line_demo}; -result = taos_schemaless_insert(taos, lines, 1, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS); -code = taos_errno(result); -if (code != 0) { - printf("Failed to insert schemaless line data, Server: %s:%hu, ErrCode: 0x%x, ErrMessage: %s\n.", host, port, code, taos_errstr(result)); - taos_close(taos); - taos_cleanup(); - return -1; -} 
+ // influxdb line protocol + char *lines[] = {line_demo}; + result = taos_schemaless_insert(taos, lines, 1, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS); + code = taos_errno(result); + if (code != 0) { + printf("Failed to insert schemaless line data, data: %s, ErrCode: 0x%x, ErrMessage: %s\n.", line_demo, code, + taos_errstr(result)); + taos_close(taos); + taos_cleanup(); + return -1; + } -int rows = taos_affected_rows(result); -printf("Insert %d rows of schemaless line data successfully.\n", rows); -taos_free_result(result); + int rows = taos_affected_rows(result); + printf("Insert %d rows of schemaless line data successfully.\n", rows); + taos_free_result(result); -// opentsdb telnet protocol -char *telnets[] = {telnet_demo}; -result = taos_schemaless_insert(taos, telnets, 1, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS); -code = taos_errno(result); -if (code != 0) { - printf("Failed to insert schemaless telnet data, Server: %s:%hu, ErrCode: 0x%x, ErrMessage: %s\n.", host, port, code, taos_errstr(result)); - taos_close(taos); - taos_cleanup(); - return -1; -} + // opentsdb telnet protocol + char *telnets[] = {telnet_demo}; + result = taos_schemaless_insert(taos, telnets, 1, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS); + code = taos_errno(result); + if (code != 0) { + printf("Failed to insert schemaless telnet data, data: %s, ErrCode: 0x%x, ErrMessage: %s\n.", telnet_demo, code, + taos_errstr(result)); + taos_close(taos); + taos_cleanup(); + return -1; + } -rows = taos_affected_rows(result); -printf("Insert %d rows of schemaless telnet data successfully.\n", rows); -taos_free_result(result); + rows = taos_affected_rows(result); + printf("Insert %d rows of schemaless telnet data successfully.\n", rows); + taos_free_result(result); -// opentsdb json protocol -char *jsons[1] = {0}; -// allocate memory for json data. can not use static memory. -jsons[0] = malloc(1024); -if (jsons[0] == NULL) { - printf("Failed to allocate memory\n"); - taos_close(taos); - taos_cleanup(); - return -1; -} -(void)strncpy(jsons[0], json_demo, 1023); -result = taos_schemaless_insert(taos, jsons, 1, TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED); -code = taos_errno(result); -if (code != 0) { + // opentsdb json protocol + char *jsons[1] = {0}; + // allocate memory for json data. can not use static memory. 
+ jsons[0] = malloc(1024); + if (jsons[0] == NULL) { + printf("Failed to allocate memory\n"); + taos_close(taos); + taos_cleanup(); + return -1; + } + (void)strncpy(jsons[0], json_demo, 1023); + result = taos_schemaless_insert(taos, jsons, 1, TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED); + code = taos_errno(result); + if (code != 0) { + free(jsons[0]); + printf("Failed to insert schemaless json data, Server: %s, ErrCode: 0x%x, ErrMessage: %s\n.", json_demo, code, + taos_errstr(result)); + taos_close(taos); + taos_cleanup(); + return -1; + } free(jsons[0]); - printf("Failed to insert schemaless json data, Server: %s:%hu, ErrCode: 0x%x, ErrMessage: %s\n.", host, port, code, taos_errstr(result)); + + rows = taos_affected_rows(result); + printf("Insert %d rows of schemaless json data successfully.\n", rows); + taos_free_result(result); + + // close & clean taos_close(taos); taos_cleanup(); - return -1; -} -free(jsons[0]); - -rows = taos_affected_rows(result); -printf("Insert %d rows of schemaless json data successfully.\n", rows); -taos_free_result(result); - -// close & clean -taos_close(taos); -taos_cleanup(); -return 0; -// ANCHOR_END: schemaless + return 0; + // ANCHOR_END: schemaless } -int main(int argc, char *argv[]) { - return DemoSmlInsert(); -} +int main(int argc, char *argv[]) { return DemoSmlInsert(); } diff --git a/docs/examples/c/tmq_demo.c b/docs/examples/c/tmq_demo.c index e7a2425c7d..55ec568dc5 100644 --- a/docs/examples/c/tmq_demo.c +++ b/docs/examples/c/tmq_demo.c @@ -41,14 +41,15 @@ typedef struct { } ConsumerConfig; void* prepare_data(void* arg) { - const char *host = "localhost"; - const char *user = "root"; - const char *password = "taosdata"; - uint16_t port = 6030; - int code = 0; - TAOS *pConn = taos_connect(host, user, password, NULL, port); + const char* host = "localhost"; + const char* user = "root"; + const char* password = "taosdata"; + uint16_t port = 6030; + int code = 0; + TAOS* pConn = taos_connect(host, user, password, NULL, port); if (pConn == NULL) { - fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); + fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), + taos_errstr(NULL)); taos_cleanup(); return NULL; } @@ -68,7 +69,8 @@ void* prepare_data(void* arg) { pRes = taos_query(pConn, buf); code = taos_errno(pRes); if (code != 0) { - fprintf(stderr, "Failed to insert data to power.meters, ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_errstr(pRes)); + fprintf(stderr, "Failed to insert data to power.meters, ErrCode: 0x%x, ErrMessage: %s.\n", code, + taos_errstr(pRes)); } taos_free_result(pRes); sleep(1); @@ -115,14 +117,15 @@ static int32_t msg_process(TAOS_RES* msg) { // ANCHOR_END: msg_process static int32_t init_env() { - const char *host = "localhost"; - const char *user = "root"; - const char *password = "taosdata"; - uint16_t port = 6030; - int code = 0; - TAOS *pConn = taos_connect(host, user, password, NULL, port); + const char* host = "localhost"; + const char* user = "root"; + const char* password = "taosdata"; + uint16_t port = 6030; + int code = 0; + TAOS* pConn = taos_connect(host, user, password, NULL, port); if (pConn == NULL) { - fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); + fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), + taos_errstr(NULL)); 
taos_cleanup(); return -1; } @@ -177,15 +180,16 @@ END: int32_t create_topic() { fprintf(stdout, "Create topic.\n"); - TAOS_RES* pRes; - const char *host = "localhost"; - const char *user = "root"; - const char *password = "taosdata"; - uint16_t port = 6030; - int code = 0; - TAOS *pConn = taos_connect(host, user, password, NULL, port); + TAOS_RES* pRes; + const char* host = "localhost"; + const char* user = "root"; + const char* password = "taosdata"; + uint16_t port = 6030; + int code = 0; + TAOS* pConn = taos_connect(host, user, password, NULL, port); if (pConn == NULL) { - fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); + fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), + taos_errstr(NULL)); taos_cleanup(); return -1; } @@ -348,7 +352,8 @@ void consume_repeatly(tmq_t* tmq) { code = tmq_offset_seek(tmq, topic_name, p->vgId, p->begin); if (code != 0) { - fprintf(stderr, "Failed to seek assignment %d to beginning %ld, ErrCode: 0x%x, ErrMessage: %s.\n", i, p->begin, code, tmq_err2str(code)); + fprintf(stderr, "Failed to seek assignment %d to beginning %ld, ErrCode: 0x%x, ErrMessage: %s.\n", i, p->begin, + code, tmq_err2str(code)); } else { fprintf(stdout, "Seek assignment %d to beginning %ld successfully.\n", i, p->begin); } @@ -417,25 +422,25 @@ int main(int argc, char* argv[]) { return -1; } - ConsumerConfig config = { - .enable_auto_commit = "true", - .auto_commit_interval_ms = "1000", - .group_id = "group1", - .client_id = "client1", - .td_connect_host = "localhost", - .td_connect_port = "6030", - .td_connect_user = "root", - .td_connect_pass = "taosdata", - .auto_offset_reset = "latest" - }; + ConsumerConfig config = {.enable_auto_commit = "true", + .auto_commit_interval_ms = "1000", + .group_id = "group1", + .client_id = "client1", + .td_connect_host = "localhost", + .td_connect_port = "6030", + .td_connect_user = "root", + .td_connect_pass = "taosdata", + .auto_offset_reset = "latest"}; // ANCHOR: create_consumer_2 tmq_t* tmq = build_consumer(&config); if (NULL == tmq) { - fprintf(stderr, "Failed to create native consumer, host: %s, groupId: %s, , clientId: %s.\n", config.td_connect_host, config.group_id, config.client_id); + fprintf(stderr, "Failed to create native consumer, host: %s, groupId: %s, , clientId: %s.\n", + config.td_connect_host, config.group_id, config.client_id); return -1; } else { - fprintf(stdout, "Create consumer successfully, host: %s, groupId: %s, , clientId: %s.\n", config.td_connect_host, config.group_id, config.client_id); + fprintf(stdout, "Create consumer successfully, host: %s, groupId: %s, , clientId: %s.\n", config.td_connect_host, + config.group_id, config.client_id); } // ANCHOR_END: create_consumer_2 diff --git a/docs/examples/c/with_reqid_demo.c b/docs/examples/c/with_reqid_demo.c index 724b682850..b3e4e79e39 100644 --- a/docs/examples/c/with_reqid_demo.c +++ b/docs/examples/c/with_reqid_demo.c @@ -23,59 +23,57 @@ #include "taos.h" static int DemoWithReqId() { -// ANCHOR: with_reqid -const char *host = "localhost"; -const char *user = "root"; -const char *password = "taosdata"; -uint16_t port = 6030; -int code = 0; + // ANCHOR: with_reqid + const char *host = "localhost"; + const char *user = "root"; + const char *password = "taosdata"; + uint16_t port = 6030; + int code = 0; -// connect -TAOS *taos = taos_connect(host, user, password, NULL, port); -if (taos == NULL) { - printf("Failed to connect to %s:%hu, 
ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); - taos_cleanup(); - return -1; -} + // connect + TAOS *taos = taos_connect(host, user, password, NULL, port); + if (taos == NULL) { + printf("Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), + taos_errstr(NULL)); + taos_cleanup(); + return -1; + } -const char *sql = "SELECT ts, current, location FROM power.meters limit 1"; -// query data with reqid -long reqid = 3L; -TAOS_RES *result = taos_query_with_reqid(taos, sql, reqid); -code = taos_errno(result); -if (code != 0) { - printf("Failed to execute sql with reqId: %ld, Server: %s:%hu, ErrCode: 0x%x, ErrMessage: %s\n.", reqid, host, port, code, taos_errstr(result)); + const char *sql = "SELECT ts, current, location FROM power.meters limit 1"; + // query data with reqid + long reqid = 3L; + TAOS_RES *result = taos_query_with_reqid(taos, sql, reqid); + code = taos_errno(result); + if (code != 0) { + printf("Failed to execute sql with reqId: %ld, ErrCode: 0x%x, ErrMessage: %s\n.", reqid, code, taos_errstr(result)); + taos_close(taos); + taos_cleanup(); + return -1; + } + + TAOS_ROW row = NULL; + int rows = 0; + int num_fields = taos_field_count(result); + TAOS_FIELD *fields = taos_fetch_fields(result); + + printf("fields: %d\n", num_fields); + printf("sql: %s, result:\n", sql); + + // fetch the records row by row + while ((row = taos_fetch_row(result))) { + char temp[1024] = {0}; + rows++; + taos_print_row(temp, row, fields, num_fields); + printf("%s\n", temp); + } + printf("total rows: %d\n", rows); + taos_free_result(result); + + // close & clean taos_close(taos); taos_cleanup(); - return -1; -} - -TAOS_ROW row = NULL; -int rows = 0; -int num_fields = taos_field_count(result); -TAOS_FIELD *fields = taos_fetch_fields(result); - -printf("fields: %d\n", num_fields); -printf("sql: %s, result:\n", sql); - -// fetch the records row by row -while ((row = taos_fetch_row(result))) { - char temp[1024] = {0}; - rows++; - taos_print_row(temp, row, fields, num_fields); - printf("%s\n", temp); -} -printf("total rows: %d\n", rows); -taos_free_result(result); - -// close & clean -taos_close(taos); -taos_cleanup(); -return 0; -// ANCHOR_END: with_reqid -} - -int main(int argc, char *argv[]) { - return DemoWithReqId(); + return 0; + // ANCHOR_END: with_reqid } +int main(int argc, char *argv[]) { return DemoWithReqId(); } From 9792683b7de561317da30472a04a9f9cfa3d6902 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Wed, 14 Aug 2024 17:50:20 +0800 Subject: [PATCH 033/181] handle some of return -1 --- include/util/tchecksum.h | 2 +- source/common/src/tname.c | 2 +- source/dnode/vnode/src/meta/metaCache.c | 6 ++--- source/dnode/vnode/src/tsdb/tsdbReadUtil.c | 2 +- source/dnode/vnode/src/vnd/vnodeCfg.c | 3 +-- source/dnode/vnode/src/vnd/vnodeCommit.c | 7 +++-- source/dnode/vnode/src/vnd/vnodeOpen.c | 31 +++++++++++----------- source/dnode/vnode/src/vnd/vnodeQuery.c | 15 +++++------ source/dnode/vnode/src/vnd/vnodeSvr.c | 2 +- source/dnode/vnode/src/vnd/vnodeSync.c | 2 +- source/libs/wal/src/walMeta.c | 14 +++++++--- source/util/src/tbloomfilter.c | 14 +++++----- source/util/src/tqueue.c | 2 +- source/util/src/tscalablebf.c | 14 +++++----- source/util/src/tsched.c | 4 +-- source/util/src/tsimplehash.c | 2 +- 16 files changed, 63 insertions(+), 59 deletions(-) diff --git a/include/util/tchecksum.h b/include/util/tchecksum.h index 28fb784c46..9113861d4c 100644 --- a/include/util/tchecksum.h +++ b/include/util/tchecksum.h @@ -29,7 +29,7 @@ 
static FORCE_INLINE TSCKSUM taosCalcChecksum(TSCKSUM csi, const uint8_t *stream, } static FORCE_INLINE int32_t taosCalcChecksumAppend(TSCKSUM csi, uint8_t *stream, uint32_t ssize) { - if (ssize < sizeof(TSCKSUM)) return -1; + if (ssize < sizeof(TSCKSUM)) return TSDB_CODE_INVALID_PARA; *((TSCKSUM *)(stream + ssize - sizeof(TSCKSUM))) = (*crc32c)(csi, stream, (size_t)(ssize - sizeof(TSCKSUM))); diff --git a/source/common/src/tname.c b/source/common/src/tname.c index e495547cc5..491c203a58 100644 --- a/source/common/src/tname.c +++ b/source/common/src/tname.c @@ -169,7 +169,7 @@ void tNameAssign(SName* dst, const SName* src) { memcpy(dst, src, sizeof(SName)) int32_t tNameSetDbName(SName* dst, int32_t acct, const char* dbName, size_t nameLen) { // too long account id or too long db name if (nameLen <= 0 || nameLen >= tListLen(dst->dbname)) { - return -1; + return TSDB_CODE_INVALID_PARA; } dst->type = TSDB_DB_NAME_T; diff --git a/source/dnode/vnode/src/meta/metaCache.c b/source/dnode/vnode/src/meta/metaCache.c index 3d4067ba99..98f5615f5e 100644 --- a/source/dnode/vnode/src/meta/metaCache.c +++ b/source/dnode/vnode/src/meta/metaCache.c @@ -271,7 +271,7 @@ int32_t metaCacheUpsert(SMeta* pMeta, SMetaInfo* pInfo) { if (*ppEntry) { // update if (pInfo->suid != (*ppEntry)->info.suid) { metaError("meta/cache: suid should be same as the one in cache."); - return TSDB_CODE_FAILED; + return TSDB_CODE_INVALID_PARA; } if (pInfo->version > (*ppEntry)->info.version) { (*ppEntry)->info.version = pInfo->version; @@ -543,7 +543,7 @@ int32_t metaGetCachedTableUidList(void* pVnode, tb_uid_t suid, const uint8_t* pK STagFilterResEntry** pEntry = taosHashGet(pTableMap, &suid, sizeof(uint64_t)); if (NULL == pEntry) { metaError("meta/cache: pEntry should not be NULL."); - return TSDB_CODE_FAILED; + return TSDB_CODE_NOT_FOUND; } *acquireRes = 1; @@ -750,7 +750,7 @@ int32_t metaGetCachedTbGroup(void* pVnode, tb_uid_t suid, const uint8_t* pKey, i STagFilterResEntry** pEntry = taosHashGet(pTableMap, &suid, sizeof(uint64_t)); if (NULL == pEntry) { metaDebug("suid %" PRIu64 " not in tb group cache", suid); - return TSDB_CODE_FAILED; + return TSDB_CODE_NOT_FOUND; } *pList = taosArrayDup(taosLRUCacheValue(pCache, pHandle), NULL); diff --git a/source/dnode/vnode/src/tsdb/tsdbReadUtil.c b/source/dnode/vnode/src/tsdb/tsdbReadUtil.c index 1fba59c9b2..7db66cf47c 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReadUtil.c +++ b/source/dnode/vnode/src/tsdb/tsdbReadUtil.c @@ -139,7 +139,7 @@ int32_t getPosInBlockInfoBuf(SBlockInfoBuf* pBuf, int32_t index, STableBlockScan int32_t bucketIndex = index / pBuf->numPerBucket; char** pBucket = taosArrayGet(pBuf->pData, bucketIndex); if (pBucket == NULL) { - return TSDB_CODE_FAILED; + return TSDB_CODE_NOT_FOUND; } *pInfo = (STableBlockScanInfo*)((*pBucket) + (index % pBuf->numPerBucket) * sizeof(STableBlockScanInfo)); diff --git a/source/dnode/vnode/src/vnd/vnodeCfg.c b/source/dnode/vnode/src/vnd/vnodeCfg.c index e2db87173d..1f2cf707f3 100644 --- a/source/dnode/vnode/src/vnd/vnodeCfg.c +++ b/source/dnode/vnode/src/vnd/vnodeCfg.c @@ -301,8 +301,7 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) { #if defined(TD_ENTERPRISE) if (pCfg->tdbEncryptAlgorithm == DND_CA_SM4) { if (tsEncryptKey[0] == 0) { - terrno = TSDB_CODE_DNODE_INVALID_ENCRYPTKEY; - return -1; + return terrno = TSDB_CODE_DNODE_INVALID_ENCRYPTKEY; } else { strncpy(pCfg->tdbEncryptKey, tsEncryptKey, ENCRYPT_KEY_LEN); } diff --git a/source/dnode/vnode/src/vnd/vnodeCommit.c b/source/dnode/vnode/src/vnd/vnodeCommit.c index 
cfaf155276..8fcbe49f9a 100644 --- a/source/dnode/vnode/src/vnd/vnodeCommit.c +++ b/source/dnode/vnode/src/vnd/vnodeCommit.c @@ -413,9 +413,9 @@ static int vnodeCommitImpl(SCommitInfo *pInfo) { pInfo->info.state.commitID, pInfo->info.state.committed, pInfo->info.state.commitTerm); // persist wal before starting - if (walPersist(pVnode->pWal) < 0) { - vError("vgId:%d, failed to persist wal since %s", TD_VID(pVnode), terrstr()); - return -1; + if ((code = walPersist(pVnode->pWal)) < 0) { + vError("vgId:%d, failed to persist wal since %s", TD_VID(pVnode), tstrerror(code)); + return code; } (void)vnodeGetPrimaryDir(pVnode->path, pVnode->diskPrimary, pVnode->pTfs, dir, TSDB_FILENAME_LEN); @@ -556,7 +556,6 @@ int vnodeDecodeInfo(uint8_t *pData, SVnodeInfo *pInfo) { pJson = tjsonParse(pData); if (pJson == NULL) { TSDB_CHECK_CODE(code = TSDB_CODE_INVALID_DATA_FMT, lino, _exit); - return -1; } code = tjsonToObject(pJson, "config", vnodeDecodeConfig, (void *)&pInfo->config); diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c index 4f5d7c24e1..ed008d4f88 100644 --- a/source/dnode/vnode/src/vnd/vnodeOpen.c +++ b/source/dnode/vnode/src/vnd/vnodeOpen.c @@ -39,13 +39,14 @@ static int32_t vnodeMkDir(STfs *pTfs, const char *path) { } int32_t vnodeCreate(const char *path, SVnodeCfg *pCfg, int32_t diskPrimary, STfs *pTfs) { + int32_t code = 0; SVnodeInfo info = {0}; char dir[TSDB_FILENAME_LEN] = {0}; // check config - if (vnodeCheckCfg(pCfg) < 0) { - vError("vgId:%d, failed to create vnode since:%s", pCfg->vgId, tstrerror(terrno)); - return -1; + if ((code = vnodeCheckCfg(pCfg)) < 0) { + vError("vgId:%d, failed to create vnode since:%s", pCfg->vgId, tstrerror(code)); + return code; } // create vnode env @@ -72,9 +73,9 @@ int32_t vnodeCreate(const char *path, SVnodeCfg *pCfg, int32_t diskPrimary, STfs } vInfo("vgId:%d, save config while create", info.config.vgId); - if (vnodeSaveInfo(dir, &info) < 0 || vnodeCommitInfo(dir) < 0) { - vError("vgId:%d, failed to save vnode config since %s", pCfg ? pCfg->vgId : 0, tstrerror(terrno)); - return -1; + if ((code = vnodeSaveInfo(dir, &info)) < 0 || (code = vnodeCommitInfo(dir)) < 0) { + vError("vgId:%d, failed to save vnode config since %s", pCfg ? 
pCfg->vgId : 0, tstrerror(code)); + return code; } vInfo("vgId:%d, vnode is created", info.config.vgId); @@ -93,7 +94,7 @@ int32_t vnodeAlterReplica(const char *path, SAlterVnodeReplicaReq *pReq, int32_t ret = vnodeLoadInfo(dir, &info); if (ret < 0) { vError("vgId:%d, failed to read vnode config from %s since %s", pReq->vgId, path, tstrerror(terrno)); - return -1; + return ret; } SSyncCfg *pCfg = &info.config.syncCfg; @@ -144,13 +145,13 @@ int32_t vnodeAlterReplica(const char *path, SAlterVnodeReplicaReq *pReq, int32_t ret = vnodeSaveInfo(dir, &info); if (ret < 0) { vError("vgId:%d, failed to save vnode config since %s", pReq->vgId, tstrerror(terrno)); - return -1; + return ret; } ret = vnodeCommitInfo(dir); if (ret < 0) { vError("vgId:%d, failed to commit vnode config since %s", pReq->vgId, tstrerror(terrno)); - return -1; + return ret; } vInfo("vgId:%d, vnode config is saved", info.config.vgId); @@ -226,7 +227,7 @@ int32_t vnodeAlterHashRange(const char *srcPath, const char *dstPath, SAlterVnod ret = vnodeLoadInfo(dir, &info); if (ret < 0) { vError("vgId:%d, failed to read vnode config from %s since %s", pReq->srcVgId, srcPath, tstrerror(terrno)); - return -1; + return ret; } vInfo("vgId:%d, alter hashrange from [%u, %u] to [%u, %u]", pReq->srcVgId, info.config.hashBegin, info.config.hashEnd, @@ -256,13 +257,13 @@ int32_t vnodeAlterHashRange(const char *srcPath, const char *dstPath, SAlterVnod ret = vnodeSaveInfo(dir, &info); if (ret < 0) { vError("vgId:%d, failed to save vnode config since %s", pReq->dstVgId, tstrerror(terrno)); - return -1; + return ret; } ret = vnodeCommitInfo(dir); if (ret < 0) { vError("vgId:%d, failed to commit vnode config since %s", pReq->dstVgId, tstrerror(terrno)); - return -1; + return ret; } vInfo("vgId:%d, rename %s to %s", pReq->dstVgId, srcPath, dstPath); @@ -270,7 +271,7 @@ int32_t vnodeAlterHashRange(const char *srcPath, const char *dstPath, SAlterVnod if (ret < 0) { vError("vgId:%d, failed to rename vnode from %s to %s since %s", pReq->dstVgId, srcPath, dstPath, tstrerror(terrno)); - return -1; + return ret; } vInfo("vgId:%d, vnode hashrange is altered", info.config.vgId); @@ -293,9 +294,9 @@ int32_t vnodeRestoreVgroupId(const char *srcPath, const char *dstPath, int32_t s } (void)vnodeGetPrimaryDir(srcPath, diskPrimary, pTfs, dir, TSDB_FILENAME_LEN); - if (vnodeLoadInfo(dir, &info) < 0) { + if ((code = vnodeLoadInfo(dir, &info)) < 0) { vError("vgId:%d, failed to read vnode config from %s since %s", srcVgId, srcPath, tstrerror(terrno)); - return -1; + return code; } if (info.config.vgId == srcVgId) { diff --git a/source/dnode/vnode/src/vnd/vnodeQuery.c b/source/dnode/vnode/src/vnd/vnodeQuery.c index 36d473fe56..904b29bf43 100644 --- a/source/dnode/vnode/src/vnd/vnodeQuery.c +++ b/source/dnode/vnode/src/vnd/vnodeQuery.c @@ -545,7 +545,7 @@ int32_t vnodeGetAllTableList(SVnode *pVnode, uint64_t uid, SArray *list) { SMCtbCursor *pCur = metaOpenCtbCursor(pVnode, uid, 1); if (NULL == pCur) { qError("vnode get all table list failed"); - return TSDB_CODE_FAILED; + return terrno; } while (1) { @@ -576,7 +576,7 @@ int32_t vnodeGetCtbIdList(void *pVnode, int64_t suid, SArray *list) { SMCtbCursor *pCur = metaOpenCtbCursor(pVnodeObj, suid, 1); if (NULL == pCur) { qError("vnode get all table list failed"); - return TSDB_CODE_FAILED; + return terrno; } while (1) { @@ -627,7 +627,7 @@ int32_t vnodeGetStbIdListByFilter(SVnode *pVnode, int64_t suid, SArray *list, bo int32_t code = TSDB_CODE_SUCCESS; SMStbCursor *pCur = metaOpenStbCursor(pVnode->pMeta, suid); if (!pCur) { 
- return TSDB_CODE_FAILED; + return terrno; } while (1) { @@ -655,7 +655,7 @@ _exit: int32_t vnodeGetCtbNum(SVnode *pVnode, int64_t suid, int64_t *num) { SMCtbCursor *pCur = metaOpenCtbCursor(pVnode, suid, 0); if (!pCur) { - return TSDB_CODE_FAILED; + return terrno; } *num = 0; @@ -757,8 +757,7 @@ int32_t vnodeGetTimeSeriesNum(SVnode *pVnode, int64_t *num) { SArray *suidList = NULL; if (!(suidList = taosArrayInit(1, sizeof(tb_uid_t)))) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return TSDB_CODE_FAILED; + return terrno = TSDB_CODE_OUT_OF_MEMORY; } int32_t tbFilterSize = 0; @@ -774,7 +773,7 @@ int32_t vnodeGetTimeSeriesNum(SVnode *pVnode, int64_t *num) { (tbFilterSize && vnodeGetStbIdListByFilter(pVnode, 0, suidList, vnodeTimeSeriesFilter, pVnode) < 0)) { qError("vgId:%d, failed to get stb id list error: %s", TD_VID(pVnode), terrstr()); taosArrayDestroy(suidList); - return TSDB_CODE_FAILED; + return terrno; } *num = 0; @@ -799,7 +798,7 @@ _exit: int32_t vnodeGetAllCtbNum(SVnode *pVnode, int64_t *num) { SMStbCursor *pCur = metaOpenStbCursor(pVnode->pMeta, 0); if (!pCur) { - return TSDB_CODE_FAILED; + return terrno; } *num = 0; diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 3f6ca053cd..dc84b73c10 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -1980,7 +1980,7 @@ _err: tDecoderClear(&coder); vError("vgId:%d, failed to create tsma %s:%" PRIi64 " version %" PRIi64 "for table %" PRIi64 " since %s", TD_VID(pVnode), req.indexName, req.indexUid, ver, req.tableUid, terrstr()); - return -1; + return terrno; } /** diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c index d9630b56fd..31e44f5912 100644 --- a/source/dnode/vnode/src/vnd/vnodeSync.c +++ b/source/dnode/vnode/src/vnd/vnodeSync.c @@ -381,7 +381,7 @@ int32_t vnodeProcessSyncMsg(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) { static int32_t vnodeSyncEqCtrlMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) { if (pMsg == NULL || pMsg->pCont == NULL) { - return -1; + return TSDB_CODE_INVALID_PARA; } if (msgcb == NULL || msgcb->putToQueueFp == NULL) { diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index f953f50aa7..db1d61a023 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -818,7 +818,10 @@ int32_t walMetaDeserialize(SWal* pWal, const char* bytes) { int sz = cJSON_GetArraySize(pFiles); // deserialize SArray* pArray = pWal->fileInfoSet; - (void)taosArrayEnsureCap(pArray, sz); + if (taosArrayEnsureCap(pArray, sz)) { + cJSON_Delete(pRoot); + return terrno; + } for (int i = 0; i < sz; i++) { pInfoJson = cJSON_GetArrayItem(pFiles, i); @@ -841,7 +844,10 @@ int32_t walMetaDeserialize(SWal* pWal, const char* bytes) { pField = cJSON_GetObjectItem(pInfoJson, "fileSize"); if (!pField) goto _err; info.fileSize = atoll(cJSON_GetStringValue(pField)); - (void)taosArrayPush(pArray, &info); + if (!taosArrayPush(pArray, &info)) { + cJSON_Delete(pRoot); + return terrno; + } } pWal->fileInfoSet = pArray; pWal->writeCur = sz - 1; @@ -860,8 +866,8 @@ static int walFindCurMetaVer(SWal* pWal) { TdDirPtr pDir = taosOpenDir(pWal->path); if (pDir == NULL) { - wError("vgId:%d, path:%s, failed to open since %s", pWal->cfg.vgId, pWal->path, strerror(errno)); - return -1; + wError("vgId:%d, path:%s, failed to open since %s", pWal->cfg.vgId, pWal->path, tstrerror(terrno)); + return terrno; } TdDirEntryPtr pDirEntry; diff --git a/source/util/src/tbloomfilter.c b/source/util/src/tbloomfilter.c 
index cf9d5cd79c..b20fb4bf39 100644 --- a/source/util/src/tbloomfilter.c +++ b/source/util/src/tbloomfilter.c @@ -131,16 +131,16 @@ void tBloomFilterDestroy(SBloomFilter* pBF) { } int32_t tBloomFilterEncode(const SBloomFilter* pBF, SEncoder* pEncoder) { - if (tEncodeU32(pEncoder, pBF->hashFunctions) < 0) return -1; - if (tEncodeU64(pEncoder, pBF->expectedEntries) < 0) return -1; - if (tEncodeU64(pEncoder, pBF->numUnits) < 0) return -1; - if (tEncodeU64(pEncoder, pBF->numBits) < 0) return -1; - if (tEncodeU64(pEncoder, pBF->size) < 0) return -1; + TAOS_CHECK_RETURN(tEncodeU32(pEncoder, pBF->hashFunctions)); + TAOS_CHECK_RETURN(tEncodeU64(pEncoder, pBF->expectedEntries)); + TAOS_CHECK_RETURN(tEncodeU64(pEncoder, pBF->numUnits)); + TAOS_CHECK_RETURN(tEncodeU64(pEncoder, pBF->numBits)); + TAOS_CHECK_RETURN(tEncodeU64(pEncoder, pBF->size)); for (uint64_t i = 0; i < pBF->numUnits; i++) { uint64_t* pUnits = (uint64_t*)pBF->buffer; - if (tEncodeU64(pEncoder, pUnits[i]) < 0) return -1; + TAOS_CHECK_RETURN(tEncodeU64(pEncoder, pUnits[i])); } - if (tEncodeDouble(pEncoder, pBF->errorRate) < 0) return -1; + TAOS_CHECK_RETURN(tEncodeDouble(pEncoder, pBF->errorRate)); return 0; } diff --git a/source/util/src/tqueue.c b/source/util/src/tqueue.c index 9ba88b4451..780a6c94f1 100644 --- a/source/util/src/tqueue.c +++ b/source/util/src/tqueue.c @@ -372,7 +372,7 @@ void taosQsetThreadResume(STaosQset *qset) { } int32_t taosAddIntoQset(STaosQset *qset, STaosQueue *queue, void *ahandle) { - if (queue->qset) return -1; + if (queue->qset) return TSDB_CODE_INVALID_PARA; (void)taosThreadMutexLock(&qset->mutex); diff --git a/source/util/src/tscalablebf.c b/source/util/src/tscalablebf.c index 80b633f5e8..c48cd38886 100644 --- a/source/util/src/tscalablebf.c +++ b/source/util/src/tscalablebf.c @@ -188,19 +188,19 @@ void tScalableBfDestroy(SScalableBf* pSBf) { int32_t tScalableBfEncode(const SScalableBf* pSBf, SEncoder* pEncoder) { if (!pSBf) { - if (tEncodeI32(pEncoder, 0) < 0) return -1; + TAOS_CHECK_RETURN(tEncodeI32(pEncoder, 0)); return 0; } int32_t size = taosArrayGetSize(pSBf->bfArray); - if (tEncodeI32(pEncoder, size) < 0) return -1; + TAOS_CHECK_RETURN(tEncodeI32(pEncoder, size)); for (int32_t i = 0; i < size; i++) { SBloomFilter* pBF = taosArrayGetP(pSBf->bfArray, i); - if (tBloomFilterEncode(pBF, pEncoder) < 0) return -1; + TAOS_CHECK_RETURN(tBloomFilterEncode(pBF, pEncoder)); } - if (tEncodeU32(pEncoder, pSBf->growth) < 0) return -1; - if (tEncodeU64(pEncoder, pSBf->numBits) < 0) return -1; - if (tEncodeU32(pEncoder, pSBf->maxBloomFilters) < 0) return -1; - if (tEncodeI8(pEncoder, pSBf->status) < 0) return -1; + TAOS_CHECK_RETURN(tEncodeU32(pEncoder, pSBf->growth)); + TAOS_CHECK_RETURN(tEncodeU64(pEncoder, pSBf->numBits)); + TAOS_CHECK_RETURN(tEncodeU32(pEncoder, pSBf->maxBloomFilters)); + TAOS_CHECK_RETURN(tEncodeI8(pEncoder, pSBf->status)); return 0; } diff --git a/source/util/src/tsched.c b/source/util/src/tsched.c index 04d903491b..6779e8dee5 100644 --- a/source/util/src/tsched.c +++ b/source/util/src/tsched.c @@ -189,12 +189,12 @@ int taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg) { if (pSched == NULL) { uError("sched is not ready, msg:%p is dropped", pMsg); - return -1; + return TSDB_CODE_INVALID_PARA; } if (atomic_load_8(&pSched->stop)) { uError("sched is already stopped, msg:%p is dropped", pMsg); - return -1; + return TSDB_CODE_INVALID_PARA; } if ((ret = tsem_wait(&pSched->emptySem)) != 0) { diff --git a/source/util/src/tsimplehash.c b/source/util/src/tsimplehash.c index 
fddb3f354d..e39c7364b7 100644 --- a/source/util/src/tsimplehash.c +++ b/source/util/src/tsimplehash.c @@ -209,7 +209,7 @@ static int32_t tSimpleHashTableResize(SSHashObj *pHashObj) { int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, size_t keyLen, const void *data, size_t dataLen) { if (!pHashObj || !key) { - return TSDB_CODE_FAILED; + return TSDB_CODE_INVALID_PARA; } uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)keyLen); From 94891943a98f126669ebe3dbe0330db4d20fd63a Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Wed, 14 Aug 2024 17:51:26 +0800 Subject: [PATCH 034/181] test:add release building tests and modify debug in unit-test --- tests/parallel_test/container_build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index 923a65ec3a..a386269f85 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -137,7 +137,7 @@ docker run \ -v ${REP_REAL_PATH}/community/contrib/xml2/:${REP_DIR}/community/contrib/xml2 \ -v ${REP_REAL_PATH}/community/contrib/zlib/:${REP_DIR}/community/contrib/zlib \ -v ${REP_REAL_PATH}/community/contrib/zstd/:${REP_DIR}/community/contrib/zstd \ - --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=false -DWEBSOCKET=true -DBUILD_SANITIZER=1 -DTOOLS_SANITIZE=true -DCMAKE_BUILD_TYPE=RelWithDebInfo -DTOOLS_BUILD_TYPE=Debug -DBUILD_TAOSX=false -DJEMALLOC_ENABLED=0;make -j|| exit 1 " + --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. 
-DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=false -DWEBSOCKET=true -DBUILD_SANITIZER=1 -DTOOLS_SANITIZE=true -DCMAKE_BUILD_TYPE=Debug -DTOOLS_BUILD_TYPE=Debug -DBUILD_TAOSX=false -DJEMALLOC_ENABLED=0;make -j|| exit 1 " mv ${REP_REAL_PATH}/debug ${WORKDIR}/debugSan From d99e6be4c1934f6365228c26042a5c2ce9bbbeae Mon Sep 17 00:00:00 2001 From: Yaming Pei Date: Wed, 14 Aug 2024 18:08:07 +0800 Subject: [PATCH 035/181] mod c sample code --- docs/examples/c/create_db_demo.c | 4 +++- docs/examples/c/insert_data_demo.c | 4 +++- docs/examples/c/query_data_demo.c | 11 ++++++++--- docs/examples/c/sml_insert_demo.c | 5 +++-- docs/examples/c/tmq_demo.c | 4 ---- docs/examples/c/with_reqid_demo.c | 11 ++++++++--- 6 files changed, 25 insertions(+), 14 deletions(-) diff --git a/docs/examples/c/create_db_demo.c b/docs/examples/c/create_db_demo.c index 7ac54041d8..44960defa5 100644 --- a/docs/examples/c/create_db_demo.c +++ b/docs/examples/c/create_db_demo.c @@ -73,4 +73,6 @@ static int DemoCreateDB() { // ANCHOR_END: create_db_and_table } -int main(int argc, char *argv[]) { return DemoCreateDB(); } +int main(int argc, char *argv[]) { + return DemoCreateDB(); +} diff --git a/docs/examples/c/insert_data_demo.c b/docs/examples/c/insert_data_demo.c index edc6b0b750..e880af0cd6 100644 --- a/docs/examples/c/insert_data_demo.c +++ b/docs/examples/c/insert_data_demo.c @@ -71,4 +71,6 @@ static int DemoInsertData() { // ANCHOR_END: insert_data } -int main(int argc, char *argv[]) { return DemoInsertData(); } +int main(int argc, char *argv[]) { + return DemoInsertData(); +} diff --git a/docs/examples/c/query_data_demo.c b/docs/examples/c/query_data_demo.c index f2c6662d9e..097a6b99b8 100644 --- a/docs/examples/c/query_data_demo.c +++ b/docs/examples/c/query_data_demo.c @@ -62,9 +62,12 @@ static int DemoQueryData() { // fetch the records row by row while ((row = taos_fetch_row(result))) { char temp[1024] = {0}; - rows++; - taos_print_row(temp, row, fields, num_fields); + if (taos_print_row(temp, row, fields, num_fields) < 0) { + printf("Failed to print row\n"); + break; + } printf("%s\n", temp); + rows++; } printf("total rows: %d\n", rows); taos_free_result(result); @@ -76,4 +79,6 @@ static int DemoQueryData() { // ANCHOR_END: query_data } -int main(int argc, char *argv[]) { return DemoQueryData(); } +int main(int argc, char *argv[]) { + return DemoQueryData(); +} diff --git a/docs/examples/c/sml_insert_demo.c b/docs/examples/c/sml_insert_demo.c index 1107734c78..9adbb28f87 100644 --- a/docs/examples/c/sml_insert_demo.c +++ b/docs/examples/c/sml_insert_demo.c @@ -48,7 +48,6 @@ static int DemoSmlInsert() { return -1; } taos_free_result(result); - printf("Create database power successfully.\n"); // use database result = taos_query(taos, "USE power"); @@ -136,4 +135,6 @@ static int DemoSmlInsert() { // ANCHOR_END: schemaless } -int main(int argc, char *argv[]) { return DemoSmlInsert(); } +int main(int argc, char *argv[]) { + return DemoSmlInsert(); +} diff --git a/docs/examples/c/tmq_demo.c b/docs/examples/c/tmq_demo.c index 55ec568dc5..e8e28e1df4 100644 --- a/docs/examples/c/tmq_demo.c +++ b/docs/examples/c/tmq_demo.c @@ -132,7 +132,6 @@ static int32_t init_env() { TAOS_RES* pRes; // drop database if exists - fprintf(stdout, "Create database.\n"); pRes = taos_query(pConn, "DROP TOPIC IF EXISTS topic_meters"); code = taos_errno(pRes); if (code != 0) { @@ -157,7 +156,6 @@ static int32_t init_env() { taos_free_result(pRes); // create super table - fprintf(stdout, "Create super table.\n"); pRes = taos_query( pConn, 
"CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS " @@ -179,7 +177,6 @@ END: } int32_t create_topic() { - fprintf(stdout, "Create topic.\n"); TAOS_RES* pRes; const char* host = "localhost"; const char* user = "root"; @@ -382,7 +379,6 @@ void manual_commit(tmq_t* tmq) { totalRows += msg_process(tmqmsg); // commit the message int32_t code = tmq_commit_sync(tmq, tmqmsg); - if (code) { fprintf(stderr, "Failed to commit message, ErrCode: 0x%x, ErrMessage: %s.\n", code, tmq_err2str(code)); // free the message diff --git a/docs/examples/c/with_reqid_demo.c b/docs/examples/c/with_reqid_demo.c index b3e4e79e39..c6e87686e9 100644 --- a/docs/examples/c/with_reqid_demo.c +++ b/docs/examples/c/with_reqid_demo.c @@ -62,9 +62,12 @@ static int DemoWithReqId() { // fetch the records row by row while ((row = taos_fetch_row(result))) { char temp[1024] = {0}; - rows++; - taos_print_row(temp, row, fields, num_fields); + if (taos_print_row(temp, row, fields, num_fields) < 0) { + printf("Failed to print row\n"); + break; + } printf("%s\n", temp); + rows++; } printf("total rows: %d\n", rows); taos_free_result(result); @@ -76,4 +79,6 @@ static int DemoWithReqId() { // ANCHOR_END: with_reqid } -int main(int argc, char *argv[]) { return DemoWithReqId(); } +int main(int argc, char *argv[]) { + return DemoWithReqId(); +} From 235a082406666c02b04162594f62a67256f4b415 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao> Date: Wed, 14 Aug 2024 18:17:22 +0800 Subject: [PATCH 036/181] fix issue --- source/libs/executor/inc/tfill.h | 6 +++--- source/libs/executor/src/filloperator.c | 8 ++++++-- source/libs/executor/src/tfill.c | 11 +++++------ 3 files changed, 14 insertions(+), 11 deletions(-) diff --git a/source/libs/executor/inc/tfill.h b/source/libs/executor/inc/tfill.h index b659c12315..b06aa7d1c8 100644 --- a/source/libs/executor/inc/tfill.h +++ b/source/libs/executor/inc/tfill.h @@ -128,9 +128,9 @@ SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfFillExpr, SExprIn int32_t numOfNotFillCols, const struct SNodeListNode* val); bool taosFillHasMoreResults(struct SFillInfo* pFillInfo); -void taosCreateFillInfo(TSKEY skey, int32_t numOfFillCols, int32_t numOfNotFillCols, int32_t capacity, - SInterval* pInterval, int32_t fillType, struct SFillColInfo* pCol, int32_t slotId, - int32_t order, const char* id, SExecTaskInfo* pTaskInfo, SFillInfo** ppFillInfo); +int32_t taosCreateFillInfo(TSKEY skey, int32_t numOfFillCols, int32_t numOfNotFillCols, int32_t capacity, + SInterval* pInterval, int32_t fillType, struct SFillColInfo* pCol, int32_t slotId, + int32_t order, const char* id, SExecTaskInfo* pTaskInfo, SFillInfo** ppFillInfo); void* taosDestroyFillInfo(struct SFillInfo* pFillInfo); int32_t taosFillResultDataBlock(struct SFillInfo* pFillInfo, SSDataBlock* p, int32_t capacity); diff --git a/source/libs/executor/src/filloperator.c b/source/libs/executor/src/filloperator.c index 34f174377b..0b66834d45 100644 --- a/source/libs/executor/src/filloperator.c +++ b/source/libs/executor/src/filloperator.c @@ -405,8 +405,12 @@ static int32_t initFillInfo(SFillOperatorInfo* pInfo, SExprInfo* pExpr, int32_t // STimeWindow w = {0}; // getInitialStartTimeWindow(pInterval, startKey, &w, order == TSDB_ORDER_ASC); pInfo->pFillInfo = NULL; - taosCreateFillInfo(startKey, numOfCols, numOfNotFillCols, capacity, pInterval, fillType, pColInfo, - pInfo->primaryTsCol, order, id, pTaskInfo, &pInfo->pFillInfo); + int32_t code = taosCreateFillInfo(startKey, numOfCols, numOfNotFillCols, 
capacity, pInterval, fillType, pColInfo, + pInfo->primaryTsCol, order, id, pTaskInfo, &pInfo->pFillInfo); + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code)); + return code; + } if (order == TSDB_ORDER_ASC) { pInfo->win.skey = win.skey; diff --git a/source/libs/executor/src/tfill.c b/source/libs/executor/src/tfill.c index c2ccdf62f5..e346946a7a 100644 --- a/source/libs/executor/src/tfill.c +++ b/source/libs/executor/src/tfill.c @@ -523,14 +523,14 @@ static int32_t taosNumOfRemainRows(SFillInfo* pFillInfo) { return pFillInfo->numOfRows - pFillInfo->index; } -void taosCreateFillInfo(TSKEY skey, int32_t numOfFillCols, int32_t numOfNotFillCols, int32_t capacity, - SInterval* pInterval, int32_t fillType, struct SFillColInfo* pCol, int32_t primaryTsSlotId, - int32_t order, const char* id, SExecTaskInfo* pTaskInfo, SFillInfo** ppFillInfo) { +int32_t taosCreateFillInfo(TSKEY skey, int32_t numOfFillCols, int32_t numOfNotFillCols, int32_t capacity, + SInterval* pInterval, int32_t fillType, struct SFillColInfo* pCol, int32_t primaryTsSlotId, + int32_t order, const char* id, SExecTaskInfo* pTaskInfo, SFillInfo** ppFillInfo) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; if (fillType == TSDB_FILL_NONE) { (*ppFillInfo) = NULL; - return; + return code; } SFillInfo* pFillInfo = taosMemoryCalloc(1, sizeof(SFillInfo)); @@ -572,10 +572,9 @@ _end: if (code != TSDB_CODE_SUCCESS) { taosArrayDestroy(pFillInfo->next.pRowVal); taosArrayDestroy(pFillInfo->prev.pRowVal); - terrno = code; - T_LONG_JMP(pTaskInfo->env, code); } (*ppFillInfo) = pFillInfo; + return code; } void taosResetFillInfo(SFillInfo* pFillInfo, TSKEY startTimestamp) { From 31628e00773b81a1dccdc1552e26858af695d0d6 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Wed, 14 Aug 2024 19:02:52 +0800 Subject: [PATCH 037/181] tetst: test for checking return value --- source/common/src/cos.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/source/common/src/cos.c b/source/common/src/cos.c index bc92527108..335c654acd 100644 --- a/source/common/src/cos.c +++ b/source/common/src/cos.c @@ -54,8 +54,6 @@ int32_t s3Begin() { void s3End() { S3_deinitialize(); } - - int32_t s3Init() { TAOS_RETURN(TSDB_CODE_SUCCESS); /*s3Begin();*/ } void s3CleanUp() { /*s3End();*/ From 0294a2d9d6d979b8d4016920c2c1b1616958a98e Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Wed, 14 Aug 2024 19:07:10 +0800 Subject: [PATCH 038/181] tetst: test for checking return value --- tests/ci/scan_file_path.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/scan_file_path.py b/tests/ci/scan_file_path.py index 81862363a0..2d4e701012 100644 --- a/tests/ci/scan_file_path.py +++ b/tests/ci/scan_file_path.py @@ -134,7 +134,7 @@ def input_files(change_files): for line in file: file_name = line.strip() if any(dir_name in file_name for dir_name in scan_dir_list): - if (file_name.endswith(".c") or line.endswith(".cpp")) and all(dir_name not in file_name for dir_name in scan_skip_file_list): + if (file_name.endswith(".c") or file_name.endswith(".cpp")) and all(dir_name not in file_name for dir_name in scan_skip_file_list): if "enterprise" in file_name: file_name = os.path.join(TD_project_path, file_name) else: From 70eb66604def51d4b312f9bde361ffe9f5db760a Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao> Date: Wed, 14 Aug 2024 19:11:34 +0800 Subject: [PATCH 039/181] fix issue --- source/libs/executor/src/timewindowoperator.c | 20 ++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git 
a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 5e5d6494cf..fa9dc79cc3 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -1230,7 +1230,7 @@ void destroyIntervalOperatorInfo(void* param) { } static bool timeWindowinterpNeeded(SqlFunctionCtx* pCtx, int32_t numOfCols, SIntervalAggOperatorInfo* pInfo, - SExecTaskInfo* pTaskInfo) { + bool* pRes) { // the primary timestamp column bool needed = false; int32_t code = TSDB_CODE_SUCCESS; @@ -1298,10 +1298,9 @@ static bool timeWindowinterpNeeded(SqlFunctionCtx* pCtx, int32_t numOfCols, SInt _end: if (code != TSDB_CODE_SUCCESS) { qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); - pTaskInfo->code = code; - T_LONG_JMP(pTaskInfo->env, code); } - return needed; + *pRes = needed; + return code; } int32_t createIntervalOperatorInfo(SOperatorInfo* downstream, SIntervalPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, @@ -1390,7 +1389,10 @@ int32_t createIntervalOperatorInfo(SOperatorInfo* downstream, SIntervalPhysiNode code = initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pInfo->win); QUERY_CHECK_CODE(code, lino, _error); - pInfo->timeWindowInterpo = timeWindowinterpNeeded(pSup->pCtx, num, pInfo, pTaskInfo); + + pInfo->timeWindowInterpo = false; + code = timeWindowinterpNeeded(pSup->pCtx, num, pInfo, &pInfo->timeWindowInterpo); + QUERY_CHECK_CODE(code, lino, _error); if (pInfo->timeWindowInterpo) { pInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SOpenWindowInfo)); if (pInfo->binfo.resultRowInfo.openWindow == NULL) { @@ -2085,7 +2087,9 @@ int32_t createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, SMerge code = initExecTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &iaInfo->win); QUERY_CHECK_CODE(code, lino, _error); - iaInfo->timeWindowInterpo = timeWindowinterpNeeded(pSup->pCtx, num, iaInfo, pTaskInfo); + iaInfo->timeWindowInterpo = false; + code = timeWindowinterpNeeded(pSup->pCtx, num, iaInfo, &iaInfo->timeWindowInterpo); + QUERY_CHECK_CODE(code, lino, _error); if (iaInfo->timeWindowInterpo) { iaInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SOpenWindowInfo)); } @@ -2416,7 +2420,9 @@ int32_t createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SMergeInterva code = initExecTimeWindowInfo(&pIntervalInfo->twAggSup.timeWindowData, &pIntervalInfo->win); QUERY_CHECK_CODE(code, lino, _error); - pIntervalInfo->timeWindowInterpo = timeWindowinterpNeeded(pExprSupp->pCtx, num, pIntervalInfo, pTaskInfo); + pIntervalInfo->timeWindowInterpo = false; + code = timeWindowinterpNeeded(pExprSupp->pCtx, num, pIntervalInfo, &pIntervalInfo->timeWindowInterpo); + QUERY_CHECK_CODE(code, lino, _error); if (pIntervalInfo->timeWindowInterpo) { pIntervalInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SOpenWindowInfo)); if (pIntervalInfo->binfo.resultRowInfo.openWindow == NULL) { From e599f68480fe0da2766d6d4964ea40632dd11fa8 Mon Sep 17 00:00:00 2001 From: sheyanjie-qq <249478495@qq.com> Date: Wed, 14 Aug 2024 19:46:39 +0800 Subject: [PATCH 040/181] fix link error --- docs/zh/08-develop/01-connect/index.md | 2 +- docs/zh/26-tdinternal/03-storage.md | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/zh/08-develop/01-connect/index.md b/docs/zh/08-develop/01-connect/index.md index 280e31b2a6..092a50869a 100644 --- a/docs/zh/08-develop/01-connect/index.md +++ b/docs/zh/08-develop/01-connect/index.md @@ -99,7 +99,7 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速 - **安装前准备** - 安装 
Python。新近版本 taospy 包要求 Python 3.6.2+。早期版本 taospy 包要求 Python 3.7+。taos-ws-py 包要求 Python 3.7+。如果系统上还没有 Python 可参考 [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) 安装。 - 安装 [pip](https://pypi.org/project/pip/)。大部分情况下 Python 的安装包都自带了 pip 工具, 如果没有请参考 [pip documentation](https://pip.pypa.io/en/stable/installation/) 安装。 - - 如果使用原生连接,还需[安装客户端驱动](../#安装客户端驱动)。客户端软件包含了 TDengine 客户端动态链接库(libtaos.so 或 taos.dll) 和 TDengine CLI。 + - 如果使用原生连接,还需[安装客户端驱动](../connect/#安装客户端驱动-taosc)。客户端软件包含了 TDengine 客户端动态链接库(libtaos.so 或 taos.dll) 和 TDengine CLI。 - **使用 pip 安装** - 卸载旧版本 diff --git a/docs/zh/26-tdinternal/03-storage.md b/docs/zh/26-tdinternal/03-storage.md index f65f06e85b..e402babdd7 100644 --- a/docs/zh/26-tdinternal/03-storage.md +++ b/docs/zh/26-tdinternal/03-storage.md @@ -101,7 +101,7 @@ head 文件是时序数据存储文件(data 文件)的 BRIN(Block Range In head 文件中存储了多个 BRIN 记录块及其索引。BRIN 记录块采用列存压缩的方式,这种方式可以大大减少空间占用,同时保持较高的查询性能。BRIN 索引结构如下图所示: -![BRIN 索引结构](./brin.png) +![BRIN 索引结构](./brin.png) #### data 文件 @@ -121,4 +121,4 @@ data 文件是实际存储时序数据的文件。在 data 文件中,时序数 在少表高频的场景下,系统仅维护一个 stt 文件。该文件专门用于存储每次数据落盘后剩余的碎片数据。这样,在下一次数据落盘时,这些碎片数据可以与内存中的新数据合并,形成较大的数据块,随后一并写入 data 文件中。这种机制有效地避免了数据文件的碎片化,确保了数据存储的连续性和高效性。 -对于多表低频的场景,建议配置多个 stt 文件。这种场景下的核心思想是,尽管单张表每次落盘的数据量可能不大,但同一超级表下的所有子表累积的数据量却相当可观。通过合并这些数据,可以生成较大的数据块,从而减少数据块的碎片化。这不仅提升了数据的写入效率,还能显著提高查询性能,因为连续的数据存储更有利于快速的数据检索和访问。 \ No newline at end of file +对于多表低频的场景,建议配置多个 stt 文件。这种场景下的核心思想是,尽管单张表每次落盘的数据量可能不大,但同一超级表下的所有子表累积的数据量却相当可观。通过合并这些数据,可以生成较大的数据块,从而减少数据块的碎片化。这不仅提升了数据的写入效率,还能显著提高查询性能,因为连续的数据存储更有利于快速的数据检索和访问。 From 2b40ecf71892604fa4ba7a0c6228d7819bcc6162 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 14 Aug 2024 20:41:28 +0800 Subject: [PATCH 041/181] fix meta deadlock --- source/dnode/vnode/src/meta/metaQuery.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index 3abd185f0f..47ed7743cb 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -280,7 +280,11 @@ int32_t metaResumeTbCursor(SMTbCursor *pTbCur, int8_t first, int8_t move) { metaReaderDoInit(&pTbCur->mr, pTbCur->pMeta, META_READER_LOCK); code = tdbTbcOpen(((SMeta *)pTbCur->pMeta)->pUidIdx, (TBC **)&pTbCur->pDbc, NULL); - TSDB_CHECK_CODE(code, lino, _exit); + if (code != 0) { + metaReaderReleaseLock(&pTbCur->mr); + pTbCur->paused = 1; + TSDB_CHECK_CODE(code, lino, _exit); + } if (first) { code = tdbTbcMoveToFirst((TBC *)pTbCur->pDbc); From ecb48dd31707aa81ee75643bb8b64259e96da461 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 14 Aug 2024 21:09:24 +0800 Subject: [PATCH 042/181] fix meta deadlock --- source/dnode/vnode/src/meta/metaQuery.c | 4 ++-- source/libs/executor/src/sysscanoperator.c | 17 ++++++++++++++--- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index 47ed7743cb..7d7b12bef3 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -282,7 +282,6 @@ int32_t metaResumeTbCursor(SMTbCursor *pTbCur, int8_t first, int8_t move) { code = tdbTbcOpen(((SMeta *)pTbCur->pMeta)->pUidIdx, (TBC **)&pTbCur->pDbc, NULL); if (code != 0) { metaReaderReleaseLock(&pTbCur->mr); - pTbCur->paused = 1; TSDB_CHECK_CODE(code, lino, _exit); } @@ -1311,7 +1310,8 @@ int32_t metaFilterTableIds(void *pVnode, SMetaFltParam *arg, SArray *pUids) { } TAOS_CHECK_GOTO(metaCreateTagIdxKey(pCursor->suid, pCursor->cid, tagData, 
nTagData, pCursor->type, - param->reverse ? INT64_MAX : INT64_MIN, &pKey, &nKey), NULL, END); + param->reverse ? INT64_MAX : INT64_MIN, &pKey, &nKey), + NULL, END); int cmp = 0; TAOS_CHECK_GOTO(tdbTbcMoveTo(pCursor->pCur, pKey, nKey, &cmp), 0, END); diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c index e11ee6b0dc..68be3b4ae8 100644 --- a/source/libs/executor/src/sysscanoperator.c +++ b/source/libs/executor/src/sysscanoperator.c @@ -568,7 +568,12 @@ static SSDataBlock* sysTableScanUserCols(SOperatorInfo* pOperator) { if (pInfo->pCur == NULL) { pInfo->pCur = pAPI->metaFn.openTableMetaCursor(pInfo->readHandle.vnode); } else { - (void)pAPI->metaFn.resumeTableMetaCursor(pInfo->pCur, 0, 0); + code = pAPI->metaFn.resumeTableMetaCursor(pInfo->pCur, 0, 0); + if (code != 0) { + pAPI->metaFn.closeTableMetaCursor(pInfo->pCur); + pInfo->pCur = NULL; + QUERY_CHECK_CODE(code, lino, _end); + } } if (pInfo->pSchema == NULL) { @@ -782,7 +787,8 @@ static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) { pInfo->pCur = pAPI->metaFn.openTableMetaCursor(pInfo->readHandle.vnode); QUERY_CHECK_NULL(pInfo->pCur, code, lino, _end, terrno); } else { - (void)pAPI->metaFn.resumeTableMetaCursor(pInfo->pCur, 0, 0); + code = pAPI->metaFn.resumeTableMetaCursor(pInfo->pCur, 0, 0); + QUERY_CHECK_CODE(code, lino, _end); } while ((ret = pAPI->metaFn.cursorNext(pInfo->pCur, TSDB_SUPER_TABLE)) == 0) { @@ -1583,7 +1589,12 @@ static SSDataBlock* sysTableBuildUserTables(SOperatorInfo* pOperator) { firstMetaCursor = 1; } if (!firstMetaCursor) { - (void)pAPI->metaFn.resumeTableMetaCursor(pInfo->pCur, 0, 1); + code = pAPI->metaFn.resumeTableMetaCursor(pInfo->pCur, 0, 1); + if (code != 0) { + pAPI->metaFn.closeTableMetaCursor(pInfo->pCur); + pInfo->pCur = NULL; + QUERY_CHECK_CODE(code, lino, _end); + } } blockDataCleanup(pInfo->pRes); From 92704f8b7add1cdc5ce20c9ef0e92e2ece11d96b Mon Sep 17 00:00:00 2001 From: kailixu Date: Wed, 14 Aug 2024 22:06:18 +0800 Subject: [PATCH 043/181] fix: check error code --- source/dnode/vnode/src/tsdb/tsdbCache.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index 46a498409b..0b020830e1 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -2891,11 +2891,13 @@ static int32_t nextRowIterGet(CacheNextRowIter *pIter, TSDBROW **ppRow, bool *pI if (!pIter->pSkyline) { pIter->pSkyline = taosArrayInit(32, sizeof(TSDBKEY)); + TSDB_CHECK_NULL(pIter->pSkyline, code, lino, _err, TSDB_CODE_OUT_OF_MEMORY); uint64_t uid = pIter->idx.uid; STableLoadInfo *pInfo = getTableLoadInfo(pIter->pr, uid); if (pInfo->pTombData == NULL) { pInfo->pTombData = taosArrayInit(4, sizeof(SDelData)); + TSDB_CHECK_NULL(pInfo->pTombData, code, lino, _err, TSDB_CODE_OUT_OF_MEMORY); } (void)taosArrayAddAll(pInfo->pTombData, pIter->pMemDelData); @@ -2903,6 +2905,7 @@ static int32_t nextRowIterGet(CacheNextRowIter *pIter, TSDBROW **ppRow, bool *pI size_t delSize = TARRAY_SIZE(pInfo->pTombData); if (delSize > 0) { code = tsdbBuildDeleteSkyline(pInfo->pTombData, 0, (int32_t)(delSize - 1), pIter->pSkyline); + TAOS_CHECK_GOTO(code, &lino, _err); } pIter->iSkyline = taosArrayGetSize(pIter->pSkyline) - 1; } From 1059650e573a9a8795574449f04449868edd9dd5 Mon Sep 17 00:00:00 2001 From: kailixu Date: Wed, 14 Aug 2024 22:20:03 +0800 Subject: [PATCH 044/181] enh: code optimization --- source/dnode/vnode/src/tsdb/tsdbCache.c | 6 +----- 1 file changed, 1 
insertion(+), 5 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index cc5759a151..aa92597211 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -1440,11 +1440,7 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr int lastrowIndex = 0; if (!slotIds || !lastColIds || !lastSlotIds || !lastrowColIds || !lastrowSlotIds) { - taosMemoryFree(slotIds); - taosMemoryFree(lastColIds); - taosMemoryFree(lastSlotIds); - taosMemoryFree(lastrowColIds); - TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY); + TAOS_CHECK_EXIT(TSDB_CODE_OUT_OF_MEMORY); } for (int i = 0; i < num_keys; ++i) { From 57f5e11adaba0524cc20c0aa21c54be2736e42c7 Mon Sep 17 00:00:00 2001 From: zhiyong Date: Thu, 15 Aug 2024 08:22:13 +0800 Subject: [PATCH 045/181] Consistent writing --- docs/zh/03-intro.md | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/docs/zh/03-intro.md b/docs/zh/03-intro.md index 47d00cf01a..0167f9323b 100644 --- a/docs/zh/03-intro.md +++ b/docs/zh/03-intro.md @@ -24,11 +24,11 @@ TDengine 经过特别优化,以适应时间序列数据的独特需求,引 1. 写入数据:TDengine 支持多种数据写入方式。首先,它完全兼容 SQL,允许用户使用标准的 SQL 语法进行数据写入。而且 TDengine 还支持无模式(Schemaless)写入,包括流行的 InfluxDB Line 协议、OpenTSDB 的 Telnet 和 JSON 协议,这些协议的加入使得数据的导入变得更加灵活和高效。更进一步,TDengine 与众多第三方工具实现了无缝集成,例如 Telegraf、Prometheus、EMQX、StatsD、collectd 和 HiveMQ 等。在 TDengine Enterprise 中, 还提供了 MQTT、OPC-UA、OPC-DA、PI、Wonderware、Kafka、InfluxDB、OpenTSDB、MySQL、Oracle 和 SQL Server 等连接器。这些工具通过简单的配置,无需一行代码,就可以将来自各种数据源的数据源源不断的写入数据库,极大地简化了数据收集和存储的过程。 -2. 查询数据:TDengine 提供标准的 SQL 查询语法,并针对时序数据和业务的特点优化和新增了许多语法和功能,例如降采样、插值、累计求和、时间加权平均、状态窗口、时间窗口、会话窗口、滑动窗口等。TDengine 还支持用户自定义函数(UDF) +2. 查询数据:TDengine 提供标准的 SQL 查询语法,并针对时序数据和业务的特点优化和新增了许多语法和功能,例如降采样、插值、累计求和、时间加权平均、状态窗口、时间窗口、会话窗口、滑动窗口等。TDengine 还支持用户自定义函数(UDF)。 3. 缓存:TDengine 使用时间驱动缓存管理策略(First-In-First-Out,FIFO),将最近到达的(当前状态)数据保存在缓存中,这样便于获取任何监测对象的实时状态,而无需使用 Redis 等其他缓存工具,简化系统架构和运营成本。 -4. 流式计算:TDengine 流式计算引擎提供了实时处理写入的数据流的能力,不仅支持连续查询,还支持事件驱动的流式计算。它提供了替代复杂流处理系统的轻量级解决方案,并能够在高吞吐的数据写入的情况下,提供毫秒级的计算结果延迟 +4. 流式计算:TDengine 流式计算引擎提供了实时处理写入的数据流的能力,不仅支持连续查询,还支持事件驱动的流式计算。它提供了替代复杂流处理系统的轻量级解决方案,并能够在高吞吐的数据写入的情况下,提供毫秒级的计算结果延迟。 5. 数据订阅:TDengine 提供了类似 Kafka 的数据订阅功能。但用户可以通过 SQL 来灵活控制订阅的数据内容,并使用 Kafka 相同的 API 来订阅一张表、一组表、全部列或部分列、甚至整个数据库的数据。TDengine 可以替代需要集成消息队列产品的场景, 从而简化系统设计的复杂度,降低运营维护成本。 @@ -38,13 +38,13 @@ TDengine 经过特别优化,以适应时间序列数据的独特需求,引 8. 数据迁移:TDengine 提供了多种便捷的数据导入导出功能,包括脚本文件导入导出、数据文件导入导出、taosdump 工具导入导出等。 -9. 编程连接器:TDengine 提供不同语言的连接器,包括 C/C++、Java、Go、Node.js、Rust、Python、C#、R、PHP 等。这些连接器大多都支持原生连接和 WebSocket 两种连接方式。TDengine 也提供 REST 接口,任何语言的应用程序可以直接通过 HTTP 请求访问数据库。 +9. 编程连接器:TDengine 提供不同语言的连接器,包括 C/C++、Java、Go、Node.js、Rust、Python、C#、R、PHP 等。这些连接器大多都支持原生连接和 WebSocket 两种连接方式。TDengine 也提供 RESTful 接口,任何语言的应用程序可以直接通过 HTTP 请求访问数据库。 10. 数据安全:TDengine 提供了丰富的用户管理和权限管理功能以控制不同用户对数据库和表的访问权限,提供了 IP 白名单功能以控制不同帐号只能从特定的服务器接入集群。TDengine 支持系统管理员对不同数据库按需加密,数据加密后对读写完全透明且对性能的影响很小。还提供了审计日志功能以记录系统中的敏感操作。 11. 常用工具:TDengine 还提供了交互式命令行程序(CLI),便于管理集群、检查系统状态、做即时查询。压力测试工具 taosBenchmark,用于测试 TDengine 的性能。TDengine 还提供了图形化管理界面,简化了操作和管理过程。 -12. 零代码数据接入:TDengine 企业版提供了丰富的数据接入功能,依托强大的数据接入平台,无需一行代码,只需要做简单的配置即可实现多种数据源的数据接入,目前已经支持的数据源包括:OPC UA, OPC DA, Pi, MQTT, Kafka, InfluxDB, OpenTSDB, MySql, SQL Server, Oracle, Wonderware Historian, MongoDB。 +12. 
零代码数据接入:TDengine 企业版提供了丰富的数据接入功能,依托强大的数据接入平台,无需一行代码,只需要做简单的配置即可实现多种数据源的数据接入,目前已经支持的数据源包括:OPC-UA、OPC-DA、PI、MQTT、Kafka、InfluxDB、OpenTSDB、MySQL、SQL Server、Oracle、Wonderware Historian、MongoDB。 ## TDengine 与典型时序数据库的区别 @@ -58,16 +58,14 @@ TDengine 经过特别优化,以适应时间序列数据的独特需求,引 4. 强大的分析能力:TDengine 不仅支持标准 SQL 查询,还为时序数据特有的分析提供了 SQL 扩展。通过超级表、存储计算分离、分区分片、预计算、UDF 等先进技术,TDengine 展现出强大的数据分析能力。 -5. 简单易用:TDengine 安装无依赖,集群部署仅需几秒即可完成。它提供了 REST ful接口和多种编程语言的连接器,与众多第三方工具无缝集成。此外,命令行程序和丰富的运维工具也极大地方便了用户的管理和即时查询需求。 +5. 简单易用:TDengine 安装无依赖,集群部署仅需几秒即可完成。它提供了 RESTful 接口和多种编程语言的连接器,与众多第三方工具无缝集成。此外,命令行程序和丰富的运维工具也极大地方便了用户的管理和即时查询需求。 -6. 核心开源:TDengine 的核心代码,包括集群功能,均在开源协议下公开发布。它在GitHub 网站全球趋势排行榜上多次位居榜首,显示出其受欢迎程度。同时,TDengine -拥有一个活跃的开发者社区,为技术的持续发展和创新提供了有力支持。 +6. 核心开源:TDengine 的核心代码,包括集群功能,均在开源协议下公开发布。它在 GitHub 网站全球趋势排行榜上多次位居榜首,显示出其受欢迎程度。同时,TDengine 拥有一个活跃的开发者社区,为技术的持续发展和创新提供了有力支持。 采用 TDengine,企业可以在物联网、车联网、工业互联网等典型场景中显著降低大数据平台的总拥有成本,主要体现在以下几个方面: 1. 高性能带来的成本节约:TDengine 卓越的写入、查询和存储性能意味着系统所需的计算资源和存储资源可以大幅度减少。这不仅降低了硬件成本,还减少了能源消耗和维护费用。 2. 标准化与兼容性带来的成本效益:由于 TDengine 支持标准 SQL,并与众多第三方软件实现了无缝集成,用户可以轻松地将现有系统迁移到 TDengine 上,无须重写大量代码。这种标准化和兼容性大大降低了学习和迁移成本,缩短了项目周期。 -3. 简化系统架构带来的成本降低:作为一个极简的时序数据平台,TDengine 集成了消息队列、缓存、流计算等必要功能,避免了额外集成众多其他组件的需要。这 -种简化的系统架构显著降低了系统的复杂度,从而减少了研发和运营成本,提高了整体运营效率。 +3. 简化系统架构带来的成本降低:作为一个极简的时序数据平台,TDengine 集成了消息队列、缓存、流计算等必要功能,避免了额外集成众多其他组件的需要。这种简化的系统架构显著降低了系统的复杂度,从而减少了研发和运营成本,提高了整体运营效率。 ## 技术生态 From bc2cbea8d12d7f0ec195eec6d60140bc592cb029 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 15 Aug 2024 09:19:34 +0800 Subject: [PATCH 046/181] fix meta deadlock --- source/dnode/vnode/src/meta/metaQuery.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index 7d7b12bef3..7e9ce5b61f 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -275,13 +275,12 @@ void metaPauseTbCursor(SMTbCursor *pTbCur) { int32_t metaResumeTbCursor(SMTbCursor *pTbCur, int8_t first, int8_t move) { int32_t code = 0; int32_t lino; - + int8_t locked = 0; if (pTbCur->paused) { metaReaderDoInit(&pTbCur->mr, pTbCur->pMeta, META_READER_LOCK); - + locked = 1; code = tdbTbcOpen(((SMeta *)pTbCur->pMeta)->pUidIdx, (TBC **)&pTbCur->pDbc, NULL); if (code != 0) { - metaReaderReleaseLock(&pTbCur->mr); TSDB_CHECK_CODE(code, lino, _exit); } @@ -307,6 +306,9 @@ int32_t metaResumeTbCursor(SMTbCursor *pTbCur, int8_t first, int8_t move) { } _exit: + if (locked) { + metaReaderReleaseLock(&pTbCur->mr); + } return code; } @@ -794,6 +796,7 @@ void metaCloseSmaCursor(SMSmaCursor *pSmaCur) { if (pSmaCur->pMeta) metaULock(pSmaCur->pMeta); if (pSmaCur->pCur) { (void)tdbTbcClose(pSmaCur->pCur); + pSmaCur->pCur = NULL; tdbFree(pSmaCur->pKey); tdbFree(pSmaCur->pVal); From b9d085764f5e95d4ffda87b05f22e9f8f00bf2f8 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 15 Aug 2024 09:23:19 +0800 Subject: [PATCH 047/181] fix meta deadlock --- source/dnode/vnode/src/meta/metaQuery.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index 7e9ce5b61f..bee4727260 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -306,7 +306,7 @@ int32_t metaResumeTbCursor(SMTbCursor *pTbCur, int8_t first, int8_t move) { } _exit: - if (locked) { + if (code != 0 && locked) { metaReaderReleaseLock(&pTbCur->mr); } return code; From 81eca8fadb64c1b4539d56adcda00f45b6811aee Mon Sep 17 00:00:00 2001 From: t_max 
<1172915550@qq.com> Date: Thu, 15 Aug 2024 10:01:13 +0800 Subject: [PATCH 048/181] docs: the download address uses absolute url --- docs/zh/04-get-started/03-package.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/04-get-started/03-package.md b/docs/zh/04-get-started/03-package.md index 4906a2fcfa..7df41af831 100644 --- a/docs/zh/04-get-started/03-package.md +++ b/docs/zh/04-get-started/03-package.md @@ -146,7 +146,7 @@ Note: 从 3.0.1.7 开始,只提供 TDengine 客户端的 Windows 客户端的 :::info -下载其他组件、最新 Beta 版及之前版本的安装包,请点击[发布历史页面](../../releases/tdengine)。 +下载其他组件、最新 Beta 版及之前版本的安装包,请点击[发布历史页面](https://docs.taosdata.com/releases/tdengine/)。 ::: :::note From 0c025d54a70b011e195949629e4bc3b750252ea9 Mon Sep 17 00:00:00 2001 From: t_max <1172915550@qq.com> Date: Thu, 15 Aug 2024 10:01:13 +0800 Subject: [PATCH 049/181] docs: the download address uses absolute url --- docs/zh/04-get-started/03-package.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/04-get-started/03-package.md b/docs/zh/04-get-started/03-package.md index 4906a2fcfa..7df41af831 100644 --- a/docs/zh/04-get-started/03-package.md +++ b/docs/zh/04-get-started/03-package.md @@ -146,7 +146,7 @@ Note: 从 3.0.1.7 开始,只提供 TDengine 客户端的 Windows 客户端的 :::info -下载其他组件、最新 Beta 版及之前版本的安装包,请点击[发布历史页面](../../releases/tdengine)。 +下载其他组件、最新 Beta 版及之前版本的安装包,请点击[发布历史页面](https://docs.taosdata.com/releases/tdengine/)。 ::: :::note From 2fec464eb246a881a1a0f80cc3bef053792c206e Mon Sep 17 00:00:00 2001 From: sheyanjie-qq <249478495@qq.com> Date: Wed, 14 Aug 2024 19:46:39 +0800 Subject: [PATCH 050/181] fix link error --- docs/zh/08-develop/01-connect/index.md | 2 +- docs/zh/26-tdinternal/03-storage.md | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/zh/08-develop/01-connect/index.md b/docs/zh/08-develop/01-connect/index.md index 755a9e7f74..160e4cd40c 100644 --- a/docs/zh/08-develop/01-connect/index.md +++ b/docs/zh/08-develop/01-connect/index.md @@ -99,7 +99,7 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速 - **安装前准备** - 安装 Python。新近版本 taospy 包要求 Python 3.6.2+。早期版本 taospy 包要求 Python 3.7+。taos-ws-py 包要求 Python 3.7+。如果系统上还没有 Python 可参考 [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) 安装。 - 安装 [pip](https://pypi.org/project/pip/)。大部分情况下 Python 的安装包都自带了 pip 工具, 如果没有请参考 [pip documentation](https://pip.pypa.io/en/stable/installation/) 安装。 - - 如果使用原生连接,还需[安装客户端驱动](../#安装客户端驱动)。客户端软件包含了 TDengine 客户端动态链接库(libtaos.so 或 taos.dll) 和 TDengine CLI。 + - 如果使用原生连接,还需[安装客户端驱动](../connect/#安装客户端驱动-taosc)。客户端软件包含了 TDengine 客户端动态链接库(libtaos.so 或 taos.dll) 和 TDengine CLI。 - **使用 pip 安装** - 卸载旧版本 diff --git a/docs/zh/26-tdinternal/03-storage.md b/docs/zh/26-tdinternal/03-storage.md index f65f06e85b..e402babdd7 100644 --- a/docs/zh/26-tdinternal/03-storage.md +++ b/docs/zh/26-tdinternal/03-storage.md @@ -101,7 +101,7 @@ head 文件是时序数据存储文件(data 文件)的 BRIN(Block Range In head 文件中存储了多个 BRIN 记录块及其索引。BRIN 记录块采用列存压缩的方式,这种方式可以大大减少空间占用,同时保持较高的查询性能。BRIN 索引结构如下图所示: -![BRIN 索引结构](./brin.png) +![BRIN 索引结构](./brin.png) #### data 文件 @@ -121,4 +121,4 @@ data 文件是实际存储时序数据的文件。在 data 文件中,时序数 在少表高频的场景下,系统仅维护一个 stt 文件。该文件专门用于存储每次数据落盘后剩余的碎片数据。这样,在下一次数据落盘时,这些碎片数据可以与内存中的新数据合并,形成较大的数据块,随后一并写入 data 文件中。这种机制有效地避免了数据文件的碎片化,确保了数据存储的连续性和高效性。 -对于多表低频的场景,建议配置多个 stt 文件。这种场景下的核心思想是,尽管单张表每次落盘的数据量可能不大,但同一超级表下的所有子表累积的数据量却相当可观。通过合并这些数据,可以生成较大的数据块,从而减少数据块的碎片化。这不仅提升了数据的写入效率,还能显著提高查询性能,因为连续的数据存储更有利于快速的数据检索和访问。 \ No newline at end of file +对于多表低频的场景,建议配置多个 stt 
文件。这种场景下的核心思想是,尽管单张表每次落盘的数据量可能不大,但同一超级表下的所有子表累积的数据量却相当可观。通过合并这些数据,可以生成较大的数据块,从而减少数据块的碎片化。这不仅提升了数据的写入效率,还能显著提高查询性能,因为连续的数据存储更有利于快速的数据检索和访问。 From cc8bff453a1e0fa14a0b8aad6c26a0de67305262 Mon Sep 17 00:00:00 2001 From: kailixu Date: Thu, 15 Aug 2024 10:45:16 +0800 Subject: [PATCH 051/181] fix(insert): return error when parsing csv file --- source/libs/parser/src/parInsertSql.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/libs/parser/src/parInsertSql.c b/source/libs/parser/src/parInsertSql.c index 70bd43559c..cb94cd42f7 100644 --- a/source/libs/parser/src/parInsertSql.c +++ b/source/libs/parser/src/parInsertSql.c @@ -2238,6 +2238,8 @@ static int32_t parseDataFromFileImpl(SInsertParseContext* pCxt, SVnodeModifyOpSt if (pStmt->insertType != TSDB_QUERY_TYPE_FILE_INSERT) { return buildSyntaxErrMsg(&pCxt->msg, "keyword VALUES or FILE is exclusive", NULL); } + } else { + return buildInvalidOperationMsg(&pCxt->msg, tstrerror(code)); } // just record pTableCxt whose data come from file From 5589799e90a7dce1a7ab013d9536d923961bb209 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao> Date: Thu, 15 Aug 2024 10:58:53 +0800 Subject: [PATCH 052/181] fix issue --- source/libs/executor/src/tfill.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/libs/executor/src/tfill.c b/source/libs/executor/src/tfill.c index e346946a7a..a7e2ea3429 100644 --- a/source/libs/executor/src/tfill.c +++ b/source/libs/executor/src/tfill.c @@ -570,8 +570,8 @@ int32_t taosCreateFillInfo(TSKEY skey, int32_t numOfFillCols, int32_t numOfNotFi _end: if (code != TSDB_CODE_SUCCESS) { - taosArrayDestroy(pFillInfo->next.pRowVal); - taosArrayDestroy(pFillInfo->prev.pRowVal); + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + pFillInfo = taosDestroyFillInfo(pFillInfo); } (*ppFillInfo) = pFillInfo; return code; From b85dba328c5f823921321997a110b36939ffd52c Mon Sep 17 00:00:00 2001 From: dmchen Date: Thu, 15 Aug 2024 03:55:20 +0000 Subject: [PATCH 053/181] fix/TD-30849 --- include/libs/wal/wal.h | 6 +++++- source/dnode/mgmt/mgmt_mnode/src/mmInt.c | 2 +- source/dnode/mgmt/mgmt_vnode/src/vmInt.c | 5 ++--- source/dnode/mgmt/node_mgmt/src/dmEnv.c | 1 + source/dnode/mgmt/node_util/inc/dmUtil.h | 2 ++ source/dnode/mnode/impl/src/mndDump.c | 2 +- source/dnode/vnode/inc/vnode.h | 4 +++- source/dnode/vnode/src/vnd/vnodeModule.c | 4 ++-- source/libs/wal/src/walMgmt.c | 10 +++++++++- source/libs/wal/src/walWrite.c | 15 +++++++++++++++ 10 files changed, 41 insertions(+), 10 deletions(-) diff --git a/include/libs/wal/wal.h b/include/libs/wal/wal.h index 66ea5ea5c7..f74e26eeda 100644 --- a/include/libs/wal/wal.h +++ b/include/libs/wal/wal.h @@ -95,6 +95,7 @@ typedef struct { } SWalCkHead; #pragma pack(pop) +typedef void (*stopDnodeFn)(); typedef struct SWal { // cfg SWalCfg cfg; @@ -119,6 +120,9 @@ typedef struct SWal { char path[WAL_PATH_LEN]; // reusable write head SWalCkHead writeHead; + + stopDnodeFn stopDnode; + } SWal; typedef struct { @@ -152,7 +156,7 @@ typedef struct SWalReader { } SWalReader; // module initialization -int32_t walInit(); +int32_t walInit(stopDnodeFn stopDnode); void walCleanUp(); // handle open and ctl diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmInt.c b/source/dnode/mgmt/mgmt_mnode/src/mmInt.c index 20802e33d9..48606b2ed9 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmInt.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmInt.c @@ -85,7 +85,7 @@ static int32_t mndOpenWrapper(const char *path, SMnodeOpt *opt, SMnode **pMnode) } static int32_t mmOpen(SMgmtInputOpt 
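/* Big-picture note on this patch: it threads a "stop dnode" callback from the dnode layer
 * down into the WAL so that unrecoverable WAL write failures can halt the dnode. Sketch of
 * the call chain as introduced here (all names come from this patch):
 *
 *   dmBuildMgmtInputOpt():           .stopDnodeFp = dmStop
 *   mmOpen() / vmInit():             walInit(pInput->stopDnodeFp)   // vmInit also passes it to vnodeInit()
 *   walInit(stopDnode):              tsWal.stopDnode = stopDnode
 *   walOpen():                       pWal->stopDnode = tsWal.stopDnode
 *   walWriteIndex()/walWriteImpl():  on write error, if (pWal->stopDnode != NULL) pWal->stopDnode();
 */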
*pInput, SMgmtOutputOpt *pOutput) { int32_t code = 0; - if ((code = walInit()) != 0) { + if ((code = walInit(pInput->stopDnodeFp)) != 0) { dError("failed to init wal since %s", tstrerror(code)); return code; } diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index 6bc0b5fe93..e599676cec 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -624,8 +624,7 @@ static int32_t vmInit(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { goto _OVER; } tmsgReportStartup("vnode-tfs", "initialized"); - - if ((code = walInit()) != 0) { + if ((code = walInit(pInput->stopDnodeFp)) != 0) { dError("failed to init wal since %s", tstrerror(code)); goto _OVER; } @@ -638,7 +637,7 @@ static int32_t vmInit(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { } tmsgReportStartup("vnode-sync", "initialized"); - if ((code = vnodeInit(tsNumOfCommitThreads)) != 0) { + if ((code = vnodeInit(tsNumOfCommitThreads, pInput->stopDnodeFp)) != 0) { dError("failed to init vnode since %s", tstrerror(code)); goto _OVER; } diff --git a/source/dnode/mgmt/node_mgmt/src/dmEnv.c b/source/dnode/mgmt/node_mgmt/src/dmEnv.c index 9819c4f64e..0a75847d96 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmEnv.c +++ b/source/dnode/mgmt/node_mgmt/src/dmEnv.c @@ -414,6 +414,7 @@ SMgmtInputOpt dmBuildMgmtInputOpt(SMgmtWrapper *pWrapper) { .getVnodeLoadsLiteFp = dmGetVnodeLoadsLite, .getMnodeLoadsFp = dmGetMnodeLoads, .getQnodeLoadsFp = dmGetQnodeLoads, + .stopDnodeFp = dmStop, }; opt.msgCb = dmGetMsgcb(pWrapper->pDnode); diff --git a/source/dnode/mgmt/node_util/inc/dmUtil.h b/source/dnode/mgmt/node_util/inc/dmUtil.h index 3b94f00bee..5be41f830d 100644 --- a/source/dnode/mgmt/node_util/inc/dmUtil.h +++ b/source/dnode/mgmt/node_util/inc/dmUtil.h @@ -121,6 +121,7 @@ typedef void (*GetVnodeLoadsFp)(SMonVloadInfo *pInfo); typedef void (*GetMnodeLoadsFp)(SMonMloadInfo *pInfo); typedef void (*GetQnodeLoadsFp)(SQnodeLoad *pInfo); typedef int32_t (*ProcessAlterNodeTypeFp)(EDndNodeType ntype, SRpcMsg *pMsg); +typedef void (*StopDnodeFp)(); typedef struct { int32_t dnodeId; @@ -159,6 +160,7 @@ typedef struct { GetVnodeLoadsFp getVnodeLoadsLiteFp; GetMnodeLoadsFp getMnodeLoadsFp; GetQnodeLoadsFp getQnodeLoadsFp; + StopDnodeFp stopDnodeFp; } SMgmtInputOpt; typedef struct { diff --git a/source/dnode/mnode/impl/src/mndDump.c b/source/dnode/mnode/impl/src/mndDump.c index 31e092f1a4..565e244014 100644 --- a/source/dnode/mnode/impl/src/mndDump.c +++ b/source/dnode/mnode/impl/src/mndDump.c @@ -581,7 +581,7 @@ void mndDumpSdb() { msgCb.mgmt = (SMgmtWrapper *)(&msgCb); // hack tmsgSetDefault(&msgCb); - (void)walInit(); + (void)walInit(NULL); (void)syncInit(); SMnodeOpt opt = {.msgCb = msgCb}; diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index d01db56013..2f56aac7d6 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -49,7 +49,9 @@ typedef struct SVSnapWriter SVSnapWriter; extern const SVnodeCfg vnodeCfgDefault; -int32_t vnodeInit(int32_t nthreads); +typedef void (*StopDnodeFp)(); + +int32_t vnodeInit(int32_t nthreads, StopDnodeFp stopDnodeFp); void vnodeCleanup(); int32_t vnodeCreate(const char *path, SVnodeCfg *pCfg, int32_t diskPrimary, STfs *pTfs); bool vnodeShouldRemoveWal(SVnode *pVnode); diff --git a/source/dnode/vnode/src/vnd/vnodeModule.c b/source/dnode/vnode/src/vnd/vnodeModule.c index 8b7de7058c..709bfa19bc 100644 --- a/source/dnode/vnode/src/vnd/vnodeModule.c +++ 
b/source/dnode/vnode/src/vnd/vnodeModule.c @@ -18,13 +18,13 @@ static volatile int32_t VINIT = 0; -int vnodeInit(int nthreads) { +int vnodeInit(int nthreads, StopDnodeFp stopDnodeFp) { if (atomic_val_compare_exchange_32(&VINIT, 0, 1)) { return 0; } TAOS_CHECK_RETURN(vnodeAsyncOpen(nthreads)); - TAOS_CHECK_RETURN(walInit()); + TAOS_CHECK_RETURN(walInit(stopDnodeFp)); return 0; } diff --git a/source/libs/wal/src/walMgmt.c b/source/libs/wal/src/walMgmt.c index 9da3207471..581a63671c 100644 --- a/source/libs/wal/src/walMgmt.c +++ b/source/libs/wal/src/walMgmt.c @@ -26,6 +26,7 @@ typedef struct { uint32_t seq; int32_t refSetId; TdThread thread; + stopDnodeFn stopDnode; } SWalMgmt; static SWalMgmt tsWal = {0, .seq = 1}; @@ -35,7 +36,7 @@ static void walFreeObj(void *pWal); int64_t walGetSeq() { return (int64_t)atomic_load_32((volatile int32_t *)&tsWal.seq); } -int32_t walInit() { +int32_t walInit(stopDnodeFn stopDnode) { int8_t old; while (1) { old = atomic_val_compare_exchange_8(&tsWal.inited, 0, 2); @@ -57,6 +58,11 @@ int32_t walInit() { atomic_store_8(&tsWal.inited, 1); } + if (stopDnode == NULL) { + wWarn("failed to set stop dnode call back"); + } + tsWal.stopDnode = stopDnode; + return 0; } @@ -164,6 +170,8 @@ SWal *walOpen(const char *path, SWalCfg *pCfg) { goto _err; } + pWal->stopDnode = tsWal.stopDnode; + wDebug("vgId:%d, wal:%p is opened, level:%d fsyncPeriod:%d", pWal->cfg.vgId, pWal, pWal->cfg.level, pWal->cfg.fsyncPeriod); return pWal; diff --git a/source/libs/wal/src/walWrite.c b/source/libs/wal/src/walWrite.c index dc3b2df52c..9979ddd0b0 100644 --- a/source/libs/wal/src/walWrite.c +++ b/source/libs/wal/src/walWrite.c @@ -525,6 +525,11 @@ static int32_t walWriteIndex(SWal *pWal, int64_t ver, int64_t offset) { if (size != sizeof(SWalIdxEntry)) { wError("vgId:%d, failed to write idx entry due to %s. 
ver:%" PRId64, pWal->cfg.vgId, strerror(errno), ver); + if (pWal->stopDnode != NULL) { + wWarn("vgId:%d, set stop dnode flag", pWal->cfg.vgId); + pWal->stopDnode(); + } + TAOS_RETURN(TAOS_SYSTEM_ERROR(errno)); } @@ -571,6 +576,11 @@ static FORCE_INLINE int32_t walWriteImpl(SWal *pWal, int64_t index, tmsg_t msgTy wError("vgId:%d, file:%" PRId64 ".log, failed to write since %s", pWal->cfg.vgId, walGetLastFileFirstVer(pWal), strerror(errno)); + if (pWal->stopDnode != NULL) { + wWarn("vgId:%d, set stop dnode flag", pWal->cfg.vgId); + pWal->stopDnode(); + } + TAOS_CHECK_GOTO(code, &lino, _exit); } @@ -627,6 +637,11 @@ static FORCE_INLINE int32_t walWriteImpl(SWal *pWal, int64_t index, tmsg_t msgTy taosMemoryFreeClear(newBodyEncrypted); } + if (pWal->stopDnode != NULL) { + wWarn("vgId:%d, set stop dnode flag", pWal->cfg.vgId); + pWal->stopDnode(); + } + TAOS_CHECK_GOTO(code, &lino, _exit); } From f4c1ad0fdea1e5f6340c754733df6743f5318d6b Mon Sep 17 00:00:00 2001 From: menshibin Date: Thu, 15 Aug 2024 13:06:47 +0800 Subject: [PATCH 054/181] modify exec error --- docs/examples/python/schemaless_ws.py | 13 +++++++------ docs/examples/python/stmt_native.py | 4 ++-- docs/examples/python/stmt_ws.py | 4 ++-- docs/examples/python/tmq_native.py | 2 +- 4 files changed, 12 insertions(+), 11 deletions(-) diff --git a/docs/examples/python/schemaless_ws.py b/docs/examples/python/schemaless_ws.py index 3c41d1768b..39de55393d 100644 --- a/docs/examples/python/schemaless_ws.py +++ b/docs/examples/python/schemaless_ws.py @@ -1,19 +1,21 @@ import taosws +host = "localhost" +port = 6041 def prepare(): conn = None try: conn = taosws.connect(user="root", password="taosdata", - host="localhost", - port=6041) + host=host, + port=port) # create database rowsAffected = conn.execute(f"CREATE DATABASE IF NOT EXISTS power") assert rowsAffected == 0 except Exception as err: - print(f"Failed to create db and table, err:{err}") + print(f"Failed to create db and table, db addrr:{host}:{port} ; ErrMessage:{err}") raise err finally: if conn: @@ -32,8 +34,7 @@ def schemaless_insert(): jsonDemo = [ '{"metric": "metric_json","timestamp": 1626846400,"value": 10.3, "tags": {"groupid": 2, "location": "California.SanFrancisco", "id": "d1001"}}' ] - host = "localhost" - port = 6041 + try: conn = taosws.connect(user="root", password="taosdata", @@ -76,6 +77,6 @@ def schemaless_insert(): if __name__ == "__main__": try: prepare() - schemaless_insert + schemaless_insert() except Exception as err: print(f"Failed to insert data with schemaless, err:{err}") diff --git a/docs/examples/python/stmt_native.py b/docs/examples/python/stmt_native.py index 16975d6895..a1af7d1dd7 100644 --- a/docs/examples/python/stmt_native.py +++ b/docs/examples/python/stmt_native.py @@ -7,8 +7,8 @@ numOfRow = 10 conn = None stmt = None -host="localhost", -port=6030, +host="localhost" +port=6030 try: conn = taos.connect( user="root", diff --git a/docs/examples/python/stmt_ws.py b/docs/examples/python/stmt_ws.py index 74f071fde1..45d9222315 100644 --- a/docs/examples/python/stmt_ws.py +++ b/docs/examples/python/stmt_ws.py @@ -13,8 +13,8 @@ port=6041 try: conn = taosws.connect(user="root", password="taosdata", - host="localhost", - port=6041) + host=host, + port=port) conn.execute("CREATE DATABASE IF NOT EXISTS power") conn.execute("USE power") diff --git a/docs/examples/python/tmq_native.py b/docs/examples/python/tmq_native.py index 8181e1c2db..22a9b805bc 100644 --- a/docs/examples/python/tmq_native.py +++ b/docs/examples/python/tmq_native.py @@ -50,7 +50,7 @@ from 
taos.tmq import Consumer def create_consumer(): host = "localhost" - port = 6030 + port = "6030" groupId = "group1" clientId = "1" try: From d0059d2d9dfa0bd681bcf3a7dec22764519ddc29 Mon Sep 17 00:00:00 2001 From: kailixu Date: Thu, 15 Aug 2024 13:25:04 +0800 Subject: [PATCH 055/181] enh: support config randErrorScope dynamically --- include/os/os.h | 1 + include/util/tdef.h | 7 +++++++ source/common/src/tglobal.c | 23 +++++++++++++++-------- source/os/src/osFile.c | 17 +++++++++-------- source/os/src/osMemory.c | 12 +++++++----- 5 files changed, 39 insertions(+), 21 deletions(-) diff --git a/include/os/os.h b/include/os/os.h index 08b68f36d4..9e5e9221e4 100644 --- a/include/os/os.h +++ b/include/os/os.h @@ -126,6 +126,7 @@ extern "C" { extern int32_t tsRandErrChance; extern int64_t tsRandErrDivisor; +extern int64_t tsRandErrScope; extern threadlocal bool tsEnableRandErr; #ifdef __cplusplus diff --git a/include/util/tdef.h b/include/util/tdef.h index 890f1d8f95..35c4adab50 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -568,6 +568,13 @@ enum { SND_WORKER_TYPE__UNIQUE, }; +enum { + RAND_ERR_MEMORY = 1, + RAND_ERR_FILE = 2, + // RAND_ERR_SCOPE_XXX... = 4, + // ... +}; + #define DEFAULT_HANDLE 0 #define MNODE_HANDLE 1 #define QNODE_HANDLE -1 diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 46ad263d3d..a013c98b73 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -592,6 +592,9 @@ static int32_t taosAddClientCfg(SConfig *pCfg) { CFG_SCOPE_CLIENT, CFG_DYN_NONE)); TAOS_CHECK_RETURN( cfgAddInt32(pCfg, "metaCacheMaxSize", tsMetaCacheMaxSize, -1, INT32_MAX, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT)); + TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "randErrorChance", tsRandErrChance, 0, 10000, CFG_SCOPE_BOTH, CFG_DYN_NONE)); + TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "randErrorDivisor", tsRandErrDivisor, 1, INT64_MAX, CFG_SCOPE_BOTH, CFG_DYN_BOTH)); + TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "randErrorScope", tsRandErrScope, 0, INT64_MAX, CFG_SCOPE_BOTH, CFG_DYN_BOTH)); tsNumOfRpcThreads = tsNumOfCores / 2; tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 2, TSDB_MAX_RPC_THREADS); @@ -774,8 +777,6 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "transPullupInterval", tsTransPullupInterval, 1, 10000, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "compactPullupInterval", tsCompactPullupInterval, 1, 10000, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "mqRebalanceInterval", tsMqRebalanceInterval, 1, 10000, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER)); - TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "randErrorChance", tsRandErrChance, 0, 10000, CFG_SCOPE_BOTH, CFG_DYN_NONE)); - TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "randErrorDivisor", tsRandErrDivisor, 1, INT64_MAX, CFG_SCOPE_BOTH, CFG_DYN_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "ttlUnit", tsTtlUnit, 1, 86400 * 365, CFG_SCOPE_SERVER, CFG_DYN_NONE)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "ttlPushInterval", tsTtlPushIntervalSec, 1, 100000, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER)); @@ -1210,6 +1211,15 @@ static int32_t taosSetClientCfg(SConfig *pCfg) { TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "metaCacheMaxSize"); tsMetaCacheMaxSize = pItem->i32; + TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "randErrorChance"); + tsRandErrChance = pItem->i32; + + TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "randErrorDivisor"); + tsRandErrDivisor = pItem->i64; + + TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "randErrorScope"); + tsRandErrScope = pItem->i64; + 
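/* How the tsRandErrScope value read just above is consumed: it is a bitmask over the
 * RAND_ERR_* scopes this patch adds to tdef.h (RAND_ERR_MEMORY = 1, RAND_ERR_FILE = 2),
 * and each fault-injection site checks its own bit before rolling the dice. Sketch of the
 * gate used in osFile.c / osMemory.c further down in this patch (illustrative excerpt):
 *
 *   if (tsEnableRandErr && (tsRandErrScope & RAND_ERR_FILE)) {
 *     uint32_t r = taosRand() % tsRandErrDivisor;
 *     if ((r + 1) <= tsRandErrChance) {
 *       // inject the error for this scope, e.g. EIO for file I/O, OOM for memory
 *     }
 *   }
 */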
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "countAlwaysReturnValue"); tsCountAlwaysReturnValue = pItem->i32; @@ -1466,12 +1476,6 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "mqRebalanceInterval"); tsMqRebalanceInterval = pItem->i32; - TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "randErrorChance"); - tsRandErrChance = pItem->i32; - - TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "randErrorDivisor"); - tsRandErrDivisor = pItem->i64; - TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "ttlUnit"); tsTtlUnit = pItem->i32; @@ -1927,6 +1931,7 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) { {"mndSdbWriteDelta", &tsMndSdbWriteDelta}, {"minDiskFreeSize", &tsMinDiskFreeSize}, {"randErrorDivisor", &tsRandErrDivisor}, + {"randErrorScope", &tsRandErrScope}, {"cacheLazyLoadThreshold", &tsCacheLazyLoadThreshold}, {"checkpointInterval", &tsStreamCheckpointInterval}, @@ -2205,6 +2210,8 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, const char *name) { {"queryPlannerTrace", &tsQueryPlannerTrace}, {"queryNodeChunkSize", &tsQueryNodeChunkSize}, {"queryUseNodeAllocator", &tsQueryUseNodeAllocator}, + {"randErrorDivisor", &tsRandErrDivisor}, + {"randErrorScope", &tsRandErrScope}, {"smlDot2Underline", &tsSmlDot2Underline}, {"shellActivityTimer", &tsShellActivityTimer}, {"useAdapter", &tsUseAdapter}, diff --git a/source/os/src/osFile.c b/source/os/src/osFile.c index b8160a14b7..a5df4f63f3 100644 --- a/source/os/src/osFile.c +++ b/source/os/src/osFile.c @@ -15,6 +15,7 @@ #define ALLOW_FORBID_FUNC #include "os.h" #include "osSemaphore.h" +#include "tdef.h" #include "zlib.h" #ifdef WINDOWS @@ -65,14 +66,14 @@ typedef struct TdFile { #define FILE_WITH_LOCK 1 #ifdef BUILD_WITH_RAND_ERR -#define STUB_RAND_IO_ERR(ret) \ - if (tsEnableRandErr) { \ - uint32_t r = taosRand() % tsRandErrDivisor; \ - if ((r + 1) <= tsRandErrChance) { \ - errno = EIO; \ - terrno = TAOS_SYSTEM_ERROR(errno); \ - return (ret); \ - } \ +#define STUB_RAND_IO_ERR(ret) \ + if (tsEnableRandErr && (tsRandErrScope & RAND_ERR_FILE)) { \ + uint32_t r = taosRand() % tsRandErrDivisor; \ + if ((r + 1) <= tsRandErrChance) { \ + errno = EIO; \ + terrno = TAOS_SYSTEM_ERROR(errno); \ + return (ret); \ + } \ } #else #define STUB_RAND_IO_ERR(ret) diff --git a/source/os/src/osMemory.c b/source/os/src/osMemory.c index 297b17b957..7a5a547354 100644 --- a/source/os/src/osMemory.c +++ b/source/os/src/osMemory.c @@ -20,9 +20,11 @@ #include #endif #include "os.h" +#include "tdef.h" int32_t tsRandErrChance = 1; int64_t tsRandErrDivisor = 10001; +int64_t tsRandErrScope = (RAND_ERR_MEMORY | RAND_ERR_FILE); threadlocal bool tsEnableRandErr = 0; #if defined(USE_TD_MEMORY) || defined(USE_ADDR2LINE) @@ -272,7 +274,7 @@ void *taosMemoryMalloc(int64_t size) { #else #ifdef BUILD_WITH_RAND_ERR - if (tsEnableRandErr) { + if (tsEnableRandErr && (tsRandErrScope & RAND_ERR_MEMORY)) { uint32_t r = taosRand() % tsRandErrDivisor; if ((r + 1) <= tsRandErrChance) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -302,7 +304,7 @@ void *taosMemoryCalloc(int64_t num, int64_t size) { return (char *)tmp + sizeof(TdMemoryInfo); #else #ifdef BUILD_WITH_RAND_ERR - if (tsEnableRandErr) { + if (tsEnableRandErr && (tsRandErrScope & RAND_ERR_MEMORY)) { uint32_t r = taosRand() % tsRandErrDivisor; if ((r + 1) <= tsRandErrChance) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -342,7 +344,7 @@ void *taosMemoryRealloc(void *ptr, int64_t size) { return (char *)tmp + sizeof(TdMemoryInfo); #else #ifdef BUILD_WITH_RAND_ERR - if (tsEnableRandErr) { + if (tsEnableRandErr 
&& (tsRandErrScope & RAND_ERR_MEMORY)) { uint32_t r = taosRand() % tsRandErrDivisor; if ((r + 1) <= tsRandErrChance) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -377,7 +379,7 @@ char *taosStrdup(const char *ptr) { return (char *)tmp + sizeof(TdMemoryInfo); #else #ifdef BUILD_WITH_RAND_ERR - if (tsEnableRandErr) { + if (tsEnableRandErr && (tsRandErrScope & RAND_ERR_MEMORY)) { uint32_t r = taosRand() % tsRandErrDivisor; if ((r + 1) <= tsRandErrChance) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -443,7 +445,7 @@ void *taosMemoryMallocAlign(uint32_t alignment, int64_t size) { #else #if defined(LINUX) #ifdef BUILD_WITH_RAND_ERR - if (tsEnableRandErr) { + if (tsEnableRandErr && (tsRandErrScope & RAND_ERR_MEMORY)) { uint32_t r = taosRand() % tsRandErrDivisor; if ((r + 1) <= tsRandErrChance) { terrno = TSDB_CODE_OUT_OF_MEMORY; From d9dbf95c5eca2585bfabb1c8a3ded577bdcac9ec Mon Sep 17 00:00:00 2001 From: menshibin Date: Thu, 15 Aug 2024 13:37:06 +0800 Subject: [PATCH 056/181] modify exec error --- docs/examples/node/websocketexample/tmq_example.js | 4 ++-- docs/examples/node/websocketexample/tmq_seek_example.js | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/examples/node/websocketexample/tmq_example.js b/docs/examples/node/websocketexample/tmq_example.js index b0e52360c9..fc9d2889e0 100644 --- a/docs/examples/node/websocketexample/tmq_example.js +++ b/docs/examples/node/websocketexample/tmq_example.js @@ -62,7 +62,7 @@ async function subscribe(consumer) { for (let [key, value] of res) { console.log(`data: ${key} ${value}`); } - consumer.commit(); + await consumer.commit(); console.log("Commit offset manually successfully."); } } catch (err) { @@ -77,7 +77,7 @@ async function test() { let consumer = null; try { await prepare(); - let consumer = await createConsumer() + consumer = await createConsumer() await subscribe(consumer) await consumer.unsubscribe(); console.log("Consumer unsubscribed successfully."); diff --git a/docs/examples/node/websocketexample/tmq_seek_example.js b/docs/examples/node/websocketexample/tmq_seek_example.js index 0dea7e1671..259614d5db 100644 --- a/docs/examples/node/websocketexample/tmq_seek_example.js +++ b/docs/examples/node/websocketexample/tmq_seek_example.js @@ -31,7 +31,7 @@ async function prepare() { conf.setUser('root'); conf.setPwd('taosdata'); conf.setDb('power'); - const createDB = `CREATE DATABASE IF NOT EXISTS POWER ${db} KEEP 3650 DURATION 10 BUFFER 16 WAL_LEVEL 1;`; + const createDB = `CREATE DATABASE IF NOT EXISTS ${db} KEEP 3650 DURATION 10 BUFFER 16 WAL_LEVEL 1;`; const createStable = `CREATE STABLE IF NOT EXISTS ${db}.${stable} (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);`; let wsSql = await taos.sqlConnect(conf); @@ -45,7 +45,7 @@ async function prepare() { for (let i = 0; i < 10; i++) { await wsSql.exec(`INSERT INTO d1001 USING ${stable} (location, groupId) TAGS ("California.SanFrancisco", 3) VALUES (NOW, ${10 + i}, ${200 + i}, ${0.32 + i})`); } - wsSql.Close(); + await wsSql.close(); } // ANCHOR: subscribe From ce7d70c3b3de7f832cbd875a333e2f73f31a63d6 Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Thu, 15 Aug 2024 14:02:31 +0800 Subject: [PATCH 057/181] test: (fix case) add timeout-return --- tests/pytest/util/common.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py index dee3f505c9..1141ca403d 100644 --- a/tests/pytest/util/common.py +++ b/tests/pytest/util/common.py @@ -1909,6 +1909,8 @@ class TDCom: if latency < 
self.stream_timeout: latency += 1 time.sleep(1) + else: + return False return tbname def get_group_id_from_stb(self, stbname): From 04764316b150a6a4e7a14517529a4ffe9fe53344 Mon Sep 17 00:00:00 2001 From: dmchen Date: Thu, 15 Aug 2024 06:07:15 +0000 Subject: [PATCH 058/181] fix/TD-30849-fix-unit-test-compile --- include/libs/wal/wal.h | 4 ++-- source/libs/wal/test/walMetaTest.cpp | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/include/libs/wal/wal.h b/include/libs/wal/wal.h index f74e26eeda..a5d5316d23 100644 --- a/include/libs/wal/wal.h +++ b/include/libs/wal/wal.h @@ -118,11 +118,11 @@ typedef struct SWal { SHashObj *pRefHash; // refId -> SWalRef // path char path[WAL_PATH_LEN]; - // reusable write head - SWalCkHead writeHead; stopDnodeFn stopDnode; + // reusable write head + SWalCkHead writeHead; } SWal; typedef struct { diff --git a/source/libs/wal/test/walMetaTest.cpp b/source/libs/wal/test/walMetaTest.cpp index c7e83e7c86..8bd4de0a89 100644 --- a/source/libs/wal/test/walMetaTest.cpp +++ b/source/libs/wal/test/walMetaTest.cpp @@ -12,7 +12,7 @@ SWalSyncInfo syncMeta = {0}; class WalCleanEnv : public ::testing::Test { protected: static void SetUpTestCase() { - int code = walInit(); + int code = walInit(NULL); ASSERT(code == 0); } @@ -44,7 +44,7 @@ class WalCleanEnv : public ::testing::Test { class WalCleanDeleteEnv : public ::testing::Test { protected: static void SetUpTestCase() { - int code = walInit(); + int code = walInit(NULL); ASSERT(code == 0); } @@ -74,7 +74,7 @@ class WalCleanDeleteEnv : public ::testing::Test { class WalKeepEnv : public ::testing::Test { protected: static void SetUpTestCase() { - int code = walInit(); + int code = walInit(NULL); ASSERT(code == 0); } @@ -111,7 +111,7 @@ class WalKeepEnv : public ::testing::Test { class WalRetentionEnv : public ::testing::Test { protected: static void SetUpTestCase() { - int code = walInit(); + int code = walInit(NULL); ASSERT(code == 0); } From ef5f69e3cf97eb77fe7cb46ff478c16768a6d0b3 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao> Date: Thu, 15 Aug 2024 14:13:08 +0800 Subject: [PATCH 059/181] fix issue --- source/libs/executor/src/timesliceoperator.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/source/libs/executor/src/timesliceoperator.c b/source/libs/executor/src/timesliceoperator.c index 8cd547e333..b14f4f0266 100644 --- a/source/libs/executor/src/timesliceoperator.c +++ b/source/libs/executor/src/timesliceoperator.c @@ -1249,10 +1249,11 @@ void destroyTimeSliceOperatorInfo(void* param) { } cleanupExprSupp(&pInfo->scalarSup); - - for (int32_t i = 0; i < pInfo->pFillColInfo->numOfFillExpr; ++i) { - taosVariantDestroy(&pInfo->pFillColInfo[i].fillVal); + if (pInfo->pFillColInfo != NULL) { + for (int32_t i = 0; i < pInfo->pFillColInfo->numOfFillExpr; ++i) { + taosVariantDestroy(&pInfo->pFillColInfo[i].fillVal); + } + taosMemoryFree(pInfo->pFillColInfo); } - taosMemoryFree(pInfo->pFillColInfo); taosMemoryFreeClear(param); } From 9a0f7aeedb97ad33352a2d54d033b2d3dd226944 Mon Sep 17 00:00:00 2001 From: sheyanjie-qq <249478495@qq.com> Date: Thu, 15 Aug 2024 14:28:35 +0800 Subject: [PATCH 060/181] mod rest api desc --- docs/zh/08-develop/01-connect/index.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/zh/08-develop/01-connect/index.md b/docs/zh/08-develop/01-connect/index.md index 092a50869a..d1aeb0ed8b 100644 --- a/docs/zh/08-develop/01-connect/index.md +++ b/docs/zh/08-develop/01-connect/index.md @@ -399,7 +399,8 @@ C/C++ 语言连接器使用 `taos_connect()` 
函数用于建立与 TDengine 数 -使用 REST API 方式访问 TDengine,由应用程序去建立 HTTP 连接,自己控制 HTTP 连接参数。 +通过 REST API 方式访问 TDengine 时,应用程序直接与 taosAdapter 建立 HTTP 连接,建议使用连接池来管理连接。 +使用 REST API 的参数具体可以参考:[http-请求格式](../../reference/connector/rest-api/#http-请求格式) From 41d8c20117051c3dc069afa82bc6d59ce5e27f0e Mon Sep 17 00:00:00 2001 From: Shungang Li Date: Thu, 15 Aug 2024 14:49:48 +0800 Subject: [PATCH 061/181] fix: add miss hostname for ep funcs --- source/common/src/cos.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/source/common/src/cos.c b/source/common/src/cos.c index db0dadbc46..aa587bf07b 100644 --- a/source/common/src/cos.c +++ b/source/common/src/cos.c @@ -974,7 +974,7 @@ int32_t s3PutObjectFromFile2ByEp(const char *file, const char *object_name, int8 data.totalContentLength = data.totalOriginalContentLength = data.contentLength = data.originalContentLength = contentLength; - S3BucketContext bucketContext = {0, + S3BucketContext bucketContext = {tsS3Hostname[epIndex], tsS3BucketName[epIndex], protocolG, uriStyleG, @@ -1057,7 +1057,7 @@ static int32_t s3PutObjectFromFileOffsetByEp(const char *file, const char *objec data.totalContentLength = data.totalOriginalContentLength = data.contentLength = data.originalContentLength = contentLength; - S3BucketContext bucketContext = {0, + S3BucketContext bucketContext = {tsS3Hostname[epIndex], tsS3BucketName[epIndex], protocolG, uriStyleG, @@ -1153,7 +1153,7 @@ static void s3FreeObjectKey(void *pItem) { } static SArray *getListByPrefixByEp(const char *prefix, int8_t epIndex) { - S3BucketContext bucketContext = {0, + S3BucketContext bucketContext = {tsS3Hostname[epIndex], tsS3BucketName[epIndex], protocolG, uriStyleG, @@ -1221,7 +1221,7 @@ static SArray *getListByPrefix(const char *prefix) { static int32_t s3DeleteObjectsByEp(const char *object_name[], int nobject, int8_t epIndex) { int32_t code = 0; - S3BucketContext bucketContext = {0, + S3BucketContext bucketContext = {tsS3Hostname[epIndex], tsS3BucketName[epIndex], protocolG, uriStyleG, @@ -1297,7 +1297,7 @@ static int32_t s3GetObjectBlockByEp(const char *object_name, int64_t offset, int int64_t ifModifiedSince = -1, ifNotModifiedSince = -1; const char *ifMatch = 0, *ifNotMatch = 0; - S3BucketContext bucketContext = {0, + S3BucketContext bucketContext = {tsS3Hostname[epIndex], tsS3BucketName[epIndex], protocolG, uriStyleG, @@ -1370,7 +1370,7 @@ static int32_t s3GetObjectToFileByEp(const char *object_name, const char *fileNa int64_t ifModifiedSince = -1, ifNotModifiedSince = -1; const char *ifMatch = 0, *ifNotMatch = 0; - S3BucketContext bucketContext = {0, + S3BucketContext bucketContext = {tsS3Hostname[epIndex], tsS3BucketName[epIndex], protocolG, uriStyleG, @@ -1447,7 +1447,7 @@ static long s3SizeByEp(const char *object_name, int8_t epIndex) { long size = 0; int status = 0; - S3BucketContext bucketContext = {0, + S3BucketContext bucketContext = {tsS3Hostname[epIndex], tsS3BucketName[epIndex], protocolG, uriStyleG, From fd53940b33407b0a6c146893bede76ec7352fb3e Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 15 Aug 2024 14:55:29 +0800 Subject: [PATCH 062/181] fix(query): release lock when error occurs. 
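Without this change, an error returned by doDynamicPruneDataBlock() jumps straight to the _end label while the data block obtained from the tsdb reader is still held, so the reader keeps the block (and the lock mentioned in the subject) on that error path. The hunk below releases it through tsdReaderReleaseDataBlock() before the error macro runs. The stand-alone C sketch that follows only illustrates that release-on-error shape; DemoReader, acquireBlock(), releaseBlock() and pruneBlock() are hypothetical stand-ins, not TDengine APIs.

    #include <stdio.h>

    /* Hypothetical stand-ins for the reader and data-block handling in the real code. */
    typedef struct { int blockHeld; } DemoReader;

    static int  acquireBlock(DemoReader *r) { r->blockHeld = 1; return 0; }
    static void releaseBlock(DemoReader *r) { r->blockHeld = 0; }
    static int  pruneBlock(int simulateFailure) { return simulateFailure ? -1 : 0; }

    /* Mirrors the shape of the fix: if pruning fails after a block was acquired,
     * release the block before jumping to the shared error-handling label. */
    static int loadOneBlock(DemoReader *reader, int simulateFailure) {
      int code = acquireBlock(reader);
      if (code != 0) goto _end;

      code = pruneBlock(simulateFailure);
      if (code != 0) {
        releaseBlock(reader); /* the added cleanup: do not leave the block held */
        goto _end;
      }

      /* ... normal processing of the block would happen here ... */
      releaseBlock(reader);

    _end:
      return code;
    }

    int main(void) {
      DemoReader reader = {0};
      int code = loadOneBlock(&reader, 0);
      printf("ok path:   code=%d, block still held=%d\n", code, reader.blockHeld);
      code = loadOneBlock(&reader, 1);
      printf("fail path: code=%d, block still held=%d\n", code, reader.blockHeld);
      return 0;
    }

In both runs the block ends up released; before the fix, the failure path would have left it held.
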
--- source/libs/executor/src/scanoperator.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 54c1c78844..b25ac8e928 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -440,7 +440,10 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca // try to filter data block according to current results code = doDynamicPruneDataBlock(pOperator, pBlockInfo, status); - QUERY_CHECK_CODE(code, lino, _end); + if (code) { + pAPI->tsdReader.tsdReaderReleaseDataBlock(pTableScanInfo->dataReader); + QUERY_CHECK_CODE(code, lino, _end); + } if (*status == FUNC_DATA_REQUIRED_NOT_LOAD) { qDebug("%s data block skipped due to dynamic prune, brange:%" PRId64 "-%" PRId64 ", rows:%" PRId64, From 2e51d5409ab504efc0dce859d679542e9aa5f1ec Mon Sep 17 00:00:00 2001 From: sima Date: Thu, 15 Aug 2024 14:34:41 +0800 Subject: [PATCH 063/181] fix:[TD-31469] Fix trim sql syntax error. --- source/libs/parser/inc/sql.y | 3 +- source/libs/parser/src/sql.c | 1808 +++++++++++++++++----------------- 2 files changed, 909 insertions(+), 902 deletions(-) diff --git a/source/libs/parser/inc/sql.y b/source/libs/parser/inc/sql.y index 05c1c95aad..9228e16ff9 100644 --- a/source/libs/parser/inc/sql.y +++ b/source/libs/parser/inc/sql.y @@ -1195,6 +1195,8 @@ function_expression(A) ::= TRIM(B) NK_LP expr_or_subquery(C) NK_RP(D). { A = createRawExprNodeExt(pCxt, &B, &D, createTrimFunctionNode(pCxt, releaseRawExprNode(pCxt, C), TRIM_TYPE_BOTH)); } function_expression(A) ::= TRIM(B) NK_LP trim_specification_type(C) FROM expr_or_subquery(D) NK_RP(E). { A = createRawExprNodeExt(pCxt, &B, &E, createTrimFunctionNode(pCxt, releaseRawExprNode(pCxt, D), C)); } +function_expression(A) ::= + TRIM(B) NK_LP expr_or_subquery(C) FROM expr_or_subquery(D) NK_RP(E). { A = createRawExprNodeExt(pCxt, &B, &E, createTrimFunctionNodeExt(pCxt, releaseRawExprNode(pCxt, C), releaseRawExprNode(pCxt, D), TRIM_TYPE_BOTH)); } function_expression(A) ::= TRIM(B) NK_LP trim_specification_type(C) expr_or_subquery(D) FROM expr_or_subquery(E) NK_RP(F). { A = createRawExprNodeExt(pCxt, &B, &F, createTrimFunctionNodeExt(pCxt, releaseRawExprNode(pCxt, D), releaseRawExprNode(pCxt, E), C)); } function_expression(A) ::= @@ -1217,7 +1219,6 @@ substr_func(A) ::= SUBSTRING(B). %type trim_specification_type ETrimType %destructor trim_specification_type { } -trim_specification_type(A) ::= . { A = TRIM_TYPE_BOTH; } trim_specification_type(A) ::= BOTH. { A = TRIM_TYPE_BOTH; } trim_specification_type(A) ::= TRAILING. { A = TRIM_TYPE_TRAILING; } trim_specification_type(A) ::= LEADING. 
{ A = TRIM_TYPE_LEADING; } diff --git a/source/libs/parser/src/sql.c b/source/libs/parser/src/sql.c index 0492288fd6..4ff7510e92 100644 --- a/source/libs/parser/src/sql.c +++ b/source/libs/parser/src/sql.c @@ -536,18 +536,18 @@ typedef union { #define ParseCTX_FETCH #define ParseCTX_STORE #define YYFALLBACK 1 -#define YYNSTATE 1004 +#define YYNSTATE 1006 #define YYNRULE 770 #define YYNRULE_WITH_ACTION 770 #define YYNTOKEN 385 -#define YY_MAX_SHIFT 1003 -#define YY_MIN_SHIFTREDUCE 1487 -#define YY_MAX_SHIFTREDUCE 2256 -#define YY_ERROR_ACTION 2257 -#define YY_ACCEPT_ACTION 2258 -#define YY_NO_ACTION 2259 -#define YY_MIN_REDUCE 2260 -#define YY_MAX_REDUCE 3029 +#define YY_MAX_SHIFT 1005 +#define YY_MIN_SHIFTREDUCE 1490 +#define YY_MAX_SHIFTREDUCE 2259 +#define YY_ERROR_ACTION 2260 +#define YY_ACCEPT_ACTION 2261 +#define YY_NO_ACTION 2262 +#define YY_MIN_REDUCE 2263 +#define YY_MAX_REDUCE 3032 #define YY_MIN_DSTRCTR 386 #define YY_MAX_DSTRCTR 566 /************* End control #defines *******************************************/ @@ -632,402 +632,404 @@ typedef union { ** yy_default[] Default action for each state. ** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (3934) +#define YY_ACTTAB_COUNT (3956) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 819, 668, 3000, 676, 669, 2308, 669, 2308, 2995, 2472, - /* 10 */ 2995, 954, 57, 55, 2427, 56, 54, 53, 52, 51, - /* 20 */ 497, 2261, 1979, 2623, 2004, 462, 818, 228, 769, 2999, - /* 30 */ 217, 2996, 820, 2996, 2998, 501, 1977, 490, 2081, 2352, - /* 40 */ 217, 2790, 148, 2620, 880, 147, 146, 145, 144, 143, - /* 50 */ 142, 141, 140, 139, 580, 2592, 834, 2596, 50, 49, - /* 60 */ 831, 167, 56, 54, 53, 52, 51, 2596, 2076, 148, - /* 70 */ 765, 863, 147, 146, 145, 144, 143, 142, 141, 140, - /* 80 */ 139, 893, 1985, 50, 49, 2808, 755, 56, 54, 53, - /* 90 */ 52, 51, 50, 49, 2695, 2008, 56, 54, 53, 52, - /* 100 */ 51, 2755, 749, 875, 753, 751, 298, 297, 244, 688, - /* 110 */ 678, 2662, 1000, 893, 691, 58, 971, 970, 969, 968, - /* 120 */ 526, 248, 967, 966, 172, 961, 960, 959, 958, 957, - /* 130 */ 956, 955, 171, 949, 948, 947, 525, 524, 944, 943, - /* 140 */ 942, 208, 207, 941, 521, 940, 939, 938, 2789, 42, - /* 150 */ 350, 2836, 2084, 2085, 808, 131, 2791, 879, 2793, 2794, - /* 160 */ 874, 2459, 2748, 862, 898, 514, 2217, 104, 2610, 210, - /* 170 */ 689, 2897, 103, 50, 49, 492, 2893, 56, 54, 53, - /* 180 */ 52, 51, 2790, 2808, 204, 2905, 830, 768, 159, 829, - /* 190 */ 3000, 2040, 2050, 341, 342, 229, 2995, 876, 340, 766, - /* 200 */ 2060, 2083, 2086, 2944, 2260, 50, 49, 2007, 60, 56, - /* 210 */ 54, 53, 52, 51, 818, 228, 1980, 2170, 1978, 2996, - /* 220 */ 820, 2177, 9, 861, 503, 503, 2808, 2004, 157, 156, - /* 230 */ 155, 154, 153, 152, 151, 150, 149, 898, 898, 102, - /* 240 */ 454, 893, 2755, 2258, 875, 935, 184, 183, 932, 931, - /* 250 */ 930, 181, 1983, 1984, 2037, 807, 2039, 2042, 2043, 2044, - /* 260 */ 2045, 2046, 2047, 2048, 2049, 871, 864, 2004, 224, 896, - /* 270 */ 895, 2067, 2068, 2070, 2071, 2072, 2075, 2077, 2078, 2079, - /* 280 */ 2080, 2082, 2, 57, 55, 2173, 684, 2790, 673, 2789, - /* 290 */ 2111, 497, 2836, 1979, 670, 687, 131, 2791, 879, 2793, - /* 300 */ 2794, 874, 873, 2241, 862, 898, 742, 1977, 169, 2081, - /* 310 */ 178, 2868, 2897, 2189, 831, 167, 492, 2893, 50, 49, - /* 320 */ 893, 756, 56, 54, 53, 52, 51, 3000, 571, 2009, - /* 330 */ 2623, 2808, 529, 2148, 72, 2995, 60, 528, 219, 2076, - /* 340 */ 299, 2148, 863, 33, 499, 2004, 19, 2755, 665, 875, - /* 350 */ 2620, 880, 
43, 1985, 2999, 2112, 745, 663, 2996, 2997, - /* 360 */ 659, 655, 72, 739, 737, 2749, 57, 55, 72, 264, - /* 370 */ 296, 894, 2468, 671, 497, 2316, 1979, 2206, 633, 631, - /* 380 */ 332, 434, 769, 1000, 242, 76, 15, 2565, 785, 1532, - /* 390 */ 1977, 158, 2081, 202, 2789, 327, 2995, 2836, 2041, 714, - /* 400 */ 334, 429, 2791, 879, 2793, 2794, 874, 872, 1539, 862, - /* 410 */ 898, 854, 2862, 2355, 3001, 228, 82, 124, 12, 2996, - /* 420 */ 820, 81, 2076, 2084, 2085, 863, 2218, 518, 503, 19, - /* 430 */ 2521, 2523, 2117, 1534, 1537, 1538, 1985, 833, 197, 2905, - /* 440 */ 2906, 898, 165, 2910, 801, 800, 2204, 2205, 2207, 2208, - /* 450 */ 2209, 41, 494, 2106, 2107, 2108, 2109, 2110, 2114, 2115, - /* 460 */ 2116, 2038, 2040, 2050, 771, 2662, 1000, 2005, 562, 15, - /* 470 */ 561, 61, 2083, 2086, 2917, 2145, 2146, 2147, 2917, 2917, - /* 480 */ 2917, 2917, 2917, 2145, 2146, 2147, 2765, 1980, 1985, 1978, - /* 490 */ 735, 734, 733, 194, 861, 192, 72, 725, 164, 729, - /* 500 */ 809, 2401, 560, 728, 516, 2470, 2084, 2085, 727, 732, - /* 510 */ 472, 471, 904, 1650, 726, 2769, 1825, 1826, 470, 722, - /* 520 */ 721, 720, 2169, 1983, 1984, 2037, 334, 2039, 2042, 2043, - /* 530 */ 2044, 2045, 2046, 2047, 2048, 2049, 871, 864, 2452, 441, - /* 540 */ 896, 895, 2067, 2068, 182, 2040, 2050, 2075, 2077, 2078, - /* 550 */ 2079, 2080, 2082, 2, 334, 2083, 2086, 685, 608, 1652, - /* 560 */ 334, 469, 468, 607, 1712, 2771, 2773, 493, 302, 2148, - /* 570 */ 1980, 606, 1978, 221, 831, 167, 2445, 861, 898, 1703, - /* 580 */ 927, 926, 925, 1707, 924, 1709, 1710, 923, 920, 2515, - /* 590 */ 1718, 917, 1720, 1721, 914, 911, 908, 50, 49, 1751, - /* 600 */ 1752, 56, 54, 53, 52, 51, 1983, 1984, 2037, 173, - /* 610 */ 2039, 2042, 2043, 2044, 2045, 2046, 2047, 2048, 2049, 871, - /* 620 */ 864, 686, 2616, 896, 895, 2067, 2068, 64, 2912, 2623, - /* 630 */ 2075, 2077, 2078, 2079, 2080, 2082, 2, 12, 57, 55, - /* 640 */ 509, 467, 466, 391, 716, 2790, 497, 301, 1979, 2621, - /* 650 */ 880, 300, 804, 1906, 1907, 1712, 2909, 831, 167, 195, - /* 660 */ 876, 2272, 1977, 334, 2081, 718, 903, 902, 901, 717, - /* 670 */ 1703, 927, 926, 925, 1707, 924, 1709, 1710, 870, 869, - /* 680 */ 2790, 1718, 868, 1720, 1721, 867, 911, 908, 334, 2808, - /* 690 */ 1559, 2283, 1558, 2005, 2076, 876, 2548, 863, 137, 2905, - /* 700 */ 2906, 19, 165, 2910, 2671, 2755, 163, 875, 1985, 217, - /* 710 */ 2917, 2145, 2146, 2147, 2917, 2917, 2917, 2917, 2917, 894, - /* 720 */ 2468, 57, 55, 2087, 2808, 113, 548, 2765, 1560, 497, - /* 730 */ 449, 1979, 2282, 477, 2037, 757, 2597, 2092, 1000, 520, - /* 740 */ 2755, 15, 875, 2004, 134, 1977, 1559, 2081, 1558, 1562, - /* 750 */ 1563, 2755, 2789, 516, 2470, 2836, 2769, 586, 2592, 416, - /* 760 */ 2791, 879, 2793, 2794, 874, 723, 31, 862, 898, 810, - /* 770 */ 805, 798, 794, 894, 2468, 617, 2592, 2076, 2084, 2085, - /* 780 */ 863, 198, 2905, 2906, 1560, 165, 2910, 2789, 1643, 2708, - /* 790 */ 2836, 1985, 2755, 158, 131, 2791, 879, 2793, 2794, 874, - /* 800 */ 500, 719, 862, 898, 479, 2670, 2771, 2774, 2872, 191, - /* 810 */ 2897, 246, 894, 2468, 492, 2893, 94, 2040, 2050, 898, - /* 820 */ 2473, 1000, 1948, 937, 58, 1852, 1853, 2083, 2086, 251, - /* 830 */ 50, 49, 233, 170, 56, 54, 53, 52, 51, 735, - /* 840 */ 734, 733, 1980, 2460, 1978, 785, 725, 164, 729, 861, - /* 850 */ 928, 610, 728, 2995, 508, 507, 46, 727, 732, 472, - /* 860 */ 471, 2084, 2085, 726, 552, 2709, 609, 470, 722, 721, - /* 870 */ 720, 3001, 228, 2281, 1851, 1854, 2996, 820, 1983, 1984, - /* 880 */ 2037, 110, 2039, 2042, 2043, 2044, 2045, 2046, 2047, 
2048, - /* 890 */ 2049, 871, 864, 554, 550, 896, 895, 2067, 2068, 465, - /* 900 */ 2040, 2050, 2075, 2077, 2078, 2079, 2080, 2082, 2, 2463, - /* 910 */ 2083, 2086, 50, 49, 2041, 174, 56, 54, 53, 52, - /* 920 */ 51, 785, 266, 894, 2468, 1980, 671, 1978, 2316, 2995, - /* 930 */ 2008, 519, 861, 2755, 894, 2468, 2528, 12, 512, 10, - /* 940 */ 191, 564, 2007, 65, 460, 2528, 563, 3001, 228, 2008, - /* 950 */ 762, 2473, 2996, 820, 568, 2526, 894, 2468, 384, 523, - /* 960 */ 522, 1983, 1984, 2037, 838, 2039, 2042, 2043, 2044, 2045, - /* 970 */ 2046, 2047, 2048, 2049, 871, 864, 569, 2038, 896, 895, - /* 980 */ 2067, 2068, 2528, 1986, 724, 2075, 2077, 2078, 2079, 2080, - /* 990 */ 2082, 2, 57, 55, 1979, 2790, 2528, 785, 1896, 1662, - /* 1000 */ 497, 846, 1979, 784, 488, 2995, 785, 1641, 1977, 1539, - /* 1010 */ 834, 894, 2468, 1661, 2995, 2526, 1977, 855, 2081, 2869, - /* 1020 */ 2057, 894, 2468, 3001, 228, 894, 2468, 37, 2996, 820, - /* 1030 */ 2248, 588, 3001, 228, 819, 1537, 1538, 2996, 820, 2808, - /* 1040 */ 2280, 602, 2995, 168, 2790, 690, 2868, 391, 2076, 2522, - /* 1050 */ 2523, 863, 894, 2468, 1985, 2755, 857, 875, 2869, 876, - /* 1060 */ 818, 228, 1985, 50, 49, 2996, 820, 56, 54, 53, - /* 1070 */ 52, 51, 603, 50, 49, 57, 55, 56, 54, 53, - /* 1080 */ 52, 51, 866, 497, 1000, 1979, 710, 709, 2808, 638, - /* 1090 */ 712, 711, 1000, 865, 123, 58, 894, 2468, 2530, 1977, - /* 1100 */ 2755, 2081, 2789, 223, 2755, 2836, 875, 478, 2670, 131, - /* 1110 */ 2791, 879, 2793, 2794, 874, 332, 604, 862, 898, 2279, - /* 1120 */ 1947, 2461, 231, 210, 627, 2897, 53, 52, 51, 492, - /* 1130 */ 2893, 2076, 2084, 2085, 863, 313, 38, 2150, 2151, 2152, - /* 1140 */ 2153, 2154, 50, 49, 70, 1985, 56, 54, 53, 52, - /* 1150 */ 51, 2789, 511, 510, 2836, 782, 2247, 2945, 196, 2791, - /* 1160 */ 879, 2793, 2794, 874, 2009, 1989, 862, 898, 2528, 894, - /* 1170 */ 2468, 2040, 2050, 894, 2468, 1000, 502, 2004, 58, 2755, - /* 1180 */ 191, 2083, 2086, 2009, 894, 2468, 115, 2526, 1980, 2465, - /* 1190 */ 1978, 2474, 253, 304, 894, 2468, 1980, 2457, 1978, 40, - /* 1200 */ 894, 2468, 2676, 861, 312, 50, 49, 786, 2955, 56, - /* 1210 */ 54, 53, 52, 51, 837, 2084, 2085, 626, 252, 2912, - /* 1220 */ 345, 2455, 50, 49, 1983, 1984, 56, 54, 53, 52, - /* 1230 */ 51, 624, 1983, 1984, 2037, 2278, 2039, 2042, 2043, 2044, - /* 1240 */ 2045, 2046, 2047, 2048, 2049, 871, 864, 2908, 135, 896, - /* 1250 */ 895, 2067, 2068, 636, 2040, 2050, 2075, 2077, 2078, 2079, - /* 1260 */ 2080, 2082, 2, 303, 2083, 2086, 50, 49, 1666, 1541, - /* 1270 */ 56, 54, 53, 52, 51, 2003, 284, 894, 2468, 1980, - /* 1280 */ 2444, 1978, 1665, 50, 49, 2442, 861, 56, 54, 53, - /* 1290 */ 52, 51, 203, 894, 2468, 2755, 390, 851, 894, 2468, - /* 1300 */ 192, 708, 704, 700, 696, 44, 283, 2441, 731, 730, - /* 1310 */ 2471, 894, 2468, 352, 2999, 1983, 1984, 2037, 887, 2039, - /* 1320 */ 2042, 2043, 2044, 2045, 2046, 2047, 2048, 2049, 871, 864, - /* 1330 */ 2061, 888, 896, 895, 2067, 2068, 14, 13, 2113, 2075, - /* 1340 */ 2077, 2078, 2079, 2080, 2082, 2, 57, 55, 2041, 2277, - /* 1350 */ 2198, 438, 111, 2002, 497, 767, 1979, 281, 640, 937, - /* 1360 */ 615, 50, 49, 464, 2199, 56, 54, 53, 52, 51, - /* 1370 */ 1977, 635, 2081, 1003, 50, 49, 894, 2468, 56, 54, - /* 1380 */ 53, 52, 51, 894, 2468, 2528, 398, 594, 2276, 637, - /* 1390 */ 2505, 2058, 388, 517, 439, 596, 892, 951, 965, 963, - /* 1400 */ 45, 2275, 2076, 380, 2526, 863, 574, 991, 218, 2755, - /* 1410 */ 2197, 2038, 2912, 2699, 2274, 2118, 1985, 987, 983, 979, - /* 1420 */ 975, 2008, 383, 2004, 269, 935, 184, 183, 932, 931, 
- /* 1430 */ 930, 181, 110, 280, 39, 174, 929, 271, 278, 2519, - /* 1440 */ 2907, 543, 933, 276, 682, 2519, 1000, 463, 2755, 15, - /* 1450 */ 1988, 2125, 935, 184, 183, 932, 931, 930, 181, 582, - /* 1460 */ 2464, 2755, 268, 2271, 2270, 235, 953, 536, 130, 2269, - /* 1470 */ 2268, 50, 49, 356, 2755, 56, 54, 53, 52, 51, - /* 1480 */ 309, 770, 2056, 835, 2267, 823, 2084, 2085, 2266, 934, - /* 1490 */ 2603, 2582, 2519, 623, 622, 621, 620, 619, 614, 613, - /* 1500 */ 612, 611, 446, 813, 847, 601, 600, 599, 598, 597, - /* 1510 */ 591, 590, 589, 2265, 584, 583, 461, 2264, 1904, 2528, - /* 1520 */ 575, 1813, 1814, 2755, 2755, 2040, 2050, 1832, 114, 2755, - /* 1530 */ 2755, 2288, 993, 2250, 2251, 2083, 2086, 785, 2527, 785, - /* 1540 */ 2575, 2263, 792, 289, 2755, 2995, 287, 2995, 2755, 354, - /* 1550 */ 1980, 853, 1978, 3, 337, 88, 718, 861, 222, 336, - /* 1560 */ 717, 160, 1880, 3001, 228, 3001, 228, 63, 2996, 820, - /* 1570 */ 2996, 820, 2159, 2755, 759, 100, 758, 2755, 306, 2055, - /* 1580 */ 291, 1645, 293, 290, 578, 292, 1983, 1984, 2037, 1623, - /* 1590 */ 2039, 2042, 2043, 2044, 2045, 2046, 2047, 2048, 2049, 871, - /* 1600 */ 864, 2755, 2339, 896, 895, 2067, 2068, 59, 826, 101, - /* 1610 */ 2075, 2077, 2078, 2079, 2080, 2082, 2, 193, 59, 211, - /* 1620 */ 295, 2790, 404, 294, 736, 2193, 397, 1646, 2337, 2328, - /* 1630 */ 2326, 2776, 1991, 182, 339, 1624, 876, 87, 2318, 59, - /* 1640 */ 822, 402, 86, 14, 13, 85, 74, 2790, 1596, 59, - /* 1650 */ 738, 740, 743, 860, 1987, 2009, 440, 2038, 59, 59, - /* 1660 */ 359, 358, 876, 2472, 2952, 2808, 2273, 87, 262, 650, - /* 1670 */ 648, 645, 643, 361, 360, 363, 362, 179, 365, 364, - /* 1680 */ 129, 2755, 126, 875, 160, 182, 367, 366, 369, 368, - /* 1690 */ 2203, 2808, 371, 370, 1597, 2778, 373, 372, 375, 374, - /* 1700 */ 945, 2202, 318, 377, 376, 379, 378, 2755, 84, 875, - /* 1710 */ 906, 180, 182, 2402, 72, 161, 836, 343, 179, 202, - /* 1720 */ 843, 2958, 2059, 1615, 946, 2320, 328, 802, 2789, 2119, - /* 1730 */ 320, 2836, 2062, 832, 2809, 131, 2791, 879, 2793, 2794, - /* 1740 */ 874, 2051, 1849, 862, 898, 162, 2394, 1613, 2393, 3015, - /* 1750 */ 1839, 2897, 2309, 73, 2789, 492, 2893, 2836, 2948, 2790, - /* 1760 */ 355, 131, 2791, 879, 2793, 2794, 874, 891, 1694, 862, - /* 1770 */ 898, 799, 484, 806, 876, 3015, 2965, 2897, 480, 840, - /* 1780 */ 989, 492, 2893, 824, 2601, 527, 545, 778, 2315, 814, - /* 1790 */ 2516, 396, 2949, 1725, 1733, 1740, 2959, 815, 1738, 330, - /* 1800 */ 325, 185, 333, 2808, 2602, 2790, 5, 98, 97, 567, - /* 1810 */ 2428, 535, 241, 530, 458, 2002, 2012, 544, 237, 2755, - /* 1820 */ 876, 875, 796, 556, 555, 559, 557, 558, 1873, 239, - /* 1830 */ 2103, 2003, 523, 522, 389, 2790, 1990, 236, 437, 572, - /* 1840 */ 250, 546, 1993, 579, 542, 538, 534, 531, 560, 2808, - /* 1850 */ 876, 581, 2988, 585, 629, 587, 1986, 605, 2081, 592, - /* 1860 */ 616, 2594, 625, 618, 628, 2755, 2789, 875, 641, 2836, - /* 1870 */ 630, 642, 639, 131, 2791, 879, 2793, 2794, 874, 2808, - /* 1880 */ 255, 862, 898, 256, 644, 646, 2790, 3015, 2076, 2897, - /* 1890 */ 647, 259, 2010, 492, 2893, 2755, 649, 875, 666, 651, - /* 1900 */ 4, 876, 1985, 2929, 667, 674, 334, 675, 827, 2005, - /* 1910 */ 677, 679, 2789, 267, 2011, 2836, 680, 106, 270, 131, - /* 1920 */ 2791, 879, 2793, 2794, 874, 2013, 273, 862, 898, 681, - /* 1930 */ 2808, 683, 859, 3015, 275, 2897, 2014, 2617, 2015, 492, - /* 1940 */ 2893, 107, 2789, 108, 2611, 2836, 2755, 109, 875, 131, - /* 1950 */ 2791, 879, 2793, 2794, 874, 282, 692, 862, 898, 713, - /* 1960 */ 136, 444, 443, 3015, 746, 2897, 
715, 2458, 747, 492, - /* 1970 */ 2893, 504, 2685, 286, 2454, 288, 432, 2682, 187, 133, - /* 1980 */ 2456, 2451, 188, 189, 761, 513, 2681, 2081, 112, 763, - /* 1990 */ 2006, 305, 175, 2789, 773, 2790, 2836, 392, 2663, 774, - /* 2000 */ 131, 2791, 879, 2793, 2794, 874, 772, 308, 862, 898, - /* 2010 */ 876, 777, 310, 780, 3015, 789, 2897, 2076, 803, 2964, - /* 2020 */ 492, 2893, 841, 315, 2963, 2790, 812, 779, 317, 8, - /* 2030 */ 790, 201, 321, 2936, 319, 788, 1994, 322, 1989, 2808, - /* 2040 */ 876, 787, 817, 324, 323, 816, 3018, 485, 828, 825, - /* 2050 */ 2916, 2994, 326, 166, 329, 2755, 2007, 875, 2913, 2790, - /* 2060 */ 2167, 2165, 1, 214, 335, 230, 176, 839, 2631, 2808, - /* 2070 */ 2630, 2629, 1997, 1999, 876, 489, 393, 394, 844, 177, - /* 2080 */ 845, 849, 852, 71, 2878, 2755, 883, 875, 395, 896, - /* 2090 */ 895, 881, 348, 885, 2747, 2469, 2075, 2077, 2078, 2079, - /* 2100 */ 2080, 2082, 2789, 2808, 886, 2836, 353, 122, 125, 131, - /* 2110 */ 2791, 879, 2793, 2794, 874, 2746, 2742, 862, 898, 2755, - /* 2120 */ 2741, 875, 382, 2870, 995, 2897, 1511, 996, 399, 492, - /* 2130 */ 2893, 900, 2789, 997, 186, 2836, 2733, 2732, 2724, 131, - /* 2140 */ 2791, 879, 2793, 2794, 874, 385, 2790, 862, 898, 2723, - /* 2150 */ 992, 999, 2739, 856, 2738, 2897, 386, 2730, 2729, 492, - /* 2160 */ 2893, 876, 2718, 2717, 450, 1970, 2789, 1946, 2736, 2836, - /* 2170 */ 2735, 2727, 2726, 132, 2791, 879, 2793, 2794, 874, 2715, - /* 2180 */ 2714, 862, 898, 2712, 442, 2711, 2520, 420, 431, 2897, - /* 2190 */ 2808, 421, 2790, 2896, 2893, 62, 401, 451, 445, 506, - /* 2200 */ 505, 1971, 403, 2707, 2706, 2705, 2755, 876, 875, 95, - /* 2210 */ 2700, 532, 765, 1930, 433, 533, 1931, 234, 896, 895, - /* 2220 */ 537, 2698, 2790, 539, 540, 2075, 2077, 2078, 2079, 2080, - /* 2230 */ 2082, 541, 1929, 2697, 2696, 459, 2808, 876, 2694, 2693, - /* 2240 */ 2692, 551, 547, 549, 2691, 553, 1917, 2667, 238, 2666, - /* 2250 */ 240, 1876, 2755, 2789, 875, 96, 2836, 1875, 2644, 2643, - /* 2260 */ 132, 2791, 879, 2793, 2794, 874, 2808, 2642, 862, 898, - /* 2270 */ 565, 2641, 566, 2640, 2584, 570, 2897, 1812, 2581, 573, - /* 2280 */ 858, 2893, 2755, 2580, 875, 2574, 2790, 576, 2571, 577, - /* 2290 */ 2570, 2569, 243, 99, 2568, 2573, 245, 2572, 2567, 877, - /* 2300 */ 2566, 876, 2836, 2564, 2563, 2562, 132, 2791, 879, 2793, - /* 2310 */ 2794, 874, 247, 2790, 862, 898, 593, 2561, 595, 2559, - /* 2320 */ 2558, 2557, 2897, 2556, 2555, 2579, 453, 2893, 876, 2789, - /* 2330 */ 2808, 2554, 2836, 2553, 2552, 2577, 199, 2791, 879, 2793, - /* 2340 */ 2794, 874, 2560, 2551, 862, 898, 2755, 2550, 875, 2549, - /* 2350 */ 2790, 2547, 2546, 2545, 2544, 2543, 2542, 2808, 249, 2541, - /* 2360 */ 105, 2540, 2539, 2538, 2537, 876, 1818, 2534, 2533, 254, - /* 2370 */ 632, 2532, 634, 2755, 2609, 875, 2578, 2576, 2536, 2535, - /* 2380 */ 2531, 2529, 1663, 2359, 447, 2358, 257, 1667, 448, 2357, - /* 2390 */ 258, 2356, 2354, 2789, 2808, 260, 2836, 821, 3016, 2790, - /* 2400 */ 132, 2791, 879, 2793, 2794, 874, 1659, 261, 862, 898, - /* 2410 */ 2755, 2351, 875, 654, 876, 2350, 2897, 658, 652, 2343, - /* 2420 */ 2789, 2894, 656, 2836, 660, 2330, 653, 196, 2791, 879, - /* 2430 */ 2793, 2794, 874, 664, 482, 862, 898, 662, 657, 2304, - /* 2440 */ 91, 661, 263, 2808, 1540, 209, 2303, 92, 2790, 2775, - /* 2450 */ 220, 672, 265, 2665, 2661, 2651, 2639, 2789, 272, 2755, - /* 2460 */ 2836, 875, 274, 876, 430, 2791, 879, 2793, 2794, 874, - /* 2470 */ 2638, 277, 862, 898, 2615, 279, 2790, 2956, 693, 2347, - /* 2480 */ 1589, 2353, 2349, 483, 2608, 2446, 694, 695, 697, 
699, - /* 2490 */ 2790, 876, 2808, 2345, 698, 701, 2342, 705, 702, 2325, - /* 2500 */ 2323, 703, 706, 2324, 707, 876, 2789, 2322, 2755, 2836, - /* 2510 */ 875, 1745, 2790, 430, 2791, 879, 2793, 2794, 874, 2300, - /* 2520 */ 2808, 862, 898, 2448, 1744, 2447, 83, 873, 285, 2340, - /* 2530 */ 1649, 1648, 2338, 1647, 2808, 1631, 2755, 1644, 875, 2329, - /* 2540 */ 1642, 1640, 1639, 473, 474, 1638, 1637, 962, 1636, 964, - /* 2550 */ 2755, 1633, 875, 1632, 1630, 2789, 2808, 475, 2836, 2327, - /* 2560 */ 476, 2299, 423, 2791, 879, 2793, 2794, 874, 2298, 744, - /* 2570 */ 862, 898, 2755, 2297, 875, 754, 741, 748, 2296, 750, - /* 2580 */ 2295, 2790, 752, 2789, 2294, 138, 2836, 1911, 1913, 1915, - /* 2590 */ 199, 2791, 879, 2793, 2794, 874, 876, 2789, 862, 898, - /* 2600 */ 2836, 32, 1910, 2664, 415, 2791, 879, 2793, 2794, 874, - /* 2610 */ 66, 307, 862, 898, 2660, 811, 67, 2790, 764, 2789, - /* 2620 */ 2650, 1882, 2836, 1901, 1884, 2808, 429, 2791, 879, 2793, - /* 2630 */ 2794, 874, 876, 1886, 862, 898, 311, 2863, 775, 77, - /* 2640 */ 190, 2755, 776, 875, 1861, 2790, 2637, 2636, 3000, 17, - /* 2650 */ 781, 1860, 3017, 783, 21, 34, 6, 7, 2220, 314, - /* 2660 */ 876, 2808, 791, 2194, 481, 495, 316, 797, 795, 793, - /* 2670 */ 22, 23, 213, 2201, 2188, 225, 2776, 2755, 200, 875, - /* 2680 */ 2158, 36, 212, 2160, 35, 93, 226, 24, 2789, 2808, - /* 2690 */ 2162, 2836, 227, 75, 25, 430, 2791, 879, 2793, 2794, - /* 2700 */ 874, 491, 2240, 862, 898, 2755, 2241, 875, 2235, 2234, - /* 2710 */ 486, 2239, 2238, 487, 331, 2142, 2141, 69, 18, 205, - /* 2720 */ 2635, 2614, 68, 116, 2789, 117, 2790, 2836, 338, 496, - /* 2730 */ 215, 413, 2791, 879, 2793, 2794, 874, 2613, 2196, 862, - /* 2740 */ 898, 876, 344, 118, 842, 79, 2607, 848, 347, 346, - /* 2750 */ 119, 26, 2789, 2094, 2790, 2836, 11, 13, 850, 430, - /* 2760 */ 2791, 879, 2793, 2794, 874, 2093, 349, 862, 898, 876, - /* 2770 */ 2808, 1995, 27, 28, 20, 47, 206, 913, 2104, 216, - /* 2780 */ 2054, 2053, 916, 2030, 919, 922, 2755, 2052, 875, 878, - /* 2790 */ 2790, 884, 120, 48, 16, 2606, 2443, 29, 2808, 2022, - /* 2800 */ 30, 80, 889, 882, 351, 876, 121, 2256, 357, 126, - /* 2810 */ 498, 890, 89, 2847, 2755, 2846, 875, 2064, 897, 78, - /* 2820 */ 899, 2255, 2254, 2790, 1726, 2253, 905, 907, 515, 1723, - /* 2830 */ 909, 910, 1717, 2789, 2808, 1722, 2836, 912, 876, 1719, - /* 2840 */ 430, 2791, 879, 2793, 2794, 874, 915, 1713, 862, 898, - /* 2850 */ 2755, 918, 875, 921, 1711, 127, 381, 128, 1739, 1716, - /* 2860 */ 90, 760, 1735, 1715, 2836, 1587, 2790, 2808, 425, 2791, - /* 2870 */ 879, 2793, 2794, 874, 1627, 1626, 862, 898, 1714, 936, - /* 2880 */ 1625, 876, 1622, 2755, 1619, 875, 1618, 1617, 1657, 1616, - /* 2890 */ 1614, 1612, 1611, 1610, 1656, 950, 952, 2789, 232, 2790, - /* 2900 */ 2836, 1653, 1608, 1605, 407, 2791, 879, 2793, 2794, 874, - /* 2910 */ 2808, 1607, 862, 898, 876, 1606, 1604, 1593, 1603, 1602, - /* 2920 */ 1651, 1599, 1598, 1595, 1594, 1592, 2755, 2348, 875, 972, - /* 2930 */ 2789, 973, 2346, 2836, 974, 976, 978, 405, 2791, 879, - /* 2940 */ 2793, 2794, 874, 2808, 2344, 862, 898, 980, 977, 982, - /* 2950 */ 981, 2341, 984, 986, 2321, 985, 988, 2319, 990, 2755, - /* 2960 */ 1529, 875, 2293, 2790, 1512, 1517, 994, 1519, 387, 998, - /* 2970 */ 2259, 1981, 400, 2789, 1001, 1002, 2836, 2259, 876, 2259, - /* 2980 */ 408, 2791, 879, 2793, 2794, 874, 2790, 2259, 862, 898, - /* 2990 */ 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, - /* 3000 */ 2259, 876, 2259, 2259, 2259, 2259, 2789, 2808, 2259, 2836, - /* 3010 */ 2259, 2259, 2259, 422, 
2791, 879, 2793, 2794, 874, 2259, - /* 3020 */ 2259, 862, 898, 2755, 2259, 875, 2259, 2259, 2259, 2259, - /* 3030 */ 2808, 2259, 2790, 2259, 2259, 2259, 2259, 2259, 2259, 2259, - /* 3040 */ 2259, 2259, 2259, 2259, 2259, 2259, 2755, 876, 875, 2259, - /* 3050 */ 2259, 2259, 2259, 2259, 2790, 2259, 2259, 2259, 2259, 2259, - /* 3060 */ 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2790, 876, - /* 3070 */ 2789, 2259, 2259, 2836, 2259, 2259, 2808, 409, 2791, 879, - /* 3080 */ 2793, 2794, 874, 876, 2259, 862, 898, 2259, 2259, 2259, - /* 3090 */ 2259, 2259, 2755, 2789, 875, 2259, 2836, 2259, 2808, 2259, - /* 3100 */ 426, 2791, 879, 2793, 2794, 874, 2259, 2259, 862, 898, - /* 3110 */ 2259, 2259, 2808, 2259, 2755, 2259, 875, 2259, 2259, 2259, - /* 3120 */ 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2755, 2259, - /* 3130 */ 875, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2789, - /* 3140 */ 2259, 2259, 2836, 2259, 2790, 2259, 410, 2791, 879, 2793, - /* 3150 */ 2794, 874, 2259, 2259, 862, 898, 2259, 2259, 2259, 876, - /* 3160 */ 2259, 2789, 2259, 2259, 2836, 2259, 2259, 2259, 427, 2791, - /* 3170 */ 879, 2793, 2794, 874, 2259, 2789, 862, 898, 2836, 2790, - /* 3180 */ 2259, 2259, 411, 2791, 879, 2793, 2794, 874, 2808, 2259, - /* 3190 */ 862, 898, 2259, 2259, 876, 2259, 2259, 2259, 2259, 2259, - /* 3200 */ 2259, 2259, 2259, 2259, 2755, 2259, 875, 2259, 2259, 2259, - /* 3210 */ 2259, 2259, 2790, 2259, 2259, 2259, 2259, 2259, 2259, 2259, - /* 3220 */ 2259, 2259, 2259, 2808, 2259, 2259, 2259, 876, 2259, 2259, - /* 3230 */ 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2755, - /* 3240 */ 2259, 875, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, - /* 3250 */ 2259, 2789, 2259, 2259, 2836, 2790, 2808, 2259, 428, 2791, - /* 3260 */ 879, 2793, 2794, 874, 2259, 2259, 862, 898, 2259, 2259, - /* 3270 */ 876, 2259, 2755, 2259, 875, 2259, 2259, 2259, 2259, 2259, - /* 3280 */ 2259, 2259, 2259, 2259, 2259, 2259, 2789, 2259, 2790, 2836, - /* 3290 */ 2259, 2259, 2259, 412, 2791, 879, 2793, 2794, 874, 2808, - /* 3300 */ 2259, 862, 898, 876, 2259, 2259, 2259, 2259, 2259, 2259, - /* 3310 */ 2259, 2259, 2259, 2259, 2259, 2755, 2259, 875, 2259, 2789, - /* 3320 */ 2259, 2259, 2836, 2259, 2259, 2259, 406, 2791, 879, 2793, - /* 3330 */ 2794, 874, 2808, 2259, 862, 898, 2259, 2259, 2259, 2259, - /* 3340 */ 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2755, 2259, - /* 3350 */ 875, 2259, 2790, 2259, 2259, 2259, 2259, 2259, 2259, 2259, - /* 3360 */ 2259, 2259, 2789, 2259, 2259, 2836, 2259, 876, 2259, 414, - /* 3370 */ 2791, 879, 2793, 2794, 874, 2790, 2259, 862, 898, 2259, - /* 3380 */ 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, - /* 3390 */ 876, 2259, 2259, 2259, 2259, 2789, 2808, 2259, 2836, 2259, - /* 3400 */ 2259, 2259, 417, 2791, 879, 2793, 2794, 874, 2259, 2259, - /* 3410 */ 862, 898, 2755, 2259, 875, 2259, 2259, 2259, 2259, 2808, - /* 3420 */ 2259, 2790, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, - /* 3430 */ 2259, 2259, 2259, 2259, 2259, 2755, 876, 875, 2259, 2259, - /* 3440 */ 2259, 2259, 2259, 2790, 2259, 2259, 2259, 2259, 2259, 2259, - /* 3450 */ 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2790, 876, 2789, - /* 3460 */ 2259, 2259, 2836, 2259, 2259, 2808, 418, 2791, 879, 2793, - /* 3470 */ 2794, 874, 876, 2259, 862, 898, 2259, 2259, 2259, 2259, - /* 3480 */ 2259, 2755, 2789, 875, 2259, 2836, 2259, 2808, 2259, 419, - /* 3490 */ 2791, 879, 2793, 2794, 874, 2259, 2259, 862, 898, 2259, - /* 3500 */ 2259, 2808, 2259, 2755, 2259, 875, 2259, 2259, 2259, 2259, - /* 3510 */ 2259, 2259, 2259, 
2259, 2259, 2259, 2259, 2755, 2259, 875, - /* 3520 */ 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2789, 2259, - /* 3530 */ 2259, 2836, 2259, 2790, 2259, 435, 2791, 879, 2793, 2794, - /* 3540 */ 874, 2259, 2259, 862, 898, 2259, 2259, 2259, 876, 2259, - /* 3550 */ 2789, 2259, 2259, 2836, 2259, 2259, 2259, 436, 2791, 879, - /* 3560 */ 2793, 2794, 874, 2259, 2789, 862, 898, 2836, 2790, 2259, - /* 3570 */ 2259, 2802, 2791, 879, 2793, 2794, 874, 2808, 2259, 862, - /* 3580 */ 898, 2259, 2259, 876, 2259, 2259, 2259, 2259, 2259, 2259, - /* 3590 */ 2259, 2259, 2259, 2755, 2259, 875, 2259, 2259, 2259, 2259, - /* 3600 */ 2259, 2790, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, - /* 3610 */ 2259, 2259, 2808, 2259, 2259, 2259, 876, 2259, 2259, 2259, - /* 3620 */ 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2755, 2259, - /* 3630 */ 875, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, - /* 3640 */ 2789, 2259, 2259, 2836, 2790, 2808, 2259, 2801, 2791, 879, - /* 3650 */ 2793, 2794, 874, 2259, 2259, 862, 898, 2259, 2259, 876, - /* 3660 */ 2259, 2755, 2259, 875, 2259, 2259, 2259, 2259, 2259, 2259, - /* 3670 */ 2259, 2259, 2259, 2259, 2259, 2789, 2259, 2790, 2836, 2259, - /* 3680 */ 2259, 2259, 2800, 2791, 879, 2793, 2794, 874, 2808, 2259, - /* 3690 */ 862, 898, 876, 2259, 2259, 2259, 2259, 2259, 2259, 2259, - /* 3700 */ 2259, 2259, 2259, 2259, 2755, 2259, 875, 2259, 2789, 2259, - /* 3710 */ 2259, 2836, 2259, 2259, 2259, 455, 2791, 879, 2793, 2794, - /* 3720 */ 874, 2808, 2259, 862, 898, 2259, 2259, 2259, 2259, 2259, - /* 3730 */ 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2755, 2259, 875, - /* 3740 */ 2259, 2790, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, - /* 3750 */ 2259, 2789, 2259, 2259, 2836, 2259, 876, 2259, 456, 2791, - /* 3760 */ 879, 2793, 2794, 874, 2790, 2259, 862, 898, 2259, 2259, - /* 3770 */ 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 876, - /* 3780 */ 2259, 2259, 2259, 2259, 2789, 2808, 2259, 2836, 2259, 2259, - /* 3790 */ 2259, 452, 2791, 879, 2793, 2794, 874, 2259, 2259, 862, - /* 3800 */ 898, 2755, 2259, 875, 2259, 2259, 2259, 2259, 2808, 2259, - /* 3810 */ 2790, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, - /* 3820 */ 2259, 2259, 2259, 2259, 2755, 876, 875, 2259, 2259, 2259, - /* 3830 */ 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, - /* 3840 */ 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2789, 2259, - /* 3850 */ 2259, 2836, 2259, 2259, 2808, 457, 2791, 879, 2793, 2794, - /* 3860 */ 874, 2259, 2259, 862, 898, 2259, 2259, 2259, 2259, 2259, - /* 3870 */ 2755, 877, 875, 2259, 2836, 2259, 2259, 2259, 425, 2791, - /* 3880 */ 879, 2793, 2794, 874, 2259, 2259, 862, 898, 2259, 2259, - /* 3890 */ 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, - /* 3900 */ 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2259, - /* 3910 */ 2259, 2259, 2259, 2259, 2259, 2259, 2259, 2789, 2259, 2259, - /* 3920 */ 2836, 2259, 2259, 2259, 424, 2791, 879, 2793, 2794, 874, - /* 3930 */ 2259, 2259, 862, 898, + /* 0 */ 821, 670, 3003, 678, 671, 2311, 671, 2311, 2998, 2475, + /* 10 */ 2998, 956, 58, 56, 2430, 57, 55, 54, 53, 52, + /* 20 */ 499, 2264, 1982, 2626, 2007, 464, 820, 229, 771, 3002, + /* 30 */ 218, 2999, 822, 2999, 3001, 503, 1980, 492, 2084, 2355, + /* 40 */ 218, 2793, 149, 2623, 882, 148, 147, 146, 145, 144, + /* 50 */ 143, 142, 141, 140, 582, 2595, 836, 2599, 51, 50, + /* 60 */ 833, 168, 57, 55, 54, 53, 52, 2599, 2079, 149, + /* 70 */ 767, 865, 148, 147, 146, 145, 144, 143, 142, 141, + /* 80 */ 140, 895, 1988, 51, 50, 2811, 757, 57, 55, 
54, + /* 90 */ 53, 52, 51, 50, 2698, 2011, 57, 55, 54, 53, + /* 100 */ 52, 2758, 751, 877, 755, 753, 299, 298, 245, 690, + /* 110 */ 680, 2665, 1002, 895, 693, 59, 973, 972, 971, 970, + /* 120 */ 528, 249, 969, 968, 173, 963, 962, 961, 960, 959, + /* 130 */ 958, 957, 172, 951, 950, 949, 527, 526, 946, 945, + /* 140 */ 944, 209, 208, 943, 523, 942, 941, 940, 2792, 43, + /* 150 */ 351, 2840, 2087, 2088, 810, 132, 2794, 881, 2796, 2797, + /* 160 */ 876, 2462, 2751, 864, 900, 516, 2220, 105, 2613, 211, + /* 170 */ 691, 2900, 104, 51, 50, 494, 2896, 57, 55, 54, + /* 180 */ 53, 52, 2793, 2811, 205, 2908, 832, 770, 160, 831, + /* 190 */ 3003, 2043, 2053, 342, 343, 230, 2998, 878, 341, 768, + /* 200 */ 2064, 2086, 2089, 2947, 2263, 51, 50, 2010, 61, 57, + /* 210 */ 55, 54, 53, 52, 820, 229, 1983, 2173, 1981, 2999, + /* 220 */ 822, 2180, 9, 863, 505, 505, 2811, 2007, 158, 157, + /* 230 */ 156, 155, 154, 153, 152, 151, 150, 900, 900, 103, + /* 240 */ 456, 895, 2758, 2261, 877, 937, 185, 184, 934, 933, + /* 250 */ 932, 182, 1986, 1987, 2040, 809, 2042, 2045, 2046, 2047, + /* 260 */ 2048, 2049, 2050, 2051, 2052, 873, 866, 2007, 225, 898, + /* 270 */ 897, 2071, 2072, 2073, 2074, 2075, 2078, 2080, 2081, 2082, + /* 280 */ 2083, 2085, 2, 58, 56, 2176, 686, 2793, 675, 2792, + /* 290 */ 2114, 499, 2840, 1982, 672, 689, 132, 2794, 881, 2796, + /* 300 */ 2797, 876, 875, 2244, 864, 900, 744, 1980, 170, 2084, + /* 310 */ 179, 2871, 2900, 2192, 833, 168, 494, 2896, 51, 50, + /* 320 */ 895, 758, 57, 55, 54, 53, 52, 3003, 573, 2012, + /* 330 */ 2626, 2811, 531, 2151, 73, 2998, 61, 530, 220, 2079, + /* 340 */ 300, 2151, 865, 33, 501, 2007, 19, 2758, 667, 877, + /* 350 */ 2623, 882, 44, 1988, 3002, 2115, 747, 665, 2999, 3000, + /* 360 */ 661, 657, 73, 741, 739, 2752, 58, 56, 73, 265, + /* 370 */ 297, 896, 2471, 673, 499, 2319, 1982, 2209, 635, 633, + /* 380 */ 333, 436, 771, 1002, 243, 77, 15, 2568, 787, 1535, + /* 390 */ 1980, 159, 2084, 203, 2792, 328, 2998, 2840, 2044, 716, + /* 400 */ 335, 431, 2794, 881, 2796, 2797, 876, 874, 1542, 864, + /* 410 */ 900, 856, 2865, 2358, 3004, 229, 83, 125, 12, 2999, + /* 420 */ 822, 82, 2079, 2087, 2088, 865, 2221, 520, 505, 19, + /* 430 */ 2524, 2526, 2120, 1537, 1540, 1541, 1988, 835, 198, 2908, + /* 440 */ 2909, 900, 166, 2913, 803, 802, 2207, 2208, 2210, 2211, + /* 450 */ 2212, 42, 496, 2109, 2110, 2111, 2112, 2113, 2117, 2118, + /* 460 */ 2119, 2041, 2043, 2053, 773, 2665, 1002, 2008, 564, 15, + /* 470 */ 563, 62, 2086, 2089, 2920, 2148, 2149, 2150, 2920, 2920, + /* 480 */ 2920, 2920, 2920, 2148, 2149, 2150, 2768, 1983, 1988, 1981, + /* 490 */ 737, 736, 735, 195, 863, 193, 73, 727, 165, 731, + /* 500 */ 811, 2404, 562, 730, 518, 2473, 2087, 2088, 729, 734, + /* 510 */ 474, 473, 906, 1653, 728, 2772, 1828, 1829, 472, 724, + /* 520 */ 723, 722, 2172, 1986, 1987, 2040, 335, 2042, 2045, 2046, + /* 530 */ 2047, 2048, 2049, 2050, 2051, 2052, 873, 866, 2455, 443, + /* 540 */ 898, 897, 2071, 2072, 183, 2043, 2053, 2078, 2080, 2081, + /* 550 */ 2082, 2083, 2085, 2, 335, 2086, 2089, 687, 610, 1655, + /* 560 */ 335, 471, 470, 609, 1715, 2774, 2776, 495, 303, 2151, + /* 570 */ 1983, 608, 1981, 222, 833, 168, 2010, 863, 900, 1706, + /* 580 */ 929, 928, 927, 1710, 926, 1712, 1713, 925, 922, 2518, + /* 590 */ 1721, 919, 1723, 1724, 916, 913, 910, 51, 50, 1754, + /* 600 */ 1755, 57, 55, 54, 53, 52, 1986, 1987, 2040, 930, + /* 610 */ 2042, 2045, 2046, 2047, 2048, 2049, 2050, 2051, 2052, 873, + /* 620 */ 866, 688, 2619, 898, 897, 2071, 2072, 65, 2915, 2626, + /* 630 */ 2078, 2080, 2081, 
2082, 2083, 2085, 2, 12, 58, 56, + /* 640 */ 511, 469, 468, 392, 718, 2793, 499, 302, 1982, 2624, + /* 650 */ 882, 301, 806, 1909, 1910, 1715, 2912, 833, 168, 196, + /* 660 */ 878, 2275, 1980, 335, 2084, 720, 905, 904, 903, 719, + /* 670 */ 1706, 929, 928, 927, 1710, 926, 1712, 1713, 872, 871, + /* 680 */ 2793, 1721, 870, 1723, 1724, 869, 913, 910, 335, 2811, + /* 690 */ 1562, 2286, 1561, 2008, 2079, 878, 2551, 865, 138, 2908, + /* 700 */ 2909, 19, 166, 2913, 2674, 2758, 164, 877, 1988, 218, + /* 710 */ 2920, 2148, 2149, 2150, 2920, 2920, 2920, 2920, 2920, 896, + /* 720 */ 2471, 58, 56, 2090, 2811, 114, 550, 2768, 1563, 499, + /* 730 */ 451, 1982, 2285, 479, 2040, 759, 2600, 2095, 1002, 522, + /* 740 */ 2758, 15, 877, 2007, 135, 1980, 1562, 2084, 1561, 333, + /* 750 */ 2011, 2758, 2792, 518, 2473, 2840, 2772, 588, 2595, 408, + /* 760 */ 2794, 881, 2796, 2797, 876, 725, 31, 864, 900, 812, + /* 770 */ 807, 800, 796, 896, 2471, 619, 2595, 2079, 2087, 2088, + /* 780 */ 865, 199, 2908, 2909, 1563, 166, 2913, 2792, 1646, 2711, + /* 790 */ 2840, 1988, 2758, 159, 132, 2794, 881, 2796, 2797, 876, + /* 800 */ 502, 721, 864, 900, 481, 2673, 2774, 2777, 2875, 192, + /* 810 */ 2900, 247, 896, 2471, 494, 2896, 95, 2043, 2053, 900, + /* 820 */ 2476, 1002, 1951, 939, 59, 1855, 1856, 2086, 2089, 252, + /* 830 */ 51, 50, 234, 171, 57, 55, 54, 53, 52, 737, + /* 840 */ 736, 735, 1983, 2463, 1981, 787, 727, 165, 731, 863, + /* 850 */ 2445, 612, 730, 2998, 510, 509, 47, 729, 734, 474, + /* 860 */ 473, 2087, 2088, 728, 554, 2712, 611, 472, 724, 723, + /* 870 */ 722, 3004, 229, 2011, 1854, 1857, 2999, 822, 1986, 1987, + /* 880 */ 2040, 111, 2042, 2045, 2046, 2047, 2048, 2049, 2050, 2051, + /* 890 */ 2052, 873, 866, 556, 552, 898, 897, 2071, 2072, 467, + /* 900 */ 2043, 2053, 2078, 2080, 2081, 2082, 2083, 2085, 2, 2466, + /* 910 */ 2086, 2089, 51, 50, 2044, 868, 57, 55, 54, 53, + /* 920 */ 52, 787, 392, 224, 939, 1983, 867, 1981, 629, 2998, + /* 930 */ 896, 2471, 863, 267, 525, 524, 2531, 673, 514, 2319, + /* 940 */ 521, 566, 1665, 1982, 462, 2531, 565, 3004, 229, 192, + /* 950 */ 66, 46, 2999, 822, 2342, 2529, 1664, 1980, 1989, 2284, + /* 960 */ 2476, 1986, 1987, 2040, 840, 2042, 2045, 2046, 2047, 2048, + /* 970 */ 2049, 2050, 2051, 2052, 873, 866, 738, 2041, 898, 897, + /* 980 */ 2071, 2072, 480, 2673, 2012, 2078, 2080, 2081, 2082, 2083, + /* 990 */ 2085, 2, 58, 56, 2201, 2793, 254, 787, 2531, 12, + /* 1000 */ 499, 10, 1982, 1988, 2283, 2998, 490, 2116, 2202, 115, + /* 1010 */ 836, 2153, 2154, 2155, 2156, 2157, 1980, 2529, 2084, 2758, + /* 1020 */ 2059, 628, 253, 3004, 229, 896, 2471, 37, 2999, 822, + /* 1030 */ 2251, 2282, 640, 1002, 821, 626, 896, 2471, 314, 2811, + /* 1040 */ 2525, 2526, 2998, 1883, 2793, 692, 232, 2679, 2079, 896, + /* 1050 */ 2471, 865, 896, 2471, 2200, 2758, 570, 877, 2915, 878, + /* 1060 */ 820, 229, 1988, 2531, 2758, 2999, 822, 896, 2471, 571, + /* 1070 */ 896, 2471, 590, 896, 2471, 58, 56, 896, 2471, 385, + /* 1080 */ 896, 2471, 848, 499, 2121, 1982, 2911, 604, 2811, 116, + /* 1090 */ 605, 2758, 1002, 606, 720, 59, 2915, 2468, 719, 1980, + /* 1100 */ 305, 2084, 2792, 40, 2758, 2840, 877, 2012, 304, 132, + /* 1110 */ 2794, 881, 2796, 2797, 876, 896, 2471, 864, 900, 2531, + /* 1120 */ 1950, 896, 2471, 211, 2910, 2900, 2281, 504, 953, 494, + /* 1130 */ 2896, 2079, 2087, 2088, 865, 313, 39, 1983, 2529, 1981, + /* 1140 */ 1992, 839, 51, 50, 192, 1988, 57, 55, 54, 53, + /* 1150 */ 52, 2792, 513, 512, 2840, 2477, 2250, 2948, 197, 2794, + /* 1160 */ 881, 2796, 2797, 876, 1565, 1566, 864, 900, 
54, 53, + /* 1170 */ 52, 2043, 2053, 1986, 1987, 1002, 2460, 2007, 59, 896, + /* 1180 */ 2471, 2086, 2089, 51, 50, 2531, 2758, 57, 55, 54, + /* 1190 */ 53, 52, 124, 519, 712, 711, 1983, 955, 1981, 346, + /* 1200 */ 714, 713, 2458, 863, 2529, 51, 50, 788, 2958, 57, + /* 1210 */ 55, 54, 53, 52, 41, 2087, 2088, 2280, 2279, 2464, + /* 1220 */ 51, 50, 14, 13, 57, 55, 54, 53, 52, 733, + /* 1230 */ 732, 399, 1986, 1987, 2040, 2508, 2042, 2045, 2046, 2047, + /* 1240 */ 2048, 2049, 2050, 2051, 2052, 873, 866, 111, 2278, 898, + /* 1250 */ 897, 2071, 2072, 175, 2043, 2053, 2078, 2080, 2081, 2082, + /* 1260 */ 2083, 2085, 2, 786, 2086, 2089, 51, 50, 896, 2471, + /* 1270 */ 57, 55, 54, 53, 52, 2467, 285, 2758, 2758, 1983, + /* 1280 */ 2447, 1981, 857, 859, 2872, 2872, 863, 825, 853, 51, + /* 1290 */ 50, 2060, 204, 57, 55, 54, 53, 52, 896, 2471, + /* 1300 */ 391, 710, 706, 702, 698, 38, 284, 2444, 2758, 169, + /* 1310 */ 896, 2471, 2871, 2062, 2011, 1986, 1987, 2040, 353, 2042, + /* 1320 */ 2045, 2046, 2047, 2048, 2049, 2050, 2051, 2052, 873, 866, + /* 1330 */ 889, 2007, 898, 897, 2071, 2072, 1899, 2128, 2277, 2078, + /* 1340 */ 2080, 2081, 2082, 2083, 2085, 2, 58, 56, 2044, 2274, + /* 1350 */ 1669, 440, 112, 2005, 499, 769, 1982, 282, 3002, 1542, + /* 1360 */ 617, 51, 50, 466, 1668, 57, 55, 54, 53, 52, + /* 1370 */ 1980, 637, 2084, 1005, 51, 50, 896, 2471, 57, 55, + /* 1380 */ 54, 53, 52, 896, 2471, 1540, 1541, 596, 545, 639, + /* 1390 */ 896, 2471, 389, 1544, 441, 598, 890, 815, 2758, 2006, + /* 1400 */ 2273, 2272, 2079, 894, 71, 865, 576, 993, 219, 2758, + /* 1410 */ 381, 2041, 236, 45, 2271, 784, 1988, 989, 985, 981, + /* 1420 */ 977, 2270, 384, 2269, 270, 937, 185, 184, 934, 933, + /* 1430 */ 932, 182, 193, 281, 967, 965, 931, 272, 279, 2522, + /* 1440 */ 642, 310, 2474, 277, 684, 828, 1002, 465, 2531, 15, + /* 1450 */ 2291, 995, 937, 185, 184, 934, 933, 932, 182, 584, + /* 1460 */ 2758, 2758, 269, 2268, 2267, 2702, 2162, 2530, 131, 2065, + /* 1470 */ 2266, 51, 50, 357, 2758, 57, 55, 54, 53, 52, + /* 1480 */ 2533, 2758, 935, 2758, 89, 2522, 2087, 2088, 2578, 3, + /* 1490 */ 2606, 2585, 175, 625, 624, 623, 622, 621, 616, 615, + /* 1500 */ 614, 613, 448, 64, 849, 603, 602, 601, 600, 599, + /* 1510 */ 593, 592, 591, 2448, 586, 585, 463, 1648, 2340, 538, + /* 1520 */ 577, 1816, 1817, 2758, 2758, 2043, 2053, 1835, 936, 161, + /* 1530 */ 2758, 2522, 580, 2331, 290, 2086, 2089, 288, 102, 292, + /* 1540 */ 740, 294, 291, 101, 293, 726, 174, 2329, 2012, 355, + /* 1550 */ 1983, 855, 1981, 296, 338, 742, 295, 863, 761, 337, + /* 1560 */ 760, 794, 1626, 1649, 398, 2041, 51, 50, 1644, 745, + /* 1570 */ 57, 55, 54, 53, 52, 1907, 2779, 60, 307, 2061, + /* 1580 */ 2253, 2254, 2475, 60, 212, 826, 1986, 1987, 2040, 223, + /* 1590 */ 2042, 2045, 2046, 2047, 2048, 2049, 2050, 2051, 2052, 873, + /* 1600 */ 866, 14, 13, 898, 897, 2071, 2072, 183, 1627, 340, + /* 1610 */ 2078, 2080, 2081, 2082, 2083, 2085, 2, 194, 88, 51, + /* 1620 */ 50, 2793, 405, 57, 55, 54, 53, 52, 2405, 764, + /* 1630 */ 60, 360, 359, 2276, 862, 638, 878, 75, 2321, 60, + /* 1640 */ 2781, 403, 87, 60, 2196, 86, 60, 2793, 1991, 88, + /* 1650 */ 180, 161, 362, 361, 2961, 203, 442, 364, 363, 772, + /* 1660 */ 2206, 130, 878, 127, 2955, 2811, 2205, 319, 263, 652, + /* 1670 */ 650, 647, 645, 329, 2058, 183, 947, 366, 365, 368, + /* 1680 */ 367, 2758, 1990, 877, 824, 787, 804, 370, 369, 321, + /* 1690 */ 838, 2811, 344, 2998, 372, 371, 837, 374, 373, 1618, + /* 1700 */ 834, 845, 85, 376, 375, 378, 377, 2758, 163, 877, + /* 1710 */ 2812, 3004, 229, 2063, 
73, 787, 2999, 822, 380, 379, + /* 1720 */ 2122, 2312, 2066, 2998, 2397, 2396, 2054, 908, 2792, 1852, + /* 1730 */ 181, 2840, 1842, 356, 893, 132, 2794, 881, 2796, 2797, + /* 1740 */ 876, 3004, 229, 864, 900, 829, 2999, 822, 183, 3018, + /* 1750 */ 948, 2900, 787, 74, 2792, 494, 2896, 2840, 1697, 2793, + /* 1760 */ 2998, 132, 2794, 881, 2796, 2797, 876, 801, 162, 864, + /* 1770 */ 900, 180, 1599, 1616, 878, 3018, 2968, 2900, 3004, 229, + /* 1780 */ 486, 494, 2896, 2999, 822, 397, 2951, 808, 482, 842, + /* 1790 */ 2604, 529, 547, 2318, 2519, 780, 326, 2952, 2962, 816, + /* 1800 */ 817, 331, 2605, 2811, 136, 2793, 2323, 99, 98, 569, + /* 1810 */ 1728, 2106, 242, 1736, 334, 2431, 5, 537, 1600, 2758, + /* 1820 */ 878, 877, 798, 532, 460, 561, 559, 2005, 546, 2015, + /* 1830 */ 1994, 1743, 525, 524, 557, 2793, 558, 237, 439, 1876, + /* 1840 */ 238, 548, 1996, 560, 544, 540, 536, 533, 562, 2811, + /* 1850 */ 878, 1741, 2991, 240, 186, 390, 1989, 574, 2084, 2006, + /* 1860 */ 581, 991, 251, 583, 1993, 2758, 2792, 877, 589, 2840, + /* 1870 */ 587, 631, 594, 132, 2794, 881, 2796, 2797, 876, 2811, + /* 1880 */ 607, 864, 900, 643, 620, 618, 2793, 3018, 2079, 2900, + /* 1890 */ 627, 2597, 630, 494, 2896, 2758, 641, 877, 632, 644, + /* 1900 */ 256, 878, 1988, 2932, 257, 646, 335, 648, 649, 260, + /* 1910 */ 651, 653, 2792, 2013, 668, 2840, 4, 676, 679, 132, + /* 1920 */ 2794, 881, 2796, 2797, 876, 268, 669, 864, 900, 677, + /* 1930 */ 2811, 107, 861, 3018, 2008, 2900, 2014, 681, 682, 494, + /* 1940 */ 2896, 2016, 2792, 685, 683, 2840, 2758, 271, 877, 132, + /* 1950 */ 2794, 881, 2796, 2797, 876, 2017, 274, 864, 900, 276, + /* 1960 */ 2620, 446, 445, 3018, 108, 2900, 2018, 109, 110, 494, + /* 1970 */ 2896, 506, 2614, 694, 283, 715, 763, 748, 749, 137, + /* 1980 */ 765, 717, 113, 434, 2461, 515, 287, 2084, 2688, 2685, + /* 1990 */ 2684, 393, 2457, 2792, 289, 2793, 2840, 188, 134, 2459, + /* 2000 */ 132, 2794, 881, 2796, 2797, 876, 2009, 2454, 864, 900, + /* 2010 */ 878, 189, 176, 190, 3018, 775, 2900, 2079, 306, 774, + /* 2020 */ 494, 2896, 2666, 779, 311, 2793, 776, 805, 782, 2967, + /* 2030 */ 791, 843, 316, 8, 2939, 814, 1997, 309, 1992, 2811, + /* 2040 */ 878, 792, 320, 781, 790, 2966, 318, 202, 819, 322, + /* 2050 */ 818, 789, 2919, 487, 323, 2758, 327, 877, 324, 2793, + /* 2060 */ 830, 827, 167, 3021, 325, 2010, 2170, 2168, 231, 2811, + /* 2070 */ 2916, 330, 2000, 2002, 878, 336, 1, 2997, 215, 177, + /* 2080 */ 841, 2634, 394, 2633, 2632, 2758, 491, 877, 395, 898, + /* 2090 */ 897, 846, 847, 178, 72, 851, 2078, 2080, 2081, 2082, + /* 2100 */ 2083, 2085, 2792, 2811, 854, 2840, 885, 349, 883, 132, + /* 2110 */ 2794, 881, 2796, 2797, 876, 2881, 887, 864, 900, 2758, + /* 2120 */ 354, 877, 888, 2873, 396, 2900, 2472, 2750, 123, 494, + /* 2130 */ 2896, 2749, 2792, 126, 2745, 2840, 2744, 2736, 2735, 132, + /* 2140 */ 2794, 881, 2796, 2797, 876, 1514, 2793, 864, 900, 2727, + /* 2150 */ 2726, 2742, 2741, 858, 400, 2900, 2733, 997, 2732, 494, + /* 2160 */ 2896, 878, 902, 998, 2721, 1973, 2792, 1949, 2720, 2840, + /* 2170 */ 2739, 383, 2738, 133, 2794, 881, 2796, 2797, 876, 187, + /* 2180 */ 2730, 864, 900, 999, 386, 994, 767, 387, 402, 2900, + /* 2190 */ 2811, 2729, 2793, 2899, 2896, 2718, 2717, 2715, 2714, 508, + /* 2200 */ 507, 1974, 2523, 1001, 63, 444, 2758, 878, 877, 422, + /* 2210 */ 452, 447, 433, 453, 435, 404, 2710, 423, 898, 897, + /* 2220 */ 2709, 2708, 2793, 96, 2703, 2078, 2080, 2081, 2082, 2083, + /* 2230 */ 2085, 534, 535, 1933, 1934, 235, 2811, 878, 539, 2701, + /* 2240 */ 541, 542, 543, 
1932, 2700, 2699, 461, 2697, 549, 2696, + /* 2250 */ 551, 2695, 2758, 2792, 877, 553, 2840, 1920, 555, 2670, + /* 2260 */ 133, 2794, 881, 2796, 2797, 876, 2811, 2694, 864, 900, + /* 2270 */ 239, 2669, 241, 1879, 97, 1878, 2900, 2647, 2646, 2645, + /* 2280 */ 860, 2896, 2758, 567, 877, 568, 2793, 2644, 2643, 2587, + /* 2290 */ 2584, 572, 1815, 2583, 575, 2577, 578, 579, 2574, 879, + /* 2300 */ 2573, 878, 2840, 244, 2572, 2571, 133, 2794, 881, 2796, + /* 2310 */ 2797, 876, 100, 2793, 864, 900, 2576, 246, 2575, 2570, + /* 2320 */ 2569, 2567, 2900, 2566, 2565, 248, 455, 2896, 878, 2792, + /* 2330 */ 2811, 2564, 2840, 595, 597, 2562, 200, 2794, 881, 2796, + /* 2340 */ 2797, 876, 2561, 2560, 864, 900, 2758, 2559, 877, 2558, + /* 2350 */ 2793, 2582, 2557, 2556, 2555, 2580, 2563, 2811, 2554, 2553, + /* 2360 */ 2552, 2550, 2549, 2548, 2547, 878, 250, 2544, 2543, 106, + /* 2370 */ 2542, 2541, 2540, 2758, 2546, 877, 2545, 2612, 2581, 2579, + /* 2380 */ 2539, 2538, 2537, 255, 2536, 634, 2535, 1821, 636, 2534, + /* 2390 */ 2532, 1666, 449, 2792, 2811, 1670, 2840, 823, 3019, 2793, + /* 2400 */ 133, 2794, 881, 2796, 2797, 876, 2362, 258, 864, 900, + /* 2410 */ 2758, 2361, 877, 259, 878, 2360, 2900, 2359, 261, 1662, + /* 2420 */ 2792, 2897, 2357, 2840, 2354, 262, 2353, 197, 2794, 881, + /* 2430 */ 2796, 2797, 876, 654, 484, 864, 900, 450, 656, 658, + /* 2440 */ 660, 655, 659, 2811, 2346, 662, 663, 2333, 2793, 664, + /* 2450 */ 666, 2307, 92, 210, 264, 2778, 1543, 2792, 2306, 2758, + /* 2460 */ 2840, 877, 93, 878, 432, 2794, 881, 2796, 2797, 876, + /* 2470 */ 221, 2668, 864, 900, 674, 266, 2793, 2959, 273, 275, + /* 2480 */ 2641, 2618, 278, 485, 2664, 2654, 2642, 280, 2611, 2449, + /* 2490 */ 2793, 878, 2811, 2356, 2352, 695, 697, 1592, 2350, 696, + /* 2500 */ 699, 701, 700, 2348, 703, 878, 2792, 704, 2758, 2840, + /* 2510 */ 877, 705, 2793, 432, 2794, 881, 2796, 2797, 876, 2345, + /* 2520 */ 2811, 864, 900, 707, 708, 2328, 709, 875, 2326, 2327, + /* 2530 */ 2325, 2303, 2451, 1748, 2811, 84, 2758, 1747, 877, 286, + /* 2540 */ 2450, 1651, 1652, 1650, 1647, 1645, 1643, 964, 1642, 1641, + /* 2550 */ 2758, 2343, 877, 1640, 1639, 2792, 2811, 966, 2840, 1636, + /* 2560 */ 1634, 475, 425, 2794, 881, 2796, 2797, 876, 2341, 1635, + /* 2570 */ 864, 900, 2758, 1633, 877, 476, 2332, 477, 743, 2330, + /* 2580 */ 478, 2793, 746, 2792, 2302, 2301, 2840, 2300, 750, 2299, + /* 2590 */ 200, 2794, 881, 2796, 2797, 876, 878, 2792, 864, 900, + /* 2600 */ 2840, 752, 2298, 754, 413, 2794, 881, 2796, 2797, 876, + /* 2610 */ 2297, 139, 864, 900, 756, 813, 1914, 2793, 1916, 2792, + /* 2620 */ 1918, 32, 2840, 2667, 1913, 2811, 431, 2794, 881, 2796, + /* 2630 */ 2797, 876, 878, 67, 864, 900, 78, 2866, 2663, 68, + /* 2640 */ 766, 2758, 1904, 877, 308, 2793, 1887, 1885, 1889, 2653, + /* 2650 */ 777, 191, 3020, 778, 312, 2640, 2639, 783, 1864, 1863, + /* 2660 */ 878, 2811, 3003, 21, 785, 497, 2223, 793, 483, 34, + /* 2670 */ 6, 315, 7, 2197, 795, 797, 317, 2758, 22, 877, + /* 2680 */ 799, 23, 2204, 214, 226, 201, 213, 17, 2792, 2811, + /* 2690 */ 2191, 2840, 35, 2779, 36, 432, 2794, 881, 2796, 2797, + /* 2700 */ 876, 493, 2161, 864, 900, 2758, 227, 877, 2163, 94, + /* 2710 */ 24, 228, 2165, 2243, 76, 2244, 25, 2238, 2237, 2145, + /* 2720 */ 488, 2242, 2241, 489, 2792, 2144, 2793, 2840, 332, 498, + /* 2730 */ 70, 417, 2794, 881, 2796, 2797, 876, 69, 2638, 864, + /* 2740 */ 900, 878, 206, 2617, 117, 18, 118, 2616, 119, 347, + /* 2750 */ 339, 2199, 2792, 2610, 2793, 2840, 216, 844, 345, 432, + /* 2760 */ 2794, 881, 2796, 2797, 876, 120, 
80, 864, 900, 878, + /* 2770 */ 2811, 2097, 26, 850, 852, 348, 350, 2096, 13, 11, + /* 2780 */ 1998, 2057, 27, 28, 20, 48, 2758, 2107, 877, 915, + /* 2790 */ 2793, 2056, 207, 918, 921, 217, 2033, 880, 2811, 2609, + /* 2800 */ 886, 121, 2446, 924, 49, 878, 2055, 16, 29, 30, + /* 2810 */ 500, 2025, 884, 81, 2758, 352, 877, 891, 122, 892, + /* 2820 */ 358, 90, 127, 2793, 2850, 2849, 2259, 2258, 2068, 899, + /* 2830 */ 79, 2257, 1729, 2792, 2811, 2256, 2840, 907, 878, 901, + /* 2840 */ 432, 2794, 881, 2796, 2797, 876, 517, 909, 864, 900, + /* 2850 */ 2758, 1726, 877, 911, 914, 912, 917, 920, 1725, 923, + /* 2860 */ 1720, 762, 1722, 1719, 2840, 1716, 2793, 2811, 427, 2794, + /* 2870 */ 881, 2796, 2797, 876, 1718, 1714, 864, 900, 128, 382, + /* 2880 */ 129, 878, 1717, 2758, 1742, 877, 91, 1738, 1590, 938, + /* 2890 */ 1630, 1629, 1628, 1625, 1622, 1621, 1620, 2792, 1619, 2793, + /* 2900 */ 2840, 1660, 1617, 952, 409, 2794, 881, 2796, 2797, 876, + /* 2910 */ 2811, 1615, 864, 900, 878, 1614, 1613, 1659, 233, 1611, + /* 2920 */ 954, 1608, 1610, 1609, 1607, 1606, 2758, 1605, 877, 1656, + /* 2930 */ 2792, 1654, 1602, 2840, 1601, 1596, 2351, 406, 2794, 881, + /* 2940 */ 2796, 2797, 876, 2811, 1598, 864, 900, 1597, 1595, 976, + /* 2950 */ 974, 975, 2349, 978, 980, 979, 2347, 982, 983, 2758, + /* 2960 */ 984, 877, 2344, 2793, 986, 2324, 988, 990, 2322, 987, + /* 2970 */ 992, 1532, 2296, 2792, 996, 1515, 2840, 388, 878, 1520, + /* 2980 */ 410, 2794, 881, 2796, 2797, 876, 2793, 1522, 864, 900, + /* 2990 */ 1000, 2262, 1984, 401, 1003, 1004, 2262, 2262, 2262, 2262, + /* 3000 */ 2262, 878, 2262, 2262, 2262, 2262, 2792, 2811, 2262, 2840, + /* 3010 */ 2262, 2262, 2262, 424, 2794, 881, 2796, 2797, 876, 2262, + /* 3020 */ 2262, 864, 900, 2758, 2262, 877, 2262, 2262, 2262, 2262, + /* 3030 */ 2811, 2262, 2793, 2262, 2262, 2262, 2262, 2262, 2262, 2262, + /* 3040 */ 2262, 2262, 2262, 2262, 2262, 2262, 2758, 878, 877, 2262, + /* 3050 */ 2262, 2262, 2262, 2262, 2793, 2262, 2262, 2262, 2262, 2262, + /* 3060 */ 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2793, 878, + /* 3070 */ 2792, 2262, 2262, 2840, 2262, 2262, 2811, 411, 2794, 881, + /* 3080 */ 2796, 2797, 876, 878, 2262, 864, 900, 2262, 2262, 2262, + /* 3090 */ 2262, 2262, 2758, 2792, 877, 2262, 2840, 2262, 2811, 2262, + /* 3100 */ 412, 2794, 881, 2796, 2797, 876, 2262, 2262, 864, 900, + /* 3110 */ 2262, 2262, 2811, 2262, 2758, 2262, 877, 2262, 2262, 2262, + /* 3120 */ 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2758, 2262, + /* 3130 */ 877, 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2792, + /* 3140 */ 2262, 2262, 2840, 2262, 2793, 2262, 428, 2794, 881, 2796, + /* 3150 */ 2797, 876, 2262, 2262, 864, 900, 2262, 2262, 2262, 878, + /* 3160 */ 2262, 2792, 2262, 2262, 2840, 2262, 2262, 2262, 414, 2794, + /* 3170 */ 881, 2796, 2797, 876, 2262, 2792, 864, 900, 2840, 2793, + /* 3180 */ 2262, 2262, 429, 2794, 881, 2796, 2797, 876, 2811, 2262, + /* 3190 */ 864, 900, 2262, 2262, 878, 2262, 2262, 2262, 2262, 2262, + /* 3200 */ 2262, 2262, 2262, 2262, 2758, 2262, 877, 2262, 2262, 2262, + /* 3210 */ 2262, 2262, 2793, 2262, 2262, 2262, 2262, 2262, 2262, 2262, + /* 3220 */ 2262, 2262, 2262, 2811, 2262, 2262, 2262, 878, 2262, 2262, + /* 3230 */ 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2758, + /* 3240 */ 2262, 877, 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2262, + /* 3250 */ 2262, 2792, 2262, 2262, 2840, 2793, 2811, 2262, 415, 2794, + /* 3260 */ 881, 2796, 2797, 876, 2262, 2262, 864, 900, 2262, 2262, + /* 3270 */ 878, 2262, 2758, 2262, 877, 2262, 2262, 
2262, 2262, 2262, + /* 3280 */ 2262, 2262, 2262, 2262, 2262, 2262, 2792, 2262, 2793, 2840, + /* 3290 */ 2262, 2262, 2262, 430, 2794, 881, 2796, 2797, 876, 2811, + /* 3300 */ 2262, 864, 900, 878, 2262, 2262, 2262, 2262, 2262, 2262, + /* 3310 */ 2262, 2262, 2262, 2262, 2262, 2758, 2262, 877, 2262, 2792, + /* 3320 */ 2262, 2262, 2840, 2262, 2262, 2262, 416, 2794, 881, 2796, + /* 3330 */ 2797, 876, 2811, 2262, 864, 900, 2262, 2262, 2262, 2262, + /* 3340 */ 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2758, 2262, + /* 3350 */ 877, 2262, 2793, 2262, 2262, 2262, 2262, 2262, 2262, 2262, + /* 3360 */ 2262, 2262, 2792, 2262, 2262, 2840, 2262, 878, 2262, 407, + /* 3370 */ 2794, 881, 2796, 2797, 876, 2793, 2262, 864, 900, 2262, + /* 3380 */ 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2262, + /* 3390 */ 878, 2262, 2262, 2262, 2262, 2792, 2811, 2262, 2840, 2262, + /* 3400 */ 2262, 2262, 418, 2794, 881, 2796, 2797, 876, 2262, 2262, + /* 3410 */ 864, 900, 2758, 2262, 877, 2262, 2262, 2262, 2262, 2811, + /* 3420 */ 2262, 2793, 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2262, + /* 3430 */ 2262, 2262, 2262, 2262, 2262, 2758, 878, 877, 2262, 2262, + /* 3440 */ 2262, 2262, 2262, 2793, 2262, 2262, 2262, 2262, 2262, 2262, + /* 3450 */ 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2793, 878, 2792, + /* 3460 */ 2262, 2262, 2840, 2262, 2262, 2811, 419, 2794, 881, 2796, + /* 3470 */ 2797, 876, 878, 2262, 864, 900, 2262, 2262, 2262, 2262, + /* 3480 */ 2262, 2758, 2792, 877, 2262, 2840, 2262, 2811, 2262, 420, + /* 3490 */ 2794, 881, 2796, 2797, 876, 2262, 2262, 864, 900, 2262, + /* 3500 */ 2262, 2811, 2262, 2758, 2262, 877, 2262, 2262, 2262, 2262, + /* 3510 */ 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2758, 2262, 877, + /* 3520 */ 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2792, 2262, + /* 3530 */ 2262, 2840, 2262, 2793, 2262, 421, 2794, 881, 2796, 2797, + /* 3540 */ 876, 2262, 2262, 864, 900, 2262, 2262, 2262, 878, 2262, + /* 3550 */ 2792, 2262, 2262, 2840, 2262, 2262, 2262, 437, 2794, 881, + /* 3560 */ 2796, 2797, 876, 2262, 2792, 864, 900, 2840, 2793, 2262, + /* 3570 */ 2262, 438, 2794, 881, 2796, 2797, 876, 2811, 2262, 864, + /* 3580 */ 900, 2262, 2262, 878, 2262, 2262, 2262, 2262, 2262, 2262, + /* 3590 */ 2262, 2262, 2262, 2758, 2262, 877, 2262, 2262, 2262, 2262, + /* 3600 */ 2262, 2793, 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2262, + /* 3610 */ 2262, 2262, 2811, 2262, 2262, 2262, 878, 2262, 2262, 2262, + /* 3620 */ 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2758, 2262, + /* 3630 */ 877, 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2262, + /* 3640 */ 2792, 2262, 2262, 2840, 2793, 2811, 2262, 2805, 2794, 881, + /* 3650 */ 2796, 2797, 876, 2262, 2262, 864, 900, 2262, 2262, 878, + /* 3660 */ 2262, 2758, 2262, 877, 2262, 2262, 2262, 2262, 2262, 2262, + /* 3670 */ 2262, 2262, 2262, 2262, 2262, 2792, 2262, 2793, 2840, 2262, + /* 3680 */ 2262, 2262, 2804, 2794, 881, 2796, 2797, 876, 2811, 2262, + /* 3690 */ 864, 900, 878, 2262, 2262, 2262, 2262, 2262, 2262, 2262, + /* 3700 */ 2262, 2262, 2262, 2262, 2758, 2262, 877, 2262, 2792, 2262, + /* 3710 */ 2262, 2840, 2262, 2262, 2262, 2803, 2794, 881, 2796, 2797, + /* 3720 */ 876, 2811, 2262, 864, 900, 2262, 2262, 2262, 2262, 2262, + /* 3730 */ 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2758, 2262, 877, + /* 3740 */ 2262, 2793, 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2262, + /* 3750 */ 2262, 2792, 2262, 2262, 2840, 2262, 878, 2262, 457, 2794, + /* 3760 */ 881, 2796, 2797, 876, 2793, 2262, 864, 900, 2262, 2262, + /* 3770 */ 2262, 2262, 2262, 2262, 2262, 
2262, 2262, 2262, 2262, 878, + /* 3780 */ 2262, 2262, 2262, 2262, 2792, 2811, 2262, 2840, 2262, 2262, + /* 3790 */ 2262, 458, 2794, 881, 2796, 2797, 876, 2262, 2262, 864, + /* 3800 */ 900, 2758, 2262, 877, 2262, 2262, 2262, 2262, 2811, 2262, + /* 3810 */ 2793, 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2262, + /* 3820 */ 2262, 2262, 2262, 2262, 2758, 878, 877, 2262, 2262, 2262, + /* 3830 */ 2262, 2262, 2793, 2262, 2262, 2262, 2262, 2262, 2262, 2262, + /* 3840 */ 2262, 2262, 2262, 2262, 2262, 2262, 2262, 878, 2792, 2262, + /* 3850 */ 2262, 2840, 2262, 2262, 2811, 454, 2794, 881, 2796, 2797, + /* 3860 */ 876, 2262, 2262, 864, 900, 2262, 2262, 2262, 2262, 2262, + /* 3870 */ 2758, 2792, 877, 2262, 2840, 2262, 2811, 2262, 459, 2794, + /* 3880 */ 881, 2796, 2797, 876, 2262, 2262, 864, 900, 2262, 2262, + /* 3890 */ 2262, 2262, 2758, 2262, 877, 2262, 2262, 2262, 2262, 2262, + /* 3900 */ 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2262, 2262, + /* 3910 */ 2262, 2262, 2262, 2262, 2262, 2262, 2262, 879, 2262, 2262, + /* 3920 */ 2840, 2262, 2262, 2262, 427, 2794, 881, 2796, 2797, 876, + /* 3930 */ 2262, 2262, 864, 900, 2262, 2262, 2262, 2262, 2262, 2792, + /* 3940 */ 2262, 2262, 2840, 2262, 2262, 2262, 426, 2794, 881, 2796, + /* 3950 */ 2797, 876, 2262, 2262, 864, 900, }; static const YYCODETYPE yy_lookahead[] = { /* 0 */ 530, 397, 530, 397, 400, 401, 400, 401, 538, 433, @@ -1087,10 +1089,10 @@ static const YYCODETYPE yy_lookahead[] = { /* 540 */ 269, 270, 271, 272, 33, 191, 192, 276, 277, 278, /* 550 */ 279, 280, 281, 282, 307, 201, 202, 402, 171, 82, /* 560 */ 307, 41, 42, 176, 112, 498, 499, 500, 146, 155, - /* 570 */ 216, 184, 218, 431, 402, 403, 0, 223, 511, 127, + /* 570 */ 216, 184, 218, 431, 402, 403, 20, 223, 511, 127, /* 580 */ 128, 129, 130, 131, 132, 133, 134, 135, 136, 447, /* 590 */ 138, 139, 140, 141, 142, 143, 144, 8, 9, 152, - /* 600 */ 153, 12, 13, 14, 15, 16, 252, 253, 254, 33, + /* 600 */ 153, 12, 13, 14, 15, 16, 252, 253, 254, 126, /* 610 */ 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, /* 620 */ 266, 466, 467, 269, 270, 271, 272, 116, 501, 450, /* 630 */ 276, 277, 278, 279, 280, 281, 282, 283, 12, 13, @@ -1104,8 +1106,8 @@ static const YYCODETYPE yy_lookahead[] = { /* 710 */ 296, 297, 298, 299, 300, 301, 302, 303, 304, 402, /* 720 */ 403, 12, 13, 14, 432, 225, 73, 419, 58, 20, /* 730 */ 230, 22, 388, 233, 254, 235, 459, 14, 112, 422, - /* 740 */ 448, 115, 450, 20, 432, 36, 20, 38, 22, 59, - /* 750 */ 60, 448, 495, 441, 442, 498, 448, 402, 403, 502, + /* 740 */ 448, 115, 450, 20, 432, 36, 20, 38, 22, 193, + /* 750 */ 20, 448, 495, 441, 442, 498, 448, 402, 403, 502, /* 760 */ 503, 504, 505, 506, 507, 13, 509, 510, 511, 313, /* 770 */ 314, 315, 316, 402, 403, 402, 403, 68, 152, 153, /* 780 */ 71, 526, 527, 528, 58, 530, 531, 495, 36, 474, @@ -1115,221 +1117,221 @@ static const YYCODETYPE yy_lookahead[] = { /* 820 */ 443, 112, 218, 74, 115, 152, 153, 201, 202, 456, /* 830 */ 8, 9, 422, 426, 12, 13, 14, 15, 16, 77, /* 840 */ 78, 79, 216, 436, 218, 530, 84, 85, 86, 223, - /* 850 */ 126, 155, 90, 538, 250, 251, 267, 95, 96, 97, + /* 850 */ 0, 155, 90, 538, 250, 251, 267, 95, 96, 97, /* 860 */ 98, 152, 153, 101, 211, 474, 170, 105, 106, 107, - /* 870 */ 108, 556, 557, 388, 201, 202, 561, 562, 252, 253, + /* 870 */ 108, 556, 557, 20, 201, 202, 561, 562, 252, 253, /* 880 */ 254, 411, 256, 257, 258, 259, 260, 261, 262, 263, /* 890 */ 264, 265, 266, 240, 241, 269, 270, 271, 272, 429, /* 900 */ 191, 192, 276, 277, 278, 279, 280, 281, 282, 439, - /* 910 */ 201, 202, 8, 9, 191, 33, 
12, 13, 14, 15, - /* 920 */ 16, 530, 398, 402, 403, 216, 402, 218, 404, 538, - /* 930 */ 20, 423, 223, 448, 402, 403, 432, 283, 36, 285, - /* 940 */ 432, 474, 20, 422, 440, 432, 479, 556, 557, 20, - /* 950 */ 474, 443, 561, 562, 422, 451, 402, 403, 34, 12, - /* 960 */ 13, 252, 253, 254, 451, 256, 257, 258, 259, 260, - /* 970 */ 261, 262, 263, 264, 265, 266, 422, 254, 269, 270, - /* 980 */ 271, 272, 432, 36, 13, 276, 277, 278, 279, 280, - /* 990 */ 281, 282, 12, 13, 22, 388, 432, 530, 116, 22, - /* 1000 */ 20, 451, 22, 52, 440, 538, 530, 36, 36, 23, - /* 1010 */ 403, 402, 403, 36, 538, 451, 36, 515, 38, 517, - /* 1020 */ 116, 402, 403, 556, 557, 402, 403, 47, 561, 562, - /* 1030 */ 208, 422, 556, 557, 530, 49, 50, 561, 562, 432, - /* 1040 */ 388, 422, 538, 514, 388, 422, 517, 432, 68, 449, - /* 1050 */ 450, 71, 402, 403, 82, 448, 515, 450, 517, 403, - /* 1060 */ 556, 557, 82, 8, 9, 561, 562, 12, 13, 14, - /* 1070 */ 15, 16, 422, 8, 9, 12, 13, 12, 13, 14, - /* 1080 */ 15, 16, 433, 20, 112, 22, 407, 408, 432, 112, - /* 1090 */ 407, 408, 112, 444, 409, 115, 402, 403, 0, 36, - /* 1100 */ 448, 38, 495, 193, 448, 498, 450, 492, 493, 502, - /* 1110 */ 503, 504, 505, 506, 507, 193, 422, 510, 511, 388, - /* 1120 */ 218, 436, 193, 516, 91, 518, 14, 15, 16, 522, - /* 1130 */ 523, 68, 152, 153, 71, 68, 2, 300, 301, 302, - /* 1140 */ 303, 304, 8, 9, 193, 82, 12, 13, 14, 15, - /* 1150 */ 16, 495, 250, 251, 498, 204, 334, 550, 502, 503, - /* 1160 */ 504, 505, 506, 507, 254, 218, 510, 511, 432, 402, - /* 1170 */ 403, 191, 192, 402, 403, 112, 440, 20, 115, 448, - /* 1180 */ 432, 201, 202, 254, 402, 403, 119, 451, 216, 422, - /* 1190 */ 218, 443, 159, 422, 402, 403, 216, 433, 218, 2, - /* 1200 */ 402, 403, 428, 223, 422, 8, 9, 551, 552, 12, - /* 1210 */ 13, 14, 15, 16, 422, 152, 153, 184, 185, 501, - /* 1220 */ 422, 433, 8, 9, 252, 253, 12, 13, 14, 15, - /* 1230 */ 16, 198, 252, 253, 254, 388, 256, 257, 258, 259, - /* 1240 */ 260, 261, 262, 263, 264, 265, 266, 529, 193, 269, - /* 1250 */ 270, 271, 272, 155, 191, 192, 276, 277, 278, 279, - /* 1260 */ 280, 281, 282, 489, 201, 202, 8, 9, 22, 14, - /* 1270 */ 12, 13, 14, 15, 16, 20, 38, 402, 403, 216, - /* 1280 */ 0, 218, 36, 8, 9, 0, 223, 12, 13, 14, - /* 1290 */ 15, 16, 54, 402, 403, 448, 433, 422, 402, 403, - /* 1300 */ 432, 63, 64, 65, 66, 47, 68, 0, 416, 417, - /* 1310 */ 442, 402, 403, 422, 3, 252, 253, 254, 422, 256, + /* 910 */ 201, 202, 8, 9, 191, 433, 12, 13, 14, 15, + /* 920 */ 16, 530, 432, 193, 74, 216, 444, 218, 91, 538, + /* 930 */ 402, 403, 223, 398, 12, 13, 432, 402, 36, 404, + /* 940 */ 423, 474, 22, 22, 440, 432, 479, 556, 557, 432, + /* 950 */ 422, 47, 561, 562, 0, 451, 36, 36, 36, 388, + /* 960 */ 443, 252, 253, 254, 451, 256, 257, 258, 259, 260, + /* 970 */ 261, 262, 263, 264, 265, 266, 22, 254, 269, 270, + /* 980 */ 271, 272, 492, 493, 254, 276, 277, 278, 279, 280, + /* 990 */ 281, 282, 12, 13, 22, 388, 159, 530, 432, 283, + /* 1000 */ 20, 285, 22, 82, 388, 538, 440, 190, 36, 186, + /* 1010 */ 403, 300, 301, 302, 303, 304, 36, 451, 38, 448, + /* 1020 */ 116, 184, 185, 556, 557, 402, 403, 47, 561, 562, + /* 1030 */ 208, 388, 112, 112, 530, 198, 402, 403, 68, 432, + /* 1040 */ 449, 450, 538, 220, 388, 422, 193, 428, 68, 402, + /* 1050 */ 403, 71, 402, 403, 82, 448, 422, 450, 501, 403, + /* 1060 */ 556, 557, 82, 432, 448, 561, 562, 402, 403, 422, + /* 1070 */ 402, 403, 422, 402, 403, 12, 13, 402, 403, 34, + /* 1080 */ 402, 403, 451, 20, 267, 22, 529, 422, 432, 119, + /* 1090 */ 422, 448, 112, 422, 145, 115, 501, 422, 149, 36, + /* 1100 */ 
422, 38, 495, 286, 448, 498, 450, 254, 489, 502, + /* 1110 */ 503, 504, 505, 506, 507, 402, 403, 510, 511, 432, + /* 1120 */ 218, 402, 403, 516, 529, 518, 388, 440, 13, 522, + /* 1130 */ 523, 68, 152, 153, 71, 422, 2, 216, 451, 218, + /* 1140 */ 218, 422, 8, 9, 432, 82, 12, 13, 14, 15, + /* 1150 */ 16, 495, 250, 251, 498, 443, 334, 550, 502, 503, + /* 1160 */ 504, 505, 506, 507, 59, 60, 510, 511, 14, 15, + /* 1170 */ 16, 191, 192, 252, 253, 112, 433, 20, 115, 402, + /* 1180 */ 403, 201, 202, 8, 9, 432, 448, 12, 13, 14, + /* 1190 */ 15, 16, 409, 440, 407, 408, 216, 82, 218, 422, + /* 1200 */ 407, 408, 433, 223, 451, 8, 9, 551, 552, 12, + /* 1210 */ 13, 14, 15, 16, 2, 152, 153, 388, 388, 436, + /* 1220 */ 8, 9, 1, 2, 12, 13, 14, 15, 16, 416, + /* 1230 */ 417, 424, 252, 253, 254, 428, 256, 257, 258, 259, + /* 1240 */ 260, 261, 262, 263, 264, 265, 266, 411, 388, 269, + /* 1250 */ 270, 271, 272, 33, 191, 192, 276, 277, 278, 279, + /* 1260 */ 280, 281, 282, 52, 201, 202, 8, 9, 402, 403, + /* 1270 */ 12, 13, 14, 15, 16, 439, 38, 448, 448, 216, + /* 1280 */ 0, 218, 515, 515, 517, 517, 223, 33, 422, 8, + /* 1290 */ 9, 116, 54, 12, 13, 14, 15, 16, 402, 403, + /* 1300 */ 433, 63, 64, 65, 66, 47, 68, 0, 448, 514, + /* 1310 */ 402, 403, 517, 116, 20, 252, 253, 254, 422, 256, /* 1320 */ 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, - /* 1330 */ 116, 422, 269, 270, 271, 272, 1, 2, 190, 276, + /* 1330 */ 422, 20, 269, 270, 271, 272, 116, 116, 388, 276, /* 1340 */ 277, 278, 279, 280, 281, 282, 12, 13, 191, 388, - /* 1350 */ 22, 18, 114, 20, 20, 1, 22, 119, 112, 74, + /* 1350 */ 22, 18, 114, 20, 20, 1, 22, 119, 3, 23, /* 1360 */ 27, 8, 9, 30, 36, 12, 13, 14, 15, 16, /* 1370 */ 36, 38, 38, 19, 8, 9, 402, 403, 12, 13, - /* 1380 */ 14, 15, 16, 402, 403, 432, 424, 54, 388, 56, - /* 1390 */ 428, 116, 38, 440, 61, 62, 422, 13, 416, 417, - /* 1400 */ 47, 388, 68, 422, 451, 71, 73, 53, 54, 448, - /* 1410 */ 82, 254, 501, 0, 388, 267, 82, 63, 64, 65, - /* 1420 */ 66, 20, 68, 20, 186, 145, 146, 147, 148, 149, - /* 1430 */ 150, 151, 411, 195, 286, 33, 445, 199, 200, 448, - /* 1440 */ 529, 44, 445, 205, 206, 448, 112, 114, 448, 115, - /* 1450 */ 36, 116, 145, 146, 147, 148, 149, 150, 151, 126, - /* 1460 */ 439, 448, 224, 388, 388, 68, 82, 54, 114, 388, + /* 1380 */ 14, 15, 16, 402, 403, 49, 50, 54, 44, 56, + /* 1390 */ 402, 403, 38, 14, 61, 62, 422, 13, 448, 20, + /* 1400 */ 388, 388, 68, 422, 193, 71, 73, 53, 54, 448, + /* 1410 */ 422, 254, 68, 47, 388, 204, 82, 63, 64, 65, + /* 1420 */ 66, 388, 68, 388, 186, 145, 146, 147, 148, 149, + /* 1430 */ 150, 151, 432, 195, 416, 417, 445, 199, 200, 448, + /* 1440 */ 112, 433, 442, 205, 206, 33, 112, 114, 432, 115, + /* 1450 */ 391, 392, 145, 146, 147, 148, 149, 150, 151, 126, + /* 1460 */ 448, 448, 224, 388, 388, 0, 82, 451, 114, 116, /* 1470 */ 388, 8, 9, 119, 448, 12, 13, 14, 15, 16, - /* 1480 */ 433, 474, 116, 474, 388, 33, 152, 153, 388, 445, - /* 1490 */ 157, 158, 448, 160, 161, 162, 163, 164, 165, 166, - /* 1500 */ 167, 168, 169, 13, 150, 172, 173, 174, 175, 176, - /* 1510 */ 177, 178, 179, 388, 181, 182, 183, 388, 116, 432, - /* 1520 */ 187, 188, 189, 448, 448, 191, 192, 194, 186, 448, - /* 1530 */ 448, 391, 392, 152, 153, 201, 202, 530, 451, 530, - /* 1540 */ 0, 388, 33, 120, 448, 538, 123, 538, 448, 195, - /* 1550 */ 216, 197, 218, 33, 200, 126, 145, 223, 480, 205, - /* 1560 */ 149, 33, 220, 556, 557, 556, 557, 47, 561, 562, - /* 1570 */ 561, 562, 82, 448, 234, 47, 236, 448, 224, 116, - /* 1580 */ 120, 36, 120, 123, 44, 123, 252, 253, 254, 36, + /* 1480 */ 0, 
448, 445, 448, 126, 448, 152, 153, 0, 33, + /* 1490 */ 157, 158, 33, 160, 161, 162, 163, 164, 165, 166, + /* 1500 */ 167, 168, 169, 47, 150, 172, 173, 174, 175, 176, + /* 1510 */ 177, 178, 179, 0, 181, 182, 183, 36, 0, 54, + /* 1520 */ 187, 188, 189, 448, 448, 191, 192, 194, 445, 33, + /* 1530 */ 448, 448, 44, 0, 120, 201, 202, 123, 180, 120, + /* 1540 */ 22, 120, 123, 47, 123, 13, 33, 0, 254, 195, + /* 1550 */ 216, 197, 218, 120, 200, 22, 123, 223, 234, 205, + /* 1560 */ 236, 33, 36, 82, 433, 254, 8, 9, 36, 22, + /* 1570 */ 12, 13, 14, 15, 16, 116, 51, 33, 224, 116, + /* 1580 */ 152, 153, 433, 33, 33, 331, 252, 253, 254, 480, /* 1590 */ 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, - /* 1600 */ 266, 448, 0, 269, 270, 271, 272, 33, 33, 180, - /* 1610 */ 276, 277, 278, 279, 280, 281, 282, 18, 33, 33, - /* 1620 */ 120, 388, 23, 123, 22, 116, 433, 82, 0, 0, - /* 1630 */ 0, 51, 218, 33, 33, 82, 403, 33, 405, 33, - /* 1640 */ 329, 42, 43, 1, 2, 46, 33, 388, 36, 33, - /* 1650 */ 22, 22, 22, 75, 36, 254, 57, 254, 33, 33, - /* 1660 */ 12, 13, 403, 433, 405, 432, 389, 33, 69, 70, - /* 1670 */ 71, 72, 73, 12, 13, 12, 13, 33, 12, 13, - /* 1680 */ 115, 448, 117, 450, 33, 33, 12, 13, 12, 13, - /* 1690 */ 116, 432, 12, 13, 82, 115, 12, 13, 12, 13, - /* 1700 */ 13, 116, 116, 12, 13, 12, 13, 448, 33, 450, - /* 1710 */ 33, 33, 33, 420, 115, 33, 116, 116, 33, 532, - /* 1720 */ 116, 460, 116, 36, 13, 0, 565, 554, 495, 116, - /* 1730 */ 547, 498, 116, 533, 432, 502, 503, 504, 505, 506, - /* 1740 */ 507, 116, 116, 510, 511, 406, 419, 36, 419, 516, - /* 1750 */ 116, 518, 401, 154, 495, 522, 523, 498, 460, 388, - /* 1760 */ 116, 502, 503, 504, 505, 506, 507, 116, 116, 510, - /* 1770 */ 511, 553, 553, 553, 403, 516, 405, 518, 473, 553, - /* 1780 */ 55, 522, 523, 331, 460, 406, 496, 481, 403, 537, - /* 1790 */ 447, 116, 460, 116, 116, 116, 460, 537, 116, 558, - /* 1800 */ 524, 116, 540, 432, 460, 388, 310, 208, 209, 210, - /* 1810 */ 421, 54, 213, 475, 497, 20, 20, 402, 411, 448, - /* 1820 */ 403, 450, 405, 486, 233, 226, 227, 486, 214, 411, - /* 1830 */ 252, 20, 12, 13, 477, 388, 218, 491, 239, 402, - /* 1840 */ 47, 242, 22, 403, 245, 246, 247, 248, 249, 432, - /* 1850 */ 403, 457, 405, 403, 190, 457, 36, 402, 38, 454, - /* 1860 */ 403, 402, 454, 457, 454, 448, 495, 450, 113, 498, - /* 1870 */ 454, 415, 111, 502, 503, 504, 505, 506, 507, 432, - /* 1880 */ 414, 510, 511, 402, 402, 110, 388, 516, 68, 518, - /* 1890 */ 413, 402, 20, 522, 523, 448, 402, 450, 395, 402, - /* 1900 */ 52, 403, 82, 405, 399, 395, 307, 399, 333, 20, - /* 1910 */ 486, 450, 495, 411, 20, 498, 404, 411, 411, 502, - /* 1920 */ 503, 504, 505, 506, 507, 20, 411, 510, 511, 476, - /* 1930 */ 432, 404, 112, 516, 411, 518, 20, 467, 20, 522, - /* 1940 */ 523, 411, 495, 411, 461, 498, 448, 411, 450, 502, - /* 1950 */ 503, 504, 505, 506, 507, 411, 402, 510, 511, 395, - /* 1960 */ 402, 12, 13, 516, 391, 518, 432, 432, 391, 522, - /* 1970 */ 523, 22, 448, 432, 432, 432, 395, 448, 432, 432, - /* 1980 */ 432, 432, 432, 432, 237, 36, 448, 38, 115, 490, - /* 1990 */ 20, 409, 488, 495, 222, 388, 498, 486, 485, 483, - /* 2000 */ 502, 503, 504, 505, 506, 507, 221, 482, 510, 511, - /* 2010 */ 403, 450, 409, 402, 516, 448, 518, 68, 318, 546, - /* 2020 */ 522, 523, 317, 468, 546, 388, 207, 475, 468, 326, - /* 2030 */ 328, 546, 545, 549, 548, 327, 216, 544, 218, 432, - /* 2040 */ 403, 311, 306, 475, 543, 305, 566, 335, 332, 330, - /* 2050 */ 536, 560, 535, 403, 559, 448, 20, 450, 501, 388, - /* 2060 */ 126, 308, 541, 404, 409, 539, 409, 448, 448, 432, - /* 2070 
*/ 448, 448, 252, 253, 403, 448, 468, 468, 199, 409, - /* 2080 */ 465, 448, 461, 115, 521, 448, 448, 450, 428, 269, - /* 2090 */ 270, 199, 409, 462, 448, 403, 276, 277, 278, 279, - /* 2100 */ 280, 281, 495, 432, 461, 498, 409, 409, 115, 502, - /* 2110 */ 503, 504, 505, 506, 507, 448, 448, 510, 511, 448, - /* 2120 */ 448, 450, 409, 516, 390, 518, 22, 35, 402, 522, - /* 2130 */ 523, 437, 495, 37, 393, 498, 448, 448, 448, 502, - /* 2140 */ 503, 504, 505, 506, 507, 394, 388, 510, 511, 448, - /* 2150 */ 40, 395, 448, 516, 448, 518, 396, 448, 448, 522, - /* 2160 */ 523, 403, 448, 448, 469, 216, 495, 218, 448, 498, - /* 2170 */ 448, 448, 448, 502, 503, 504, 505, 506, 507, 448, - /* 2180 */ 448, 510, 511, 448, 434, 448, 448, 425, 425, 518, - /* 2190 */ 432, 425, 388, 522, 523, 478, 410, 469, 434, 250, - /* 2200 */ 251, 252, 386, 0, 0, 0, 448, 403, 450, 47, - /* 2210 */ 0, 36, 494, 36, 487, 243, 36, 36, 269, 270, - /* 2220 */ 243, 0, 388, 36, 36, 276, 277, 278, 279, 280, - /* 2230 */ 281, 243, 36, 0, 0, 243, 432, 403, 0, 0, - /* 2240 */ 0, 22, 36, 36, 0, 36, 238, 0, 224, 0, - /* 2250 */ 224, 218, 448, 495, 450, 225, 498, 216, 0, 0, + /* 1600 */ 266, 1, 2, 269, 270, 271, 272, 33, 82, 33, + /* 1610 */ 276, 277, 278, 279, 280, 281, 282, 18, 33, 8, + /* 1620 */ 9, 388, 23, 12, 13, 14, 15, 16, 420, 474, + /* 1630 */ 33, 12, 13, 389, 75, 155, 403, 33, 405, 33, + /* 1640 */ 115, 42, 43, 33, 116, 46, 33, 388, 36, 33, + /* 1650 */ 33, 33, 12, 13, 460, 532, 57, 12, 13, 474, + /* 1660 */ 116, 115, 403, 117, 405, 432, 116, 116, 69, 70, + /* 1670 */ 71, 72, 73, 565, 116, 33, 13, 12, 13, 12, + /* 1680 */ 13, 448, 36, 450, 329, 530, 554, 12, 13, 547, + /* 1690 */ 116, 432, 116, 538, 12, 13, 474, 12, 13, 36, + /* 1700 */ 533, 116, 33, 12, 13, 12, 13, 448, 406, 450, + /* 1710 */ 432, 556, 557, 116, 115, 530, 561, 562, 12, 13, + /* 1720 */ 116, 401, 116, 538, 419, 419, 116, 33, 495, 116, + /* 1730 */ 33, 498, 116, 116, 116, 502, 503, 504, 505, 506, + /* 1740 */ 507, 556, 557, 510, 511, 333, 561, 562, 33, 516, + /* 1750 */ 13, 518, 530, 154, 495, 522, 523, 498, 116, 388, + /* 1760 */ 538, 502, 503, 504, 505, 506, 507, 553, 33, 510, + /* 1770 */ 511, 33, 36, 36, 403, 516, 405, 518, 556, 557, + /* 1780 */ 553, 522, 523, 561, 562, 116, 460, 553, 473, 553, + /* 1790 */ 460, 406, 496, 403, 447, 481, 524, 460, 460, 537, + /* 1800 */ 537, 558, 460, 432, 193, 388, 0, 208, 209, 210, + /* 1810 */ 116, 252, 213, 116, 540, 421, 310, 54, 82, 448, + /* 1820 */ 403, 450, 405, 475, 497, 226, 227, 20, 402, 20, + /* 1830 */ 218, 116, 12, 13, 233, 388, 486, 491, 239, 214, + /* 1840 */ 411, 242, 22, 486, 245, 246, 247, 248, 249, 432, + /* 1850 */ 403, 116, 405, 411, 116, 477, 36, 402, 38, 20, + /* 1860 */ 403, 55, 47, 457, 218, 448, 495, 450, 457, 498, + /* 1870 */ 403, 190, 454, 502, 503, 504, 505, 506, 507, 432, + /* 1880 */ 402, 510, 511, 113, 457, 403, 388, 516, 68, 518, + /* 1890 */ 454, 402, 454, 522, 523, 448, 111, 450, 454, 415, + /* 1900 */ 414, 403, 82, 405, 402, 402, 307, 110, 413, 402, + /* 1910 */ 402, 402, 495, 20, 395, 498, 52, 395, 486, 502, + /* 1920 */ 503, 504, 505, 506, 507, 411, 399, 510, 511, 399, + /* 1930 */ 432, 411, 112, 516, 20, 518, 20, 450, 404, 522, + /* 1940 */ 523, 20, 495, 404, 476, 498, 448, 411, 450, 502, + /* 1950 */ 503, 504, 505, 506, 507, 20, 411, 510, 511, 411, + /* 1960 */ 467, 12, 13, 516, 411, 518, 20, 411, 411, 522, + /* 1970 */ 523, 22, 461, 402, 411, 395, 237, 391, 391, 402, + /* 1980 */ 490, 432, 115, 395, 432, 36, 432, 38, 448, 448, + /* 1990 */ 448, 486, 432, 495, 432, 388, 498, 432, 432, 
432, + /* 2000 */ 502, 503, 504, 505, 506, 507, 20, 432, 510, 511, + /* 2010 */ 403, 432, 488, 432, 516, 222, 518, 68, 409, 221, + /* 2020 */ 522, 523, 485, 450, 409, 388, 483, 318, 402, 546, + /* 2030 */ 448, 317, 468, 326, 549, 207, 216, 482, 218, 432, + /* 2040 */ 403, 328, 548, 475, 327, 546, 468, 546, 306, 545, + /* 2050 */ 305, 311, 536, 335, 544, 448, 535, 450, 543, 388, + /* 2060 */ 332, 330, 403, 566, 475, 20, 126, 308, 539, 432, + /* 2070 */ 501, 559, 252, 253, 403, 409, 541, 560, 404, 409, + /* 2080 */ 448, 448, 468, 448, 448, 448, 448, 450, 468, 269, + /* 2090 */ 270, 199, 465, 409, 115, 448, 276, 277, 278, 279, + /* 2100 */ 280, 281, 495, 432, 461, 498, 448, 409, 199, 502, + /* 2110 */ 503, 504, 505, 506, 507, 521, 462, 510, 511, 448, + /* 2120 */ 409, 450, 461, 516, 428, 518, 403, 448, 409, 522, + /* 2130 */ 523, 448, 495, 115, 448, 498, 448, 448, 448, 502, + /* 2140 */ 503, 504, 505, 506, 507, 22, 388, 510, 511, 448, + /* 2150 */ 448, 448, 448, 516, 402, 518, 448, 390, 448, 522, + /* 2160 */ 523, 403, 437, 35, 448, 216, 495, 218, 448, 498, + /* 2170 */ 448, 409, 448, 502, 503, 504, 505, 506, 507, 393, + /* 2180 */ 448, 510, 511, 37, 394, 40, 494, 396, 410, 518, + /* 2190 */ 432, 448, 388, 522, 523, 448, 448, 448, 448, 250, + /* 2200 */ 251, 252, 448, 395, 478, 434, 448, 403, 450, 425, + /* 2210 */ 469, 434, 425, 469, 487, 386, 0, 425, 269, 270, + /* 2220 */ 0, 0, 388, 47, 0, 276, 277, 278, 279, 280, + /* 2230 */ 281, 36, 243, 36, 36, 36, 432, 403, 243, 0, + /* 2240 */ 36, 36, 243, 36, 0, 0, 243, 0, 36, 0, + /* 2250 */ 36, 0, 448, 495, 450, 22, 498, 238, 36, 0, /* 2260 */ 502, 503, 504, 505, 506, 507, 432, 0, 510, 511, - /* 2270 */ 212, 0, 211, 0, 158, 51, 518, 51, 0, 36, - /* 2280 */ 522, 523, 448, 0, 450, 0, 388, 36, 0, 54, - /* 2290 */ 0, 0, 51, 47, 0, 0, 51, 0, 0, 495, - /* 2300 */ 0, 403, 498, 0, 0, 0, 502, 503, 504, 505, - /* 2310 */ 506, 507, 176, 388, 510, 511, 36, 0, 176, 0, - /* 2320 */ 0, 0, 518, 0, 0, 0, 522, 523, 403, 495, - /* 2330 */ 432, 0, 498, 0, 0, 0, 502, 503, 504, 505, + /* 2270 */ 224, 0, 224, 218, 225, 216, 518, 0, 0, 0, + /* 2280 */ 522, 523, 448, 212, 450, 211, 388, 0, 0, 158, + /* 2290 */ 0, 51, 51, 0, 36, 0, 36, 54, 0, 495, + /* 2300 */ 0, 403, 498, 51, 0, 0, 502, 503, 504, 505, + /* 2310 */ 506, 507, 47, 388, 510, 511, 0, 51, 0, 0, + /* 2320 */ 0, 0, 518, 0, 0, 176, 522, 523, 403, 495, + /* 2330 */ 432, 0, 498, 36, 176, 0, 502, 503, 504, 505, /* 2340 */ 506, 507, 0, 0, 510, 511, 448, 0, 450, 0, - /* 2350 */ 388, 0, 0, 0, 0, 0, 0, 432, 51, 0, - /* 2360 */ 47, 0, 0, 0, 0, 403, 22, 0, 0, 158, - /* 2370 */ 157, 0, 156, 448, 0, 450, 0, 0, 0, 0, - /* 2380 */ 0, 0, 22, 0, 52, 0, 68, 22, 52, 0, - /* 2390 */ 68, 0, 0, 495, 432, 68, 498, 563, 564, 388, - /* 2400 */ 502, 503, 504, 505, 506, 507, 36, 68, 510, 511, - /* 2410 */ 448, 0, 450, 44, 403, 0, 518, 44, 36, 0, - /* 2420 */ 495, 523, 36, 498, 36, 0, 54, 502, 503, 504, - /* 2430 */ 505, 506, 507, 36, 472, 510, 511, 44, 54, 0, - /* 2440 */ 44, 54, 47, 432, 14, 33, 0, 44, 388, 51, - /* 2450 */ 51, 51, 45, 0, 0, 0, 0, 495, 44, 448, - /* 2460 */ 498, 450, 207, 403, 502, 503, 504, 505, 506, 507, - /* 2470 */ 0, 51, 510, 511, 0, 51, 388, 552, 36, 0, - /* 2480 */ 76, 0, 0, 472, 0, 0, 54, 44, 36, 44, - /* 2490 */ 388, 403, 432, 0, 54, 36, 0, 36, 54, 0, - /* 2500 */ 0, 44, 54, 0, 44, 403, 495, 0, 448, 498, - /* 2510 */ 450, 36, 388, 502, 503, 504, 505, 506, 507, 0, - /* 2520 */ 432, 510, 511, 0, 22, 0, 125, 403, 123, 0, - /* 2530 */ 22, 36, 0, 36, 432, 22, 448, 36, 450, 0, - /* 2540 */ 36, 36, 36, 22, 22, 36, 36, 
33, 36, 33, - /* 2550 */ 448, 36, 450, 36, 36, 495, 432, 22, 498, 0, - /* 2560 */ 22, 0, 502, 503, 504, 505, 506, 507, 0, 36, - /* 2570 */ 510, 511, 448, 0, 450, 22, 56, 36, 0, 36, - /* 2580 */ 0, 388, 36, 495, 0, 20, 498, 36, 36, 116, + /* 2350 */ 388, 0, 0, 0, 0, 0, 0, 432, 0, 0, + /* 2360 */ 0, 0, 0, 0, 0, 403, 51, 0, 0, 47, + /* 2370 */ 0, 0, 0, 448, 0, 450, 0, 0, 0, 0, + /* 2380 */ 0, 0, 0, 158, 0, 157, 0, 22, 156, 0, + /* 2390 */ 0, 22, 52, 495, 432, 22, 498, 563, 564, 388, + /* 2400 */ 502, 503, 504, 505, 506, 507, 0, 68, 510, 511, + /* 2410 */ 448, 0, 450, 68, 403, 0, 518, 0, 68, 36, + /* 2420 */ 495, 523, 0, 498, 0, 68, 0, 502, 503, 504, + /* 2430 */ 505, 506, 507, 36, 472, 510, 511, 52, 44, 36, + /* 2440 */ 44, 54, 54, 432, 0, 36, 54, 0, 388, 44, + /* 2450 */ 36, 0, 44, 33, 47, 51, 14, 495, 0, 448, + /* 2460 */ 498, 450, 44, 403, 502, 503, 504, 505, 506, 507, + /* 2470 */ 51, 0, 510, 511, 51, 45, 388, 552, 44, 207, + /* 2480 */ 0, 0, 51, 472, 0, 0, 0, 51, 0, 0, + /* 2490 */ 388, 403, 432, 0, 0, 36, 44, 76, 0, 54, + /* 2500 */ 36, 44, 54, 0, 36, 403, 495, 54, 448, 498, + /* 2510 */ 450, 44, 388, 502, 503, 504, 505, 506, 507, 0, + /* 2520 */ 432, 510, 511, 36, 54, 0, 44, 403, 0, 0, + /* 2530 */ 0, 0, 0, 36, 432, 125, 448, 22, 450, 123, + /* 2540 */ 0, 36, 22, 36, 36, 36, 36, 33, 36, 36, + /* 2550 */ 448, 0, 450, 36, 36, 495, 432, 33, 498, 36, + /* 2560 */ 22, 22, 502, 503, 504, 505, 506, 507, 0, 36, + /* 2570 */ 510, 511, 448, 36, 450, 22, 0, 22, 56, 0, + /* 2580 */ 22, 388, 36, 495, 0, 0, 498, 0, 36, 0, /* 2590 */ 502, 503, 504, 505, 506, 507, 403, 495, 510, 511, - /* 2600 */ 498, 115, 36, 0, 502, 503, 504, 505, 506, 507, - /* 2610 */ 193, 51, 510, 511, 0, 555, 193, 388, 228, 495, - /* 2620 */ 0, 36, 498, 229, 22, 432, 502, 503, 504, 505, - /* 2630 */ 506, 507, 403, 223, 510, 511, 199, 513, 22, 115, - /* 2640 */ 219, 448, 193, 450, 193, 388, 0, 0, 3, 312, - /* 2650 */ 203, 193, 564, 203, 33, 115, 52, 52, 116, 115, - /* 2660 */ 403, 432, 36, 116, 36, 472, 116, 111, 113, 115, - /* 2670 */ 33, 33, 33, 116, 116, 51, 51, 448, 115, 450, - /* 2680 */ 116, 33, 115, 82, 115, 115, 33, 312, 495, 432, - /* 2690 */ 36, 498, 115, 3, 33, 502, 503, 504, 505, 506, - /* 2700 */ 507, 472, 116, 510, 511, 448, 116, 450, 36, 36, - /* 2710 */ 36, 36, 36, 36, 51, 116, 116, 33, 312, 51, - /* 2720 */ 0, 0, 296, 115, 495, 44, 388, 498, 116, 472, - /* 2730 */ 115, 502, 503, 504, 505, 506, 507, 0, 116, 510, - /* 2740 */ 511, 403, 115, 44, 196, 115, 0, 116, 115, 200, - /* 2750 */ 44, 33, 495, 113, 388, 498, 284, 2, 196, 502, - /* 2760 */ 503, 504, 505, 506, 507, 113, 195, 510, 511, 403, - /* 2770 */ 432, 22, 115, 115, 115, 115, 51, 115, 252, 51, - /* 2780 */ 116, 116, 115, 22, 115, 115, 448, 116, 450, 255, - /* 2790 */ 388, 196, 44, 115, 115, 0, 0, 115, 432, 116, - /* 2800 */ 115, 115, 22, 116, 115, 403, 115, 22, 51, 117, - /* 2810 */ 472, 118, 115, 115, 448, 115, 450, 116, 115, 115, - /* 2820 */ 126, 22, 22, 388, 116, 229, 36, 115, 36, 116, - /* 2830 */ 36, 115, 137, 495, 432, 116, 498, 36, 403, 116, - /* 2840 */ 502, 503, 504, 505, 506, 507, 36, 116, 510, 511, - /* 2850 */ 448, 36, 450, 36, 116, 115, 33, 115, 36, 137, - /* 2860 */ 115, 495, 22, 137, 498, 76, 388, 432, 502, 503, - /* 2870 */ 504, 505, 506, 507, 22, 36, 510, 511, 137, 75, - /* 2880 */ 36, 403, 36, 448, 36, 450, 36, 36, 82, 36, - /* 2890 */ 36, 36, 36, 36, 82, 109, 109, 495, 33, 388, - /* 2900 */ 498, 82, 36, 22, 502, 503, 504, 505, 506, 507, - /* 2910 */ 432, 36, 510, 511, 403, 36, 36, 22, 36, 36, - /* 2920 */ 36, 36, 36, 36, 36, 36, 448, 0, 
450, 36, - /* 2930 */ 495, 54, 0, 498, 44, 36, 44, 502, 503, 504, - /* 2940 */ 505, 506, 507, 432, 0, 510, 511, 36, 54, 44, - /* 2950 */ 54, 0, 36, 44, 0, 54, 36, 0, 22, 448, - /* 2960 */ 36, 450, 0, 388, 22, 36, 33, 36, 22, 21, - /* 2970 */ 567, 22, 22, 495, 21, 20, 498, 567, 403, 567, - /* 2980 */ 502, 503, 504, 505, 506, 507, 388, 567, 510, 511, - /* 2990 */ 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, + /* 2600 */ 498, 36, 0, 36, 502, 503, 504, 505, 506, 507, + /* 2610 */ 0, 20, 510, 511, 22, 555, 36, 388, 36, 495, + /* 2620 */ 116, 115, 498, 0, 36, 432, 502, 503, 504, 505, + /* 2630 */ 506, 507, 403, 193, 510, 511, 115, 513, 0, 193, + /* 2640 */ 228, 448, 229, 450, 51, 388, 22, 36, 223, 0, + /* 2650 */ 22, 219, 564, 193, 199, 0, 0, 203, 193, 193, + /* 2660 */ 403, 432, 3, 33, 203, 472, 116, 36, 36, 115, + /* 2670 */ 52, 115, 52, 116, 115, 113, 116, 448, 33, 450, + /* 2680 */ 111, 33, 116, 33, 51, 115, 115, 312, 495, 432, + /* 2690 */ 116, 498, 115, 51, 33, 502, 503, 504, 505, 506, + /* 2700 */ 507, 472, 116, 510, 511, 448, 33, 450, 82, 115, + /* 2710 */ 312, 115, 36, 116, 3, 116, 33, 36, 36, 116, + /* 2720 */ 36, 36, 36, 36, 495, 116, 388, 498, 51, 472, + /* 2730 */ 33, 502, 503, 504, 505, 506, 507, 296, 0, 510, + /* 2740 */ 511, 403, 51, 0, 115, 312, 44, 0, 44, 200, + /* 2750 */ 116, 116, 495, 0, 388, 498, 115, 196, 115, 502, + /* 2760 */ 503, 504, 505, 506, 507, 44, 115, 510, 511, 403, + /* 2770 */ 432, 113, 33, 116, 196, 115, 195, 113, 2, 284, + /* 2780 */ 22, 116, 115, 115, 115, 115, 448, 252, 450, 115, + /* 2790 */ 388, 116, 51, 115, 115, 51, 22, 255, 432, 0, + /* 2800 */ 196, 44, 0, 115, 115, 403, 116, 115, 115, 115, + /* 2810 */ 472, 116, 116, 115, 448, 115, 450, 22, 115, 118, + /* 2820 */ 51, 115, 117, 388, 115, 115, 22, 22, 116, 115, + /* 2830 */ 115, 22, 116, 495, 432, 229, 498, 36, 403, 126, + /* 2840 */ 502, 503, 504, 505, 506, 507, 36, 115, 510, 511, + /* 2850 */ 448, 116, 450, 36, 36, 115, 36, 36, 116, 36, + /* 2860 */ 137, 495, 116, 137, 498, 116, 388, 432, 502, 503, + /* 2870 */ 504, 505, 506, 507, 137, 116, 510, 511, 115, 33, + /* 2880 */ 115, 403, 137, 448, 36, 450, 115, 22, 76, 75, + /* 2890 */ 22, 36, 36, 36, 36, 36, 36, 495, 36, 388, + /* 2900 */ 498, 82, 36, 109, 502, 503, 504, 505, 506, 507, + /* 2910 */ 432, 36, 510, 511, 403, 36, 36, 82, 33, 36, + /* 2920 */ 109, 22, 36, 36, 36, 36, 448, 36, 450, 82, + /* 2930 */ 495, 36, 36, 498, 36, 22, 0, 502, 503, 504, + /* 2940 */ 505, 506, 507, 432, 36, 510, 511, 36, 36, 44, + /* 2950 */ 36, 54, 0, 36, 44, 54, 0, 36, 54, 448, + /* 2960 */ 44, 450, 0, 388, 36, 0, 44, 36, 0, 54, + /* 2970 */ 22, 36, 0, 495, 33, 22, 498, 22, 403, 36, + /* 2980 */ 502, 503, 504, 505, 506, 507, 388, 36, 510, 511, + /* 2990 */ 21, 567, 22, 22, 21, 20, 567, 567, 567, 567, /* 3000 */ 567, 403, 567, 567, 567, 567, 495, 432, 567, 498, /* 3010 */ 567, 567, 567, 502, 503, 504, 505, 506, 507, 567, /* 3020 */ 567, 510, 511, 448, 567, 450, 567, 567, 567, 567, @@ -1413,19 +1415,19 @@ static const YYCODETYPE yy_lookahead[] = { /* 3800 */ 511, 448, 567, 450, 567, 567, 567, 567, 432, 567, /* 3810 */ 388, 567, 567, 567, 567, 567, 567, 567, 567, 567, /* 3820 */ 567, 567, 567, 567, 448, 403, 450, 567, 567, 567, - /* 3830 */ 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, - /* 3840 */ 567, 567, 567, 567, 567, 567, 567, 567, 495, 567, + /* 3830 */ 567, 567, 388, 567, 567, 567, 567, 567, 567, 567, + /* 3840 */ 567, 567, 567, 567, 567, 567, 567, 403, 495, 567, /* 3850 */ 567, 498, 567, 567, 432, 502, 503, 504, 505, 506, /* 3860 */ 507, 567, 567, 510, 511, 
567, 567, 567, 567, 567, - /* 3870 */ 448, 495, 450, 567, 498, 567, 567, 567, 502, 503, + /* 3870 */ 448, 495, 450, 567, 498, 567, 432, 567, 502, 503, /* 3880 */ 504, 505, 506, 507, 567, 567, 510, 511, 567, 567, - /* 3890 */ 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, + /* 3890 */ 567, 567, 448, 567, 450, 567, 567, 567, 567, 567, /* 3900 */ 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, /* 3910 */ 567, 567, 567, 567, 567, 567, 567, 495, 567, 567, /* 3920 */ 498, 567, 567, 567, 502, 503, 504, 505, 506, 507, - /* 3930 */ 567, 567, 510, 511, 385, 385, 385, 385, 385, 385, - /* 3940 */ 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, - /* 3950 */ 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, + /* 3930 */ 567, 567, 510, 511, 567, 567, 567, 567, 567, 495, + /* 3940 */ 567, 567, 498, 567, 567, 567, 502, 503, 504, 505, + /* 3950 */ 506, 507, 567, 567, 510, 511, 385, 385, 385, 385, /* 3960 */ 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, /* 3970 */ 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, /* 3980 */ 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, @@ -1461,11 +1463,14 @@ static const YYCODETYPE yy_lookahead[] = { /* 4280 */ 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, /* 4290 */ 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, /* 4300 */ 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, - /* 4310 */ 385, 385, 385, 385, 385, 385, 385, 385, 385, + /* 4310 */ 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, + /* 4320 */ 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, + /* 4330 */ 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, + /* 4340 */ 385, }; -#define YY_SHIFT_COUNT (1003) +#define YY_SHIFT_COUNT (1005) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (2962) +#define YY_SHIFT_MAX (2975) static const unsigned short int yy_shift_ofst[] = { /* 0 */ 1599, 271, 354, 271, 626, 626, 626, 626, 626, 626, /* 10 */ 626, 626, 626, 626, 626, 626, 709, 1063, 1063, 1334, @@ -1473,105 +1478,105 @@ static const unsigned short int yy_shift_ofst[] = { /* 30 */ 1063, 980, 1063, 1063, 1063, 1063, 1063, 1063, 1063, 1063, /* 40 */ 1063, 1063, 1063, 1063, 1063, 1063, 1063, 1063, 1063, 1063, /* 50 */ 1063, 1063, 1063, 1063, 1063, 1063, 1063, 1063, 1063, 1063, - /* 60 */ 93, 247, 253, 221, 219, 356, 219, 219, 221, 221, - /* 70 */ 219, 1820, 219, 1333, 1820, 381, 219, 4, 1949, 673, - /* 80 */ 673, 61, 61, 1949, 1949, 385, 385, 673, 325, 325, - /* 90 */ 447, 274, 274, 480, 300, 61, 61, 61, 61, 61, - /* 100 */ 61, 61, 61, 61, 61, 61, 167, 266, 275, 61, - /* 110 */ 61, 40, 4, 61, 167, 61, 4, 61, 61, 61, - /* 120 */ 61, 4, 61, 61, 61, 4, 61, 4, 4, 4, - /* 130 */ 749, 165, 165, 452, 452, 543, 762, 178, 48, 972, - /* 140 */ 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, - /* 150 */ 972, 972, 972, 972, 972, 972, 972, 972, 520, 187, - /* 160 */ 325, 447, 690, 690, 477, 922, 922, 922, 654, 654, - /* 170 */ 1285, 1384, 477, 40, 4, 73, 4, 4, 135, 4, - /* 180 */ 4, 406, 4, 406, 406, 724, 924, 452, 452, 452, - /* 190 */ 452, 452, 452, 1354, 413, 21, 75, 414, 414, 822, - /* 200 */ 125, 456, 837, 670, 186, 207, 723, 947, 947, 986, - /* 210 */ 910, 1328, 1328, 1328, 951, 1328, 1157, 929, 726, 1397, - /* 220 */ 1255, 1411, 1342, 1401, 1401, 1403, 1490, 1490, 1311, 1520, - /* 230 */ 213, 1401, 1384, 1496, 1757, 1795, 1796, 1591, 40, 1796, - /* 240 */ 40, 1614, 1795, 1811, 1793, 1811, 1793, 1664, 1795, 1811, - /* 250 */ 1795, 1793, 1664, 1664, 1664, 1755, 1761, 1795, 1795, 1775, - /* 260 */ 1795, 1795, 1795, 1872, 1848, 1872, 1848, 1796, 40, 40, - /* 270 */ 1889, 40, 1894, 1905, 40, 1894, 40, 1916, 40, 
1918, - /* 280 */ 40, 40, 1795, 40, 1872, 4, 4, 4, 4, 4, - /* 290 */ 4, 4, 4, 4, 4, 4, 1795, 924, 924, 1872, - /* 300 */ 406, 406, 406, 1747, 1873, 1796, 749, 1970, 1772, 1785, - /* 310 */ 1889, 749, 1496, 1795, 406, 1700, 1705, 1700, 1705, 1703, - /* 320 */ 1819, 1700, 1702, 1708, 1730, 1496, 1736, 1740, 1712, 1716, - /* 330 */ 1719, 1811, 2036, 1934, 1753, 1894, 749, 749, 1705, 406, - /* 340 */ 406, 406, 406, 1705, 406, 1879, 749, 406, 1918, 749, - /* 350 */ 1968, 406, 1892, 1918, 749, 724, 749, 1811, 406, 406, + /* 60 */ 1063, 93, 247, 253, 221, 219, 356, 219, 219, 221, + /* 70 */ 221, 219, 1820, 219, 1333, 1820, 381, 219, 4, 1949, + /* 80 */ 673, 673, 61, 61, 1949, 1949, 385, 385, 673, 325, + /* 90 */ 325, 447, 274, 274, 480, 300, 61, 61, 61, 61, + /* 100 */ 61, 61, 61, 61, 61, 61, 61, 167, 266, 275, + /* 110 */ 61, 61, 40, 4, 61, 167, 61, 4, 61, 61, + /* 120 */ 61, 61, 4, 61, 61, 61, 4, 61, 4, 4, + /* 130 */ 4, 749, 165, 165, 452, 452, 543, 762, 178, 48, + /* 140 */ 921, 921, 921, 921, 921, 921, 921, 921, 921, 921, + /* 150 */ 921, 921, 921, 921, 921, 921, 921, 921, 921, 520, + /* 160 */ 187, 325, 447, 1105, 1105, 477, 556, 556, 556, 716, + /* 170 */ 716, 850, 1115, 477, 40, 4, 73, 4, 4, 135, + /* 180 */ 4, 4, 406, 4, 406, 406, 483, 1045, 452, 452, + /* 190 */ 452, 452, 452, 452, 1354, 413, 21, 75, 414, 414, + /* 200 */ 822, 125, 456, 711, 670, 186, 207, 723, 922, 922, + /* 210 */ 1336, 730, 972, 972, 972, 1211, 972, 1157, 853, 726, + /* 220 */ 1344, 1379, 949, 823, 1294, 1294, 1311, 1384, 1384, 1355, + /* 230 */ 1456, 213, 1294, 1115, 1506, 1763, 1807, 1809, 1601, 40, + /* 240 */ 1809, 40, 1625, 1807, 1839, 1815, 1839, 1815, 1681, 1807, + /* 250 */ 1839, 1807, 1815, 1681, 1681, 1681, 1770, 1785, 1807, 1807, + /* 260 */ 1797, 1807, 1807, 1807, 1893, 1864, 1893, 1864, 1809, 40, + /* 270 */ 40, 1914, 40, 1916, 1921, 40, 1916, 40, 1935, 40, + /* 280 */ 1946, 40, 40, 1807, 40, 1893, 4, 4, 4, 4, + /* 290 */ 4, 4, 4, 4, 4, 4, 4, 1807, 1045, 1045, + /* 300 */ 1893, 406, 406, 406, 1739, 1867, 1809, 749, 1986, 1793, + /* 310 */ 1798, 1914, 749, 1506, 1807, 406, 1709, 1714, 1709, 1714, + /* 320 */ 1707, 1828, 1709, 1713, 1717, 1740, 1506, 1742, 1745, 1718, + /* 330 */ 1728, 1731, 1839, 2045, 1940, 1759, 1916, 749, 749, 1714, + /* 340 */ 406, 406, 406, 406, 1714, 406, 1892, 749, 406, 1946, + /* 350 */ 749, 1979, 406, 1909, 1946, 749, 483, 749, 1839, 406, /* 360 */ 406, 406, 406, 406, 406, 406, 406, 406, 406, 406, /* 370 */ 406, 406, 406, 406, 406, 406, 406, 406, 406, 406, - /* 380 */ 1993, 406, 1795, 749, 2104, 2092, 2096, 2110, 1872, 3934, - /* 390 */ 3934, 3934, 3934, 3934, 3934, 3934, 3934, 3934, 3934, 3934, - /* 400 */ 3934, 39, 1238, 204, 302, 310, 84, 50, 197, 904, - /* 410 */ 1134, 1197, 1214, 1258, 1275, 1353, 1366, 1463, 589, 1055, - /* 420 */ 1280, 1307, 1065, 1065, 1065, 1065, 1065, 1065, 1065, 1065, - /* 430 */ 1065, 100, 65, 500, 1033, 3, 3, 653, 53, 387, - /* 440 */ 294, 284, 284, 604, 902, 284, 696, 977, 1246, 422, - /* 450 */ 47, 47, 1112, 1335, 1148, 1112, 1112, 1112, 1413, 94, - /* 460 */ 511, 1540, 1528, 1429, 1098, 576, 1423, 1460, 1462, 1500, - /* 470 */ 1545, 752, 971, 1602, 1628, 1629, 1630, 1340, 882, 1402, - /* 480 */ 1067, 1509, 1574, 1585, 1586, 1381, 1452, 1575, 1600, 1601, - /* 490 */ 1604, 1606, 1642, 1613, 1578, 1616, 1625, 1580, 1626, 1634, - /* 500 */ 1644, 1651, 1652, 1648, 1661, 1663, 1666, 1674, 1676, 1680, - /* 510 */ 1684, 1686, 1691, 1693, 1675, 1677, 1678, 1679, 1682, 1685, - /* 520 */ 1565, 1553, 1414, 1618, 1687, 1711, 1612, 1725, 2203, 2204, - /* 530 */ 
2205, 2162, 2210, 2175, 1972, 2177, 2180, 2181, 1977, 2221, - /* 540 */ 2187, 2188, 1988, 2196, 2233, 2234, 1992, 2238, 2206, 2239, - /* 550 */ 2207, 2240, 2219, 2244, 2209, 2008, 2247, 2024, 2249, 2026, - /* 560 */ 2030, 2033, 2041, 2258, 2259, 2267, 2058, 2061, 2271, 2273, - /* 570 */ 2116, 2224, 2226, 2278, 2243, 2283, 2285, 2251, 2235, 2288, - /* 580 */ 2241, 2290, 2246, 2291, 2294, 2295, 2245, 2297, 2298, 2300, - /* 590 */ 2303, 2304, 2305, 2136, 2280, 2317, 2142, 2319, 2320, 2321, - /* 600 */ 2323, 2324, 2325, 2331, 2333, 2334, 2335, 2342, 2343, 2347, - /* 610 */ 2349, 2351, 2352, 2353, 2354, 2355, 2356, 2307, 2359, 2313, - /* 620 */ 2361, 2362, 2363, 2364, 2374, 2376, 2377, 2378, 2379, 2344, - /* 630 */ 2367, 2211, 2368, 2213, 2371, 2216, 2380, 2381, 2360, 2332, - /* 640 */ 2365, 2336, 2383, 2318, 2385, 2322, 2370, 2389, 2327, 2391, - /* 650 */ 2339, 2392, 2411, 2382, 2372, 2369, 2415, 2386, 2384, 2373, - /* 660 */ 2419, 2388, 2387, 2393, 2425, 2397, 2439, 2395, 2396, 2412, - /* 670 */ 2398, 2399, 2430, 2400, 2446, 2407, 2403, 2453, 2454, 2455, - /* 680 */ 2456, 2414, 2255, 2470, 2398, 2420, 2474, 2398, 2424, 2484, - /* 690 */ 2485, 2404, 2481, 2482, 2442, 2432, 2443, 2479, 2452, 2440, - /* 700 */ 2445, 2493, 2459, 2444, 2457, 2496, 2461, 2448, 2460, 2499, - /* 710 */ 2500, 2503, 2507, 2519, 2523, 2401, 2405, 2475, 2502, 2525, - /* 720 */ 2508, 2495, 2497, 2501, 2504, 2505, 2506, 2509, 2510, 2512, - /* 730 */ 2514, 2516, 2515, 2517, 2513, 2518, 2529, 2521, 2532, 2522, - /* 740 */ 2539, 2535, 2520, 2559, 2538, 2533, 2561, 2568, 2573, 2541, - /* 750 */ 2578, 2543, 2580, 2546, 2584, 2553, 2565, 2551, 2552, 2566, - /* 760 */ 2473, 2486, 2603, 2417, 2394, 2390, 2524, 2410, 2398, 2560, - /* 770 */ 2614, 2423, 2585, 2602, 2620, 2421, 2616, 2449, 2437, 2646, - /* 780 */ 2647, 2451, 2447, 2458, 2450, 2645, 2621, 2337, 2540, 2542, - /* 790 */ 2544, 2547, 2626, 2628, 2554, 2604, 2555, 2605, 2556, 2550, - /* 800 */ 2637, 2638, 2557, 2563, 2567, 2569, 2558, 2639, 2624, 2625, - /* 810 */ 2570, 2648, 2375, 2601, 2564, 2653, 2577, 2654, 2586, 2590, - /* 820 */ 2690, 2661, 2406, 2672, 2673, 2674, 2675, 2676, 2677, 2599, - /* 830 */ 2600, 2663, 2426, 2684, 2668, 2720, 2721, 2608, 2681, 2612, - /* 840 */ 2622, 2615, 2627, 2548, 2630, 2737, 2699, 2549, 2746, 2631, - /* 850 */ 2633, 2562, 2706, 2571, 2718, 2640, 2472, 2652, 2755, 2749, - /* 860 */ 2526, 2657, 2658, 2659, 2660, 2664, 2665, 2662, 2667, 2669, - /* 870 */ 2670, 2678, 2671, 2725, 2679, 2682, 2728, 2683, 2761, 2534, - /* 880 */ 2685, 2686, 2795, 2687, 2689, 2595, 2748, 2691, 2692, 2796, - /* 890 */ 2780, 2693, 2697, 2398, 2757, 2698, 2700, 2701, 2703, 2704, - /* 900 */ 2694, 2785, 2799, 2800, 2596, 2708, 2790, 2792, 2712, 2713, - /* 910 */ 2794, 2716, 2719, 2801, 2662, 2723, 2810, 2667, 2731, 2815, - /* 920 */ 2669, 2738, 2817, 2670, 2695, 2722, 2726, 2741, 2740, 2823, - /* 930 */ 2742, 2822, 2745, 2823, 2823, 2840, 2789, 2804, 2852, 2839, - /* 940 */ 2844, 2846, 2848, 2850, 2851, 2853, 2854, 2855, 2856, 2857, - /* 950 */ 2806, 2786, 2812, 2787, 2865, 2866, 2875, 2879, 2881, 2880, - /* 960 */ 2882, 2883, 2819, 2514, 2884, 2516, 2885, 2886, 2887, 2888, - /* 970 */ 2895, 2889, 2927, 2893, 2877, 2890, 2932, 2899, 2894, 2892, - /* 980 */ 2944, 2911, 2896, 2905, 2951, 2916, 2901, 2909, 2954, 2920, - /* 990 */ 2957, 2936, 2924, 2962, 2942, 2933, 2929, 2931, 2946, 2948, - /* 1000 */ 2949, 2950, 2953, 2955, + /* 380 */ 406, 2018, 406, 1807, 749, 2123, 2128, 2146, 2145, 1893, + /* 390 */ 3956, 3956, 3956, 3956, 3956, 3956, 3956, 3956, 3956, 3956, + /* 
400 */ 3956, 3956, 39, 1238, 204, 302, 310, 84, 904, 50, + /* 410 */ 197, 1175, 1197, 1258, 1134, 1212, 1353, 1366, 1463, 1558, + /* 420 */ 589, 1611, 1280, 1307, 1281, 1281, 1281, 1281, 1281, 1281, + /* 430 */ 1281, 1281, 1281, 100, 65, 500, 837, 3, 3, 653, + /* 440 */ 53, 387, 294, 284, 284, 604, 902, 284, 696, 920, + /* 450 */ 1328, 422, 47, 47, 1154, 1221, 817, 1154, 1154, 1154, + /* 460 */ 1465, 94, 511, 1488, 1496, 1358, 1480, 1513, 1414, 1419, + /* 470 */ 1421, 1433, 1481, 752, 1532, 954, 1518, 1533, 1547, 1324, + /* 480 */ 1220, 1459, 970, 1528, 1544, 1550, 1551, 1428, 1254, 1412, + /* 490 */ 1574, 1576, 1585, 1597, 1600, 1604, 1559, 1606, 1610, 1525, + /* 500 */ 1613, 1616, 1617, 1618, 1642, 1619, 1640, 1645, 1665, 1667, + /* 510 */ 1675, 1682, 1685, 1691, 1693, 1706, 1669, 1694, 1697, 1715, + /* 520 */ 1735, 1738, 1546, 1526, 1612, 1646, 1663, 1737, 1736, 1806, + /* 530 */ 2216, 2220, 2221, 2176, 2224, 2195, 1989, 2197, 2198, 2199, + /* 540 */ 1995, 2239, 2204, 2205, 1999, 2207, 2244, 2245, 2003, 2247, + /* 550 */ 2212, 2249, 2214, 2251, 2233, 2267, 2222, 2019, 2259, 2046, + /* 560 */ 2271, 2048, 2049, 2055, 2059, 2277, 2278, 2279, 2071, 2074, + /* 570 */ 2287, 2288, 2131, 2240, 2241, 2290, 2258, 2293, 2295, 2260, + /* 580 */ 2243, 2298, 2252, 2300, 2265, 2304, 2305, 2316, 2266, 2318, + /* 590 */ 2319, 2320, 2321, 2323, 2324, 2149, 2297, 2331, 2158, 2335, + /* 600 */ 2342, 2343, 2347, 2349, 2351, 2352, 2353, 2354, 2355, 2356, + /* 610 */ 2358, 2359, 2360, 2361, 2362, 2363, 2364, 2374, 2376, 2315, + /* 620 */ 2367, 2322, 2368, 2370, 2371, 2372, 2377, 2378, 2379, 2380, + /* 630 */ 2381, 2365, 2382, 2225, 2384, 2228, 2386, 2232, 2389, 2390, + /* 640 */ 2369, 2340, 2373, 2385, 2406, 2339, 2411, 2345, 2383, 2415, + /* 650 */ 2350, 2417, 2357, 2422, 2424, 2397, 2387, 2394, 2426, 2403, + /* 660 */ 2388, 2396, 2444, 2409, 2392, 2405, 2447, 2414, 2451, 2407, + /* 670 */ 2408, 2420, 2404, 2419, 2442, 2423, 2458, 2430, 2418, 2471, + /* 680 */ 2484, 2485, 2486, 2434, 2272, 2480, 2404, 2431, 2481, 2404, + /* 690 */ 2436, 2488, 2489, 2421, 2493, 2494, 2459, 2445, 2452, 2498, + /* 700 */ 2464, 2448, 2457, 2503, 2468, 2453, 2467, 2519, 2487, 2470, + /* 710 */ 2482, 2525, 2528, 2529, 2530, 2531, 2532, 2410, 2416, 2497, + /* 720 */ 2515, 2540, 2520, 2505, 2507, 2508, 2509, 2510, 2512, 2513, + /* 730 */ 2517, 2518, 2514, 2524, 2523, 2533, 2538, 2537, 2551, 2539, + /* 740 */ 2568, 2553, 2576, 2555, 2522, 2579, 2558, 2546, 2584, 2585, + /* 750 */ 2587, 2552, 2589, 2565, 2602, 2567, 2610, 2592, 2591, 2580, + /* 760 */ 2582, 2588, 2504, 2506, 2623, 2440, 2413, 2412, 2521, 2425, + /* 770 */ 2404, 2593, 2638, 2446, 2611, 2624, 2649, 2432, 2628, 2460, + /* 780 */ 2455, 2655, 2656, 2465, 2454, 2466, 2461, 2659, 2630, 2375, + /* 790 */ 2554, 2550, 2556, 2557, 2631, 2632, 2559, 2618, 2562, 2620, + /* 800 */ 2569, 2560, 2645, 2648, 2566, 2570, 2571, 2577, 2574, 2650, + /* 810 */ 2633, 2642, 2594, 2661, 2398, 2626, 2586, 2673, 2596, 2676, + /* 820 */ 2597, 2599, 2711, 2683, 2433, 2681, 2682, 2684, 2685, 2686, + /* 830 */ 2687, 2603, 2609, 2677, 2441, 2697, 2691, 2738, 2743, 2629, + /* 840 */ 2702, 2634, 2635, 2641, 2643, 2561, 2651, 2747, 2704, 2549, + /* 850 */ 2753, 2657, 2660, 2578, 2721, 2581, 2739, 2658, 2495, 2664, + /* 860 */ 2776, 2758, 2535, 2667, 2668, 2669, 2670, 2665, 2675, 2674, + /* 870 */ 2678, 2679, 2688, 2689, 2690, 2741, 2692, 2693, 2744, 2695, + /* 880 */ 2774, 2542, 2694, 2698, 2799, 2696, 2700, 2604, 2757, 2703, + /* 890 */ 2705, 2802, 2795, 2701, 2706, 2404, 2769, 2709, 2710, 2712, + /* 
900 */ 2714, 2715, 2713, 2804, 2805, 2809, 2606, 2716, 2801, 2810, + /* 910 */ 2732, 2735, 2817, 2740, 2742, 2818, 2674, 2746, 2820, 2678, + /* 920 */ 2749, 2821, 2679, 2759, 2823, 2688, 2723, 2726, 2737, 2745, + /* 930 */ 2763, 2846, 2765, 2848, 2771, 2846, 2846, 2865, 2812, 2814, + /* 940 */ 2868, 2855, 2856, 2857, 2858, 2859, 2860, 2862, 2866, 2875, + /* 950 */ 2879, 2880, 2819, 2794, 2835, 2811, 2885, 2883, 2886, 2887, + /* 960 */ 2899, 2888, 2889, 2891, 2847, 2514, 2895, 2524, 2896, 2898, + /* 970 */ 2908, 2911, 2913, 2912, 2936, 2914, 2897, 2905, 2952, 2917, + /* 980 */ 2901, 2910, 2956, 2921, 2904, 2916, 2962, 2928, 2915, 2922, + /* 990 */ 2965, 2931, 2968, 2948, 2935, 2972, 2953, 2941, 2943, 2951, + /* 1000 */ 2955, 2969, 2970, 2971, 2973, 2975, }; -#define YY_REDUCE_COUNT (400) +#define YY_REDUCE_COUNT (401) #define YY_REDUCE_MIN (-530) -#define YY_REDUCE_MAX (3422) +#define YY_REDUCE_MAX (3444) static const short yy_reduce_ofst[] = { /* 0 */ -142, -347, -206, 607, 1233, 1259, 1371, 1417, 1447, 1498, /* 10 */ 292, 1607, 1637, 1671, 1758, 1804, -101, 656, 1834, 1898, @@ -1579,144 +1584,144 @@ static const short yy_reduce_ofst[] = { /* 30 */ 2338, 2102, 2366, 2402, 2435, 2478, 2511, 2575, 2598, 2644, /* 40 */ 2666, 2680, 2756, 2791, 2824, 2867, 2900, 2964, 2987, 3033, /* 50 */ 3055, 3069, 3145, 3180, 3213, 3256, 3289, 3353, 3376, 3422, - /* 60 */ -342, 504, 467, -88, 315, 391, 476, 1007, 172, 255, - /* 70 */ 1009, 67, -530, -74, 308, -528, -203, 312, -273, -427, - /* 80 */ -120, -31, 371, -274, -70, -396, -394, 179, -402, -392, - /* 90 */ -19, -29, 524, -249, 317, 410, 521, 532, 554, -348, - /* 100 */ 355, 609, 619, 650, 694, 373, -374, 155, -293, 623, - /* 110 */ 767, 470, 615, 771, -20, 782, 564, 792, 798, 875, - /* 120 */ 891, 377, 896, 909, 974, 736, 981, 63, 953, 508, - /* 130 */ 407, -370, -370, 105, -424, 649, 81, -139, 272, 303, - /* 140 */ 344, 485, 652, 731, 847, 961, 1000, 1013, 1026, 1075, - /* 150 */ 1076, 1081, 1082, 1096, 1100, 1125, 1129, 1153, 142, 127, - /* 160 */ 277, 600, 679, 683, 892, 127, 718, 911, 502, 541, - /* 170 */ 685, -407, 982, 1021, 211, 774, 513, 550, 529, 748, - /* 180 */ 868, 991, 1087, 997, 1044, 962, 1140, 764, 788, 863, - /* 190 */ 1047, 1193, 1230, 1078, 1293, 1277, 1261, 1187, 1187, 1161, - /* 200 */ 1173, 1183, 1200, 1339, 1187, 1302, 1302, 1327, 1329, 1351, - /* 210 */ 1298, 1218, 1219, 1220, 1305, 1226, 1302, 1324, 1379, 1290, - /* 220 */ 1385, 1343, 1306, 1332, 1336, 1302, 1252, 1260, 1241, 1276, - /* 230 */ 1262, 1344, 1389, 1338, 1317, 1415, 1337, 1346, 1407, 1341, - /* 240 */ 1418, 1357, 1437, 1440, 1394, 1450, 1398, 1405, 1455, 1457, - /* 250 */ 1459, 1406, 1408, 1410, 1416, 1456, 1466, 1481, 1482, 1477, - /* 260 */ 1489, 1494, 1497, 1503, 1505, 1510, 1508, 1424, 1502, 1506, - /* 270 */ 1461, 1507, 1512, 1453, 1515, 1527, 1523, 1470, 1530, 1483, - /* 280 */ 1532, 1536, 1554, 1544, 1564, 1534, 1535, 1541, 1542, 1543, - /* 290 */ 1546, 1547, 1548, 1549, 1550, 1551, 1558, 1573, 1577, 1581, - /* 300 */ 1524, 1529, 1538, 1499, 1504, 1511, 1582, 1513, 1516, 1525, - /* 310 */ 1561, 1603, 1552, 1611, 1567, 1473, 1555, 1478, 1560, 1484, - /* 320 */ 1486, 1485, 1487, 1493, 1501, 1568, 1514, 1517, 1480, 1491, - /* 330 */ 1495, 1650, 1557, 1521, 1526, 1659, 1655, 1657, 1608, 1619, - /* 340 */ 1620, 1622, 1623, 1609, 1627, 1615, 1670, 1633, 1621, 1683, - /* 350 */ 1563, 1638, 1631, 1643, 1697, 1660, 1698, 1692, 1646, 1667, - /* 360 */ 1668, 1672, 1688, 1689, 1690, 1701, 1704, 1706, 1709, 1710, - /* 370 */ 1714, 1715, 1720, 1722, 1723, 1724, 1731, 
1732, 1735, 1737, - /* 380 */ 1694, 1738, 1726, 1713, 1734, 1741, 1751, 1760, 1756, 1717, - /* 390 */ 1750, 1718, 1727, 1695, 1728, 1762, 1763, 1764, 1766, 1786, - /* 400 */ 1816, + /* 60 */ 3444, -342, 504, 467, -88, 315, 391, 1155, 1185, 172, + /* 70 */ 255, 1222, 67, -530, -74, 308, -528, -203, 312, -273, + /* 80 */ -427, -120, -31, 371, -274, -70, -396, -394, 179, -402, + /* 90 */ -392, -19, -29, 535, -249, 317, 410, 528, 634, 647, + /* 100 */ -348, 355, 650, 665, 668, 671, 373, -374, 155, -293, + /* 110 */ 623, 675, 470, 490, 678, -20, 713, 566, 719, 777, + /* 120 */ 866, 896, 377, 908, 974, 981, 687, 988, 63, 753, + /* 130 */ 517, 407, -370, -370, 105, -424, 482, 81, -139, 272, + /* 140 */ 303, 344, 571, 616, 643, 738, 829, 830, 860, 950, + /* 150 */ 961, 1012, 1013, 1026, 1033, 1035, 1075, 1076, 1082, 142, + /* 160 */ 127, 277, 591, 787, 793, 813, 127, 557, 595, 767, + /* 170 */ 768, 783, -407, 1018, 836, 211, 619, 513, 631, 795, + /* 180 */ 712, 1000, 991, 1016, 1037, 1083, 807, 1059, 743, 769, + /* 190 */ 867, 1008, 1131, 1149, 1109, 1208, 1244, 1194, 1123, 1123, + /* 200 */ 1108, 1132, 1142, 1167, 1302, 1123, 1278, 1278, 1305, 1306, + /* 210 */ 1320, 1326, 1214, 1227, 1234, 1315, 1236, 1278, 1330, 1385, + /* 220 */ 1296, 1390, 1347, 1314, 1337, 1338, 1278, 1262, 1263, 1243, + /* 230 */ 1272, 1274, 1342, 1394, 1348, 1327, 1426, 1350, 1346, 1429, + /* 240 */ 1357, 1442, 1378, 1455, 1457, 1406, 1467, 1411, 1418, 1478, + /* 250 */ 1482, 1489, 1427, 1436, 1438, 1444, 1484, 1486, 1502, 1503, + /* 260 */ 1495, 1507, 1508, 1509, 1519, 1527, 1522, 1530, 1432, 1514, + /* 270 */ 1520, 1487, 1536, 1534, 1468, 1545, 1539, 1548, 1493, 1553, + /* 280 */ 1511, 1556, 1557, 1571, 1563, 1580, 1549, 1552, 1554, 1560, + /* 290 */ 1562, 1565, 1566, 1567, 1575, 1579, 1581, 1577, 1586, 1587, + /* 300 */ 1588, 1540, 1541, 1542, 1490, 1524, 1505, 1609, 1537, 1543, + /* 310 */ 1555, 1573, 1615, 1568, 1626, 1582, 1483, 1564, 1499, 1578, + /* 320 */ 1485, 1494, 1501, 1504, 1510, 1515, 1589, 1516, 1521, 1497, + /* 330 */ 1517, 1512, 1659, 1569, 1535, 1529, 1674, 1666, 1670, 1614, + /* 340 */ 1632, 1633, 1635, 1636, 1620, 1638, 1627, 1684, 1647, 1643, + /* 350 */ 1698, 1594, 1658, 1654, 1661, 1711, 1696, 1719, 1723, 1679, + /* 360 */ 1683, 1686, 1688, 1689, 1690, 1701, 1702, 1703, 1704, 1708, + /* 370 */ 1710, 1716, 1720, 1722, 1724, 1732, 1743, 1747, 1748, 1749, + /* 380 */ 1750, 1725, 1754, 1752, 1762, 1767, 1786, 1790, 1791, 1808, + /* 390 */ 1726, 1771, 1692, 1727, 1741, 1744, 1784, 1787, 1777, 1792, + /* 400 */ 1778, 1829, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 10 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 20 */ 2842, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 30 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 40 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 50 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 60 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 70 */ 2632, 2257, 2257, 2588, 2257, 2257, 2257, 2257, 2257, 2257, - /* 80 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2595, 2595, - /* 90 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 100 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 110 */ 2257, 2363, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 120 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 130 */ 2361, 2899, 2257, 
3025, 2673, 2257, 2257, 2928, 2257, 2257, - /* 140 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 150 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2911, - /* 160 */ 2257, 2257, 2334, 2334, 2257, 2911, 2911, 2911, 2871, 2871, - /* 170 */ 2361, 2257, 2257, 2363, 2257, 2675, 2257, 2257, 2257, 2257, - /* 180 */ 2257, 2257, 2257, 2257, 2257, 2504, 2287, 2257, 2257, 2257, - /* 190 */ 2257, 2257, 2257, 2658, 2257, 2257, 2957, 2903, 2904, 3019, - /* 200 */ 2257, 2960, 2922, 2257, 2917, 2257, 2257, 2257, 2257, 2257, - /* 210 */ 2947, 2257, 2257, 2257, 2257, 2257, 2257, 2600, 2257, 2701, - /* 220 */ 2257, 2449, 2652, 2257, 2257, 2257, 2257, 2257, 3003, 2901, - /* 230 */ 2941, 2257, 2257, 2951, 2257, 2257, 2257, 2689, 2363, 2257, - /* 240 */ 2363, 2645, 2583, 2257, 2593, 2257, 2593, 2590, 2257, 2257, - /* 250 */ 2257, 2593, 2590, 2590, 2590, 2437, 2433, 2257, 2257, 2431, - /* 260 */ 2257, 2257, 2257, 2257, 2317, 2257, 2317, 2257, 2363, 2363, - /* 270 */ 2257, 2363, 2257, 2257, 2363, 2257, 2363, 2257, 2363, 2257, - /* 280 */ 2363, 2363, 2257, 2363, 2257, 2257, 2257, 2257, 2257, 2257, - /* 290 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 300 */ 2257, 2257, 2257, 2687, 2668, 2257, 2361, 2257, 2656, 2654, - /* 310 */ 2257, 2361, 2951, 2257, 2257, 2973, 2968, 2973, 2968, 2987, - /* 320 */ 2983, 2973, 2992, 2989, 2953, 2951, 2934, 2930, 3022, 3009, - /* 330 */ 3005, 2257, 2257, 2939, 2937, 2257, 2361, 2361, 2968, 2257, - /* 340 */ 2257, 2257, 2257, 2968, 2257, 2257, 2361, 2257, 2257, 2361, - /* 350 */ 2257, 2257, 2257, 2257, 2361, 2257, 2361, 2257, 2257, 2257, - /* 360 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 370 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 380 */ 2466, 2257, 2257, 2361, 2257, 2289, 2291, 2301, 2257, 2647, - /* 390 */ 3025, 2673, 2678, 2628, 2628, 2507, 2507, 3025, 2507, 2364, - /* 400 */ 2262, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 410 */ 2257, 2257, 2257, 2806, 2257, 2257, 2257, 2257, 2257, 2257, - /* 420 */ 2257, 2257, 2986, 2985, 2807, 2257, 2875, 2874, 2873, 2864, - /* 430 */ 2806, 2462, 2257, 2257, 2257, 2805, 2804, 2257, 2257, 2257, - /* 440 */ 2257, 2453, 2450, 2257, 2257, 2475, 2257, 2257, 2257, 2257, - /* 450 */ 2619, 2618, 2798, 2257, 2257, 2799, 2797, 2796, 2257, 2257, - /* 460 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 470 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 480 */ 2257, 2257, 2257, 2257, 2257, 2257, 3006, 3010, 2257, 2257, - /* 490 */ 2257, 2257, 2900, 2257, 2257, 2257, 2257, 2777, 2257, 2257, - /* 500 */ 2257, 2257, 2257, 2745, 2740, 2731, 2722, 2737, 2728, 2716, - /* 510 */ 2734, 2725, 2713, 2710, 2257, 2257, 2257, 2257, 2257, 2257, - /* 520 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 530 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 540 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 550 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 560 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 570 */ 2589, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 580 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 590 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 600 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 610 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 620 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 
2257, 2257, 2257, - /* 630 */ 2257, 2257, 2257, 2257, 2257, 2604, 2257, 2257, 2257, 2257, - /* 640 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 650 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 660 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2306, - /* 670 */ 2784, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 680 */ 2257, 2257, 2257, 2257, 2787, 2257, 2257, 2788, 2257, 2257, - /* 690 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 700 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 710 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 720 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 730 */ 2408, 2407, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 740 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 750 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 760 */ 2789, 2257, 2257, 2257, 2257, 2672, 2257, 2257, 2779, 2257, - /* 770 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 780 */ 2257, 2257, 2257, 2257, 2257, 3002, 2954, 2257, 2257, 2257, - /* 790 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 800 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2777, - /* 810 */ 2257, 2984, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 3000, - /* 820 */ 2257, 3004, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2910, - /* 830 */ 2906, 2257, 2257, 2902, 2257, 2257, 2257, 2257, 2257, 2257, - /* 840 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 850 */ 2257, 2257, 2257, 2257, 2861, 2257, 2257, 2257, 2895, 2257, - /* 860 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2503, 2502, 2501, - /* 870 */ 2500, 2257, 2257, 2257, 2257, 2257, 2257, 2789, 2257, 2792, - /* 880 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 890 */ 2257, 2257, 2257, 2776, 2257, 2839, 2838, 2257, 2257, 2257, - /* 900 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2497, 2257, - /* 910 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 920 */ 2257, 2257, 2257, 2257, 2481, 2479, 2478, 2477, 2257, 2514, - /* 930 */ 2257, 2257, 2257, 2510, 2509, 2257, 2257, 2257, 2257, 2257, - /* 940 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 950 */ 2257, 2257, 2257, 2257, 2382, 2257, 2257, 2257, 2257, 2257, - /* 960 */ 2257, 2257, 2257, 2374, 2257, 2373, 2257, 2257, 2257, 2257, - /* 970 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 980 */ 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, 2257, - /* 990 */ 2257, 2257, 2257, 2257, 2257, 2286, 2257, 2257, 2257, 2257, - /* 1000 */ 2257, 2257, 2257, 2257, + /* 0 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 10 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 20 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 30 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 40 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 50 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 60 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 70 */ 2260, 2635, 2260, 2260, 2591, 2260, 2260, 2260, 2260, 2260, + /* 80 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2598, + /* 90 */ 2598, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 100 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 110 */ 2260, 2260, 2366, 2260, 2260, 2260, 2260, 
2260, 2260, 2260, + /* 120 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 130 */ 2260, 2364, 2902, 2260, 3028, 2676, 2260, 2260, 2931, 2260, + /* 140 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 150 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 160 */ 2914, 2260, 2260, 2337, 2337, 2260, 2914, 2914, 2914, 2874, + /* 170 */ 2874, 2364, 2260, 2260, 2366, 2260, 2678, 2260, 2260, 2260, + /* 180 */ 2260, 2260, 2260, 2260, 2260, 2260, 2507, 2290, 2260, 2260, + /* 190 */ 2260, 2260, 2260, 2260, 2661, 2260, 2260, 2960, 2906, 2907, + /* 200 */ 3022, 2260, 2963, 2925, 2260, 2920, 2260, 2260, 2260, 2260, + /* 210 */ 2260, 2950, 2260, 2260, 2260, 2260, 2260, 2260, 2603, 2260, + /* 220 */ 2704, 2260, 2452, 2655, 2260, 2260, 2260, 2260, 2260, 3006, + /* 230 */ 2904, 2944, 2260, 2260, 2954, 2260, 2260, 2260, 2692, 2366, + /* 240 */ 2260, 2366, 2648, 2586, 2260, 2596, 2260, 2596, 2593, 2260, + /* 250 */ 2260, 2260, 2596, 2593, 2593, 2593, 2440, 2436, 2260, 2260, + /* 260 */ 2434, 2260, 2260, 2260, 2260, 2320, 2260, 2320, 2260, 2366, + /* 270 */ 2366, 2260, 2366, 2260, 2260, 2366, 2260, 2366, 2260, 2366, + /* 280 */ 2260, 2366, 2366, 2260, 2366, 2260, 2260, 2260, 2260, 2260, + /* 290 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 300 */ 2260, 2260, 2260, 2260, 2690, 2671, 2260, 2364, 2260, 2659, + /* 310 */ 2657, 2260, 2364, 2954, 2260, 2260, 2976, 2971, 2976, 2971, + /* 320 */ 2990, 2986, 2976, 2995, 2992, 2956, 2954, 2937, 2933, 3025, + /* 330 */ 3012, 3008, 2260, 2260, 2942, 2940, 2260, 2364, 2364, 2971, + /* 340 */ 2260, 2260, 2260, 2260, 2971, 2260, 2260, 2364, 2260, 2260, + /* 350 */ 2364, 2260, 2260, 2260, 2260, 2364, 2260, 2364, 2260, 2260, + /* 360 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 370 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 380 */ 2260, 2469, 2260, 2260, 2364, 2260, 2292, 2294, 2304, 2260, + /* 390 */ 2650, 3028, 2676, 2681, 2631, 2631, 2510, 2510, 3028, 2510, + /* 400 */ 2367, 2265, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 410 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2809, 2260, 2260, + /* 420 */ 2260, 2260, 2260, 2260, 2989, 2988, 2810, 2260, 2878, 2877, + /* 430 */ 2876, 2867, 2809, 2465, 2260, 2260, 2260, 2808, 2807, 2260, + /* 440 */ 2260, 2260, 2260, 2456, 2453, 2260, 2260, 2478, 2260, 2260, + /* 450 */ 2260, 2260, 2622, 2621, 2801, 2260, 2260, 2802, 2800, 2799, + /* 460 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 470 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 480 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 3009, 3013, + /* 490 */ 2260, 2260, 2260, 2260, 2903, 2260, 2260, 2260, 2260, 2780, + /* 500 */ 2260, 2260, 2260, 2260, 2260, 2748, 2743, 2734, 2725, 2740, + /* 510 */ 2731, 2719, 2737, 2728, 2716, 2713, 2260, 2260, 2260, 2260, + /* 520 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 530 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 540 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 550 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 560 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 570 */ 2260, 2260, 2592, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 580 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 590 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 600 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 610 
*/ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 620 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 630 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2607, 2260, 2260, + /* 640 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 650 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 660 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 670 */ 2260, 2309, 2787, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 680 */ 2260, 2260, 2260, 2260, 2260, 2260, 2790, 2260, 2260, 2791, + /* 690 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 700 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 710 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 720 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 730 */ 2260, 2260, 2411, 2410, 2260, 2260, 2260, 2260, 2260, 2260, + /* 740 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 750 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 760 */ 2260, 2260, 2792, 2260, 2260, 2260, 2260, 2675, 2260, 2260, + /* 770 */ 2782, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 780 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 3005, 2957, 2260, + /* 790 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 800 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 810 */ 2260, 2780, 2260, 2987, 2260, 2260, 2260, 2260, 2260, 2260, + /* 820 */ 2260, 3003, 2260, 3007, 2260, 2260, 2260, 2260, 2260, 2260, + /* 830 */ 2260, 2913, 2909, 2260, 2260, 2905, 2260, 2260, 2260, 2260, + /* 840 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 850 */ 2260, 2260, 2260, 2260, 2260, 2260, 2864, 2260, 2260, 2260, + /* 860 */ 2898, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2506, + /* 870 */ 2505, 2504, 2503, 2260, 2260, 2260, 2260, 2260, 2260, 2792, + /* 880 */ 2260, 2795, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 890 */ 2260, 2260, 2260, 2260, 2260, 2779, 2260, 2843, 2842, 2260, + /* 900 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 910 */ 2500, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 920 */ 2260, 2260, 2260, 2260, 2260, 2260, 2484, 2482, 2481, 2480, + /* 930 */ 2260, 2517, 2260, 2260, 2260, 2513, 2512, 2260, 2260, 2260, + /* 940 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 950 */ 2260, 2260, 2260, 2260, 2260, 2260, 2385, 2260, 2260, 2260, + /* 960 */ 2260, 2260, 2260, 2260, 2260, 2377, 2260, 2376, 2260, 2260, + /* 970 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 980 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2260, + /* 990 */ 2260, 2260, 2260, 2260, 2260, 2260, 2260, 2289, 2260, 2260, + /* 1000 */ 2260, 2260, 2260, 2260, 2260, 2260, }; /********** End of lemon-generated parsing tables *****************************/ @@ -3349,18 +3354,18 @@ static const char *const yyRuleName[] = { /* 568 */ "function_expression ::= POSITION NK_LP expr_or_subquery IN expr_or_subquery NK_RP", /* 569 */ "function_expression ::= TRIM NK_LP expr_or_subquery NK_RP", /* 570 */ "function_expression ::= TRIM NK_LP trim_specification_type FROM expr_or_subquery NK_RP", - /* 571 */ "function_expression ::= TRIM NK_LP trim_specification_type expr_or_subquery FROM expr_or_subquery NK_RP", - /* 572 */ "function_expression ::= substr_func NK_LP expression_list NK_RP", - /* 573 */ "function_expression ::= substr_func NK_LP expr_or_subquery FROM 
expr_or_subquery NK_RP", - /* 574 */ "function_expression ::= substr_func NK_LP expr_or_subquery FROM expr_or_subquery FOR expr_or_subquery NK_RP", - /* 575 */ "function_expression ::= REPLACE NK_LP expression_list NK_RP", - /* 576 */ "function_expression ::= literal_func", - /* 577 */ "literal_func ::= noarg_func NK_LP NK_RP", - /* 578 */ "literal_func ::= NOW", - /* 579 */ "literal_func ::= TODAY", - /* 580 */ "substr_func ::= SUBSTR", - /* 581 */ "substr_func ::= SUBSTRING", - /* 582 */ "trim_specification_type ::=", + /* 571 */ "function_expression ::= TRIM NK_LP expr_or_subquery FROM expr_or_subquery NK_RP", + /* 572 */ "function_expression ::= TRIM NK_LP trim_specification_type expr_or_subquery FROM expr_or_subquery NK_RP", + /* 573 */ "function_expression ::= substr_func NK_LP expression_list NK_RP", + /* 574 */ "function_expression ::= substr_func NK_LP expr_or_subquery FROM expr_or_subquery NK_RP", + /* 575 */ "function_expression ::= substr_func NK_LP expr_or_subquery FROM expr_or_subquery FOR expr_or_subquery NK_RP", + /* 576 */ "function_expression ::= REPLACE NK_LP expression_list NK_RP", + /* 577 */ "function_expression ::= literal_func", + /* 578 */ "literal_func ::= noarg_func NK_LP NK_RP", + /* 579 */ "literal_func ::= NOW", + /* 580 */ "literal_func ::= TODAY", + /* 581 */ "substr_func ::= SUBSTR", + /* 582 */ "substr_func ::= SUBSTRING", /* 583 */ "trim_specification_type ::= BOTH", /* 584 */ "trim_specification_type ::= TRAILING", /* 585 */ "trim_specification_type ::= LEADING", @@ -4802,18 +4807,18 @@ static const YYCODETYPE yyRuleInfoLhs[] = { 505, /* (568) function_expression ::= POSITION NK_LP expr_or_subquery IN expr_or_subquery NK_RP */ 505, /* (569) function_expression ::= TRIM NK_LP expr_or_subquery NK_RP */ 505, /* (570) function_expression ::= TRIM NK_LP trim_specification_type FROM expr_or_subquery NK_RP */ - 505, /* (571) function_expression ::= TRIM NK_LP trim_specification_type expr_or_subquery FROM expr_or_subquery NK_RP */ - 505, /* (572) function_expression ::= substr_func NK_LP expression_list NK_RP */ - 505, /* (573) function_expression ::= substr_func NK_LP expr_or_subquery FROM expr_or_subquery NK_RP */ - 505, /* (574) function_expression ::= substr_func NK_LP expr_or_subquery FROM expr_or_subquery FOR expr_or_subquery NK_RP */ - 505, /* (575) function_expression ::= REPLACE NK_LP expression_list NK_RP */ - 505, /* (576) function_expression ::= literal_func */ - 498, /* (577) literal_func ::= noarg_func NK_LP NK_RP */ - 498, /* (578) literal_func ::= NOW */ - 498, /* (579) literal_func ::= TODAY */ - 510, /* (580) substr_func ::= SUBSTR */ - 510, /* (581) substr_func ::= SUBSTRING */ - 509, /* (582) trim_specification_type ::= */ + 505, /* (571) function_expression ::= TRIM NK_LP expr_or_subquery FROM expr_or_subquery NK_RP */ + 505, /* (572) function_expression ::= TRIM NK_LP trim_specification_type expr_or_subquery FROM expr_or_subquery NK_RP */ + 505, /* (573) function_expression ::= substr_func NK_LP expression_list NK_RP */ + 505, /* (574) function_expression ::= substr_func NK_LP expr_or_subquery FROM expr_or_subquery NK_RP */ + 505, /* (575) function_expression ::= substr_func NK_LP expr_or_subquery FROM expr_or_subquery FOR expr_or_subquery NK_RP */ + 505, /* (576) function_expression ::= REPLACE NK_LP expression_list NK_RP */ + 505, /* (577) function_expression ::= literal_func */ + 498, /* (578) literal_func ::= noarg_func NK_LP NK_RP */ + 498, /* (579) literal_func ::= NOW */ + 498, /* (580) literal_func ::= TODAY */ + 510, /* (581) 
substr_func ::= SUBSTR */ + 510, /* (582) substr_func ::= SUBSTRING */ 509, /* (583) trim_specification_type ::= BOTH */ 509, /* (584) trim_specification_type ::= TRAILING */ 509, /* (585) trim_specification_type ::= LEADING */ @@ -5577,18 +5582,18 @@ static const signed char yyRuleInfoNRhs[] = { -6, /* (568) function_expression ::= POSITION NK_LP expr_or_subquery IN expr_or_subquery NK_RP */ -4, /* (569) function_expression ::= TRIM NK_LP expr_or_subquery NK_RP */ -6, /* (570) function_expression ::= TRIM NK_LP trim_specification_type FROM expr_or_subquery NK_RP */ - -7, /* (571) function_expression ::= TRIM NK_LP trim_specification_type expr_or_subquery FROM expr_or_subquery NK_RP */ - -4, /* (572) function_expression ::= substr_func NK_LP expression_list NK_RP */ - -6, /* (573) function_expression ::= substr_func NK_LP expr_or_subquery FROM expr_or_subquery NK_RP */ - -8, /* (574) function_expression ::= substr_func NK_LP expr_or_subquery FROM expr_or_subquery FOR expr_or_subquery NK_RP */ - -4, /* (575) function_expression ::= REPLACE NK_LP expression_list NK_RP */ - -1, /* (576) function_expression ::= literal_func */ - -3, /* (577) literal_func ::= noarg_func NK_LP NK_RP */ - -1, /* (578) literal_func ::= NOW */ - -1, /* (579) literal_func ::= TODAY */ - -1, /* (580) substr_func ::= SUBSTR */ - -1, /* (581) substr_func ::= SUBSTRING */ - 0, /* (582) trim_specification_type ::= */ + -6, /* (571) function_expression ::= TRIM NK_LP expr_or_subquery FROM expr_or_subquery NK_RP */ + -7, /* (572) function_expression ::= TRIM NK_LP trim_specification_type expr_or_subquery FROM expr_or_subquery NK_RP */ + -4, /* (573) function_expression ::= substr_func NK_LP expression_list NK_RP */ + -6, /* (574) function_expression ::= substr_func NK_LP expr_or_subquery FROM expr_or_subquery NK_RP */ + -8, /* (575) function_expression ::= substr_func NK_LP expr_or_subquery FROM expr_or_subquery FOR expr_or_subquery NK_RP */ + -4, /* (576) function_expression ::= REPLACE NK_LP expression_list NK_RP */ + -1, /* (577) function_expression ::= literal_func */ + -3, /* (578) literal_func ::= noarg_func NK_LP NK_RP */ + -1, /* (579) literal_func ::= NOW */ + -1, /* (580) literal_func ::= TODAY */ + -1, /* (581) substr_func ::= SUBSTR */ + -1, /* (582) substr_func ::= SUBSTRING */ -1, /* (583) trim_specification_type ::= BOTH */ -1, /* (584) trim_specification_type ::= TRAILING */ -1, /* (585) trim_specification_type ::= LEADING */ @@ -6065,8 +6070,8 @@ static YYACTIONTYPE yy_reduce( case 526: /* cgroup_name ::= NK_ID */ yytestcase(yyruleno==526); case 527: /* index_name ::= NK_ID */ yytestcase(yyruleno==527); case 528: /* tsma_name ::= NK_ID */ yytestcase(yyruleno==528); - case 580: /* substr_func ::= SUBSTR */ yytestcase(yyruleno==580); - case 581: /* substr_func ::= SUBSTRING */ yytestcase(yyruleno==581); + case 581: /* substr_func ::= SUBSTR */ yytestcase(yyruleno==581); + case 582: /* substr_func ::= SUBSTRING */ yytestcase(yyruleno==582); case 586: /* noarg_func ::= NOW */ yytestcase(yyruleno==586); case 587: /* noarg_func ::= TODAY */ yytestcase(yyruleno==587); case 588: /* noarg_func ::= TIMEZONE */ yytestcase(yyruleno==588); @@ -7317,7 +7322,7 @@ static YYACTIONTYPE yy_reduce( case 532: /* expression ::= column_reference */ yytestcase(yyruleno==532); case 533: /* expression ::= function_expression */ yytestcase(yyruleno==533); case 534: /* expression ::= case_when_expression */ yytestcase(yyruleno==534); - case 576: /* function_expression ::= literal_func */ yytestcase(yyruleno==576); + case 577: /* 
function_expression ::= literal_func */ yytestcase(yyruleno==577); case 633: /* boolean_value_expression ::= boolean_primary */ yytestcase(yyruleno==633); case 637: /* boolean_primary ::= predicate */ yytestcase(yyruleno==637); case 639: /* common_expression ::= expr_or_subquery */ yytestcase(yyruleno==639); @@ -7522,8 +7527,8 @@ static YYACTIONTYPE yy_reduce( case 561: /* pseudo_column ::= IROWTS */ yytestcase(yyruleno==561); case 562: /* pseudo_column ::= ISFILLED */ yytestcase(yyruleno==562); case 563: /* pseudo_column ::= QTAGS */ yytestcase(yyruleno==563); - case 578: /* literal_func ::= NOW */ yytestcase(yyruleno==578); - case 579: /* literal_func ::= TODAY */ yytestcase(yyruleno==579); + case 579: /* literal_func ::= NOW */ yytestcase(yyruleno==579); + case 580: /* literal_func ::= TODAY */ yytestcase(yyruleno==580); { yylhsminor.yy560 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL)); } yymsp[0].minor.yy560 = yylhsminor.yy560; break; @@ -7533,7 +7538,7 @@ static YYACTIONTYPE yy_reduce( break; case 564: /* function_expression ::= function_name NK_LP expression_list NK_RP */ case 565: /* function_expression ::= star_func NK_LP star_func_para_list NK_RP */ yytestcase(yyruleno==565); - case 572: /* function_expression ::= substr_func NK_LP expression_list NK_RP */ yytestcase(yyruleno==572); + case 573: /* function_expression ::= substr_func NK_LP expression_list NK_RP */ yytestcase(yyruleno==573); { yylhsminor.yy560 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy533, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-3].minor.yy533, yymsp[-1].minor.yy334)); } yymsp[-3].minor.yy560 = yylhsminor.yy560; break; @@ -7554,29 +7559,30 @@ static YYACTIONTYPE yy_reduce( { yylhsminor.yy560 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, createTrimFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy560), yymsp[-3].minor.yy672)); } yymsp[-5].minor.yy560 = yylhsminor.yy560; break; - case 571: /* function_expression ::= TRIM NK_LP trim_specification_type expr_or_subquery FROM expr_or_subquery NK_RP */ + case 571: /* function_expression ::= TRIM NK_LP expr_or_subquery FROM expr_or_subquery NK_RP */ + { yylhsminor.yy560 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, createTrimFunctionNodeExt(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy560), releaseRawExprNode(pCxt, yymsp[-1].minor.yy560), TRIM_TYPE_BOTH)); } + yymsp[-5].minor.yy560 = yylhsminor.yy560; + break; + case 572: /* function_expression ::= TRIM NK_LP trim_specification_type expr_or_subquery FROM expr_or_subquery NK_RP */ { yylhsminor.yy560 = createRawExprNodeExt(pCxt, &yymsp[-6].minor.yy0, &yymsp[0].minor.yy0, createTrimFunctionNodeExt(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy560), releaseRawExprNode(pCxt, yymsp[-1].minor.yy560), yymsp[-4].minor.yy672)); } yymsp[-6].minor.yy560 = yylhsminor.yy560; break; - case 573: /* function_expression ::= substr_func NK_LP expr_or_subquery FROM expr_or_subquery NK_RP */ + case 574: /* function_expression ::= substr_func NK_LP expr_or_subquery FROM expr_or_subquery NK_RP */ { yylhsminor.yy560 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy533, &yymsp[0].minor.yy0, createSubstrFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy560), releaseRawExprNode(pCxt, yymsp[-1].minor.yy560))); } yymsp[-5].minor.yy560 = yylhsminor.yy560; break; - case 574: /* function_expression ::= substr_func NK_LP expr_or_subquery FROM expr_or_subquery FOR expr_or_subquery NK_RP */ + case 575: /* 
function_expression ::= substr_func NK_LP expr_or_subquery FROM expr_or_subquery FOR expr_or_subquery NK_RP */ { yylhsminor.yy560 = createRawExprNodeExt(pCxt, &yymsp[-7].minor.yy533, &yymsp[0].minor.yy0, createSubstrFunctionNodeExt(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy560), releaseRawExprNode(pCxt, yymsp[-3].minor.yy560), releaseRawExprNode(pCxt, yymsp[-1].minor.yy560))); } yymsp[-7].minor.yy560 = yylhsminor.yy560; break; - case 575: /* function_expression ::= REPLACE NK_LP expression_list NK_RP */ + case 576: /* function_expression ::= REPLACE NK_LP expression_list NK_RP */ { yylhsminor.yy560 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy334)); } yymsp[-3].minor.yy560 = yylhsminor.yy560; break; - case 577: /* literal_func ::= noarg_func NK_LP NK_RP */ + case 578: /* literal_func ::= noarg_func NK_LP NK_RP */ { yylhsminor.yy560 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy533, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-2].minor.yy533, NULL)); } yymsp[-2].minor.yy560 = yylhsminor.yy560; break; - case 582: /* trim_specification_type ::= */ - { yymsp[1].minor.yy672 = TRIM_TYPE_BOTH; } - break; case 583: /* trim_specification_type ::= BOTH */ { yymsp[0].minor.yy672 = TRIM_TYPE_BOTH; } break; From f2dcd3f16adb5aa1738d58a93014e17b97c01543 Mon Sep 17 00:00:00 2001 From: sima Date: Thu, 15 Aug 2024 15:20:10 +0800 Subject: [PATCH 064/181] fix:[TD-31470] Fix replace function wrong length. --- source/libs/function/src/builtins.c | 17 ++++++++++++++--- source/libs/scalar/src/sclfunc.c | 16 +++++++++++++++- 2 files changed, 29 insertions(+), 4 deletions(-) diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index e6724f9e0a..760a3c4a33 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -2327,13 +2327,24 @@ static int32_t translateReplace(SFunctionNode* pFunc, char* pErrBuf, int32_t len } } - uint8_t type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; + uint8_t orgType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; + uint8_t fromType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; + uint8_t toType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 2))->type; int32_t orgLen = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->bytes; int32_t fromLen = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->bytes; int32_t toLen = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 2))->bytes; - int32_t resLen = orgLen + orgLen / fromLen * (toLen - fromLen); - pFunc->node.resType = (SDataType){.bytes = resLen, .type = type}; + int32_t resLen; + // Since we don't know the accurate length of result, estimate the maximum length here. + // To make the resLen bigger, we should make fromLen smaller and toLen bigger. 
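+  // Illustrative example (hypothetical sizes): with orgLen = 20, fromLen = 2 and toLen = 8,
+  // the estimate is resLen = TMAX(20, 20 + 20 / 2 * (8 - 2)) = 80 bytes, i.e. the worst case
+  // where every 2-byte occurrence of 'from' is replaced by the 8-byte 'to' string.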
+ if (orgType == TSDB_DATA_TYPE_VARBINARY && fromType != orgType) { + fromLen = fromLen / TSDB_NCHAR_SIZE; + } + if (orgType == TSDB_DATA_TYPE_NCHAR && toType != orgType) { + toLen = toLen * TSDB_NCHAR_SIZE; + } + resLen = TMAX(orgLen, orgLen + orgLen / fromLen * (toLen - fromLen)); + pFunc->node.resType = (SDataType){.bytes = resLen, .type = orgType}; return TSDB_CODE_SUCCESS; } diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index a666404838..fde87ba4f1 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -1458,7 +1458,21 @@ int32_t replaceFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pO numOfRows = TMAX(numOfRows, pInput[i].numOfRows); } - outputLen = pInputData[0]->info.bytes + pInputData[0]->info.bytes / pInputData[1]->info.bytes * (pInputData[2]->info.bytes - pInputData[1]->info.bytes); + int8_t orgType = pInputData[0]->info.type; + int8_t fromType = pInputData[1]->info.type; + int8_t toType = pInputData[2]->info.type; + int32_t orgLength = pInputData[0]->info.bytes; + int32_t fromLength = pInputData[1]->info.bytes; + int32_t toLength = pInputData[2]->info.bytes; + + if (orgType == TSDB_DATA_TYPE_VARBINARY && fromType != orgType) { + fromLength = fromLength / TSDB_NCHAR_SIZE; + } + if (orgType == TSDB_DATA_TYPE_NCHAR && toType != orgType) { + toLength = toLength * TSDB_NCHAR_SIZE; + } + outputLen = TMAX(orgLength, orgLength + orgLength / fromLength * (toLength - fromLength)); + if (GET_PARAM_TYPE(&pInput[0]) == TSDB_DATA_TYPE_NULL || GET_PARAM_TYPE(&pInput[1]) == TSDB_DATA_TYPE_NULL || GET_PARAM_TYPE(&pInput[2]) == TSDB_DATA_TYPE_NULL || From cbf5f231be10914f3afe0664dfaba3b93ab3b194 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Thu, 15 Aug 2024 15:38:09 +0800 Subject: [PATCH 065/181] fix: group cache log issue --- source/libs/executor/src/groupcacheoperator.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/source/libs/executor/src/groupcacheoperator.c b/source/libs/executor/src/groupcacheoperator.c index 00b8c3b9ae..3e9ac2b10a 100644 --- a/source/libs/executor/src/groupcacheoperator.c +++ b/source/libs/executor/src/groupcacheoperator.c @@ -73,6 +73,10 @@ static int32_t initGroupColsInfo(SGroupColsInfo* pCols, bool grpColsMayBeNull, S } static void logGroupCacheExecInfo(SGroupCacheOperatorInfo* pGrpCacheOperator) { + if (pGrpCacheOperator->downstreamNum <= 0 || NULL == pGrpCacheOperator->execInfo.pDownstreamBlkNum) { + return; + } + char* buf = taosMemoryMalloc(pGrpCacheOperator->downstreamNum * 32 + 100); if (NULL == buf) { return; From efff5e2bf9181cd483d3e76b29b3608640ca8c52 Mon Sep 17 00:00:00 2001 From: Shungang Li Date: Thu, 15 Aug 2024 16:06:12 +0800 Subject: [PATCH 066/181] feat: s3 use global s3BucketName --- source/common/src/cos.c | 24 ++++++++++++------------ source/common/src/tglobal.c | 20 ++++++++++++-------- 2 files changed, 24 insertions(+), 20 deletions(-) diff --git a/source/common/src/cos.c b/source/common/src/cos.c index aa587bf07b..8392b0564a 100644 --- a/source/common/src/cos.c +++ b/source/common/src/cos.c @@ -9,7 +9,7 @@ extern int8_t tsS3EpNum; extern char tsS3Endpoint[][TSDB_FQDN_LEN]; extern char tsS3AccessKeyId[][TSDB_FQDN_LEN]; extern char tsS3AccessKeySecret[][TSDB_FQDN_LEN]; -extern char tsS3BucketName[][TSDB_FQDN_LEN]; +extern char tsS3BucketName[TSDB_FQDN_LEN]; extern char tsS3AppId[][TSDB_FQDN_LEN]; extern char tsS3Hostname[][TSDB_FQDN_LEN]; extern int8_t tsS3Https; @@ -130,13 +130,13 @@ int32_t s3CheckCfg() { (void)fprintf(stderr, "put object %s: 
success.\n\n", objectname[0]); // list buckets - (void)fprintf(stderr, "start to list bucket %s by prefix s3.\n", tsS3BucketName[i]); - code = s3ListBucketByEp(tsS3BucketName[i], i); + (void)fprintf(stderr, "start to list bucket %s by prefix s3.\n", tsS3BucketName); + code = s3ListBucketByEp(tsS3BucketName, i); if (code != 0) { - (void)fprintf(stderr, "listing bucket %s : failed.\n", tsS3BucketName[i]); + (void)fprintf(stderr, "listing bucket %s : failed.\n", tsS3BucketName); TAOS_CHECK_GOTO(code, &lino, _exit); } - (void)fprintf(stderr, "listing bucket %s: success.\n\n", tsS3BucketName[i]); + (void)fprintf(stderr, "listing bucket %s: success.\n\n", tsS3BucketName); // test range get uint8_t *pBlock = NULL; @@ -975,7 +975,7 @@ int32_t s3PutObjectFromFile2ByEp(const char *file, const char *object_name, int8 contentLength; S3BucketContext bucketContext = {tsS3Hostname[epIndex], - tsS3BucketName[epIndex], + tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId[epIndex], @@ -1058,7 +1058,7 @@ static int32_t s3PutObjectFromFileOffsetByEp(const char *file, const char *objec contentLength; S3BucketContext bucketContext = {tsS3Hostname[epIndex], - tsS3BucketName[epIndex], + tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId[epIndex], @@ -1154,7 +1154,7 @@ static void s3FreeObjectKey(void *pItem) { static SArray *getListByPrefixByEp(const char *prefix, int8_t epIndex) { S3BucketContext bucketContext = {tsS3Hostname[epIndex], - tsS3BucketName[epIndex], + tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId[epIndex], @@ -1222,7 +1222,7 @@ static int32_t s3DeleteObjectsByEp(const char *object_name[], int nobject, int8_ int32_t code = 0; S3BucketContext bucketContext = {tsS3Hostname[epIndex], - tsS3BucketName[epIndex], + tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId[epIndex], @@ -1298,7 +1298,7 @@ static int32_t s3GetObjectBlockByEp(const char *object_name, int64_t offset, int const char *ifMatch = 0, *ifNotMatch = 0; S3BucketContext bucketContext = {tsS3Hostname[epIndex], - tsS3BucketName[epIndex], + tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId[epIndex], @@ -1371,7 +1371,7 @@ static int32_t s3GetObjectToFileByEp(const char *object_name, const char *fileNa const char *ifMatch = 0, *ifNotMatch = 0; S3BucketContext bucketContext = {tsS3Hostname[epIndex], - tsS3BucketName[epIndex], + tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId[epIndex], @@ -1448,7 +1448,7 @@ static long s3SizeByEp(const char *object_name, int8_t epIndex) { int status = 0; S3BucketContext bucketContext = {tsS3Hostname[epIndex], - tsS3BucketName[epIndex], + tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId[epIndex], diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 4e32288123..3373b56ad8 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -299,7 +299,7 @@ char tsS3Endpoint[TSDB_MAX_EP_NUM][TSDB_FQDN_LEN] = {""}; char tsS3AccessKey[TSDB_MAX_EP_NUM][TSDB_FQDN_LEN] = {""}; char tsS3AccessKeyId[TSDB_MAX_EP_NUM][TSDB_FQDN_LEN] = {""}; char tsS3AccessKeySecret[TSDB_MAX_EP_NUM][TSDB_FQDN_LEN] = {""}; -char tsS3BucketName[TSDB_MAX_EP_NUM][TSDB_FQDN_LEN] = {""}; +char tsS3BucketName[TSDB_FQDN_LEN] = ""; char tsS3AppId[TSDB_MAX_EP_NUM][TSDB_FQDN_LEN] = {""}; int8_t tsS3Enabled = false; int8_t tsS3EnabledCfg = false; @@ -404,10 +404,14 @@ int32_t taosSetS3Cfg(SConfig *pCfg) { } TAOS_CHECK_RETURN(taosSplitS3Cfg(pCfg, "s3Endpoint", tsS3Endpoint, &num)); - if (num != tsS3EpNum) TAOS_RETURN(TSDB_CODE_INVALID_CFG); + if (num != tsS3EpNum) { + uError("invalid s3 ep 
num:%d, expected:%d, ", num, tsS3EpNum); + TAOS_RETURN(TSDB_CODE_INVALID_CFG); + } - TAOS_CHECK_RETURN(taosSplitS3Cfg(pCfg, "s3BucketName", tsS3BucketName, &num)); - if (num != tsS3EpNum) TAOS_RETURN(TSDB_CODE_INVALID_CFG); + SConfigItem *pItem = NULL; + TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "s3BucketName"); + tstrncpy(tsS3BucketName, pItem->str, TSDB_FQDN_LEN); for (int i = 0; i < tsS3EpNum; ++i) { char *proto = strstr(tsS3Endpoint[i], "https://"); @@ -419,9 +423,9 @@ int32_t taosSetS3Cfg(SConfig *pCfg) { char *cos = strstr(tsS3Endpoint[i], "cos."); if (cos) { - char *appid = strrchr(tsS3BucketName[i], '-'); + char *appid = strrchr(tsS3BucketName, '-'); if (!appid) { - uError("failed to locate appid in bucket:%s", tsS3BucketName[i]); + uError("failed to locate appid in bucket:%s", tsS3BucketName); TAOS_RETURN(TSDB_CODE_INVALID_CFG); } else { tstrncpy(tsS3AppId[i], appid + 1, TSDB_FQDN_LEN); @@ -432,7 +436,7 @@ int32_t taosSetS3Cfg(SConfig *pCfg) { tsS3Https = (strstr(tsS3Endpoint[0], "https://") != NULL); tsS3Oss = (strstr(tsS3Endpoint[0], "aliyuncs.") != NULL); - if (tsS3BucketName[0][0] != '<') { + if (tsS3BucketName[0] != '<') { #if defined(USE_COS) || defined(USE_S3) #ifdef TD_ENTERPRISE /*if (tsDiskCfgNum > 1) */ tsS3Enabled = true; @@ -818,7 +822,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { TAOS_CHECK_RETURN(cfgAddString(pCfg, "s3Accesskey", tsS3AccessKey[0], CFG_SCOPE_SERVER, CFG_DYN_NONE)); TAOS_CHECK_RETURN(cfgAddString(pCfg, "s3Endpoint", tsS3Endpoint[0], CFG_SCOPE_SERVER, CFG_DYN_NONE)); - TAOS_CHECK_RETURN(cfgAddString(pCfg, "s3BucketName", tsS3BucketName[0], CFG_SCOPE_SERVER, CFG_DYN_NONE)); + TAOS_CHECK_RETURN(cfgAddString(pCfg, "s3BucketName", tsS3BucketName, CFG_SCOPE_SERVER, CFG_DYN_NONE)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "s3PageCacheSize", tsS3PageCacheSize, 4, 1024 * 1024 * 1024, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "s3UploadDelaySec", tsS3UploadDelaySec, 1, 60 * 60 * 24 * 30, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER)); From f62e849222ab9f4f28177ebdcd159b32344bb088 Mon Sep 17 00:00:00 2001 From: kailixu Date: Thu, 15 Aug 2024 16:07:57 +0800 Subject: [PATCH 067/181] fix: support customized version --- source/dnode/mgmt/exe/dmMain.c | 6 +++--- source/os/src/osSysinfo.c | 6 +++++- tools/shell/src/shellArguments.c | 10 +++++++--- 3 files changed, 15 insertions(+), 7 deletions(-) diff --git a/source/dnode/mgmt/exe/dmMain.c b/source/dnode/mgmt/exe/dmMain.c index b3e5015706..e5c37e3d55 100644 --- a/source/dnode/mgmt/exe/dmMain.c +++ b/source/dnode/mgmt/exe/dmMain.c @@ -125,7 +125,7 @@ void dmLogCrash(int signum, void *sigInfo, void *context) { _return: - taosLogCrashInfo("taosd", pMsg, msgLen, signum, sigInfo); + taosLogCrashInfo(CUS_PROMPT "d", pMsg, msgLen, signum, sigInfo); #ifdef _TD_DARWIN_64 exit(signum); @@ -258,7 +258,7 @@ static void dmPrintArgs(int32_t argc, char const *argv[]) { static void dmGenerateGrant() { mndGenerateMachineCode(); } static void dmPrintVersion() { - printf("%s\ntaosd version: %s compatible_version: %s\n", TD_PRODUCT_NAME, version, compatible_version); + printf("%s\n%sd version: %s compatible_version: %s\n", TD_PRODUCT_NAME, CUS_PROMPT, version, compatible_version); printf("git: %s\n", gitinfo); #ifdef TD_ENTERPRISE printf("gitOfInternal: %s\n", gitinfoOfInternal); @@ -268,7 +268,7 @@ static void dmPrintVersion() { static void dmPrintHelp() { char indent[] = " "; - printf("Usage: taosd [OPTION...] \n\n"); + printf("Usage: %sd [OPTION...] 
\n\n", CUS_PROMPT); printf("%s%s%s%s\n", indent, "-a,", indent, DM_APOLLO_URL); printf("%s%s%s%s\n", indent, "-c,", indent, DM_CFG_DIR); printf("%s%s%s%s\n", indent, "-s,", indent, DM_SDB_INFO); diff --git a/source/os/src/osSysinfo.c b/source/os/src/osSysinfo.c index 67a2cd97c4..92e5967416 100644 --- a/source/os/src/osSysinfo.c +++ b/source/os/src/osSysinfo.c @@ -19,6 +19,10 @@ #if defined(CUS_NAME) || defined(CUS_PROMPT) || defined(CUS_EMAIL) #include "cus_name.h" +#else +#ifndef CUS_PROMPT +#define CUS_PROMPT "taos" +#endif #endif #define PROCESS_ITEM 12 @@ -987,7 +991,7 @@ void taosKillSystem() { exit(0); #else // SIGINT - (void)printf("taosd will shut down soon"); + (void)printf("%sd will shut down soon", CUS_PROMPT); (void)kill(tsProcId, 2); #endif } diff --git a/tools/shell/src/shellArguments.c b/tools/shell/src/shellArguments.c index 1eb61d2394..cf3c7824fa 100644 --- a/tools/shell/src/shellArguments.c +++ b/tools/shell/src/shellArguments.c @@ -22,6 +22,10 @@ #if defined(CUS_NAME) || defined(CUS_PROMPT) || defined(CUS_EMAIL) #include "cus_name.h" +#else +#ifndef CUS_PROMPT +#define CUS_PROMPT "taos" +#endif #endif #define TAOS_CONSOLE_PROMPT_CONTINUE " -> " @@ -435,11 +439,11 @@ int32_t shellParseArgs(int32_t argc, char *argv[]) { shell.info.promptSize = strlen(shell.info.promptHeader); #ifdef TD_ENTERPRISE snprintf(shell.info.programVersion, sizeof(shell.info.programVersion), - "%s\ntaos version: %s compatible_version: %s\ngit: %s\ngitOfInternal: %s\nbuild: %s", TD_PRODUCT_NAME, - version, compatible_version, gitinfo, gitinfoOfInternal, buildinfo); + "%s\n%s version: %s compatible_version: %s\ngit: %s\ngitOfInternal: %s\nbuild: %s", TD_PRODUCT_NAME, + CUS_PROMPT, version, compatible_version, gitinfo, gitinfoOfInternal, buildinfo); #else snprintf(shell.info.programVersion, sizeof(shell.info.programVersion), - "%s\ntaos version: %s compatible_version: %s\ngit: %s\nbuild: %s", TD_PRODUCT_NAME, version, + "%s\n%s version: %s compatible_version: %s\ngit: %s\nbuild: %s", TD_PRODUCT_NAME, CUS_PROMPT, version, compatible_version, gitinfo, buildinfo); #endif From 85ec91fb5837a5f6d803d42f7d4846f4e7351248 Mon Sep 17 00:00:00 2001 From: sima Date: Thu, 15 Aug 2024 16:11:38 +0800 Subject: [PATCH 068/181] fix:[TD-31473] Fix repeat function use tag as parameter. 
--- source/libs/scalar/src/sclfunc.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index a666404838..a68dcb19b2 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -1639,7 +1639,9 @@ int32_t repeatFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOu int32_t maxCount = 0; for (int32_t i = 0; i < pInput[1].numOfRows; i++) { - maxCount = TMAX(maxCount, *(int32_t *)colDataGetData(pInput[1].columnData, i)); + int32_t tmpCount = 0; + GET_TYPED_DATA(tmpCount, int32_t, GET_PARAM_TYPE(&pInput[1]), colDataGetData(pInput[1].columnData, i)); + maxCount = TMAX(maxCount, tmpCount); } pInputData[0] = pInput[0].columnData; pInputData[1] = pInput[1].columnData; @@ -1663,7 +1665,8 @@ int32_t repeatFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOu colDataSetNULL(pOutputData, i); continue; } - int32_t count = *(int32_t *)colDataGetData(pInputData[1], i); + int32_t count = 0; + GET_TYPED_DATA(count, int32_t, GET_PARAM_TYPE(&pInput[1]), colDataGetData(pInput[1].columnData, i)); if (count <= 0) { varDataSetLen(output, 0); SCL_ERR_JRET(colDataSetVal(pOutputData, i, outputBuf, false)); @@ -1684,7 +1687,8 @@ int32_t repeatFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOu colDataSetNULL(pOutputData, i); continue; } - int32_t count = *(int32_t *)colDataGetData(pInputData[1], i); + int32_t count = 0; + GET_TYPED_DATA(count, int32_t, GET_PARAM_TYPE(&pInput[1]), colDataGetData(pInput[1].columnData, i)); if (count <= 0) { varDataSetLen(output, 0); SCL_ERR_JRET(colDataSetVal(pOutputData, i, outputBuf, false)); @@ -1706,7 +1710,8 @@ int32_t repeatFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOu colDataSetNULL(pOutputData, i); continue; } - int32_t count = *(int32_t *)colDataGetData(pInputData[1], 0); + int32_t count = 0; + GET_TYPED_DATA(count, int32_t, GET_PARAM_TYPE(&pInput[1]), colDataGetData(pInput[1].columnData, i)); if (count <= 0) { varDataSetLen(output, 0); SCL_ERR_JRET(colDataSetVal(pOutputData, i, outputBuf, false)); From d2ea41e0300e8b47789c336c853b3228ad0f6e26 Mon Sep 17 00:00:00 2001 From: Shungang Li Date: Thu, 15 Aug 2024 13:48:49 +0800 Subject: [PATCH 069/181] fix: ttlMgrFlush endless loop --- source/dnode/vnode/src/meta/metaTtl.c | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaTtl.c b/source/dnode/vnode/src/meta/metaTtl.c index 6b203b146d..b5436af2bf 100644 --- a/source/dnode/vnode/src/meta/metaTtl.c +++ b/source/dnode/vnode/src/meta/metaTtl.c @@ -402,15 +402,20 @@ int32_t ttlMgrFlush(STtlManger *pTtlMgr, TXN *pTxn) { int32_t code = TSDB_CODE_SUCCESS; - void *pIter = taosHashIterate(pTtlMgr->pDirtyUids, NULL); - while (pIter != NULL) { + void *pIter = NULL; + while ((pIter = taosHashIterate(pTtlMgr->pDirtyUids, pIter)) != NULL) { STtlDirtyEntry *pEntry = (STtlDirtyEntry *)pIter; tb_uid_t *pUid = taosHashGetKey(pIter, NULL); STtlCacheEntry *cacheEntry = taosHashGet(pTtlMgr->pTtlCache, pUid, sizeof(*pUid)); if (cacheEntry == NULL) { - metaError("%s, ttlMgr flush failed to get ttl cache, uid: %" PRId64 ", type: %d", pTtlMgr->logPrefix, *pUid, - pEntry->type); + metaInfo("%s, ttlMgr flush failed to get ttl cache, might be restoring, uid: %" PRId64 ", type: %d", + pTtlMgr->logPrefix, *pUid, pEntry->type); + code = taosHashRemove(pTtlMgr->pDirtyUids, pUid, sizeof(*pUid)); + if (TSDB_CODE_SUCCESS != code) { + metaError("%s, ttlMgr 
flush failed to remove dirty uid since %s", pTtlMgr->logPrefix, tstrerror(code)); + goto _out; + } continue; } @@ -422,9 +427,9 @@ int32_t ttlMgrFlush(STtlManger *pTtlMgr, TXN *pTxn) { if (pEntry->type == ENTRY_TYPE_UPSERT) { // delete old key & upsert new key - (void)tdbTbDelete(pTtlMgr->pTtlIdx, &ttlKey, sizeof(ttlKey), pTxn); // maybe first insert, ignore error + (void)tdbTbDelete(pTtlMgr->pTtlIdx, &ttlKey, sizeof(ttlKey), pTxn); // maybe first insert, ignore error code = tdbTbUpsert(pTtlMgr->pTtlIdx, &ttlKeyDirty, sizeof(ttlKeyDirty), &cacheEntry->ttlDaysDirty, - sizeof(cacheEntry->ttlDaysDirty), pTxn); + sizeof(cacheEntry->ttlDaysDirty), pTxn); if (TSDB_CODE_SUCCESS != code) { metaError("%s, ttlMgr flush failed to upsert since %s", pTtlMgr->logPrefix, tstrerror(code)); goto _out; @@ -449,9 +454,11 @@ int32_t ttlMgrFlush(STtlManger *pTtlMgr, TXN *pTxn) { goto _out; } - void *pIterTmp = pIter; - pIter = taosHashIterate(pTtlMgr->pDirtyUids, pIterTmp); - (void)taosHashRemove(pTtlMgr->pDirtyUids, pUid, sizeof(tb_uid_t)); + code = taosHashRemove(pTtlMgr->pDirtyUids, pUid, sizeof(*pUid)); + if (TSDB_CODE_SUCCESS != code) { + metaError("%s, ttlMgr flush failed to remove dirty uid since %s", pTtlMgr->logPrefix, tstrerror(code)); + goto _out; + } } taosHashClear(pTtlMgr->pDirtyUids); @@ -459,6 +466,8 @@ int32_t ttlMgrFlush(STtlManger *pTtlMgr, TXN *pTxn) { code = TSDB_CODE_SUCCESS; _out: + taosHashCancelIterate(pTtlMgr->pDirtyUids, pIter); + endNs = taosGetTimestampNs(); metaTrace("%s, ttl mgr flush end, time consumed: %" PRId64 " ns", pTtlMgr->logPrefix, endNs - startNs); From b6629532bf3c8c1b8561ee949740cf1fc991de80 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 15 Aug 2024 16:20:49 +0800 Subject: [PATCH 070/181] fix(tsdb): fix memory leak. --- source/dnode/vnode/src/tsdb/tsdbReadUtil.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbReadUtil.c b/source/dnode/vnode/src/tsdb/tsdbReadUtil.c index 4dabffc10a..a26c326b2a 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReadUtil.c +++ b/source/dnode/vnode/src/tsdb/tsdbReadUtil.c @@ -337,13 +337,14 @@ int32_t createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf* pBuf, c int64_t st = taosGetTimestampUs(); code = initBlockScanInfoBuf(pBuf, numOfTables); if (code != TSDB_CODE_SUCCESS) { + tSimpleHashCleanup(pTableMap); return code; } pUidList->tableUidList = taosMemoryMalloc(numOfTables * sizeof(uint64_t)); if (pUidList->tableUidList == NULL) { tSimpleHashCleanup(pTableMap); - return TSDB_CODE_OUT_OF_MEMORY; + return terrno; } pUidList->currentIndex = 0; From cce4d1104f72a5c19411ef52a0231a7c943267d1 Mon Sep 17 00:00:00 2001 From: kailixu Date: Thu, 15 Aug 2024 16:29:20 +0800 Subject: [PATCH 071/181] fix: support customized version --- tools/shell/src/shellArguments.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/shell/src/shellArguments.c b/tools/shell/src/shellArguments.c index cf3c7824fa..4638f2ad74 100644 --- a/tools/shell/src/shellArguments.c +++ b/tools/shell/src/shellArguments.c @@ -61,7 +61,7 @@ static int32_t shellParseSingleOpt(int32_t key, char *arg); void shellPrintHelp() { char indent[] = " "; - printf("Usage: taos [OPTION...] \r\n\r\n"); + printf("Usage: %s [OPTION...] 
\r\n\r\n", CUS_PROMPT); printf("%s%s%s%s\r\n", indent, "-a,", indent, SHELL_AUTH); printf("%s%s%s%s\r\n", indent, "-A,", indent, SHELL_GEN_AUTH); printf("%s%s%s%s\r\n", indent, "-B,", indent, SHELL_BI_MODE); From 7cd086c8f97ad333038268423f83c5670f56ff03 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Thu, 15 Aug 2024 16:35:37 +0800 Subject: [PATCH 072/181] fix: --- source/client/src/clientTmq.c | 2 +- source/common/src/tmsg.c | 274 +++++++++++++++--------------- source/libs/tdb/src/db/tdbPager.c | 2 +- 3 files changed, 140 insertions(+), 138 deletions(-) diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index 0224a20109..1c0856b464 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -1441,7 +1441,7 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) { while ((code = syncAskEp(tmq)) != 0) { if (retryCnt++ > MAX_RETRY_COUNT || code == TSDB_CODE_MND_CONSUMER_NOT_EXIST) { tscError("consumer:0x%" PRIx64 ", mnd not ready for subscribe, retry more than 2 minutes, code:%s", - tmq->consumerId, strerror(code)); + tmq->consumerId, tstrerror(code)); if (code == TSDB_CODE_MND_CONSUMER_NOT_EXIST) { code = 0; } diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 740e517e35..0258f4faa9 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -70,24 +70,24 @@ } while (0) static int32_t tSerializeSMonitorParas(SEncoder *encoder, const SMonitorParas *pMonitorParas) { - if (tEncodeI8(encoder, pMonitorParas->tsEnableMonitor) < 0) return -1; - if (tEncodeI32(encoder, pMonitorParas->tsMonitorInterval) < 0) return -1; - if (tEncodeI32(encoder, pMonitorParas->tsSlowLogScope) < 0) return -1; - if (tEncodeI32(encoder, pMonitorParas->tsSlowLogMaxLen) < 0) return -1; - if (tEncodeI32(encoder, pMonitorParas->tsSlowLogThreshold) < 0) return -1; - if (tEncodeI32(encoder, pMonitorParas->tsSlowLogThresholdTest) < 0) return -1; - if (tEncodeCStr(encoder, pMonitorParas->tsSlowLogExceptDb) < 0) return -1; + TAOS_CHECK_RETURN(tEncodeI8(encoder, pMonitorParas->tsEnableMonitor)); + TAOS_CHECK_RETURN(tEncodeI32(encoder, pMonitorParas->tsMonitorInterval)); + TAOS_CHECK_RETURN(tEncodeI32(encoder, pMonitorParas->tsSlowLogScope)); + TAOS_CHECK_RETURN(tEncodeI32(encoder, pMonitorParas->tsSlowLogMaxLen)); + TAOS_CHECK_RETURN(tEncodeI32(encoder, pMonitorParas->tsSlowLogThreshold)); + TAOS_CHECK_RETURN(tEncodeI32(encoder, pMonitorParas->tsSlowLogThresholdTest)); + TAOS_CHECK_RETURN(tEncodeCStr(encoder, pMonitorParas->tsSlowLogExceptDb)); return 0; } static int32_t tDeserializeSMonitorParas(SDecoder *decoder, SMonitorParas *pMonitorParas) { - if (tDecodeI8(decoder, (int8_t *)&pMonitorParas->tsEnableMonitor) < 0) return -1; - if (tDecodeI32(decoder, &pMonitorParas->tsMonitorInterval) < 0) return -1; - if (tDecodeI32(decoder, &pMonitorParas->tsSlowLogScope) < 0) return -1; - if (tDecodeI32(decoder, &pMonitorParas->tsSlowLogMaxLen) < 0) return -1; - if (tDecodeI32(decoder, &pMonitorParas->tsSlowLogThreshold) < 0) return -1; - if (tDecodeI32(decoder, &pMonitorParas->tsSlowLogThresholdTest) < 0) return -1; - if (tDecodeCStrTo(decoder, pMonitorParas->tsSlowLogExceptDb) < 0) return -1; + TAOS_CHECK_RETURN(tDecodeI8(decoder, (int8_t *)&pMonitorParas->tsEnableMonitor)); + TAOS_CHECK_RETURN(tDecodeI32(decoder, &pMonitorParas->tsMonitorInterval)); + TAOS_CHECK_RETURN(tDecodeI32(decoder, &pMonitorParas->tsSlowLogScope)); + TAOS_CHECK_RETURN(tDecodeI32(decoder, &pMonitorParas->tsSlowLogMaxLen)); + TAOS_CHECK_RETURN(tDecodeI32(decoder, 
&pMonitorParas->tsSlowLogThreshold)); + TAOS_CHECK_RETURN(tDecodeI32(decoder, &pMonitorParas->tsSlowLogThresholdTest)); + TAOS_CHECK_RETURN(tDecodeCStrTo(decoder, pMonitorParas->tsSlowLogExceptDb)); return 0; } @@ -98,8 +98,7 @@ static int32_t tDecodeTableTSMAInfoRsp(SDecoder *pDecoder, STableTSMAInfoRsp *pR int32_t tInitSubmitMsgIter(const SSubmitReq *pMsg, SSubmitMsgIter *pIter) { if (pMsg == NULL) { - terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP; - return -1; + return terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP; } pIter->totalLen = htonl(pMsg->length); @@ -108,8 +107,7 @@ int32_t tInitSubmitMsgIter(const SSubmitReq *pMsg, SSubmitMsgIter *pIter) { pIter->len = 0; pIter->pMsg = pMsg; if (pIter->totalLen <= sizeof(SSubmitReq)) { - terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP; - return -1; + return terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP; } return 0; @@ -130,9 +128,8 @@ int32_t tGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock) { } if (pIter->len > pIter->totalLen) { - terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP; *pPBlock = NULL; - return -1; + return terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP; } if (pIter->len == pIter->totalLen) { @@ -193,46 +190,46 @@ int32_t tPrintFixedSchemaSubmitReq(SSubmitReq *pReq, STSchema *pTschema) { #endif int32_t tEncodeSEpSet(SEncoder *pEncoder, const SEpSet *pEp) { - if (tEncodeI8(pEncoder, pEp->inUse) < 0) return -1; - if (tEncodeI8(pEncoder, pEp->numOfEps) < 0) return -1; + TAOS_CHECK_RETURN(tEncodeI8(pEncoder, pEp->inUse)); + TAOS_CHECK_RETURN(tEncodeI8(pEncoder, pEp->numOfEps)); for (int32_t i = 0; i < TSDB_MAX_REPLICA; i++) { - if (tEncodeU16(pEncoder, pEp->eps[i].port) < 0) return -1; - if (tEncodeCStrWithLen(pEncoder, pEp->eps[i].fqdn, TSDB_FQDN_LEN) < 0) return -1; + TAOS_CHECK_RETURN(tEncodeU16(pEncoder, pEp->eps[i].port)); + TAOS_CHECK_RETURN(tEncodeCStrWithLen(pEncoder, pEp->eps[i].fqdn, TSDB_FQDN_LEN)); } return 0; } int32_t tDecodeSEpSet(SDecoder *pDecoder, SEpSet *pEp) { - if (tDecodeI8(pDecoder, &pEp->inUse) < 0) return -1; - if (tDecodeI8(pDecoder, &pEp->numOfEps) < 0) return -1; + TAOS_CHECK_RETURN(tDecodeI8(pDecoder, &pEp->inUse)); + TAOS_CHECK_RETURN(tDecodeI8(pDecoder, &pEp->numOfEps)); for (int32_t i = 0; i < TSDB_MAX_REPLICA; i++) { - if (tDecodeU16(pDecoder, &pEp->eps[i].port) < 0) return -1; - if (tDecodeCStrTo(pDecoder, pEp->eps[i].fqdn) < 0) return -1; + TAOS_CHECK_RETURN(tDecodeU16(pDecoder, &pEp->eps[i].port)); + TAOS_CHECK_RETURN(tDecodeCStrTo(pDecoder, pEp->eps[i].fqdn)); } return 0; } int32_t tEncodeSQueryNodeAddr(SEncoder *pEncoder, SQueryNodeAddr *pAddr) { - if (tEncodeI32(pEncoder, pAddr->nodeId) < 0) return -1; - if (tEncodeSEpSet(pEncoder, &pAddr->epSet) < 0) return -1; + TAOS_CHECK_RETURN(tEncodeI32(pEncoder, pAddr->nodeId)); + TAOS_CHECK_RETURN(tEncodeSEpSet(pEncoder, &pAddr->epSet)); return 0; } int32_t tEncodeSQueryNodeLoad(SEncoder *pEncoder, SQueryNodeLoad *pLoad) { - if (tEncodeSQueryNodeAddr(pEncoder, &pLoad->addr) < 0) return -1; - if (tEncodeU64(pEncoder, pLoad->load) < 0) return -1; + TAOS_CHECK_RETURN(tEncodeSQueryNodeAddr(pEncoder, &pLoad->addr)); + TAOS_CHECK_RETURN(tEncodeU64(pEncoder, pLoad->load)); return 0; } int32_t tDecodeSQueryNodeAddr(SDecoder *pDecoder, SQueryNodeAddr *pAddr) { - if (tDecodeI32(pDecoder, &pAddr->nodeId) < 0) return -1; - if (tDecodeSEpSet(pDecoder, &pAddr->epSet) < 0) return -1; + TAOS_CHECK_RETURN(tDecodeI32(pDecoder, &pAddr->nodeId)); + TAOS_CHECK_RETURN(tDecodeSEpSet(pDecoder, &pAddr->epSet)); return 0; } int32_t tDecodeSQueryNodeLoad(SDecoder *pDecoder, SQueryNodeLoad 
*pLoad) { - if (tDecodeSQueryNodeAddr(pDecoder, &pLoad->addr) < 0) return -1; - if (tDecodeU64(pDecoder, &pLoad->load) < 0) return -1; + TAOS_CHECK_RETURN(tDecodeSQueryNodeAddr(pDecoder, &pLoad->addr)); + TAOS_CHECK_RETURN(tDecodeU64(pDecoder, &pLoad->load)); return 0; } @@ -258,63 +255,63 @@ void *taosDecodeSEpSet(const void *buf, SEpSet *pEp) { } static int32_t tSerializeSClientHbReq(SEncoder *pEncoder, const SClientHbReq *pReq) { - if (tEncodeSClientHbKey(pEncoder, &pReq->connKey) < 0) return -1; + TAOS_CHECK_RETURN(tEncodeSClientHbKey(pEncoder, &pReq->connKey)); if (pReq->connKey.connType == CONN_TYPE__QUERY) { - if (tEncodeI64(pEncoder, pReq->app.appId) < 0) return -1; - if (tEncodeI32(pEncoder, pReq->app.pid) < 0) return -1; - if (tEncodeCStr(pEncoder, pReq->app.name) < 0) return -1; - if (tEncodeI64(pEncoder, pReq->app.startTime) < 0) return -1; - if (tEncodeU64(pEncoder, pReq->app.summary.numOfInsertsReq) < 0) return -1; - if (tEncodeU64(pEncoder, pReq->app.summary.numOfInsertRows) < 0) return -1; - if (tEncodeU64(pEncoder, pReq->app.summary.insertElapsedTime) < 0) return -1; - if (tEncodeU64(pEncoder, pReq->app.summary.insertBytes) < 0) return -1; - if (tEncodeU64(pEncoder, pReq->app.summary.fetchBytes) < 0) return -1; - if (tEncodeU64(pEncoder, pReq->app.summary.queryElapsedTime) < 0) return -1; - if (tEncodeU64(pEncoder, pReq->app.summary.numOfSlowQueries) < 0) return -1; - if (tEncodeU64(pEncoder, pReq->app.summary.totalRequests) < 0) return -1; - if (tEncodeU64(pEncoder, pReq->app.summary.currentRequests) < 0) return -1; + TAOS_CHECK_RETURN(tEncodeI64(pEncoder, pReq->app.appId)); + TAOS_CHECK_RETURN(tEncodeI32(pEncoder, pReq->app.pid)); + TAOS_CHECK_RETURN(tEncodeCStr(pEncoder, pReq->app.name)); + TAOS_CHECK_RETURN(tEncodeI64(pEncoder, pReq->app.startTime)); + TAOS_CHECK_RETURN(tEncodeU64(pEncoder, pReq->app.summary.numOfInsertsReq)); + TAOS_CHECK_RETURN(tEncodeU64(pEncoder, pReq->app.summary.numOfInsertRows)); + TAOS_CHECK_RETURN(tEncodeU64(pEncoder, pReq->app.summary.insertElapsedTime)); + TAOS_CHECK_RETURN(tEncodeU64(pEncoder, pReq->app.summary.insertBytes)); + TAOS_CHECK_RETURN(tEncodeU64(pEncoder, pReq->app.summary.fetchBytes)); + TAOS_CHECK_RETURN(tEncodeU64(pEncoder, pReq->app.summary.queryElapsedTime)); + TAOS_CHECK_RETURN(tEncodeU64(pEncoder, pReq->app.summary.numOfSlowQueries)); + TAOS_CHECK_RETURN(tEncodeU64(pEncoder, pReq->app.summary.totalRequests)); + TAOS_CHECK_RETURN(tEncodeU64(pEncoder, pReq->app.summary.currentRequests)); int32_t queryNum = 0; if (pReq->query) { queryNum = 1; - if (tEncodeI32(pEncoder, queryNum) < 0) return -1; - if (tEncodeU32(pEncoder, pReq->query->connId) < 0) return -1; + TAOS_CHECK_RETURN(tEncodeI32(pEncoder, queryNum)); + TAOS_CHECK_RETURN(tEncodeU32(pEncoder, pReq->query->connId)); int32_t num = taosArrayGetSize(pReq->query->queryDesc); - if (tEncodeI32(pEncoder, num) < 0) return -1; + TAOS_CHECK_RETURN(tEncodeI32(pEncoder, num)); for (int32_t i = 0; i < num; ++i) { SQueryDesc *desc = taosArrayGet(pReq->query->queryDesc, i); - if (tEncodeCStr(pEncoder, desc->sql) < 0) return -1; - if (tEncodeU64(pEncoder, desc->queryId) < 0) return -1; - if (tEncodeI64(pEncoder, desc->useconds) < 0) return -1; - if (tEncodeI64(pEncoder, desc->stime) < 0) return -1; - if (tEncodeI64(pEncoder, desc->reqRid) < 0) return -1; - if (tEncodeI8(pEncoder, desc->stableQuery) < 0) return -1; - if (tEncodeI8(pEncoder, desc->isSubQuery) < 0) return -1; - if (tEncodeCStr(pEncoder, desc->fqdn) < 0) return -1; - if (tEncodeI32(pEncoder, desc->subPlanNum) < 0) return 
-1; + TAOS_CHECK_RETURN(tEncodeCStr(pEncoder, desc->sql)); + TAOS_CHECK_RETURN(tEncodeU64(pEncoder, desc->queryId)); + TAOS_CHECK_RETURN(tEncodeI64(pEncoder, desc->useconds)); + TAOS_CHECK_RETURN(tEncodeI64(pEncoder, desc->stime)); + TAOS_CHECK_RETURN(tEncodeI64(pEncoder, desc->reqRid)); + TAOS_CHECK_RETURN(tEncodeI8(pEncoder, desc->stableQuery)); + TAOS_CHECK_RETURN(tEncodeI8(pEncoder, desc->isSubQuery)); + TAOS_CHECK_RETURN(tEncodeCStr(pEncoder, desc->fqdn)); + TAOS_CHECK_RETURN(tEncodeI32(pEncoder, desc->subPlanNum)); int32_t snum = desc->subDesc ? taosArrayGetSize(desc->subDesc) : 0; - if (tEncodeI32(pEncoder, snum) < 0) return -1; + TAOS_CHECK_RETURN(tEncodeI32(pEncoder, snum)); for (int32_t m = 0; m < snum; ++m) { SQuerySubDesc *sDesc = taosArrayGet(desc->subDesc, m); - if (tEncodeI64(pEncoder, sDesc->tid) < 0) return -1; - if (tEncodeCStr(pEncoder, sDesc->status) < 0) return -1; + TAOS_CHECK_RETURN(tEncodeI64(pEncoder, sDesc->tid)); + TAOS_CHECK_RETURN(tEncodeCStr(pEncoder, sDesc->status)); } } } else { - if (tEncodeI32(pEncoder, queryNum) < 0) return -1; + TAOS_CHECK_RETURN(tEncodeI32(pEncoder, queryNum)); } } int32_t kvNum = taosHashGetSize(pReq->info); - if (tEncodeI32(pEncoder, kvNum) < 0) return -1; + TAOS_CHECK_RETURN(tEncodeI32(pEncoder, kvNum)); void *pIter = taosHashIterate(pReq->info, NULL); while (pIter != NULL) { SKv *kv = pIter; - if (tEncodeSKv(pEncoder, kv) < 0) return -1; + TAOS_CHECK_RETURN(tEncodeSKv(pEncoder, kv)); pIter = taosHashIterate(pReq->info, pIter); } @@ -322,83 +319,88 @@ static int32_t tSerializeSClientHbReq(SEncoder *pEncoder, const SClientHbReq *pR } static int32_t tDeserializeSClientHbReq(SDecoder *pDecoder, SClientHbReq *pReq) { - if (tDecodeSClientHbKey(pDecoder, &pReq->connKey) < 0) return -1; + TAOS_CHECK_RETURN(tDecodeSClientHbKey(pDecoder, &pReq->connKey)); if (pReq->connKey.connType == CONN_TYPE__QUERY) { - if (tDecodeI64(pDecoder, &pReq->app.appId) < 0) return -1; - if (tDecodeI32(pDecoder, &pReq->app.pid) < 0) return -1; - if (tDecodeCStrTo(pDecoder, pReq->app.name) < 0) return -1; - if (tDecodeI64(pDecoder, &pReq->app.startTime) < 0) return -1; - if (tDecodeU64(pDecoder, &pReq->app.summary.numOfInsertsReq) < 0) return -1; - if (tDecodeU64(pDecoder, &pReq->app.summary.numOfInsertRows) < 0) return -1; - if (tDecodeU64(pDecoder, &pReq->app.summary.insertElapsedTime) < 0) return -1; - if (tDecodeU64(pDecoder, &pReq->app.summary.insertBytes) < 0) return -1; - if (tDecodeU64(pDecoder, &pReq->app.summary.fetchBytes) < 0) return -1; - if (tDecodeU64(pDecoder, &pReq->app.summary.queryElapsedTime) < 0) return -1; - if (tDecodeU64(pDecoder, &pReq->app.summary.numOfSlowQueries) < 0) return -1; - if (tDecodeU64(pDecoder, &pReq->app.summary.totalRequests) < 0) return -1; - if (tDecodeU64(pDecoder, &pReq->app.summary.currentRequests) < 0) return -1; + TAOS_CHECK_RETURN(tDecodeI64(pDecoder, &pReq->app.appId)); + TAOS_CHECK_RETURN(tDecodeI32(pDecoder, &pReq->app.pid)); + TAOS_CHECK_RETURN(tDecodeCStrTo(pDecoder, pReq->app.name)); + TAOS_CHECK_RETURN(tDecodeI64(pDecoder, &pReq->app.startTime)); + TAOS_CHECK_RETURN(tDecodeU64(pDecoder, &pReq->app.summary.numOfInsertsReq)); + TAOS_CHECK_RETURN(tDecodeU64(pDecoder, &pReq->app.summary.numOfInsertRows)); + TAOS_CHECK_RETURN(tDecodeU64(pDecoder, &pReq->app.summary.insertElapsedTime)); + TAOS_CHECK_RETURN(tDecodeU64(pDecoder, &pReq->app.summary.insertBytes)); + TAOS_CHECK_RETURN(tDecodeU64(pDecoder, &pReq->app.summary.fetchBytes)); + TAOS_CHECK_RETURN(tDecodeU64(pDecoder, &pReq->app.summary.queryElapsedTime)); + 
TAOS_CHECK_RETURN(tDecodeU64(pDecoder, &pReq->app.summary.numOfSlowQueries)); + TAOS_CHECK_RETURN(tDecodeU64(pDecoder, &pReq->app.summary.totalRequests)); + TAOS_CHECK_RETURN(tDecodeU64(pDecoder, &pReq->app.summary.currentRequests)); int32_t queryNum = 0; - if (tDecodeI32(pDecoder, &queryNum) < 0) return -1; + TAOS_CHECK_RETURN(tDecodeI32(pDecoder, &queryNum)); if (queryNum) { pReq->query = taosMemoryCalloc(1, sizeof(*pReq->query)); if (NULL == pReq->query) return -1; - if (tDecodeU32(pDecoder, &pReq->query->connId) < 0) return -1; + TAOS_CHECK_RETURN(tDecodeU32(pDecoder, &pReq->query->connId)); int32_t num = 0; - if (tDecodeI32(pDecoder, &num) < 0) return -1; + TAOS_CHECK_RETURN(tDecodeI32(pDecoder, &num)); if (num > 0) { pReq->query->queryDesc = taosArrayInit(num, sizeof(SQueryDesc)); if (NULL == pReq->query->queryDesc) return -1; for (int32_t i = 0; i < num; ++i) { SQueryDesc desc = {0}; - if (tDecodeCStrTo(pDecoder, desc.sql) < 0) return -1; - if (tDecodeU64(pDecoder, &desc.queryId) < 0) return -1; - if (tDecodeI64(pDecoder, &desc.useconds) < 0) return -1; - if (tDecodeI64(pDecoder, &desc.stime) < 0) return -1; - if (tDecodeI64(pDecoder, &desc.reqRid) < 0) return -1; - if (tDecodeI8(pDecoder, (int8_t *)&desc.stableQuery) < 0) return -1; - if (tDecodeI8(pDecoder, (int8_t *)&desc.isSubQuery) < 0) return -1; - if (tDecodeCStrTo(pDecoder, desc.fqdn) < 0) return -1; - if (tDecodeI32(pDecoder, &desc.subPlanNum) < 0) return -1; + TAOS_CHECK_RETURN(tDecodeCStrTo(pDecoder, desc.sql)); + TAOS_CHECK_RETURN(tDecodeU64(pDecoder, &desc.queryId)); + TAOS_CHECK_RETURN(tDecodeI64(pDecoder, &desc.useconds)); + TAOS_CHECK_RETURN(tDecodeI64(pDecoder, &desc.stime)); + TAOS_CHECK_RETURN(tDecodeI64(pDecoder, &desc.reqRid)); + TAOS_CHECK_RETURN(tDecodeI8(pDecoder, (int8_t *)&desc.stableQuery)); + TAOS_CHECK_RETURN(tDecodeI8(pDecoder, (int8_t *)&desc.isSubQuery)); + TAOS_CHECK_RETURN(tDecodeCStrTo(pDecoder, desc.fqdn)); + TAOS_CHECK_RETURN(tDecodeI32(pDecoder, &desc.subPlanNum)); int32_t snum = 0; - if (tDecodeI32(pDecoder, &snum) < 0) return -1; + TAOS_CHECK_RETURN(tDecodeI32(pDecoder, &snum)); if (snum > 0) { desc.subDesc = taosArrayInit(snum, sizeof(SQuerySubDesc)); if (NULL == desc.subDesc) return -1; for (int32_t m = 0; m < snum; ++m) { SQuerySubDesc sDesc = {0}; - if (tDecodeI64(pDecoder, &sDesc.tid) < 0) return -1; - if (tDecodeCStrTo(pDecoder, sDesc.status) < 0) return -1; - if (!taosArrayPush(desc.subDesc, &sDesc)) return -1; + TAOS_CHECK_RETURN(tDecodeI64(pDecoder, &sDesc.tid)); + TAOS_CHECK_RETURN(tDecodeCStrTo(pDecoder, sDesc.status)); + if (!taosArrayPush(desc.subDesc, &sDesc)) { + return terrno; + } } } ASSERT(desc.subPlanNum == taosArrayGetSize(desc.subDesc)); - if (!taosArrayPush(pReq->query->queryDesc, &desc)) return -1; + if (!taosArrayPush(pReq->query->queryDesc, &desc)) { + return terrno; + } } } } } int32_t kvNum = 0; - if (tDecodeI32(pDecoder, &kvNum) < 0) return -1; + TAOS_CHECK_RETURN(tDecodeI32(pDecoder, &kvNum)); if (pReq->info == NULL) { pReq->info = taosHashInit(kvNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); } - if (pReq->info == NULL) return -1; + if (pReq->info == NULL) { + return terrno; + } for (int32_t i = 0; i < kvNum; i++) { SKv kv = {0}; - if (tDecodeSKv(pDecoder, &kv) < 0) return -1; + TAOS_CHECK_RETURN(tDecodeSKv(pDecoder, &kv)); int32_t code = taosHashPut(pReq->info, &kv.key, sizeof(kv.key), &kv, sizeof(kv)); if (code) { - terrno = code; - return -1; + return terrno = code; } } @@ -406,75 +408,75 @@ static int32_t 
tDeserializeSClientHbReq(SDecoder *pDecoder, SClientHbReq *pReq) } static int32_t tSerializeSClientHbRsp(SEncoder *pEncoder, const SClientHbRsp *pRsp) { - if (tEncodeSClientHbKey(pEncoder, &pRsp->connKey) < 0) return -1; - if (tEncodeI32(pEncoder, pRsp->status) < 0) return -1; + TAOS_CHECK_RETURN(tEncodeSClientHbKey(pEncoder, &pRsp->connKey)); + TAOS_CHECK_RETURN(tEncodeI32(pEncoder, pRsp->status)); int32_t queryNum = 0; if (pRsp->query) { queryNum = 1; - if (tEncodeI32(pEncoder, queryNum) < 0) return -1; - if (tEncodeU32(pEncoder, pRsp->query->connId) < 0) return -1; - if (tEncodeU64(pEncoder, pRsp->query->killRid) < 0) return -1; - if (tEncodeI32(pEncoder, pRsp->query->totalDnodes) < 0) return -1; - if (tEncodeI32(pEncoder, pRsp->query->onlineDnodes) < 0) return -1; - if (tEncodeI8(pEncoder, pRsp->query->killConnection) < 0) return -1; - if (tEncodeSEpSet(pEncoder, &pRsp->query->epSet) < 0) return -1; + TAOS_CHECK_RETURN(tEncodeI32(pEncoder, queryNum)); + TAOS_CHECK_RETURN(tEncodeU32(pEncoder, pRsp->query->connId)); + TAOS_CHECK_RETURN(tEncodeU64(pEncoder, pRsp->query->killRid)); + TAOS_CHECK_RETURN(tEncodeI32(pEncoder, pRsp->query->totalDnodes)); + TAOS_CHECK_RETURN(tEncodeI32(pEncoder, pRsp->query->onlineDnodes)); + TAOS_CHECK_RETURN(tEncodeI8(pEncoder, pRsp->query->killConnection)); + TAOS_CHECK_RETURN(tEncodeSEpSet(pEncoder, &pRsp->query->epSet)); int32_t num = taosArrayGetSize(pRsp->query->pQnodeList); - if (tEncodeI32(pEncoder, num) < 0) return -1; + TAOS_CHECK_RETURN(tEncodeI32(pEncoder, num)); for (int32_t i = 0; i < num; ++i) { SQueryNodeLoad *pLoad = taosArrayGet(pRsp->query->pQnodeList, i); - if (tEncodeSQueryNodeLoad(pEncoder, pLoad) < 0) return -1; + TAOS_CHECK_RETURN(tEncodeSQueryNodeLoad(pEncoder, pLoad)); } } else { - if (tEncodeI32(pEncoder, queryNum) < 0) return -1; + TAOS_CHECK_RETURN(tEncodeI32(pEncoder, queryNum)); } int32_t kvNum = taosArrayGetSize(pRsp->info); - if (tEncodeI32(pEncoder, kvNum) < 0) return -1; + TAOS_CHECK_RETURN(tEncodeI32(pEncoder, kvNum)); for (int32_t i = 0; i < kvNum; i++) { SKv *kv = taosArrayGet(pRsp->info, i); - if (tEncodeSKv(pEncoder, kv) < 0) return -1; + TAOS_CHECK_RETURN(tEncodeSKv(pEncoder, kv)); } return 0; } static int32_t tDeserializeSClientHbRsp(SDecoder *pDecoder, SClientHbRsp *pRsp) { - if (tDecodeSClientHbKey(pDecoder, &pRsp->connKey) < 0) return -1; - if (tDecodeI32(pDecoder, &pRsp->status) < 0) return -1; + TAOS_CHECK_RETURN(tDecodeSClientHbKey(pDecoder, &pRsp->connKey)); + TAOS_CHECK_RETURN(tDecodeI32(pDecoder, &pRsp->status)); int32_t queryNum = 0; - if (tDecodeI32(pDecoder, &queryNum) < 0) return -1; + TAOS_CHECK_RETURN(tDecodeI32(pDecoder, &queryNum)); if (queryNum) { pRsp->query = taosMemoryCalloc(1, sizeof(*pRsp->query)); if (NULL == pRsp->query) return -1; - if (tDecodeU32(pDecoder, &pRsp->query->connId) < 0) return -1; - if (tDecodeU64(pDecoder, &pRsp->query->killRid) < 0) return -1; - if (tDecodeI32(pDecoder, &pRsp->query->totalDnodes) < 0) return -1; - if (tDecodeI32(pDecoder, &pRsp->query->onlineDnodes) < 0) return -1; - if (tDecodeI8(pDecoder, &pRsp->query->killConnection) < 0) return -1; - if (tDecodeSEpSet(pDecoder, &pRsp->query->epSet) < 0) return -1; + TAOS_CHECK_RETURN(tDecodeU32(pDecoder, &pRsp->query->connId)); + TAOS_CHECK_RETURN(tDecodeU64(pDecoder, &pRsp->query->killRid)); + TAOS_CHECK_RETURN(tDecodeI32(pDecoder, &pRsp->query->totalDnodes)); + TAOS_CHECK_RETURN(tDecodeI32(pDecoder, &pRsp->query->onlineDnodes)); + TAOS_CHECK_RETURN(tDecodeI8(pDecoder, &pRsp->query->killConnection)); + 
TAOS_CHECK_RETURN(tDecodeSEpSet(pDecoder, &pRsp->query->epSet)); int32_t pQnodeNum = 0; - if (tDecodeI32(pDecoder, &pQnodeNum) < 0) return -1; + TAOS_CHECK_RETURN(tDecodeI32(pDecoder, &pQnodeNum)); if (pQnodeNum > 0) { pRsp->query->pQnodeList = taosArrayInit(pQnodeNum, sizeof(SQueryNodeLoad)); - if (NULL == pRsp->query->pQnodeList) return -1; + if (NULL == pRsp->query->pQnodeList) return terrno; for (int32_t i = 0; i < pQnodeNum; ++i) { SQueryNodeLoad load = {0}; - if (tDecodeSQueryNodeLoad(pDecoder, &load) < 0) return -1; - if (!taosArrayPush(pRsp->query->pQnodeList, &load)) return -1; + TAOS_CHECK_RETURN(tDecodeSQueryNodeLoad(pDecoder, &load)); + if (!taosArrayPush(pRsp->query->pQnodeList, &load)) return terrno; } } } int32_t kvNum = 0; - if (tDecodeI32(pDecoder, &kvNum) < 0) return -1; + TAOS_CHECK_RETURN(tDecodeI32(pDecoder, &kvNum)); pRsp->info = taosArrayInit(kvNum, sizeof(SKv)); if (pRsp->info == NULL) return -1; for (int32_t i = 0; i < kvNum; i++) { SKv kv = {0}; - if (tDecodeSKv(pDecoder, &kv)) return -1; - if (!taosArrayPush(pRsp->info, &kv)) return -1; + TAOS_CHECK_RETURN(tDecodeSKv(pDecoder, &kv)); + if (!taosArrayPush(pRsp->info, &kv)) return terrno; } return 0; @@ -5308,7 +5310,7 @@ int32_t tSerializeSMTimerMsg(void *buf, int32_t bufLen, SMTimerReq *pReq) { // return 0; // } -int32_t tSerializeDropOrphanTaskMsg(void* buf, int32_t bufLen, SMStreamDropOrphanMsg* pMsg) { +int32_t tSerializeDropOrphanTaskMsg(void *buf, int32_t bufLen, SMStreamDropOrphanMsg *pMsg) { SEncoder encoder = {0}; tEncoderInit(&encoder, buf, bufLen); @@ -5331,7 +5333,7 @@ int32_t tSerializeDropOrphanTaskMsg(void* buf, int32_t bufLen, SMStreamDropOrpha return tlen; } -int32_t tDeserializeDropOrphanTaskMsg(void* buf, int32_t bufLen, SMStreamDropOrphanMsg* pMsg) { +int32_t tDeserializeDropOrphanTaskMsg(void *buf, int32_t bufLen, SMStreamDropOrphanMsg *pMsg) { SDecoder decoder = {0}; tDecoderInit(&decoder, buf, bufLen); diff --git a/source/libs/tdb/src/db/tdbPager.c b/source/libs/tdb/src/db/tdbPager.c index 01dd0ac766..a650847e1e 100644 --- a/source/libs/tdb/src/db/tdbPager.c +++ b/source/libs/tdb/src/db/tdbPager.c @@ -1180,7 +1180,7 @@ int tdbPagerRestoreJournals(SPager *pPager) { if (code) { taosArrayDestroy(pTxnList); (void)tdbCloseDir(&pDir); - tdbError("failed to restore file due to %s. jFileName:%s", strerror(code), jname); + tdbError("failed to restore file due to %s. jFileName:%s", tstrerror(code), jname); return code; } } From 59270dfd0dfe27c2b1f77bafdf8a2133e3b10af7 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 15 Aug 2024 16:56:30 +0800 Subject: [PATCH 073/181] fix(stream): check status before start timer. 
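
Before arming the dispatch-retry monitor timer, read the task status under pTask->lock and skip the timer when the task is already in stop/dropping state, so a stopped task no longer bumps status.timerActive and re-arms the monitor. Below is a minimal, self-contained model of that guard, not the real SStreamTask struct or stream API: a plain pthread mutex stands in for streamMutexLock/streamMutexUnlock, the status check is assumed to be bracketed by a lock/unlock pair on the task lock, and starting the timer is reduced to setting a flag.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct {
      pthread_mutex_t lock;        /* stands in for pTask->lock              */
      pthread_mutex_t msgLock;     /* stands in for pTask->msgInfo.lock      */
      bool            stopping;    /* stands in for streamTaskShouldStop()   */
      int             inMonitor;   /* stands in for pTask->msgInfo.inMonitor */
      int             timerActive; /* ref count held by the running timer    */
    } Task;

    /* arm the dispatch-retry monitor only when the task is not stopping */
    static void maybeStartDispatchMonitor(Task *t) {
      pthread_mutex_lock(&t->lock);
      bool shouldStop = t->stopping;   /* read status under the task lock           */
      pthread_mutex_unlock(&t->lock);  /* release it before taking the msgInfo lock */

      if (shouldStop) {
        printf("task stopping, dispatch monitor tmr not started\n");
        return;
      }

      pthread_mutex_lock(&t->msgLock);
      if (t->inMonitor == 0) {         /* arm the timer exactly once */
        t->timerActive += 1;
        t->inMonitor = 1;
        printf("dispatch monitor tmr started, ref:%d\n", t->timerActive);
      } else {
        printf("already in dispatch monitor tmr\n");
      }
      pthread_mutex_unlock(&t->msgLock);
    }

    int main(void) {
      Task t = {.stopping = false, .inMonitor = 0, .timerActive = 0};
      pthread_mutex_init(&t.lock, NULL);
      pthread_mutex_init(&t.msgLock, NULL);

      maybeStartDispatchMonitor(&t);   /* arms the timer                          */
      t.stopping = true;               /* single-threaded demo: no lock needed    */
      maybeStartDispatchMonitor(&t);   /* skipped: task is in stop/dropping state */
      return 0;
    }

Build the sketch with `cc -pthread`; the second call is skipped once stopping is set, which is the behavior this patch introduces for tasks that are being stopped or dropped.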
--- source/libs/stream/src/streamDispatch.c | 30 ++++++++++++++++--------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 5a9a60db1d..7937402ccc 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -762,18 +762,26 @@ int32_t streamDispatchStreamBlock(SStreamTask* pTask) { code = sendDispatchMsg(pTask, pTask->msgInfo.pData); - streamMutexLock(&pTask->msgInfo.lock); - if (pTask->msgInfo.inMonitor == 0) { - int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s start dispatch monitor tmr in %dms, ref:%d, dispatch code:%s", id, DISPATCH_RETRY_INTERVAL_MS, - ref, tstrerror(code)); - streamStartMonitorDispatchData(pTask, DISPATCH_RETRY_INTERVAL_MS); - pTask->msgInfo.inMonitor = 1; - } else { - stDebug("s-task:%s already in dispatch monitor tmr", id); - } + streamMutexLock(&pTask->lock); + bool shouldStop = streamTaskShouldStop(pTask); + streamMutexLock(&pTask->lock); - streamMutexUnlock(&pTask->msgInfo.lock); + if (shouldStop) { + stDebug("s-task:%s in stop/dropping status, not start dispatch monitor tmr", id); + } else { + streamMutexLock(&pTask->msgInfo.lock); + if (pTask->msgInfo.inMonitor == 0) { + int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); + stDebug("s-task:%s start dispatch monitor tmr in %dms, ref:%d, dispatch code:%s", id, DISPATCH_RETRY_INTERVAL_MS, + ref, tstrerror(code)); + streamStartMonitorDispatchData(pTask, DISPATCH_RETRY_INTERVAL_MS); + pTask->msgInfo.inMonitor = 1; + } else { + stDebug("s-task:%s already in dispatch monitor tmr", id); + } + + streamMutexUnlock(&pTask->msgInfo.lock); + } // this block can not be deleted until it has been sent to downstream task successfully. return TSDB_CODE_SUCCESS; From 761ae2ab4b3767806ce8a44ef009044688eb090f Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 15 Aug 2024 16:59:43 +0800 Subject: [PATCH 074/181] refactor: do some internal refactor. 
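
Remove the two early-exit guards from checkpointTriggerMonitorFn: the check that the timer was launched by a previous checkpoint procedure (launchChkptId != activeId) and the check that the active checkpoint info has already been cleared. For reference, a sketch of the combined condition those guards evaluated; the struct and function names below are simplified stand-ins, not the real checkpoint types.

    #include <stdio.h>

    /* simplified stand-ins for the checkpoint bookkeeping consulted by the guards */
    typedef struct { long long activeId; int transId; } ActiveChkptInfo;
    typedef struct { long long launchChkptId; } TriggerTmrInfo;

    /* returns 1 when the retrieve checkpoint-trigger timer should quit: it was
     * launched by a previous checkpoint procedure, or the active checkpoint info
     * (activeId/transId/startTs) is already cleared */
    static int triggerTmrShouldQuit(const ActiveChkptInfo *a, const TriggerTmrInfo *t,
                                    long long taskStartTs) {
      int staleLaunch = (t->launchChkptId != a->activeId) || (a->activeId == 0);
      int infoCleared = (a->activeId == 0) || (a->transId == 0) || (taskStartTs == 0);
      return staleLaunch || infoCleared;
    }

    int main(void) {
      ActiveChkptInfo a = {.activeId = 5, .transId = 3};
      TriggerTmrInfo  t = {.launchChkptId = 4};
      printf("%d\n", triggerTmrShouldQuit(&a, &t, 1723710000000LL)); /* 1: stale launch id     */
      t.launchChkptId = 5;
      printf("%d\n", triggerTmrShouldQuit(&a, &t, 1723710000000LL)); /* 0: timer keeps running */
      return 0;
    }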
--- source/libs/stream/src/streamCheckpoint.c | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/source/libs/stream/src/streamCheckpoint.c b/source/libs/stream/src/streamCheckpoint.c index 7e4d212457..4bf74d8d4f 100644 --- a/source/libs/stream/src/streamCheckpoint.c +++ b/source/libs/stream/src/streamCheckpoint.c @@ -938,28 +938,6 @@ void checkpointTriggerMonitorFn(void* param, void* tmrId) { return; } - if ((pTmrInfo->launchChkptId != pActiveInfo->activeId) || (pActiveInfo->activeId == 0)) { - streamMutexUnlock(&pActiveInfo->lock); - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); - stWarn("s-task:%s vgId:%d checkpoint-trigger retrieve by previous checkpoint procedure, checkpointId:%" PRId64 - ", quit, ref:%d", - id, vgId, pTmrInfo->launchChkptId, ref); - - streamMetaReleaseTask(pTask->pMeta, pTask); - return; - } - - // active checkpoint info is cleared for now - if ((pActiveInfo->activeId == 0) || (pActiveInfo->transId == 0) || (pTask->chkInfo.startTs == 0)) { - streamMutexUnlock(&pActiveInfo->lock); - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); - stWarn("s-task:%s vgId:%d active checkpoint may be cleared, quit from retrieve checkpoint-trigger send tmr, ref:%d", - id, vgId, ref); - - streamMetaReleaseTask(pTask->pMeta, pTask); - return; - } - for (int32_t i = 0; i < taosArrayGetSize(pList); ++i) { SStreamUpstreamEpInfo* pInfo = taosArrayGetP(pList, i); From cf796a45c1bb867330ac2447f49f9adc2ffd7d26 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao> Date: Thu, 15 Aug 2024 17:03:32 +0800 Subject: [PATCH 075/181] fix issue --- source/libs/executor/src/aggregateoperator.c | 3 +++ source/libs/executor/src/countwindowoperator.c | 3 +++ source/libs/executor/src/eventwindowoperator.c | 3 +++ source/libs/executor/src/executil.c | 6 +++++- source/libs/executor/src/filloperator.c | 3 +++ source/libs/executor/src/groupcacheoperator.c | 3 +++ source/libs/executor/src/groupoperator.c | 9 +++++++++ source/libs/executor/src/projectoperator.c | 15 +++++++++++---- source/libs/executor/src/sortoperator.c | 6 ++++++ .../libs/executor/src/streamcountwindowoperator.c | 3 +++ .../libs/executor/src/streameventwindowoperator.c | 3 +++ source/libs/executor/src/streamfilloperator.c | 3 +++ .../libs/executor/src/streamtimewindowoperator.c | 15 +++++++++++++++ source/libs/executor/src/timesliceoperator.c | 3 +++ source/libs/executor/src/timewindowoperator.c | 15 +++++++++++++++ 15 files changed, 88 insertions(+), 5 deletions(-) diff --git a/source/libs/executor/src/aggregateoperator.c b/source/libs/executor/src/aggregateoperator.c index 093555c9c5..d7b60b2bcd 100644 --- a/source/libs/executor/src/aggregateoperator.c +++ b/source/libs/executor/src/aggregateoperator.c @@ -149,6 +149,9 @@ _error: } if (pOperator != NULL) { pOperator->info = NULL; + if (pOperator->pDownstream == NULL && downstream != NULL) { + destroyOperator(downstream); + } destroyOperator(pOperator); } pTaskInfo->code = code; diff --git a/source/libs/executor/src/countwindowoperator.c b/source/libs/executor/src/countwindowoperator.c index 9019fa0fef..a9858eeb96 100644 --- a/source/libs/executor/src/countwindowoperator.c +++ b/source/libs/executor/src/countwindowoperator.c @@ -344,6 +344,9 @@ _error: if (pOperator != NULL) { pOperator->info = NULL; + if (pOperator->pDownstream == NULL && downstream != NULL) { + destroyOperator(downstream); + } destroyOperator(pOperator); } pTaskInfo->code = code; diff --git a/source/libs/executor/src/eventwindowoperator.c b/source/libs/executor/src/eventwindowoperator.c 
index d4e5dedd20..b80ea74006 100644 --- a/source/libs/executor/src/eventwindowoperator.c +++ b/source/libs/executor/src/eventwindowoperator.c @@ -147,6 +147,9 @@ _error: if (pOperator != NULL) { pOperator->info = NULL; + if (pOperator->pDownstream == NULL && downstream != NULL) { + destroyOperator(downstream); + } destroyOperator(pOperator); } pTaskInfo->code = code; diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index b732fccd8e..210c073c6d 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -1814,6 +1814,10 @@ int32_t createExprFromOneNode(SExprInfo* pExp, SNode* pNode, int16_t slotId) { QUERY_CHECK_CODE(code, lino, _end); res->node.resType = (SDataType){.bytes = sizeof(int64_t), .type = TSDB_DATA_TYPE_BIGINT}; code = nodesListAppend(pFuncNode->pParameterList, (SNode*)res); + if (code != TSDB_CODE_SUCCESS) { + nodesDestroyNode((SNode*)res); + res = NULL; + } QUERY_CHECK_CODE(code, lino, _end); } #endif @@ -1945,7 +1949,7 @@ int32_t createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, SExprInfo** SExprInfo* pExp = &pExprs[i]; code = createExprFromTargetNode(pExp, pTargetNode); if (code != TSDB_CODE_SUCCESS) { - taosMemoryFreeClear(pExprs); + destroyExprInfo(pExprs, *numOfExprs); qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code)); return code; } diff --git a/source/libs/executor/src/filloperator.c b/source/libs/executor/src/filloperator.c index 0b66834d45..5ece57cad1 100644 --- a/source/libs/executor/src/filloperator.c +++ b/source/libs/executor/src/filloperator.c @@ -579,6 +579,9 @@ _error: pTaskInfo->code = code; if (pOperator != NULL) { pOperator->info = NULL; + if (pOperator->pDownstream == NULL && downstream != NULL) { + destroyOperator(downstream); + } destroyOperator(pOperator); } return code; diff --git a/source/libs/executor/src/groupcacheoperator.c b/source/libs/executor/src/groupcacheoperator.c index 00b8c3b9ae..d5e6061a0f 100644 --- a/source/libs/executor/src/groupcacheoperator.c +++ b/source/libs/executor/src/groupcacheoperator.c @@ -1506,6 +1506,9 @@ _error: if (pOperator != NULL) { pOperator->info = NULL; + if (pOperator->pDownstream == NULL && pDownstream != NULL && (*pDownstream) != NULL) { + destroyOperator(*pDownstream); + } destroyOperator(pOperator); } pTaskInfo->code = code; diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c index 69a9045004..064ce42840 100644 --- a/source/libs/executor/src/groupoperator.c +++ b/source/libs/executor/src/groupoperator.c @@ -618,6 +618,9 @@ _error: if (pOperator) { pOperator->info = NULL; + if (pOperator->pDownstream == NULL && downstream != NULL) { + destroyOperator(downstream); + } destroyOperator(pOperator); } @@ -1248,6 +1251,9 @@ _error: pTaskInfo->code = code; if (pOperator != NULL) { pOperator->info = NULL; + if (pOperator->pDownstream == NULL && downstream != NULL) { + destroyOperator(downstream); + } destroyOperator(pOperator); } TAOS_RETURN(code); @@ -1797,6 +1803,9 @@ _error: if (pInfo != NULL) destroyStreamPartitionOperatorInfo(pInfo); if (pOperator != NULL) { pOperator->info = NULL; + if (pOperator->pDownstream == NULL && downstream != NULL) { + destroyOperator(downstream); + } destroyOperator(pOperator); } qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c index 66a7408b13..8426cb73fe 100644 --- a/source/libs/executor/src/projectoperator.c +++ 
b/source/libs/executor/src/projectoperator.c @@ -107,10 +107,6 @@ int32_t createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhysiNode* pOperator->pTaskInfo = pTaskInfo; int32_t lino = 0; - int32_t numOfCols = 0; - SExprInfo* pExprInfo = NULL; - code = createExprInfo(pProjPhyNode->pProjections, NULL, &pExprInfo, &numOfCols); - TSDB_CHECK_CODE(code, lino, _error); SSDataBlock* pResBlock = createDataBlockFromDescNode(pProjPhyNode->node.pOutputDataBlockDesc); TSDB_CHECK_NULL(pResBlock, code, lino, _error, terrno); @@ -148,6 +144,11 @@ int32_t createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhysiNode* } initResultSizeInfo(&pOperator->resultInfo, numOfRows); + + int32_t numOfCols = 0; + SExprInfo* pExprInfo = NULL; + code = createExprInfo(pProjPhyNode->pProjections, NULL, &pExprInfo, &numOfCols); + TSDB_CHECK_CODE(code, lino, _error); code = initAggSup(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str, pTaskInfo->streamInfo.pState, &pTaskInfo->storageAPI.functionStore); TSDB_CHECK_CODE(code, lino, _error); @@ -182,6 +183,9 @@ _error: if (pInfo != NULL) destroyProjectOperatorInfo(pInfo); if (pOperator != NULL) { pOperator->info = NULL; + if (pOperator->pDownstream == NULL && downstream != NULL) { + destroyOperator(downstream); + } destroyOperator(pOperator); } pTaskInfo->code = code; @@ -534,6 +538,9 @@ _error: if (pInfo != NULL) destroyIndefinitOperatorInfo(pInfo); if (pOperator != NULL) { pOperator->info = NULL; + if (pOperator->pDownstream == NULL && downstream != NULL) { + destroyOperator(downstream); + } destroyOperator(pOperator); } pTaskInfo->code = code; diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c index 59b4e1cbbb..36f9ac0954 100644 --- a/source/libs/executor/src/sortoperator.c +++ b/source/libs/executor/src/sortoperator.c @@ -166,6 +166,9 @@ _error: if (pOperator != NULL) { pOperator->info = NULL; + if (pOperator->pDownstream == NULL && downstream != NULL) { + destroyOperator(downstream); + } destroyOperator(pOperator); } pTaskInfo->code = code; @@ -841,6 +844,9 @@ _error: } if (pOperator != NULL) { pOperator->info = NULL; + if (pOperator->pDownstream == NULL && downstream != NULL) { + destroyOperator(downstream); + } destroyOperator(pOperator); } return code; diff --git a/source/libs/executor/src/streamcountwindowoperator.c b/source/libs/executor/src/streamcountwindowoperator.c index 62506858fc..fb4b9db05a 100644 --- a/source/libs/executor/src/streamcountwindowoperator.c +++ b/source/libs/executor/src/streamcountwindowoperator.c @@ -928,6 +928,9 @@ _error: if (pOperator != NULL) { pOperator->info = NULL; + if (pOperator->pDownstream == NULL && downstream != NULL) { + destroyOperator(downstream); + } destroyOperator(pOperator); } pTaskInfo->code = code; diff --git a/source/libs/executor/src/streameventwindowoperator.c b/source/libs/executor/src/streameventwindowoperator.c index 93f30ea899..67929678e5 100644 --- a/source/libs/executor/src/streameventwindowoperator.c +++ b/source/libs/executor/src/streameventwindowoperator.c @@ -986,6 +986,9 @@ _error: if (pInfo != NULL) destroyStreamEventOperatorInfo(pInfo); if (pOperator != NULL) { pOperator->info = NULL; + if (pOperator->pDownstream == NULL && downstream != NULL) { + destroyOperator(downstream); + } destroyOperator(pOperator); } pTaskInfo->code = code; diff --git a/source/libs/executor/src/streamfilloperator.c b/source/libs/executor/src/streamfilloperator.c index 39e602ee84..507ae724e0 100644 --- 
a/source/libs/executor/src/streamfilloperator.c +++ b/source/libs/executor/src/streamfilloperator.c @@ -1463,6 +1463,9 @@ _error: if (pInfo != NULL) destroyStreamFillOperatorInfo(pInfo); if (pOperator != NULL) { pOperator->info = NULL; + if (pOperator->pDownstream == NULL && downstream != NULL) { + destroyOperator(downstream); + } destroyOperator(pOperator); } pTaskInfo->code = code; diff --git a/source/libs/executor/src/streamtimewindowoperator.c b/source/libs/executor/src/streamtimewindowoperator.c index cf3b53bf02..823897eccd 100644 --- a/source/libs/executor/src/streamtimewindowoperator.c +++ b/source/libs/executor/src/streamtimewindowoperator.c @@ -2018,6 +2018,9 @@ _error: if (pInfo != NULL) destroyStreamFinalIntervalOperatorInfo(pInfo); if (pOperator != NULL) { pOperator->info = NULL; + if (pOperator->pDownstream == NULL && downstream != NULL) { + destroyOperator(downstream); + } destroyOperator(pOperator); } pTaskInfo->code = code; @@ -3843,6 +3846,9 @@ _error: if (pOperator != NULL) { pOperator->info = NULL; + if (pOperator->pDownstream == NULL && downstream != NULL) { + destroyOperator(downstream); + } destroyOperator(pOperator); } pTaskInfo->code = code; @@ -4102,6 +4108,9 @@ _error: } if (pOperator != NULL) { pOperator->info = NULL; + if (pOperator->pDownstream == NULL && downstream != NULL) { + destroyOperator(downstream); + } destroyOperator(pOperator); } pTaskInfo->code = code; @@ -4998,6 +5007,9 @@ _error: if (pInfo != NULL) destroyStreamStateOperatorInfo(pInfo); if (pOperator != NULL) { pOperator->info = NULL; + if (pOperator->pDownstream == NULL && downstream != NULL) { + destroyOperator(downstream); + } destroyOperator(pOperator); } pTaskInfo->code = code; @@ -5337,6 +5349,9 @@ _error: if (pInfo != NULL) destroyStreamFinalIntervalOperatorInfo(pInfo); if (pOperator != NULL) { pOperator->info = NULL; + if (pOperator->pDownstream == NULL && downstream != NULL) { + destroyOperator(downstream); + } destroyOperator(pOperator); } pTaskInfo->code = code; diff --git a/source/libs/executor/src/timesliceoperator.c b/source/libs/executor/src/timesliceoperator.c index b14f4f0266..258f886805 100644 --- a/source/libs/executor/src/timesliceoperator.c +++ b/source/libs/executor/src/timesliceoperator.c @@ -1212,6 +1212,9 @@ _error: if (pInfo != NULL) destroyTimeSliceOperatorInfo(pInfo); if (pOperator != NULL) { pOperator->info = NULL; + if (pOperator->pDownstream == NULL && downstream != NULL) { + destroyOperator(downstream); + } destroyOperator(pOperator); } pTaskInfo->code = code; diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index fa9dc79cc3..b3f060e213 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -1421,6 +1421,9 @@ _error: } if (pOperator != NULL) { pOperator->info = NULL; + if (pOperator->pDownstream == NULL && downstream != NULL) { + destroyOperator(downstream); + } destroyOperator(pOperator); } pTaskInfo->code = code; @@ -1700,6 +1703,9 @@ _error: if (pOperator != NULL) { pOperator->info = NULL; + if (pOperator->pDownstream == NULL && downstream != NULL) { + destroyOperator(downstream); + } destroyOperator(pOperator); } pTaskInfo->code = code; @@ -1796,6 +1802,9 @@ _error: if (pInfo != NULL) destroySWindowOperatorInfo(pInfo); if (pOperator != NULL) { pOperator->info = NULL; + if (pOperator->pDownstream == NULL && downstream != NULL) { + destroyOperator(downstream); + } destroyOperator(pOperator); } pTaskInfo->code = code; @@ -2113,6 +2122,9 @@ _error: if 
(miaInfo != NULL) destroyMAIOperatorInfo(miaInfo); if (pOperator != NULL) { pOperator->info = NULL; + if (pOperator->pDownstream == NULL && downstream != NULL) { + destroyOperator(downstream); + } destroyOperator(pOperator); } pTaskInfo->code = code; @@ -2450,6 +2462,9 @@ _error: if (pOperator != NULL) { pOperator->info = NULL; + if (pOperator->pDownstream == NULL && downstream != NULL) { + destroyOperator(downstream); + } destroyOperator(pOperator); } pTaskInfo->code = code; From 3274194d219d151fb0b75b6bc3158b9bbe267edb Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 15 Aug 2024 17:11:43 +0800 Subject: [PATCH 076/181] refactor: do some internal refactor. --- source/libs/stream/src/streamDispatch.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 7937402ccc..86970f80fa 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -762,6 +762,7 @@ int32_t streamDispatchStreamBlock(SStreamTask* pTask) { code = sendDispatchMsg(pTask, pTask->msgInfo.pData); + // todo: secure the timerActive and start timer in after lock pTask->lock streamMutexLock(&pTask->lock); bool shouldStop = streamTaskShouldStop(pTask); streamMutexLock(&pTask->lock); From a82c220645b01760f1738250960cd11a5185629c Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao> Date: Thu, 15 Aug 2024 17:38:29 +0800 Subject: [PATCH 077/181] fix issue --- source/libs/executor/src/groupoperator.c | 12 +++---- source/libs/executor/src/projectoperator.c | 10 +++--- .../executor/src/streamcountwindowoperator.c | 7 ++-- .../executor/src/streameventwindowoperator.c | 6 ++-- source/libs/executor/src/streamfilloperator.c | 6 ++-- .../executor/src/streamtimewindowoperator.c | 32 +++++++++++-------- source/libs/executor/src/tfill.c | 1 + source/libs/executor/src/timewindowoperator.c | 18 +++++------ 8 files changed, 50 insertions(+), 42 deletions(-) diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c index 064ce42840..e5289fa216 100644 --- a/source/libs/executor/src/groupoperator.c +++ b/source/libs/executor/src/groupoperator.c @@ -561,6 +561,10 @@ int32_t createGroupOperatorInfo(SOperatorInfo* downstream, SAggPhysiNode* pAggNo } initBasicInfo(&pInfo->binfo, pResBlock); + pInfo->pGroupCols = NULL; + code = extractColumnInfo(pAggNode->pGroupKeys, &pInfo->pGroupCols); + QUERY_CHECK_CODE(code, lino, _error); + int32_t numOfScalarExpr = 0; SExprInfo* pScalarExprInfo = NULL; if (pAggNode->pExprs != NULL) { @@ -568,10 +572,6 @@ int32_t createGroupOperatorInfo(SOperatorInfo* downstream, SAggPhysiNode* pAggNo QUERY_CHECK_CODE(code, lino, _error); } - pInfo->pGroupCols = NULL; - code = extractColumnInfo(pAggNode->pGroupKeys, &pInfo->pGroupCols); - QUERY_CHECK_CODE(code, lino, _error); - code = initExprSupp(&pInfo->scalarSup, pScalarExprInfo, numOfScalarExpr, &pTaskInfo->storageAPI.functionStore); QUERY_CHECK_CODE(code, lino, _error); @@ -1165,6 +1165,8 @@ int32_t createPartitionOperatorInfo(SOperatorInfo* downstream, SPartitionPhysiNo SExprInfo* pExprInfo = NULL; code = createExprInfo(pPartNode->pTargets, NULL, &pExprInfo, &numOfCols); QUERY_CHECK_CODE(code, lino, _error); + pOperator->exprSupp.numOfExprs = numOfCols; + pOperator->exprSupp.pExprInfo = pExprInfo; pInfo->pGroupCols = makeColumnArrayFromList(pPartNode->pPartitionKeys); @@ -1230,8 +1232,6 @@ int32_t createPartitionOperatorInfo(SOperatorInfo* downstream, SPartitionPhysiNo setOperatorInfo(pOperator, "PartitionOperator", 
QUERY_NODE_PHYSICAL_PLAN_PARTITION, false, OP_NOT_OPENED, pInfo, pTaskInfo); - pOperator->exprSupp.numOfExprs = numOfCols; - pOperator->exprSupp.pExprInfo = pExprInfo; pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, hashPartition, NULL, destroyPartitionOperatorInfo, optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL); diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c index 8426cb73fe..4d2bdc62f8 100644 --- a/source/libs/executor/src/projectoperator.c +++ b/source/libs/executor/src/projectoperator.c @@ -474,11 +474,6 @@ int32_t createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhysiNode* SIndefRowsFuncPhysiNode* pPhyNode = (SIndefRowsFuncPhysiNode*)pNode; - int32_t numOfExpr = 0; - SExprInfo* pExprInfo = NULL; - code = createExprInfo(pPhyNode->pFuncs, NULL, &pExprInfo, &numOfExpr); - TSDB_CHECK_CODE(code, lino, _error); - if (pPhyNode->pExprs != NULL) { int32_t num = 0; SExprInfo* pSExpr = NULL; @@ -505,6 +500,11 @@ int32_t createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhysiNode* code = blockDataEnsureCapacity(pResBlock, numOfRows); TSDB_CHECK_CODE(code, lino, _error); + int32_t numOfExpr = 0; + SExprInfo* pExprInfo = NULL; + code = createExprInfo(pPhyNode->pFuncs, NULL, &pExprInfo, &numOfExpr); + TSDB_CHECK_CODE(code, lino, _error); + code = initAggSup(pSup, &pInfo->aggSup, pExprInfo, numOfExpr, keyBufSize, pTaskInfo->id.str, pTaskInfo->streamInfo.pState, &pTaskInfo->storageAPI.functionStore); TSDB_CHECK_CODE(code, lino, _error); diff --git a/source/libs/executor/src/streamcountwindowoperator.c b/source/libs/executor/src/streamcountwindowoperator.c index fb4b9db05a..44a383772d 100644 --- a/source/libs/executor/src/streamcountwindowoperator.c +++ b/source/libs/executor/src/streamcountwindowoperator.c @@ -834,12 +834,14 @@ int32_t createStreamCountAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* } SExprSupp* pExpSup = &pOperator->exprSupp; + SSDataBlock* pResBlock = createDataBlockFromDescNode(pPhyNode->pOutputDataBlockDesc); + QUERY_CHECK_NULL(pResBlock, code, lino, _error, terrno); + pInfo->binfo.pRes = pResBlock; + SExprInfo* pExprInfo = NULL; code = createExprInfo(pCountNode->window.pFuncs, NULL, &pExprInfo, &numOfCols); QUERY_CHECK_CODE(code, lino, _error); - SSDataBlock* pResBlock = createDataBlockFromDescNode(pPhyNode->pOutputDataBlockDesc); - QUERY_CHECK_NULL(pResBlock, code, lino, _error, terrno); code = initBasicInfoEx(&pInfo->binfo, pExpSup, pExprInfo, numOfCols, pResBlock, &pTaskInfo->storageAPI.functionStore); QUERY_CHECK_CODE(code, lino, _error); @@ -863,7 +865,6 @@ int32_t createStreamCountAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* code = initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window); QUERY_CHECK_CODE(code, lino, _error); - pInfo->binfo.pRes = pResBlock; _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); pInfo->pStDeleted = tSimpleHashInit(64, hashFn); QUERY_CHECK_NULL(pInfo->pStDeleted, code, lino, _error, terrno); diff --git a/source/libs/executor/src/streameventwindowoperator.c b/source/libs/executor/src/streameventwindowoperator.c index 67929678e5..ff1ff579fc 100644 --- a/source/libs/executor/src/streameventwindowoperator.c +++ b/source/libs/executor/src/streameventwindowoperator.c @@ -895,14 +895,16 @@ int32_t createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* code = initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window); QUERY_CHECK_CODE(code, lino, _error); + SSDataBlock* 
pResBlock = createDataBlockFromDescNode(pPhyNode->pOutputDataBlockDesc); + QUERY_CHECK_NULL(pResBlock, code, lino, _error, terrno); + pInfo->binfo.pRes = pResBlock; + SExprSupp* pExpSup = &pOperator->exprSupp; int32_t numOfCols = 0; SExprInfo* pExprInfo = NULL; code = createExprInfo(pEventNode->window.pFuncs, NULL, &pExprInfo, &numOfCols); QUERY_CHECK_CODE(code, lino, _error); - SSDataBlock* pResBlock = createDataBlockFromDescNode(pPhyNode->pOutputDataBlockDesc); - QUERY_CHECK_NULL(pResBlock, code, lino, _error, terrno); code = initBasicInfoEx(&pInfo->binfo, pExpSup, pExprInfo, numOfCols, pResBlock, &pTaskInfo->storageAPI.functionStore); QUERY_CHECK_CODE(code, lino, _error); diff --git a/source/libs/executor/src/streamfilloperator.c b/source/libs/executor/src/streamfilloperator.c index 507ae724e0..75b15dbea4 100644 --- a/source/libs/executor/src/streamfilloperator.c +++ b/source/libs/executor/src/streamfilloperator.c @@ -1370,6 +1370,9 @@ int32_t createStreamFillOperatorInfo(SOperatorInfo* downstream, SStreamFillPhysi code = createExprInfo(pPhyFillNode->pFillExprs, NULL, &pFillExprInfo, &numOfFillCols); QUERY_CHECK_CODE(code, lino, _error); + code = initExprSupp(&pOperator->exprSupp, pFillExprInfo, numOfFillCols, &pTaskInfo->storageAPI.functionStore); + QUERY_CHECK_CODE(code, lino, _error); + pInfo->pFillSup = initStreamFillSup(pPhyFillNode, pInterval, pFillExprInfo, numOfFillCols, &pTaskInfo->storageAPI); if (!pInfo->pFillSup) { code = TSDB_CODE_FAILED; @@ -1440,9 +1443,6 @@ int32_t createStreamFillOperatorInfo(SOperatorInfo* downstream, SStreamFillPhysi code = filterInitFromNode((SNode*)pPhyFillNode->node.pConditions, &pOperator->exprSupp.pFilterInfo, 0); QUERY_CHECK_CODE(code, lino, _error); - code = initExprSupp(&pOperator->exprSupp, pFillExprInfo, numOfFillCols, &pTaskInfo->storageAPI.functionStore); - QUERY_CHECK_CODE(code, lino, _error); - pInfo->srcRowIndex = -1; setOperatorInfo(pOperator, "StreamFillOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL, false, OP_NOT_OPENED, pInfo, pTaskInfo); diff --git a/source/libs/executor/src/streamtimewindowoperator.c b/source/libs/executor/src/streamtimewindowoperator.c index 823897eccd..756a6d71e1 100644 --- a/source/libs/executor/src/streamtimewindowoperator.c +++ b/source/libs/executor/src/streamtimewindowoperator.c @@ -1896,11 +1896,6 @@ int32_t createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiN QUERY_CHECK_CODE(code, lino, _error); } - int32_t numOfCols = 0; - SExprInfo* pExprInfo = NULL; - code = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &pExprInfo, &numOfCols); - QUERY_CHECK_CODE(code, lino, _error); - SSDataBlock* pResBlock = createDataBlockFromDescNode(pPhyNode->pOutputDataBlockDesc); QUERY_CHECK_NULL(pResBlock, code, lino, _error, terrno); initBasicInfo(&pInfo->binfo, pResBlock); @@ -1914,6 +1909,12 @@ int32_t createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiN qInfo("copy state %p to %p", pTaskInfo->streamInfo.pState, pInfo->pState); pAPI->stateStore.streamStateSetNumber(pInfo->pState, -1, pInfo->primaryTsIndex); + + int32_t numOfCols = 0; + SExprInfo* pExprInfo = NULL; + code = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &pExprInfo, &numOfCols); + QUERY_CHECK_CODE(code, lino, _error); + code = initAggSup(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str, pInfo->pState, &pTaskInfo->storageAPI.functionStore); QUERY_CHECK_CODE(code, lino, _error); @@ -3742,13 +3743,15 @@ int32_t createStreamSessionAggOperatorInfo(SOperatorInfo* 
downstream, SPhysiNode } } SExprSupp* pExpSup = &pOperator->exprSupp; + + SSDataBlock* pResBlock = createDataBlockFromDescNode(pPhyNode->pOutputDataBlockDesc); + QUERY_CHECK_NULL(pResBlock, code, lino, _error, terrno); + pInfo->binfo.pRes = pResBlock; SExprInfo* pExprInfo = NULL; code = createExprInfo(pSessionNode->window.pFuncs, NULL, &pExprInfo, &numOfCols); QUERY_CHECK_CODE(code, lino, _error); - SSDataBlock* pResBlock = createDataBlockFromDescNode(pPhyNode->pOutputDataBlockDesc); - QUERY_CHECK_NULL(pResBlock, code, lino, _error, terrno); code = initBasicInfoEx(&pInfo->binfo, pExpSup, pExprInfo, numOfCols, pResBlock, &pTaskInfo->storageAPI.functionStore); QUERY_CHECK_CODE(code, lino, _error); @@ -3774,7 +3777,7 @@ int32_t createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode if (pSessionNode->window.pTsEnd) { pInfo->endTsIndex = ((SColumnNode*)pSessionNode->window.pTsEnd)->slotId; } - pInfo->binfo.pRes = pResBlock; + pInfo->order = TSDB_ORDER_ASC; _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); pInfo->pStDeleted = tSimpleHashInit(64, hashFn); @@ -4924,14 +4927,16 @@ int32_t createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* code = initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window); QUERY_CHECK_CODE(code, lino, _error); + SSDataBlock* pResBlock = createDataBlockFromDescNode(pPhyNode->pOutputDataBlockDesc); + QUERY_CHECK_NULL(pResBlock, code, lino, _error, terrno); + pInfo->binfo.pRes = pResBlock; + SExprSupp* pExpSup = &pOperator->exprSupp; int32_t numOfCols = 0; SExprInfo* pExprInfo = NULL; code = createExprInfo(pStateNode->window.pFuncs, NULL, &pExprInfo, &numOfCols); QUERY_CHECK_CODE(code, lino, _error); - SSDataBlock* pResBlock = createDataBlockFromDescNode(pPhyNode->pOutputDataBlockDesc); - QUERY_CHECK_NULL(pResBlock, code, lino, _error, terrno); code = initBasicInfoEx(&pInfo->binfo, pExpSup, pExprInfo, numOfCols, pResBlock, &pTaskInfo->storageAPI.functionStore); if (code != TSDB_CODE_SUCCESS) { goto _error; @@ -5218,10 +5223,6 @@ int32_t createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* SStreamIntervalPhysiNode* pIntervalPhyNode = (SStreamIntervalPhysiNode*)pPhyNode; - SExprInfo* pExprInfo = NULL; - code = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &pExprInfo, &numOfCols); - QUERY_CHECK_CODE(code, lino, _error); - SSDataBlock* pResBlock = createDataBlockFromDescNode(pPhyNode->pOutputDataBlockDesc); QUERY_CHECK_NULL(pResBlock, code, lino, _error, terrno); initBasicInfo(&pInfo->binfo, pResBlock); @@ -5265,6 +5266,9 @@ int32_t createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pAPI->stateStore.streamStateSetNumber(pInfo->pState, -1, pInfo->primaryTsIndex); size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES; + SExprInfo* pExprInfo = NULL; + code = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &pExprInfo, &numOfCols); + QUERY_CHECK_CODE(code, lino, _error); code = initAggSup(pSup, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str, pInfo->pState, &pTaskInfo->storageAPI.functionStore); QUERY_CHECK_CODE(code, lino, _error); diff --git a/source/libs/executor/src/tfill.c b/source/libs/executor/src/tfill.c index a7e2ea3429..59c19a706c 100644 --- a/source/libs/executor/src/tfill.c +++ b/source/libs/executor/src/tfill.c @@ -764,6 +764,7 @@ SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfFillExpr, SExprIn SValueNode* pv = (SValueNode*)nodesListGetNode(pValNode->pNodeList, index); 
QUERY_CHECK_NULL(pv, code, lino, _end, terrno); code = nodesValueNodeToVariant(pv, &pFillCol[i].fillVal); + QUERY_CHECK_CODE(code, lino, _end); } if (TSDB_CODE_SUCCESS != code) { goto _end; diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index b3f060e213..6a74c6a093 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -1746,15 +1746,15 @@ int32_t createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionWinodwPh size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES; initResultSizeInfo(&pOperator->resultInfo, 4096); + SSDataBlock* pResBlock = createDataBlockFromDescNode(pSessionNode->window.node.pOutputDataBlockDesc); + QUERY_CHECK_NULL(pResBlock, code, lino, _error, terrno); + initBasicInfo(&pInfo->binfo, pResBlock); + int32_t numOfCols = 0; SExprInfo* pExprInfo = NULL; code = createExprInfo(pSessionNode->window.pFuncs, NULL, &pExprInfo, &numOfCols); QUERY_CHECK_CODE(code, lino, _error); - SSDataBlock* pResBlock = createDataBlockFromDescNode(pSessionNode->window.node.pOutputDataBlockDesc); - QUERY_CHECK_NULL(pResBlock, code, lino, _error, terrno); - initBasicInfo(&pInfo->binfo, pResBlock); - code = initAggSup(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str, pTaskInfo->streamInfo.pState, &pTaskInfo->storageAPI.functionStore); QUERY_CHECK_CODE(code, lino, _error); @@ -2392,11 +2392,6 @@ int32_t createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SMergeInterva goto _error; } - int32_t num = 0; - SExprInfo* pExprInfo = NULL; - code = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &pExprInfo, &num); - QUERY_CHECK_CODE(code, lino, _error); - SInterval interval = {.interval = pIntervalPhyNode->interval, .sliding = pIntervalPhyNode->sliding, .intervalUnit = pIntervalPhyNode->intervalUnit, @@ -2420,6 +2415,11 @@ int32_t createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SMergeInterva size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES; initResultSizeInfo(&pOperator->resultInfo, 4096); + int32_t num = 0; + SExprInfo* pExprInfo = NULL; + code = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &pExprInfo, &num); + QUERY_CHECK_CODE(code, lino, _error); + code = initAggSup(pExprSupp, &pIntervalInfo->aggSup, pExprInfo, num, keyBufSize, pTaskInfo->id.str, pTaskInfo->streamInfo.pState, &pTaskInfo->storageAPI.functionStore); if (code != TSDB_CODE_SUCCESS) { From 75660b8f87f024c94f65400d3c2d48133e034e47 Mon Sep 17 00:00:00 2001 From: gccgdb1234 Date: Thu, 15 Aug 2024 17:47:13 +0800 Subject: [PATCH 078/181] docs: refine and correct errors according to comments from dong yan qiong --- docs/zh/07-operation/04-maintenance.md | 2 -- docs/zh/07-operation/18-dual.md | 2 +- docs/zh/14-reference/01-components/01-taosd.md | 4 ++-- docs/zh/14-reference/01-components/02-taosc.md | 2 +- docs/zh/14-reference/01-components/06-taoskeeper.md | 2 -- docs/zh/14-reference/01-components/09-taosdump.md | 2 -- docs/zh/14-reference/01-components/10-taosbenchmark.md | 2 -- docs/zh/14-reference/03-taos-sql/07-tag-index.md | 4 +--- docs/zh/14-reference/03-taos-sql/32-view.md | 2 -- docs/zh/14-reference/05-connector/10-cpp.mdx | 1 + docs/zh/14-reference/05-connector/45-php.mdx | 1 + docs/zh/14-reference/05-connector/50-odbc.mdx | 3 --- docs/zh/14-reference/05-connector/60-rest-api.mdx | 1 + docs/zh/14-reference/09-error-code.md | 4 ++-- docs/zh/27-train-faq/01-faq.md | 2 +- 15 files changed, 11 insertions(+), 
23 deletions(-) diff --git a/docs/zh/07-operation/04-maintenance.md b/docs/zh/07-operation/04-maintenance.md index 3c02e4dd39..88122fed69 100644 --- a/docs/zh/07-operation/04-maintenance.md +++ b/docs/zh/07-operation/04-maintenance.md @@ -4,8 +4,6 @@ title: 集群维护 sidebar_label: 集群维护 --- -## 简介 - 本节介绍 TDengine Enterprise 中提供的高阶集群维护手段,能够使 TDengine 集群长期运行得更健壮和高效。 ## 节点管理 diff --git a/docs/zh/07-operation/18-dual.md b/docs/zh/07-operation/18-dual.md index 9de6a75b18..354e715602 100644 --- a/docs/zh/07-operation/18-dual.md +++ b/docs/zh/07-operation/18-dual.md @@ -4,7 +4,7 @@ sidebar_label: 双活系统 toc_max_heading_level: 4 --- -## 简介 +本节介绍 TDengine 双活系统的配置和使用。 1. 部分用户因为部署环境的特殊性只能部署两台服务器,同时希望实现一定的服务高可用和数据高可靠。本文主要描述基于数据复制和客户端 Failover 两项关键技术的 TDengine 双活系统的产品行为,包括双活系统的架构、配置、运维等。TDengine 双活既可以用于前面所述资源受限的环境,也可用于在两套 TDengine 集群(不限资源)之间的灾备场景。双活是 TDengine Enterprise 特有功能,在 3.3.0.0 版本中第一次发布,建议使用最新版本。 diff --git a/docs/zh/14-reference/01-components/01-taosd.md b/docs/zh/14-reference/01-components/01-taosd.md index 994f557a17..3746a16c54 100644 --- a/docs/zh/14-reference/01-components/01-taosd.md +++ b/docs/zh/14-reference/01-components/01-taosd.md @@ -143,7 +143,7 @@ charset 的有效值是 UTF-8。 | 参数名称 | 参数说明 | | :--------------: | :--------------------------------------------------------------------: | -| dataDir | 数据文件目录,所有的数据文件都将写入该目录,缺省值:/var/lib | +| dataDir | 数据文件目录,所有的数据文件都将写入该目录,缺省值:/var/lib/taos | | tempDir | 指定所有系统运行过程中的临时文件生成的目录,缺省值:/tmp | | minimalTmpDirGB | tempDir 所指定的临时文件目录所需要保留的最小空间,单位 GB,缺省值: 1 | | minimalDataDirGB | dataDir 指定的时序数据存储目录所需要保留的最小空间,单位 GB,缺省值: 2 | @@ -168,7 +168,7 @@ charset 的有效值是 UTF-8。 | minimalLogDirGB | 当日志文件夹所在磁盘可用空间大小小于该值时,停止写日志,单位GB,缺省值:1 | | numOfLogLines | 单个日志文件允许的最大行数,缺省值:10,000,000 | | asyncLog | 日志写入模式,0: 同步,1: 异步,缺省值: 1 | -| logKeepDays | 日志文件的最长保存时间 ,单位:天,缺省值:0,意味着无限保存;当设置为大于0 的值时,日志文件会被重命名为 taosdlog.xxx,其中 xxx 为日志文件最后修改的时间戳。 | +| logKeepDays | 日志文件的最长保存时间 ,单位:天,缺省值:0,意味着无限保存,日志文件不会被重命名,也不会有新的日志文件滚动产生,但日志文件的内容有可能会不断滚动,取决于日志文件大小的设置;当设置为大于0 的值时,当日志文件大小达到设置的上限时会被重命名为 taosdlog.xxx,其中 xxx 为日志文件最后修改的时间戳,并滚动产生新的日志文件 | | slowLogThreshold | 慢查询门限值,大于等于门限值认为是慢查询,单位秒,默认值: 3 | | slowLogScope | 定启动记录哪些类型的慢查询,可选值:ALL, QUERY, INSERT, OHTERS, NONE; 默认值:ALL | | debugFlag | 运行日志开关,131(输出错误和警告日志),135(输出错误、警告和调试日志),143(输出错误、警告、调试和跟踪日志); 默认值:131 或 135 (取决于不同模块) | diff --git a/docs/zh/14-reference/01-components/02-taosc.md b/docs/zh/14-reference/01-components/02-taosc.md index 96d108e8c8..d198890bd9 100644 --- a/docs/zh/14-reference/01-components/02-taosc.md +++ b/docs/zh/14-reference/01-components/02-taosc.md @@ -18,7 +18,7 @@ TDengine 客户端驱动提供了应用编程所需要的全部 API,并且在 |queryPolicy | 查询语句的执行策略,1: 只使用 vnode,不使用 qnode; 2: 没有扫描算子的子任务在 qnode 执行,带扫描算子的子任务在 vnode 执行; 3: vnode 只运行扫描算子,其余算子均在 qnode 执行 ;缺省值:1 | |querySmaOptimize | sma index 的优化策略,0: 表示不使用 sma index,永远从原始数据进行查询; 1: 表示使用 sma index,对符合的语句,直接从预计算的结果进行查询;缺省值:0 | |keepColumnName | Last、First、LastRow 函数查询且未指定别名时,自动设置别名为列名(不含函数名),因此 order by 子句如果引用了该列名将自动引用该列对应的函数; 1: 表示自动设置别名为列名(不包含函数名), 0: 表示不自动设置别名; 缺省值: 0 | -|countAlwaysReturnValue | ount/hyperloglog函数在输入数据为空或者NULL的情况下是否返回值; 0:返回空行,1:返回; 缺省值 1; 该参数设置为 1 时,如果查询中含有 INTERVAL 子句或者该查询使用了TSMA时, 且相应的组或窗口内数据为空或者NULL, 对应的组或窗口将不返回查询结果. 注意此参数客户端和服务端值应保持一致. | +|countAlwaysReturnValue | count/hyperloglog函数在输入数据为空或者NULL的情况下是否返回值; 0:返回空行,1:返回; 缺省值 1; 该参数设置为 1 时,如果查询中含有 INTERVAL 子句或者该查询使用了TSMA时, 且相应的组或窗口内数据为空或者NULL, 对应的组或窗口将不返回查询结果. 注意此参数客户端和服务端值应保持一致. 
| |multiResultFunctionStarReturnTags | 查询超级表时,last(\*)/last_row(\*)/first(\*) 是否返回标签列;查询普通表、子表时,不受该参数影响; 0:不返回标签列,1:返回标签列 ; 缺省值: 0; 该参数设置为 0 时,last(\*)/last_row(\*)/first(\*) 只返回超级表的普通列;为 1 时,返回超级表的普通列和标签列 | |maxTsmaCalcDelay| 查询时客户端可允许的tsma计算延迟, 若tsma的计算延迟大于配置值, 则该TSMA将不会被使用.; 取值范围: 600s - 86400s, 即10分钟-1小时 ; 缺省值:600 秒| |tsmaDataDeleteMark |TSMA计算的历史数据中间结果保存时间, 单位为毫秒; 取值范围:>= 3600000, 即大于等于1h; 缺省值: 86400000, 即1d | diff --git a/docs/zh/14-reference/01-components/06-taoskeeper.md b/docs/zh/14-reference/01-components/06-taoskeeper.md index 2877728077..f40b34ebeb 100644 --- a/docs/zh/14-reference/01-components/06-taoskeeper.md +++ b/docs/zh/14-reference/01-components/06-taoskeeper.md @@ -7,8 +7,6 @@ toc_max_heading_level: 4 import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; -## 简介 - taosKeeper 是 TDengine 3.0 版本监控指标的导出工具,通过简单的几项配置即可获取 TDengine 的运行状态。taosKeeper 使用 TDengine RESTful 接口,所以不需要安装 TDengine 客户端即可使用。 ## 安装 diff --git a/docs/zh/14-reference/01-components/09-taosdump.md b/docs/zh/14-reference/01-components/09-taosdump.md index fd08da56d8..7afe8721ee 100644 --- a/docs/zh/14-reference/01-components/09-taosdump.md +++ b/docs/zh/14-reference/01-components/09-taosdump.md @@ -4,8 +4,6 @@ sidebar_label: taosdump toc_max_heading_level: 4 --- -## 简介 - taosdump 是一个支持从运行中的 TDengine 集群备份数据并将备份的数据恢复到相同或另一个运行中的 TDengine 集群中的工具应用程序。 taosdump 可以用数据库、超级表或普通表作为逻辑数据单元进行备份,也可以对数据库、超级 diff --git a/docs/zh/14-reference/01-components/10-taosbenchmark.md b/docs/zh/14-reference/01-components/10-taosbenchmark.md index 8540fa1cbb..3f15d6b8e3 100644 --- a/docs/zh/14-reference/01-components/10-taosbenchmark.md +++ b/docs/zh/14-reference/01-components/10-taosbenchmark.md @@ -4,8 +4,6 @@ sidebar_label: taosBenchmark toc_max_heading_level: 4 --- -## 简介 - taosBenchmark (曾用名 taosdemo ) 是一个用于测试 TDengine 产品性能的工具。taosBenchmark 可以测试 TDengine 的插入、查询和订阅等功能的性能,它可以模拟由大量设备产生的大量数据,还可以灵活地控制数据库、超级表、标签列的数量和类型、数据列的数量和类型、子表的数量、每张子表的数据量、插入数据的时间间隔、taosBenchmark 的工作线程数量、是否以及如何插入乱序数据等。为了兼容过往用户的使用习惯,安装包提供 了 taosdemo 作为 taosBenchmark 的软链接。 ## 安装 diff --git a/docs/zh/14-reference/03-taos-sql/07-tag-index.md b/docs/zh/14-reference/03-taos-sql/07-tag-index.md index c016a5b513..383c5b2a1f 100644 --- a/docs/zh/14-reference/03-taos-sql/07-tag-index.md +++ b/docs/zh/14-reference/03-taos-sql/07-tag-index.md @@ -4,9 +4,7 @@ title: 标签索引 description: 使用标签索引提升查询性能 --- -## 简介 - -在 TDengine 3.0.3.0 版本之前(不含),默认在第一列 TAG 上建立索引,但不支持给其它列动态添加索引。从 3.0.3.0 版本开始,可以动态地为其它 TAG 列添加索引。对于第一个 TAG 列上自动建立的索引,其在查询中默认生效,且用户无法对其进行任何干预。适当地使用索引能够有效地提升查询性能。 +本节说明 TDengine 的索引机制。在 TDengine 3.0.3.0 版本之前(不含),默认在第一列 TAG 上建立索引,但不支持给其它列动态添加索引。从 3.0.3.0 版本开始,可以动态地为其它 TAG 列添加索引。对于第一个 TAG 列上自动建立的索引,其在查询中默认生效,且用户无法对其进行任何干预。适当地使用索引能够有效地提升查询性能。 ## 语法 diff --git a/docs/zh/14-reference/03-taos-sql/32-view.md b/docs/zh/14-reference/03-taos-sql/32-view.md index 9b85ed906d..e8b08e05e8 100644 --- a/docs/zh/14-reference/03-taos-sql/32-view.md +++ b/docs/zh/14-reference/03-taos-sql/32-view.md @@ -4,8 +4,6 @@ title: "视图" sidebar_label: "视图" --- -## 简介 - 从 TDengine 3.2.1.0 开始,TDengine 企业版提供视图功能,便于用户简化操作,提升用户间的分享能力。 视图(View)本质上是一个存储在数据库中的查询语句。视图(非物化视图)本身不包含数据,只有在从视图读取数据时才动态执行视图所指定的查询语句。我们在创建视图时指定一个名称,然后可以像使用普通表一样对其进行查询等操作。视图的使用需遵循以下规则: diff --git a/docs/zh/14-reference/05-connector/10-cpp.mdx b/docs/zh/14-reference/05-connector/10-cpp.mdx index 727e33eb7c..be7e44812c 100644 --- a/docs/zh/14-reference/05-connector/10-cpp.mdx +++ b/docs/zh/14-reference/05-connector/10-cpp.mdx @@ -1,6 +1,7 @@ --- sidebar_label: C/C++ title: C/C++ Connector +toc_max_heading_level: 4 --- C/C++ 开发人员可以使用 
TDengine 的客户端驱动,即 C/C++连接器 (以下都用 TDengine 客户端驱动表示),开发自己的应用来连接 TDengine 集群完成数据存储、查询以及其他功能。TDengine 客户端驱动的 API 类似于 MySQL 的 C API。应用程序使用时,需要包含 TDengine 头文件 _taos.h_,里面列出了提供的 API 的函数原型;应用程序还要链接到所在平台上对应的动态库。 diff --git a/docs/zh/14-reference/05-connector/45-php.mdx b/docs/zh/14-reference/05-connector/45-php.mdx index 9bc9662a72..0b453218f6 100644 --- a/docs/zh/14-reference/05-connector/45-php.mdx +++ b/docs/zh/14-reference/05-connector/45-php.mdx @@ -1,6 +1,7 @@ --- sidebar_label: PHP title: PHP Connector +toc_max_heading_level: 4 --- `php-tdengine` 是由社区贡献的 PHP 连接器扩展,还特别支持了 Swoole 协程化。 diff --git a/docs/zh/14-reference/05-connector/50-odbc.mdx b/docs/zh/14-reference/05-connector/50-odbc.mdx index 521b08c599..244a3f8d4e 100644 --- a/docs/zh/14-reference/05-connector/50-odbc.mdx +++ b/docs/zh/14-reference/05-connector/50-odbc.mdx @@ -3,9 +3,6 @@ sidebar_label: ODBC title: TDengine ODBC --- - -## 简介 - TDengine ODBC 是为 TDengine 实现的 ODBC 驱动程序,支持 Windows 系统的应用(如 [PowerBI](https://powerbi.microsoft.com/zh-cn/) 等)通过 ODBC 标准接口访问本地、远程和云服务的 TDengine 数据库。 TDengine ODBC 提供基于 WebSocket(推荐)和 原生连接两种方式连接 TDengine 数据库,使用时可以为 TDengine 数据源设置不同的连接方式。访问云服务时必须使用 WebSocket 连接方式。 diff --git a/docs/zh/14-reference/05-connector/60-rest-api.mdx b/docs/zh/14-reference/05-connector/60-rest-api.mdx index 4a3fcbd49e..b6d6ec3b4a 100644 --- a/docs/zh/14-reference/05-connector/60-rest-api.mdx +++ b/docs/zh/14-reference/05-connector/60-rest-api.mdx @@ -1,6 +1,7 @@ --- title: REST API sidebar_label: REST API +toc_max_heading_level: 4 description: 详细介绍 TDengine 提供的 RESTful API. --- diff --git a/docs/zh/14-reference/09-error-code.md b/docs/zh/14-reference/09-error-code.md index bb66ca7f95..fbd347b6af 100644 --- a/docs/zh/14-reference/09-error-code.md +++ b/docs/zh/14-reference/09-error-code.md @@ -122,7 +122,7 @@ description: TDengine 服务端的错误码列表和详细说明 | 0x80000335 | Cluster cfg inconsistent | 配置不一致 | 检查dnode节点与mnode节点配置是否一致。检查方式:1.节点启动时,在日志中输出 2.使用show variables | | 0x8000033B | Cluster id not match | 节点配置数据不一致 | 检查各节点data/dnode/dnodes.json文件中的clusterid | | 0x80000340 | Account already exists | (仅企业版)内部错误 | 上报issue | -| 0x80000342 | Invalid account options | (仅企业版)操作不zh | 确认操作是否正确 | +| 0x80000342 | Invalid account options | (仅企业版)该操作不支持 | 确认操作是否正确 | | 0x80000344 | Invalid account | 账户不存在 | 确认账户是否正确 | | 0x80000350 | User already exists | Create user, 重复创建 | 确认操作是否正确 | | 0x80000351 | Invalid user | 用户不存在 | 确认操作是否正确 | @@ -311,7 +311,7 @@ description: TDengine 服务端的错误码列表和详细说明 | ---------- | ---------------------------- | ----------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------ | | 0x80000903 | Sync timeout | 场景1:发生了切主;旧主节点上已经开始协商但尚未达成一致的请求将超时。 场景2:从节点响应超时,导致协商超时。 | 检查集群状态,例如:show vgroups;查看服务端日志,以及服务端节点之间的网络状况。 | | 0x8000090C | Sync leader is unreachable | 场景1:选主过程中 场景2:客户端请求路由到了从节点,且重定向失败 场景3:客户端或服务端网络配置错误 | 检查集群状态、网络配置、应用程序访问状态等。查看服务端日志,以及服务端节点之间的网络状况。 | -| 0x8000090F | Sync new config error | 成员变更新配置错误 | 预留 | +| 0x8000090F | Sync new config error | 成员变更配置错误 | 内部错误,用户无法干预 | | 0x80000911 | Sync not ready to propose | 场景1:恢复未完成 | 检查集群状态,例如:show vgroups。查看服务端日志,以及服务端节点之间的网络状况。 | | 0x80000914 | Sync leader is restoring | 场景1:发生了切主;选主后,日志重演中 | 检查集群状态,例如:show vgroups。查看服务端日志,观察恢复进度。 | | 0x80000915 | Sync invalid snapshot msg | 快照复制消息错误 | 服务端内部错误 | diff --git a/docs/zh/27-train-faq/01-faq.md b/docs/zh/27-train-faq/01-faq.md index 50470e92f1..01cb42213e 100644 --- a/docs/zh/27-train-faq/01-faq.md +++ 
b/docs/zh/27-train-faq/01-faq.md @@ -265,7 +265,7 @@ TDengine 在写入数据时如果有很严重的乱序写入问题,会严重 ### 26 遇到报错 “DND ERROR Version not compatible,cliver : 3000700swr wer : 3020300” 说明客户端和服务端版本不兼容,这里cliver的版本是3.0.7.0,server版本是 3.2.3.0。目前的兼容策略是前三位一致,client 和 sever才能兼容。 -### 27 修改database的root密码后,启动taos遇到报错 “failed to connect to server, reason: Authen tication failure” +### 27 修改database的root密码后,启动taos遇到报错 “failed to connect to server, reason: Authentication failure” 默认情况,启动taos服务会使用系统默认的用户名(root)和密码尝试连接taosd,在root密码修改后,启用taos连接就需要指明用户名和密码,例如: taos -h xxx.xxx.xxx.xxx -u root -p,然后输入新密码进行连接。 ### 28 修改database的root密码后,Grafana监控插件TDinsight无数据展示 From 54fdea3173b7132ea98b8c77f6a7ac611bce5de5 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 15 Aug 2024 18:01:28 +0800 Subject: [PATCH 079/181] fix(stream): perform node update when mnode leader/follower switches. --- include/common/tmsg.h | 1 + include/libs/stream/tstream.h | 8 +- source/dnode/mnode/impl/inc/mndStream.h | 3 + source/dnode/mnode/impl/src/mndStream.c | 117 ++++++++++++++++--- source/dnode/mnode/impl/src/mndStreamTrans.c | 2 +- source/dnode/mnode/impl/src/mndStreamUtil.c | 3 + source/dnode/mnode/impl/src/mndSync.c | 4 + source/dnode/vnode/src/tqCommon/tqCommon.c | 41 +++---- source/libs/stream/src/streamExec.c | 8 +- source/libs/stream/src/streamMeta.c | 59 ++++++++-- source/libs/stream/src/streamTask.c | 2 +- 11 files changed, 194 insertions(+), 54 deletions(-) diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 70cf9c8b58..cfacb5cfd6 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -3696,6 +3696,7 @@ typedef struct { SMsgHead head; int64_t streamId; int32_t taskId; + int32_t transId; } SVPauseStreamTaskReq, SVResetStreamTaskReq; typedef struct { diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 9c59e3f3ec..5e7f2bf0a6 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -473,7 +473,9 @@ typedef struct STaskStartInfo { typedef struct STaskUpdateInfo { SHashObj* pTasks; - int32_t transId; + int32_t activeTransId; + int32_t completeTransId; + int64_t completeTs; } STaskUpdateInfo; typedef struct SScanWalInfo { @@ -753,8 +755,8 @@ int32_t streamMetaAddFailedTask(SStreamMeta* pMeta, int64_t streamId, int32_t ta void streamMetaAddFailedTaskSelf(SStreamTask* pTask, int64_t failedTs); void streamMetaAddIntoUpdateTaskList(SStreamMeta* pMeta, SStreamTask* pTask, SStreamTask* pHTask, int32_t transId, int64_t startTs); -void streamMetaClearUpdateTaskList(SStreamMeta* pMeta); -void streamMetaInitUpdateTaskList(SStreamMeta* pMeta, int32_t transId); +void streamMetaClearSetUpdateTaskListComplete(SStreamMeta* pMeta); +bool streamMetaInitUpdateTaskList(SStreamMeta* pMeta, int32_t transId); void streamMetaRLock(SStreamMeta* pMeta); void streamMetaRUnLock(SStreamMeta* pMeta); diff --git a/source/dnode/mnode/impl/inc/mndStream.h b/source/dnode/mnode/impl/inc/mndStream.h index 88b8e98afb..a87a01c5b6 100644 --- a/source/dnode/mnode/impl/inc/mndStream.h +++ b/source/dnode/mnode/impl/inc/mndStream.h @@ -64,6 +64,8 @@ typedef struct SChkptReportInfo { } SChkptReportInfo; typedef struct SStreamExecInfo { + int32_t role; + bool switchFromFollower; bool initTaskList; SArray *pNodeList; int64_t ts; // snapshot ts @@ -153,6 +155,7 @@ int32_t streamTaskIterGetCurrent(SStreamTaskIter *pIter, SStreamTask **pTask); int32_t mndInitExecInfo(); void mndInitStreamExecInfo(SMnode *pMnode, SStreamExecInfo *pExecInfo); void mndInitStreamExecInfoForLeader(SMnode *pMnode); +void 
mndInitStreamExecInfoUpdateRole(SMnode *pMnode, int32_t role); int32_t removeExpiredNodeEntryAndTaskInBuf(SArray *pNodeSnapshot); void removeStreamTasksInBuf(SStreamObj *pStream, SStreamExecInfo *pExecNode); diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index a01bc92a97..a85b5c733b 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -144,7 +144,7 @@ int32_t mndInitStream(SMnode *pMnode) { code = sdbSetTable(pMnode->pSdb, table); if (code) { - return terrno; + return code; } code = sdbSetTable(pMnode->pSdb, tableSeq); @@ -2024,7 +2024,7 @@ static SVgroupChangeInfo mndFindChangedNodeInfo(SMnode *pMnode, const SArray *pP return info; } -static int32_t mndProcessVgroupChange(SMnode *pMnode, SVgroupChangeInfo *pChangeInfo) { +static int32_t mndProcessVgroupChange(SMnode *pMnode, SVgroupChangeInfo *pChangeInfo, bool includeAllNodes) { SSdb *pSdb = pMnode->pSdb; SStreamObj *pStream = NULL; void *pIter = NULL; @@ -2069,12 +2069,14 @@ static int32_t mndProcessVgroupChange(SMnode *pMnode, SVgroupChangeInfo *pChange } } - void *p = taosHashGet(pChangeInfo->pDBMap, pStream->targetDb, strlen(pStream->targetDb)); - void *p1 = taosHashGet(pChangeInfo->pDBMap, pStream->sourceDb, strlen(pStream->sourceDb)); - if (p == NULL && p1 == NULL) { - mDebug("stream:0x%" PRIx64 " %s not involved nodeUpdate, ignore", pStream->uid, pStream->name); - sdbRelease(pSdb, pStream); - continue; + if (!includeAllNodes) { + void *p1 = taosHashGet(pChangeInfo->pDBMap, pStream->targetDb, strlen(pStream->targetDb)); + void *p2 = taosHashGet(pChangeInfo->pDBMap, pStream->sourceDb, strlen(pStream->sourceDb)); + if (p1 == NULL && p2 == NULL) { + mDebug("stream:0x%" PRIx64 " %s not involved nodeUpdate, ignore", pStream->uid, pStream->name); + sdbRelease(pSdb, pStream); + continue; + } } mDebug("stream:0x%" PRIx64 " %s involved node changed, create update trans, transId:%d", pStream->uid, @@ -2192,11 +2194,36 @@ static int32_t refreshNodeListFromExistedStreams(SMnode *pMnode, SArray *pNodeLi return code; } +static int32_t addAllDbsIntoHashmap(SHashObj *pDBMap, SSdb *pSdb) { + void *pIter = NULL; + int32_t code = 0; + while (1) { + SVgObj *pVgroup = NULL; + pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup); + if (pIter == NULL) { + break; + } + + code = taosHashPut(pDBMap, pVgroup->dbName, strlen(pVgroup->dbName), NULL, 0); + sdbRelease(pSdb, pVgroup); + + if (code == 0) { + int32_t size = taosHashGetSize(pDBMap); + mDebug("add Db:%s into Dbs list (total:%d) for kill checkpoint trans", pVgroup->dbName, size); + } + } + + return code; +} + // this function runs by only one thread, so it is not multi-thread safe static int32_t mndProcessNodeCheckReq(SRpcMsg *pMsg) { int32_t code = 0; bool allReady = true; SArray *pNodeSnapshot = NULL; + SMnode *pMnode = pMsg->info.node; + int64_t ts = taosGetTimestampSec(); + bool updateAllVgroups = false; int32_t old = atomic_val_compare_exchange_32(&mndNodeCheckSentinel, 0, 1); if (old != 0) { @@ -2204,10 +2231,7 @@ static int32_t mndProcessNodeCheckReq(SRpcMsg *pMsg) { return 0; } - mDebug("start to do node change checking"); - int64_t ts = taosGetTimestampSec(); - - SMnode *pMnode = pMsg->info.node; + mDebug("start to do node changing check"); streamMutexLock(&execInfo.lock); int32_t numOfNodes = extractStreamNodeList(pMnode); @@ -2240,10 +2264,20 @@ static int32_t mndProcessNodeCheckReq(SRpcMsg *pMsg) { } SVgroupChangeInfo changeInfo = mndFindChangedNodeInfo(pMnode, execInfo.pNodeList, 
pNodeSnapshot); - if (taosArrayGetSize(changeInfo.pUpdateNodeList) > 0) { + + { + if (execInfo.role == NODE_ROLE_LEADER && execInfo.switchFromFollower) { + mInfo("rollback all stream due to mnode leader/follower switch by using nodeUpdate trans"); + updateAllVgroups = true; + execInfo.switchFromFollower = false; // reset the flag + (void) addAllDbsIntoHashmap(changeInfo.pDBMap, pMnode->pSdb); + } + } + + if (taosArrayGetSize(changeInfo.pUpdateNodeList) > 0 || updateAllVgroups) { // kill current active checkpoint transaction, since the transaction is vnode wide. killAllCheckpointTrans(pMnode, &changeInfo); - code = mndProcessVgroupChange(pMnode, &changeInfo); + code = mndProcessVgroupChange(pMnode, &changeInfo, updateAllVgroups); // keep the new vnode snapshot if success if (code == TSDB_CODE_SUCCESS || code == TSDB_CODE_ACTION_IN_PROGRESS) { @@ -2284,6 +2318,9 @@ static int32_t mndProcessNodeCheck(SRpcMsg *pReq) { int32_t size = sizeof(SMStreamNodeCheckMsg); SMStreamNodeCheckMsg *pMsg = rpcMallocCont(size); + if (pMsg == NULL) { + return terrno; + } SRpcMsg rpcMsg = {.msgType = TDMT_MND_STREAM_NODECHANGE_CHECK, .pCont = pMsg, .contLen = size}; return tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); @@ -2459,6 +2496,10 @@ int32_t mndProcessStreamReqCheckpoint(SRpcMsg *pReq) { { SRpcMsg rsp = {.code = 0, .info = pReq->info, .contLen = sizeof(SMStreamReqCheckpointRsp)}; rsp.pCont = rpcMallocCont(rsp.contLen); + if (rsp.pCont == NULL) { + return terrno; + } + SMsgHead *pHead = rsp.pCont; pHead->vgId = htonl(req.nodeId); @@ -2663,11 +2704,13 @@ static int64_t getConsensusId(int64_t streamId, int32_t numOfTasks, int32_t* pEx static void doSendQuickRsp(SRpcHandleInfo *pInfo, int32_t msgSize, int32_t vgId, int32_t code) { SRpcMsg rsp = {.code = code, .info = *pInfo, .contLen = msgSize}; rsp.pCont = rpcMallocCont(rsp.contLen); - SMsgHead *pHead = rsp.pCont; - pHead->vgId = htonl(vgId); + if (rsp.pCont != NULL) { + SMsgHead *pHead = rsp.pCont; + pHead->vgId = htonl(vgId); - tmsgSendRsp(&rsp); - pInfo->handle = NULL; // disable auto rsp + tmsgSendRsp(&rsp); + pInfo->handle = NULL; // disable auto rsp + } } int32_t mndProcessConsensusInTmr(SRpcMsg *pMsg) { @@ -2804,6 +2847,10 @@ static int32_t mndProcessCreateStreamReqFromMNode(SRpcMsg *pReq) { int32_t code = mndProcessCreateStreamReq(pReq); if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { pReq->info.rsp = rpcMallocCont(1); + if (pReq->info.rsp == NULL) { + return terrno; + } + pReq->info.rspLen = 1; pReq->info.noResp = false; pReq->code = code; @@ -2815,6 +2862,10 @@ static int32_t mndProcessDropStreamReqFromMNode(SRpcMsg *pReq) { int32_t code = mndProcessDropStreamReq(pReq); if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { pReq->info.rsp = rpcMallocCont(1); + if (pReq->info.rsp == NULL) { + return terrno; + } + pReq->info.rspLen = 1; pReq->info.noResp = false; pReq->code = code; @@ -2837,6 +2888,36 @@ void mndInitStreamExecInfoForLeader(SMnode* pMnode) { mndInitStreamExecInfo(pMnode, &execInfo); } +void mndInitStreamExecInfoUpdateRole(SMnode* pMnode, int32_t role) { + execInfo.switchFromFollower = false; + + if (execInfo.role == NODE_ROLE_UNINIT) { + execInfo.role = role; + if (role == NODE_ROLE_LEADER) { + mInfo("init mnode is set to leader"); + } else { + mInfo("init mnode is set to follower"); + } + } else { + if (role == NODE_ROLE_LEADER) { + if (execInfo.role == NODE_ROLE_FOLLOWER) { + execInfo.role = role; + execInfo.switchFromFollower = true; + mInfo("mnode switch to be leader from follower"); + } else { + mInfo("mnode 
remain to be leader, do nothing"); + } + } else { // follower's + if (execInfo.role == NODE_ROLE_LEADER) { + execInfo.role = role; + mInfo("mnode switch to be follower from leader"); + } else { + mInfo("mnode remain to be follower, do nothing"); + } + } + } +} + void addAllStreamTasksIntoBuf(SMnode *pMnode, SStreamExecInfo *pExecInfo) { SSdb *pSdb = pMnode->pSdb; SStreamObj *pStream = NULL; diff --git a/source/dnode/mnode/impl/src/mndStreamTrans.c b/source/dnode/mnode/impl/src/mndStreamTrans.c index 414cd402ec..494771e65e 100644 --- a/source/dnode/mnode/impl/src/mndStreamTrans.c +++ b/source/dnode/mnode/impl/src/mndStreamTrans.c @@ -334,5 +334,5 @@ void killAllCheckpointTrans(SMnode *pMnode, SVgroupChangeInfo *pChangeInfo) { taosMemoryFree(p); } - mDebug("complete clear checkpoints in Dbs"); + mDebug("complete clear checkpoints in all Dbs"); } diff --git a/source/dnode/mnode/impl/src/mndStreamUtil.c b/source/dnode/mnode/impl/src/mndStreamUtil.c index 383ffe16da..07bba4e1b3 100644 --- a/source/dnode/mnode/impl/src/mndStreamUtil.c +++ b/source/dnode/mnode/impl/src/mndStreamUtil.c @@ -785,6 +785,9 @@ int32_t mndInitExecInfo() { return terrno; } + execInfo.role = NODE_ROLE_UNINIT; + execInfo.switchFromFollower = false; + taosHashSetFreeFp(execInfo.pTransferStateStreams, freeTaskList); taosHashSetFreeFp(execInfo.pChkptStreams, freeTaskList); taosHashSetFreeFp(execInfo.pStreamConsensus, freeTaskList); diff --git a/source/dnode/mnode/impl/src/mndSync.c b/source/dnode/mnode/impl/src/mndSync.c index 1094a17f6b..f5704be371 100644 --- a/source/dnode/mnode/impl/src/mndSync.c +++ b/source/dnode/mnode/impl/src/mndSync.c @@ -360,6 +360,8 @@ static void mndBecomeFollower(const SSyncFSM *pFsm) { (void)tsem_post(&pMgmt->syncSem); } (void)taosThreadMutexUnlock(&pMgmt->lock); + + mndInitStreamExecInfoUpdateRole(pMnode, NODE_ROLE_FOLLOWER); } static void mndBecomeLearner(const SSyncFSM *pFsm) { @@ -382,6 +384,8 @@ static void mndBecomeLearner(const SSyncFSM *pFsm) { static void mndBecomeLeader(const SSyncFSM *pFsm) { mInfo("vgId:1, become leader"); SMnode *pMnode = pFsm->data; + + mndInitStreamExecInfoUpdateRole(pMnode, NODE_ROLE_LEADER); mndInitStreamExecInfoForLeader(pMnode); } diff --git a/source/dnode/vnode/src/tqCommon/tqCommon.c b/source/dnode/vnode/src/tqCommon/tqCommon.c index dc58bfd8c4..7037eb5199 100644 --- a/source/dnode/vnode/src/tqCommon/tqCommon.c +++ b/source/dnode/vnode/src/tqCommon/tqCommon.c @@ -193,28 +193,23 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM SStreamTask* pTask = *ppTask; const char* idstr = pTask->id.idStr; - if (pMeta->updateInfo.transId == -1) { // info needs to be kept till the new trans to update the nodeEp arrived. - streamMetaInitUpdateTaskList(pMeta, req.transId); + if (req.transId <= 0) { + tqError("vgId:%d invalid update nodeEp task, transId:%d, discard", vgId, req.taskId); + rsp.code = TSDB_CODE_SUCCESS; + streamMetaWUnLock(pMeta); + + taosArrayDestroy(req.pNodeList); + return rsp.code; } - if (pMeta->updateInfo.transId != req.transId) { - if (req.transId < pMeta->updateInfo.transId) { - tqError("s-task:%s vgId:%d disorder update nodeEp msg recv, discarded, newest transId:%d, recv:%d", idstr, vgId, - pMeta->updateInfo.transId, req.transId); - rsp.code = TSDB_CODE_SUCCESS; - streamMetaWUnLock(pMeta); + // info needs to be kept till the new trans to update the nodeEp arrived. 
+ bool update = streamMetaInitUpdateTaskList(pMeta, req.transId); + if (!update) { + rsp.code = TSDB_CODE_SUCCESS; + streamMetaWUnLock(pMeta); - taosArrayDestroy(req.pNodeList); - return rsp.code; - } else { - tqInfo("s-task:%s vgId:%d receive new trans to update nodeEp msg from mnode, transId:%d, prev transId:%d", idstr, - vgId, req.transId, pMeta->updateInfo.transId); - // info needs to be kept till the new trans to update the nodeEp arrived. - streamMetaInitUpdateTaskList(pMeta, req.transId); - } - } else { - tqDebug("s-task:%s vgId:%d recv trans to update nodeEp from mnode, transId:%d, recorded update transId:%d", idstr, - vgId, req.transId, pMeta->updateInfo.transId); + taosArrayDestroy(req.pNodeList); + return rsp.code; } // duplicate update epset msg received, discard this redundant message @@ -311,7 +306,7 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM // persist to disk } - streamMetaClearUpdateTaskList(pMeta); + streamMetaClearSetUpdateTaskListComplete(pMeta); if (!restored) { tqDebug("vgId:%d vnode restore not completed, not start all tasks", vgId); @@ -775,8 +770,8 @@ static int32_t restartStreamTasks(SStreamMeta* pMeta, bool isLeader) { streamMetaWUnLock(pMeta); terrno = 0; - tqInfo("vgId:%d tasks are all updated and stopped, restart all tasks, triggered by transId:%d", vgId, - pMeta->updateInfo.transId); + tqInfo("vgId:%d tasks are all updated and stopped, restart all tasks, triggered by transId:%d, ts:%" PRId64, vgId, + pMeta->updateInfo.completeTransId, pMeta->updateInfo.completeTs); while (streamMetaTaskInTimer(pMeta)) { tqDebug("vgId:%d some tasks in timer, wait for 100ms and recheck", pMeta->vgId); @@ -902,7 +897,7 @@ int32_t tqStartTaskCompleteCallback(SStreamMeta* pMeta) { return restartStreamTasks(pMeta, (pMeta->role == NODE_ROLE_LEADER)); } else { if (pStartInfo->restartCount == 0) { - tqDebug("vgId:%d start all tasks completed in callbackFn, restartCount is 0", pMeta->vgId); + tqDebug("vgId:%d start all tasks completed in callbackFn, restartCounter is 0", pMeta->vgId); } else if (allReady) { pStartInfo->restartCount = 0; tqDebug("vgId:%d all tasks are ready, reset restartCounter 0, not restart tasks", vgId); diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index d222004fb7..cd69c9168c 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -631,7 +631,13 @@ void flushStateDataInExecutor(SStreamTask* pTask, SStreamQueueItem* pCheckpointB (void) streamTaskReloadState(pTask); stDebug("s-task:%s transfer state from fill-history task:%s, status:%s completed", id, pHTask->id.idStr, streamTaskGetStatus(pHTask).name); - + // todo execute qExecTask to fetch the reload-generated result, if this is stream is for session window query. + /* + * while(1) { + * qExecTask() + * } + * // put into the output queue. + */ streamMetaReleaseTask(pTask->pMeta, pHTask); } else { stError("s-task:%s related fill-history task:0x%x failed to acquire, transfer state failed", id, diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 5ed9f274a2..8379d904c2 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -431,7 +431,8 @@ int32_t streamMetaOpen(const char* path, void* ahandle, FTaskBuild buildTaskFn, pMeta->expandTaskFn = expandTaskFn; pMeta->stage = stage; pMeta->role = (vgId == SNODE_HANDLE) ? 
NODE_ROLE_LEADER : NODE_ROLE_UNINIT; - pMeta->updateInfo.transId = -1; + pMeta->updateInfo.activeTransId = -1; + pMeta->updateInfo.completeTransId = -1; pMeta->startInfo.completeFn = fn; pMeta->pTaskDbUnique = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_ENTRY_LOCK); @@ -1759,12 +1760,56 @@ void streamMetaAddIntoUpdateTaskList(SStreamMeta* pMeta, SStreamTask* pTask, SSt } } -void streamMetaClearUpdateTaskList(SStreamMeta* pMeta) { - taosHashClear(pMeta->updateInfo.pTasks); - pMeta->updateInfo.transId = -1; +void streamMetaClearSetUpdateTaskListComplete(SStreamMeta* pMeta) { + STaskUpdateInfo* pInfo = &pMeta->updateInfo; + + taosHashClear(pInfo->pTasks); + + int32_t prev = pInfo->completeTransId; + pInfo->completeTransId = pInfo->activeTransId; + pInfo->activeTransId = -1; + pInfo->completeTs = taosGetTimestampMs(); + + stDebug("vgId:%d set the nodeEp update complete, ts:%" PRId64 ", complete transId:%d->%d, reset active transId", + pMeta->vgId, pInfo->completeTs, prev, pInfo->completeTransId); } -void streamMetaInitUpdateTaskList(SStreamMeta* pMeta, int32_t transId) { - taosHashClear(pMeta->updateInfo.pTasks); - pMeta->updateInfo.transId = transId; +bool streamMetaInitUpdateTaskList(SStreamMeta* pMeta, int32_t transId) { + STaskUpdateInfo* pInfo = &pMeta->updateInfo; + + if (transId > pInfo->completeTransId) { + if (pInfo->activeTransId == -1) { + taosHashClear(pInfo->pTasks); + pInfo->activeTransId = transId; + + stInfo("vgId:%d set the active epset update transId:%d, prev complete transId:%d", pMeta->vgId, transId, + pInfo->completeTransId); + return true; + } else { + if (pInfo->activeTransId == transId) { + // do nothing + return true; + } else if (transId < pInfo->activeTransId) { + stError("vgId:%d invalid(out of order)epset update transId:%d, active transId:%d, complete transId:%d, discard", + pMeta->vgId, transId, pInfo->activeTransId, pInfo->completeTransId); + return false; + } else { // transId > pInfo->activeTransId + taosHashClear(pInfo->pTasks); + int32_t prev = pInfo->activeTransId; + pInfo->activeTransId = transId; + + stInfo("vgId:%d active epset update transId updated from:%d to %d, prev complete transId:%d", pMeta->vgId, + transId, prev, pInfo->completeTransId); + return true; + } + } + } else if (transId == pInfo->completeTransId) { + stError("vgId:%d already handled epset update transId:%d, completeTs:%" PRId64 " ignore", pMeta->vgId, transId, + pInfo->completeTs); + return false; + } else { // pInfo->completeTransId > transId + stError("vgId:%d disorder update nodeEp msg recv, prev completed epset update transId:%d, recv:%d, discard", + pMeta->vgId, pInfo->activeTransId, transId); + return false; + } } diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index c0b2b16d30..f190673430 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -637,7 +637,7 @@ bool streamTaskUpdateEpsetInfo(SStreamTask* pTask, SArray* pNodeList) { numOfNodes, p->updateCount, prevTs); bool updated = false; - for (int32_t i = 0; i < taosArrayGetSize(pNodeList); ++i) { + for (int32_t i = 0; i < numOfNodes; ++i) { SNodeUpdateInfo* pInfo = taosArrayGet(pNodeList, i); if (pInfo == NULL) { continue; From 5322b60a31086f37da347ce101e6243e8d5e4776 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 15 Aug 2024 18:08:16 +0800 Subject: [PATCH 080/181] refactor: remove unused attributes in msg. 
--- include/common/tmsg.h | 1 - 1 file changed, 1 deletion(-) diff --git a/include/common/tmsg.h b/include/common/tmsg.h index cfacb5cfd6..70cf9c8b58 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -3696,7 +3696,6 @@ typedef struct { SMsgHead head; int64_t streamId; int32_t taskId; - int32_t transId; } SVPauseStreamTaskReq, SVResetStreamTaskReq; typedef struct { From c94cd245931f713e3e980eb23f34b5e3013355ac Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 15 Aug 2024 18:29:36 +0800 Subject: [PATCH 081/181] refactor: do some internal refactor. --- source/libs/stream/src/streamCheckpoint.c | 14 +++++++++----- source/libs/stream/src/streamMeta.c | 20 ++++++++++++-------- 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/source/libs/stream/src/streamCheckpoint.c b/source/libs/stream/src/streamCheckpoint.c index 4bf74d8d4f..9be8f5ffaa 100644 --- a/source/libs/stream/src/streamCheckpoint.c +++ b/source/libs/stream/src/streamCheckpoint.c @@ -561,12 +561,14 @@ int32_t streamTaskUpdateTaskCheckpointInfo(SStreamTask* pTask, bool restored, SV stDebug("s-task:%s vgId:%d related fill-history task:0x%x dropped in update checkpointInfo, remain tasks:%d", id, vgId, pReq->taskId, numOfTasks); } + streamMetaWLock(pMeta); - if (streamMetaCommit(pMeta) < 0) { - // persist to disk + if (pReq->dropRelHTask) { + code = streamMetaCommit(pMeta); } } + // always return true return TSDB_CODE_SUCCESS; } @@ -594,13 +596,15 @@ int32_t streamTaskUpdateTaskCheckpointInfo(SStreamTask* pTask, bool restored, SV ASSERT(pInfo->checkpointId <= pReq->checkpointId && pInfo->checkpointVer <= pReq->checkpointVer && pInfo->processedVer <= pReq->checkpointVer); - // update only it is in checkpoint status. - if (pStatus.state == TASK_STATUS__CK) { + // update only it is in checkpoint status, or during restore procedure. 
+ if (pStatus.state == TASK_STATUS__CK || (!restored)) { pInfo->checkpointId = pReq->checkpointId; pInfo->checkpointVer = pReq->checkpointVer; pInfo->checkpointTime = pReq->checkpointTs; - code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_CHECKPOINT_DONE); + if (restored) { + code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_CHECKPOINT_DONE); + } } streamTaskClearCheckInfo(pTask, true); diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 8379d904c2..7c6461b1c8 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -891,24 +891,28 @@ int32_t streamMetaBegin(SStreamMeta* pMeta) { } int32_t streamMetaCommit(SStreamMeta* pMeta) { - if (tdbCommit(pMeta->db, pMeta->txn) < 0) { + int32_t code = 0; + code = tdbCommit(pMeta->db, pMeta->txn); + if (code != 0) { stError("vgId:%d failed to commit stream meta", pMeta->vgId); - return -1; + return code; } - if (tdbPostCommit(pMeta->db, pMeta->txn) < 0) { + code = tdbPostCommit(pMeta->db, pMeta->txn); + if (code != 0) { stError("vgId:%d failed to do post-commit stream meta", pMeta->vgId); - return -1; + return code; } - if (tdbBegin(pMeta->db, &pMeta->txn, tdbDefaultMalloc, tdbDefaultFree, NULL, - TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) { + code = tdbBegin(pMeta->db, &pMeta->txn, tdbDefaultMalloc, tdbDefaultFree, NULL, + TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED); + if (code != 0) { stError("vgId:%d failed to begin trans", pMeta->vgId); - return -1; + return code; } stDebug("vgId:%d stream meta file commit completed", pMeta->vgId); - return 0; + return code; } int64_t streamMetaGetLatestCheckpointId(SStreamMeta* pMeta) { From 6adc0543e86555517f36006f886ea13826a56baf Mon Sep 17 00:00:00 2001 From: kailixu Date: Thu, 15 Aug 2024 19:19:48 +0800 Subject: [PATCH 082/181] enh: support config randErrorChance dynamically --- source/common/src/tglobal.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index a013c98b73..6cd99d4443 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -592,7 +592,7 @@ static int32_t taosAddClientCfg(SConfig *pCfg) { CFG_SCOPE_CLIENT, CFG_DYN_NONE)); TAOS_CHECK_RETURN( cfgAddInt32(pCfg, "metaCacheMaxSize", tsMetaCacheMaxSize, -1, INT32_MAX, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT)); - TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "randErrorChance", tsRandErrChance, 0, 10000, CFG_SCOPE_BOTH, CFG_DYN_NONE)); + TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "randErrorChance", tsRandErrChance, 0, 10000, CFG_SCOPE_BOTH, CFG_DYN_BOTH)); TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "randErrorDivisor", tsRandErrDivisor, 1, INT64_MAX, CFG_SCOPE_BOTH, CFG_DYN_BOTH)); TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "randErrorScope", tsRandErrScope, 0, INT64_MAX, CFG_SCOPE_BOTH, CFG_DYN_BOTH)); @@ -1930,6 +1930,7 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) { {"mndSdbWriteDelta", &tsMndSdbWriteDelta}, {"minDiskFreeSize", &tsMinDiskFreeSize}, + {"randErrorChance", &tsRandErrChance}, {"randErrorDivisor", &tsRandErrDivisor}, {"randErrorScope", &tsRandErrScope}, @@ -2210,6 +2211,7 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, const char *name) { {"queryPlannerTrace", &tsQueryPlannerTrace}, {"queryNodeChunkSize", &tsQueryNodeChunkSize}, {"queryUseNodeAllocator", &tsQueryUseNodeAllocator}, + {"randErrorChance", &tsRandErrChance}, {"randErrorDivisor", &tsRandErrDivisor}, {"randErrorScope", &tsRandErrScope}, {"smlDot2Underline", 
&tsSmlDot2Underline}, From c1c59ce82b97c587b2472c18f096961300f21579 Mon Sep 17 00:00:00 2001 From: sima Date: Thu, 15 Aug 2024 17:31:18 +0800 Subject: [PATCH 083/181] fix:[TD-31479] Fix wrong ans when parameter is NULL in substring. --- source/libs/scalar/src/sclfunc.c | 96 +++++++++++++++++++++----------- 1 file changed, 64 insertions(+), 32 deletions(-) diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index 846837fd99..aa3f1dfd78 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -1173,49 +1173,79 @@ static int32_t findPosBytes(char *orgStr, char *delimStr, int32_t orgLen, int32_ int32_t substrFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) { int32_t code = TSDB_CODE_SUCCESS; - int32_t subPos = 0; - GET_TYPED_DATA(subPos, int32_t, GET_PARAM_TYPE(&pInput[1]), pInput[1].columnData->pData); - - int32_t subLen = INT16_MAX; - if (inputNum == 3) { - GET_TYPED_DATA(subLen, int32_t, GET_PARAM_TYPE(&pInput[2]), pInput[2].columnData->pData); - } - - SColumnInfoData *pInputData = pInput->columnData; + SColumnInfoData *pInputData[3]; SColumnInfoData *pOutputData = pOutput->columnData; - int32_t outputLen = pInputData->info.bytes; + for (int32_t i = 0; i < inputNum; ++i) { + pInputData[i] = pInput[i].columnData; + } + + int32_t outputLen = pInputData[0]->info.bytes; char *outputBuf = taosMemoryMalloc(outputLen); if (outputBuf == NULL) { qError("substr function memory allocation failure. size: %d", outputLen); return TSDB_CODE_OUT_OF_MEMORY; } - for (int32_t i = 0; i < pInput->numOfRows; ++i) { - if (colDataIsNull_s(pInputData, i)) { + int32_t numOfRows = 0; + for (int32_t i = 0; i < inputNum; ++i) { + numOfRows = TMAX(pInput[i].numOfRows, numOfRows); + } + + bool hasNullType = (IS_NULL_TYPE(GET_PARAM_TYPE(&pInput[0])) || IS_NULL_TYPE(GET_PARAM_TYPE(&pInput[1])) || + (inputNum == 3 && IS_NULL_TYPE(GET_PARAM_TYPE(&pInput[2])))); + + if (hasNullType || + (pInput[0].numOfRows == 1 && colDataIsNull_s(pInputData[0], 0)) || + (pInput[1].numOfRows == 1 && colDataIsNull_s(pInputData[1], 0)) || + (inputNum == 3 && (pInput[2].numOfRows == 1 && colDataIsNull_s(pInputData[2], 0)))) { + colDataSetNNULL(pOutputData, 0, numOfRows); + pOutput->numOfRows = numOfRows; + goto _return; + } + + int32_t colIdx[3]; + for (int32_t i = 0; i < numOfRows; ++i) { + colIdx[0] = (pInput[0].numOfRows == 1) ? 0 : i; + colIdx[1] = (pInput[1].numOfRows == 1) ? 0 : i; + if (inputNum == 3) { + colIdx[2] = (pInput[2].numOfRows == 1) ? 0 : i; + } + + if (colDataIsNull_s(pInputData[0], colIdx[0]) || colDataIsNull_s(pInputData[1], colIdx[1]) || + (inputNum == 3 && colDataIsNull_s(pInputData[2], colIdx[2]))) { colDataSetNULL(pOutputData, i); continue; } + + int32_t subPos = 0; + int32_t subLen = INT16_MAX; + GET_TYPED_DATA(subPos, int32_t, GET_PARAM_TYPE(&pInput[1]), colDataGetData(pInputData[1], colIdx[1])); + if (inputNum == 3) { + GET_TYPED_DATA(subLen, int32_t, GET_PARAM_TYPE(&pInput[2]), colDataGetData(pInputData[2], colIdx[2])); + } + if (subPos == 0 || subLen < 1) { varDataSetLen(outputBuf, 0); SCL_ERR_JRET(colDataSetVal(pOutputData, i, outputBuf, false)); continue; } - char *input = colDataGetData(pInput[0].columnData, i); + + char *input = colDataGetData(pInputData[0], colIdx[0]); int32_t len = varDataLen(input); int32_t startPosBytes; int32_t endPosBytes = len; if (subPos > 0) { - startPosBytes = (GET_PARAM_TYPE(pInput) == TSDB_DATA_TYPE_VARCHAR) ? 
findPosBytes(varDataVal(input), NULL, varDataLen(input), -1, subPos) : (subPos - 1) * TSDB_NCHAR_SIZE; + startPosBytes = (GET_PARAM_TYPE(&pInput[0]) == TSDB_DATA_TYPE_VARCHAR) ? findPosBytes(varDataVal(input), NULL, varDataLen(input), -1, subPos) : (subPos - 1) * TSDB_NCHAR_SIZE; startPosBytes = TMIN(startPosBytes, len); } else { startPosBytes = - (GET_PARAM_TYPE(pInput) == TSDB_DATA_TYPE_VARCHAR) ? findPosBytes(varDataVal(input), NULL, varDataLen(input), -1, subPos) : len + subPos * TSDB_NCHAR_SIZE; + (GET_PARAM_TYPE(&pInput[0]) == TSDB_DATA_TYPE_VARCHAR) ? findPosBytes(varDataVal(input), NULL, varDataLen(input), -1, subPos) : len + subPos * TSDB_NCHAR_SIZE; startPosBytes = TMAX(startPosBytes, 0); } if (inputNum == 3) { endPosBytes = - (GET_PARAM_TYPE(pInput) == TSDB_DATA_TYPE_VARCHAR) + (GET_PARAM_TYPE(&pInput[0]) == TSDB_DATA_TYPE_VARCHAR) ? startPosBytes + findPosBytes(varDataVal(input) + startPosBytes, NULL, varDataLen(input) - startPosBytes, -1, subLen + 1) : startPosBytes + subLen * TSDB_NCHAR_SIZE; endPosBytes = TMIN(endPosBytes, len); @@ -1230,10 +1260,10 @@ int32_t substrFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOu varDataSetLen(output, 0); } - SCL_ERR_JRET(colDataSetVal(pOutputData, i, output, false)); + SCL_ERR_JRET(colDataSetVal(pOutputData, i, outputBuf, false)); } - pOutput->numOfRows = pInput->numOfRows; + pOutput->numOfRows = numOfRows; _return: taosMemoryFree(outputBuf); @@ -1510,13 +1540,13 @@ int32_t replaceFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pO bool needFreeTo = false; if (GET_PARAM_TYPE(&pInput[1]) != GET_PARAM_TYPE(&pInput[0])) { - SCL_ERR_RET(convBetweenNcharAndVarchar(varDataVal(colDataGetData(pInputData[1], colIdx2)), &fromStr, + SCL_ERR_JRET(convBetweenNcharAndVarchar(varDataVal(colDataGetData(pInputData[1], colIdx2)), &fromStr, varDataLen(colDataGetData(pInputData[1], colIdx2)), &fromLen, GET_PARAM_TYPE(&pInput[0]))); needFreeFrom = true; } if (GET_PARAM_TYPE(&pInput[2]) != GET_PARAM_TYPE(&pInput[0])) { - SCL_ERR_RET(convBetweenNcharAndVarchar(varDataVal(colDataGetData(pInputData[2], colIdx3)), &toStr, + SCL_ERR_JRET(convBetweenNcharAndVarchar(varDataVal(colDataGetData(pInputData[2], colIdx3)), &toStr, varDataLen(colDataGetData(pInputData[2], colIdx3)), &toLen, GET_PARAM_TYPE(&pInput[0]))); needFreeTo = true; @@ -1544,9 +1574,11 @@ int32_t replaceFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pO taosMemoryFree(fromStr); } varDataSetLen(outputBuf, totalLen); - SCL_ERR_RET(colDataSetVal(pOutputData, i, outputBuf, false)); + SCL_ERR_JRET(colDataSetVal(pOutputData, i, outputBuf, false)); } pOutput->numOfRows = numOfRows; +_return: + taosMemoryFree(outputBuf); return code; } @@ -1561,10 +1593,16 @@ int32_t substrIdxFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam * pInputData[1] = pInput[1].columnData; pInputData[2] = pInput[2].columnData; + for (int32_t i = 0; i < inputNum; ++i) { + if (pInput[i].numOfRows > numOfRows) { + numOfRows = pInput[i].numOfRows; + } + } + outputLen = pInputData[0]->info.bytes; if (GET_PARAM_TYPE(&pInput[0]) == TSDB_DATA_TYPE_NULL || GET_PARAM_TYPE(&pInput[1]) == TSDB_DATA_TYPE_NULL || GET_PARAM_TYPE(&pInput[2]) == TSDB_DATA_TYPE_NULL) { - colDataSetNNULL(pOutputData, 0, pInput[0].numOfRows); - pOutput->numOfRows = pInput[0].numOfRows; + colDataSetNNULL(pOutputData, 0, numOfRows); + pOutput->numOfRows = numOfRows; return TSDB_CODE_SUCCESS; } char *outputBuf = taosMemoryCalloc(outputLen + VARSTR_HEADER_SIZE, 1); @@ -1572,12 +1610,6 @@ int32_t 
substrIdxFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam * SCL_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); } - for (int32_t i = 0; i < inputNum; ++i) { - if (pInput[i].numOfRows > numOfRows) { - numOfRows = pInput[i].numOfRows; - } - } - for (int32_t k = 0; k < numOfRows; ++k) { bool hasNull = false; for (int32_t i = 0; i < inputNum; ++i) { @@ -1600,9 +1632,9 @@ int32_t substrIdxFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam * int32_t startPosBytes; int32_t endPosBytes; if (GET_PARAM_TYPE(&pInput[0]) != GET_PARAM_TYPE(&pInput[1])) { - SCL_ERR_RET(convBetweenNcharAndVarchar(varDataVal(colDataGetData(pInputData[1], colIdx2)), &delimStr, - varDataLen(colDataGetData(pInputData[1], colIdx2)), &delimLen, - GET_PARAM_TYPE(&pInput[0]))); + SCL_ERR_JRET(convBetweenNcharAndVarchar(varDataVal(colDataGetData(pInputData[1], colIdx2)), &delimStr, + varDataLen(colDataGetData(pInputData[1], colIdx2)), &delimLen, + GET_PARAM_TYPE(&pInput[0]))); needFreeDelim = true; } From ebe5f183cbcd7a76969f75314a77c9f4e9ff39e5 Mon Sep 17 00:00:00 2001 From: sima Date: Thu, 15 Aug 2024 11:27:59 +0800 Subject: [PATCH 084/181] fix:[TD-31468] Fix char function wrong result. --- source/libs/scalar/src/sclfunc.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index 846837fd99..836dd89f0d 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -1295,30 +1295,40 @@ int32_t charFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutp int32_t outputLen = inputNum * 4 + 2; char *outputBuf = taosMemoryCalloc(outputLen, 1); if (outputBuf == NULL) { - SCL_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + SCL_ERR_RET(terrno); } - for (int32_t i = 0; i < pInput[0].numOfRows; ++i) { + int32_t numOfRows = 0; + for (int32_t i = 0; i < inputNum; ++i) { + numOfRows = TMAX(numOfRows, pInput[i].numOfRows); + } + for (int32_t i = 0; i < numOfRows; ++i) { char *output = varDataVal(outputBuf); for (int32_t j = 0; j < inputNum; ++j) { + int32_t colIdx = (pInput[j].numOfRows == 1) ? 
0 : i; int32_t num; if (colDataIsNull_s(pInput[j].columnData, i)) { continue; } else if (IS_NUMERIC_TYPE(GET_PARAM_TYPE(&pInput[j]))) { - GET_TYPED_DATA(num, int32_t, GET_PARAM_TYPE(&pInput[j]), pInput[j].columnData->pData); + GET_TYPED_DATA(num, int32_t, GET_PARAM_TYPE(&pInput[j]), colDataGetData(pInput[j].columnData, colIdx)); getAsciiChar(num, &output); } else if (TSDB_DATA_TYPE_BINARY == GET_PARAM_TYPE(&pInput[j])) { - num = taosStr2Int32(varDataVal(pInput[j].columnData->pData), NULL, 10); + num = taosStr2Int32(varDataVal(colDataGetData(pInput[j].columnData, colIdx)), NULL, 10); getAsciiChar(num, &output); } else if (TSDB_DATA_TYPE_NCHAR == GET_PARAM_TYPE(&pInput[j])) { char *convBuf = taosMemoryMalloc(GET_PARAM_BYTES(&pInput[j])); - int32_t len = taosUcs4ToMbs((TdUcs4 *)varDataVal(pInput[j].columnData->pData), varDataLen(pInput[j].columnData->pData), convBuf); + if (convBuf == NULL) { + SCL_ERR_RET(terrno); + } + int32_t len = taosUcs4ToMbs((TdUcs4 *)varDataVal(colDataGetData(pInput[j].columnData, colIdx)), varDataLen(colDataGetData(pInput[j].columnData, colIdx)), convBuf); if (len < 0) { + taosMemoryFree(convBuf); code = TSDB_CODE_SCALAR_CONVERT_ERROR; goto _return; } convBuf[len] = 0; num = taosStr2Int32(convBuf, NULL, 10); getAsciiChar(num, &output); + taosMemoryFree(convBuf); } else { code = TSDB_CODE_FUNC_FUNTION_PARA_TYPE; goto _return; From 44466a4bcdb3e0eb14c13c688b69c3e7b0c0e508 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 15 Aug 2024 22:28:13 +0800 Subject: [PATCH 085/181] fix(stream): fix a typo --- source/libs/stream/src/streamDispatch.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 86970f80fa..bf64af6558 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -765,7 +765,7 @@ int32_t streamDispatchStreamBlock(SStreamTask* pTask) { // todo: secure the timerActive and start timer in after lock pTask->lock streamMutexLock(&pTask->lock); bool shouldStop = streamTaskShouldStop(pTask); - streamMutexLock(&pTask->lock); + streamMutexUnlock(&pTask->lock); if (shouldStop) { stDebug("s-task:%s in stop/dropping status, not start dispatch monitor tmr", id); From 868898d11cf5f0f3f237ba3f7d0423d60783ba9d Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 16 Aug 2024 00:28:49 +0800 Subject: [PATCH 086/181] fix(stream): delay to load the stream task. 
--- source/dnode/mnode/impl/inc/mndStream.h | 4 ++-- source/dnode/mnode/impl/src/mndStream.c | 15 +++++++-------- source/dnode/mnode/impl/src/mndSync.c | 6 +++--- 3 files changed, 12 insertions(+), 13 deletions(-) diff --git a/source/dnode/mnode/impl/inc/mndStream.h b/source/dnode/mnode/impl/inc/mndStream.h index a87a01c5b6..75ba51e498 100644 --- a/source/dnode/mnode/impl/inc/mndStream.h +++ b/source/dnode/mnode/impl/inc/mndStream.h @@ -154,8 +154,8 @@ bool streamTaskIterNextTask(SStreamTaskIter *pIter); int32_t streamTaskIterGetCurrent(SStreamTaskIter *pIter, SStreamTask **pTask); int32_t mndInitExecInfo(); void mndInitStreamExecInfo(SMnode *pMnode, SStreamExecInfo *pExecInfo); -void mndInitStreamExecInfoForLeader(SMnode *pMnode); -void mndInitStreamExecInfoUpdateRole(SMnode *pMnode, int32_t role); +void mndStreamResetInitTaskListLoadFlag(); +void mndUpdateStreamExecInfoRole(SMnode *pMnode, int32_t role); int32_t removeExpiredNodeEntryAndTaskInBuf(SArray *pNodeSnapshot); void removeStreamTasksInBuf(SStreamObj *pStream, SStreamExecInfo *pExecNode); diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index a85b5c733b..9aa36c0c4e 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -2882,13 +2882,12 @@ void mndInitStreamExecInfo(SMnode *pMnode, SStreamExecInfo *pExecInfo) { pExecInfo->initTaskList = true; } -void mndInitStreamExecInfoForLeader(SMnode* pMnode) { +void mndStreamResetInitTaskListLoadFlag() { + mInfo("reset task list buffer init flag for leader"); execInfo.initTaskList = false; - mInfo("init stream execInfo for leader"); - mndInitStreamExecInfo(pMnode, &execInfo); } -void mndInitStreamExecInfoUpdateRole(SMnode* pMnode, int32_t role) { +void mndUpdateStreamExecInfoRole(SMnode* pMnode, int32_t role) { execInfo.switchFromFollower = false; if (execInfo.role == NODE_ROLE_UNINIT) { @@ -3013,7 +3012,8 @@ static int32_t mndProcessDropOrphanTaskReq(SRpcMsg *pReq) { // check if it is conflict with other trans in both sourceDb and targetDb. 
bool conflict = mndStreamTransConflictCheck(pMnode, pTask->streamId, MND_STREAM_DROP_NAME, false); if (conflict) { - TAOS_RETURN(TSDB_CODE_MND_TRANS_CONFLICT); + code = TSDB_CODE_MND_TRANS_CONFLICT; + goto _err; } SStreamObj dummyObj = {.uid = pTask->streamId, .sourceDb = "", .targetSTbName = ""}; @@ -3026,8 +3026,7 @@ static int32_t mndProcessDropOrphanTaskReq(SRpcMsg *pReq) { code = mndStreamRegisterTrans(pTrans, MND_STREAM_DROP_NAME, pTask->streamId); if (code) { - mndTransDrop(pTrans); - return code; + goto _err; } // drop all tasks @@ -3051,7 +3050,7 @@ _err: tDestroyDropOrphanTaskMsg(&msg); mndTransDrop(pTrans); - if (code == TSDB_CODE_SUCCESS) { + if (code == TSDB_CODE_SUCCESS || code == TSDB_CODE_ACTION_IN_PROGRESS) { mDebug("create drop %d orphan tasks trans succ", numOfTasks); } return code; diff --git a/source/dnode/mnode/impl/src/mndSync.c b/source/dnode/mnode/impl/src/mndSync.c index f5704be371..0f4e4f0363 100644 --- a/source/dnode/mnode/impl/src/mndSync.c +++ b/source/dnode/mnode/impl/src/mndSync.c @@ -361,7 +361,7 @@ static void mndBecomeFollower(const SSyncFSM *pFsm) { } (void)taosThreadMutexUnlock(&pMgmt->lock); - mndInitStreamExecInfoUpdateRole(pMnode, NODE_ROLE_FOLLOWER); + mndUpdateStreamExecInfoRole(pMnode, NODE_ROLE_FOLLOWER); } static void mndBecomeLearner(const SSyncFSM *pFsm) { @@ -385,8 +385,8 @@ static void mndBecomeLeader(const SSyncFSM *pFsm) { mInfo("vgId:1, become leader"); SMnode *pMnode = pFsm->data; - mndInitStreamExecInfoUpdateRole(pMnode, NODE_ROLE_LEADER); - mndInitStreamExecInfoForLeader(pMnode); + mndUpdateStreamExecInfoRole(pMnode, NODE_ROLE_LEADER); + mndStreamResetInitTaskListLoadFlag(); } static bool mndApplyQueueEmpty(const SSyncFSM *pFsm) { From 0f922fb3730231725e3a23b90dfd25c9006266da Mon Sep 17 00:00:00 2001 From: dmchen Date: Fri, 16 Aug 2024 01:23:01 +0000 Subject: [PATCH 087/181] fix/TD-31485 --- source/dnode/mnode/impl/src/mndDb.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c index fe5c12419c..d0eed37f99 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ b/source/dnode/mnode/impl/src/mndDb.c @@ -1211,22 +1211,22 @@ static int32_t mndAlterDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pOld, SDbObj *p if (pTrans == NULL) { code = TSDB_CODE_MND_RETURN_VALUE_NULL; if (terrno != 0) code = terrno; - return -1; + TAOS_RETURN(code); } mInfo("trans:%d, used to alter db:%s", pTrans->id, pOld->name); mndTransSetDbName(pTrans, pOld->name, NULL); - TAOS_CHECK_RETURN(mndTransCheckConflict(pMnode, pTrans)); + TAOS_CHECK_GOTO(mndTransCheckConflict(pMnode, pTrans), NULL, _OVER); - TAOS_CHECK_RETURN(mndSetAlterDbPrepareLogs(pMnode, pTrans, pOld, pNew)); - TAOS_CHECK_RETURN(mndSetAlterDbCommitLogs(pMnode, pTrans, pOld, pNew)); - TAOS_CHECK_RETURN(mndSetAlterDbRedoActions(pMnode, pTrans, pOld, pNew)); - TAOS_CHECK_RETURN(mndTransPrepare(pMnode, pTrans)); + TAOS_CHECK_GOTO(mndSetAlterDbPrepareLogs(pMnode, pTrans, pOld, pNew), NULL, _OVER); + TAOS_CHECK_GOTO(mndSetAlterDbCommitLogs(pMnode, pTrans, pOld, pNew), NULL, _OVER); + TAOS_CHECK_GOTO(mndSetAlterDbRedoActions(pMnode, pTrans, pOld, pNew), NULL, _OVER); + TAOS_CHECK_GOTO(mndTransPrepare(pMnode, pTrans), NULL, _OVER); code = 0; _OVER: mndTransDrop(pTrans); - return code; + TAOS_RETURN(code); } static int32_t mndProcessAlterDbReq(SRpcMsg *pReq) { From 76f56d940b19ce48df661dff6bb6993a47e9a2ba Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao> Date: Fri, 16 Aug 2024 10:26:09 +0800 Subject: [PATCH 088/181] 
fix issue --- .../executor/src/streamtimewindowoperator.c | 28 +++++++++++++------ source/libs/stream/src/tstreamFileState.c | 25 +++++++++++------ 2 files changed, 36 insertions(+), 17 deletions(-) diff --git a/source/libs/executor/src/streamtimewindowoperator.c b/source/libs/executor/src/streamtimewindowoperator.c index 756a6d71e1..5c12db1ab9 100644 --- a/source/libs/executor/src/streamtimewindowoperator.c +++ b/source/libs/executor/src/streamtimewindowoperator.c @@ -477,10 +477,12 @@ void destroyStreamFinalIntervalOperatorInfo(void* param) { blockDataDestroy(pInfo->pDelRes); blockDataDestroy(pInfo->pMidRetriveRes); blockDataDestroy(pInfo->pMidPulloverRes); - pInfo->stateStore.streamFileStateDestroy(pInfo->pState->pFileState); + if (pInfo->stateStore.streamFileStateDestroy != NULL) { + pInfo->stateStore.streamFileStateDestroy(pInfo->pState->pFileState); + } taosArrayDestroy(pInfo->pMidPullDatas); - if (pInfo->pState->dump == 1) { + if (pInfo->pState !=NULL && pInfo->pState->dump == 1) { taosMemoryFreeClear(pInfo->pState->pTdbState->pOwner); taosMemoryFreeClear(pInfo->pState->pTdbState); } @@ -1953,12 +1955,14 @@ int32_t createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiN pInfo->numOfDatapack = 0; pInfo->pUpdated = NULL; pInfo->pUpdatedMap = NULL; + pInfo->stateStore = pTaskInfo->storageAPI.stateStore; int32_t funResSize = getMaxFunResSize(&pOperator->exprSupp, numOfCols); pInfo->pState->pFileState = pAPI->stateStore.streamFileStateInit( tsStreamBufferSize, sizeof(SWinKey), pInfo->aggSup.resultRowSize, funResSize, compareTs, pInfo->pState, pInfo->twAggSup.deleteMark, GET_TASKID(pTaskInfo), pHandle->checkpointId, STREAM_STATE_BUFF_HASH); + QUERY_CHECK_NULL(pInfo->pState->pFileState, code, lino, _error, terrno); + pInfo->dataVersion = 0; - pInfo->stateStore = pTaskInfo->storageAPI.stateStore; pInfo->recvGetAll = false; pInfo->recvPullover = false; pInfo->recvRetrive = false; @@ -2032,7 +2036,9 @@ void destroyStreamAggSupporter(SStreamAggSupporter* pSup) { tSimpleHashCleanup(pSup->pResultRows); destroyDiskbasedBuf(pSup->pResultBuf); blockDataDestroy(pSup->pScanBlock); - pSup->stateStore.streamFileStateDestroy(pSup->pState->pFileState); + if (pSup->stateStore.streamFileStateDestroy != NULL) { + pSup->stateStore.streamFileStateDestroy(pSup->pState->pFileState); + } taosMemoryFreeClear(pSup->pState); taosMemoryFreeClear(pSup->pDummyCtx); } @@ -2141,7 +2147,7 @@ int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, SExprSupp* pExpSup, in SReadHandle* pHandle, STimeWindowAggSupp* pTwAggSup, const char* taskIdStr, SStorageAPI* pApi, int32_t tsIndex) { pSup->resultRowSize = keySize + getResultRowSize(pExpSup->pCtx, numOfOutput); - + int32_t lino = 0; int32_t code = createSpecialDataBlock(STREAM_CLEAR, &pSup->pScanBlock); if (code) { return code; @@ -2156,6 +2162,7 @@ int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, SExprSupp* pExpSup, in } pSup->stateStore = *pStore; + pSup->pSessionAPI = pApi; initDummyFunction(pSup->pDummyCtx, pExpSup->pCtx, numOfOutput); pSup->pState = taosMemoryCalloc(1, sizeof(SStreamState)); @@ -2168,6 +2175,7 @@ int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, SExprSupp* pExpSup, in pSup->pState->pFileState = pSup->stateStore.streamFileStateInit( tsStreamBufferSize, sizeof(SSessionKey), pSup->resultRowSize, funResSize, sesionTs, pSup->pState, pTwAggSup->deleteMark, taskIdStr, pHandle->checkpointId, STREAM_STATE_BUFF_SORT); + QUERY_CHECK_NULL(pSup->pState->pFileState, code, lino, _end, terrno); _hash_fn_t hashFn = 
taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); pSup->pResultRows = tSimpleHashInit(32, hashFn); @@ -2179,8 +2187,11 @@ int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, SExprSupp* pExpSup, in pExpSup->pCtx[i].saveHandle.pState = pSup->pState; } - pSup->pSessionAPI = pApi; - return TSDB_CODE_SUCCESS; +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; } bool isInTimeWindow(STimeWindow* pWin, TSKEY ts, int64_t gap) { @@ -5308,9 +5319,11 @@ int32_t createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pInfo->pUpdatedMap = NULL; int32_t funResSize = getMaxFunResSize(pSup, numOfCols); + pInfo->stateStore = pTaskInfo->storageAPI.stateStore; pInfo->pState->pFileState = pTaskInfo->storageAPI.stateStore.streamFileStateInit( tsStreamBufferSize, sizeof(SWinKey), pInfo->aggSup.resultRowSize, funResSize, compareTs, pInfo->pState, pInfo->twAggSup.deleteMark, GET_TASKID(pTaskInfo), pHandle->checkpointId, STREAM_STATE_BUFF_HASH); + QUERY_CHECK_NULL(pInfo->pState->pFileState, code, lino, _error, terrno); setOperatorInfo(pOperator, "StreamIntervalOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL, true, OP_NOT_OPENED, pInfo, pTaskInfo); @@ -5319,7 +5332,6 @@ int32_t createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL); setOperatorStreamStateFn(pOperator, streamIntervalReleaseState, streamIntervalReloadState); - pInfo->stateStore = pTaskInfo->storageAPI.stateStore; pInfo->recvGetAll = false; code = createSpecialDataBlock(STREAM_CHECKPOINT, &pInfo->pCheckpointRes); diff --git a/source/libs/stream/src/tstreamFileState.c b/source/libs/stream/src/tstreamFileState.c index 5dacd4c80c..3cdbad2dd5 100644 --- a/source/libs/stream/src/tstreamFileState.c +++ b/source/libs/stream/src/tstreamFileState.c @@ -131,21 +131,27 @@ static void streamFileStateEncode(TSKEY* pKey, void** pVal, int32_t* pLen) { SStreamFileState* streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_t rowSize, uint32_t selectRowSize, GetTsFun fp, void* pFile, TSKEY delMark, const char* taskId, int64_t checkpointId, int8_t type) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; if (memSize <= 0) { memSize = DEFAULT_MAX_STREAM_BUFFER_SIZE; } if (rowSize == 0) { + code = TSDB_CODE_INVALID_PARA; goto _error; } SStreamFileState* pFileState = taosMemoryCalloc(1, sizeof(SStreamFileState)); - if (!pFileState) { - goto _error; - } + QUERY_CHECK_NULL(pFileState, code, lino, _error, terrno); + rowSize += selectRowSize; pFileState->maxRowCount = TMAX((uint64_t)memSize / rowSize, FLUSH_NUM * 2); pFileState->usedBuffs = tdListNew(POINTER_BYTES); + QUERY_CHECK_NULL(pFileState->usedBuffs, code, lino, _error, terrno); + pFileState->freeBuffs = tdListNew(POINTER_BYTES); + QUERY_CHECK_NULL(pFileState->freeBuffs, code, lino, _error, terrno); + _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); int32_t cap = TMIN(MIN_NUM_OF_ROW_BUFF, pFileState->maxRowCount); if (type == STREAM_STATE_BUFF_HASH) { @@ -171,10 +177,7 @@ SStreamFileState* streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_ pFileState->cfName = taosStrdup("sess"); pFileState->stateFunctionGetFn = getSessionRowBuff; } - - if (!pFileState->usedBuffs || !pFileState->freeBuffs || !pFileState->rowStateBuff) { - goto _error; - } + QUERY_CHECK_NULL(pFileState->rowStateBuff, code, lino, _error, terrno); pFileState->keyLen = keySize; pFileState->rowSize = rowSize; @@ -188,6 +191,7 @@ 
SStreamFileState* streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_ pFileState->flushMark = INT64_MIN; pFileState->maxTs = INT64_MIN; pFileState->id = taosStrdup(taskId); + QUERY_CHECK_NULL(pFileState->id, code, lino, _error, terrno); // todo(liuyao) optimize if (type == STREAM_STATE_BUFF_HASH) { @@ -198,8 +202,8 @@ SStreamFileState* streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_ void* valBuf = NULL; int32_t len = 0; - int32_t code = streamDefaultGet_rocksdb(pFileState->pFileStore, STREAM_STATE_INFO_NAME, &valBuf, &len); - if (code == TSDB_CODE_SUCCESS) { + int32_t tmpRes = streamDefaultGet_rocksdb(pFileState->pFileStore, STREAM_STATE_INFO_NAME, &valBuf, &len); + if (tmpRes == TSDB_CODE_SUCCESS) { ASSERT(len == sizeof(TSKEY)); streamFileStateDecode(&pFileState->flushMark, valBuf, len); qDebug("===stream===flushMark read:%" PRId64, pFileState->flushMark); @@ -208,6 +212,9 @@ SStreamFileState* streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_ return pFileState; _error: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } streamFileStateDestroy(pFileState); return NULL; } From 4e8d5e21a174d3032c97b25164feceaef8c43f86 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 16 Aug 2024 10:34:39 +0800 Subject: [PATCH 089/181] fix(tsdb): check tables number. --- source/dnode/vnode/src/tsdb/tsdbRead2.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index 6783eb2cbd..7d633e2520 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -5182,7 +5182,10 @@ int32_t tsdbNextDataBlock2(STsdbReader* pReader, bool* hasNext) { if (pReader->step == EXTERNAL_ROWS_PREV) { // prepare for the main scan - code = doOpenReaderImpl(pReader); + if (tSimpleHashGetSize(pReader->status.pTableMap) > 0) { + code = doOpenReaderImpl(pReader); + } + int32_t step = 1; resetAllDataBlockScanInfo(pReader->status.pTableMap, pReader->innerReader[0]->info.window.ekey, step); @@ -5210,8 +5213,11 @@ int32_t tsdbNextDataBlock2(STsdbReader* pReader, bool* hasNext) { if (pReader->step == EXTERNAL_ROWS_MAIN && pReader->innerReader[1] != NULL) { // prepare for the next row scan + if (tSimpleHashGetSize(pReader->status.pTableMap) > 0) { + code = doOpenReaderImpl(pReader->innerReader[1]); + } + int32_t step = -1; - code = doOpenReaderImpl(pReader->innerReader[1]); resetAllDataBlockScanInfo(pReader->innerReader[1]->status.pTableMap, pReader->info.window.ekey, step); if (code != TSDB_CODE_SUCCESS) { (void) tsdbReleaseReader(pReader); From 3d9380b51a5da3cfc598cb38cb5c6207f6d4ba9d Mon Sep 17 00:00:00 2001 From: sheyanjie-qq <249478495@qq.com> Date: Fri, 16 Aug 2024 11:28:03 +0800 Subject: [PATCH 090/181] update java sample code --- .../com/taos/example/ConsumerLoopFull.java | 43 +- .../com/taos/example/JdbcInsertDataDemo.java | 23 +- .../com/taos/example/WsConsumerLoopFull.java | 41 +- .../taosdata/example/ConsumerLoopFull.java | 384 ------------------ .../com/taosdata/example/JdbcCreatDBDemo.java | 55 --- .../taosdata/example/JdbcInsertDataDemo.java | 54 --- .../com/taosdata/example/JdbcQueryDemo.java | 57 --- .../com/taosdata/example/JdbcReqIdDemo.java | 64 --- .../example/ParameterBindingBasicDemo.java | 90 ---- .../taosdata/example/SchemalessJniTest.java | 47 --- .../taosdata/example/SchemalessWsTest.java | 47 --- .../example/WSParameterBindingBasicDemo.java | 69 ---- 
 .../taosdata/example/WsConsumerLoopFull.java       | 384 ------------------
 13 files changed, 67 insertions(+), 1291 deletions(-)
 delete mode 100644 examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerLoopFull.java
 delete mode 100644 examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcCreatDBDemo.java
 delete mode 100644 examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcInsertDataDemo.java
 delete mode 100644 examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcQueryDemo.java
 delete mode 100644 examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcReqIdDemo.java
 delete mode 100644 examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ParameterBindingBasicDemo.java
 delete mode 100644 examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessJniTest.java
 delete mode 100644 examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessWsTest.java
 delete mode 100644 examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WSParameterBindingBasicDemo.java
 delete mode 100644 examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java
diff --git a/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java b/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java
index 62dac019d7..ec9faf383e 100644
--- a/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java
+++ b/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java
@@ -19,8 +19,10 @@ public class ConsumerLoopFull {
     static private Connection connection;
     static private Statement statement;
     static private volatile boolean stopThread = false;
+    static private String groupId = "group1";
+    static private String clientId = "client1";
 
-    public static TaosConsumer getConsumer() throws Exception {
+    public static TaosConsumer getConsumer() throws Exception {
 // ANCHOR: create_consumer
         Properties config = new Properties();
         config.setProperty("td.connect.type", "jni");
@@ -30,7 +32,7 @@ public class ConsumerLoopFull {
         config.setProperty("enable.auto.commit", "true");
         config.setProperty("auto.commit.interval.ms", "1000");
         config.setProperty("group.id", "group1");
-        config.setProperty("client.id", "1");
+        config.setProperty("client.id", "client1");
         config.setProperty("td.connect.user", "root");
         config.setProperty("td.connect.pass", "taosdata");
         config.setProperty("value.deserializer", "com.taos.example.ConsumerLoopFull$ResultDeserializer");
@@ -45,8 +47,10 @@ public class ConsumerLoopFull {
             return consumer;
         } catch (Exception ex) {
             // please refer to the JDBC specifications for detailed exceptions info
-            System.out.printf("Failed to create native consumer, host: %s, %sErrMessage: %s%n",
+            System.out.printf("Failed to create native consumer, host: %s, groupId: %s, clientId: %s, %sErrMessage: %s%n",
                     config.getProperty("bootstrap.servers"),
+                    config.getProperty("group.id"),
+                    config.getProperty("client.id"),
                     ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "",
                     ex.getMessage());
             // Print stack trace for context in examples. Use logging in production.
@@ -58,9 +62,8 @@ public class ConsumerLoopFull { public static void pollExample(TaosConsumer consumer) throws SQLException { // ANCHOR: poll_data_code_piece + List topics = Collections.singletonList("topic_meters"); try { - List topics = Collections.singletonList("topic_meters"); - // subscribe to the topics consumer.subscribe(topics); System.out.println("Subscribe topics successfully."); @@ -69,13 +72,16 @@ public class ConsumerLoopFull { ConsumerRecords records = consumer.poll(Duration.ofMillis(100)); for (ConsumerRecord record : records) { ResultBean bean = record.value(); - // process the data here + // Add your data processing logic here System.out.println("data: " + JSON.toJSONString(bean)); } } } catch (Exception ex) { // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to poll data, %sErrMessage: %s%n", + System.out.printf("Failed to poll data, topic: %s, groupId: %s, clientId: %s, %sErrMessage: %s%n", + topics.get(0), + groupId, + clientId, ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", ex.getMessage()); // Print stack trace for context in examples. Use logging in production. @@ -87,9 +93,8 @@ public class ConsumerLoopFull { public static void seekExample(TaosConsumer consumer) throws SQLException { // ANCHOR: consumer_seek + List topics = Collections.singletonList("topic_meters"); try { - List topics = Collections.singletonList("topic_meters"); - // subscribe to the topics consumer.subscribe(topics); System.out.println("Subscribe topics successfully."); @@ -106,7 +111,10 @@ public class ConsumerLoopFull { System.out.println("Assignment seek to beginning successfully."); } catch (Exception ex) { // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to execute seek example, %sErrMessage: %s%n", + System.out.printf("Failed to seek offset, topic: %s, groupId: %s, clientId: %s, %sErrMessage: %s%n", + topics.get(0), + groupId, + clientId, ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", ex.getMessage()); // Print stack trace for context in examples. Use logging in production. @@ -119,15 +127,14 @@ public class ConsumerLoopFull { public static void commitExample(TaosConsumer consumer) throws SQLException { // ANCHOR: commit_code_piece + List topics = Collections.singletonList("topic_meters"); try { - List topics = Collections.singletonList("topic_meters"); - consumer.subscribe(topics); for (int i = 0; i < 50; i++) { ConsumerRecords records = consumer.poll(Duration.ofMillis(100)); for (ConsumerRecord record : records) { ResultBean bean = record.value(); - // process your data here + // Add your data processing logic here System.out.println("data: " + JSON.toJSONString(bean)); } if (!records.isEmpty()) { @@ -138,7 +145,10 @@ public class ConsumerLoopFull { } } catch (Exception ex) { // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to execute commit example, %sErrMessage: %s%n", + System.out.printf("Failed to commit offset, topic: %s, groupId: %s, clientId: %s, %sErrMessage: %s%n", + topics.get(0), + groupId, + clientId, ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", ex.getMessage()); // Print stack trace for context in examples. Use logging in production. 
@@ -158,7 +168,10 @@ public class ConsumerLoopFull { System.out.println("Consumer unsubscribed successfully."); } catch (Exception ex) { // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to unsubscribe consumer, %sErrMessage: %s%n", + System.out.printf("Failed to unsubscribe consumer, topic: %s, groupId: %s, clientId: %s, %sErrMessage: %s%n", + topics.get(0), + groupId, + clientId, ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", ex.getMessage()); // Print stack trace for context in examples. Use logging in production. diff --git a/docs/examples/java/src/main/java/com/taos/example/JdbcInsertDataDemo.java b/docs/examples/java/src/main/java/com/taos/example/JdbcInsertDataDemo.java index f19017193c..5c3599d819 100644 --- a/docs/examples/java/src/main/java/com/taos/example/JdbcInsertDataDemo.java +++ b/docs/examples/java/src/main/java/com/taos/example/JdbcInsertDataDemo.java @@ -25,25 +25,26 @@ public class JdbcInsertDataDemo { properties.setProperty("timezone", "UTC-8"); System.out.println("get connection starting..."); // ANCHOR: insert_data + // insert data, please make sure the database and table are created before + String insertQuery = "INSERT INTO " + + "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " + + "VALUES " + + "(NOW + 1a, 10.30000, 219, 0.31000) " + + "(NOW + 2a, 12.60000, 218, 0.33000) " + + "(NOW + 3a, 12.30000, 221, 0.31000) " + + "power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " + + "VALUES " + + "(NOW + 1a, 10.30000, 218, 0.25000) "; try (Connection connection = DriverManager.getConnection(jdbcUrl, properties); Statement stmt = connection.createStatement()) { - // insert data, please make sure the database and table are created before - String insertQuery = "INSERT INTO " + - "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " + - "VALUES " + - "(NOW + 1a, 10.30000, 219, 0.31000) " + - "(NOW + 2a, 12.60000, 218, 0.33000) " + - "(NOW + 3a, 12.30000, 221, 0.31000) " + - "power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " + - "VALUES " + - "(NOW + 1a, 10.30000, 218, 0.25000) "; int affectedRows = stmt.executeUpdate(insertQuery); // you can check affectedRows here System.out.println("Successfully inserted " + affectedRows + " rows to power.meters."); } catch (Exception ex) { // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to insert data to power.meters, %sErrMessage: %s%n", + System.out.printf("Failed to insert data to power.meters, sql: %s, %sErrMessage: %s%n", + insertQuery, ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", ex.getMessage()); // Print stack trace for context in examples. Use logging in production. 
diff --git a/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java b/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java
index 66c37f172e..6db65f47f2 100644
--- a/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java
+++ b/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java
@@ -19,6 +19,8 @@ public class WsConsumerLoopFull {
     static private Connection connection;
     static private Statement statement;
     static private volatile boolean stopThread = false;
+    static private String groupId = "group1";
+    static private String clientId = "client1";
 
     public static TaosConsumer getConsumer() throws Exception {
 // ANCHOR: create_consumer
@@ -30,7 +32,7 @@ public class WsConsumerLoopFull {
         config.setProperty("enable.auto.commit", "true");
         config.setProperty("auto.commit.interval.ms", "1000");
         config.setProperty("group.id", "group1");
-        config.setProperty("client.id", "1");
+        config.setProperty("client.id", "client1");
         config.setProperty("td.connect.user", "root");
         config.setProperty("td.connect.pass", "taosdata");
         config.setProperty("value.deserializer", "com.taos.example.WsConsumerLoopFull$ResultDeserializer");
@@ -45,8 +47,10 @@ public class WsConsumerLoopFull {
             return consumer;
         } catch (Exception ex) {
             // please refer to the JDBC specifications for detailed exceptions info
-            System.out.printf("Failed to create websocket consumer, host: %s, %sErrMessage: %s%n",
+            System.out.printf("Failed to create websocket consumer, host: %s, groupId: %s, clientId: %s, %sErrMessage: %s%n",
                     config.getProperty("bootstrap.servers"),
+                    config.getProperty("group.id"),
+                    config.getProperty("client.id"),
                     ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "",
                     ex.getMessage());
             // Print stack trace for context in examples. Use logging in production.
@@ -58,9 +62,8 @@ public class WsConsumerLoopFull {
 
     public static void pollExample(TaosConsumer consumer) throws SQLException {
 // ANCHOR: poll_data_code_piece
+        List topics = Collections.singletonList("topic_meters");
         try {
-            List topics = Collections.singletonList("topic_meters");
-
             // subscribe to the topics
             consumer.subscribe(topics);
             System.out.println("Subscribe topics successfully.");
@@ -69,13 +72,16 @@ public class WsConsumerLoopFull {
                 ConsumerRecords records = consumer.poll(Duration.ofMillis(100));
                 for (ConsumerRecord record : records) {
                     ResultBean bean = record.value();
-                    // process the data here
+                    // Add your data processing logic here
                     System.out.println("data: " + JSON.toJSONString(bean));
                 }
             }
         } catch (Exception ex) {
             // please refer to the JDBC specifications for detailed exceptions info
-            System.out.printf("Failed to poll data, %sErrMessage: %s%n",
+            System.out.printf("Failed to poll data, topic: %s, groupId: %s, clientId: %s, %sErrMessage: %s%n",
+                    topics.get(0),
+                    groupId,
+                    clientId,
                     ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "",
                     ex.getMessage());
             // Print stack trace for context in examples. Use logging in production.
@@ -87,9 +93,8 @@ public class WsConsumerLoopFull { public static void seekExample(TaosConsumer consumer) throws SQLException { // ANCHOR: consumer_seek + List topics = Collections.singletonList("topic_meters"); try { - List topics = Collections.singletonList("topic_meters"); - // subscribe to the topics consumer.subscribe(topics); System.out.println("Subscribe topics successfully."); @@ -106,7 +111,10 @@ public class WsConsumerLoopFull { System.out.println("Assignment seek to beginning successfully."); } catch (Exception ex) { // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to execute seek example, %sErrMessage: %s%n", + System.out.printf("Failed to seek offset, topic: %s, groupId: %s, clientId: %s, %sErrMessage: %s%n", + topics.get(0), + groupId, + clientId, ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", ex.getMessage()); // Print stack trace for context in examples. Use logging in production. @@ -119,15 +127,14 @@ public class WsConsumerLoopFull { public static void commitExample(TaosConsumer consumer) throws SQLException { // ANCHOR: commit_code_piece + List topics = Collections.singletonList("topic_meters"); try { - List topics = Collections.singletonList("topic_meters"); - consumer.subscribe(topics); for (int i = 0; i < 50; i++) { ConsumerRecords records = consumer.poll(Duration.ofMillis(100)); for (ConsumerRecord record : records) { ResultBean bean = record.value(); - // process your data here + // Add your data processing logic here System.out.println("data: " + JSON.toJSONString(bean)); } if (!records.isEmpty()) { @@ -138,7 +145,10 @@ public class WsConsumerLoopFull { } } catch (Exception ex) { // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to execute commit example, %sErrMessage: %s%n", + System.out.printf("Failed to commit offset, topic: %s, groupId: %s, clientId: %s, %sErrMessage: %s%n", + topics.get(0), + groupId, + clientId, ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", ex.getMessage()); // Print stack trace for context in examples. Use logging in production. @@ -158,7 +168,10 @@ public class WsConsumerLoopFull { System.out.println("Consumer unsubscribed successfully."); } catch (Exception ex) { // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to unsubscribe consumer, %sErrMessage: %s%n", + System.out.printf("Failed to unsubscribe consumer, topic: %s, groupId: %s, clientId: %s, %sErrMessage: %s%n", + topics.get(0), + groupId, + clientId, ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", ex.getMessage()); // Print stack trace for context in examples. Use logging in production. 
diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerLoopFull.java b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerLoopFull.java deleted file mode 100644 index ce9af5ecdc..0000000000 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerLoopFull.java +++ /dev/null @@ -1,384 +0,0 @@ -package com.taosdata.example; - -import com.alibaba.fastjson.JSON; -import com.taosdata.jdbc.TSDBDriver; -import com.taosdata.jdbc.tmq.*; - -import java.sql.*; -import java.time.Duration; -import java.util.Collections; -import java.util.List; -import java.util.Properties; -import java.util.Set; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; - -// ANCHOR: consumer_demo -public class ConsumerLoopFull { - static private Connection connection; - static private Statement statement; - static private volatile boolean stopThread = false; - - public static TaosConsumer getConsumer() throws Exception { -// ANCHOR: create_consumer - Properties config = new Properties(); - config.setProperty("td.connect.type", "jni"); - config.setProperty("bootstrap.servers", "localhost:6030"); - config.setProperty("auto.offset.reset", "latest"); - config.setProperty("msg.with.table.name", "true"); - config.setProperty("enable.auto.commit", "true"); - config.setProperty("auto.commit.interval.ms", "1000"); - config.setProperty("group.id", "group1"); - config.setProperty("client.id", "1"); - config.setProperty("td.connect.user", "root"); - config.setProperty("td.connect.pass", "taosdata"); - config.setProperty("value.deserializer", "com.taosdata.example.ConsumerLoopFull$ResultDeserializer"); - config.setProperty("value.deserializer.encoding", "UTF-8"); - - try { - TaosConsumer consumer= new TaosConsumer<>(config); - System.out.printf("Create consumer successfully, host: %s, groupId: %s, clientId: %s%n", - config.getProperty("bootstrap.servers"), - config.getProperty("group.id"), - config.getProperty("client.id")); - return consumer; - } catch (Exception ex) { - // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to create native consumer, host: %s, %sErrMessage: %s%n", - config.getProperty("bootstrap.servers"), - ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", - ex.getMessage()); - // Print stack trace for context in examples. Use logging in production. - ex.printStackTrace(); - throw ex; - } -// ANCHOR_END: create_consumer - } - - public static void pollExample(TaosConsumer consumer) throws SQLException { -// ANCHOR: poll_data_code_piece - try { - List topics = Collections.singletonList("topic_meters"); - - // subscribe to the topics - consumer.subscribe(topics); - System.out.println("Subscribe topics successfully."); - for (int i = 0; i < 50; i++) { - // poll data - ConsumerRecords records = consumer.poll(Duration.ofMillis(100)); - for (ConsumerRecord record : records) { - ResultBean bean = record.value(); - // process the data here - System.out.println("data: " + JSON.toJSONString(bean)); - } - } - } catch (Exception ex) { - // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to poll data, %sErrMessage: %s%n", - ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", - ex.getMessage()); - // Print stack trace for context in examples. Use logging in production. 
- ex.printStackTrace(); - throw ex; - } -// ANCHOR_END: poll_data_code_piece - } - - public static void seekExample(TaosConsumer consumer) throws SQLException { -// ANCHOR: consumer_seek - try { - List topics = Collections.singletonList("topic_meters"); - - // subscribe to the topics - consumer.subscribe(topics); - System.out.println("Subscribe topics successfully."); - Set assignment = consumer.assignment(); - System.out.println("Now assignment: " + JSON.toJSONString(assignment)); - - ConsumerRecords records = ConsumerRecords.emptyRecord(); - // make sure we have got some data - while (records.isEmpty()) { - records = consumer.poll(Duration.ofMillis(100)); - } - - consumer.seekToBeginning(assignment); - System.out.println("Assignment seek to beginning successfully."); - } catch (Exception ex) { - // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to execute seek example, %sErrMessage: %s%n", - ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", - ex.getMessage()); - // Print stack trace for context in examples. Use logging in production. - ex.printStackTrace(); - throw ex; - } -// ANCHOR_END: consumer_seek - } - - - public static void commitExample(TaosConsumer consumer) throws SQLException { -// ANCHOR: commit_code_piece - try { - List topics = Collections.singletonList("topic_meters"); - - consumer.subscribe(topics); - for (int i = 0; i < 50; i++) { - ConsumerRecords records = consumer.poll(Duration.ofMillis(100)); - for (ConsumerRecord record : records) { - ResultBean bean = record.value(); - // process your data here - System.out.println("data: " + JSON.toJSONString(bean)); - } - if (!records.isEmpty()) { - // after processing the data, commit the offset manually - consumer.commitSync(); - System.out.println("Commit offset manually successfully."); - } - } - } catch (Exception ex) { - // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to execute commit example, %sErrMessage: %s%n", - ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", - ex.getMessage()); - // Print stack trace for context in examples. Use logging in production. - ex.printStackTrace(); - throw ex; - } -// ANCHOR_END: commit_code_piece - } - - public static void unsubscribeExample(TaosConsumer consumer) throws SQLException { - List topics = Collections.singletonList("topic_meters"); - consumer.subscribe(topics); -// ANCHOR: unsubscribe_data_code_piece - try { - // unsubscribe the consumer - consumer.unsubscribe(); - System.out.println("Consumer unsubscribed successfully."); - } catch (Exception ex) { - // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to unsubscribe consumer, %sErrMessage: %s%n", - ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", - ex.getMessage()); - // Print stack trace for context in examples. Use logging in production. 
- ex.printStackTrace(); - throw ex; - } - finally { - // close the consumer - consumer.close(); - System.out.println("Consumer closed successfully."); - } -// ANCHOR_END: unsubscribe_data_code_piece - } - - public static class ResultDeserializer extends ReferenceDeserializer { - - } - - // use this class to define the data structure of the result record - public static class ResultBean { - private Timestamp ts; - private double current; - private int voltage; - private double phase; - private int groupid; - private String location; - - public Timestamp getTs() { - return ts; - } - - public void setTs(Timestamp ts) { - this.ts = ts; - } - - public double getCurrent() { - return current; - } - - public void setCurrent(double current) { - this.current = current; - } - - public int getVoltage() { - return voltage; - } - - public void setVoltage(int voltage) { - this.voltage = voltage; - } - - public double getPhase() { - return phase; - } - - public void setPhase(double phase) { - this.phase = phase; - } - - public int getGroupid() { - return groupid; - } - - public void setGroupid(int groupid) { - this.groupid = groupid; - } - - public String getLocation() { - return location; - } - - public void setLocation(String location) { - this.location = location; - } - } - - public static void prepareData() throws SQLException, InterruptedException { - try { - int i = 0; - while (!stopThread) { - String insertQuery = "INSERT INTO power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') VALUES (NOW + " + i + "a, 10.30000, 219, 0.31000) "; - int affectedRows = statement.executeUpdate(insertQuery); - assert affectedRows == 1; - i++; - Thread.sleep(1); - } - } catch (Exception ex) { - // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to insert data to power.meters, %sErrMessage: %s%n", - ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", - ex.getMessage()); - // Print stack trace for context in examples. Use logging in production. - ex.printStackTrace(); - throw ex; - } - } - - public static void prepareMeta() throws SQLException { - try { - statement.executeUpdate("CREATE DATABASE IF NOT EXISTS power"); - statement.executeUpdate("USE power"); - statement.executeUpdate("CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); - statement.executeUpdate("CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage, phase, groupid, location FROM meters"); - } catch (Exception ex) { - // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to create db and table, %sErrMessage: %s%n", - ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", - ex.getMessage()); - // Print stack trace for context in examples. Use logging in production. 
- ex.printStackTrace(); - throw ex; - } - } - - public static void initConnection() throws SQLException { - String url = "jdbc:TAOS://localhost:6030?user=root&password=taosdata"; - Properties properties = new Properties(); - properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "C"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); - - try { - connection = DriverManager.getConnection(url, properties); - } catch (SQLException ex) { - System.out.println("Failed to create connection, url:" + url + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to create connection", ex); - } - try { - statement = connection.createStatement(); - } catch (SQLException ex) { - System.out.println("Failed to create statement, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to create statement", ex); - } - System.out.println("Connection created successfully."); - } - - public static void closeConnection() throws SQLException { - try { - if (statement != null) { - statement.close(); - } - } catch (SQLException ex) { - System.out.println("Failed to close statement, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to close statement", ex); - } - - try { - if (connection != null) { - connection.close(); - } - } catch (SQLException ex) { - System.out.println("Failed to close connection, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to close connection", ex); - } - System.out.println("Connection closed Successfully."); - } - - - public static void main(String[] args) throws SQLException, InterruptedException { - initConnection(); - prepareMeta(); - - // create a single thread executor - ExecutorService executor = Executors.newSingleThreadExecutor(); - - // submit a task - executor.submit(() -> { - try { - prepareData(); - } catch (SQLException ex) { - System.out.println("Failed to prepare data, ErrCode:" + ex.getErrorCode() + ", ErrMessage: " + ex.getMessage()); - return; - } catch (Exception ex) { - System.out.println("Failed to prepare data, ErrMessage: " + ex.getMessage()); - return; - } - System.out.println("pollDataExample executed successfully."); - }); - - try { - TaosConsumer consumer = getConsumer(); - - pollExample(consumer); - System.out.println("pollExample executed successfully."); - consumer.unsubscribe(); - - seekExample(consumer); - System.out.println("seekExample executed successfully."); - consumer.unsubscribe(); - - commitExample(consumer); - System.out.println("commitExample executed successfully."); - consumer.unsubscribe(); - - unsubscribeExample(consumer); - System.out.println("unsubscribeExample executed successfully"); - } catch (SQLException ex) { - System.out.println("Failed to poll data from topic_meters, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - return; - } catch (Exception ex) { - System.out.println("Failed to poll data from topic_meters, ErrMessage: " + ex.getMessage()); - return; - } - - stopThread = true; - // close the executor, which will make the executor reject new tasks - executor.shutdown(); - - try { - // wait for the executor to terminate - boolean result = executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); - assert result; - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } catch (Exception e) { - e.printStackTrace(); - System.out.println("Wait executor termination 
failed."); - } - - closeConnection(); - System.out.println("program end."); - } -} -// ANCHOR_END: consumer_demo diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcCreatDBDemo.java b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcCreatDBDemo.java deleted file mode 100644 index 28d7d2d67b..0000000000 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcCreatDBDemo.java +++ /dev/null @@ -1,55 +0,0 @@ -package com.taosdata.example; - -import com.taosdata.jdbc.AbstractStatement; - -import java.sql.*; -import java.util.Properties; - -public class JdbcCreatDBDemo { - private static final String host = "localhost"; - private static final String dbName = "test"; - private static final String tbName = "weather"; - private static final String user = "root"; - private static final String password = "taosdata"; - - - public static void main(String[] args) throws SQLException { - - final String jdbcUrl = "jdbc:TAOS://" + host + ":6030/?user=" + user + "&password=" + password; - -// get connection - Properties properties = new Properties(); - properties.setProperty("charset", "UTF-8"); - properties.setProperty("locale", "en_US.UTF-8"); - properties.setProperty("timezone", "UTC-8"); - System.out.println("get connection starting..."); -// ANCHOR: create_db_and_table - try (Connection connection = DriverManager.getConnection(jdbcUrl, properties); - Statement stmt = connection.createStatement()) { - - // create database - int rowsAffected = stmt.executeUpdate("CREATE DATABASE IF NOT EXISTS power"); - // you can check rowsAffected here - System.out.println("Create database power successfully, rowsAffected: " + rowsAffected); - // create table - rowsAffected = stmt.executeUpdate("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); - // you can check rowsAffected here - System.out.println("Create stable power.meters successfully, rowsAffected: " + rowsAffected); - } catch (Exception ex) { - // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to create database power or stable meters, %sErrMessage: %s%n", - ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", - ex.getMessage()); - // Print stack trace for context in examples. Use logging in production. 
- ex.printStackTrace(); - throw ex; - } -// ANCHOR_END: create_db_and_table - - } - - private static void printResult(ResultSet resultSet) throws SQLException { - Util.printResult(resultSet); - } - -} diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcInsertDataDemo.java b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcInsertDataDemo.java deleted file mode 100644 index 08798b755c..0000000000 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcInsertDataDemo.java +++ /dev/null @@ -1,54 +0,0 @@ -package com.taosdata.example; - -import com.taosdata.jdbc.AbstractStatement; - -import java.sql.*; -import java.util.Properties; - -public class JdbcInsertDataDemo { - private static final String host = "localhost"; - private static final String dbName = "test"; - private static final String tbName = "weather"; - private static final String user = "root"; - private static final String password = "taosdata"; - - - public static void main(String[] args) throws SQLException { - - final String jdbcUrl = "jdbc:TAOS://" + host + ":6030/?user=" + user + "&password=" + password; - -// get connection - Properties properties = new Properties(); - properties.setProperty("charset", "UTF-8"); - properties.setProperty("locale", "en_US.UTF-8"); - properties.setProperty("timezone", "UTC-8"); - System.out.println("get connection starting..."); -// ANCHOR: insert_data - try (Connection connection = DriverManager.getConnection(jdbcUrl, properties); - Statement stmt = connection.createStatement()) { - - // insert data, please make sure the database and table are created before - String insertQuery = "INSERT INTO " + - "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " + - "VALUES " + - "(NOW + 1a, 10.30000, 219, 0.31000) " + - "(NOW + 2a, 12.60000, 218, 0.33000) " + - "(NOW + 3a, 12.30000, 221, 0.31000) " + - "power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " + - "VALUES " + - "(NOW + 1a, 10.30000, 218, 0.25000) "; - int affectedRows = stmt.executeUpdate(insertQuery); - // you can check affectedRows here - System.out.println("Successfully inserted " + affectedRows + " rows to power.meters."); - } catch (Exception ex) { - // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to insert data to power.meters, %sErrMessage: %s%n", - ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", - ex.getMessage()); - // Print stack trace for context in examples. Use logging in production. 
- ex.printStackTrace(); - throw ex; - } -// ANCHOR_END: insert_data - } -} diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcQueryDemo.java b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcQueryDemo.java deleted file mode 100644 index 768ba8929c..0000000000 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcQueryDemo.java +++ /dev/null @@ -1,57 +0,0 @@ -package com.taosdata.example; - -import com.taosdata.jdbc.AbstractStatement; - -import java.sql.*; -import java.util.Properties; - -public class JdbcQueryDemo { - private static final String host = "localhost"; - private static final String dbName = "test"; - private static final String tbName = "weather"; - private static final String user = "root"; - private static final String password = "taosdata"; - - - public static void main(String[] args) throws SQLException { - - final String jdbcUrl = "jdbc:TAOS://" + host + ":6030/?user=" + user + "&password=" + password; - -// get connection - Properties properties = new Properties(); - properties.setProperty("charset", "UTF-8"); - properties.setProperty("locale", "en_US.UTF-8"); - properties.setProperty("timezone", "UTC-8"); - System.out.println("get connection starting..."); -// ANCHOR: query_data - String sql = "SELECT ts, current, location FROM power.meters limit 100"; - try (Connection connection = DriverManager.getConnection(jdbcUrl, properties); - Statement stmt = connection.createStatement(); - // query data, make sure the database and table are created before - ResultSet resultSet = stmt.executeQuery(sql)) { - - Timestamp ts; - float current; - String location; - while (resultSet.next()) { - ts = resultSet.getTimestamp(1); - current = resultSet.getFloat(2); - // we recommend using the column name to get the value - location = resultSet.getString("location"); - - // you can check data here - System.out.printf("ts: %s, current: %f, location: %s %n", ts, current, location); - } - } catch (Exception ex) { - // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to query data from power.meters, sql: %s, %sErrMessage: %s%n", - sql, - ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", - ex.getMessage()); - // Print stack trace for context in examples. Use logging in production. 
- ex.printStackTrace(); - throw ex; - } -// ANCHOR_END: query_data - } -} diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcReqIdDemo.java b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcReqIdDemo.java deleted file mode 100644 index dd4b549bc5..0000000000 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcReqIdDemo.java +++ /dev/null @@ -1,64 +0,0 @@ -package com.taosdata.example; - -import com.taosdata.jdbc.AbstractStatement; - -import java.sql.*; -import java.util.Properties; - -public class JdbcReqIdDemo { - private static final String host = "localhost"; - private static final String dbName = "test"; - private static final String tbName = "weather"; - private static final String user = "root"; - private static final String password = "taosdata"; - - - public static void main(String[] args) throws SQLException { - - final String jdbcUrl = "jdbc:TAOS://" + host + ":6030/?user=" + user + "&password=" + password; - -// get connection - Properties properties = new Properties(); - properties.setProperty("charset", "UTF-8"); - properties.setProperty("locale", "en_US.UTF-8"); - properties.setProperty("timezone", "UTC-8"); - System.out.println("get connection starting..."); - -// ANCHOR: with_reqid - long reqId = 3L; - try (Connection connection = DriverManager.getConnection(jdbcUrl, properties); - // Create a statement that allows specifying a request ID - AbstractStatement aStmt = (AbstractStatement) connection.createStatement()) { - - try (ResultSet resultSet = aStmt.executeQuery("SELECT ts, current, location FROM power.meters limit 1", reqId)) { - Timestamp ts; - float current; - String location; - while (resultSet.next()) { - ts = resultSet.getTimestamp(1); - current = resultSet.getFloat(2); - // we recommend using the column name to get the value - location = resultSet.getString("location"); - - // you can check data here - System.out.printf("ts: %s, current: %f, location: %s %n", ts, current, location); - - } - } - } catch (Exception ex) { - // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to execute sql with reqId: %s, %sErrMessage: %s%n", reqId, - ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", - ex.getMessage()); - // Print stack trace for context in examples. Use logging in production. 
- ex.printStackTrace(); - throw ex; - } -// ANCHOR_END: with_reqid - } - - private static void printResult(ResultSet resultSet) throws SQLException { - Util.printResult(resultSet); - } - -} diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ParameterBindingBasicDemo.java b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ParameterBindingBasicDemo.java deleted file mode 100644 index b5732f0e33..0000000000 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ParameterBindingBasicDemo.java +++ /dev/null @@ -1,90 +0,0 @@ -package com.taosdata.example; - -import com.taosdata.jdbc.TSDBPreparedStatement; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.Random; - -// ANCHOR: para_bind -public class ParameterBindingBasicDemo { - - // modify host to your own - private static final String host = "127.0.0.1"; - private static final Random random = new Random(System.currentTimeMillis()); - private static final int numOfSubTable = 10, numOfRow = 10; - - public static void main(String[] args) throws SQLException { - - String jdbcUrl = "jdbc:TAOS://" + host + ":6030/"; - try (Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata")) { - - init(conn); - - String sql = "INSERT INTO ? USING power.meters TAGS(?,?) VALUES (?,?,?,?)"; - - try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) { - - for (int i = 1; i <= numOfSubTable; i++) { - // set table name - pstmt.setTableName("d_bind_" + i); - - // set tags - pstmt.setTagInt(0, i); - pstmt.setTagString(1, "location_" + i); - - // set column ts - ArrayList tsList = new ArrayList<>(); - long current = System.currentTimeMillis(); - for (int j = 0; j < numOfRow; j++) - tsList.add(current + j); - pstmt.setTimestamp(0, tsList); - - // set column current - ArrayList currentList = new ArrayList<>(); - for (int j = 0; j < numOfRow; j++) - currentList.add(random.nextFloat() * 30); - pstmt.setFloat(1, currentList); - - // set column voltage - ArrayList voltageList = new ArrayList<>(); - for (int j = 0; j < numOfRow; j++) - voltageList.add(random.nextInt(300)); - pstmt.setInt(2, voltageList); - - // set column phase - ArrayList phaseList = new ArrayList<>(); - for (int j = 0; j < numOfRow; j++) - phaseList.add(random.nextFloat()); - pstmt.setFloat(3, phaseList); - // add column - pstmt.columnDataAddBatch(); - } - // execute column - pstmt.columnDataExecuteBatch(); - // you can check exeResult here - System.out.println("Successfully inserted " + (numOfSubTable * numOfRow) + " rows to power.meters."); - } - } catch (Exception ex) { - // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to insert to table meters using stmt, %sErrMessage: %s%n", - ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", - ex.getMessage()); - // Print stack trace for context in examples. Use logging in production. 
- ex.printStackTrace(); - throw ex; - } - } - - private static void init(Connection conn) throws SQLException { - try (Statement stmt = conn.createStatement()) { - stmt.execute("CREATE DATABASE IF NOT EXISTS power"); - stmt.execute("USE power"); - stmt.execute("CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); - } - } -} -// ANCHOR_END: para_bind diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessJniTest.java b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessJniTest.java deleted file mode 100644 index 5b1ce51be6..0000000000 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessJniTest.java +++ /dev/null @@ -1,47 +0,0 @@ -package com.taosdata.example; - -import com.taosdata.jdbc.AbstractConnection; -import com.taosdata.jdbc.enums.SchemalessProtocolType; -import com.taosdata.jdbc.enums.SchemalessTimestampType; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; -import java.sql.Statement; - -// ANCHOR: schemaless -public class SchemalessJniTest { - private static final String host = "127.0.0.1"; - private static final String lineDemo = "meters,groupid=2,location=California.SanFrancisco current=10.3000002f64,voltage=219i32,phase=0.31f64 1626006833639"; - private static final String telnetDemo = "metric_telnet 1707095283260 4 host=host0 interface=eth0"; - private static final String jsonDemo = "{\"metric\": \"metric_json\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}"; - - public static void main(String[] args) throws SQLException { - final String jdbcUrl = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; - try (Connection connection = DriverManager.getConnection(jdbcUrl)) { - init(connection); - AbstractConnection conn = connection.unwrap(AbstractConnection.class); - - conn.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.MILLI_SECONDS); - conn.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS); - conn.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.NOT_CONFIGURED); - System.out.println("Inserted data with schemaless successfully."); - } catch (Exception ex) { - // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to insert data with schemaless, %sErrMessage: %s%n", - ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", - ex.getMessage()); - // Print stack trace for context in examples. Use logging in production. 
- ex.printStackTrace(); - throw ex; - } - } - - private static void init(Connection connection) throws SQLException { - try (Statement stmt = connection.createStatement()) { - stmt.execute("CREATE DATABASE IF NOT EXISTS power"); - stmt.execute("USE power"); - } - } -} -// ANCHOR_END: schemaless diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessWsTest.java b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessWsTest.java deleted file mode 100644 index 0f15e70224..0000000000 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessWsTest.java +++ /dev/null @@ -1,47 +0,0 @@ -package com.taosdata.example; - -import com.taosdata.jdbc.AbstractConnection; -import com.taosdata.jdbc.enums.SchemalessProtocolType; -import com.taosdata.jdbc.enums.SchemalessTimestampType; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; -import java.sql.Statement; - -// ANCHOR: schemaless -public class SchemalessWsTest { - private static final String host = "127.0.0.1"; - private static final String lineDemo = "meters,groupid=2,location=California.SanFrancisco current=10.3000002f64,voltage=219i32,phase=0.31f64 1626006833639"; - private static final String telnetDemo = "metric_telnet 1707095283260 4 host=host0 interface=eth0"; - private static final String jsonDemo = "{\"metric\": \"metric_json\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}"; - - public static void main(String[] args) throws SQLException { - final String url = "jdbc:TAOS-RS://" + host + ":6041?user=root&password=taosdata&batchfetch=true"; - try(Connection connection = DriverManager.getConnection(url)){ - init(connection); - AbstractConnection conn = connection.unwrap(AbstractConnection.class); - - conn.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.MILLI_SECONDS); - conn.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS); - conn.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS); - System.out.println("Inserted data with schemaless successfully."); - } catch (Exception ex) { - // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to insert data with schemaless, %sErrMessage: %s%n", - ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", - ex.getMessage()); - // Print stack trace for context in examples. Use logging in production. 
- ex.printStackTrace(); - throw ex; - } - } - - private static void init(Connection connection) throws SQLException { - try (Statement stmt = connection.createStatement()) { - stmt.execute("CREATE DATABASE IF NOT EXISTS power"); - stmt.execute("USE power"); - } - } -} -// ANCHOR_END: schemaless diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WSParameterBindingBasicDemo.java b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WSParameterBindingBasicDemo.java deleted file mode 100644 index 792ee4ed2d..0000000000 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WSParameterBindingBasicDemo.java +++ /dev/null @@ -1,69 +0,0 @@ -package com.taosdata.example; - -import com.taosdata.jdbc.TSDBPreparedStatement; -import com.taosdata.jdbc.ws.TSWSPreparedStatement; - -import java.sql.*; -import java.util.ArrayList; -import java.util.Random; - -// ANCHOR: para_bind -public class WSParameterBindingBasicDemo { - - // modify host to your own - private static final String host = "127.0.0.1"; - private static final Random random = new Random(System.currentTimeMillis()); - private static final int numOfSubTable = 10, numOfRow = 10; - - public static void main(String[] args) throws SQLException { - - String jdbcUrl = "jdbc:TAOS-RS://" + host + ":6041/?batchfetch=true"; - try (Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata")) { - init(conn); - - String sql = "INSERT INTO ? USING power.meters TAGS(?,?) VALUES (?,?,?,?)"; - - try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) { - - for (int i = 1; i <= numOfSubTable; i++) { - // set table name - pstmt.setTableName("d_bind_" + i); - - // set tags - pstmt.setTagInt(0, i); - pstmt.setTagString(1, "location_" + i); - - // set columns - long current = System.currentTimeMillis(); - for (int j = 0; j < numOfRow; j++) { - pstmt.setTimestamp(1, new Timestamp(current + j)); - pstmt.setFloat(2, random.nextFloat() * 30); - pstmt.setInt(3, random.nextInt(300)); - pstmt.setFloat(4, random.nextFloat()); - pstmt.addBatch(); - } - int [] exeResult = pstmt.executeBatch(); - // you can check exeResult here - System.out.println("Successfully inserted " + exeResult.length + " rows to power.meters."); - } - } - } catch (Exception ex) { - // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to insert to table meters using stmt, %sErrMessage: %s%n", - ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", - ex.getMessage()); - // Print stack trace for context in examples. Use logging in production. 
- ex.printStackTrace(); - throw ex; - } - } - - private static void init(Connection conn) throws SQLException { - try (Statement stmt = conn.createStatement()) { - stmt.execute("CREATE DATABASE IF NOT EXISTS power"); - stmt.execute("USE power"); - stmt.execute("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); - } - } -} -// ANCHOR_END: para_bind diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java deleted file mode 100644 index 17380023cd..0000000000 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java +++ /dev/null @@ -1,384 +0,0 @@ -package com.taosdata.example; - -import com.alibaba.fastjson.JSON; -import com.taosdata.jdbc.TSDBDriver; -import com.taosdata.jdbc.tmq.*; - -import java.sql.*; -import java.time.Duration; -import java.util.Collections; -import java.util.List; -import java.util.Properties; -import java.util.Set; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; - -// ANCHOR: consumer_demo -public class WsConsumerLoopFull { - static private Connection connection; - static private Statement statement; - static private volatile boolean stopThread = false; - - public static TaosConsumer getConsumer() throws Exception { -// ANCHOR: create_consumer - Properties config = new Properties(); - config.setProperty("td.connect.type", "ws"); - config.setProperty("bootstrap.servers", "localhost:6041"); - config.setProperty("auto.offset.reset", "latest"); - config.setProperty("msg.with.table.name", "true"); - config.setProperty("enable.auto.commit", "true"); - config.setProperty("auto.commit.interval.ms", "1000"); - config.setProperty("group.id", "group1"); - config.setProperty("client.id", "1"); - config.setProperty("td.connect.user", "root"); - config.setProperty("td.connect.pass", "taosdata"); - config.setProperty("value.deserializer", "com.taosdata.example.WsConsumerLoopFull$ResultDeserializer"); - config.setProperty("value.deserializer.encoding", "UTF-8"); - - try { - TaosConsumer consumer= new TaosConsumer<>(config); - System.out.printf("Create consumer successfully, host: %s, groupId: %s, clientId: %s%n", - config.getProperty("bootstrap.servers"), - config.getProperty("group.id"), - config.getProperty("client.id")); - return consumer; - } catch (Exception ex) { - // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to create websocket consumer, host: %s, %sErrMessage: %s%n", - config.getProperty("bootstrap.servers"), - ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", - ex.getMessage()); - // Print stack trace for context in examples. Use logging in production. 
- ex.printStackTrace(); - throw ex; - } -// ANCHOR_END: create_consumer - } - - public static void pollExample(TaosConsumer consumer) throws SQLException { -// ANCHOR: poll_data_code_piece - try { - List topics = Collections.singletonList("topic_meters"); - - // subscribe to the topics - consumer.subscribe(topics); - System.out.println("Subscribe topics successfully."); - for (int i = 0; i < 50; i++) { - // poll data - ConsumerRecords records = consumer.poll(Duration.ofMillis(100)); - for (ConsumerRecord record : records) { - ResultBean bean = record.value(); - // process the data here - System.out.println("data: " + JSON.toJSONString(bean)); - } - } - } catch (Exception ex) { - // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to poll data, %sErrMessage: %s%n", - ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", - ex.getMessage()); - // Print stack trace for context in examples. Use logging in production. - ex.printStackTrace(); - throw ex; - } -// ANCHOR_END: poll_data_code_piece - } - - public static void seekExample(TaosConsumer consumer) throws SQLException { -// ANCHOR: consumer_seek - try { - List topics = Collections.singletonList("topic_meters"); - - // subscribe to the topics - consumer.subscribe(topics); - System.out.println("Subscribe topics successfully."); - Set assignment = consumer.assignment(); - System.out.println("Now assignment: " + JSON.toJSONString(assignment)); - - ConsumerRecords records = ConsumerRecords.emptyRecord(); - // make sure we have got some data - while (records.isEmpty()) { - records = consumer.poll(Duration.ofMillis(100)); - } - - consumer.seekToBeginning(assignment); - System.out.println("Assignment seek to beginning successfully."); - } catch (Exception ex) { - // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to execute seek example, %sErrMessage: %s%n", - ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", - ex.getMessage()); - // Print stack trace for context in examples. Use logging in production. - ex.printStackTrace(); - throw ex; - } -// ANCHOR_END: consumer_seek - } - - - public static void commitExample(TaosConsumer consumer) throws SQLException { -// ANCHOR: commit_code_piece - try { - List topics = Collections.singletonList("topic_meters"); - - consumer.subscribe(topics); - for (int i = 0; i < 50; i++) { - ConsumerRecords records = consumer.poll(Duration.ofMillis(100)); - for (ConsumerRecord record : records) { - ResultBean bean = record.value(); - // process your data here - System.out.println("data: " + JSON.toJSONString(bean)); - } - if (!records.isEmpty()) { - // after processing the data, commit the offset manually - consumer.commitSync(); - System.out.println("Commit offset manually successfully."); - } - } - } catch (Exception ex) { - // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to execute commit example, %sErrMessage: %s%n", - ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", - ex.getMessage()); - // Print stack trace for context in examples. Use logging in production. 
- ex.printStackTrace(); - throw ex; - } -// ANCHOR_END: commit_code_piece - } - - public static void unsubscribeExample(TaosConsumer consumer) throws SQLException { - List topics = Collections.singletonList("topic_meters"); - consumer.subscribe(topics); -// ANCHOR: unsubscribe_data_code_piece - try { - // unsubscribe the consumer - consumer.unsubscribe(); - System.out.println("Consumer unsubscribed successfully."); - } catch (Exception ex) { - // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to unsubscribe consumer, %sErrMessage: %s%n", - ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", - ex.getMessage()); - // Print stack trace for context in examples. Use logging in production. - ex.printStackTrace(); - throw ex; - } - finally { - // close the consumer - consumer.close(); - System.out.println("Consumer closed successfully."); - } -// ANCHOR_END: unsubscribe_data_code_piece - } - - public static class ResultDeserializer extends ReferenceDeserializer { - - } - - // use this class to define the data structure of the result record - public static class ResultBean { - private Timestamp ts; - private double current; - private int voltage; - private double phase; - private int groupid; - private String location; - - public Timestamp getTs() { - return ts; - } - - public void setTs(Timestamp ts) { - this.ts = ts; - } - - public double getCurrent() { - return current; - } - - public void setCurrent(double current) { - this.current = current; - } - - public int getVoltage() { - return voltage; - } - - public void setVoltage(int voltage) { - this.voltage = voltage; - } - - public double getPhase() { - return phase; - } - - public void setPhase(double phase) { - this.phase = phase; - } - - public int getGroupid() { - return groupid; - } - - public void setGroupid(int groupid) { - this.groupid = groupid; - } - - public String getLocation() { - return location; - } - - public void setLocation(String location) { - this.location = location; - } - } - - public static void prepareData() throws SQLException, InterruptedException { - try { - int i = 0; - while (!stopThread) { - String insertQuery = "INSERT INTO power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') VALUES (NOW + " + i + "a, 10.30000, 219, 0.31000) "; - int affectedRows = statement.executeUpdate(insertQuery); - assert affectedRows == 1; - i++; - Thread.sleep(1); - } - } catch (Exception ex) { - // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to insert data to power.meters, %sErrMessage: %s%n", - ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", - ex.getMessage()); - // Print stack trace for context in examples. Use logging in production. - ex.printStackTrace(); - throw ex; - } - } - - public static void prepareMeta() throws SQLException { - try { - statement.executeUpdate("CREATE DATABASE IF NOT EXISTS power"); - statement.executeUpdate("USE power"); - statement.executeUpdate("CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); - statement.executeUpdate("CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage, phase, groupid, location FROM meters"); - } catch (Exception ex) { - // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to create db and table, %sErrMessage: %s%n", - ex instanceof SQLException ? 
"ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", - ex.getMessage()); - // Print stack trace for context in examples. Use logging in production. - ex.printStackTrace(); - throw ex; - } - } - - public static void initConnection() throws SQLException { - String url = "jdbc:TAOS://localhost:6030?user=root&password=taosdata"; - Properties properties = new Properties(); - properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "C"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); - - try { - connection = DriverManager.getConnection(url, properties); - } catch (SQLException ex) { - System.out.println("Failed to create connection, url:" + url + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to create connection", ex); - } - try { - statement = connection.createStatement(); - } catch (SQLException ex) { - System.out.println("Failed to create statement, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to create statement", ex); - } - System.out.println("Connection created successfully."); - } - - public static void closeConnection() throws SQLException { - try { - if (statement != null) { - statement.close(); - } - } catch (SQLException ex) { - System.out.println("Failed to close statement, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to close statement", ex); - } - - try { - if (connection != null) { - connection.close(); - } - } catch (SQLException ex) { - System.out.println("Failed to close connection, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to close connection", ex); - } - System.out.println("Connection closed Successfully."); - } - - - public static void main(String[] args) throws SQLException, InterruptedException { - initConnection(); - prepareMeta(); - - // create a single thread executor - ExecutorService executor = Executors.newSingleThreadExecutor(); - - // submit a task - executor.submit(() -> { - try { - prepareData(); - } catch (SQLException ex) { - System.out.println("Failed to prepare data, ErrCode:" + ex.getErrorCode() + ", ErrMessage: " + ex.getMessage()); - return; - } catch (Exception ex) { - System.out.println("Failed to prepare data, ErrMessage: " + ex.getMessage()); - return; - } - System.out.println("pollDataExample executed successfully."); - }); - - try { - TaosConsumer consumer = getConsumer(); - - pollExample(consumer); - System.out.println("pollExample executed successfully."); - consumer.unsubscribe(); - - seekExample(consumer); - System.out.println("seekExample executed successfully."); - consumer.unsubscribe(); - - commitExample(consumer); - System.out.println("commitExample executed successfully."); - consumer.unsubscribe(); - - unsubscribeExample(consumer); - System.out.println("unsubscribeExample executed successfully"); - } catch (SQLException ex) { - System.out.println("Failed to poll data from topic_meters, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - return; - } catch (Exception ex) { - System.out.println("Failed to poll data from topic_meters, ErrMessage: " + ex.getMessage()); - return; - } - - stopThread = true; - // close the executor, which will make the executor reject new tasks - executor.shutdown(); - - try { - // wait for the executor to terminate - boolean result = executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); - assert result; - } catch 
(InterruptedException e) { - Thread.currentThread().interrupt(); - } catch (Exception e) { - e.printStackTrace(); - System.out.println("Wait executor termination failed."); - } - - closeConnection(); - System.out.println("program end."); - } -} -// ANCHOR_END: consumer_demo From 9059454c905b6125525fc091ac469fe9652132c9 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 16 Aug 2024 13:11:10 +0800 Subject: [PATCH 091/181] fix(tsdb): check return value. --- source/dnode/vnode/src/tsdb/tsdbMergeTree.c | 69 ++++++++++++--------- 1 file changed, 41 insertions(+), 28 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c index e55ede560e..77a82e0dd9 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c +++ b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c @@ -146,10 +146,14 @@ static void updateBlockLoadSlot(SSttBlockLoadInfo *pLoadInfo) { pLoadInfo->currentLoadBlockIndex = nextSlotIndex; } -static SBlockData *loadLastBlock(SLDataIter *pIter, const char *idStr) { - int32_t code = 0; +static int32_t loadLastBlock(SLDataIter *pIter, const char *idStr, SBlockData **pResBlock) { + if (pResBlock != NULL) { + *pResBlock = NULL; + } + int32_t code = 0; SSttBlockLoadInfo *pInfo = pIter->pBlockLoadInfo; + if (pInfo->blockData[0].sttBlockIndex == pIter->iSttBlk) { if (pInfo->currentLoadBlockIndex != 0) { tsdbDebug("current load index is set to 0, block index:%d, fileVer:%" PRId64 ", due to uid:%" PRIu64 @@ -157,7 +161,9 @@ static SBlockData *loadLastBlock(SLDataIter *pIter, const char *idStr) { pIter->iSttBlk, pIter->cid, pIter->uid, idStr); pInfo->currentLoadBlockIndex = 0; } - return &pInfo->blockData[0].data; + + *pResBlock = &pInfo->blockData[0].data; + return code; } if (pInfo->blockData[1].sttBlockIndex == pIter->iSttBlk) { @@ -167,11 +173,13 @@ static SBlockData *loadLastBlock(SLDataIter *pIter, const char *idStr) { pIter->iSttBlk, pIter->cid, pIter->uid, idStr); pInfo->currentLoadBlockIndex = 1; } - return &pInfo->blockData[1].data; + + *pResBlock = &pInfo->blockData[1].data; + return code; } if (pIter->pSttBlk == NULL || pInfo->pSchema == NULL) { - return NULL; + return code; } updateBlockLoadSlot(pInfo); @@ -181,7 +189,7 @@ static SBlockData *loadLastBlock(SLDataIter *pIter, const char *idStr) { code = tsdbSttFileReadBlockDataByColumn(pIter->pReader, pIter->pSttBlk, pBlock, pInfo->pSchema, &pInfo->colIds[1], pInfo->numOfCols - 1); if (code != TSDB_CODE_SUCCESS) { - goto _exit; + return code; } double el = (taosGetTimestampUs() - st) / 1000.0; @@ -200,14 +208,9 @@ static SBlockData *loadLastBlock(SLDataIter *pIter, const char *idStr) { tsdbDebug("last block index list:%d, %d, rowIndex:%d %s", pInfo->blockData[0].sttBlockIndex, pInfo->blockData[1].sttBlockIndex, pIter->iRow, idStr); - return &pInfo->blockData[pInfo->currentLoadBlockIndex].data; -_exit: - if (code != TSDB_CODE_SUCCESS) { - terrno = code; - } - - return NULL; + *pResBlock = &pInfo->blockData[pInfo->currentLoadBlockIndex].data; + return code; } // find the earliest block that contains the required records @@ -735,12 +738,17 @@ void tLDataIterNextBlock(SLDataIter *pIter, const char *idStr) { } } -static void findNextValidRow(SLDataIter *pIter, const char *idStr) { - bool hasVal = false; - int32_t step = pIter->backward ? -1 : 1; - int32_t i = pIter->iRow; +static int32_t findNextValidRow(SLDataIter *pIter, const char *idStr) { + bool hasVal = false; + int32_t step = pIter->backward ? 
-1 : 1; + int32_t i = pIter->iRow; + SBlockData *pData = NULL; - SBlockData *pData = loadLastBlock(pIter, idStr); + int32_t code = loadLastBlock(pIter, idStr, &pData); + if (code) { + tsdbError("failed to load stt block, code:%s, %s", tstrerror(code), idStr); + return code; + } // mostly we only need to find the start position for a given table if ((((i == 0) && (!pIter->backward)) || (i == pData->nRow - 1 && pIter->backward)) && pData->aUid != NULL) { @@ -748,7 +756,7 @@ static void findNextValidRow(SLDataIter *pIter, const char *idStr) { if (i == -1) { tsdbDebug("failed to find the data in pBlockData, uid:%" PRIu64 " , %s", pIter->uid, idStr); pIter->iRow = -1; - return; + return code; } } @@ -817,20 +825,22 @@ static void findNextValidRow(SLDataIter *pIter, const char *idStr) { } pIter->iRow = (hasVal) ? i : -1; + return code; } bool tLDataIterNextRow(SLDataIter *pIter, const char *idStr) { - int32_t step = pIter->backward ? -1 : 1; - terrno = TSDB_CODE_SUCCESS; + int32_t step = pIter->backward ? -1 : 1; + int32_t code = 0; + int32_t iBlockL = pIter->iSttBlk; + SBlockData *pBlockData = NULL; // no qualified last file block in current file, no need to fetch row if (pIter->pSttBlk == NULL) { return false; } - int32_t iBlockL = pIter->iSttBlk; - SBlockData *pBlockData = loadLastBlock(pIter, idStr); - if (pBlockData == NULL || terrno != TSDB_CODE_SUCCESS) { + code = loadLastBlock(pIter, idStr, &pBlockData); + if (pBlockData == NULL || code != TSDB_CODE_SUCCESS) { goto _exit; } @@ -838,7 +848,10 @@ bool tLDataIterNextRow(SLDataIter *pIter, const char *idStr) { while (1) { bool skipBlock = false; - findNextValidRow(pIter, idStr); + code = findNextValidRow(pIter, idStr); + if (code) { + goto _exit; + } if (pIter->pBlockLoadInfo->checkRemainingRow) { skipBlock = true; @@ -873,8 +886,8 @@ bool tLDataIterNextRow(SLDataIter *pIter, const char *idStr) { } if (iBlockL != pIter->iSttBlk) { - pBlockData = loadLastBlock(pIter, idStr); - if (pBlockData == NULL) { + code = loadLastBlock(pIter, idStr, &pBlockData); + if ((pBlockData == NULL) || (code != 0)) { goto _exit; } @@ -888,7 +901,7 @@ bool tLDataIterNextRow(SLDataIter *pIter, const char *idStr) { pIter->rInfo.row = tsdbRowFromBlockData(pBlockData, pIter->iRow); _exit: - return (terrno == TSDB_CODE_SUCCESS) && (pIter->pSttBlk != NULL) && (pBlockData != NULL); + return (code == TSDB_CODE_SUCCESS) && (pIter->pSttBlk != NULL) && (pBlockData != NULL); } // SMergeTree ================================================= From 5361d5f38a0ff405b1511dfd1fd2fdaf0bf71d1f Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Fri, 16 Aug 2024 13:27:51 +0800 Subject: [PATCH 092/181] fix: possible delete data loss when stt_trigger = 1 --- source/dnode/vnode/src/tsdb/tsdbCommit2.c | 48 ++++++++++------------- 1 file changed, 21 insertions(+), 27 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbCommit2.c b/source/dnode/vnode/src/tsdb/tsdbCommit2.c index 4467102d6f..3c407b31cf 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCommit2.c +++ b/source/dnode/vnode/src/tsdb/tsdbCommit2.c @@ -157,41 +157,35 @@ static int32_t tsdbCommitTombData(SCommitter2 *committer) { int64_t numRecord = 0; SMetaInfo info; - if (committer->tsdb->imem->nDel == 0) { - goto _exit; - } + // if no history data and no new timestamp data, skip tomb data + if (committer->ctx->info->fset || committer->ctx->hasTSData) { + committer->ctx->tbid->suid = 0; + committer->ctx->tbid->uid = 0; + for (STombRecord *record; (record = tsdbIterMergerGetTombRecord(committer->tombIterMerger));) { + if (record->uid 
!= committer->ctx->tbid->uid) { + committer->ctx->tbid->suid = record->suid; + committer->ctx->tbid->uid = record->uid; - // do not need to write tomb data if there is no ts data - bool skip = (committer->ctx->info->fset == NULL && !committer->ctx->hasTSData); - - committer->ctx->tbid->suid = 0; - committer->ctx->tbid->uid = 0; - for (STombRecord *record; (record = tsdbIterMergerGetTombRecord(committer->tombIterMerger));) { - if (record->uid != committer->ctx->tbid->uid) { - committer->ctx->tbid->suid = record->suid; - committer->ctx->tbid->uid = record->uid; - - if (metaGetInfo(committer->tsdb->pVnode->pMeta, record->uid, &info, NULL) != 0) { - TAOS_CHECK_GOTO(tsdbIterMergerSkipTableData(committer->tombIterMerger, committer->ctx->tbid), &lino, _exit); - continue; + if (metaGetInfo(committer->tsdb->pVnode->pMeta, record->uid, &info, NULL) != 0) { + TAOS_CHECK_GOTO(tsdbIterMergerSkipTableData(committer->tombIterMerger, committer->ctx->tbid), &lino, _exit); + continue; + } } - } - if (record->ekey < committer->ctx->minKey) { - // do nothing - } else if (record->skey > committer->ctx->maxKey) { - // committer->ctx->nextKey = TMIN(record->skey, committer->ctx->nextKey); - } else { - record->skey = TMAX(record->skey, committer->ctx->minKey); - record->ekey = TMIN(record->ekey, committer->ctx->maxKey); + if (record->ekey < committer->ctx->minKey) { + // do nothing + } else if (record->skey > committer->ctx->maxKey) { + // committer->ctx->nextKey = TMIN(record->skey, committer->ctx->nextKey); + } else { + record->skey = TMAX(record->skey, committer->ctx->minKey); + record->ekey = TMIN(record->ekey, committer->ctx->maxKey); - if (!skip) { numRecord++; TAOS_CHECK_GOTO(tsdbFSetWriteTombRecord(committer->writer, record), &lino, _exit); } - } - TAOS_CHECK_GOTO(tsdbIterMergerNext(committer->tombIterMerger), &lino, _exit); + TAOS_CHECK_GOTO(tsdbIterMergerNext(committer->tombIterMerger), &lino, _exit); + } } _exit: From bb0aa6839f92936af2f045ecb98fb115b906ba34 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao> Date: Fri, 16 Aug 2024 14:30:35 +0800 Subject: [PATCH 093/181] fix mem leak --- source/common/src/tdatablock.c | 5 +++++ source/libs/executor/src/exchangeoperator.c | 6 ++++-- source/libs/executor/src/executil.c | 3 +++ source/libs/executor/src/sysscanoperator.c | 3 +++ 4 files changed, 15 insertions(+), 2 deletions(-) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 8f97aaf154..2181496635 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -1976,6 +1976,7 @@ int32_t createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData, SSDataB code = copyPkVal(&pDstBlock->info, &pDataBlock->info); if (code != TSDB_CODE_SUCCESS) { + blockDataDestroy(pDstBlock); uError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code)); return code; } @@ -1991,10 +1992,14 @@ int32_t createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData, SSDataB SColumnInfoData* pDst = taosArrayGet(pDstBlock->pDataBlock, i); SColumnInfoData* pSrc = taosArrayGet(pDataBlock->pDataBlock, i); if (pDst == NULL) { + blockDataDestroy(pDstBlock); + uError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code)); return terrno; } if (pSrc == NULL) { + blockDataDestroy(pDstBlock); + uError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code)); return terrno; } diff --git a/source/libs/executor/src/exchangeoperator.c b/source/libs/executor/src/exchangeoperator.c index bdc1e42b28..d8cced2c7a 100644 --- 
a/source/libs/executor/src/exchangeoperator.c +++ b/source/libs/executor/src/exchangeoperator.c @@ -850,6 +850,7 @@ int32_t doExtractResultBlocks(SExchangeInfo* pExchangeInfo, SSourceDataInfo* pDa int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; SRetrieveTableRsp* pRetrieveRsp = pDataInfo->pRsp; + SSDataBlock* pb = NULL; char* pNextStart = pRetrieveRsp->data; char* pStart = pNextStart; @@ -874,7 +875,6 @@ int32_t doExtractResultBlocks(SExchangeInfo* pExchangeInfo, SSourceDataInfo* pDa } while (index++ < pRetrieveRsp->numOfBlocks) { - SSDataBlock* pb = NULL; pStart = pNextStart; if (taosArrayGetSize(pExchangeInfo->pRecycledBlocks) > 0) { @@ -902,15 +902,17 @@ int32_t doExtractResultBlocks(SExchangeInfo* pExchangeInfo, SSourceDataInfo* pDa code = extractDataBlockFromFetchRsp(pb, pStart, NULL, &pStart); if (code != 0) { taosMemoryFreeClear(pDataInfo->pRsp); - return code; + goto _end; } void* tmp = taosArrayPush(pExchangeInfo->pResultBlockList, &pb); QUERY_CHECK_NULL(tmp, code, lino, _end, terrno); + pb = NULL; } _end: if (code != TSDB_CODE_SUCCESS) { + blockDataDestroy(pb); qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); } return code; diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 210c073c6d..24eafe7d57 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -1942,6 +1942,8 @@ int32_t createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, SExprInfo** pTargetNode = (STargetNode*)nodesListGetNode(pGroupKeys, i - numOfFuncs); } if (!pTargetNode) { + destroyExprInfo(pExprs, *numOfExprs); + taosMemoryFreeClear(pExprs); qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(terrno)); return terrno; } @@ -1950,6 +1952,7 @@ int32_t createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, SExprInfo** code = createExprFromTargetNode(pExp, pTargetNode); if (code != TSDB_CODE_SUCCESS) { destroyExprInfo(pExprs, *numOfExprs); + taosMemoryFreeClear(pExprs); qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code)); return code; } diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c index a2cfedd16e..d8a2331980 100644 --- a/source/libs/executor/src/sysscanoperator.c +++ b/source/libs/executor/src/sysscanoperator.c @@ -1670,6 +1670,7 @@ static SSDataBlock* sysTableBuildUserTables(SOperatorInfo* pOperator) { pAPI->metaReaderFn.clearReader(&mr); pAPI->metaFn.closeTableMetaCursor(pInfo->pCur); pInfo->pCur = NULL; + blockDataDestroy(p); T_LONG_JMP(pTaskInfo->env, terrno); } @@ -1829,6 +1830,8 @@ _end: qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); blockDataDestroy(p); pTaskInfo->code = code; + pAPI->metaFn.closeTableMetaCursor(pInfo->pCur); + pInfo->pCur = NULL; T_LONG_JMP(pTaskInfo->env, code); } return (pInfo->pRes->info.rows == 0) ? 
NULL : pInfo->pRes; From d6284424b66b854dc4d63398baa0438b50c49297 Mon Sep 17 00:00:00 2001 From: t_max <1172915550@qq.com> Date: Fri, 16 Aug 2024 15:08:14 +0800 Subject: [PATCH 094/181] docs: update the output of go and c# examples --- docs/examples/csharp/subscribe/Program.cs | 114 +++++-- docs/examples/csharp/wsInsert/Program.cs | 65 ++-- docs/examples/csharp/wssubscribe/Program.cs | 110 +++++-- docs/examples/go/queryreqid/main.go | 118 +++---- docs/examples/go/sqlquery/main.go | 172 +++++----- docs/examples/go/tmq/native/main.go | 315 ++++++++++-------- docs/examples/go/tmq/ws/main.go | 338 ++++++++++++-------- 7 files changed, 737 insertions(+), 495 deletions(-) diff --git a/docs/examples/csharp/subscribe/Program.cs b/docs/examples/csharp/subscribe/Program.cs index 4138194800..2ec73aae48 100644 --- a/docs/examples/csharp/subscribe/Program.cs +++ b/docs/examples/csharp/subscribe/Program.cs @@ -6,6 +6,11 @@ namespace TMQExample { internal class SubscribeDemo { + private static string _host = ""; + private static string _groupId = ""; + private static string _clientId = ""; + private static string _topic = ""; + public static void Main(string[] args) { try @@ -64,9 +69,9 @@ namespace TMQExample { // ANCHOR: create_consumer // consumer config - var host = "127.0.0.1"; - var groupId = "group1"; - var clientId = "client1"; + _host = "127.0.0.1"; + _groupId = "group1"; + _clientId = "client1"; var cfg = new Dictionary() { { "td.connect.port", "6030" }, @@ -74,9 +79,9 @@ namespace TMQExample { "msg.with.table.name", "true" }, { "enable.auto.commit", "true" }, { "auto.commit.interval.ms", "1000" }, - { "group.id", groupId }, - { "client.id", clientId }, - { "td.connect.ip", host }, + { "group.id", _groupId }, + { "client.id", _clientId }, + { "td.connect.ip", _host }, { "td.connect.user", "root" }, { "td.connect.pass", "taosdata" }, }; @@ -85,20 +90,32 @@ namespace TMQExample { // create consumer consumer = new ConsumerBuilder>(cfg).Build(); - Console.WriteLine("Create consumer successfully, host: " + host + ", groupId: " + groupId + - ", clientId: " + clientId); + Console.WriteLine( + $"Create consumer successfully, " + + $"host: {_host}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}"); } catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to create native consumer, host: " + host + ", ErrCode:" + e.Code + - ", ErrMessage: " + e.Error); + Console.WriteLine( + $"Failed to create native consumer, " + + $"host: {_host}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"ErrCode: {e.Code}, " + + $"ErrMessage: {e.Error}"); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to create native consumer, host: " + host + ", ErrMessage: " + e.Message); + Console.WriteLine($"Failed to create native consumer, " + + $"host: {_host}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"ErrMessage: {e.Message}"); throw; } @@ -109,11 +126,12 @@ namespace TMQExample static void Consume(IConsumer> consumer) { // ANCHOR: subscribe + _topic = "topic_meters"; try { // subscribe - consumer.Subscribe(new List() { "topic_meters" }); - Console.WriteLine("subscribe topics successfully"); + consumer.Subscribe(new List() { _topic }); + Console.WriteLine("Subscribe topics successfully"); for (int i = 0; i < 50; i++) { // consume message with using block to ensure the result is disposed @@ -133,13 +151,23 @@ namespace TMQExample catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to poll 
data, ErrCode: " + e.Code + ", ErrMessage: " + e.Error); + Console.WriteLine( + $"Failed to poll data, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"ErrCode: {e.Code}, " + + $"ErrMessage: {e.Error}"); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to poll data, ErrMessage: " + e.Message); + Console.WriteLine($"Failed to poll data, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"ErrMessage: {e.Message}"); throw; } // ANCHOR_END: subscribe @@ -152,7 +180,7 @@ namespace TMQExample { // get assignment var assignment = consumer.Assignment; - Console.WriteLine($"now assignment: {assignment}"); + Console.WriteLine($"Now assignment: {assignment}"); // seek to the beginning foreach (var topicPartition in assignment) { @@ -163,13 +191,25 @@ namespace TMQExample catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to execute seek example, ErrCode: " + e.Code + ", ErrMessage: " + e.Error); + Console.WriteLine( + $"Failed to execute seek example, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"offset: 0, " + + $"ErrCode: {e.Code}, " + + $"ErrMessage: {e.Error}"); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to execute seek example, ErrMessage: " + e.Message); + Console.WriteLine($"Failed to execute seek example, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"offset: 0, " + + $"ErrMessage: {e.Message}"); throw; } // ANCHOR_END: seek @@ -180,6 +220,7 @@ namespace TMQExample // ANCHOR: commit_offset for (int i = 0; i < 5; i++) { + TopicPartitionOffset topicPartitionOffset = null; try { // consume message with using block to ensure the result is disposed @@ -187,9 +228,10 @@ namespace TMQExample { if (cr == null) continue; // commit offset + topicPartitionOffset = cr.TopicPartitionOffset; consumer.Commit(new List { - cr.TopicPartitionOffset, + topicPartitionOffset, }); Console.WriteLine("Commit offset manually successfully."); } @@ -197,13 +239,26 @@ namespace TMQExample catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to execute commit example, ErrCode:" + e.Code + ", ErrMessage: " + e.Error); + Console.WriteLine( + $"Failed to execute commit example, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"offset: {topicPartitionOffset}, " + + $"ErrCode: {e.Code}, " + + $"ErrMessage: {e.Error}"); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to execute commit example, ErrMessage: " + e.Message); + Console.WriteLine( + $"Failed to execute commit example, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"offset: {topicPartitionOffset}, " + + $"ErrMessage: {e.Message}"); throw; } } @@ -221,13 +276,24 @@ namespace TMQExample catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to unsubscribe consumer, ErrCode: " + e.Code + ", ErrMessage: " + e.Error); + Console.WriteLine( + $"Failed to unsubscribe consumer, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"ErrCode: {e.Code}, " + + $"ErrMessage: {e.Error}"); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to unsubscribe consumer, ErrMessage: " + e.Message); + Console.WriteLine( + $"Failed to execute commit example, " + + 
$"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"ErrMessage: {e.Message}"); throw; } finally @@ -239,4 +305,4 @@ namespace TMQExample // ANCHOR_END: close } } -} +} \ No newline at end of file diff --git a/docs/examples/csharp/wsInsert/Program.cs b/docs/examples/csharp/wsInsert/Program.cs index 36b884a522..9bc47d97f1 100644 --- a/docs/examples/csharp/wsInsert/Program.cs +++ b/docs/examples/csharp/wsInsert/Program.cs @@ -16,10 +16,10 @@ namespace Examples var builder = new ConnectionStringBuilder(connectionString); using (var client = DbDriver.Open(builder)) { - CreateDatabaseAndTable(client,connectionString); - InsertData(client,connectionString); - QueryData(client,connectionString); - QueryWithReqId(client,connectionString); + CreateDatabaseAndTable(client, connectionString); + InsertData(client, connectionString); + QueryData(client, connectionString); + QueryWithReqId(client, connectionString); } } catch (TDengineError e) @@ -52,7 +52,8 @@ namespace Examples catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to create database power or stable meters, ErrCode: " + e.Code + ", ErrMessage: " + e.Error); + Console.WriteLine("Failed to create database power or stable meters, ErrCode: " + e.Code + + ", ErrMessage: " + e.Error); throw; } catch (Exception e) @@ -64,40 +65,43 @@ namespace Examples // ANCHOR_END: create_db_and_table } - private static void InsertData(ITDengineClient client,string connectionString) + private static void InsertData(ITDengineClient client, string connectionString) { // ANCHOR: insert_data + // insert data, please make sure the database and table are created before + var insertQuery = "INSERT INTO " + + "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " + + "VALUES " + + "(NOW + 1a, 10.30000, 219, 0.31000) " + + "(NOW + 2a, 12.60000, 218, 0.33000) " + + "(NOW + 3a, 12.30000, 221, 0.31000) " + + "power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " + + "VALUES " + + "(NOW + 1a, 10.30000, 218, 0.25000) "; try { - // insert data, please make sure the database and table are created before - var insertQuery = "INSERT INTO " + - "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " + - "VALUES " + - "(NOW + 1a, 10.30000, 219, 0.31000) " + - "(NOW + 2a, 12.60000, 218, 0.33000) " + - "(NOW + 3a, 12.30000, 221, 0.31000) " + - "power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " + - "VALUES " + - "(NOW + 1a, 10.30000, 218, 0.25000) "; var affectedRows = client.Exec(insertQuery); Console.WriteLine("Successfully inserted " + affectedRows + " rows to power.meters."); } catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to insert data to power.meters, ErrCode: " + e.Code + ", ErrMessage: " + e.Error); + Console.WriteLine("Failed to insert data to power.meters, sql: " + insertQuery + ", ErrCode: " + + e.Code + ", ErrMessage: " + + e.Error); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to insert data to power.meters, ErrMessage: " + e.Message); + Console.WriteLine("Failed to insert data to power.meters, sql: " + insertQuery + ", ErrMessage: " + + e.Message); throw; } // ANCHOR_END: insert_data } - private static void QueryData(ITDengineClient client,string connectionString) + private static void QueryData(ITDengineClient client, string connectionString) { // ANCHOR: select_data // query data, make sure the database and table are created before @@ -108,6 +112,7 @@ namespace Examples { 
while (rows.Read()) { + // Add your data processing logic here var ts = (DateTime)rows.GetValue(0); var current = (float)rows.GetValue(1); var location = Encoding.UTF8.GetString((byte[])rows.GetValue(2)); @@ -119,28 +124,30 @@ namespace Examples catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to query data from power.meters, sql: " + query + ", ErrCode: " + e.Code + ", ErrMessage: " + e.Error); + Console.WriteLine("Failed to query data from power.meters, sql: " + query + ", ErrCode: " + e.Code + + ", ErrMessage: " + e.Error); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to query data from power.meters, sql: " + query + ", ErrMessage: " + e.Message); + Console.WriteLine( + "Failed to query data from power.meters, sql: " + query + ", ErrMessage: " + e.Message); throw; } // ANCHOR_END: select_data } - private static void QueryWithReqId(ITDengineClient client,string connectionString) + private static void QueryWithReqId(ITDengineClient client, string connectionString) { // ANCHOR: query_id var reqId = (long)3; + // query data + var query = "SELECT ts, current, location FROM power.meters limit 1"; try { - // query data - var query = "SELECT ts, current, location FROM power.meters limit 1"; // query with request id 3 - using (var rows = client.Query(query,reqId)) + using (var rows = client.Query(query, reqId)) { while (rows.Read()) { @@ -155,16 +162,18 @@ namespace Examples catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to execute sql with reqId: " + reqId + ", ErrCode: " + e.Code + ", ErrMessage: " + e.Error); + Console.WriteLine("Failed to execute sql with reqId: " + reqId + ", sql: " + query + ", ErrCode: " + + e.Code + ", ErrMessage: " + e.Error); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to execute sql with reqId: " + reqId + ", ErrMessage: " + e.Message); + Console.WriteLine("Failed to execute sql with reqId: " + reqId + ", sql: " + query + ", ErrMessage: " + + e.Message); throw; } // ANCHOR_END: query_id } } -} +} \ No newline at end of file diff --git a/docs/examples/csharp/wssubscribe/Program.cs b/docs/examples/csharp/wssubscribe/Program.cs index 21abe10847..5f5afc575a 100644 --- a/docs/examples/csharp/wssubscribe/Program.cs +++ b/docs/examples/csharp/wssubscribe/Program.cs @@ -6,6 +6,11 @@ namespace TMQExample { internal class SubscribeDemo { + private static string _host = ""; + private static string _groupId = ""; + private static string _clientId = ""; + private static string _topic = ""; + public static void Main(string[] args) { try @@ -68,9 +73,9 @@ namespace TMQExample { // ANCHOR: create_consumer // consumer config - var host = "127.0.0.1"; - var groupId = "group1"; - var clientId = "client1"; + _host = "127.0.0.1"; + _groupId = "group1"; + _clientId = "client1"; var cfg = new Dictionary() { { "td.connect.type", "WebSocket" }, @@ -79,9 +84,9 @@ namespace TMQExample { "msg.with.table.name", "true" }, { "enable.auto.commit", "true" }, { "auto.commit.interval.ms", "1000" }, - { "group.id", groupId }, - { "client.id", clientId }, - { "td.connect.ip", host }, + { "group.id", _groupId }, + { "client.id", _clientId }, + { "td.connect.ip", _host }, { "td.connect.user", "root" }, { "td.connect.pass", "taosdata" }, }; @@ -90,20 +95,32 @@ namespace TMQExample { // create consumer consumer = new ConsumerBuilder>(cfg).Build(); - Console.WriteLine("Create consumer successfully, host: " + host + ", groupId: " + groupId + - ", clientId: " + 
clientId); + Console.WriteLine( + $"Create consumer successfully, " + + $"host: {_host}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}"); } catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to create websocket consumer, host: " + host + ", ErrCode: " + e.Code + - ", ErrMessage: " + e.Error); + Console.WriteLine( + $"Failed to create native consumer, " + + $"host: {_host}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"ErrCode: {e.Code}, " + + $"ErrMessage: {e.Error}"); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to create websocket consumer, host: " + host + ", ErrMessage: " + e.Message); + Console.WriteLine($"Failed to create native consumer, " + + $"host: {_host}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"ErrMessage: {e.Message}"); throw; } @@ -114,10 +131,11 @@ namespace TMQExample static void Consume(IConsumer> consumer) { // ANCHOR: subscribe + _topic = "topic_meters"; try { // subscribe - consumer.Subscribe(new List() { "topic_meters" }); + consumer.Subscribe(new List() { _topic }); Console.WriteLine("Subscribe topics successfully"); for (int i = 0; i < 50; i++) { @@ -138,13 +156,23 @@ namespace TMQExample catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to poll data, ErrCode: " + e.Code + ", ErrMessage: " + e.Error); + Console.WriteLine( + $"Failed to poll data, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"ErrCode: {e.Code}, " + + $"ErrMessage: {e.Error}"); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to poll data, ErrMessage: " + e.Message); + Console.WriteLine($"Failed to poll data, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"ErrMessage: {e.Message}"); throw; } // ANCHOR_END: subscribe @@ -168,13 +196,25 @@ namespace TMQExample catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to execute seek example, ErrCode: " + e.Code + ", ErrMessage: " + e.Error); + Console.WriteLine( + $"Failed to execute seek example, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"offset: 0, " + + $"ErrCode: {e.Code}, " + + $"ErrMessage: {e.Error}"); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to execute seek example, ErrMessage: " + e.Message); + Console.WriteLine($"Failed to execute seek example, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"offset: 0, " + + $"ErrMessage: {e.Message}"); throw; } // ANCHOR_END: seek @@ -185,6 +225,7 @@ namespace TMQExample // ANCHOR: commit_offset for (int i = 0; i < 5; i++) { + TopicPartitionOffset topicPartitionOffset = null; try { // consume message with using block to ensure the result is disposed @@ -192,9 +233,10 @@ namespace TMQExample { if (cr == null) continue; // commit offset + topicPartitionOffset = cr.TopicPartitionOffset; consumer.Commit(new List { - cr.TopicPartitionOffset, + topicPartitionOffset, }); Console.WriteLine("Commit offset manually successfully."); } @@ -202,13 +244,26 @@ namespace TMQExample catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to execute commit example, ErrCode: " + e.Code + ", ErrMessage: " + e.Error); + Console.WriteLine( + $"Failed to execute commit example, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + 
$"offset: {topicPartitionOffset}, " + + $"ErrCode: {e.Code}, " + + $"ErrMessage: {e.Error}"); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to execute commit example, ErrMessage: " + e.Message); + Console.WriteLine( + $"Failed to execute commit example, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"offset: {topicPartitionOffset}, " + + $"ErrMessage: {e.Message}"); throw; } } @@ -226,13 +281,24 @@ namespace TMQExample catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to unsubscribe consumer, ErrCode :" + e.Code + ", ErrMessage: " + e.Error); + Console.WriteLine( + $"Failed to unsubscribe consumer, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"ErrCode: {e.Code}, " + + $"ErrMessage: {e.Error}"); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to unsubscribe consumer, ErrMessage: " + e.Message); + Console.WriteLine( + $"Failed to execute commit example, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"ErrMessage: {e.Message}"); throw; } finally @@ -244,4 +310,4 @@ namespace TMQExample // ANCHOR_END: close } } -} +} \ No newline at end of file diff --git a/docs/examples/go/queryreqid/main.go b/docs/examples/go/queryreqid/main.go index 39d1d6bd5e..0763feceff 100644 --- a/docs/examples/go/queryreqid/main.go +++ b/docs/examples/go/queryreqid/main.go @@ -1,58 +1,60 @@ -package main - -import ( - "context" - "database/sql" - "fmt" - "log" - "time" - - _ "github.com/taosdata/driver-go/v3/taosSql" -) - -func main() { - taosDSN := "root:taosdata@tcp(localhost:6030)/" - db, err := sql.Open("taosSql", taosDSN) - if err != nil { - log.Fatalln("Failed to connect to " + taosDSN + "; ErrMessage: " + err.Error()) - } - defer db.Close() - initEnv(db) - // ANCHOR: query_id - // use context to set request id - reqId := int64(3) - ctx := context.WithValue(context.Background(), "taos_req_id", reqId) - // execute query with context - rows, err := db.QueryContext(ctx, "SELECT ts, current, location FROM power.meters limit 1") - if err != nil { - log.Fatalf("Failed to execute sql with reqId: %d, url: %s; ErrMessage: %s\n", reqId, taosDSN, err.Error()) - } - for rows.Next() { - var ( - ts time.Time - current float32 - location string - ) - err = rows.Scan(&ts, ¤t, &location) - if err != nil { - log.Fatal("Scan error: ", err) - } - fmt.Printf("ts: %s, current: %f, location: %s\n", ts, current, location) - } - // ANCHOR_END: query_id -} - -func initEnv(conn *sql.DB) { - _, err := conn.Exec("CREATE DATABASE IF NOT EXISTS power") - if err != nil { - log.Fatal("Create database power error: ", err) - } - _, err = conn.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))") - if err != nil { - log.Fatal("Create stable meters error: ", err) - } - _, err = conn.Exec("INSERT INTO power.d1001 USING power.meters TAGS (2, 'California.SanFrancisco') VALUES (NOW , 10.2, 219, 0.32)") - if err != nil { - log.Fatal("Insert data to power.meters error: ", err) - } -} +package main + +import ( + "context" + "database/sql" + "fmt" + "log" + "time" + + _ "github.com/taosdata/driver-go/v3/taosSql" +) + +func main() { + taosDSN := "root:taosdata@tcp(localhost:6030)/" + db, err := sql.Open("taosSql", taosDSN) + if err != nil { + log.Fatalln("Failed to connect to " + taosDSN + ", ErrMessage: " + err.Error()) + } + 
defer db.Close() + initEnv(db) + // ANCHOR: query_id + // use context to set request id + reqId := int64(3) + ctx := context.WithValue(context.Background(), "taos_req_id", reqId) + // execute query with context + querySql := "SELECT ts, current, location FROM power.meters limit 1" + rows, err := db.QueryContext(ctx, querySql) + if err != nil { + log.Fatalf("Failed to execute sql with reqId: %d, url: %s, sql: %s, ErrMessage: %s\n", reqId, taosDSN, querySql, err.Error()) + } + for rows.Next() { + // Add your data processing logic here + var ( + ts time.Time + current float32 + location string + ) + err = rows.Scan(&ts, ¤t, &location) + if err != nil { + log.Fatalf("Failed to scan data, reqId: %d, url:%s, sql: %s, ErrMessage: %s\n", reqId, taosDSN, querySql, err) + } + fmt.Printf("ts: %s, current: %f, location: %s\n", ts, current, location) + } + // ANCHOR_END: query_id +} + +func initEnv(conn *sql.DB) { + _, err := conn.Exec("CREATE DATABASE IF NOT EXISTS power") + if err != nil { + log.Fatal("Create database power error: ", err) + } + _, err = conn.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))") + if err != nil { + log.Fatal("Create stable meters error: ", err) + } + _, err = conn.Exec("INSERT INTO power.d1001 USING power.meters TAGS (2, 'California.SanFrancisco') VALUES (NOW , 10.2, 219, 0.32)") + if err != nil { + log.Fatal("Insert data to power.meters error: ", err) + } +} diff --git a/docs/examples/go/sqlquery/main.go b/docs/examples/go/sqlquery/main.go index f0e0f1c97e..1bfb74ca87 100644 --- a/docs/examples/go/sqlquery/main.go +++ b/docs/examples/go/sqlquery/main.go @@ -1,86 +1,86 @@ -package main - -import ( - "database/sql" - "fmt" - "log" - "time" - - _ "github.com/taosdata/driver-go/v3/taosSql" -) - -func main() { - var taosDSN = "root:taosdata@tcp(localhost:6030)/" - db, err := sql.Open("taosSql", taosDSN) - if err != nil { - log.Fatalln("Failed to connect to " + taosDSN + ", ErrMessage: " + err.Error()) - } - defer db.Close() - // ANCHOR: create_db_and_table - // create database - res, err := db.Exec("CREATE DATABASE IF NOT EXISTS power") - if err != nil { - log.Fatalln("Failed to create database power, ErrMessage: " + err.Error()) - } - rowsAffected, err := res.RowsAffected() - if err != nil { - log.Fatalln("Failed to get create database rowsAffected, ErrMessage: " + err.Error()) - } - // you can check rowsAffected here - fmt.Println("Create database power successfully, rowsAffected: ", rowsAffected) - // create table - res, err = db.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))") - if err != nil { - log.Fatalln("Failed to create stable meters, ErrMessage: " + err.Error()) - } - rowsAffected, err = res.RowsAffected() - if err != nil { - log.Fatalln("Failed to get create stable rowsAffected, ErrMessage: " + err.Error()) - } - // you can check rowsAffected here - fmt.Println("Create stable power.meters successfully, rowsAffected:", rowsAffected) - // ANCHOR_END: create_db_and_table - // ANCHOR: insert_data - // insert data, please make sure the database and table are created before - insertQuery := "INSERT INTO " + - "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " + - "VALUES " + - "(NOW + 1a, 10.30000, 219, 0.31000) " + - "(NOW + 2a, 12.60000, 218, 0.33000) " + - "(NOW + 3a, 12.30000, 221, 0.31000) " + - "power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') 
" + - "VALUES " + - "(NOW + 1a, 10.30000, 218, 0.25000) " - res, err = db.Exec(insertQuery) - if err != nil { - log.Fatal("Failed to insert data to power.meters, ErrMessage: " + err.Error()) - } - rowsAffected, err = res.RowsAffected() - if err != nil { - log.Fatal("Failed to get insert rowsAffected, ErrMessage: " + err.Error()) - } - // you can check affectedRows here - fmt.Printf("Successfully inserted %d rows to power.meters.\n", rowsAffected) - // ANCHOR_END: insert_data - // ANCHOR: select_data - // query data, make sure the database and table are created before - sql := "SELECT ts, current, location FROM power.meters limit 100" - rows, err := db.Query(sql) - if err != nil { - log.Fatal("Failed to query data from power.meters, ErrMessage: " + err.Error()) - } - for rows.Next() { - var ( - ts time.Time - current float32 - location string - ) - err = rows.Scan(&ts, ¤t, &location) - if err != nil { - log.Fatal("Failed to scan data, sql:" + sql + ", ErrMessage: " + err.Error()) - } - // you can check data here - fmt.Printf("ts: %s, current: %f, location: %s\n", ts, current, location) - } - // ANCHOR_END: select_data -} +package main + +import ( + "database/sql" + "fmt" + "log" + "time" + + _ "github.com/taosdata/driver-go/v3/taosSql" +) + +func main() { + var taosDSN = "root:taosdata@tcp(localhost:6030)/" + db, err := sql.Open("taosSql", taosDSN) + if err != nil { + log.Fatalln("Failed to connect to " + taosDSN + ", ErrMessage: " + err.Error()) + } + defer db.Close() + // ANCHOR: create_db_and_table + // create database + res, err := db.Exec("CREATE DATABASE IF NOT EXISTS power") + if err != nil { + log.Fatalln("Failed to create database power, ErrMessage: " + err.Error()) + } + rowsAffected, err := res.RowsAffected() + if err != nil { + log.Fatalln("Failed to get create database rowsAffected, ErrMessage: " + err.Error()) + } + // you can check rowsAffected here + fmt.Println("Create database power successfully, rowsAffected: ", rowsAffected) + // create table + res, err = db.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))") + if err != nil { + log.Fatalln("Failed to create stable meters, ErrMessage: " + err.Error()) + } + rowsAffected, err = res.RowsAffected() + if err != nil { + log.Fatalln("Failed to get create stable rowsAffected, ErrMessage: " + err.Error()) + } + // you can check rowsAffected here + fmt.Println("Create stable power.meters successfully, rowsAffected:", rowsAffected) + // ANCHOR_END: create_db_and_table + // ANCHOR: insert_data + // insert data, please make sure the database and table are created before + insertQuery := "INSERT INTO " + + "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " + + "VALUES " + + "(NOW + 1a, 10.30000, 219, 0.31000) " + + "(NOW + 2a, 12.60000, 218, 0.33000) " + + "(NOW + 3a, 12.30000, 221, 0.31000) " + + "power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " + + "VALUES " + + "(NOW + 1a, 10.30000, 218, 0.25000) " + res, err = db.Exec(insertQuery) + if err != nil { + log.Fatalf("Failed to insert data to power.meters, sql: %s, ErrMessage: %s\n", insertQuery, err.Error()) + } + rowsAffected, err = res.RowsAffected() + if err != nil { + log.Fatalf("Failed to get insert rowsAffected, sql: %s, ErrMessage: %s\n", insertQuery, err.Error()) + } + // you can check affectedRows here + fmt.Printf("Successfully inserted %d rows to power.meters.\n", rowsAffected) + // ANCHOR_END: insert_data + // ANCHOR: select_data + // query data, 
make sure the database and table are created before + sql := "SELECT ts, current, location FROM power.meters limit 100" + rows, err := db.Query(sql) + if err != nil { + log.Fatalf("Failed to query data from power.meters, sql: %s, ErrMessage: %s\n", sql, err.Error()) + } + for rows.Next() { + // Add your data processing logic here + var ( + ts time.Time + current float32 + location string + ) + err = rows.Scan(&ts, ¤t, &location) + if err != nil { + log.Fatalf("Failed to scan data, sql: %s, ErrMessage: %s\n", sql, err) + } + fmt.Printf("ts: %s, current: %f, location: %s\n", ts, current, location) + } + // ANCHOR_END: select_data +} diff --git a/docs/examples/go/tmq/native/main.go b/docs/examples/go/tmq/native/main.go index 638a07d235..1d4a22b880 100644 --- a/docs/examples/go/tmq/native/main.go +++ b/docs/examples/go/tmq/native/main.go @@ -1,136 +1,179 @@ -package main - -import ( - "database/sql" - "fmt" - "log" - "time" - - "github.com/taosdata/driver-go/v3/af/tmq" - tmqcommon "github.com/taosdata/driver-go/v3/common/tmq" - _ "github.com/taosdata/driver-go/v3/taosSql" -) - -var done = make(chan struct{}) - -func main() { - // init env - taosDSN := "root:taosdata@tcp(127.0.0.1:6030)/" - conn, err := sql.Open("taosSql", taosDSN) - if err != nil { - log.Fatalln("Failed to connect to " + taosDSN + "; ErrMessage: " + err.Error()) - } - defer func() { - conn.Close() - }() - initEnv(conn) - // ANCHOR: create_consumer - // create consumer - groupID := "group1" - clientID := "client1" - host := "127.0.0.1" - consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ - "td.connect.user": "root", - "td.connect.pass": "taosdata", - "auto.offset.reset": "latest", - "msg.with.table.name": "true", - "enable.auto.commit": "true", - "auto.commit.interval.ms": "1000", - "group.id": groupID, - "client.id": clientID, - }) - if err != nil { - log.Fatalln("Failed to create native consumer, host : " + host + "; ErrMessage: " + err.Error()) - } - log.Println("Create consumer successfully, host: " + host + ", groupId: " + groupID + ", clientId: " + clientID) - - // ANCHOR_END: create_consumer - // ANCHOR: subscribe - err = consumer.Subscribe("topic_meters", nil) - if err != nil { - log.Fatalln("Failed to subscribe topic_meters, ErrMessage: " + err.Error()) - } - log.Println("Subscribe topics successfully") - for i := 0; i < 50; i++ { - ev := consumer.Poll(100) - if ev != nil { - switch e := ev.(type) { - case *tmqcommon.DataMessage: - // process your data here - fmt.Printf("data:%v\n", e) - // ANCHOR: commit_offset - // commit offset - _, err = consumer.CommitOffsets([]tmqcommon.TopicPartition{e.TopicPartition}) - if err != nil { - log.Fatalln("Failed to commit offset, ErrMessage: " + err.Error()) - } - log.Println("Commit offset manually successfully.") - // ANCHOR_END: commit_offset - case tmqcommon.Error: - fmt.Printf("%% Error: %v: %v\n", e.Code(), e) - log.Fatalln("Failed to poll data, ErrMessage: " + err.Error()) - } - } - } - // ANCHOR_END: subscribe - // ANCHOR: seek - // get assignment - partitions, err := consumer.Assignment() - if err != nil { - log.Fatal("Failed to get assignment, ErrMessage: " + err.Error()) - } - fmt.Println("Now assignment:", partitions) - for i := 0; i < len(partitions); i++ { - // seek to the beginning - err = consumer.Seek(tmqcommon.TopicPartition{ - Topic: partitions[i].Topic, - Partition: partitions[i].Partition, - Offset: 0, - }, 0) - if err != nil { - log.Fatalln("Failed to execute seek example, ErrMessage: " + err.Error()) - } - } - fmt.Println("Assignment seek to beginning 
successfully") - // ANCHOR_END: seek - // ANCHOR: close - // unsubscribe - err = consumer.Unsubscribe() - if err != nil { - log.Fatal("Failed to unsubscribe consumer, ErrMessage: " + err.Error()) - } - fmt.Println("Consumer unsubscribed successfully.") - // close consumer - err = consumer.Close() - if err != nil { - log.Fatal("Failed to close consumer, ErrMessage: " + err.Error()) - } - fmt.Println("Consumer closed successfully.") - // ANCHOR_END: close - <-done -} - -func initEnv(conn *sql.DB) { - _, err := conn.Exec("CREATE DATABASE IF NOT EXISTS power") - if err != nil { - log.Fatal("Failed to create database, ErrMessage: " + err.Error()) - } - _, err = conn.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))") - if err != nil { - log.Fatal("Failed to create stable, ErrMessage: " + err.Error()) - } - _, err = conn.Exec("CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage, phase, groupid, location FROM power.meters") - if err != nil { - log.Fatal("Failed to create topic, ErrMessage: " + err.Error()) - } - go func() { - for i := 0; i < 10; i++ { - time.Sleep(time.Second) - _, err = conn.Exec("INSERT INTO power.d1001 USING power.meters TAGS (2, 'California.SanFrancisco') VALUES (NOW , 10.2, 219, 0.32)") - if err != nil { - log.Fatal("Failed to insert data, ErrMessage: " + err.Error()) - } - } - done <- struct{}{} - }() -} +package main + +import ( + "database/sql" + "fmt" + "log" + "time" + + "github.com/taosdata/driver-go/v3/af/tmq" + tmqcommon "github.com/taosdata/driver-go/v3/common/tmq" + _ "github.com/taosdata/driver-go/v3/taosSql" +) + +var done = make(chan struct{}) +var groupID string +var clientID string +var host string +var topic string + +func main() { + // init env + taosDSN := "root:taosdata@tcp(127.0.0.1:6030)/" + conn, err := sql.Open("taosSql", taosDSN) + if err != nil { + log.Fatalln("Failed to connect to " + taosDSN + ", ErrMessage: " + err.Error()) + } + defer func() { + conn.Close() + }() + initEnv(conn) + // ANCHOR: create_consumer + // create consumer + groupID = "group1" + clientID = "client1" + host = "127.0.0.1" + consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "auto.offset.reset": "latest", + "msg.with.table.name": "true", + "enable.auto.commit": "true", + "auto.commit.interval.ms": "1000", + "group.id": groupID, + "client.id": clientID, + }) + if err != nil { + log.Fatalf( + "Failed to create native consumer, host: %s, groupId: %s, clientId: %s, ErrMessage: %s\n", + host, + groupID, + clientID, + err.Error(), + ) + } + log.Printf("Create consumer successfully, host: %s, groupId: %s, clientId: %s\n", host, groupID, clientID) + + // ANCHOR_END: create_consumer + // ANCHOR: subscribe + topic = "topic_meters" + err = consumer.Subscribe(topic, nil) + if err != nil { + log.Fatalf( + "Failed to subscribe topic_meters, topic: %s, groupId: %s, clientId: %s, ErrMessage: %s\n", + topic, + groupID, + clientID, + err.Error(), + ) + } + log.Println("Subscribe topics successfully") + for i := 0; i < 50; i++ { + ev := consumer.Poll(100) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + // process your data here + fmt.Printf("data:%v\n", e) + // ANCHOR: commit_offset + // commit offset + _, err = consumer.CommitOffsets([]tmqcommon.TopicPartition{e.TopicPartition}) + if err != nil { + log.Fatalf( + "Failed to commit offset, topic: %s, groupId: %s, clientId: %s, offset %s, 
ErrMessage: %s\n", + topic, + groupID, + clientID, + e.TopicPartition, + err.Error(), + ) + } + log.Println("Commit offset manually successfully.") + // ANCHOR_END: commit_offset + case tmqcommon.Error: + log.Fatalf("Failed to poll data, topic: %s, groupId: %s, clientId: %s, ErrMessage: %s\n", topic, groupID, clientID, e.Error()) + } + } + } + // ANCHOR_END: subscribe + // ANCHOR: seek + // get assignment + partitions, err := consumer.Assignment() + if err != nil { + log.Fatalf("Failed to get assignment, topic: %s, groupId: %s, clientId: %s, ErrMessage: %s\n", topic, groupID, clientID, err.Error()) + } + fmt.Println("Now assignment:", partitions) + for i := 0; i < len(partitions); i++ { + // seek to the beginning + err = consumer.Seek(tmqcommon.TopicPartition{ + Topic: partitions[i].Topic, + Partition: partitions[i].Partition, + Offset: 0, + }, 0) + if err != nil { + log.Fatalf( + "Failed to execute seek example, topic: %s, groupId: %s, clientId: %s, partition: %d, offset: %d, ErrMessage: %s\n", + topic, + groupID, + clientID, + partitions[i].Partition, + 0, + err.Error(), + ) + } + } + fmt.Println("Assignment seek to beginning successfully") + // ANCHOR_END: seek + // ANCHOR: close + // unsubscribe + err = consumer.Unsubscribe() + if err != nil { + log.Fatalf( + "Failed to unsubscribe consumer, topic: %s, groupId: %s, clientId: %s, ErrMessage: %s\n", + topic, + groupID, + clientID, + err.Error(), + ) + } + fmt.Println("Consumer unsubscribed successfully.") + // close consumer + err = consumer.Close() + if err != nil { + log.Fatalf( + "Failed to close consumer, topic: %s, groupId: %s, clientId: %s, ErrMessage: %s\n", + topic, + groupID, + clientID, + err.Error(), + ) + } + fmt.Println("Consumer closed successfully.") + // ANCHOR_END: close + <-done +} + +func initEnv(conn *sql.DB) { + _, err := conn.Exec("CREATE DATABASE IF NOT EXISTS power") + if err != nil { + log.Fatal("Failed to create database, ErrMessage: " + err.Error()) + } + _, err = conn.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))") + if err != nil { + log.Fatal("Failed to create stable, ErrMessage: " + err.Error()) + } + _, err = conn.Exec("CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage, phase, groupid, location FROM power.meters") + if err != nil { + log.Fatal("Failed to create topic, ErrMessage: " + err.Error()) + } + go func() { + for i := 0; i < 10; i++ { + time.Sleep(time.Second) + _, err = conn.Exec("INSERT INTO power.d1001 USING power.meters TAGS (2, 'California.SanFrancisco') VALUES (NOW , 10.2, 219, 0.32)") + if err != nil { + log.Fatal("Failed to insert data, ErrMessage: " + err.Error()) + } + } + done <- struct{}{} + }() +} diff --git a/docs/examples/go/tmq/ws/main.go b/docs/examples/go/tmq/ws/main.go index 70ea3af0b3..aaed8395e0 100644 --- a/docs/examples/go/tmq/ws/main.go +++ b/docs/examples/go/tmq/ws/main.go @@ -1,141 +1,197 @@ -package main - -import ( - "database/sql" - "fmt" - "log" - "time" - - "github.com/taosdata/driver-go/v3/common" - tmqcommon "github.com/taosdata/driver-go/v3/common/tmq" - _ "github.com/taosdata/driver-go/v3/taosWS" - "github.com/taosdata/driver-go/v3/ws/tmq" -) - -var done = make(chan struct{}) - -func main() { - // init env - taosDSN := "root:taosdata@ws(127.0.0.1:6041)/" - conn, err := sql.Open("taosWS", taosDSN) - if err != nil { - log.Fatalln("Failed to connect to " + taosDSN + "; ErrMessage: " + err.Error()) - } - defer func() { - conn.Close() - }() - 
initEnv(conn) - // ANCHOR: create_consumer - // create consumer - wsUrl := "ws://127.0.0.1:6041" - groupID := "group1" - clientID := "client1" - consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ - "ws.url": wsUrl, - "ws.message.channelLen": uint(0), - "ws.message.timeout": common.DefaultMessageTimeout, - "ws.message.writeWait": common.DefaultWriteWait, - "td.connect.user": "root", - "td.connect.pass": "taosdata", - "auto.offset.reset": "latest", - "msg.with.table.name": "true", - "enable.auto.commit": "true", - "auto.commit.interval.ms": "1000", - "group.id": groupID, - "client.id": clientID, - }) - if err != nil { - log.Fatalln("Failed to create websocket consumer, host : " + wsUrl + "; ErrMessage: " + err.Error()) - } - log.Println("Create consumer successfully, host: " + wsUrl + ", groupId: " + groupID + ", clientId: " + clientID) - - // ANCHOR_END: create_consumer - // ANCHOR: subscribe - err = consumer.Subscribe("topic_meters", nil) - if err != nil { - log.Fatalln("Failed to subscribe topic_meters, ErrMessage: " + err.Error()) - } - log.Println("Subscribe topics successfully") - for i := 0; i < 50; i++ { - ev := consumer.Poll(100) - if ev != nil { - switch e := ev.(type) { - case *tmqcommon.DataMessage: - // process your data here - fmt.Printf("data:%v\n", e) - // ANCHOR: commit_offset - // commit offset - _, err = consumer.CommitOffsets([]tmqcommon.TopicPartition{e.TopicPartition}) - if err != nil { - log.Fatalln("Failed to commit offset, ErrMessage: " + err.Error()) - } - log.Println("Commit offset manually successfully.") - // ANCHOR_END: commit_offset - case tmqcommon.Error: - fmt.Printf("%% Error: %v: %v\n", e.Code(), e) - log.Fatalln("Failed to poll data, ErrMessage: " + err.Error()) - } - } - } - // ANCHOR_END: subscribe - // ANCHOR: seek - // get assignment - partitions, err := consumer.Assignment() - if err != nil { - log.Fatal("Failed to get assignment, ErrMessage: " + err.Error()) - } - fmt.Println("Now assignment:", partitions) - for i := 0; i < len(partitions); i++ { - // seek to the beginning - err = consumer.Seek(tmqcommon.TopicPartition{ - Topic: partitions[i].Topic, - Partition: partitions[i].Partition, - Offset: 0, - }, 0) - if err != nil { - log.Fatalln("Failed to execute seek example, ErrMessage: " + err.Error()) - } - } - fmt.Println("Assignment seek to beginning successfully") - // ANCHOR_END: seek - // ANCHOR: close - // unsubscribe - err = consumer.Unsubscribe() - if err != nil { - log.Fatal("Failed to unsubscribe consumer, ErrMessage: " + err.Error()) - } - fmt.Println("Consumer unsubscribed successfully.") - // close consumer - err = consumer.Close() - if err != nil { - log.Fatal("Failed to close consumer, ErrMessage: " + err.Error()) - } - fmt.Println("Consumer closed successfully.") - // ANCHOR_END: close - <-done -} - -func initEnv(conn *sql.DB) { - _, err := conn.Exec("CREATE DATABASE IF NOT EXISTS power") - if err != nil { - log.Fatal("Failed to create database, ErrMessage: " + err.Error()) - } - _, err = conn.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))") - if err != nil { - log.Fatal("Failed to create stable, ErrMessage: " + err.Error()) - } - _, err = conn.Exec("CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage, phase, groupid, location FROM power.meters") - if err != nil { - log.Fatal("Failed to create topic, ErrMessage: " + err.Error()) - } - go func() { - for i := 0; i < 10; i++ { - time.Sleep(time.Second) - _, err = 
conn.Exec("INSERT INTO power.d1001 USING power.meters TAGS (2, 'California.SanFrancisco') VALUES (NOW , 10.2, 219, 0.32)") - if err != nil { - log.Fatal("Failed to insert data, ErrMessage: " + err.Error()) - } - } - done <- struct{}{} - }() -} +package main + +import ( + "database/sql" + "fmt" + "log" + "time" + + "github.com/taosdata/driver-go/v3/common" + tmqcommon "github.com/taosdata/driver-go/v3/common/tmq" + _ "github.com/taosdata/driver-go/v3/taosWS" + "github.com/taosdata/driver-go/v3/ws/tmq" +) + +var done = make(chan struct{}) +var groupID string +var clientID string +var host string +var topic string + +func main() { + // init env + taosDSN := "root:taosdata@ws(127.0.0.1:6041)/" + conn, err := sql.Open("taosWS", taosDSN) + if err != nil { + log.Fatalln("Failed to connect to " + taosDSN + ", ErrMessage: " + err.Error()) + } + defer func() { + conn.Close() + }() + initEnv(conn) + // ANCHOR: create_consumer + // create consumer + wsUrl := "ws://127.0.0.1:6041" + groupID = "group1" + clientID = "client1" + host = "127.0.0.1" + consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "ws.url": wsUrl, + "ws.message.channelLen": uint(0), + "ws.message.timeout": common.DefaultMessageTimeout, + "ws.message.writeWait": common.DefaultWriteWait, + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "auto.offset.reset": "latest", + "msg.with.table.name": "true", + "enable.auto.commit": "true", + "auto.commit.interval.ms": "1000", + "group.id": groupID, + "client.id": clientID, + }) + if err != nil { + log.Fatalf( + "Failed to create websocket consumer, host: %s, groupId: %s, clientId: %s, ErrMessage: %s\n", + host, + groupID, + clientID, + err.Error(), + ) + } + log.Printf("Create consumer successfully, host: %s, groupId: %s, clientId: %s\n", host, groupID, clientID) + + // ANCHOR_END: create_consumer + // ANCHOR: subscribe + topic = "topic_meters" + err = consumer.Subscribe(topic, nil) + if err != nil { + log.Fatalf( + "Failed to subscribe topic_meters, topic: %s, groupId: %s, clientId: %s, ErrMessage: %s\n", + topic, + groupID, + clientID, + err.Error(), + ) + } + log.Println("Subscribe topics successfully") + for i := 0; i < 50; i++ { + ev := consumer.Poll(100) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + // process your data here + fmt.Printf("data:%v\n", e) + // ANCHOR: commit_offset + // commit offset + _, err = consumer.CommitOffsets([]tmqcommon.TopicPartition{e.TopicPartition}) + if err != nil { + log.Fatalf( + "Failed to commit offset, topic: %s, groupId: %s, clientId: %s, offset %s, ErrMessage: %s\n", + topic, + groupID, + clientID, + e.TopicPartition, + err.Error(), + ) + } + log.Println("Commit offset manually successfully.") + // ANCHOR_END: commit_offset + case tmqcommon.Error: + log.Fatalf( + "Failed to poll data, topic: %s, groupId: %s, clientId: %s, ErrMessage: %s\n", + topic, + groupID, + clientID, + e.Error(), + ) + } + } + } + // ANCHOR_END: subscribe + // ANCHOR: seek + // get assignment + partitions, err := consumer.Assignment() + if err != nil { + log.Fatalf( + "Failed to get assignment, topic: %s, groupId: %s, clientId: %s, ErrMessage: %s\n", + topic, + groupID, + clientID, + err.Error(), + ) + } + fmt.Println("Now assignment:", partitions) + for i := 0; i < len(partitions); i++ { + // seek to the beginning + err = consumer.Seek(tmqcommon.TopicPartition{ + Topic: partitions[i].Topic, + Partition: partitions[i].Partition, + Offset: 0, + }, 0) + if err != nil { + log.Fatalf( + "Failed to execute seek example, topic: %s, groupId: 
%s, clientId: %s, partition: %d, offset: %d, ErrMessage: %s\n", + topic, + groupID, + clientID, + partitions[i].Partition, + 0, + err.Error(), + ) + } + } + fmt.Println("Assignment seek to beginning successfully") + // ANCHOR_END: seek + // ANCHOR: close + // unsubscribe + err = consumer.Unsubscribe() + if err != nil { + log.Fatalf( + "Failed to unsubscribe consumer, topic: %s, groupId: %s, clientId: %s, ErrMessage: %s\n", + topic, + groupID, + clientID, + err.Error(), + ) + } + fmt.Println("Consumer unsubscribed successfully.") + // close consumer + err = consumer.Close() + if err != nil { + log.Fatalf( + "Failed to close consumer, topic: %s, groupId: %s, clientId: %s, ErrMessage: %s\n", + topic, + groupID, + clientID, + err.Error(), + ) + } + fmt.Println("Consumer closed successfully.") + // ANCHOR_END: close + <-done +} + +func initEnv(conn *sql.DB) { + _, err := conn.Exec("CREATE DATABASE IF NOT EXISTS power") + if err != nil { + log.Fatal("Failed to create database, ErrMessage: " + err.Error()) + } + _, err = conn.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))") + if err != nil { + log.Fatal("Failed to create stable, ErrMessage: " + err.Error()) + } + _, err = conn.Exec("CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage, phase, groupid, location FROM power.meters") + if err != nil { + log.Fatal("Failed to create topic, ErrMessage: " + err.Error()) + } + go func() { + for i := 0; i < 10; i++ { + time.Sleep(time.Second) + _, err = conn.Exec("INSERT INTO power.d1001 USING power.meters TAGS (2, 'California.SanFrancisco') VALUES (NOW , 10.2, 219, 0.32)") + if err != nil { + log.Fatal("Failed to insert data, ErrMessage: " + err.Error()) + } + } + done <- struct{}{} + }() +} From 1590a51e5789b3059e412599375994b940c5c4c4 Mon Sep 17 00:00:00 2001 From: sheyanjie-qq <249478495@qq.com> Date: Fri, 16 Aug 2024 15:18:38 +0800 Subject: [PATCH 095/181] mod rust sample code --- .../com/taos/example/ConsumerLoopFull.java | 2 +- .../rust/nativeexample/examples/insert.rs | 22 ++++----- .../rust/nativeexample/examples/tmq.rs | 46 +++++++++++++++---- .../rust/restexample/examples/insert.rs | 22 ++++----- .../examples/rust/restexample/examples/tmq.rs | 46 +++++++++++++++---- docs/zh/08-develop/05-stmt.md | 4 +- docs/zh/14-reference/05-connector/14-java.mdx | 2 +- 7 files changed, 98 insertions(+), 46 deletions(-) diff --git a/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java b/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java index ec9faf383e..a399f3aa6a 100644 --- a/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java +++ b/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java @@ -47,7 +47,7 @@ public class ConsumerLoopFull { return consumer; } catch (Exception ex) { // please refer to the JDBC specifications for detailed exceptions info - System.out.printf("Failed to create websocket consumer, host: %s, groupId: %s, clientId: %s, %sErrMessage: %s%n", + System.out.printf("Failed to create native consumer, host: %s, groupId: %s, clientId: %s, %sErrMessage: %s%n", config.getProperty("bootstrap.servers"), config.getProperty("group.id"), config.getProperty("client.id"), diff --git a/docs/examples/rust/nativeexample/examples/insert.rs b/docs/examples/rust/nativeexample/examples/insert.rs index e78381fc61..585cb69c52 100644 --- a/docs/examples/rust/nativeexample/examples/insert.rs +++ 
b/docs/examples/rust/nativeexample/examples/insert.rs @@ -9,22 +9,22 @@ async fn main() -> anyhow::Result<()> { // ANCHOR: insert_data - match taos.exec(r#"INSERT INTO - power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') - VALUES - (NOW + 1a, 10.30000, 219, 0.31000) - (NOW + 2a, 12.60000, 218, 0.33000) - (NOW + 3a, 12.30000, 221, 0.31000) - power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') - VALUES - (NOW + 1a, 10.30000, 218, 0.25000) "#).await{ + let insert_sql = r#"INSERT INTO + power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') + VALUES + (NOW + 1a, 10.30000, 219, 0.31000) + (NOW + 2a, 12.60000, 218, 0.33000) + (NOW + 3a, 12.30000, 221, 0.31000) + power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') + VALUES + (NOW + 1a, 10.30000, 218, 0.25000) "#; + match taos.exec(insert_sql).await{ Ok(affected_rows) => println!("Successfully inserted {} rows to power.meters.", affected_rows), Err(err) => { - eprintln!("Failed to insert data to power.meters, ErrMessage: {}", err); + eprintln!("Failed to insert data to power.meters, sql: {}, ErrMessage: {}", insert_sql, err); return Err(err.into()); } } - // ANCHOR_END: insert_data Ok(()) diff --git a/docs/examples/rust/nativeexample/examples/tmq.rs b/docs/examples/rust/nativeexample/examples/tmq.rs index f312bee0f2..2f8a9e5995 100644 --- a/docs/examples/rust/nativeexample/examples/tmq.rs +++ b/docs/examples/rust/nativeexample/examples/tmq.rs @@ -3,6 +3,8 @@ use std::str::FromStr; use chrono::Local; use chrono::DateTime; use taos::*; +use std::thread; +use tokio::runtime::Runtime; #[tokio::main] async fn main() -> anyhow::Result<()> { @@ -53,17 +55,38 @@ async fn main() -> anyhow::Result<()> { consumer } Err(err) => { - eprintln!("Failed to create websocket consumer, dsn: {}, ErrMessage: {}", dsn, err); + eprintln!("Failed to create native consumer, dsn: {}, groupId: {}, clientId: {}, ErrMessage: {:?}", dsn, group_id, client_id, err); return Err(err.into()); } }; // ANCHOR_END: create_consumer_ac + thread::spawn(move || { + let rt = Runtime::new().unwrap(); + + rt.block_on(async { + let taos_insert = TaosBuilder::from_dsn(&dsn).unwrap().build().await.unwrap(); + for i in 0..50 { + let insert_sql = format!(r#"INSERT INTO + power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') + VALUES + (NOW, 10.30000, {}, 0.31000)"#, i); + if let Err(e) = taos_insert.exec(insert_sql).await { + eprintln!("Failed to execute insert: {:?}", e); + } + tokio::time::sleep(Duration::from_millis(10)).await; + } + }); + + }).join().unwrap(); + + // ANCHOR: consume - match consumer.subscribe(["topic_meters"]).await{ + let topic = "topic_meters"; + match consumer.subscribe([topic]).await{ Ok(_) => println!("Subscribe topics successfully."), Err(err) => { - eprintln!("Failed to subscribe topic_meters, ErrMessage: {}", err); + eprintln!("Failed to subscribe topic_meters, ErrMessage: {:?}", err); return Err(err.into()); } } @@ -94,13 +117,14 @@ async fn main() -> anyhow::Result<()> { if let Some(data) = message.into_data() { while let Some(block) = data.fetch_raw_block().await? 
{ let records: Vec = block.deserialize().try_collect()?; + // Add your data processing logic here println!("** read {} records: {:#?}\n", records.len(), records); } } Ok(()) }) .await.map_err(|e| { - eprintln!("Failed to poll data; ErrMessage: {:?}", e); + eprintln!("Failed to poll data, topic: {}, groupId: {}, clientId: {}, ErrMessage: {:?}", topic, group_id, client_id, e); e })?; @@ -110,14 +134,14 @@ async fn main() -> anyhow::Result<()> { consumer .stream() .try_for_each(|(offset, message)| async { - let topic = offset.topic(); // the vgroup id, like partition id in kafka. let vgroup_id = offset.vgroup_id(); - println!("* in vgroup id {vgroup_id} of topic {topic}\n"); + println!("* in vgroup id {} of topic {}\n", vgroup_id, topic); if let Some(data) = message.into_data() { while let Some(block) = data.fetch_raw_block().await? { let records: Vec = block.deserialize().try_collect()?; + // Add your data processing logic here println!("** read {} records: {:#?}\n", records.len(), records); } } @@ -125,14 +149,15 @@ async fn main() -> anyhow::Result<()> { match consumer.commit(offset).await{ Ok(_) => println!("Commit offset manually successfully."), Err(err) => { - eprintln!("Failed to commit offset manually, ErrMessage: {}", err); + eprintln!("Failed to commit offset manually, topic: {}, groupId: {}, clientId: {}, vGroupId: {}, ErrMessage: {:?}", + topic, group_id, client_id, vgroup_id, err); return Err(err.into()); } } Ok(()) }) .await.map_err(|e| { - eprintln!("Failed to poll data, ErrMessage: {:?}", e); + eprintln!("Failed to poll data, topic: {}, groupId: {}, clientId: {}, ErrMessage: {:?}", topic, group_id, client_id, e); e })?; // ANCHOR_END: consumer_commit_manually @@ -152,7 +177,7 @@ async fn main() -> anyhow::Result<()> { let begin = assignment.begin(); let end = assignment.end(); println!( - "topic: {}, vgroup_id: {}, current offset: {} begin {}, end: {}", + "topic: {}, vgroup_id: {}, current offset: {}, begin {}, end: {}", topic, vgroup_id, current, @@ -163,7 +188,8 @@ async fn main() -> anyhow::Result<()> { match consumer.offset_seek(topic, vgroup_id, begin).await{ Ok(_) => (), Err(err) => { - eprintln!("Failed to seek offset, ErrMessage: {}", err); + eprintln!("Failed to seek offset, topic: {}, groupId: {}, clientId: {}, vGroupId: {}, begin: {}, ErrMessage: {:?}", + topic, group_id, client_id, vgroup_id, begin, err); return Err(err.into()); } } diff --git a/docs/examples/rust/restexample/examples/insert.rs b/docs/examples/rust/restexample/examples/insert.rs index 537e531501..be85c1f82c 100644 --- a/docs/examples/rust/restexample/examples/insert.rs +++ b/docs/examples/rust/restexample/examples/insert.rs @@ -9,22 +9,22 @@ async fn main() -> anyhow::Result<()> { // ANCHOR: insert_data - match taos.exec(r#"INSERT INTO - power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') - VALUES - (NOW + 1a, 10.30000, 219, 0.31000) - (NOW + 2a, 12.60000, 218, 0.33000) - (NOW + 3a, 12.30000, 221, 0.31000) - power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') - VALUES - (NOW + 1a, 10.30000, 218, 0.25000) "#).await{ + let insert_sql = r#"INSERT INTO + power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') + VALUES + (NOW + 1a, 10.30000, 219, 0.31000) + (NOW + 2a, 12.60000, 218, 0.33000) + (NOW + 3a, 12.30000, 221, 0.31000) + power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') + VALUES + (NOW + 1a, 10.30000, 218, 0.25000) "#; + match taos.exec(insert_sql).await{ Ok(affected_rows) => println!("Successfully inserted {} rows to power.meters.", 
affected_rows), Err(err) => { - eprintln!("Failed to insert data to power.meters, ErrMessage: {}", dsn, err); + eprintln!("Failed to insert data to power.meters, sql: {}, ErrMessage: {}", insert_sql, err); return Err(err.into()); } } - // ANCHOR_END: insert_data Ok(()) diff --git a/docs/examples/rust/restexample/examples/tmq.rs b/docs/examples/rust/restexample/examples/tmq.rs index 0a0214d258..670ae0ca3c 100644 --- a/docs/examples/rust/restexample/examples/tmq.rs +++ b/docs/examples/rust/restexample/examples/tmq.rs @@ -3,6 +3,8 @@ use std::str::FromStr; use chrono::Local; use chrono::DateTime; use taos::*; +use std::thread; +use tokio::runtime::Runtime; #[tokio::main] async fn main() -> anyhow::Result<()> { @@ -53,17 +55,38 @@ async fn main() -> anyhow::Result<()> { consumer } Err(err) => { - eprintln!("Failed to create websocket consumer, dsn: {}, ErrMessage: {}", dsn, err); + eprintln!("Failed to create websocket consumer, dsn: {}, groupId: {}, clientId: {}, ErrMessage: {:?}", dsn, group_id, client_id, err); return Err(err.into()); } }; // ANCHOR_END: create_consumer_ac + thread::spawn(move || { + let rt = Runtime::new().unwrap(); + + rt.block_on(async { + let taos_insert = TaosBuilder::from_dsn(&dsn).unwrap().build().await.unwrap(); + for i in 0..50 { + let insert_sql = format!(r#"INSERT INTO + power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') + VALUES + (NOW, 10.30000, {}, 0.31000)"#, i); + if let Err(e) = taos_insert.exec(insert_sql).await { + eprintln!("Failed to execute insert: {:?}", e); + } + tokio::time::sleep(Duration::from_millis(10)).await; + } + }); + + }).join().unwrap(); + + // ANCHOR: consume - match consumer.subscribe(["topic_meters"]).await{ + let topic = "topic_meters"; + match consumer.subscribe([topic]).await{ Ok(_) => println!("Subscribe topics successfully."), Err(err) => { - eprintln!("Failed to subscribe topic_meters, ErrMessage: {}", err); + eprintln!("Failed to subscribe topic_meters, ErrMessage: {:?}", err); return Err(err.into()); } } @@ -94,13 +117,14 @@ async fn main() -> anyhow::Result<()> { if let Some(data) = message.into_data() { while let Some(block) = data.fetch_raw_block().await? { let records: Vec = block.deserialize().try_collect()?; + // Add your data processing logic here println!("** read {} records: {:#?}\n", records.len(), records); } } Ok(()) }) .await.map_err(|e| { - eprintln!("Failed to poll data; ErrMessage: {:?}", e); + eprintln!("Failed to poll data, topic: {}, groupId: {}, clientId: {}, ErrMessage: {:?}", topic, group_id, client_id, e); e })?; @@ -110,14 +134,14 @@ async fn main() -> anyhow::Result<()> { consumer .stream() .try_for_each(|(offset, message)| async { - let topic = offset.topic(); // the vgroup id, like partition id in kafka. let vgroup_id = offset.vgroup_id(); - println!("* in vgroup id {vgroup_id} of topic {topic}\n"); + println!("* in vgroup id {} of topic {}\n", vgroup_id, topic); if let Some(data) = message.into_data() { while let Some(block) = data.fetch_raw_block().await? 
{ let records: Vec = block.deserialize().try_collect()?; + // Add your data processing logic here println!("** read {} records: {:#?}\n", records.len(), records); } } @@ -125,14 +149,15 @@ async fn main() -> anyhow::Result<()> { match consumer.commit(offset).await{ Ok(_) => println!("Commit offset manually successfully."), Err(err) => { - eprintln!("Failed to commit offset manually, ErrMessage: {}", err); + eprintln!("Failed to commit offset manually, topic: {}, groupId: {}, clientId: {}, vGroupId: {}, ErrMessage: {:?}", + topic, group_id, client_id, vgroup_id, err); return Err(err.into()); } } Ok(()) }) .await.map_err(|e| { - eprintln!("Failed to poll data, ErrMessage: {:?}", e); + eprintln!("Failed to poll data, topic: {}, groupId: {}, clientId: {}, ErrMessage: {:?}", topic, group_id, client_id, e); e })?; // ANCHOR_END: consumer_commit_manually @@ -152,7 +177,7 @@ async fn main() -> anyhow::Result<()> { let begin = assignment.begin(); let end = assignment.end(); println!( - "topic: {}, vgroup_id: {}, current offset: {} begin {}, end: {}", + "topic: {}, vgroup_id: {}, current offset: {}, begin {}, end: {}", topic, vgroup_id, current, @@ -163,7 +188,8 @@ async fn main() -> anyhow::Result<()> { match consumer.offset_seek(topic, vgroup_id, begin).await{ Ok(_) => (), Err(err) => { - eprintln!("Failed to seek offset, ErrMessage: {}", err); + eprintln!("Failed to seek offset, topic: {}, groupId: {}, clientId: {}, vGroupId: {}, begin: {}, ErrMessage: {:?}", + topic, group_id, client_id, vgroup_id, begin, err); return Err(err.into()); } } diff --git a/docs/zh/08-develop/05-stmt.md b/docs/zh/08-develop/05-stmt.md index 9dfd9f56e3..e659177c94 100644 --- a/docs/zh/08-develop/05-stmt.md +++ b/docs/zh/08-develop/05-stmt.md @@ -31,7 +31,7 @@ import TabItem from "@theme/TabItem"; ``` -这是一个[更详细的参数绑定示例](https://github.com/taosdata/TDengine/blob/main/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WSParameterBindingFullDemo.java) +这是一个[更详细的参数绑定示例](https://github.com/taosdata/TDengine/blob/main/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingFullDemo.java) @@ -79,7 +79,7 @@ import TabItem from "@theme/TabItem"; {{#include docs/examples/java/src/main/java/com/taos/example/ParameterBindingBasicDemo.java:para_bind}} ``` -这是一个[更详细的参数绑定示例](https://github.com/taosdata/TDengine/blob/main/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ParameterBindingFullDemo.java) +这是一个[更详细的参数绑定示例](https://github.com/taosdata/TDengine/blob/main/docs/examples/java/src/main/java/com/taos/example/ParameterBindingFullDemo.java) diff --git a/docs/zh/14-reference/05-connector/14-java.mdx b/docs/zh/14-reference/05-connector/14-java.mdx index f355aea621..a752867b3f 100644 --- a/docs/zh/14-reference/05-connector/14-java.mdx +++ b/docs/zh/14-reference/05-connector/14-java.mdx @@ -141,7 +141,7 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对 由于历史原因,TDengine中的BINARY底层不是真正的二进制数据,已不建议使用。请用VARBINARY类型代替。 GEOMETRY类型是little endian字节序的二进制数据,符合WKB规范。详细信息请参考 [数据类型](../../taos-sql/data-type/#数据类型) WKB规范请参考[Well-Known Binary (WKB)](https://libgeos.org/specifications/wkb/) -对于java连接器,可以使用jts库来方便的创建GEOMETRY类型对象,序列化后写入TDengine,这里有一个样例[Geometry示例](https://github.com/taosdata/TDengine/blob/3.0/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/GeometryDemo.java) +对于java连接器,可以使用jts库来方便的创建GEOMETRY类型对象,序列化后写入TDengine,这里有一个样例[Geometry示例](https://github.com/taosdata/TDengine/blob/main/docs/examples/java/src/main/java/com/taos/example/GeometryDemo.java) ## 示例程序汇总 From 8c368a6fa665a1dafb18532dae364a49622d3bf4 Mon Sep 17 
00:00:00 2001 From: t_max <1172915550@qq.com> Date: Fri, 16 Aug 2024 15:22:18 +0800 Subject: [PATCH 096/181] docs: update the output of go and c# examples --- docs/examples/csharp/subscribe/Program.cs | 42 +++++++++++---------- docs/examples/csharp/wssubscribe/Program.cs | 31 ++++++++------- docs/examples/go/tmq/native/main.go | 2 +- docs/examples/go/tmq/ws/main.go | 2 +- 4 files changed, 42 insertions(+), 35 deletions(-) diff --git a/docs/examples/csharp/subscribe/Program.cs b/docs/examples/csharp/subscribe/Program.cs index 2ec73aae48..50988d0c5d 100644 --- a/docs/examples/csharp/subscribe/Program.cs +++ b/docs/examples/csharp/subscribe/Program.cs @@ -111,11 +111,12 @@ namespace TMQExample catch (Exception e) { // handle other exceptions - Console.WriteLine($"Failed to create native consumer, " + - $"host: {_host}, " + - $"groupId: {_groupId}, " + - $"clientId: {_clientId}, " + - $"ErrMessage: {e.Message}"); + Console.WriteLine( + $"Failed to create native consumer, " + + $"host: {_host}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"ErrMessage: {e.Message}"); throw; } @@ -163,11 +164,12 @@ namespace TMQExample catch (Exception e) { // handle other exceptions - Console.WriteLine($"Failed to poll data, " + - $"topic: {_topic}, " + - $"groupId: {_groupId}, " + - $"clientId: {_clientId}, " + - $"ErrMessage: {e.Message}"); + Console.WriteLine( + $"Failed to poll data, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"ErrMessage: {e.Message}"); throw; } // ANCHOR_END: subscribe @@ -186,13 +188,14 @@ namespace TMQExample { consumer.Seek(new TopicPartitionOffset(topicPartition.Topic, topicPartition.Partition, 0)); } + Console.WriteLine("Assignment seek to beginning successfully"); } catch (TDengineError e) { // handle TDengine error Console.WriteLine( - $"Failed to execute seek example, " + + $"Failed to seek offset, " + $"topic: {_topic}, " + $"groupId: {_groupId}, " + $"clientId: {_clientId}, " + @@ -204,12 +207,13 @@ namespace TMQExample catch (Exception e) { // handle other exceptions - Console.WriteLine($"Failed to execute seek example, " + - $"topic: {_topic}, " + - $"groupId: {_groupId}, " + - $"clientId: {_clientId}, " + - $"offset: 0, " + - $"ErrMessage: {e.Message}"); + Console.WriteLine( + $"Failed to seek offset, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"offset: 0, " + + $"ErrMessage: {e.Message}"); throw; } // ANCHOR_END: seek @@ -240,7 +244,7 @@ namespace TMQExample { // handle TDengine error Console.WriteLine( - $"Failed to execute commit example, " + + $"Failed to commit offset, " + $"topic: {_topic}, " + $"groupId: {_groupId}, " + $"clientId: {_clientId}, " + @@ -253,7 +257,7 @@ namespace TMQExample { // handle other exceptions Console.WriteLine( - $"Failed to execute commit example, " + + $"Failed to commit offset, " + $"topic: {_topic}, " + $"groupId: {_groupId}, " + $"clientId: {_clientId}, " + diff --git a/docs/examples/csharp/wssubscribe/Program.cs b/docs/examples/csharp/wssubscribe/Program.cs index 5f5afc575a..939189cabd 100644 --- a/docs/examples/csharp/wssubscribe/Program.cs +++ b/docs/examples/csharp/wssubscribe/Program.cs @@ -116,11 +116,12 @@ namespace TMQExample catch (Exception e) { // handle other exceptions - Console.WriteLine($"Failed to create native consumer, " + - $"host: {_host}, " + - $"groupId: {_groupId}, " + - $"clientId: {_clientId}, " + - $"ErrMessage: {e.Message}"); + Console.WriteLine( + $"Failed to create native consumer, " + + $"host: 
{_host}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"ErrMessage: {e.Message}"); throw; } @@ -191,13 +192,14 @@ namespace TMQExample { consumer.Seek(new TopicPartitionOffset(topicPartition.Topic, topicPartition.Partition, 0)); } + Console.WriteLine("Assignment seek to beginning successfully"); } catch (TDengineError e) { // handle TDengine error Console.WriteLine( - $"Failed to execute seek example, " + + $"Failed to seek offset, " + $"topic: {_topic}, " + $"groupId: {_groupId}, " + $"clientId: {_clientId}, " + @@ -209,12 +211,13 @@ namespace TMQExample catch (Exception e) { // handle other exceptions - Console.WriteLine($"Failed to execute seek example, " + - $"topic: {_topic}, " + - $"groupId: {_groupId}, " + - $"clientId: {_clientId}, " + - $"offset: 0, " + - $"ErrMessage: {e.Message}"); + Console.WriteLine( + $"Failed to seek offset, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"offset: 0, " + + $"ErrMessage: {e.Message}"); throw; } // ANCHOR_END: seek @@ -245,7 +248,7 @@ namespace TMQExample { // handle TDengine error Console.WriteLine( - $"Failed to execute commit example, " + + $"Failed to commit offset, " + $"topic: {_topic}, " + $"groupId: {_groupId}, " + $"clientId: {_clientId}, " + @@ -258,7 +261,7 @@ namespace TMQExample { // handle other exceptions Console.WriteLine( - $"Failed to execute commit example, " + + $"Failed to commit offset, " + $"topic: {_topic}, " + $"groupId: {_groupId}, " + $"clientId: {_clientId}, " + diff --git a/docs/examples/go/tmq/native/main.go b/docs/examples/go/tmq/native/main.go index 1d4a22b880..8d667abc18 100644 --- a/docs/examples/go/tmq/native/main.go +++ b/docs/examples/go/tmq/native/main.go @@ -112,7 +112,7 @@ func main() { }, 0) if err != nil { log.Fatalf( - "Failed to execute seek example, topic: %s, groupId: %s, clientId: %s, partition: %d, offset: %d, ErrMessage: %s\n", + "Failed to execute seek offset, topic: %s, groupId: %s, clientId: %s, partition: %d, offset: %d, ErrMessage: %s\n", topic, groupID, clientID, diff --git a/docs/examples/go/tmq/ws/main.go b/docs/examples/go/tmq/ws/main.go index aaed8395e0..9ea4d72b39 100644 --- a/docs/examples/go/tmq/ws/main.go +++ b/docs/examples/go/tmq/ws/main.go @@ -130,7 +130,7 @@ func main() { }, 0) if err != nil { log.Fatalf( - "Failed to execute seek example, topic: %s, groupId: %s, clientId: %s, partition: %d, offset: %d, ErrMessage: %s\n", + "Failed to seek offset, topic: %s, groupId: %s, clientId: %s, partition: %d, offset: %d, ErrMessage: %s\n", topic, groupID, clientID, From 6884b5ac32638053c48b1fbc3536f08b8cec442f Mon Sep 17 00:00:00 2001 From: zhiyong Date: Fri, 16 Aug 2024 15:31:29 +0800 Subject: [PATCH 097/181] docs: add fqdn and restapi example --- docs/en/14-reference/05-connectors/60-rest-api.mdx | 2 ++ docs/en/27-train-faq/01-faq.md | 6 +++++- docs/zh/14-reference/05-connector/60-rest-api.mdx | 2 ++ docs/zh/27-train-faq/01-faq.md | 3 +++ 4 files changed, 12 insertions(+), 1 deletion(-) diff --git a/docs/en/14-reference/05-connectors/60-rest-api.mdx b/docs/en/14-reference/05-connectors/60-rest-api.mdx index b059e647ed..2c3cd21f41 100644 --- a/docs/en/14-reference/05-connectors/60-rest-api.mdx +++ b/docs/en/14-reference/05-connectors/60-rest-api.mdx @@ -448,6 +448,7 @@ Response body: ```bash curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sql + curl -L -H "Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04" -d "select * from 
demo.d1001" 192.168.0.1:6041/rest/sql ``` Response body: @@ -499,6 +500,7 @@ Response body: ```bash curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "create database demo" 192.168.0.1:6041/rest/sql + curl -L -H "Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04" -d "create database demo" 192.168.0.1:6041/rest/sql ``` Response body: diff --git a/docs/en/27-train-faq/01-faq.md b/docs/en/27-train-faq/01-faq.md index e8e1386197..468b340abf 100644 --- a/docs/en/27-train-faq/01-faq.md +++ b/docs/en/27-train-faq/01-faq.md @@ -69,7 +69,7 @@ This error indicates that the client could not connect to the server. Perform th 11. You can also use the TDengine CLI to diagnose network issues. For more information, see [Problem Diagnostics](https://docs.tdengine.com/operation/diagnose/). -### 3. How can I resolve the "Unable to resolve FQDN" error? +### 3. How can I resolve the "Unable to resolve FQDN" error? Clients and dnodes must be able to resolve the FQDN of each required node. You can confirm your configuration as follows: @@ -164,3 +164,7 @@ For more information, see [taosAdapter](https://docs.tdengine.com/reference/taos OOM errors are thrown by the operating system when its memory, including swap, becomes insufficient and it needs to terminate processes to remain operational. Most OOM errors in TDengine occur for one of the following reasons: free memory is less than the value of `vm.min_free_kbytes` or free memory is less than the size of the request. If TDengine occupies reserved memory, an OOM error can occur even when free memory is sufficient. TDengine preallocates memory to each vnode. The number of vnodes per database is determined by the `vgroups` parameter, and the amount of memory per vnode is determined by the `buffer` parameter. To prevent OOM errors from occurring, ensure that you prepare sufficient memory on your hosts to support the number of vnodes that your deployment requires. Configure an appropriately sized swap space. If you continue to receive OOM errors, your SQL statements may be querying too much data for your system. TDengine Enterprise Edition includes optimized memory management that increases stability for enterprise customers. + +### 14. How can I resolve the "some vnode/qnode/mnode(s) out of service" error? + +The client has not configured FQDN for all servers. For example, the server has 3 nodes, while the client has only configured FQDN for 1 node. 
FQDN configuration refer to [How can I resolve the "Unable to resolve FQDN" error?](#FQDN) diff --git a/docs/zh/14-reference/05-connector/60-rest-api.mdx b/docs/zh/14-reference/05-connector/60-rest-api.mdx index b6d6ec3b4a..a804229d27 100644 --- a/docs/zh/14-reference/05-connector/60-rest-api.mdx +++ b/docs/zh/14-reference/05-connector/60-rest-api.mdx @@ -444,6 +444,7 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata ```bash curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sql + curl -L -H "Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sql ``` 返回值: @@ -495,6 +496,7 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata ```bash curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "create database demo" 192.168.0.1:6041/rest/sql + curl -L -H "Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04" -d "create database demo" 192.168.0.1:6041/rest/sql ``` 返回值: diff --git a/docs/zh/27-train-faq/01-faq.md b/docs/zh/27-train-faq/01-faq.md index 01cb42213e..5bc6a4df27 100644 --- a/docs/zh/27-train-faq/01-faq.md +++ b/docs/zh/27-train-faq/01-faq.md @@ -270,3 +270,6 @@ TDengine 在写入数据时如果有很严重的乱序写入问题,会严重 ### 28 修改database的root密码后,Grafana监控插件TDinsight无数据展示 TDinsight插件中展示的数据是通过taosKeeper和taosAdapter服务收集并存储于TD的log库中,在root密码修改后,需要同步更新taosKeeper和taosAdapter配置文件中对应的密码信息,然后重启taosKeeper和taosAdapter服务(注:若是集群需要重启每个节点上的对应服务)。 + +### 29 遇到报错 “some vnode/qnode/mnode(s) out of service” 怎么办? +客户端未配置所有服务端的 FQDN 解析。比如服务端有 3 个节点,客户端只配置了 1 个节点的 FQDN 解析。FQDN 配置参考:[一篇文章说清楚 TDengine 的 FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html) From a54e7bec9976628a5f5b772a45ead96b84729879 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Fri, 16 Aug 2024 15:34:34 +0800 Subject: [PATCH 098/181] fix TD-31500 --- packaging/tools/remove.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index 093c81eef4..7af64fab1e 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -209,11 +209,11 @@ function clean_service_on_launchctl() { } function remove_data_and_config() { - data_dir=`grep dataDir /etc/taos/taos.cfg | grep -v '#' | tail -n 1 | awk {'print $2'}` + data_dir=`grep dataDir /etc/${PREFIX}/${PREFIX}.cfg | grep -v '#' | tail -n 1 | awk {'print $2'}` if [ X"$data_dir" == X"" ]; then data_dir="/var/lib/${PREFIX}" fi - log_dir=`grep logDir /etc/taos/taos.cfg | grep -v '#' | tail -n 1 | awk {'print $2'}` + log_dir=`grep logDir /etc/${PREFIX}/${PREFIX}.cfg | grep -v '#' | tail -n 1 | awk {'print $2'}` if [ X"$log_dir" == X"" ]; then log_dir="/var/log/${PREFIX}" fi From 0cbbdf1b888a359e842937dba7022592c55cbbf4 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 16 Aug 2024 15:42:53 +0800 Subject: [PATCH 099/181] fix(query): if the return value of addTagPseudoColumnData is not success, not jump out. 
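The hunks below change doSetTagColumnData() from a void helper that long-jumps out of the operator on failure into one that hands its error code back to the caller, with "table not exist" treated as benign because the table may have been dropped while the scan was running. A condensed sketch of the resulting pattern, for illustration only (simplified from the diff below, not an exact copy of the TDengine source):

```c
// Sketch of the error-propagation pattern this commit introduces:
// return the code instead of calling T_LONG_JMP inside the helper.
static int32_t doSetTagColumnData(STableScanBase* pTableScanInfo, SSDataBlock* pBlock,
                                  SExecTaskInfo* pTaskInfo, int32_t rows) {
  int32_t    code = 0;
  SExprSupp* pSup = &pTableScanInfo->pseudoSup;
  if (pSup->numOfExprs > 0) {
    code = addTagPseudoColumnData(&pTableScanInfo->readHandle, pSup->pExprInfo, pSup->numOfExprs,
                                  pBlock, rows, pTaskInfo, &pTableScanInfo->metaCache);
    if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
      code = 0;  // ignore: the table may have been dropped during the scan procedure
    }
  }
  return code;  // callers check the code and return it themselves; no long jump here
}

// Caller side (see the scanoperator.c hunks below):
//   code = doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, pBlock->info.rows);
//   if (code) { return code; }
```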
--- source/libs/executor/inc/executorInt.h | 2 - source/libs/executor/src/scanoperator.c | 54 +++++++++++++++---------- 2 files changed, 32 insertions(+), 24 deletions(-) diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index d295e868e9..48adb22927 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -856,8 +856,6 @@ int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* de extern void doDestroyExchangeOperatorInfo(void* param); -int32_t doFilterImpl(SSDataBlock* pBlock, SFilterInfo* pFilterInfo, SColMatchInfo* pColMatchInfo, - SColumnInfoData** pResCol); int32_t doFilter(SSDataBlock* pBlock, SFilterInfo* pFilterInfo, SColMatchInfo* pColMatchInfo); int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int32_t numOfExpr, SSDataBlock* pBlock, int32_t rows, SExecTaskInfo* pTask, STableMetaCacheInfo* pCache); diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 1935f2b0b6..d72716b141 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -279,21 +279,20 @@ static int32_t doLoadBlockSMA(STableScanBase* pTableScanInfo, SSDataBlock* pBloc return code; } -static void doSetTagColumnData(STableScanBase* pTableScanInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo, - int32_t rows) { - if (pTableScanInfo->pseudoSup.numOfExprs > 0) { - SExprSupp* pSup = &pTableScanInfo->pseudoSup; - - int32_t code = addTagPseudoColumnData(&pTableScanInfo->readHandle, pSup->pExprInfo, pSup->numOfExprs, pBlock, rows, - pTaskInfo, &pTableScanInfo->metaCache); +static int32_t doSetTagColumnData(STableScanBase* pTableScanInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo, + int32_t rows) { + int32_t code = 0; + SExprSupp* pSup = &pTableScanInfo->pseudoSup; + if (pSup->numOfExprs > 0) { + code = addTagPseudoColumnData(&pTableScanInfo->readHandle, pSup->pExprInfo, pSup->numOfExprs, pBlock, rows, + pTaskInfo, &pTableScanInfo->metaCache); // ignore the table not exists error, since this table may have been dropped during the scan procedure. - if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_PAR_TABLE_NOT_EXIST) { - T_LONG_JMP(pTaskInfo->env, code); + if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) { + code = 0; } - - // reset the error code. 
- terrno = 0; } + + return code; } bool applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo) { @@ -373,10 +372,10 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca qDebug("%s data block skipped, brange:%" PRId64 "-%" PRId64 ", rows:%" PRId64 ", uid:%" PRIu64, GET_TASKID(pTaskInfo), pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, pBlockInfo->id.uid); - doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, pBlock->info.rows); + code = doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, pBlock->info.rows); pCost->skipBlocks += 1; pAPI->tsdReader.tsdReaderReleaseDataBlock(pTableScanInfo->dataReader); - return TSDB_CODE_SUCCESS; + QUERY_CHECK_CODE(code, lino, _end); } else if (*status == FUNC_DATA_REQUIRED_SMA_LOAD) { pCost->loadBlockStatis += 1; loadSMA = true; // mark the operation of load sma; @@ -391,9 +390,9 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca if (success) { // failed to load the block sma data, data block statistics does not exist, load data block instead qDebug("%s data block SMA loaded, brange:%" PRId64 "-%" PRId64 ", rows:%" PRId64, GET_TASKID(pTaskInfo), pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows); - doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, pBlock->info.rows); + code = doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, pBlock->info.rows); pAPI->tsdReader.tsdReaderReleaseDataBlock(pTableScanInfo->dataReader); - return TSDB_CODE_SUCCESS; + QUERY_CHECK_CODE(code, lino, _end); } else { qDebug("%s failed to load SMA, since not all columns have SMA", GET_TASKID(pTaskInfo)); *status = FUNC_DATA_REQUIRED_DATA_LOAD; @@ -473,7 +472,11 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca qError("[loadDataBlock] p != pBlock"); return TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR; } - doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, pBlock->info.rows); + + code = doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, pBlock->info.rows); + if (code) { + return code; + } // restore the previous value pCost->totalRows -= pBlock->info.rows; @@ -912,7 +915,8 @@ static SSDataBlock* getOneRowResultBlock(SExecTaskInfo* pTaskInfo, STableScanBas } // set tag/tbname - doSetTagColumnData(pBase, pBlock, pTaskInfo, 1); + terrno = doSetTagColumnData(pBase, pBlock, pTaskInfo, 1); + return pBlock; } @@ -1633,7 +1637,9 @@ static SSDataBlock* readPreVersionData(SOperatorInfo* pTableScanOp, uint64_t tbU code = pAPI->tsdReader.tsdReaderRetrieveDataBlock(pReader, &p, NULL); QUERY_CHECK_CODE(code, lino, _end); - doSetTagColumnData(&pTableScanInfo->base, pBlock, pTaskInfo, pBlock->info.rows); + code = doSetTagColumnData(&pTableScanInfo->base, pBlock, pTaskInfo, pBlock->info.rows); + QUERY_CHECK_CODE(code, lino, _end); + pBlock->info.id.groupId = tableListGetTableGroupId(pTableScanInfo->base.pTableListInfo, pBlock->info.id.uid); } @@ -2762,12 +2768,16 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock // currently only the tbname pseudo column if (pInfo->numOfPseudoExpr > 0) { - int32_t tmpCode = addTagPseudoColumnData(&pInfo->readHandle, pInfo->pPseudoExpr, pInfo->numOfPseudoExpr, + code = addTagPseudoColumnData(&pInfo->readHandle, pInfo->pPseudoExpr, pInfo->numOfPseudoExpr, pInfo->pRes, pBlockInfo->rows, pTaskInfo, &pTableScanInfo->base.metaCache); // ignore the table not exists error, since this table may have been dropped during the scan procedure. 
- if (tmpCode != TSDB_CODE_SUCCESS && tmpCode != TSDB_CODE_PAR_TABLE_NOT_EXIST) { + if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) { + code = 0; + } + + if (code) { blockDataFreeRes((SSDataBlock*)pBlock); - T_LONG_JMP(pTaskInfo->env, code); + QUERY_CHECK_CODE(code, lino, _end); } // reset the error code. From 3c3507f283f678bae2be88645624e141494863f1 Mon Sep 17 00:00:00 2001 From: sima Date: Fri, 16 Aug 2024 15:54:07 +0800 Subject: [PATCH 100/181] fix:[TD-31503] Return null when expr in timediff is null, and use ms as default time_unit when time_unit is null. --- source/libs/function/src/builtins.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 760a3c4a33..a93ae8e574 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -2573,13 +2573,14 @@ static int32_t translateTimeDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t le for (int32_t i = 0; i < 2; ++i) { uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, i))->type; - if (!IS_STR_DATA_TYPE(paraType) && !IS_INTEGER_TYPE(paraType) && !IS_TIMESTAMP_TYPE(paraType)) { + if (!IS_STR_DATA_TYPE(paraType) && !IS_INTEGER_TYPE(paraType) && !IS_TIMESTAMP_TYPE(paraType) && !IS_NULL_TYPE(paraType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } } - + uint8_t para2Type; if (3 == numOfParams) { - if (!IS_INTEGER_TYPE(getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 2))->type)) { + para2Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 2))->type; + if (!IS_INTEGER_TYPE(para2Type) && !IS_NULL_TYPE(para2Type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } } @@ -2587,7 +2588,7 @@ static int32_t translateTimeDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t le // add database precision as param uint8_t dbPrec = pFunc->node.resType.precision; - if (3 == numOfParams) { + if (3 == numOfParams && !IS_NULL_TYPE(para2Type)) { int32_t code = validateTimeUnitParam(dbPrec, (SValueNode*)nodesListGetNode(pFunc->pParameterList, 2)); if (code == TSDB_CODE_FUNC_TIME_UNIT_TOO_SMALL) { return buildFuncErrMsg(pErrBuf, len, code, From b6cf379247fb213e48bc32f1d2b9b2b8b0aef52c Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 16 Aug 2024 16:53:44 +0800 Subject: [PATCH 101/181] fix double send-rsp --- source/dnode/mnode/impl/src/mndStream.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index a35815cf4d..93397e3a8c 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -24,6 +24,7 @@ #include "mndVgroup.h" #include "osMemory.h" #include "parser.h" +#include "taoserror.h" #include "tmisce.h" #include "tname.h" @@ -879,6 +880,10 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) { mndTransDrop(pTrans); + if (code == 0) { + code = TSDB_CODE_ACTION_IN_PROGRESS; + } + SName dbname = {0}; code = tNameFromString(&dbname, createReq.sourceDB, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); if (code) { @@ -3058,4 +3063,4 @@ _err: mDebug("create drop %d orphan tasks trans succ", numOfTasks); } return code; -} \ No newline at end of file +} From bfbe687d2a19f40577bcacf609420b95ea26047d Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 16 Aug 2024 17:04:26 +0800 Subject: [PATCH 102/181] fix(query): return directly. 
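The one-line change below makes the skipped-block path in loadDataBlock() leave the function right after releasing the data block, instead of routing the code through QUERY_CHECK_CODE and falling through to the SMA/data-load handling on success. Condensed view of that branch after the fix (illustrative sketch; the branch condition and surrounding code are elided):

```c
// Skipped-block branch of loadDataBlock(), condensed for illustration:
code = doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, pBlock->info.rows);
pCost->skipBlocks += 1;
pAPI->tsdReader.tsdReaderReleaseDataBlock(pTableScanInfo->dataReader);
return code;  // return directly; do not continue into the block-load paths
```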
--- source/libs/executor/src/scanoperator.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index d72716b141..0ede1cf379 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -375,7 +375,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca code = doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, pBlock->info.rows); pCost->skipBlocks += 1; pAPI->tsdReader.tsdReaderReleaseDataBlock(pTableScanInfo->dataReader); - QUERY_CHECK_CODE(code, lino, _end); + return code; } else if (*status == FUNC_DATA_REQUIRED_SMA_LOAD) { pCost->loadBlockStatis += 1; loadSMA = true; // mark the operation of load sma; From adc583a93653f490a2c26604e860fc9a73d0cfb7 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 16 Aug 2024 17:08:58 +0800 Subject: [PATCH 103/181] fix(stream): fix memory leak. --- source/libs/stream/src/streamMeta.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index c50c3c484e..07c67ba007 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -1051,7 +1051,7 @@ void streamMetaLoadAllTasks(SStreamMeta* pMeta) { } else { // todo this should replace the existed object put by replay creating stream task msg from mnode stError("s-task:0x%x already added into table meta by replaying WAL, need check", pTask->id.taskId); - taosMemoryFree(pTask); + tFreeStreamTask(pTask); continue; } From 3b7b41f82d3524a77588a92371b0898e84a0e9ce Mon Sep 17 00:00:00 2001 From: Yaming Pei Date: Fri, 16 Aug 2024 17:10:21 +0800 Subject: [PATCH 104/181] c language sample program modification --- docs/examples/c/connect_example.c | 4 +- docs/examples/c/create_db_demo.c | 10 +- docs/examples/c/insert_data_demo.c | 6 +- docs/examples/c/query_data_demo.c | 17 ++- docs/examples/c/sml_insert_demo.c | 23 ++-- docs/examples/c/stmt_insert_demo.c | 10 +- docs/examples/c/tmq_demo.c | 168 ++++++++++++++++------------- docs/examples/c/with_reqid_demo.c | 19 ++-- 8 files changed, 135 insertions(+), 122 deletions(-) diff --git a/docs/examples/c/connect_example.c b/docs/examples/c/connect_example.c index 53d2d0d59b..ef07c54122 100644 --- a/docs/examples/c/connect_example.c +++ b/docs/examples/c/connect_example.c @@ -12,11 +12,11 @@ int main() { uint16_t port = 6030; // 0 means use the default port TAOS *taos = taos_connect(host, user, passwd, db, port); if (taos == NULL) { - printf("Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); + fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); taos_cleanup(); return -1; } - printf("Connected to %s:%hu successfully.\n", host, port); + fprintf(stdout, "Connected to %s:%hu successfully.\n", host, port); /* put your code here for read and write */ diff --git a/docs/examples/c/create_db_demo.c b/docs/examples/c/create_db_demo.c index 44960defa5..7ae41da65c 100644 --- a/docs/examples/c/create_db_demo.c +++ b/docs/examples/c/create_db_demo.c @@ -33,7 +33,7 @@ static int DemoCreateDB() { // connect TAOS *taos = taos_connect(host, user, password, NULL, port); if (taos == NULL) { - printf("Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), + fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: 
%s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); taos_cleanup(); return -1; @@ -43,13 +43,13 @@ static int DemoCreateDB() { TAOS_RES *result = taos_query(taos, "CREATE DATABASE IF NOT EXISTS power"); code = taos_errno(result); if (code != 0) { - printf("Failed to create database power, ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_errstr(result)); + fprintf(stderr, "Failed to create database power, ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_errstr(result)); taos_close(taos); taos_cleanup(); return -1; } taos_free_result(result); - printf("Create database power successfully.\n"); + fprintf(stdout, "Create database power successfully.\n"); // create table const char *sql = @@ -58,13 +58,13 @@ static int DemoCreateDB() { result = taos_query(taos, sql); code = taos_errno(result); if (code != 0) { - printf("Failed to create stable power.meters, ErrCode: 0x%x, ErrMessage: %s\n.", code, taos_errstr(result)); + fprintf(stderr, "Failed to create stable power.meters, ErrCode: 0x%x, ErrMessage: %s\n.", code, taos_errstr(result)); taos_close(taos); taos_cleanup(); return -1; } taos_free_result(result); - printf("Create stable power.meters successfully.\n"); + fprintf(stdout, "Create stable power.meters successfully.\n"); // close & clean taos_close(taos); diff --git a/docs/examples/c/insert_data_demo.c b/docs/examples/c/insert_data_demo.c index e880af0cd6..7570af02ad 100644 --- a/docs/examples/c/insert_data_demo.c +++ b/docs/examples/c/insert_data_demo.c @@ -33,7 +33,7 @@ static int DemoInsertData() { // connect TAOS *taos = taos_connect(host, user, password, NULL, port); if (taos == NULL) { - printf("Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), + fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); taos_cleanup(); return -1; @@ -53,7 +53,7 @@ static int DemoInsertData() { TAOS_RES *result = taos_query(taos, sql); code = taos_errno(result); if (code != 0) { - printf("Failed to insert data to power.meters, ErrCode: 0x%x, ErrMessage: %s\n.", code, taos_errstr(result)); + fprintf(stderr, "Failed to insert data to power.meters, sql: %s, ErrCode: 0x%x, ErrMessage: %s\n.", sql, code, taos_errstr(result)); taos_close(taos); taos_cleanup(); return -1; @@ -62,7 +62,7 @@ static int DemoInsertData() { // you can check affectedRows here int rows = taos_affected_rows(result); - printf("Successfully inserted %d rows into power.meters.\n", rows); + fprintf(stdout, "Successfully inserted %d rows into power.meters.\n", rows); // close & clean taos_close(taos); diff --git a/docs/examples/c/query_data_demo.c b/docs/examples/c/query_data_demo.c index 097a6b99b8..0e13f57e3f 100644 --- a/docs/examples/c/query_data_demo.c +++ b/docs/examples/c/query_data_demo.c @@ -33,7 +33,7 @@ static int DemoQueryData() { // connect TAOS *taos = taos_connect(host, user, password, NULL, port); if (taos == NULL) { - printf("Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), + fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); taos_cleanup(); return -1; @@ -44,7 +44,7 @@ static int DemoQueryData() { TAOS_RES *result = taos_query(taos, sql); code = taos_errno(result); if (code != 0) { - printf("Failed to query data from power.meters, sql: %s, ErrCode: 0x%x, ErrMessage: %s\n.", sql, code, + fprintf(stderr, "Failed to query data from power.meters, sql: %s, ErrCode: 0x%x, ErrMessage: 
%s\n.", sql, code, taos_errstr(result)); taos_close(taos); taos_cleanup(); @@ -56,20 +56,15 @@ static int DemoQueryData() { int num_fields = taos_field_count(result); TAOS_FIELD *fields = taos_fetch_fields(result); - printf("fields: %d\n", num_fields); - printf("sql: %s, result:\n", sql); + fprintf(stdout, "query successfully, got %d fields, the sql is: %s.\n", num_fields, sql); // fetch the records row by row while ((row = taos_fetch_row(result))) { - char temp[1024] = {0}; - if (taos_print_row(temp, row, fields, num_fields) < 0) { - printf("Failed to print row\n"); - break; - } - printf("%s\n", temp); + // Add your data processing logic here + rows++; } - printf("total rows: %d\n", rows); + fprintf(stdout, "total rows: %d\n", rows); taos_free_result(result); // close & clean diff --git a/docs/examples/c/sml_insert_demo.c b/docs/examples/c/sml_insert_demo.c index 9adbb28f87..63870d90b6 100644 --- a/docs/examples/c/sml_insert_demo.c +++ b/docs/examples/c/sml_insert_demo.c @@ -32,7 +32,7 @@ static int DemoSmlInsert() { // connect TAOS *taos = taos_connect(host, user, password, NULL, port); if (taos == NULL) { - printf("Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), + fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); taos_cleanup(); return -1; @@ -42,7 +42,7 @@ static int DemoSmlInsert() { TAOS_RES *result = taos_query(taos, "CREATE DATABASE IF NOT EXISTS power"); code = taos_errno(result); if (code != 0) { - printf("Failed to create database power, ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_errstr(result)); + fprintf(stderr, "Failed to create database power, ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_errstr(result)); taos_close(taos); taos_cleanup(); return -1; @@ -53,7 +53,7 @@ static int DemoSmlInsert() { result = taos_query(taos, "USE power"); code = taos_errno(result); if (code != 0) { - printf("Failed to execute use power, ErrCode: 0x%x, ErrMessage: %s\n.", code, taos_errstr(result)); + fprintf(stderr, "Failed to execute use power, ErrCode: 0x%x, ErrMessage: %s\n.", code, taos_errstr(result)); taos_close(taos); taos_cleanup(); return -1; @@ -74,7 +74,7 @@ static int DemoSmlInsert() { result = taos_schemaless_insert(taos, lines, 1, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS); code = taos_errno(result); if (code != 0) { - printf("Failed to insert schemaless line data, data: %s, ErrCode: 0x%x, ErrMessage: %s\n.", line_demo, code, + fprintf(stderr, "Failed to insert schemaless line data, data: %s, ErrCode: 0x%x, ErrMessage: %s\n.", line_demo, code, taos_errstr(result)); taos_close(taos); taos_cleanup(); @@ -82,7 +82,7 @@ static int DemoSmlInsert() { } int rows = taos_affected_rows(result); - printf("Insert %d rows of schemaless line data successfully.\n", rows); + fprintf(stdout, "Insert %d rows of schemaless line data successfully.\n", rows); taos_free_result(result); // opentsdb telnet protocol @@ -90,7 +90,7 @@ static int DemoSmlInsert() { result = taos_schemaless_insert(taos, telnets, 1, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS); code = taos_errno(result); if (code != 0) { - printf("Failed to insert schemaless telnet data, data: %s, ErrCode: 0x%x, ErrMessage: %s\n.", telnet_demo, code, + fprintf(stderr, "Failed to insert schemaless telnet data, data: %s, ErrCode: 0x%x, ErrMessage: %s\n.", telnet_demo, code, taos_errstr(result)); taos_close(taos); taos_cleanup(); @@ -98,15 +98,16 @@ static int DemoSmlInsert() { } rows = 
taos_affected_rows(result); - printf("Insert %d rows of schemaless telnet data successfully.\n", rows); + fprintf(stdout, "Insert %d rows of schemaless telnet data successfully.\n", rows); taos_free_result(result); // opentsdb json protocol char *jsons[1] = {0}; // allocate memory for json data. can not use static memory. - jsons[0] = malloc(1024); + size_t size = 1024; + jsons[0] = malloc(size); if (jsons[0] == NULL) { - printf("Failed to allocate memory\n"); + fprintf(stderr, "Failed to allocate memory: %zu bytes.\n", size); taos_close(taos); taos_cleanup(); return -1; @@ -116,7 +117,7 @@ static int DemoSmlInsert() { code = taos_errno(result); if (code != 0) { free(jsons[0]); - printf("Failed to insert schemaless json data, Server: %s, ErrCode: 0x%x, ErrMessage: %s\n.", json_demo, code, + fprintf(stderr, "Failed to insert schemaless json data, Server: %s, ErrCode: 0x%x, ErrMessage: %s\n.", json_demo, code, taos_errstr(result)); taos_close(taos); taos_cleanup(); @@ -125,7 +126,7 @@ static int DemoSmlInsert() { free(jsons[0]); rows = taos_affected_rows(result); - printf("Insert %d rows of schemaless json data successfully.\n", rows); + fprintf(stdout, "Insert %d rows of schemaless json data successfully.\n", rows); taos_free_result(result); // close & clean diff --git a/docs/examples/c/stmt_insert_demo.c b/docs/examples/c/stmt_insert_demo.c index 854c9f86e2..f89a0fc8da 100644 --- a/docs/examples/c/stmt_insert_demo.c +++ b/docs/examples/c/stmt_insert_demo.c @@ -32,7 +32,7 @@ void executeSQL(TAOS *taos, const char *sql) { TAOS_RES *res = taos_query(taos, sql); int code = taos_errno(res); if (code != 0) { - printf("%s\n", taos_errstr(res)); + fprintf(stderr, "%s\n", taos_errstr(res)); taos_free_result(res); taos_close(taos); exit(EXIT_FAILURE); @@ -49,7 +49,7 @@ void executeSQL(TAOS *taos, const char *sql) { */ void checkErrorCode(TAOS_STMT *stmt, int code, const char *msg) { if (code != 0) { - printf("%s. code: %d, error: %s\n", msg,code,taos_stmt_errstr(stmt)); + fprintf(stderr, "%s. 
code: %d, error: %s\n", msg,code,taos_stmt_errstr(stmt)); taos_stmt_close(stmt); exit(EXIT_FAILURE); } @@ -74,7 +74,7 @@ void insertData(TAOS *taos) { // init TAOS_STMT *stmt = taos_stmt_init(taos); if (stmt == NULL) { - printf("Failed to init taos_stmt, error: %s\n", taos_stmt_errstr(NULL)); + fprintf(stderr, "Failed to init taos_stmt, error: %s\n", taos_stmt_errstr(NULL)); exit(EXIT_FAILURE); } // prepare @@ -159,7 +159,7 @@ void insertData(TAOS *taos) { int affected = taos_stmt_affected_rows_once(stmt); total_affected += affected; } - printf("Successfully inserted %d rows to power.meters.\n", total_affected); + fprintf(stdout, "Successfully inserted %d rows to power.meters.\n", total_affected); taos_stmt_close(stmt); } @@ -170,7 +170,7 @@ int main() { uint16_t port = 6030; TAOS *taos = taos_connect(host, user, password, NULL, port); if (taos == NULL) { - printf("Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); + fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); taos_cleanup(); exit(EXIT_FAILURE); } diff --git a/docs/examples/c/tmq_demo.c b/docs/examples/c/tmq_demo.c index e8e28e1df4..9deff9add5 100644 --- a/docs/examples/c/tmq_demo.c +++ b/docs/examples/c/tmq_demo.c @@ -26,6 +26,7 @@ volatile int thread_stop = 0; static int running = 1; +static int count = 0; const char* topic_name = "topic_meters"; typedef struct { @@ -40,6 +41,18 @@ typedef struct { const char* auto_offset_reset; } ConsumerConfig; +ConsumerConfig config = { + .enable_auto_commit = "true", + .auto_commit_interval_ms = "1000", + .group_id = "group1", + .client_id = "client1", + .td_connect_host = "localhost", + .td_connect_port = "6030", + .td_connect_user = "root", + .td_connect_pass = "taosdata", + .auto_offset_reset = "latest" +}; + void* prepare_data(void* arg) { const char* host = "localhost"; const char* user = "root"; @@ -48,8 +61,7 @@ void* prepare_data(void* arg) { int code = 0; TAOS* pConn = taos_connect(host, user, password, NULL, port); if (pConn == NULL) { - fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), - taos_errstr(NULL)); + fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); taos_cleanup(); return NULL; } @@ -69,8 +81,7 @@ void* prepare_data(void* arg) { pRes = taos_query(pConn, buf); code = taos_errno(pRes); if (code != 0) { - fprintf(stderr, "Failed to insert data to power.meters, ErrCode: 0x%x, ErrMessage: %s.\n", code, - taos_errstr(pRes)); + fprintf(stderr, "Failed to insert data to power.meters, ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_errstr(pRes)); } taos_free_result(pRes); sleep(1); @@ -80,43 +91,28 @@ void* prepare_data(void* arg) { } // ANCHOR: msg_process -static int32_t msg_process(TAOS_RES* msg) { +int32_t msg_process(TAOS_RES* msg) { char buf[1024]; // buf to store the row content int32_t rows = 0; const char* topicName = tmq_get_topic_name(msg); - const char* dbName = tmq_get_db_name(msg); - int32_t vgroupId = tmq_get_vgroup_id(msg); + const char* dbName = tmq_get_db_name(msg); + int32_t vgroupId = tmq_get_vgroup_id(msg); - fprintf(stdout, "topic: %s\n", topicName); - fprintf(stdout, "db: %s\n", dbName); - fprintf(stdout, "vgroup id: %d\n", vgroupId); - - while (1) { + while (true) { // get one row data from message TAOS_ROW row = taos_fetch_row(msg); if (row == NULL) break; - // get the field 
information - TAOS_FIELD* fields = taos_fetch_fields(msg); - // get the number of fields - int32_t numOfFields = taos_field_count(msg); - // get the precision of the result - int32_t precision = taos_result_precision(msg); + // Add your data processing logic here + rows++; - // print the row content - if (taos_print_row(buf, row, fields, numOfFields) < 0) { - fprintf(stderr, "Failed to print row\n"); - break; - } - // print the precision and row content to the console - fprintf(stdout, "precision: %d, data: %s\n", precision, buf); } return rows; } // ANCHOR_END: msg_process -static int32_t init_env() { +TAOS* init_env() { const char* host = "localhost"; const char* user = "root"; const char* password = "taosdata"; @@ -124,10 +120,9 @@ static int32_t init_env() { int code = 0; TAOS* pConn = taos_connect(host, user, password, NULL, port); if (pConn == NULL) { - fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), - taos_errstr(NULL)); + fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); taos_cleanup(); - return -1; + return NULL; } TAOS_RES* pRes; @@ -136,6 +131,7 @@ static int32_t init_env() { code = taos_errno(pRes); if (code != 0) { fprintf(stderr, "Failed to drop topic_meters, ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_errstr(pRes)); + goto END; } taos_free_result(pRes); @@ -143,6 +139,7 @@ static int32_t init_env() { code = taos_errno(pRes); if (code != 0) { fprintf(stderr, "Failed to drop database power, ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_errstr(pRes)); + goto END; } taos_free_result(pRes); @@ -150,7 +147,7 @@ static int32_t init_env() { pRes = taos_query(pConn, "CREATE DATABASE power PRECISION 'ms' WAL_RETENTION_PERIOD 3600"); code = taos_errno(pRes); if (code != 0) { - fprintf(stderr, "Failed to create tmqdb, ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_errstr(pRes)); + fprintf(stderr, "Failed to create power, ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_errstr(pRes)); goto END; } taos_free_result(pRes); @@ -165,56 +162,78 @@ static int32_t init_env() { fprintf(stderr, "Failed to create super table meters, ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_errstr(pRes)); goto END; } - taos_free_result(pRes); - taos_close(pConn); - return 0; + + return pConn; END: taos_free_result(pRes); taos_close(pConn); - return -1; + return NULL; } -int32_t create_topic() { +void deinit_env(TAOS* pConn) { + if (pConn) + taos_close(pConn); +} + +int32_t create_topic(TAOS* pConn) { TAOS_RES* pRes; - const char* host = "localhost"; - const char* user = "root"; - const char* password = "taosdata"; - uint16_t port = 6030; int code = 0; - TAOS* pConn = taos_connect(host, user, password, NULL, port); - if (pConn == NULL) { - fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), - taos_errstr(NULL)); - taos_cleanup(); + + if (!pConn) { + fprintf(stderr, "Invalid input parameter.\n"); return -1; } - pRes = taos_query(pConn, "USE POWER"); + pRes = taos_query(pConn, "USE power"); code = taos_errno(pRes); if (taos_errno(pRes) != 0) { - fprintf(stderr, "Failed to use tmqdb, ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_errstr(pRes)); + fprintf(stderr, "Failed to use power, ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_errstr(pRes)); return -1; } taos_free_result(pRes); - pRes = taos_query( - pConn, - "CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage, phase, groupid, location FROM 
meters"); + pRes = taos_query(pConn, "CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage, phase, groupid, location FROM meters"); code = taos_errno(pRes); if (code != 0) { fprintf(stderr, "Failed to create topic topic_meters, ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_errstr(pRes)); return -1; } taos_free_result(pRes); + return 0; +} - taos_close(pConn); +int32_t drop_topic(TAOS* pConn) { + TAOS_RES* pRes; + int code = 0; + + if (!pConn) { + fprintf(stderr, "Invalid input parameter.\n"); + return -1; + } + + pRes = taos_query(pConn, "USE power"); + code = taos_errno(pRes); + if (taos_errno(pRes) != 0) { + fprintf(stderr, "Failed to use power, ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "DROP TOPIC IF EXISTS topic_meters"); + code = taos_errno(pRes); + if (code != 0) { + fprintf(stderr, "Failed to drop topic topic_meters, ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); return 0; } void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) { - fprintf(stdout, "tmq_commit_cb_print() code: %d, tmq: %p, param: %p\n", code, tmq, param); + count +=1; + fprintf(stdout, "tmq_commit_cb_print() code: %d, tmq: %p, param: %p, count: %d.\n", code, tmq, param, count); } // ANCHOR: create_consumer_1 @@ -289,7 +308,6 @@ _end: tmq_list_t* build_topic_list() { // create a empty topic list tmq_list_t* topicList = tmq_list_new(); - const char* topic_name = "topic_meters"; // append topic name to the list int32_t code = tmq_list_append(topicList, topic_name); @@ -315,8 +333,10 @@ void basic_consume_loop(tmq_t* tmq) { TAOS_RES* tmqmsg = tmq_consumer_poll(tmq, timeout); if (tmqmsg) { msgCnt++; - // process the message + + // Add your data processing logic here totalRows += msg_process(tmqmsg); + // free the message taos_free_result(tmqmsg); } @@ -349,12 +369,13 @@ void consume_repeatly(tmq_t* tmq) { code = tmq_offset_seek(tmq, topic_name, p->vgId, p->begin); if (code != 0) { - fprintf(stderr, "Failed to seek assignment %d to beginning %ld, ErrCode: 0x%x, ErrMessage: %s.\n", i, p->begin, - code, tmq_err2str(code)); - } else { - fprintf(stdout, "Seek assignment %d to beginning %ld successfully.\n", i, p->begin); + fprintf(stderr, "Failed to seek offset, topic: %s, groupId: %s, clientId: %s, ErrCode: 0x%x, ErrMessage: %s.\n", + topic_name, config.group_id, config.client_id, code, tmq_err2str(code)); + break; } } + if (code == 0) + fprintf(stdout, "Assignment seek to beginning successfully.\n"); // free the assignment array tmq_free_assignment(pAssign); @@ -380,7 +401,8 @@ void manual_commit(tmq_t* tmq) { // commit the message int32_t code = tmq_commit_sync(tmq, tmqmsg); if (code) { - fprintf(stderr, "Failed to commit message, ErrCode: 0x%x, ErrMessage: %s.\n", code, tmq_err2str(code)); + fprintf(stderr, "Failed to commit offset, topic: %s, groupId: %s, clientId: %s, ErrCode: 0x%x, ErrMessage: %s.\n", + topic_name, config.group_id, config.client_id, code, tmq_err2str(code)); // free the message taos_free_result(tmqmsg); break; @@ -405,11 +427,14 @@ int main(int argc, char* argv[]) { int32_t code; pthread_t thread_id; - if (init_env() < 0) { + TAOS* pConn = init_env(); + if (pConn == NULL) { + fprintf(stderr, "Failed to init env.\n"); return -1; } - if (create_topic() < 0) { + if (create_topic(pConn) < 0) { + fprintf(stderr, "Failed to create topic.\n"); return -1; } @@ -418,16 +443,6 @@ int main(int argc, char* argv[]) { return -1; } - 
ConsumerConfig config = {.enable_auto_commit = "true", - .auto_commit_interval_ms = "1000", - .group_id = "group1", - .client_id = "client1", - .td_connect_host = "localhost", - .td_connect_port = "6030", - .td_connect_user = "root", - .td_connect_pass = "taosdata", - .auto_offset_reset = "latest"}; - // ANCHOR: create_consumer_2 tmq_t* tmq = build_consumer(&config); if (NULL == tmq) { @@ -435,8 +450,8 @@ int main(int argc, char* argv[]) { config.td_connect_host, config.group_id, config.client_id); return -1; } else { - fprintf(stdout, "Create consumer successfully, host: %s, groupId: %s, , clientId: %s.\n", config.td_connect_host, - config.group_id, config.client_id); + fprintf(stdout, "Create consumer successfully, host: %s, groupId: %s, clientId: %s.\n", + config.td_connect_host, config.group_id, config.client_id); } // ANCHOR_END: create_consumer_2 @@ -467,7 +482,8 @@ int main(int argc, char* argv[]) { // unsubscribe the topic code = tmq_unsubscribe(tmq); if (code) { - fprintf(stderr, "Failed to unsubscribe consumer, ErrCode: 0x%x, ErrMessage: %s.\n", code, tmq_err2str(code)); + fprintf(stderr, "Failed to unsubscribe consumer, topic: %s, groupId: %s, clientId: %s, ErrCode: 0x%x, ErrMessage: %s.\n", + topic_name, config.group_id, config.client_id, code, tmq_err2str(code)); } else { fprintf(stderr, "Consumer unsubscribed successfully.\n"); } @@ -484,5 +500,11 @@ int main(int argc, char* argv[]) { thread_stop = 1; pthread_join(thread_id, NULL); + if (drop_topic(pConn) < 0) { + fprintf(stderr, "Failed to drop topic.\n"); + return -1; + } + + deinit_env(pConn); return 0; } diff --git a/docs/examples/c/with_reqid_demo.c b/docs/examples/c/with_reqid_demo.c index c6e87686e9..8942077f67 100644 --- a/docs/examples/c/with_reqid_demo.c +++ b/docs/examples/c/with_reqid_demo.c @@ -33,8 +33,7 @@ static int DemoWithReqId() { // connect TAOS *taos = taos_connect(host, user, password, NULL, port); if (taos == NULL) { - printf("Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), - taos_errstr(NULL)); + fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); taos_cleanup(); return -1; } @@ -45,7 +44,7 @@ static int DemoWithReqId() { TAOS_RES *result = taos_query_with_reqid(taos, sql, reqid); code = taos_errno(result); if (code != 0) { - printf("Failed to execute sql with reqId: %ld, ErrCode: 0x%x, ErrMessage: %s\n.", reqid, code, taos_errstr(result)); + fprintf(stderr, "Failed to execute sql with reqId: %ld, ErrCode: 0x%x, ErrMessage: %s\n.", reqid, code, taos_errstr(result)); taos_close(taos); taos_cleanup(); return -1; @@ -56,20 +55,16 @@ static int DemoWithReqId() { int num_fields = taos_field_count(result); TAOS_FIELD *fields = taos_fetch_fields(result); - printf("fields: %d\n", num_fields); - printf("sql: %s, result:\n", sql); + fprintf(stdout, "query successfully, got %d fields, the sql is: %s.\n", num_fields, sql); + // fetch the records row by row while ((row = taos_fetch_row(result))) { - char temp[1024] = {0}; - if (taos_print_row(temp, row, fields, num_fields) < 0) { - printf("Failed to print row\n"); - break; - } - printf("%s\n", temp); + // Add your data processing logic here + rows++; } - printf("total rows: %d\n", rows); + fprintf(stdout, "total rows: %d\n", rows); taos_free_result(result); // close & clean From 69a09e5a0193fca3408308eddfaa5489d2b66eac Mon Sep 17 00:00:00 2001 From: sima Date: Fri, 16 Aug 2024 17:33:25 +0800 Subject: [PATCH 105/181] fix:[TD-31508] Fix wrong 
data type in week function. --- source/libs/scalar/src/sclfunc.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index a4f32356c6..f81205df7a 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -2668,8 +2668,8 @@ int32_t weekdayFunctionImpl(SScalarParam *pInput, int32_t inputNum, SScalarParam } struct STm tm; TAOS_CHECK_RETURN(taosTs2Tm(timeVal, timePrec, &tm)); - int32_t ret = startFromZero ? (tm.tm.tm_wday + 6) % 7 : tm.tm.tm_wday + 1; - SCL_ERR_RET(colDataSetVal(pOutput->columnData, i, (const char*)&ret, false)); + int64_t ret = startFromZero ? (tm.tm.tm_wday + 6) % 7 : tm.tm.tm_wday + 1; + colDataSetInt64(pOutput->columnData, i, &ret); } pOutput->numOfRows = pInput->numOfRows; @@ -2778,8 +2778,8 @@ int32_t weekFunctionImpl(SScalarParam *pInput, int32_t inputNum, SScalarParam *p } struct STm tm; SCL_ERR_RET(taosTs2Tm(timeVal, prec, &tm)); - int32_t ret = calculateWeekNum(tm.tm, weekMode(mode)); - SCL_ERR_RET(colDataSetVal(pOutput->columnData, i, (const char*)&ret, false)); + int64_t ret = calculateWeekNum(tm.tm, weekMode(mode)); + colDataSetInt64(pOutput->columnData, i, &ret); } pOutput->numOfRows = pInput->numOfRows; From 562552bf303d55a566e18b94a70c751fbac52345 Mon Sep 17 00:00:00 2001 From: sheyanjie-qq <249478495@qq.com> Date: Fri, 16 Aug 2024 17:46:55 +0800 Subject: [PATCH 106/181] update rust sample code --- .../rust/nativeexample/examples/tmq.rs | 18 +++++++++++++++--- docs/examples/rust/restexample/examples/tmq.rs | 18 +++++++++++++++--- 2 files changed, 30 insertions(+), 6 deletions(-) diff --git a/docs/examples/rust/nativeexample/examples/tmq.rs b/docs/examples/rust/nativeexample/examples/tmq.rs index 2f8a9e5995..49580f81b7 100644 --- a/docs/examples/rust/nativeexample/examples/tmq.rs +++ b/docs/examples/rust/nativeexample/examples/tmq.rs @@ -86,7 +86,7 @@ async fn main() -> anyhow::Result<()> { match consumer.subscribe([topic]).await{ Ok(_) => println!("Subscribe topics successfully."), Err(err) => { - eprintln!("Failed to subscribe topic_meters, ErrMessage: {:?}", err); + eprintln!("Failed to subscribe topic: {}, groupId: {}, clientId: {}, ErrMessage: {:?}", topic, group_id, client_id, err); return Err(err.into()); } } @@ -164,7 +164,13 @@ async fn main() -> anyhow::Result<()> { // ANCHOR: seek_offset - let assignments = consumer.assignments().await.unwrap(); + let assignments = match consumer.assignments().await{ + Some(assignments) => assignments, + None => { + eprintln!("Failed to get assignments."); + return Err(anyhow::anyhow!("Failed to get assignments. topic: {}, groupId: {}, clientId: {}", topic, group_id, client_id)); + } + }; println!("assignments: {:?}", assignments); // seek offset @@ -200,7 +206,13 @@ async fn main() -> anyhow::Result<()> { } println!("Assignment seek to beginning successfully."); // after seek offset - let assignments = consumer.assignments().await.unwrap(); + let assignments = match consumer.assignments().await{ + Some(assignments) => assignments, + None => { + eprintln!("Failed to get assignments."); + return Err(anyhow::anyhow!("Failed to get assignments. 
topic: {}, groupId: {}, clientId: {}", topic, group_id, client_id)); + } + }; println!("After seek offset assignments: {:?}", assignments); // ANCHOR_END: seek_offset diff --git a/docs/examples/rust/restexample/examples/tmq.rs b/docs/examples/rust/restexample/examples/tmq.rs index 670ae0ca3c..86715d57cb 100644 --- a/docs/examples/rust/restexample/examples/tmq.rs +++ b/docs/examples/rust/restexample/examples/tmq.rs @@ -86,7 +86,7 @@ async fn main() -> anyhow::Result<()> { match consumer.subscribe([topic]).await{ Ok(_) => println!("Subscribe topics successfully."), Err(err) => { - eprintln!("Failed to subscribe topic_meters, ErrMessage: {:?}", err); + eprintln!("Failed to subscribe topic: {}, groupId: {}, clientId: {}, ErrMessage: {:?}", topic, group_id, client_id, err); return Err(err.into()); } } @@ -164,7 +164,13 @@ async fn main() -> anyhow::Result<()> { // ANCHOR: seek_offset - let assignments = consumer.assignments().await.unwrap(); + let assignments = match consumer.assignments().await{ + Some(assignments) => assignments, + None => { + eprintln!("Failed to get assignments."); + return Err(anyhow::anyhow!("Failed to get assignments. topic: {}, groupId: {}, clientId: {}", topic, group_id, client_id)); + } + }; println!("assignments: {:?}", assignments); // seek offset @@ -200,7 +206,13 @@ async fn main() -> anyhow::Result<()> { } println!("Assignment seek to beginning successfully."); // after seek offset - let assignments = consumer.assignments().await.unwrap(); + let assignments = match consumer.assignments().await{ + Some(assignments) => assignments, + None => { + eprintln!("Failed to get assignments."); + return Err(anyhow::anyhow!("Failed to get assignments. topic: {}, groupId: {}, clientId: {}", topic, group_id, client_id)); + } + }; println!("After seek offset assignments: {:?}", assignments); // ANCHOR_END: seek_offset From f7d4c274a8ecc94710378e9729bb907eafbe170b Mon Sep 17 00:00:00 2001 From: wangjiaming0909 <604227650@qq.com> Date: Fri, 16 Aug 2024 18:03:16 +0800 Subject: [PATCH 107/181] fix exchange operator blocked --- source/libs/executor/src/exchangeoperator.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/source/libs/executor/src/exchangeoperator.c b/source/libs/executor/src/exchangeoperator.c index bdc1e42b28..fb085c39c8 100644 --- a/source/libs/executor/src/exchangeoperator.c +++ b/source/libs/executor/src/exchangeoperator.c @@ -225,7 +225,10 @@ static SSDataBlock* doLoadRemoteDataImpl(SOperatorInfo* pOperator) { } else { concurrentlyLoadRemoteDataImpl(pOperator, pExchangeInfo, pTaskInfo); } - + if (TSDB_CODE_SUCCESS != pOperator->pTaskInfo->code) { + qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code)); + T_LONG_JMP(pTaskInfo->env, pOperator->pTaskInfo->code); + } if (taosArrayGetSize(pExchangeInfo->pResultBlockList) == 0) { return NULL; } else { From a9acc43550cdbe206375a47d15680f238fc7f50c Mon Sep 17 00:00:00 2001 From: Yaming Pei Date: Fri, 16 Aug 2024 18:25:02 +0800 Subject: [PATCH 108/181] python language sample program modification --- docs/examples/python/insert_native.py | 2 +- docs/examples/python/insert_rest.py | 2 +- docs/examples/python/insert_ws.py | 2 +- docs/examples/python/tmq_native.py | 66 +++++++++---------- docs/examples/python/tmq_websocket_example.py | 54 ++++++++------- 5 files changed, 65 insertions(+), 61 deletions(-) diff --git a/docs/examples/python/insert_native.py b/docs/examples/python/insert_native.py index ad7a8b85c2..19dafa3f23 100644 --- a/docs/examples/python/insert_native.py +++ 
b/docs/examples/python/insert_native.py @@ -21,7 +21,7 @@ try: print(f"Successfully inserted {affectedRows} rows to power.meters.") except Exception as err: - print(f"Failed to insert data to power.meters, ErrMessage:{err}") + print(f"Failed to insert data to power.meters, sql: {sql}, ErrMessage: {err}.") finally: if conn: conn.close() diff --git a/docs/examples/python/insert_rest.py b/docs/examples/python/insert_rest.py index 41fd70857f..526c3a6a69 100644 --- a/docs/examples/python/insert_rest.py +++ b/docs/examples/python/insert_rest.py @@ -20,7 +20,7 @@ try: print(f"Successfully inserted {affectedRows} rows to power.meters.") except Exception as err: - print(f"Failed to insert data to power.meters, ErrMessage:{err}") + print(f"Failed to insert data to power.meters, sql:{sql}, ErrMessage:{err}.") finally: if conn: conn.close() diff --git a/docs/examples/python/insert_ws.py b/docs/examples/python/insert_ws.py index 9c03b4857a..886dda1c10 100644 --- a/docs/examples/python/insert_ws.py +++ b/docs/examples/python/insert_ws.py @@ -21,7 +21,7 @@ try: print(f"Successfully inserted {affectedRows} rows to power.meters.") except Exception as err: - print(f"Failed to insert data to power.meters, ErrMessage:{err}") + print(f"Failed to insert data to power.meters, sql: {sql}, ErrMessage: {err}.") finally: if conn: conn.close() diff --git a/docs/examples/python/tmq_native.py b/docs/examples/python/tmq_native.py index 22a9b805bc..d4ccfda138 100644 --- a/docs/examples/python/tmq_native.py +++ b/docs/examples/python/tmq_native.py @@ -1,17 +1,24 @@ +#!/usr/bin/python3 import taos +db = "power" +topic = "topic_meters" +user = "root" +password = "taosdata" +host = "localhost" +port = 6030 +groupId = "group1" +clientId = "1" +tdConnWsScheme = "ws" +autoOffsetReset = "latest" +autoCommitState = "true" +autoCommitIntv = "1000" + + def prepareMeta(): conn = None try: - conn = taos.connect( - host="localhost", - user="root", - password="taosdata", - port=6030, - ) - - db = "power" - topic = "topic_meters" + conn = taos.connect(host=host, user=user, password=password, port=port) conn.execute(f"CREATE DATABASE IF NOT EXISTS {db}") # change database. 
same as execute "USE db" @@ -33,13 +40,13 @@ def prepareMeta(): power.d1001 USING power.meters (groupid, location) TAGS(2, 'California.SanFrancisco') VALUES (NOW + 1a, 10.30000, 219, 0.31000) (NOW + 2a, 12.60000, 218, 0.33000) (NOW + 3a, 12.30000, 221, 0.31000) - power.d1002 USING power.meters (groupid, location) TAGS(3, 'California.SanFrancisco') + power.d1002 USING power.meters (groupid, location) TAGS(3, 'California.SanFrancisco') VALUES (NOW + 1a, 10.30000, 218, 0.25000) """ affectedRows = conn.execute(sql) print(f"Inserted into {affectedRows} rows to power.meters successfully.") except Exception as err: - print(f"Prepare insert data error, ErrMessage:{err}") + print(f"Failed to prepareMeta, host: {host}:{port}, db: {db}, topic: {topic}, ErrMessage:{err}.") raise err finally: if conn: @@ -49,28 +56,24 @@ def prepareMeta(): from taos.tmq import Consumer def create_consumer(): - host = "localhost" - port = "6030" - groupId = "group1" - clientId = "1" try: consumer = Consumer( { "group.id": groupId, "client.id": clientId, - "td.connect.user": "root", - "td.connect.pass": "taosdata", - "enable.auto.commit": "true", - "auto.commit.interval.ms": "1000", - "auto.offset.reset": "latest", + "td.connect.user": user, + "td.connect.pass": password, + "enable.auto.commit": autoCommitState, + "auto.commit.interval.ms": autoCommitIntv, + "auto.offset.reset": autoOffsetReset, "td.connect.ip": host, - "td.connect.port": port, + "td.connect.port": str(port), } ) - print(f"Create consumer successfully, host: {host}:{port}, groupId: {groupId}, clientId: {clientId}"); + print(f"Create consumer successfully, host: {host}:{port}, groupId: {groupId}, clientId: {clientId}") return consumer except Exception as err: - print(f"Failed to create native consumer, host: {host}:{port}, ErrMessage:{err}"); + print(f"Failed to create native consumer, host: {host}:{port}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}.") raise err # ANCHOR_END: create_consumer @@ -96,7 +99,7 @@ def subscribe(consumer): print(f"data: {data}") except Exception as err: - print(f"Failed to poll data, ErrMessage:{err}") + print(f"Failed to poll data, topic: {topic}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}.") raise err @@ -123,7 +126,7 @@ def commit_offset(consumer): print("Commit offset manually successfully."); except Exception as err: - print(f"Failed to execute commit example, ErrMessage:{err}") + print(f"Failed to commit offset, topic: {topic}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}.") raise err # ANCHOR_END: commit_offset @@ -136,9 +139,9 @@ def seek_offset(consumer): for partition in assignments: partition.offset = 0 consumer.seek(partition) - print(f"Assignment seek to beginning successfully"); + print(f"Assignment seek to beginning successfully.") except Exception as err: - print(f"Failed to execute seek example, ErrMessage:{err}") + print(f"Failed to seek offset, topic: {topic}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}.") raise err # ANCHOR_END: assignment @@ -148,7 +151,7 @@ def unsubscribe(consumer): consumer.unsubscribe() print("Consumer unsubscribed successfully."); except Exception as err: - print(f"Failed to unsubscribe consumer. ErrMessage:{err}") + print(f"Failed to unsubscribe consumer. 
topic: {topic}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}.") finally: if consumer: consumer.close() @@ -163,10 +166,7 @@ if __name__ == "__main__": subscribe(consumer) seek_offset(consumer) commit_offset(consumer) - consumer.unsubscribe() - print("Consumer unsubscribed successfully.") except Exception as err: - print(f"Failed to execute consumer example, ErrMessage:{err}") + print(f"Failed to execute consumer example, topic: {topic}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}.") finally: - consumer.unsubscribe() - + unsubscribe(consumer); diff --git a/docs/examples/python/tmq_websocket_example.py b/docs/examples/python/tmq_websocket_example.py index 15441fbb41..793eb3c416 100644 --- a/docs/examples/python/tmq_websocket_example.py +++ b/docs/examples/python/tmq_websocket_example.py @@ -1,18 +1,26 @@ #!/usr/bin/python3 import taosws -topic = "topic_meters" +db = "power" +topic = "topic_meters" +user = "root" +password = "taosdata" +host = "localhost" +port = 6041 +groupId = "group1" +clientId = "1" +tdConnWsScheme = "ws" +autoOffsetReset = "latest" +autoCommitState = "true" +autoCommitIntv = "1000" + def prepareMeta(): conn = None try: - conn = taosws.connect(user="root", - password="taosdata", - host="localhost", - port=6041) + conn = taosws.connect(user=user, password=password, host=host, port=port) - db = "power" # create database rowsAffected = conn.execute(f"CREATE DATABASE IF NOT EXISTS {db}") assert rowsAffected == 0 @@ -51,7 +59,7 @@ def prepareMeta(): print(f"Inserted into {affectedRows} rows to power.meters successfully.") except Exception as err: - print(f"Failed to prepareMeta ErrMessage:{err}") + print(f"Failed to prepareMeta, host: {host}:{port}, db: {db}, topic: {topic}, ErrMessage:{err}.") raise err finally: if conn: @@ -59,26 +67,22 @@ def prepareMeta(): # ANCHOR: create_consumer -def create_consumer(): - host = "localhost" - port = 6041 - groupId = "group1" - clientId = "1" +def create_consumer(): try: consumer = taosws.Consumer(conf={ - "td.connect.websocket.scheme": "ws", + "td.connect.websocket.scheme": tdConnWsScheme, "group.id": groupId, "client.id": clientId, - "auto.offset.reset": "latest", + "auto.offset.reset": autoOffsetReset, "td.connect.ip": host, "td.connect.port": port, - "enable.auto.commit": "true", - "auto.commit.interval.ms": "1000", + "enable.auto.commit": autoCommitState, + "auto.commit.interval.ms": autoCommitIntv, }) - print(f"Create consumer successfully, host: {host}:{port}, groupId: {groupId}, clientId: {clientId}"); + print(f"Create consumer successfully, host: {host}:{port}, groupId: {groupId}, clientId: {clientId}."); return consumer; except Exception as err: - print(f"Failed to create websocket consumer, host: {host}:{port}, ErrMessage:{err}"); + print(f"Failed to create websocket consumer, host: {host}:{port}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}."); raise err @@ -95,10 +99,10 @@ def seek_offset(consumer): print( f"vg_id: {assign.vg_id()}, offset: {assign.offset()}, begin: {assign.begin()}, end: {assign.end()}") consumer.seek(topic, assign.vg_id(), assign.begin()) - print("Assignment seek to beginning successfully"); + print("Assignment seek to beginning successfully.") except Exception as err: - print(f"Failed to execute seek example, ErrMessage:{err}") + print(f"Failed to seek offset, topic: {topic}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}.") raise err # ANCHOR_END: assignment @@ -116,7 +120,7 @@ def subscribe(consumer): print(f"data: {row}") except Exception as err: - 
print(f"Failed to poll data, ErrMessage:{err}") + print(f"Failed to poll data, topic: {topic}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}.") raise err @@ -134,10 +138,10 @@ def commit_offset(consumer): # after processing the data, commit the offset manually consumer.commit(records) - print("Commit offset manually successfully."); + print("Commit offset manually successfully.") except Exception as err: - print(f"Failed to execute commit example, ErrMessage:{err}") + print(f"Failed to commit offset, topic: {topic}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}.") raise err @@ -150,7 +154,7 @@ def unsubscribe(consumer): consumer.unsubscribe() print("Consumer unsubscribed successfully."); except Exception as err: - print(f"Failed to unsubscribe consumer. ErrMessage:{err}") + print(f"Failed to unsubscribe consumer. topic: {topic}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}.") finally: if consumer: consumer.close() @@ -167,6 +171,6 @@ if __name__ == "__main__": seek_offset(consumer) commit_offset(consumer) except Exception as err: - print(f"Failed to execute consumer example, ErrorMessage:{err}") + print(f"Failed to execute consumer example, topic: {topic}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}.") finally: unsubscribe(consumer); From 3e928fc8fc834cc07b03b7c7f1313fa153eebeaf Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 16 Aug 2024 18:25:10 +0800 Subject: [PATCH 109/181] remove backend data file --- include/libs/stream/tstream.h | 42 ++++----- source/dnode/vnode/src/tq/tq.c | 139 ++++++++++++++-------------- source/libs/stream/src/streamTask.c | 80 ++++++++++++---- 3 files changed, 150 insertions(+), 111 deletions(-) diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 5e7f2bf0a6..f916e05d52 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -19,7 +19,6 @@ #include "os.h" #include "streamMsg.h" #include "streamState.h" -#include "streamMsg.h" #include "tdatablock.h" #include "tdbInt.h" #include "tmsg.h" @@ -266,14 +265,14 @@ typedef struct SStreamTaskId { } SStreamTaskId; typedef struct SCheckpointInfo { - int64_t startTs; - int64_t checkpointId; // latest checkpoint id - int64_t checkpointVer; // latest checkpoint offset in wal - int64_t checkpointTime; // latest checkpoint time - int64_t processedVer; - int64_t nextProcessVer; // current offset in WAL, not serialize it - int64_t msgVer; - int32_t consensusTransId;// consensus checkpoint id + int64_t startTs; + int64_t checkpointId; // latest checkpoint id + int64_t checkpointVer; // latest checkpoint offset in wal + int64_t checkpointTime; // latest checkpoint time + int64_t processedVer; + int64_t nextProcessVer; // current offset in WAL, not serialize it + int64_t msgVer; + int32_t consensusTransId; // consensus checkpoint id SActiveCheckpointInfo* pActiveInfo; } SCheckpointInfo; @@ -454,7 +453,8 @@ struct SStreamTask { SSHashObj* pNameMap; void* pBackend; int8_t subtableWithoutMd5; - char reserve[256]; + char* backendPath; + char reserve[256 - sizeof(char*)]; }; typedef int32_t (*startComplete_fn_t)(struct SStreamMeta*); @@ -591,9 +591,9 @@ typedef struct STaskStatusEntry { int32_t statusLastDuration; // to record the last duration of current status int64_t stage; int32_t nodeId; - SVersionRange verRange; // start/end version in WAL, only valid for source task - int64_t processedVer; // only valid for source task - double inputQUsed; // in MiB + SVersionRange verRange; // start/end version in WAL, only valid for 
source task + int64_t processedVer; // only valid for source task + double inputQUsed; // in MiB double inputRate; double procsThroughput; // duration between one element put into input queue and being processed. double procsTotal; // duration between one element put into input queue and being processed. @@ -678,9 +678,9 @@ int32_t streamTaskSendCheckRsp(const SStreamMeta* pMeta, int32_t vgId, SStreamTa int32_t streamTaskProcessCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRsp); // check downstream status -void streamTaskStartMonitorCheckRsp(SStreamTask* pTask); -void streamTaskStopMonitorCheckRsp(STaskCheckInfo* pInfo, const char* id); -void streamTaskCleanupCheckInfo(STaskCheckInfo* pInfo); +void streamTaskStartMonitorCheckRsp(SStreamTask* pTask); +void streamTaskStopMonitorCheckRsp(STaskCheckInfo* pInfo, const char* id); +void streamTaskCleanupCheckInfo(STaskCheckInfo* pInfo); // fill-history task int32_t streamLaunchFillHistoryTask(SStreamTask* pTask); @@ -717,8 +717,8 @@ int32_t streamTaskSetDb(SStreamMeta* pMeta, SStreamTask* pTask, const char* key) bool streamTaskIsSinkTask(const SStreamTask* pTask); void streamTaskSetRemoveBackendFiles(SStreamTask* pTask); -void streamTaskStatusInit(STaskStatusEntry* pEntry, const SStreamTask* pTask); -void streamTaskStatusCopy(STaskStatusEntry* pDst, const STaskStatusEntry* pSrc); +void streamTaskStatusInit(STaskStatusEntry* pEntry, const SStreamTask* pTask); +void streamTaskStatusCopy(STaskStatusEntry* pDst, const STaskStatusEntry* pSrc); STaskStatusEntry streamTaskGetStatusEntry(SStreamTask* pTask); // source level @@ -812,9 +812,9 @@ void streamTaskSendRetrieveRsp(SStreamRetrieveReq* pReq, SRpcMsg* pRsp); int32_t streamProcessHeartbeatRsp(SStreamMeta* pMeta, SMStreamHbRspMsg* pRsp); int32_t streamTaskSendCheckpointsourceRsp(SStreamTask* pTask); -void streamMutexLock(TdThreadMutex *pMutex); -void streamMutexUnlock(TdThreadMutex *pMutex); -void streamMutexDestroy(TdThreadMutex *pMutex); +void streamMutexLock(TdThreadMutex* pMutex); +void streamMutexUnlock(TdThreadMutex* pMutex); +void streamMutexDestroy(TdThreadMutex* pMutex); #ifdef __cplusplus } diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 314a6abdf5..a70a04f23d 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -14,7 +14,10 @@ */ #include "tq.h" +#include "osDef.h" +#include "taoserror.h" #include "tqCommon.h" +#include "tstream.h" #include "vnd.h" // 0: not init @@ -153,7 +156,7 @@ void tqNotifyClose(STQ* pTq) { } void tqPushEmptyDataRsp(STqHandle* pHandle, int32_t vgId) { - int32_t code = 0; + int32_t code = 0; SMqPollReq req = {0}; code = tDeserializeSMqPollReq(pHandle->msg->pCont, pHandle->msg->contLen, &req); if (code < 0) { @@ -169,7 +172,7 @@ void tqPushEmptyDataRsp(STqHandle* pHandle, int32_t vgId) { } dataRsp.common.blockNum = 0; char buf[TSDB_OFFSET_LEN] = {0}; - (void) tFormatOffset(buf, TSDB_OFFSET_LEN, &dataRsp.common.reqOffset); + (void)tFormatOffset(buf, TSDB_OFFSET_LEN, &dataRsp.common.reqOffset); tqInfo("tqPushEmptyDataRsp to consumer:0x%" PRIx64 " vgId:%d, offset:%s, reqId:0x%" PRIx64, req.consumerId, vgId, buf, req.reqId); @@ -180,15 +183,15 @@ void tqPushEmptyDataRsp(STqHandle* pHandle, int32_t vgId) { tDeleteMqDataRsp(&dataRsp); } -int32_t tqSendDataRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SMqPollReq* pReq, const void* pRsp, - int32_t type, int32_t vgId) { +int32_t tqSendDataRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SMqPollReq* pReq, const void* pRsp, int32_t type, + 
int32_t vgId) { int64_t sver = 0, ever = 0; walReaderValidVersionRange(pHandle->execHandle.pTqReader->pWalReader, &sver, &ever); char buf1[TSDB_OFFSET_LEN] = {0}; char buf2[TSDB_OFFSET_LEN] = {0}; - (void) tFormatOffset(buf1, TSDB_OFFSET_LEN, &((SMqDataRspCommon*)pRsp)->reqOffset); - (void) tFormatOffset(buf2, TSDB_OFFSET_LEN, &((SMqDataRspCommon*)pRsp)->rspOffset); + (void)tFormatOffset(buf1, TSDB_OFFSET_LEN, &((SMqDataRspCommon*)pRsp)->reqOffset); + (void)tFormatOffset(buf2, TSDB_OFFSET_LEN, &((SMqDataRspCommon*)pRsp)->rspOffset); tqDebug("tmq poll vgId:%d consumer:0x%" PRIx64 " (epoch %d) send rsp, block num:%d, req:%s, rsp:%s, reqId:0x%" PRIx64, vgId, pReq->consumerId, pReq->epoch, ((SMqDataRspCommon*)pRsp)->blockNum, buf1, buf2, pReq->reqId); @@ -200,7 +203,7 @@ int32_t tqProcessOffsetCommitReq(STQ* pTq, int64_t sversion, char* msg, int32_t SMqVgOffset vgOffset = {0}; int32_t vgId = TD_VID(pTq->pVnode); - int32_t code = 0; + int32_t code = 0; SDecoder decoder; tDecoderInit(&decoder, (uint8_t*)msg, msgLen); if (tDecodeMqVgOffset(&decoder, &vgOffset) < 0) { @@ -233,12 +236,13 @@ int32_t tqProcessOffsetCommitReq(STQ* pTq, int64_t sversion, char* msg, int32_t } // save the new offset value - if (taosHashPut(pTq->pOffset, pOffset->subKey, strlen(pOffset->subKey), pOffset, sizeof(STqOffset))){ + if (taosHashPut(pTq->pOffset, pOffset->subKey, strlen(pOffset->subKey), pOffset, sizeof(STqOffset))) { terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; } - if (tqMetaSaveInfo(pTq, pTq->pOffsetStore, pOffset->subKey, strlen(pOffset->subKey), msg, msgLen - sizeof(vgOffset.consumerId)) < 0) { + if (tqMetaSaveInfo(pTq, pTq->pOffsetStore, pOffset->subKey, strlen(pOffset->subKey), msg, + msgLen - sizeof(vgOffset.consumerId)) < 0) { terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; } @@ -416,7 +420,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) { } char buf[TSDB_OFFSET_LEN] = {0}; - (void) tFormatOffset(buf, TSDB_OFFSET_LEN, &reqOffset); + (void)tFormatOffset(buf, TSDB_OFFSET_LEN, &reqOffset); tqDebug("tmq poll: consumer:0x%" PRIx64 " (epoch %d), subkey %s, recv poll req vgId:%d, req:%s, reqId:0x%" PRIx64, consumerId, req.epoch, pHandle->subKey, vgId, buf, req.reqId); @@ -447,7 +451,7 @@ int32_t tqProcessVgCommittedInfoReq(STQ* pTq, SRpcMsg* pMsg) { tDecoderClear(&decoder); STqOffset* pSavedOffset = NULL; - int32_t code = tqMetaGetOffset(pTq, vgOffset.offset.subKey, &pSavedOffset); + int32_t code = tqMetaGetOffset(pTq, vgOffset.offset.subKey, &pSavedOffset); if (code != 0) { return TSDB_CODE_TMQ_NO_COMMITTED; } @@ -479,7 +483,7 @@ int32_t tqProcessVgCommittedInfoReq(STQ* pTq, SRpcMsg* pMsg) { } int32_t tqProcessVgWalInfoReq(STQ* pTq, SRpcMsg* pMsg) { - int32_t code = 0; + int32_t code = 0; SMqPollReq req = {0}; if (tDeserializeSMqPollReq(pMsg->pCont, pMsg->contLen, &req) < 0) { tqError("tDeserializeSMqPollReq %d failed", pMsg->contLen); @@ -505,7 +509,6 @@ int32_t tqProcessVgWalInfoReq(STQ* pTq, SRpcMsg* pMsg) { consumerId, vgId, req.subKey, pHandle->consumerId); taosRUnLockLatch(&pTq->lock); return TSDB_CODE_TMQ_CONSUMER_MISMATCH; - } int64_t sver = 0, ever = 0; @@ -612,8 +615,8 @@ int32_t tqProcessDeleteSubReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg int32_t tqProcessAddCheckInfoReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) { STqCheckInfo info = {0}; - int32_t code = tqMetaDecodeCheckInfo(&info, msg, msgLen); - if(code != 0){ + int32_t code = tqMetaDecodeCheckInfo(&info, msg, msgLen); + if (code != 0) { return code; } @@ -650,7 +653,7 @@ int32_t tqProcessSubscribeReq(STQ* pTq, 
int64_t sversion, char* msg, int32_t msg taosRLockLatch(&pTq->lock); STqHandle* pHandle = NULL; - (void)tqMetaGetHandle(pTq, req.subKey, &pHandle); //ignore return code + (void)tqMetaGetHandle(pTq, req.subKey, &pHandle); // ignore return code taosRUnLockLatch(&pTq->lock); if (pHandle == NULL) { if (req.oldConsumerId != -1) { @@ -697,7 +700,7 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg } } - end: +end: tDecoderClear(&dc); return ret; } @@ -705,7 +708,7 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg static void freePtr(void* ptr) { taosMemoryFree(*(void**)ptr); } int32_t tqBuildStreamTask(void* pTqObj, SStreamTask* pTask, int64_t nextProcessVer) { - STQ* pTq = (STQ*) pTqObj; + STQ* pTq = (STQ*)pTqObj; int32_t vgId = TD_VID(pTq->pVnode); tqDebug("s-task:0x%x start to build task", pTask->id.taskId); @@ -749,12 +752,12 @@ int32_t tqBuildStreamTask(void* pTqObj, SStreamTask* pTask, int64_t nextProcessV } streamTaskResetUpstreamStageInfo(pTask); - (void) streamSetupScheduleTrigger(pTask); + (void)streamSetupScheduleTrigger(pTask); SCheckpointInfo* pChkInfo = &pTask->chkInfo; tqSetRestoreVersionInfo(pTask); - char* p = streamTaskGetStatus(pTask).name; + char* p = streamTaskGetStatus(pTask).name; const char* pNext = streamTaskGetStatusStr(pTask->status.taskStatus); if (pTask->info.fillHistory) { @@ -766,14 +769,13 @@ int32_t tqBuildStreamTask(void* pTqObj, SStreamTask* pTask, int64_t nextProcessV pTask->info.selfChildId, pTask->info.taskLevel, p, pNext, pTask->info.fillHistory, (int32_t)pTask->streamTaskId.taskId, pTask->info.delaySchedParam, nextProcessVer); } else { - tqInfo( - "vgId:%d build stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64 - " nextProcessVer:%" PRId64 - " child id:%d, level:%d, cur-status:%s next-status:%s fill-history:%d, related fill-task:0x%x delaySched:%" PRId64 - " ms, inputVer:%" PRId64, - vgId, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer, - pTask->info.selfChildId, pTask->info.taskLevel, p, pNext, pTask->info.fillHistory, - (int32_t)pTask->hTaskInfo.id.taskId, pTask->info.delaySchedParam, nextProcessVer); + tqInfo("vgId:%d build stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64 + " nextProcessVer:%" PRId64 + " child id:%d, level:%d, cur-status:%s next-status:%s fill-history:%d, related fill-task:0x%x " + "delaySched:%" PRId64 " ms, inputVer:%" PRId64, + vgId, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer, + pTask->info.selfChildId, pTask->info.taskLevel, p, pNext, pTask->info.fillHistory, + (int32_t)pTask->hTaskInfo.id.taskId, pTask->info.delaySchedParam, nextProcessVer); ASSERT(pChkInfo->checkpointVer <= pChkInfo->nextProcessVer); } @@ -781,8 +783,7 @@ int32_t tqBuildStreamTask(void* pTqObj, SStreamTask* pTask, int64_t nextProcessV return 0; } -int32_t tqProcessTaskCheckReq(STQ* pTq, SRpcMsg* pMsg) { - return tqStreamTaskProcessCheckReq(pTq->pStreamMeta, pMsg); } +int32_t tqProcessTaskCheckReq(STQ* pTq, SRpcMsg* pMsg) { return tqStreamTaskProcessCheckReq(pTq->pStreamMeta, pMsg); } int32_t tqProcessTaskCheckRsp(STQ* pTq, SRpcMsg* pMsg) { return tqStreamTaskProcessCheckRsp(pTq->pStreamMeta, pMsg, vnodeIsRoleLeader(pTq->pVnode)); @@ -803,13 +804,13 @@ static void doStartFillhistoryStep2(SStreamTask* pTask, SStreamTask* pStreamTask pTask->execInfo.step2Start = taosGetTimestampMs(); if (done) { - qDebug("s-task:%s scan wal(step 2) verRange:%" PRId64 "-%" PRId64 " 
ended, elapsed time:%.2fs", id, pStep2Range->minVer, - pStep2Range->maxVer, 0.0); + qDebug("s-task:%s scan wal(step 2) verRange:%" PRId64 "-%" PRId64 " ended, elapsed time:%.2fs", id, + pStep2Range->minVer, pStep2Range->maxVer, 0.0); int32_t code = streamTaskPutTranstateIntoInputQ(pTask); // todo: msg lost. if (code) { qError("s-task:%s failed put trans-state into inputQ, code:%s", id, tstrerror(code)); } - (void) streamExecTask(pTask); // exec directly + (void)streamExecTask(pTask); // exec directly } else { STimeWindow* pWindow = &pTask->dataRange.window; tqDebug("s-task:%s level:%d verRange:%" PRId64 "-%" PRId64 " window:%" PRId64 "-%" PRId64 @@ -830,12 +831,12 @@ static void doStartFillhistoryStep2(SStreamTask* pTask, SStreamTask* pStreamTask tqDebug("s-task:%s wal reader start scan WAL verRange:%" PRId64 "-%" PRId64 ", set sched-status:%d", id, dstVer, pStep2Range->maxVer, TASK_SCHED_STATUS__INACTIVE); - (void) streamTaskSetSchedStatusInactive(pTask); + (void)streamTaskSetSchedStatusInactive(pTask); // now the fill-history task starts to scan data from wal files. code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_SCANHIST_DONE); if (code == TSDB_CODE_SUCCESS) { - (void) tqScanWalAsync(pTq, false); + (void)tqScanWalAsync(pTq, false); } } } @@ -846,9 +847,9 @@ int32_t handleStep2Async(SStreamTask* pStreamTask, void* param) { SStreamMeta* pMeta = pStreamTask->pMeta; STaskId hId = pStreamTask->hTaskInfo.id; SStreamTask* pTask = NULL; - int32_t code = streamMetaAcquireTask(pStreamTask->pMeta, hId.streamId, hId.taskId, &pTask); + int32_t code = streamMetaAcquireTask(pStreamTask->pMeta, hId.streamId, hId.taskId, &pTask); if (pTask == NULL) { - tqWarn("s-task:0x%x failed to acquired it to exec step 2, scan wal quit", (int32_t) hId.taskId); + tqWarn("s-task:0x%x failed to acquired it to exec step 2, scan wal quit", (int32_t)hId.taskId); return TSDB_CODE_SUCCESS; } @@ -930,8 +931,8 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) { if (retInfo.ret == TASK_SCANHISTORY_REXEC) { streamExecScanHistoryInFuture(pTask, retInfo.idleTime); } else { - SStreamTaskState p = streamTaskGetStatus(pTask); - ETaskStatus s = p.state; + SStreamTaskState p = streamTaskGetStatus(pTask); + ETaskStatus s = p.state; if (s == TASK_STATUS__PAUSE) { tqDebug("s-task:%s is paused in the step1, elapsed time:%.2fs total:%.2fs, sched-status:%d", pTask->id.idStr, @@ -963,7 +964,7 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) { atomic_store_32(&pTask->status.inScanHistorySentinel, 0); streamMetaReleaseTask(pMeta, pTask); - return code; // todo: handle failure + return code; // todo: handle failure } ASSERT(pStreamTask->info.taskLevel == TASK_LEVEL__SOURCE); @@ -988,7 +989,7 @@ int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) { // let's continue scan data in the wal files if (code == 0 && (pReq->reqType >= 0 || pReq->reqType == STREAM_EXEC_T_RESUME_TASK)) { - (void) tqScanWalAsync(pTq, false); // it's ok to failed + (void)tqScanWalAsync(pTq, false); // it's ok to failed } return code; @@ -1026,11 +1027,9 @@ int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg) { return tqStreamTaskProcessRetrieveReq(pTq->pStreamMeta, pMsg); } -int32_t tqProcessTaskRetrieveRsp(STQ* pTq, SRpcMsg* pMsg) { - return 0; -} +int32_t tqProcessTaskRetrieveRsp(STQ* pTq, SRpcMsg* pMsg) { return 0; } -int32_t tqStreamProgressRetrieveReq(STQ *pTq, SRpcMsg *pMsg) { +int32_t tqStreamProgressRetrieveReq(STQ* pTq, SRpcMsg* pMsg) { char* msgStr = pMsg->pCont; char* msgBody = POINTER_SHIFT(msgStr, sizeof(SMsgHead)); 
int32_t msgLen = pMsg->contLen - sizeof(SMsgHead); @@ -1092,18 +1091,18 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) tqError("vgId:%d failed to decode checkpoint-source msg, code:%s", vgId, tstrerror(code)); SRpcMsg rsp = {0}; - (void) streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); - tmsgSendRsp(&rsp); // error occurs - return TSDB_CODE_SUCCESS; // always return success to mnode, todo: handle failure of build and send msg to mnode + (void)streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); + tmsgSendRsp(&rsp); // error occurs + return TSDB_CODE_SUCCESS; // always return success to mnode, todo: handle failure of build and send msg to mnode } tDecoderClear(&decoder); if (!vnodeIsRoleLeader(pTq->pVnode)) { tqDebug("vgId:%d not leader, ignore checkpoint-source msg, s-task:0x%x", vgId, req.taskId); SRpcMsg rsp = {0}; - (void) streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); - tmsgSendRsp(&rsp); // error occurs - return TSDB_CODE_SUCCESS; // always return success to mnode, todo: handle failure of build and send msg to mnode + (void)streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); + tmsgSendRsp(&rsp); // error occurs + return TSDB_CODE_SUCCESS; // always return success to mnode, todo: handle failure of build and send msg to mnode } if (!pTq->pVnode->restored) { @@ -1111,9 +1110,9 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) ", transId:%d s-task:0x%x ignore it", vgId, req.checkpointId, req.transId, req.taskId); SRpcMsg rsp = {0}; - (void) streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); - tmsgSendRsp(&rsp); // error occurs - return TSDB_CODE_SUCCESS; // always return success to mnode, , todo: handle failure of build and send msg to mnode + (void)streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); + tmsgSendRsp(&rsp); // error occurs + return TSDB_CODE_SUCCESS; // always return success to mnode, , todo: handle failure of build and send msg to mnode } SStreamTask* pTask = NULL; @@ -1123,7 +1122,7 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) " transId:%d it may have been destroyed", vgId, req.taskId, req.checkpointId, req.transId); SRpcMsg rsp = {0}; - (void) streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); + (void)streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); tmsgSendRsp(&rsp); // error occurs return TSDB_CODE_SUCCESS; } @@ -1136,9 +1135,9 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) streamMetaReleaseTask(pMeta, pTask); SRpcMsg rsp = {0}; - (void) streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); - tmsgSendRsp(&rsp); // error occurs - return TSDB_CODE_SUCCESS; // todo retry handle error + (void)streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); + tmsgSendRsp(&rsp); // error occurs + return TSDB_CODE_SUCCESS; // todo retry handle error } // todo save the checkpoint failed info @@ -1154,14 +1153,14 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) streamMetaReleaseTask(pMeta, pTask); SRpcMsg rsp = {0}; - (void) streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); + (void)streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); tmsgSendRsp(&rsp); // error occurs 
return TSDB_CODE_SUCCESS; } } else { if (status != TASK_STATUS__HALT) { tqError("s-task:%s should in halt status, let's halt it directly", pTask->id.idStr); -// streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_HALT); + // streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_HALT); } } @@ -1178,16 +1177,17 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) streamMetaReleaseTask(pMeta, pTask); return TSDB_CODE_SUCCESS; - } else { // checkpoint already finished, and not in checkpoint status + } else { // checkpoint already finished, and not in checkpoint status if (req.checkpointId <= pTask->chkInfo.checkpointId) { tqWarn("s-task:%s repeatly recv checkpoint-source msg checkpointId:%" PRId64 - " transId:%d already handled, return success", pTask->id.idStr, req.checkpointId, req.transId); + " transId:%d already handled, return success", + pTask->id.idStr, req.checkpointId, req.transId); streamMutexUnlock(&pTask->lock); streamMetaReleaseTask(pMeta, pTask); SRpcMsg rsp = {0}; - (void) streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); + (void)streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); tmsgSendRsp(&rsp); // error occurs return TSDB_CODE_SUCCESS; @@ -1198,7 +1198,8 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) streamMutexUnlock(&pTask->lock); if (code) { - qError("s-task:%s (vgId:%d) failed to process checkpoint-source req, code:%s", pTask->id.idStr, vgId, tstrerror(code)); + qError("s-task:%s (vgId:%d) failed to process checkpoint-source req, code:%s", pTask->id.idStr, vgId, + tstrerror(code)); return code; } @@ -1215,7 +1216,7 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) code = streamAddCheckpointSourceRspMsg(&req, &pMsg->info, pTask); if (code != TSDB_CODE_SUCCESS) { SRpcMsg rsp = {0}; - (void) streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); + (void)streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); tmsgSendRsp(&rsp); // error occurs return TSDB_CODE_SUCCESS; } @@ -1228,7 +1229,7 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) int32_t tqProcessTaskCheckpointReadyMsg(STQ* pTq, SRpcMsg* pMsg) { int32_t vgId = TD_VID(pTq->pVnode); - SRetrieveChkptTriggerReq* pReq = (SRetrieveChkptTriggerReq*) pMsg->pCont; + SRetrieveChkptTriggerReq* pReq = (SRetrieveChkptTriggerReq*)pMsg->pCont; if (!vnodeIsRoleLeader(pTq->pVnode)) { tqError("vgId:%d not leader, ignore the retrieve checkpoint-trigger msg from 0x%x", vgId, (int32_t)pReq->downstreamTaskId); @@ -1249,7 +1250,7 @@ int32_t tqProcessTaskResetReq(STQ* pTq, SRpcMsg* pMsg) { int32_t tqProcessTaskRetrieveTriggerReq(STQ* pTq, SRpcMsg* pMsg) { int32_t vgId = TD_VID(pTq->pVnode); - SRetrieveChkptTriggerReq* pReq = (SRetrieveChkptTriggerReq*) pMsg->pCont; + SRetrieveChkptTriggerReq* pReq = (SRetrieveChkptTriggerReq*)pMsg->pCont; if (!vnodeIsRoleLeader(pTq->pVnode)) { tqError("vgId:%d not leader, ignore the retrieve checkpoint-trigger msg from 0x%x", vgId, (int32_t)pReq->downstreamTaskId); @@ -1264,9 +1265,7 @@ int32_t tqProcessTaskRetrieveTriggerRsp(STQ* pTq, SRpcMsg* pMsg) { } // this function is needed, do not try to remove it. 
-int32_t tqProcessStreamHbRsp(STQ* pTq, SRpcMsg* pMsg) { - return tqStreamProcessStreamHbRsp(pTq->pStreamMeta, pMsg); -} +int32_t tqProcessStreamHbRsp(STQ* pTq, SRpcMsg* pMsg) { return tqStreamProcessStreamHbRsp(pTq->pStreamMeta, pMsg); } int32_t tqProcessStreamReqCheckpointRsp(STQ* pTq, SRpcMsg* pMsg) { return tqStreamProcessReqCheckpointRsp(pTq->pStreamMeta, pMsg); diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index f190673430..c531260682 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -14,6 +14,8 @@ */ #include "executor.h" +#include "osDir.h" +#include "osMemory.h" #include "streamInt.h" #include "streamsm.h" #include "tmisce.h" @@ -30,7 +32,7 @@ static int32_t addToTaskset(SArray* pArray, SStreamTask* pTask) { int32_t childId = taosArrayGetSize(pArray); pTask->info.selfChildId = childId; void* p = taosArrayPush(pArray, &pTask); - return (p == NULL)? TSDB_CODE_OUT_OF_MEMORY:TSDB_CODE_SUCCESS; + return (p == NULL) ? TSDB_CODE_OUT_OF_MEMORY : TSDB_CODE_SUCCESS; } static int32_t doUpdateTaskEpset(SStreamTask* pTask, int32_t nodeId, SEpSet* pEpSet, bool* pUpdated) { @@ -42,7 +44,7 @@ static int32_t doUpdateTaskEpset(SStreamTask* pTask, int32_t nodeId, SEpSet* pEp if (!isEqual) { (*pUpdated) = true; char tmp[512] = {0}; - (void) epsetToStr(&pTask->info.epSet, tmp, tListLen(tmp)); // only for log file, ignore errors + (void)epsetToStr(&pTask->info.epSet, tmp, tListLen(tmp)); // only for log file, ignore errors epsetAssign(&pTask->info.epSet, pEpSet); stDebug("s-task:0x%x (vgId:%d) self node epset is updated %s, old:%s", pTask->id.taskId, nodeId, buf, tmp); @@ -92,7 +94,7 @@ static SStreamUpstreamEpInfo* createStreamTaskEpInfo(const SStreamTask* pTask) { } int32_t tNewStreamTask(int64_t streamId, int8_t taskLevel, SEpSet* pEpset, bool fillHistory, int64_t triggerParam, - SArray* pTaskList, bool hasFillhistory, int8_t subtableWithoutMd5, SStreamTask** p) { + SArray* pTaskList, bool hasFillhistory, int8_t subtableWithoutMd5, SStreamTask** p) { *p = NULL; SStreamTask* pTask = (SStreamTask*)taosMemoryCalloc(1, sizeof(SStreamTask)); @@ -224,17 +226,17 @@ void tFreeStreamTask(SStreamTask* pTask) { } if (pTask->schedInfo.pDelayTimer != NULL) { - (void) taosTmrStop(pTask->schedInfo.pDelayTimer); + (void)taosTmrStop(pTask->schedInfo.pDelayTimer); pTask->schedInfo.pDelayTimer = NULL; } if (pTask->hTaskInfo.pTimer != NULL) { - (void) taosTmrStop(pTask->hTaskInfo.pTimer); + (void)taosTmrStop(pTask->hTaskInfo.pTimer); pTask->hTaskInfo.pTimer = NULL; } if (pTask->msgInfo.pRetryTmr != NULL) { - (void) taosTmrStop(pTask->msgInfo.pRetryTmr); + (void)taosTmrStop(pTask->msgInfo.pRetryTmr); pTask->msgInfo.pRetryTmr = NULL; } @@ -321,10 +323,19 @@ void streamFreeTaskState(SStreamTask* pTask, int8_t remove) { stDebug("s-task:0x%x start to free task state", pTask->id.taskId); streamStateClose(pTask->pState, remove); - if (remove)taskDbSetClearFileFlag(pTask->pBackend); + if (remove) taskDbSetClearFileFlag(pTask->pBackend); + taskDbRemoveRef(pTask->pBackend); pTask->pBackend = NULL; pTask->pState = NULL; + } else { + if (remove) { + if (pTask->backendPath != NULL) { + taosRemoveDir(pTask->backendPath); + taosMemoryFree(pTask->backendPath); + pTask->backendPath = NULL; + } + } } } @@ -364,8 +375,36 @@ static void setInitialVersionInfo(SStreamTask* pTask, int64_t ver) { } } +int32_t streamTaskSetBackendPath(SStreamTask* pTask) { + int64_t streamId = 0; + int32_t taskId = 0; + + if (pTask->info.fillHistory) { + streamId = 
pTask->hTaskInfo.id.taskId; + taskId = pTask->hTaskInfo.id.taskId; + } else { + streamId = pTask->streamTaskId.taskId; + taskId = pTask->streamTaskId.taskId; + } + + char id[128] = {0}; + int32_t nBytes = sprintf(id, "0x%" PRIx64 "-0x%x", streamId, taskId); + if (nBytes < 0 || nBytes >= sizeof(id)) { + return TSDB_CODE_OUT_OF_BUFFER; + } + + int32_t len = strlen(pTask->pMeta->path); + pTask->backendPath = (char*)taosMemoryMalloc(len + nBytes + 2); + if (pTask->backendPath == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + + (void)sprintf(pTask->backendPath, "%s%s%s", pTask->pMeta->path, TD_DIRSEP, id); + + return 0; +} int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, int64_t ver) { - (void) createStreamTaskIdStr(pTask->id.streamId, pTask->id.taskId, &pTask->id.idStr); + (void)createStreamTaskIdStr(pTask->id.streamId, pTask->id.taskId, &pTask->id.idStr); pTask->refCnt = 1; pTask->inputq.status = TASK_INPUT_STATUS__NORMAL; @@ -459,8 +498,9 @@ int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, i } if (pTask->chkInfo.pActiveInfo == NULL) { - code = streamTaskCreateActiveChkptInfo(&pTask->chkInfo.pActiveInfo); + code = streamTaskCreateActiveChkptInfo(&pTask->chkInfo.pActiveInfo); } + code = streamTaskSetBackendPath(pTask); return code; } @@ -494,12 +534,12 @@ int32_t streamTaskSetUpstreamInfo(SStreamTask* pTask, const SStreamTask* pUpstre } void* p = taosArrayPush(pTask->upstreamInfo.pList, &pEpInfo); - return (p == NULL)? TSDB_CODE_OUT_OF_MEMORY:TSDB_CODE_SUCCESS; + return (p == NULL) ? TSDB_CODE_OUT_OF_MEMORY : TSDB_CODE_SUCCESS; } void streamTaskUpdateUpstreamInfo(SStreamTask* pTask, int32_t nodeId, const SEpSet* pEpSet, bool* pUpdated) { char buf[512] = {0}; - (void) epsetToStr(pEpSet, buf, tListLen(buf)); // ignore error since it is only for log file. + (void)epsetToStr(pEpSet, buf, tListLen(buf)); // ignore error since it is only for log file. int32_t numOfUpstream = taosArrayGetSize(pTask->upstreamInfo.pList); for (int32_t i = 0; i < numOfUpstream; ++i) { @@ -510,7 +550,7 @@ void streamTaskUpdateUpstreamInfo(SStreamTask* pTask, int32_t nodeId, const SEpS *pUpdated = true; char tmp[512] = {0}; - (void) epsetToStr(&pInfo->epSet, tmp, tListLen(tmp)); + (void)epsetToStr(&pInfo->epSet, tmp, tListLen(tmp)); epsetAssign(&pInfo->epSet, pEpSet); stDebug("s-task:0x%x update the upstreamInfo taskId:0x%x(nodeId:%d) newEpset:%s old:%s", pTask->id.taskId, @@ -545,7 +585,7 @@ void streamTaskSetFixedDownstreamInfo(SStreamTask* pTask, const SStreamTask* pDo void streamTaskUpdateDownstreamInfo(SStreamTask* pTask, int32_t nodeId, const SEpSet* pEpSet, bool* pUpdated) { char buf[512] = {0}; - (void) epsetToStr(pEpSet, buf, tListLen(buf)); // ignore the error since only for log files. + (void)epsetToStr(pEpSet, buf, tListLen(buf)); // ignore the error since only for log files. 
int32_t id = pTask->id.taskId; int8_t type = pTask->outputInfo.type; @@ -564,7 +604,7 @@ void streamTaskUpdateDownstreamInfo(SStreamTask* pTask, int32_t nodeId, const SE if (!isEqual) { *pUpdated = true; char tmp[512] = {0}; - (void) epsetToStr(&pVgInfo->epSet, tmp, tListLen(tmp)); + (void)epsetToStr(&pVgInfo->epSet, tmp, tListLen(tmp)); epsetAssign(&pVgInfo->epSet, pEpSet); stDebug("s-task:0x%x update dispatch info, task:0x%x(nodeId:%d) newEpset:%s old:%s", id, pVgInfo->taskId, @@ -584,7 +624,7 @@ void streamTaskUpdateDownstreamInfo(SStreamTask* pTask, int32_t nodeId, const SE *pUpdated = true; char tmp[512] = {0}; - (void) epsetToStr(&pDispatcher->epSet, tmp, tListLen(tmp)); + (void)epsetToStr(&pDispatcher->epSet, tmp, tListLen(tmp)); epsetAssign(&pDispatcher->epSet, pEpSet); stDebug("s-task:0x%x update dispatch info, task:0x%x(nodeId:%d) newEpset:%s old:%s", id, pDispatcher->taskId, @@ -919,7 +959,7 @@ STaskStatusEntry streamTaskGetStatusEntry(SStreamTask* pTask) { static int32_t taskPauseCallback(SStreamTask* pTask, void* param) { SStreamMeta* pMeta = pTask->pMeta; - int32_t code = 0; + int32_t code = 0; int32_t num = atomic_add_fetch_32(&pMeta->numOfPausedTasks, 1); stInfo("vgId:%d s-task:%s pause stream task. paused task num:%d", pMeta->vgId, pTask->id.idStr, num); @@ -935,7 +975,7 @@ static int32_t taskPauseCallback(SStreamTask* pTask, void* param) { } void streamTaskPause(SStreamTask* pTask) { - (void) streamTaskHandleEventAsync(pTask->status.pSM, TASK_EVENT_PAUSE, taskPauseCallback, NULL); + (void)streamTaskHandleEventAsync(pTask->status.pSM, TASK_EVENT_PAUSE, taskPauseCallback, NULL); } void streamTaskResume(SStreamTask* pTask) { @@ -1142,13 +1182,13 @@ void streamTaskDestroyActiveChkptInfo(SActiveCheckpointInfo* pInfo) { SStreamTmrInfo* pTriggerTmr = &pInfo->chkptTriggerMsgTmr; if (pTriggerTmr->tmrHandle != NULL) { - (void) taosTmrStop(pTriggerTmr->tmrHandle); + (void)taosTmrStop(pTriggerTmr->tmrHandle); pTriggerTmr->tmrHandle = NULL; } SStreamTmrInfo* pReadyTmr = &pInfo->chkptReadyMsgTmr; if (pReadyTmr->tmrHandle != NULL) { - (void) taosTmrStop(pReadyTmr->tmrHandle); + (void)taosTmrStop(pReadyTmr->tmrHandle); pReadyTmr->tmrHandle = NULL; } @@ -1185,4 +1225,4 @@ const char* streamTaskGetExecType(int32_t type) { default: return "invalid-exec-type"; } -} \ No newline at end of file +} From b5dd25a9ad5843b9213e1ddcc1baee9877a2d3e1 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 16 Aug 2024 19:34:08 +0800 Subject: [PATCH 110/181] fix(query): return directly. 
--- source/libs/executor/src/scanoperator.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 0ede1cf379..b3655e16f2 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -392,7 +392,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows); code = doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, pBlock->info.rows); pAPI->tsdReader.tsdReaderReleaseDataBlock(pTableScanInfo->dataReader); - QUERY_CHECK_CODE(code, lino, _end); + return code; } else { qDebug("%s failed to load SMA, since not all columns have SMA", GET_TASKID(pTaskInfo)); *status = FUNC_DATA_REQUIRED_DATA_LOAD; From 6bdbbc4b15adb59dcc1a77c579438b77e1899858 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 16 Aug 2024 19:40:51 +0800 Subject: [PATCH 111/181] fix mem leak --- source/libs/stream/src/streamTask.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index c531260682..b7b54e2885 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -298,14 +298,6 @@ void tFreeStreamTask(SStreamTask* pTask) { taosArrayDestroy(pTask->outputInfo.pNodeEpsetUpdateList); pTask->outputInfo.pNodeEpsetUpdateList = NULL; - // if ((pTask->status.removeBackendFiles) && (pTask->pMeta != NULL)) { - // char* path = taosMemoryCalloc(1, strlen(pTask->pMeta->path) + 128); - // sprintf(path, "%s%s%s", pTask->pMeta->path, TD_DIRSEP, pTask->id.idStr); - // taosRemoveDir(path); - - // stInfo("s-task:0x%x vgId:%d remove all backend files:%s", taskId, pTask->pMeta->vgId, path); - // taosMemoryFree(path); - // } if (pTask->id.idStr != NULL) { taosMemoryFree((void*)pTask->id.idStr); @@ -328,6 +320,11 @@ void streamFreeTaskState(SStreamTask* pTask, int8_t remove) { taskDbRemoveRef(pTask->pBackend); pTask->pBackend = NULL; pTask->pState = NULL; + + if (pTask->backendPath != NULL) { + taosMemoryFree(pTask->backendPath); + pTask->backendPath = NULL; + } } else { if (remove) { if (pTask->backendPath != NULL) { From a49ac1f2ecacf20d96c8aac4f5a87c8aaf86ad2b Mon Sep 17 00:00:00 2001 From: sheyanjie-qq <249478495@qq.com> Date: Fri, 16 Aug 2024 20:03:30 +0800 Subject: [PATCH 112/181] update rust assignment --- docs/examples/rust/nativeexample/examples/tmq.rs | 10 ++++++---- docs/examples/rust/restexample/examples/tmq.rs | 10 ++++++---- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/docs/examples/rust/nativeexample/examples/tmq.rs b/docs/examples/rust/nativeexample/examples/tmq.rs index 49580f81b7..800b66e8fe 100644 --- a/docs/examples/rust/nativeexample/examples/tmq.rs +++ b/docs/examples/rust/nativeexample/examples/tmq.rs @@ -167,8 +167,9 @@ async fn main() -> anyhow::Result<()> { let assignments = match consumer.assignments().await{ Some(assignments) => assignments, None => { - eprintln!("Failed to get assignments."); - return Err(anyhow::anyhow!("Failed to get assignments. topic: {}, groupId: {}, clientId: {}", topic, group_id, client_id)); + let error_message = format!("Failed to get assignments. 
topic: {}, groupId: {}, clientId: {}", topic, group_id, client_id); + eprintln!("{}", error_message); + return Err(anyhow::anyhow!(error_message)); } }; println!("assignments: {:?}", assignments); @@ -209,8 +210,9 @@ async fn main() -> anyhow::Result<()> { let assignments = match consumer.assignments().await{ Some(assignments) => assignments, None => { - eprintln!("Failed to get assignments."); - return Err(anyhow::anyhow!("Failed to get assignments. topic: {}, groupId: {}, clientId: {}", topic, group_id, client_id)); + let error_message = format!("Failed to get assignments. topic: {}, groupId: {}, clientId: {}", topic, group_id, client_id); + eprintln!("{}", error_message); + return Err(anyhow::anyhow!(error_message)); } }; println!("After seek offset assignments: {:?}", assignments); diff --git a/docs/examples/rust/restexample/examples/tmq.rs b/docs/examples/rust/restexample/examples/tmq.rs index 86715d57cb..0a41025955 100644 --- a/docs/examples/rust/restexample/examples/tmq.rs +++ b/docs/examples/rust/restexample/examples/tmq.rs @@ -167,8 +167,9 @@ async fn main() -> anyhow::Result<()> { let assignments = match consumer.assignments().await{ Some(assignments) => assignments, None => { - eprintln!("Failed to get assignments."); - return Err(anyhow::anyhow!("Failed to get assignments. topic: {}, groupId: {}, clientId: {}", topic, group_id, client_id)); + let error_message = format!("Failed to get assignments. topic: {}, groupId: {}, clientId: {}", topic, group_id, client_id); + eprintln!("{}", error_message); + return Err(anyhow::anyhow!(error_message)); } }; println!("assignments: {:?}", assignments); @@ -209,8 +210,9 @@ async fn main() -> anyhow::Result<()> { let assignments = match consumer.assignments().await{ Some(assignments) => assignments, None => { - eprintln!("Failed to get assignments."); - return Err(anyhow::anyhow!("Failed to get assignments. topic: {}, groupId: {}, clientId: {}", topic, group_id, client_id)); + let error_message = format!("Failed to get assignments. 
topic: {}, groupId: {}, clientId: {}", topic, group_id, client_id); + eprintln!("{}", error_message); + return Err(anyhow::anyhow!(error_message)); } }; println!("After seek offset assignments: {:?}", assignments); From 5011a05e11c7f49d5c09b6d3745d1d08d05f0ce0 Mon Sep 17 00:00:00 2001 From: Yaming Pei Date: Fri, 16 Aug 2024 20:11:18 +0800 Subject: [PATCH 113/181] c language sample tmq program optimization --- docs/examples/c/tmq_demo.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/docs/examples/c/tmq_demo.c b/docs/examples/c/tmq_demo.c index 9deff9add5..b16245d484 100644 --- a/docs/examples/c/tmq_demo.c +++ b/docs/examples/c/tmq_demo.c @@ -314,7 +314,8 @@ tmq_list_t* build_topic_list() { if (code) { // if failed, destroy the list and return NULL tmq_list_destroy(topicList); - fprintf(stderr, "Failed to create topic_list, ErrCode: 0x%x, ErrMessage: %s.\n", code, tmq_err2str(code)); + fprintf(stderr, "Failed to create topic_list, topic: %s, groupId: %s, clientId: %s, ErrCode: 0x%x, ErrMessage: %s.\n", + topic_name, config.group_id, config.client_id, code, tmq_err2str(code)); return NULL; } // if success, return the list @@ -347,7 +348,7 @@ void basic_consume_loop(tmq_t* tmq) { } // print the result: total messages and total rows consumed - fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows); + fprintf(stdout, "%d msg consumed, include %d rows\n", msgCnt, totalRows); } // ANCHOR_END: basic_consume_loop @@ -359,7 +360,8 @@ void consume_repeatly(tmq_t* tmq) { // get the topic assignment int32_t code = tmq_get_topic_assignment(tmq, topic_name, &pAssign, &numOfAssignment); if (code != 0 || pAssign == NULL || numOfAssignment == 0) { - fprintf(stderr, "Failed to get assignment, ErrCode: 0x%x, ErrMessage: %s.\n", code, tmq_err2str(code)); + fprintf(stderr, "Failed to get assignment, topic: %s, groupId: %s, clientId: %s, ErrCode: 0x%x, ErrMessage: %s.\n", + topic_name, config.group_id, config.client_id, code, tmq_err2str(code)); return; } @@ -419,7 +421,7 @@ void manual_commit(tmq_t* tmq) { } // print the result: total messages and total rows consumed - fprintf(stderr, "%d msg consumed, include %d rows.\n", msgCnt, totalRows); + fprintf(stdout, "%d msg consumed, include %d rows.\n", msgCnt, totalRows); } // ANCHOR_END: manual_commit @@ -459,12 +461,14 @@ int main(int argc, char* argv[]) { // ANCHOR: subscribe_3 tmq_list_t* topic_list = build_topic_list(); if (NULL == topic_list) { - fprintf(stderr, "Failed to create topic_list.\n"); + fprintf(stderr, "Failed to create topic_list, topic: %s, groupId: %s, clientId: %s.\n", + topic_name, config.group_id, config.client_id); return -1; } if ((code = tmq_subscribe(tmq, topic_list))) { - fprintf(stderr, "Failed to subscribe topic_list, ErrCode: 0x%x, ErrMessage: %s.\n", code, tmq_err2str(code)); + fprintf(stderr, "Failed to subscribe topic_list, topic: %s, groupId: %s, clientId: %s, ErrCode: 0x%x, ErrMessage: %s.\n", + topic_name, config.group_id, config.client_id, code, tmq_err2str(code)); } else { fprintf(stdout, "Subscribe topics successfully.\n"); } @@ -485,15 +489,16 @@ int main(int argc, char* argv[]) { fprintf(stderr, "Failed to unsubscribe consumer, topic: %s, groupId: %s, clientId: %s, ErrCode: 0x%x, ErrMessage: %s.\n", topic_name, config.group_id, config.client_id, code, tmq_err2str(code)); } else { - fprintf(stderr, "Consumer unsubscribed successfully.\n"); + fprintf(stdout, "Consumer unsubscribed successfully.\n"); } // close the consumer code = tmq_consumer_close(tmq); if (code) 
{ - fprintf(stderr, "Failed to close consumer: %s.\n", tmq_err2str(code)); + fprintf(stderr, "Failed to close consumer, topic: %s, groupId: %s, clientId: %s, ErrCode: 0x%x, ErrMessage: %s.\n", + topic_name, config.group_id, config.client_id, code, tmq_err2str(code)); } else { - fprintf(stderr, "Consumer closed successfully.\n"); + fprintf(stdout, "Consumer closed successfully.\n"); } // ANCHOR_END: unsubscribe_and_close From c1e5a6e6f9c297fda508f2e7136ed802eb7042e2 Mon Sep 17 00:00:00 2001 From: Yaming Pei Date: Fri, 16 Aug 2024 20:36:40 +0800 Subject: [PATCH 114/181] python language sample program modification --- docs/examples/python/tmq_websocket_example.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/examples/python/tmq_websocket_example.py b/docs/examples/python/tmq_websocket_example.py index 793eb3c416..c9c7924661 100644 --- a/docs/examples/python/tmq_websocket_example.py +++ b/docs/examples/python/tmq_websocket_example.py @@ -173,4 +173,4 @@ if __name__ == "__main__": except Exception as err: print(f"Failed to execute consumer example, topic: {topic}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}.") finally: - unsubscribe(consumer); + unsubscribe(consumer) From 367b6eee729cde15c12df744fec55fd61a4267ac Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 16 Aug 2024 21:01:02 +0800 Subject: [PATCH 115/181] fix mem leak --- include/libs/stream/tstream.h | 2 +- source/libs/stream/src/streamTask.c | 8 +++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index f916e05d52..fd2802058d 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -453,8 +453,8 @@ struct SStreamTask { SSHashObj* pNameMap; void* pBackend; int8_t subtableWithoutMd5; + char reserve[256]; char* backendPath; - char reserve[256 - sizeof(char*)]; }; typedef int32_t (*startComplete_fn_t)(struct SStreamMeta*); diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index b7b54e2885..cee24bb8dc 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -298,7 +298,6 @@ void tFreeStreamTask(SStreamTask* pTask) { taosArrayDestroy(pTask->outputInfo.pNodeEpsetUpdateList); pTask->outputInfo.pNodeEpsetUpdateList = NULL; - if (pTask->id.idStr != NULL) { taosMemoryFree((void*)pTask->id.idStr); } @@ -496,10 +495,13 @@ int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, i if (pTask->chkInfo.pActiveInfo == NULL) { code = streamTaskCreateActiveChkptInfo(&pTask->chkInfo.pActiveInfo); + if (code) { + stError("s-task:%s failed to create active checkpoint info, code:%s", pTask->id.idStr, tstrerror(code)); + return code; + } } - code = streamTaskSetBackendPath(pTask); - return code; + return streamTaskSetBackendPath(pTask); } int32_t streamTaskGetNumOfDownstream(const SStreamTask* pTask) { From 871f4dcb3df010187fb5066c051f57b4af7ab0c5 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 16 Aug 2024 21:18:22 +0800 Subject: [PATCH 116/181] fix mem leak --- source/libs/stream/src/streamTask.c | 1 - 1 file changed, 1 deletion(-) diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index cee24bb8dc..ff9688e3d6 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -319,7 +319,6 @@ void streamFreeTaskState(SStreamTask* pTask, int8_t remove) { taskDbRemoveRef(pTask->pBackend); pTask->pBackend = NULL; pTask->pState = NULL; - if 
(pTask->backendPath != NULL) { taosMemoryFree(pTask->backendPath); pTask->backendPath = NULL; From d0beeea91712fe5778f1ee25f3f02f7f38e1ce65 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 16 Aug 2024 21:18:42 +0800 Subject: [PATCH 117/181] fix mem leak --- source/libs/stream/src/streamTask.c | 1 - 1 file changed, 1 deletion(-) diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index ff9688e3d6..fc72489ee6 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -315,7 +315,6 @@ void streamFreeTaskState(SStreamTask* pTask, int8_t remove) { streamStateClose(pTask->pState, remove); if (remove) taskDbSetClearFileFlag(pTask->pBackend); - taskDbRemoveRef(pTask->pBackend); pTask->pBackend = NULL; pTask->pState = NULL; From 5b82556ff5d716530a70390d3c4859c8f3b314cc Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 16 Aug 2024 22:31:21 +0800 Subject: [PATCH 118/181] fix(query): reset the errno code. --- source/dnode/vnode/src/tsdb/tsdbMergeTree.c | 3 ++- source/dnode/vnode/src/tsdb/tsdbRead2.c | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c index 099bde5897..9938c073ff 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c +++ b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c @@ -833,7 +833,8 @@ bool tLDataIterNextRow(SLDataIter *pIter, const char *idStr) { int32_t code = 0; int32_t iBlockL = pIter->iSttBlk; SBlockData *pBlockData = NULL; - + terrno = 0; + // no qualified last file block in current file, no need to fetch row if (pIter->pSttBlk == NULL) { return false; diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index 0572df2922..9a9c74a3a0 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -3598,6 +3598,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) { if (pBlockIter->numOfBlocks == 0) { // let's try to extract data from stt files. + terrno = 0; ERetrieveType type = doReadDataFromSttFiles(pReader); if (type == TSDB_READ_RETURN) { return terrno; From 1f6cef26e8f09ea0dc7a16777acec7ac54f85608 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 17 Aug 2024 01:44:58 +0800 Subject: [PATCH 119/181] refactor: do some internal refactor. --- source/dnode/vnode/src/tsdb/tsdbMergeTree.c | 27 +++++++++++++-------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c index 9938c073ff..4729b912a7 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c +++ b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c @@ -828,16 +828,19 @@ static int32_t findNextValidRow(SLDataIter *pIter, const char *idStr) { return code; } -bool tLDataIterNextRow(SLDataIter *pIter, const char *idStr) { +int32_t tLDataIterNextRow(SLDataIter *pIter, const char *idStr, bool* hasNext) { int32_t step = pIter->backward ? 
-1 : 1; int32_t code = 0; int32_t iBlockL = pIter->iSttBlk; SBlockData *pBlockData = NULL; + int32_t lino = 0; + + *hasNext = false; terrno = 0; // no qualified last file block in current file, no need to fetch row if (pIter->pSttBlk == NULL) { - return false; + return code; } code = loadLastBlock(pIter, idStr, &pBlockData); @@ -850,9 +853,7 @@ bool tLDataIterNextRow(SLDataIter *pIter, const char *idStr) { while (1) { bool skipBlock = false; code = findNextValidRow(pIter, idStr); - if (code) { - goto _exit; - } + TSDB_CHECK_CODE(code, lino, _exit); if (pIter->pBlockLoadInfo->checkRemainingRow) { skipBlock = true; @@ -902,7 +903,8 @@ bool tLDataIterNextRow(SLDataIter *pIter, const char *idStr) { pIter->rInfo.row = tsdbRowFromBlockData(pBlockData, pIter->iRow); _exit: - return (code == TSDB_CODE_SUCCESS) && (pIter->pSttBlk != NULL) && (pBlockData != NULL); + *hasNext = (code == TSDB_CODE_SUCCESS) && (pIter->pSttBlk != NULL) && (pBlockData != NULL); + return code; } // SMergeTree ================================================= @@ -1005,7 +1007,12 @@ int32_t tMergeTreeOpen2(SMergeTree *pMTree, SMergeTreeConf *pConf, SSttDataInfoF goto _end; } - bool hasVal = tLDataIterNextRow(pIter, pMTree->idStr); + bool hasVal = NULL; + code = tLDataIterNextRow(pIter, pMTree->idStr, &hasVal); + if (code) { + goto _end; + } + if (hasVal) { tMergeTreeAddIter(pMTree, pIter); @@ -1018,7 +1025,6 @@ int32_t tMergeTreeOpen2(SMergeTree *pMTree, SMergeTreeConf *pConf, SSttDataInfoF pSttDataInfo->numOfRows += numOfRows; } } else { - TAOS_CHECK_GOTO(terrno, NULL, _end); if (!pMTree->ignoreEarlierTs) { pMTree->ignoreEarlierTs = pIter->ignoreEarlierTs; } @@ -1100,8 +1106,9 @@ bool tMergeTreeNext(SMergeTree *pMTree) { if (pMTree->pIter) { SLDataIter *pIter = pMTree->pIter; - bool hasVal = tLDataIterNextRow(pIter, pMTree->idStr); - if (!hasVal) { + bool hasVal = false; + int32_t code = tLDataIterNextRow(pIter, pMTree->idStr, &hasVal); + if (!hasVal || (code != 0)) { pMTree->pIter = NULL; } From f4bac239064f146a41ced20982ab93b893f6a30a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 17 Aug 2024 01:47:30 +0800 Subject: [PATCH 120/181] refactor: do some internal refactor --- source/dnode/mnode/impl/src/mndStreamHb.c | 46 ++++++++++++++++------ source/dnode/vnode/src/tqCommon/tqCommon.c | 21 ---------- source/libs/stream/src/streamCheckpoint.c | 2 +- source/libs/stream/src/streamHb.c | 2 +- source/libs/stream/src/streamMeta.c | 2 +- source/libs/stream/src/streamTask.c | 2 + 6 files changed, 39 insertions(+), 36 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndStreamHb.c b/source/dnode/mnode/impl/src/mndStreamHb.c index 59f07ce977..f515e9565d 100644 --- a/source/dnode/mnode/impl/src/mndStreamHb.c +++ b/source/dnode/mnode/impl/src/mndStreamHb.c @@ -26,12 +26,13 @@ static int32_t mndStreamSendUpdateChkptInfoMsg(SMnode *pMnode); static int32_t mndSendDropOrphanTasksMsg(SMnode *pMnode, SArray *pList); static int32_t mndSendResetFromCheckpointMsg(SMnode *pMnode, int64_t streamId, int32_t transId); static void updateStageInfo(STaskStatusEntry *pTaskEntry, int64_t stage); -static void addIntoCheckpointList(SArray *pList, const SFailedCheckpointInfo *pInfo); +static void addIntoFailedChkptList(SArray *pList, const SFailedCheckpointInfo *pInfo); static int32_t setNodeEpsetExpiredFlag(const SArray *pNodeList); static int32_t suspendAllStreams(SMnode *pMnode, SRpcHandleInfo *info); static bool validateHbMsg(const SArray *pNodeList, int32_t vgId); static void cleanupAfterProcessHbMsg(SStreamHbMsg *pReq, SArray 
*pFailedChkptList, SArray *pOrphanTasks); static void doSendHbMsgRsp(int32_t code, SRpcHandleInfo *pRpcInfo, int32_t vgId, int32_t msgId); +static void checkforOrphanTask(SMnode* pMnode, STaskStatusEntry* p, SArray* pOrphanTasks); void updateStageInfo(STaskStatusEntry *pTaskEntry, int64_t stage) { int32_t numOfNodes = taosArrayGetSize(execInfo.pNodeList); @@ -52,7 +53,7 @@ void updateStageInfo(STaskStatusEntry *pTaskEntry, int64_t stage) { } } -void addIntoCheckpointList(SArray *pList, const SFailedCheckpointInfo *pInfo) { +void addIntoFailedChkptList(SArray *pList, const SFailedCheckpointInfo *pInfo) { int32_t num = taosArrayGetSize(pList); for (int32_t i = 0; i < num; ++i) { SFailedCheckpointInfo *p = taosArrayGet(pList, i); @@ -401,13 +402,7 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) { STaskStatusEntry *pTaskEntry = taosHashGet(execInfo.pTaskMap, &p->id, sizeof(p->id)); if (pTaskEntry == NULL) { - mError("s-task:0x%" PRIx64 " not found in mnode task list, added into orphan task list", p->id.taskId); - - SOrphanTask oTask = {.streamId = p->id.streamId, .taskId = p->id.taskId, .nodeId = p->nodeId}; - void* px = taosArrayPush(pOrphanTasks, &oTask); - if (px == NULL) { - mError("failed to put task into list, taskId:0x%" PRIx64, p->id.taskId); - } + checkforOrphanTask(pMnode, p, pOrphanTasks); continue; } @@ -423,7 +418,8 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) { SStreamObj *pStream = NULL; code = mndGetStreamObj(pMnode, p->id.streamId, &pStream); if (code) { - mError("stream obj not exist, failed to handle consensus checkpoint-info req, code:%s", tstrerror(code)); + mError("stream:0x%" PRIx64 " not exist, failed to handle consensus checkpoint-info req for task:0x%x, code:%s", + p->id.streamId, (int32_t)p->id.taskId, tstrerror(code)); continue; } @@ -434,7 +430,7 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) { if (code == 0) { mndAddConsensusTasks(pInfo, &cp); } else { - mError("failed to get consensus checkpoint-info"); + mError("failed to get consensus checkpoint-info for stream:0x%" PRIx64, p->id.streamId); } mndReleaseStream(pMnode, pStream); @@ -454,7 +450,7 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) { SFailedCheckpointInfo info = { .transId = pChkInfo->activeTransId, .checkpointId = pChkInfo->activeId, .streamUid = p->id.streamId}; - addIntoCheckpointList(pFailedChkpt, &info); + addIntoFailedChkptList(pFailedChkpt, &info); // remove failed trans from pChkptStreams code = mndResetChkptReportInfo(execInfo.pChkptStreams, p->id.streamId); @@ -516,6 +512,9 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) { if (pMnode != NULL) { // make sure that the unit test case can work code = mndStreamSendUpdateChkptInfoMsg(pMnode); + if (code) { + mError("failed to send update checkpointInfo msg, code:%s, try next time", tstrerror(code)); + } } streamMutexUnlock(&execInfo.lock); @@ -554,3 +553,26 @@ void doSendHbMsgRsp(int32_t code, SRpcHandleInfo *pRpcInfo, int32_t vgId, int32_ tmsgSendRsp(&rsp); pRpcInfo->handle = NULL; // disable auto rsp } + +void checkforOrphanTask(SMnode* pMnode, STaskStatusEntry* p, SArray* pOrphanTasks) { + SStreamObj *pStream = NULL; + + int32_t code = mndGetStreamObj(pMnode, p->id.streamId, &pStream); + if (code) { + mError("stream:0x%" PRIx64 " not exists, s-task:0x%" PRIx64 " not found in task list, add into orphan list", + p->id.streamId, p->id.taskId); + + SOrphanTask oTask = {.streamId = p->id.streamId, .taskId = p->id.taskId, .nodeId = p->nodeId}; + void *px = taosArrayPush(pOrphanTasks, &oTask); + if (px == NULL) { + mError("failed to put task into orphan list, 
taskId:0x%" PRIx64", code:%s", p->id.taskId, tstrerror(terrno)); + } + } else { + if (pStream != NULL) { + mndReleaseStream(pMnode, pStream); + } + + mError("s-task:0x%" PRIx64 " not found in task list but exists in mnode meta, data inconsistent, not drop yet", + p->id.taskId); + } +} \ No newline at end of file diff --git a/source/dnode/vnode/src/tqCommon/tqCommon.c b/source/dnode/vnode/src/tqCommon/tqCommon.c index 7037eb5199..422ca16e50 100644 --- a/source/dnode/vnode/src/tqCommon/tqCommon.c +++ b/source/dnode/vnode/src/tqCommon/tqCommon.c @@ -131,27 +131,6 @@ int32_t tqStreamStartOneTaskAsync(SStreamMeta* pMeta, SMsgCb* cb, int64_t stream return streamTaskSchedTask(cb, vgId, streamId, taskId, STREAM_EXEC_T_START_ONE_TASK); } -int32_t tqStreamTaskRestoreCheckpoint(SStreamMeta* pMeta, int64_t streamId, int32_t taskId) { - int32_t vgId = pMeta->vgId; - int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList); - if (numOfTasks == 0) { - tqDebug("vgId:%d no stream tasks existed to run", vgId); - return 0; - } - - tqDebug("vgId:%d restore task:0x%" PRIx64 "-0x%x checkpointId", vgId, streamId, taskId); - SStreamTask* pTask = NULL; - int32_t code = streamMetaAcquireTask(pMeta, streamId, taskId, &pTask); - if (pTask == NULL) { - tqError("failed to acquire task:0x%x when trying to restore checkpointId", taskId); - return TSDB_CODE_STREAM_TASK_NOT_EXIST; - } - - code = streamTaskSendRestoreChkptMsg(pTask); - streamMetaReleaseTask(pMeta, pTask); - return code; -} - // this is to process request from transaction, always return true. int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pMsg, bool restored) { int32_t vgId = pMeta->vgId; diff --git a/source/libs/stream/src/streamCheckpoint.c b/source/libs/stream/src/streamCheckpoint.c index 9be8f5ffaa..0ef7c2312a 100644 --- a/source/libs/stream/src/streamCheckpoint.c +++ b/source/libs/stream/src/streamCheckpoint.c @@ -1354,7 +1354,7 @@ int32_t deleteCheckpointFile(const char* id, const char* name) { return code; } -int32_t streamTaskSendRestoreChkptMsg(SStreamTask* pTask) { +int32_t streamTaskSendNegotiateChkptIdMsg(SStreamTask* pTask) { const char* id = pTask->id.idStr; streamMutexLock(&pTask->lock); diff --git a/source/libs/stream/src/streamHb.c b/source/libs/stream/src/streamHb.c index d2c5cb05b7..73392fade0 100644 --- a/source/libs/stream/src/streamHb.c +++ b/source/libs/stream/src/streamHb.c @@ -200,7 +200,7 @@ int32_t streamMetaSendHbHelper(SStreamMeta* pMeta) { if ((*pTask)->status.requireConsensusChkptId) { entry.checkpointInfo.consensusChkptId = 1; (*pTask)->status.requireConsensusChkptId = false; - stDebug("s-task:%s vgId:%d set the require consensus-checkpointId in hbMsg", (*pTask)->id.idStr, pMeta->vgId); + stDebug("s-task:%s vgId:%d set requiring consensus-checkpointId in hbMsg", (*pTask)->id.idStr, pMeta->vgId); } if ((*pTask)->exec.pWalReader != NULL) { diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 07c67ba007..5bec930455 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -1424,7 +1424,7 @@ int32_t streamMetaStartAllTasks(SStreamMeta* pMeta) { } // negotiate the consensus checkpoint id for current task - code = streamTaskSendRestoreChkptMsg(pTask); + code = streamTaskSendNegotiateChkptIdMsg(pTask); // this task may has no checkpoint, but others tasks may generate checkpoint already? 
streamMetaReleaseTask(pMeta, pTask); diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index f190673430..5628095973 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -1182,6 +1182,8 @@ const char* streamTaskGetExecType(int32_t type) { return "resume-task-from-idle"; case STREAM_EXEC_T_ADD_FAILED_TASK: return "record-start-failed-task"; + case 0: + return "exec-all-tasks"; default: return "invalid-exec-type"; } From b6c3e7574696482615d76a55ca5467c1b230c81e Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sat, 17 Aug 2024 09:47:43 +0800 Subject: [PATCH 121/181] fix mem leak --- source/libs/stream/src/streamTask.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index fc72489ee6..1cbbcac046 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -318,19 +318,18 @@ void streamFreeTaskState(SStreamTask* pTask, int8_t remove) { taskDbRemoveRef(pTask->pBackend); pTask->pBackend = NULL; pTask->pState = NULL; - if (pTask->backendPath != NULL) { - taosMemoryFree(pTask->backendPath); - pTask->backendPath = NULL; - } } else { if (remove) { if (pTask->backendPath != NULL) { taosRemoveDir(pTask->backendPath); - taosMemoryFree(pTask->backendPath); - pTask->backendPath = NULL; } } } + + if (pTask->backendPath != NULL) { + taosMemoryFree(pTask->backendPath); + pTask->backendPath = NULL; + } } static void setInitialVersionInfo(SStreamTask* pTask, int64_t ver) { From 42c7b6ca33efdb568866609faf373da03e063df2 Mon Sep 17 00:00:00 2001 From: Yaming Pei Date: Sat, 17 Aug 2024 11:42:14 +0800 Subject: [PATCH 122/181] c language sample program modification --- docs/examples/c/tmq_demo.c | 5 ++--- docs/examples/c/with_reqid_demo.c | 1 - 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/docs/examples/c/tmq_demo.c b/docs/examples/c/tmq_demo.c index b16245d484..18eef02098 100644 --- a/docs/examples/c/tmq_demo.c +++ b/docs/examples/c/tmq_demo.c @@ -92,7 +92,6 @@ void* prepare_data(void* arg) { // ANCHOR: msg_process int32_t msg_process(TAOS_RES* msg) { - char buf[1024]; // buf to store the row content int32_t rows = 0; const char* topicName = tmq_get_topic_name(msg); const char* dbName = tmq_get_db_name(msg); @@ -371,8 +370,8 @@ void consume_repeatly(tmq_t* tmq) { code = tmq_offset_seek(tmq, topic_name, p->vgId, p->begin); if (code != 0) { - fprintf(stderr, "Failed to seek offset, topic: %s, groupId: %s, clientId: %s, ErrCode: 0x%x, ErrMessage: %s.\n", - topic_name, config.group_id, config.client_id, code, tmq_err2str(code)); + fprintf(stderr, "Failed to seek offset, topic: %s, groupId: %s, clientId: %s, vgId: %d, ErrCode: 0x%x, ErrMessage: %s.\n", + topic_name, config.group_id, config.client_id, p->vgId, code, tmq_err2str(code)); break; } } diff --git a/docs/examples/c/with_reqid_demo.c b/docs/examples/c/with_reqid_demo.c index 8942077f67..1a1a53acc6 100644 --- a/docs/examples/c/with_reqid_demo.c +++ b/docs/examples/c/with_reqid_demo.c @@ -57,7 +57,6 @@ static int DemoWithReqId() { fprintf(stdout, "query successfully, got %d fields, the sql is: %s.\n", num_fields, sql); - // fetch the records row by row while ((row = taos_fetch_row(result))) { // Add your data processing logic here From 0d3d0730d4cf697abdb90999e30330dc4f848eba Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 17 Aug 2024 15:30:10 +0800 Subject: [PATCH 123/181] refactor: do some internal refactor. 
--- include/libs/stream/tstream.h | 5 +- source/libs/stream/src/streamMeta.c | 402 +------------------- source/libs/stream/src/streamStartTask.c | 444 +++++++++++++++++++++++ 3 files changed, 452 insertions(+), 399 deletions(-) create mode 100644 source/libs/stream/src/streamStartTask.c diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 5e7f2bf0a6..8d6184cab6 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -750,6 +750,9 @@ void streamMetaStartHb(SStreamMeta* pMeta); bool streamMetaTaskInTimer(SStreamMeta* pMeta); int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs, int64_t endTs, bool ready); +int32_t streamMetaInitStartInfo(STaskStartInfo* pStartInfo); +void streamMetaClearStartInfo(STaskStartInfo* pStartInfo); + int32_t streamMetaResetTaskStatus(SStreamMeta* pMeta); int32_t streamMetaAddFailedTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId); void streamMetaAddFailedTaskSelf(SStreamTask* pTask, int64_t failedTs); @@ -770,7 +773,7 @@ int32_t streamMetaStartAllTasks(SStreamMeta* pMeta); int32_t streamMetaStopAllTasks(SStreamMeta* pMeta); int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId); bool streamMetaAllTasksReady(const SStreamMeta* pMeta); -int32_t streamTaskSendRestoreChkptMsg(SStreamTask* pTask); +int32_t streamTaskSendNegotiateChkptIdMsg(SStreamTask* pTask); // timer int32_t streamTimerGetInstance(tmr_h* pTmr); diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 5bec930455..a9976760b6 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -37,12 +37,6 @@ typedef struct { SHashObj* pTable; } SMetaRefMgt; -typedef struct STaskInitTs { - int64_t start; - int64_t end; - bool success; -} STaskInitTs; - SMetaRefMgt gMetaRefMgt; int32_t metaRefMgtInit(); @@ -405,15 +399,8 @@ int32_t streamMetaOpen(const char* path, void* ahandle, FTaskBuild buildTaskFn, goto _err; } - pMeta->startInfo.pReadyTaskSet = taosHashInit(64, fp, false, HASH_NO_LOCK); - if (pMeta->startInfo.pReadyTaskSet == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _err; - } - - pMeta->startInfo.pFailedTaskSet = taosHashInit(4, fp, false, HASH_NO_LOCK); - if (pMeta->startInfo.pFailedTaskSet == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; + code = streamMetaInitStartInfo(&pMeta->startInfo); + if (code) { goto _err; } @@ -609,8 +596,8 @@ void streamMetaCloseImpl(void* arg) { taosHashCleanup(pMeta->pTasksMap); taosHashCleanup(pMeta->pTaskDbUnique); taosHashCleanup(pMeta->updateInfo.pTasks); - taosHashCleanup(pMeta->startInfo.pReadyTaskSet); - taosHashCleanup(pMeta->startInfo.pFailedTaskSet); + + streamMetaClearStartInfo(&pMeta->startInfo); destroyMetaHbInfo(pMeta->pHbInfo); pMeta->pHbInfo = NULL; @@ -1191,18 +1178,6 @@ void streamMetaStartHb(SStreamMeta* pMeta) { streamMetaHbToMnode(pRid, NULL); } -void streamMetaResetStartInfo(STaskStartInfo* pStartInfo, int32_t vgId) { - taosHashClear(pStartInfo->pReadyTaskSet); - taosHashClear(pStartInfo->pFailedTaskSet); - pStartInfo->tasksWillRestart = 0; - pStartInfo->readyTs = 0; - pStartInfo->elapsedTime = 0; - - // reset the sentinel flag value to be 0 - pStartInfo->startAllTasks = 0; - stDebug("vgId:%d clear start-all-task info", vgId); -} - void streamMetaRLock(SStreamMeta* pMeta) { // stTrace("vgId:%d meta-rlock", pMeta->vgId); (void)taosThreadRwlockRdlock(&pMeta->lock); @@ -1302,185 +1277,6 @@ void streamMetaUpdateStageRole(SStreamMeta* pMeta, 
int64_t stage, bool isLeader) } } -static int32_t prepareBeforeStartTasks(SStreamMeta* pMeta, SArray** pList, int64_t now) { - streamMetaWLock(pMeta); - - if (pMeta->closeFlag) { - streamMetaWUnLock(pMeta); - stError("vgId:%d vnode is closed, not start check task(s) downstream status", pMeta->vgId); - return TSDB_CODE_FAILED; - } - - *pList = taosArrayDup(pMeta->pTaskList, NULL); - if (*pList == NULL) { - return terrno; - } - - taosHashClear(pMeta->startInfo.pReadyTaskSet); - taosHashClear(pMeta->startInfo.pFailedTaskSet); - pMeta->startInfo.startTs = now; - - int32_t code = streamMetaResetTaskStatus(pMeta); - streamMetaWUnLock(pMeta); - - return code; -} - -// restore the checkpoint id by negotiating the latest consensus checkpoint id -int32_t streamMetaStartAllTasks(SStreamMeta* pMeta) { - int32_t code = TSDB_CODE_SUCCESS; - int32_t vgId = pMeta->vgId; - int64_t now = taosGetTimestampMs(); - - int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList); - stInfo("vgId:%d start to consensus checkpointId for all %d task(s), start ts:%" PRId64, vgId, numOfTasks, now); - - if (numOfTasks == 0) { - stInfo("vgId:%d no tasks exist, quit from consensus checkpointId", pMeta->vgId); - return TSDB_CODE_SUCCESS; - } - - SArray* pTaskList = NULL; - code = prepareBeforeStartTasks(pMeta, &pTaskList, now); - if (code != TSDB_CODE_SUCCESS) { - ASSERT(pTaskList == NULL); - return TSDB_CODE_SUCCESS; - } - - // broadcast the check downstream tasks msg only for tasks with related fill-history tasks. - numOfTasks = taosArrayGetSize(pTaskList); - - // prepare the fill-history task before starting all stream tasks, to avoid fill-history tasks are started without - // initialization, when the operation of check downstream tasks status is executed far quickly. - for (int32_t i = 0; i < numOfTasks; ++i) { - SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i); - SStreamTask* pTask = NULL; - code = streamMetaAcquireTask(pMeta, pTaskId->streamId, pTaskId->taskId, &pTask); - if (pTask == NULL) { - stError("vgId:%d failed to acquire task:0x%x during start tasks", pMeta->vgId, pTaskId->taskId); - (void)streamMetaAddFailedTask(pMeta, pTaskId->streamId, pTaskId->taskId); - continue; - } - - if ((pTask->pBackend == NULL) && ((pTask->info.fillHistory == 1) || HAS_RELATED_FILLHISTORY_TASK(pTask))) { - code = pMeta->expandTaskFn(pTask); - if (code != TSDB_CODE_SUCCESS) { - stError("s-task:0x%x vgId:%d failed to expand stream backend", pTaskId->taskId, vgId); - streamMetaAddFailedTaskSelf(pTask, pTask->execInfo.readyTs); - } - } - - streamMetaReleaseTask(pMeta, pTask); - } - - // Tasks, with related fill-history task or without any checkpoint yet, can be started directly here. - for (int32_t i = 0; i < numOfTasks; ++i) { - SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i); - - SStreamTask* pTask = NULL; - code = streamMetaAcquireTask(pMeta, pTaskId->streamId, pTaskId->taskId, &pTask); - if (pTask == NULL) { - stError("vgId:%d failed to acquire task:0x%x during start tasks", pMeta->vgId, pTaskId->taskId); - (void)streamMetaAddFailedTask(pMeta, pTaskId->streamId, pTaskId->taskId); - continue; - } - - STaskExecStatisInfo* pInfo = &pTask->execInfo; - - // fill-history task can only be launched by related stream tasks. 
- if (pTask->info.fillHistory == 1) { - stDebug("s-task:%s fill-history task wait related stream task start", pTask->id.idStr); - streamMetaReleaseTask(pMeta, pTask); - continue; - } - - // ready now, start the related fill-history task - if (pTask->status.downstreamReady == 1) { - if (HAS_RELATED_FILLHISTORY_TASK(pTask)) { - stDebug("s-task:%s downstream ready, no need to check downstream, check only related fill-history task", - pTask->id.idStr); - (void)streamLaunchFillHistoryTask(pTask); // todo: how about retry launch fill-history task? - } - - (void)streamMetaAddTaskLaunchResult(pMeta, pTaskId->streamId, pTaskId->taskId, pInfo->checkTs, pInfo->readyTs, - true); - streamMetaReleaseTask(pMeta, pTask); - continue; - } - - if (HAS_RELATED_FILLHISTORY_TASK(pTask)) { - int32_t ret = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_INIT); - if (ret != TSDB_CODE_SUCCESS) { - stError("vgId:%d failed to handle event:%d", pMeta->vgId, TASK_EVENT_INIT); - code = ret; - - if (code != TSDB_CODE_STREAM_INVALID_STATETRANS) { - streamMetaAddFailedTaskSelf(pTask, pInfo->readyTs); - } - } - - streamMetaReleaseTask(pMeta, pTask); - continue; - } - - // negotiate the consensus checkpoint id for current task - code = streamTaskSendNegotiateChkptIdMsg(pTask); - - // this task may has no checkpoint, but others tasks may generate checkpoint already? - streamMetaReleaseTask(pMeta, pTask); - } - - // prepare the fill-history task before starting all stream tasks, to avoid fill-history tasks are started without - // initialization, when the operation of check downstream tasks status is executed far quickly. - stInfo("vgId:%d start all task(s) completed", pMeta->vgId); - taosArrayDestroy(pTaskList); - return code; -} - -int32_t streamMetaStopAllTasks(SStreamMeta* pMeta) { - streamMetaRLock(pMeta); - - int32_t num = taosArrayGetSize(pMeta->pTaskList); - stDebug("vgId:%d stop all %d stream task(s)", pMeta->vgId, num); - if (num == 0) { - stDebug("vgId:%d stop all %d task(s) completed, elapsed time:0 Sec.", pMeta->vgId, num); - streamMetaRUnLock(pMeta); - return TSDB_CODE_SUCCESS; - } - - int64_t st = taosGetTimestampMs(); - - // send hb msg to mnode before closing all tasks. 
- SArray* pTaskList = NULL; - int32_t code = streamMetaSendMsgBeforeCloseTasks(pMeta, &pTaskList); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - - int32_t numOfTasks = taosArrayGetSize(pTaskList); - - for (int32_t i = 0; i < numOfTasks; ++i) { - SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i); - SStreamTask* pTask = NULL; - - code = streamMetaAcquireTaskNoLock(pMeta, pTaskId->streamId, pTaskId->taskId, &pTask); - if (code != TSDB_CODE_SUCCESS) { - continue; - } - - (void)streamTaskStop(pTask); - streamMetaReleaseTask(pMeta, pTask); - } - - taosArrayDestroy(pTaskList); - - double el = (taosGetTimestampMs() - st) / 1000.0; - stDebug("vgId:%d stop all %d task(s) completed, elapsed time:%.2f Sec.", pMeta->vgId, num, el); - - streamMetaRUnLock(pMeta); - return 0; -} - bool streamMetaAllTasksReady(const SStreamMeta* pMeta) { int32_t num = taosArrayGetSize(pMeta->pTaskList); for (int32_t i = 0; i < num; ++i) { @@ -1499,196 +1295,6 @@ bool streamMetaAllTasksReady(const SStreamMeta* pMeta) { return true; } -int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId) { - int32_t code = 0; - int32_t vgId = pMeta->vgId; - SStreamTask* pTask = NULL; - bool continueExec = true; - - stInfo("vgId:%d start task:0x%x by checking it's downstream status", vgId, taskId); - - code = streamMetaAcquireTask(pMeta, streamId, taskId, &pTask); - if (pTask == NULL) { - stError("vgId:%d failed to acquire task:0x%x when starting task", vgId, taskId); - (void)streamMetaAddFailedTask(pMeta, streamId, taskId); - return TSDB_CODE_STREAM_TASK_IVLD_STATUS; - } - - // fill-history task can only be launched by related stream tasks. - STaskExecStatisInfo* pInfo = &pTask->execInfo; - if (pTask->info.fillHistory == 1) { - stError("s-task:0x%x vgId:%d fill-histroy task, not start here", taskId, vgId); - streamMetaReleaseTask(pMeta, pTask); - return TSDB_CODE_SUCCESS; - } - - // the start all tasks procedure may happen to start the newly deployed stream task, and results in the - // concurrently start this task by two threads. - streamMutexLock(&pTask->lock); - SStreamTaskState status = streamTaskGetStatus(pTask); - if (status.state != TASK_STATUS__UNINIT) { - stError("s-task:0x%x vgId:%d status:%s not uninit status, not start stream task", taskId, vgId, status.name); - continueExec = false; - } else { - continueExec = true; - } - streamMutexUnlock(&pTask->lock); - - if (!continueExec) { - streamMetaReleaseTask(pMeta, pTask); - return TSDB_CODE_STREAM_TASK_IVLD_STATUS; - } - - ASSERT(pTask->status.downstreamReady == 0); - - // avoid initialization and destroy running concurrently. - streamMutexLock(&pTask->lock); - if (pTask->pBackend == NULL) { - code = pMeta->expandTaskFn(pTask); - streamMutexUnlock(&pTask->lock); - - if (code != TSDB_CODE_SUCCESS) { - streamMetaAddFailedTaskSelf(pTask, pInfo->readyTs); - } - } else { - streamMutexUnlock(&pTask->lock); - } - - // concurrently start task may cause the later started task be failed, and also failed to added into meta result. - if (code == TSDB_CODE_SUCCESS) { - code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_INIT); - if (code != TSDB_CODE_SUCCESS) { - stError("s-task:%s vgId:%d failed to handle event:%d, code:%s", pTask->id.idStr, pMeta->vgId, TASK_EVENT_INIT, - tstrerror(code)); - - // do no added into result hashmap if it is failed due to concurrently starting of this stream task. 
- if (code != TSDB_CODE_STREAM_INVALID_STATETRANS) { - streamMetaAddFailedTaskSelf(pTask, pInfo->readyTs); - } - } - } - - streamMetaReleaseTask(pMeta, pTask); - return code; -} - -static void displayStatusInfo(SStreamMeta* pMeta, SHashObj* pTaskSet, bool succ) { - int32_t vgId = pMeta->vgId; - void* pIter = NULL; - size_t keyLen = 0; - - stInfo("vgId:%d %d tasks check-downstream completed, %s", vgId, taosHashGetSize(pTaskSet), - succ ? "success" : "failed"); - - while ((pIter = taosHashIterate(pTaskSet, pIter)) != NULL) { - STaskInitTs* pInfo = pIter; - void* key = taosHashGetKey(pIter, &keyLen); - - SStreamTask** pTask1 = taosHashGet(pMeta->pTasksMap, key, sizeof(STaskId)); - if (pTask1 == NULL) { - stInfo("s-task:0x%x is dropped already, %s", (int32_t)((STaskId*)key)->taskId, succ ? "success" : "failed"); - } else { - stInfo("s-task:%s level:%d vgId:%d, init:%" PRId64 ", initEnd:%" PRId64 ", %s", (*pTask1)->id.idStr, - (*pTask1)->info.taskLevel, vgId, pInfo->start, pInfo->end, pInfo->success ? "success" : "failed"); - } - } -} - -// check all existed tasks are received rsp -static bool allCheckDownstreamRsp(SStreamMeta* pMeta, STaskStartInfo* pStartInfo, int32_t numOfTotal) { - for (int32_t i = 0; i < numOfTotal; ++i) { - SStreamTaskId* pTaskId = taosArrayGet(pMeta->pTaskList, i); - if (pTaskId == NULL) { - continue; - } - - STaskId idx = {.streamId = pTaskId->streamId, .taskId = pTaskId->taskId}; - void* px = taosHashGet(pStartInfo->pReadyTaskSet, &idx, sizeof(idx)); - if (px == NULL) { - px = taosHashGet(pStartInfo->pFailedTaskSet, &idx, sizeof(idx)); - if (px == NULL) { - return false; - } - } - } - - return true; -} - -int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs, - int64_t endTs, bool ready) { - STaskStartInfo* pStartInfo = &pMeta->startInfo; - STaskId id = {.streamId = streamId, .taskId = taskId}; - int32_t vgId = pMeta->vgId; - bool allRsp = true; - - streamMetaWLock(pMeta); - SStreamTask** p = taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - if (p == NULL) { // task does not exists in current vnode, not record the complete info - stError("vgId:%d s-task:0x%x not exists discard the check downstream info", vgId, taskId); - streamMetaWUnLock(pMeta); - return 0; - } - - // clear the send consensus-checkpointId flag - streamMutexLock(&(*p)->lock); - (*p)->status.sendConsensusChkptId = false; - streamMutexUnlock(&(*p)->lock); - - if (pStartInfo->startAllTasks != 1) { - int64_t el = endTs - startTs; - stDebug( - "vgId:%d not in start all task(s) process, not record launch result status, s-task:0x%x launch succ:%d elapsed " - "time:%" PRId64 "ms", - vgId, taskId, ready, el); - streamMetaWUnLock(pMeta); - return 0; - } - - STaskInitTs initTs = {.start = startTs, .end = endTs, .success = ready}; - SHashObj* pDst = ready ? 
pStartInfo->pReadyTaskSet : pStartInfo->pFailedTaskSet; - int32_t code = taosHashPut(pDst, &id, sizeof(id), &initTs, sizeof(STaskInitTs)); - if (code) { - if (code == TSDB_CODE_DUP_KEY) { - stError("vgId:%d record start task result failed, s-task:0x%" PRIx64 - " already exist start results in meta start task result hashmap", - vgId, id.taskId); - } else { - stError("vgId:%d failed to record start task:0x%" PRIx64 " results, start all tasks failed", vgId, id.taskId); - } - streamMetaWUnLock(pMeta); - return code; - } - - int32_t numOfTotal = streamMetaGetNumOfTasks(pMeta); - int32_t numOfRecv = taosHashGetSize(pStartInfo->pReadyTaskSet) + taosHashGetSize(pStartInfo->pFailedTaskSet); - - allRsp = allCheckDownstreamRsp(pMeta, pStartInfo, numOfTotal); - if (allRsp) { - pStartInfo->readyTs = taosGetTimestampMs(); - pStartInfo->elapsedTime = (pStartInfo->startTs != 0) ? pStartInfo->readyTs - pStartInfo->startTs : 0; - - stDebug("vgId:%d all %d task(s) check downstream completed, last completed task:0x%x (succ:%d) startTs:%" PRId64 - ", readyTs:%" PRId64 " total elapsed time:%.2fs", - vgId, numOfTotal, taskId, ready, pStartInfo->startTs, pStartInfo->readyTs, - pStartInfo->elapsedTime / 1000.0); - - // print the initialization elapsed time and info - displayStatusInfo(pMeta, pStartInfo->pReadyTaskSet, true); - displayStatusInfo(pMeta, pStartInfo->pFailedTaskSet, false); - streamMetaResetStartInfo(pStartInfo, vgId); - streamMetaWUnLock(pMeta); - - code = pStartInfo->completeFn(pMeta); - } else { - streamMetaWUnLock(pMeta); - stDebug("vgId:%d recv check downstream results, s-task:0x%x succ:%d, received:%d, total:%d", vgId, taskId, ready, - numOfRecv, numOfTotal); - } - - return code; -} - int32_t streamMetaResetTaskStatus(SStreamMeta* pMeta) { int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList); diff --git a/source/libs/stream/src/streamStartTask.c b/source/libs/stream/src/streamStartTask.c new file mode 100644 index 0000000000..3cf06fd04a --- /dev/null +++ b/source/libs/stream/src/streamStartTask.c @@ -0,0 +1,444 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "executor.h" +#include "streamBackendRocksdb.h" +#include "streamInt.h" +#include "tmisce.h" +#include "tref.h" +#include "tsched.h" +#include "tstream.h" +#include "ttimer.h" +#include "wal.h" + +typedef struct STaskInitTs { + int64_t start; + int64_t end; + bool success; +} STaskInitTs; + +static int32_t prepareBeforeStartTasks(SStreamMeta* pMeta, SArray** pList, int64_t now); +static bool allCheckDownstreamRsp(SStreamMeta* pMeta, STaskStartInfo* pStartInfo, int32_t numOfTotal); +static void displayStatusInfo(SStreamMeta* pMeta, SHashObj* pTaskSet, bool succ); + +// restore the checkpoint id by negotiating the latest consensus checkpoint id +int32_t streamMetaStartAllTasks(SStreamMeta* pMeta) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t vgId = pMeta->vgId; + int64_t now = taosGetTimestampMs(); + + int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList); + stInfo("vgId:%d start to consensus checkpointId for all %d task(s), start ts:%" PRId64, vgId, numOfTasks, now); + + if (numOfTasks == 0) { + stInfo("vgId:%d no tasks exist, quit from consensus checkpointId", pMeta->vgId); + return TSDB_CODE_SUCCESS; + } + + SArray* pTaskList = NULL; + code = prepareBeforeStartTasks(pMeta, &pTaskList, now); + if (code != TSDB_CODE_SUCCESS) { + ASSERT(pTaskList == NULL); + return TSDB_CODE_SUCCESS; + } + + // broadcast the check downstream tasks msg only for tasks with related fill-history tasks. + numOfTasks = taosArrayGetSize(pTaskList); + + // prepare the fill-history task before starting all stream tasks, to avoid fill-history tasks are started without + // initialization, when the operation of check downstream tasks status is executed far quickly. + for (int32_t i = 0; i < numOfTasks; ++i) { + SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i); + SStreamTask* pTask = NULL; + code = streamMetaAcquireTask(pMeta, pTaskId->streamId, pTaskId->taskId, &pTask); + if (pTask == NULL) { + stError("vgId:%d failed to acquire task:0x%x during start tasks", pMeta->vgId, pTaskId->taskId); + (void)streamMetaAddFailedTask(pMeta, pTaskId->streamId, pTaskId->taskId); + continue; + } + + if ((pTask->pBackend == NULL) && ((pTask->info.fillHistory == 1) || HAS_RELATED_FILLHISTORY_TASK(pTask))) { + code = pMeta->expandTaskFn(pTask); + if (code != TSDB_CODE_SUCCESS) { + stError("s-task:0x%x vgId:%d failed to expand stream backend", pTaskId->taskId, vgId); + streamMetaAddFailedTaskSelf(pTask, pTask->execInfo.readyTs); + } + } + + streamMetaReleaseTask(pMeta, pTask); + } + + // Tasks, with related fill-history task or without any checkpoint yet, can be started directly here. + for (int32_t i = 0; i < numOfTasks; ++i) { + SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i); + + SStreamTask* pTask = NULL; + code = streamMetaAcquireTask(pMeta, pTaskId->streamId, pTaskId->taskId, &pTask); + if (pTask == NULL) { + stError("vgId:%d failed to acquire task:0x%x during start tasks", pMeta->vgId, pTaskId->taskId); + (void)streamMetaAddFailedTask(pMeta, pTaskId->streamId, pTaskId->taskId); + continue; + } + + STaskExecStatisInfo* pInfo = &pTask->execInfo; + + // fill-history task can only be launched by related stream tasks. 
+ if (pTask->info.fillHistory == 1) { + stDebug("s-task:%s fill-history task wait related stream task start", pTask->id.idStr); + streamMetaReleaseTask(pMeta, pTask); + continue; + } + + // ready now, start the related fill-history task + if (pTask->status.downstreamReady == 1) { + if (HAS_RELATED_FILLHISTORY_TASK(pTask)) { + stDebug("s-task:%s downstream ready, no need to check downstream, check only related fill-history task", + pTask->id.idStr); + (void)streamLaunchFillHistoryTask(pTask); // todo: how about retry launch fill-history task? + } + + (void)streamMetaAddTaskLaunchResult(pMeta, pTaskId->streamId, pTaskId->taskId, pInfo->checkTs, pInfo->readyTs, + true); + streamMetaReleaseTask(pMeta, pTask); + continue; + } + + if (HAS_RELATED_FILLHISTORY_TASK(pTask)) { + int32_t ret = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_INIT); + if (ret != TSDB_CODE_SUCCESS) { + stError("vgId:%d failed to handle event:%d", pMeta->vgId, TASK_EVENT_INIT); + code = ret; + + if (code != TSDB_CODE_STREAM_INVALID_STATETRANS) { + streamMetaAddFailedTaskSelf(pTask, pInfo->readyTs); + } + } + + streamMetaReleaseTask(pMeta, pTask); + continue; + } + + // negotiate the consensus checkpoint id for current task + code = streamTaskSendNegotiateChkptIdMsg(pTask); + + // this task may has no checkpoint, but others tasks may generate checkpoint already? + streamMetaReleaseTask(pMeta, pTask); + } + + // prepare the fill-history task before starting all stream tasks, to avoid fill-history tasks are started without + // initialization, when the operation of check downstream tasks status is executed far quickly. + stInfo("vgId:%d start all task(s) completed", pMeta->vgId); + taosArrayDestroy(pTaskList); + return code; +} + +int32_t prepareBeforeStartTasks(SStreamMeta* pMeta, SArray** pList, int64_t now) { + streamMetaWLock(pMeta); + + if (pMeta->closeFlag) { + streamMetaWUnLock(pMeta); + stError("vgId:%d vnode is closed, not start check task(s) downstream status", pMeta->vgId); + return TSDB_CODE_FAILED; + } + + *pList = taosArrayDup(pMeta->pTaskList, NULL); + if (*pList == NULL) { + return terrno; + } + + taosHashClear(pMeta->startInfo.pReadyTaskSet); + taosHashClear(pMeta->startInfo.pFailedTaskSet); + pMeta->startInfo.startTs = now; + + int32_t code = streamMetaResetTaskStatus(pMeta); + streamMetaWUnLock(pMeta); + + return code; +} + +void streamMetaResetStartInfo(STaskStartInfo* pStartInfo, int32_t vgId) { + taosHashClear(pStartInfo->pReadyTaskSet); + taosHashClear(pStartInfo->pFailedTaskSet); + pStartInfo->tasksWillRestart = 0; + pStartInfo->readyTs = 0; + pStartInfo->elapsedTime = 0; + + // reset the sentinel flag value to be 0 + pStartInfo->startAllTasks = 0; + stDebug("vgId:%d clear start-all-task info", vgId); +} + +int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs, + int64_t endTs, bool ready) { + STaskStartInfo* pStartInfo = &pMeta->startInfo; + STaskId id = {.streamId = streamId, .taskId = taskId}; + int32_t vgId = pMeta->vgId; + bool allRsp = true; + + streamMetaWLock(pMeta); + SStreamTask** p = taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); + if (p == NULL) { // task does not exists in current vnode, not record the complete info + stError("vgId:%d s-task:0x%x not exists discard the check downstream info", vgId, taskId); + streamMetaWUnLock(pMeta); + return 0; + } + + // clear the send consensus-checkpointId flag + streamMutexLock(&(*p)->lock); + (*p)->status.sendConsensusChkptId = false; + streamMutexUnlock(&(*p)->lock); + + if 
(pStartInfo->startAllTasks != 1) { + int64_t el = endTs - startTs; + stDebug( + "vgId:%d not in start all task(s) process, not record launch result status, s-task:0x%x launch succ:%d elapsed " + "time:%" PRId64 "ms", + vgId, taskId, ready, el); + streamMetaWUnLock(pMeta); + return 0; + } + + STaskInitTs initTs = {.start = startTs, .end = endTs, .success = ready}; + SHashObj* pDst = ready ? pStartInfo->pReadyTaskSet : pStartInfo->pFailedTaskSet; + int32_t code = taosHashPut(pDst, &id, sizeof(id), &initTs, sizeof(STaskInitTs)); + if (code) { + if (code == TSDB_CODE_DUP_KEY) { + stError("vgId:%d record start task result failed, s-task:0x%" PRIx64 + " already exist start results in meta start task result hashmap", + vgId, id.taskId); + } else { + stError("vgId:%d failed to record start task:0x%" PRIx64 " results, start all tasks failed", vgId, id.taskId); + } + streamMetaWUnLock(pMeta); + return code; + } + + int32_t numOfTotal = streamMetaGetNumOfTasks(pMeta); + int32_t numOfRecv = taosHashGetSize(pStartInfo->pReadyTaskSet) + taosHashGetSize(pStartInfo->pFailedTaskSet); + + allRsp = allCheckDownstreamRsp(pMeta, pStartInfo, numOfTotal); + if (allRsp) { + pStartInfo->readyTs = taosGetTimestampMs(); + pStartInfo->elapsedTime = (pStartInfo->startTs != 0) ? pStartInfo->readyTs - pStartInfo->startTs : 0; + + stDebug("vgId:%d all %d task(s) check downstream completed, last completed task:0x%x (succ:%d) startTs:%" PRId64 + ", readyTs:%" PRId64 " total elapsed time:%.2fs", + vgId, numOfTotal, taskId, ready, pStartInfo->startTs, pStartInfo->readyTs, + pStartInfo->elapsedTime / 1000.0); + + // print the initialization elapsed time and info + displayStatusInfo(pMeta, pStartInfo->pReadyTaskSet, true); + displayStatusInfo(pMeta, pStartInfo->pFailedTaskSet, false); + streamMetaResetStartInfo(pStartInfo, vgId); + streamMetaWUnLock(pMeta); + + code = pStartInfo->completeFn(pMeta); + } else { + streamMetaWUnLock(pMeta); + stDebug("vgId:%d recv check downstream results, s-task:0x%x succ:%d, received:%d, total:%d", vgId, taskId, ready, + numOfRecv, numOfTotal); + } + + return code; +} + +// check all existed tasks are received rsp +bool allCheckDownstreamRsp(SStreamMeta* pMeta, STaskStartInfo* pStartInfo, int32_t numOfTotal) { + for (int32_t i = 0; i < numOfTotal; ++i) { + SStreamTaskId* pTaskId = taosArrayGet(pMeta->pTaskList, i); + if (pTaskId == NULL) { + continue; + } + + STaskId idx = {.streamId = pTaskId->streamId, .taskId = pTaskId->taskId}; + void* px = taosHashGet(pStartInfo->pReadyTaskSet, &idx, sizeof(idx)); + if (px == NULL) { + px = taosHashGet(pStartInfo->pFailedTaskSet, &idx, sizeof(idx)); + if (px == NULL) { + return false; + } + } + } + + return true; +} + +void displayStatusInfo(SStreamMeta* pMeta, SHashObj* pTaskSet, bool succ) { + int32_t vgId = pMeta->vgId; + void* pIter = NULL; + size_t keyLen = 0; + + stInfo("vgId:%d %d tasks check-downstream completed, %s", vgId, taosHashGetSize(pTaskSet), + succ ? "success" : "failed"); + + while ((pIter = taosHashIterate(pTaskSet, pIter)) != NULL) { + STaskInitTs* pInfo = pIter; + void* key = taosHashGetKey(pIter, &keyLen); + + SStreamTask** pTask1 = taosHashGet(pMeta->pTasksMap, key, sizeof(STaskId)); + if (pTask1 == NULL) { + stInfo("s-task:0x%x is dropped already, %s", (int32_t)((STaskId*)key)->taskId, succ ? "success" : "failed"); + } else { + stInfo("s-task:%s level:%d vgId:%d, init:%" PRId64 ", initEnd:%" PRId64 ", %s", (*pTask1)->id.idStr, + (*pTask1)->info.taskLevel, vgId, pInfo->start, pInfo->end, pInfo->success ? 
"success" : "failed"); + } + } +} + +int32_t streamMetaInitStartInfo(STaskStartInfo* pStartInfo) { + _hash_fn_t fp = taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR); + + pStartInfo->pReadyTaskSet = taosHashInit(64, fp, false, HASH_NO_LOCK); + if (pStartInfo->pReadyTaskSet == NULL) { + return terrno; + } + + pStartInfo->pFailedTaskSet = taosHashInit(4, fp, false, HASH_NO_LOCK); + if (pStartInfo->pFailedTaskSet == NULL) { + return terrno; + } + + return 0; +} + +void streamMetaClearStartInfo(STaskStartInfo* pStartInfo) { + taosHashClear(pStartInfo->pReadyTaskSet); + taosHashClear(pStartInfo->pFailedTaskSet); + pStartInfo->readyTs = 0; + pStartInfo->elapsedTime = 0; + pStartInfo->startTs = 0; + pStartInfo->startAllTasks = 0; + pStartInfo->tasksWillRestart = 0; + pStartInfo->restartCount = 0; +} + +int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId) { + int32_t code = 0; + int32_t vgId = pMeta->vgId; + SStreamTask* pTask = NULL; + bool continueExec = true; + + stInfo("vgId:%d start task:0x%x by checking it's downstream status", vgId, taskId); + + code = streamMetaAcquireTask(pMeta, streamId, taskId, &pTask); + if (pTask == NULL) { + stError("vgId:%d failed to acquire task:0x%x when starting task", vgId, taskId); + (void)streamMetaAddFailedTask(pMeta, streamId, taskId); + return TSDB_CODE_STREAM_TASK_IVLD_STATUS; + } + + // fill-history task can only be launched by related stream tasks. + STaskExecStatisInfo* pInfo = &pTask->execInfo; + if (pTask->info.fillHistory == 1) { + stError("s-task:0x%x vgId:%d fill-histroy task, not start here", taskId, vgId); + streamMetaReleaseTask(pMeta, pTask); + return TSDB_CODE_SUCCESS; + } + + // the start all tasks procedure may happen to start the newly deployed stream task, and results in the + // concurrently start this task by two threads. + streamMutexLock(&pTask->lock); + SStreamTaskState status = streamTaskGetStatus(pTask); + if (status.state != TASK_STATUS__UNINIT) { + stError("s-task:0x%x vgId:%d status:%s not uninit status, not start stream task", taskId, vgId, status.name); + continueExec = false; + } else { + continueExec = true; + } + streamMutexUnlock(&pTask->lock); + + if (!continueExec) { + streamMetaReleaseTask(pMeta, pTask); + return TSDB_CODE_STREAM_TASK_IVLD_STATUS; + } + + ASSERT(pTask->status.downstreamReady == 0); + + // avoid initialization and destroy running concurrently. + streamMutexLock(&pTask->lock); + if (pTask->pBackend == NULL) { + code = pMeta->expandTaskFn(pTask); + streamMutexUnlock(&pTask->lock); + + if (code != TSDB_CODE_SUCCESS) { + streamMetaAddFailedTaskSelf(pTask, pInfo->readyTs); + } + } else { + streamMutexUnlock(&pTask->lock); + } + + // concurrently start task may cause the later started task be failed, and also failed to added into meta result. + if (code == TSDB_CODE_SUCCESS) { + code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_INIT); + if (code != TSDB_CODE_SUCCESS) { + stError("s-task:%s vgId:%d failed to handle event:%d, code:%s", pTask->id.idStr, pMeta->vgId, TASK_EVENT_INIT, + tstrerror(code)); + + // do no added into result hashmap if it is failed due to concurrently starting of this stream task. 
+ if (code != TSDB_CODE_STREAM_INVALID_STATETRANS) { + streamMetaAddFailedTaskSelf(pTask, pInfo->readyTs); + } + } + } + + streamMetaReleaseTask(pMeta, pTask); + return code; +} + +int32_t streamMetaStopAllTasks(SStreamMeta* pMeta) { + streamMetaRLock(pMeta); + + int32_t num = taosArrayGetSize(pMeta->pTaskList); + stDebug("vgId:%d stop all %d stream task(s)", pMeta->vgId, num); + if (num == 0) { + stDebug("vgId:%d stop all %d task(s) completed, elapsed time:0 Sec.", pMeta->vgId, num); + streamMetaRUnLock(pMeta); + return TSDB_CODE_SUCCESS; + } + + int64_t st = taosGetTimestampMs(); + + // send hb msg to mnode before closing all tasks. + SArray* pTaskList = NULL; + int32_t code = streamMetaSendMsgBeforeCloseTasks(pMeta, &pTaskList); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + int32_t numOfTasks = taosArrayGetSize(pTaskList); + + for (int32_t i = 0; i < numOfTasks; ++i) { + SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i); + SStreamTask* pTask = NULL; + + code = streamMetaAcquireTaskNoLock(pMeta, pTaskId->streamId, pTaskId->taskId, &pTask); + if (code != TSDB_CODE_SUCCESS) { + continue; + } + + (void)streamTaskStop(pTask); + streamMetaReleaseTask(pMeta, pTask); + } + + taosArrayDestroy(pTaskList); + + double el = (taosGetTimestampMs() - st) / 1000.0; + stDebug("vgId:%d stop all %d task(s) completed, elapsed time:%.2f Sec.", pMeta->vgId, num, el); + + streamMetaRUnLock(pMeta); + return 0; +} + + From a360240e780ec9b08ff007609e8cebff8a8c707e Mon Sep 17 00:00:00 2001 From: sheyanjie-qq <249478495@qq.com> Date: Sat, 17 Aug 2024 17:40:44 +0800 Subject: [PATCH 124/181] update node log --- .../node/websocketexample/line_example.js | 2 +- .../examples/node/websocketexample/sql_example.js | 8 ++++---- .../node/websocketexample/stmt_example.js | 2 +- .../examples/node/websocketexample/tmq_example.js | 15 ++++++++++----- .../node/websocketexample/tmq_seek_example.js | 13 +++++++++---- 5 files changed, 25 insertions(+), 15 deletions(-) diff --git a/docs/examples/node/websocketexample/line_example.js b/docs/examples/node/websocketexample/line_example.js index 4fc5042f5a..ac3083d358 100644 --- a/docs/examples/node/websocketexample/line_example.js +++ b/docs/examples/node/websocketexample/line_example.js @@ -28,7 +28,7 @@ async function test() { console.log("Inserted data with schemaless successfully.") } catch (err) { - console.error("Failed to insert data with schemaless, ErrCode: " + err.code + ", ErrMessage: " + err.message); + console.error(`Failed to insert data with schemaless, ErrCode: ${err.code}, ErrMessage: ${err.message}`); } finally { if (wsRows) { diff --git a/docs/examples/node/websocketexample/sql_example.js b/docs/examples/node/websocketexample/sql_example.js index 8ef4dcb831..1f756c8a09 100644 --- a/docs/examples/node/websocketexample/sql_example.js +++ b/docs/examples/node/websocketexample/sql_example.js @@ -35,7 +35,7 @@ async function createDbAndTable() { console.log("Create stable power.meters successfully"); } catch (err) { - console.error("Failed to create database power or stable meters, ErrCode: " + err.code + ", ErrMessage: " + err.message); + console.error(`Failed to create database power or stable meters, ErrCode: ${err.code}, ErrMessage: ${err.message}`); } finally { if (wsSql) { await wsSql.close(); @@ -62,7 +62,7 @@ async function insertData() { taosResult = await wsSql.exec(insertQuery); console.log("Successfully inserted " + taosResult.getAffectRows() + " rows to power.meters."); } catch (err) { - console.error("Failed to insert data to power.meters, 
ErrCode: " + err.code + ", ErrMessage: " + err.message); + console.error(`Failed to insert data to power.meters, sql: ${insertQuery}, ErrCode: ${err.code}, ErrMessage: ${err.message}`); } finally { if (wsSql) { await wsSql.close(); @@ -85,7 +85,7 @@ async function queryData() { } } catch (err) { - console.error("Failed to query data from power.meters, sql: " + sql + ", ErrCode:" + err.code + ", ErrMessage: " + err.message); + console.error(`Failed to query data from power.meters, sql: ${sql}, ErrCode: ${err.code}, ErrMessage: ${err.message}`); } finally { if (wsRows) { @@ -112,7 +112,7 @@ async function sqlWithReqid() { } } catch (err) { - console.error("Failed to execute sql with reqId: " + reqId + ", ErrCode: " + err.code + ", ErrMessage: " + err.message); + console.error(`Failed to query data from power.meters, reqId: ${reqId}, ErrCode: ${err.code}, ErrMessage: ${err.message}`); } finally { if (wsRows) { diff --git a/docs/examples/node/websocketexample/stmt_example.js b/docs/examples/node/websocketexample/stmt_example.js index 6ca4959c48..e3bb3c4dda 100644 --- a/docs/examples/node/websocketexample/stmt_example.js +++ b/docs/examples/node/websocketexample/stmt_example.js @@ -59,7 +59,7 @@ async function prepare() { } } catch (err) { - console.error("Failed to insert to table meters using stmt, ErrCode: " + err.code + ", ErrMessage: " + err.message); + console.error(`Failed to insert to table meters using stmt, ErrCode: ${err.code}, ErrMessage: ${err.message}`); } finally { if (stmt) { diff --git a/docs/examples/node/websocketexample/tmq_example.js b/docs/examples/node/websocketexample/tmq_example.js index fc9d2889e0..5097402e6a 100644 --- a/docs/examples/node/websocketexample/tmq_example.js +++ b/docs/examples/node/websocketexample/tmq_example.js @@ -3,12 +3,16 @@ const taos = require("@tdengine/websocket"); // ANCHOR: create_consumer const db = 'power'; const stable = 'meters'; -const topics = ['power_meters_topic']; const url = 'ws://localhost:6041'; +const topic = 'topic_meters' +const topics = [topic]; +const groupId = "group1"; +const clientId = "client1"; + async function createConsumer() { let groupId = "group1"; - let clientId = "1"; + let clientId = "client1"; let configMap = new Map([ [taos.TMQConstants.GROUP_ID, groupId], [taos.TMQConstants.CLIENT_ID, clientId], @@ -24,7 +28,7 @@ async function createConsumer() { console.log(`Create consumer successfully, host: ${url}, groupId: ${groupId}, clientId: ${clientId}`) return conn; } catch (err) { - console.log("Failed to create websocket consumer, ErrCode:" + err.code + ", ErrMessage: " + err.message); + console.error(`Failed to create websocket consumer, topic: ${topic}, groupId: ${groupId}, clientId: ${clientId}, ErrCode: ${err.code}, ErrMessage: ${err.message}`); throw err; } @@ -60,13 +64,14 @@ async function subscribe(consumer) { for (let i = 0; i < 50; i++) { let res = await consumer.poll(100); for (let [key, value] of res) { + // Add your data processing logic here console.log(`data: ${key} ${value}`); } await consumer.commit(); console.log("Commit offset manually successfully."); } } catch (err) { - console.error("Failed to poll data, ErrCode: " + err.code + ", ErrMessage: " + err.message); + console.error(`Failed to poll data, topic: ${topic}, groupId: ${groupId}, clientId: ${clientId}, ErrCode: ${err.code}, ErrMessage: ${err.message}`); throw err; } // ANCHOR_END: commit @@ -83,7 +88,7 @@ async function test() { console.log("Consumer unsubscribed successfully."); } catch (err) { - console.error("Failed to unsubscribe 
consumer, ErrCode: " + err.code + ", ErrMessage: " + err.message); + console.error(`Failed to unsubscribe consumer, topic: ${topic}, groupId: ${groupId}, clientId: ${clientId}, ErrCode: ${err.code}, ErrMessage: ${err.message}`); } finally { if (consumer) { diff --git a/docs/examples/node/websocketexample/tmq_seek_example.js b/docs/examples/node/websocketexample/tmq_seek_example.js index 259614d5db..b2bd569d92 100644 --- a/docs/examples/node/websocketexample/tmq_seek_example.js +++ b/docs/examples/node/websocketexample/tmq_seek_example.js @@ -2,7 +2,11 @@ const taos = require("@tdengine/websocket"); const db = 'power'; const stable = 'meters'; -const topics = ['power_meters_topic']; +const topic = 'topic_meters' +const topics = [topic]; +const groupId = "group1"; +const clientId = "client1"; + // ANCHOR: create_consumer async function createConsumer() { @@ -19,7 +23,7 @@ async function createConsumer() { try { return await taos.tmqConnect(configMap); } catch (err) { - console.log(err); + console.error(err); throw err; } @@ -55,11 +59,12 @@ async function subscribe(consumer) { for (let i = 0; i < 50; i++) { let res = await consumer.poll(100); for (let [key, value] of res) { + // Add your data processing logic here console.log(`data: ${key} ${value}`); } } } catch (err) { - console.error("Failed to poll data, ErrCode: " + err.code + ", ErrMessage: " + err.message); + console.error(`Failed to poll data, topic: ${topic}, groupId: ${groupId}, clientId: ${clientId}, ErrCode: ${err.code}, ErrMessage: ${err.message}`); throw err; } @@ -83,7 +88,7 @@ async function test() { console.log("Assignment seek to beginning successfully"); } catch (err) { - console.error("Failed to execute seek example, ErrCode: " + err.code + ", ErrMessage: " + err.message); + console.error(`Failed to seek offset, topic: ${topic}, groupId: ${groupId}, clientId: ${clientId}, ErrCode: ${err.code}, ErrMessage: ${err.message}`); } finally { if (consumer) { From 7355967c4102fa0ef9f4b1cb9cdaaf21d7eda946 Mon Sep 17 00:00:00 2001 From: sheyanjie-qq <249478495@qq.com> Date: Sat, 17 Aug 2024 18:02:04 +0800 Subject: [PATCH 125/181] fix typo --- docs/examples/node/websocketexample/sql_example.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/examples/node/websocketexample/sql_example.js b/docs/examples/node/websocketexample/sql_example.js index 1f756c8a09..5dc8ba6021 100644 --- a/docs/examples/node/websocketexample/sql_example.js +++ b/docs/examples/node/websocketexample/sql_example.js @@ -13,7 +13,7 @@ async function createConnect() { console.log("Connected to " + dsn + " successfully."); return conn; } catch (err) { - console.log("Failed to connect to " + dns + ", ErrCode: " + err.code + ", ErrMessage: " + err.message); + console.log("Failed to connect to " + dsn + ", ErrCode: " + err.code + ", ErrMessage: " + err.message); throw err; } From 89eaf01621163256fbfa551317808772c123ecf2 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 17 Aug 2024 23:23:18 +0800 Subject: [PATCH 126/181] fix(stream):fix memory leak. 
--- source/libs/stream/src/streamStartTask.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/libs/stream/src/streamStartTask.c b/source/libs/stream/src/streamStartTask.c index 3cf06fd04a..99f4e84951 100644 --- a/source/libs/stream/src/streamStartTask.c +++ b/source/libs/stream/src/streamStartTask.c @@ -314,8 +314,8 @@ int32_t streamMetaInitStartInfo(STaskStartInfo* pStartInfo) { } void streamMetaClearStartInfo(STaskStartInfo* pStartInfo) { - taosHashClear(pStartInfo->pReadyTaskSet); - taosHashClear(pStartInfo->pFailedTaskSet); + taosHashCleanup(pStartInfo->pReadyTaskSet); + taosHashCleanup(pStartInfo->pFailedTaskSet); pStartInfo->readyTs = 0; pStartInfo->elapsedTime = 0; pStartInfo->startTs = 0; From 72da4be830528136cdb03f7cd03588590996692b Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Mon, 19 Aug 2024 09:15:36 +0800 Subject: [PATCH 127/181] fix: create operator failed issue --- source/libs/executor/src/dynqueryctrloperator.c | 3 ++- source/libs/executor/src/mergejoinoperator.c | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/source/libs/executor/src/dynqueryctrloperator.c b/source/libs/executor/src/dynqueryctrloperator.c index 5359cc0980..02932cd278 100644 --- a/source/libs/executor/src/dynqueryctrloperator.c +++ b/source/libs/executor/src/dynqueryctrloperator.c @@ -966,13 +966,14 @@ int32_t createDynQueryCtrlOperatorInfo(SOperatorInfo** pDownstream, int32_t numO int32_t code = TSDB_CODE_SUCCESS; __optr_fn_t nextFp = NULL; + SOperatorInfo* pOperator = NULL; SDynQueryCtrlOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SDynQueryCtrlOperatorInfo)); if (pInfo == NULL) { code = terrno; goto _error; } - SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); + pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pOperator == NULL) { code = terrno; goto _error; diff --git a/source/libs/executor/src/mergejoinoperator.c b/source/libs/executor/src/mergejoinoperator.c index 2c485cdd1b..946a1d2aa5 100644 --- a/source/libs/executor/src/mergejoinoperator.c +++ b/source/libs/executor/src/mergejoinoperator.c @@ -1863,13 +1863,14 @@ int32_t createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDo bool newDownstreams = false; int32_t code = TSDB_CODE_SUCCESS; + SOperatorInfo* pOperator = NULL; SMJoinOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SMJoinOperatorInfo)); if (pInfo == NULL) { code = terrno; goto _return; } - SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); + pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pOperator == NULL) { code = terrno; goto _return; From ee36bd741f4d511ea31c776f6cc7fa31c3cab8da Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 19 Aug 2024 09:50:38 +0800 Subject: [PATCH 128/181] fix(query): return error code. 
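The executor hunks below change the early return taken when a task is detected as killed: instead of reporting TSDB_CODE_SUCCESS, qExecTask/qExecTaskOpt now return the code already recorded in pTaskInfo->code, so a cancelled query surfaces its kill reason rather than looking like a successful, empty result. A minimal sketch of that pattern, with hypothetical names (task_t, task_kill, task_exec) and a plain flag standing in for the engine's atomics:

```c
/*
 * Sketch of the "return the recorded kill reason" pattern; task_t and the
 * helper functions are hypothetical, not the executor's real interface.
 */
#include <stdio.h>

#define ERR_OK        0
#define ERR_CANCELLED 1001

typedef struct {
  volatile int killed; /* set from another thread in the real engine (atomics there) */
  int          code;   /* reason recorded when the task was killed                   */
} task_t;

static void task_kill(task_t *t, int reason) {
  t->code = reason; /* record the reason first ... */
  t->killed = 1;    /* ... then mark the task as killed */
}

/* Early-out returns the recorded reason, not "success with no rows". */
static int task_exec(task_t *t, int *rows) {
  *rows = 0;
  if (t->killed) {
    return t->code; /* previously this path returned ERR_OK */
  }
  *rows = 42;       /* pretend some data was produced */
  return ERR_OK;
}

int main(void) {
  task_t t = {0};
  int    rows = 0;
  task_kill(&t, ERR_CANCELLED);
  int code = task_exec(&t, &rows);
  printf("code=%d rows=%d\n", code, rows); /* prints code=1001, not 0 */
  return 0;
}
```

With the old behaviour the caller could not tell "ran out of data" apart from "was aborted"; returning the recorded code keeps that distinction without adding a new out-parameter.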
--- source/libs/executor/src/executor.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index b1c9207ab7..a034e011f8 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -663,7 +663,7 @@ int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds, bo if (isTaskKilled(pTaskInfo)) { atomic_store_64(&pTaskInfo->owner, 0); qDebug("%s already killed, abort", GET_TASKID(pTaskInfo)); - return TSDB_CODE_SUCCESS; + return pTaskInfo->code; } // error occurs, record the error code and return to client @@ -785,7 +785,7 @@ int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t* useconds) { qDebug("%s already killed, abort", GET_TASKID(pTaskInfo)); taosRUnLockLatch(&pTaskInfo->lock); - return TSDB_CODE_SUCCESS; + return pTaskInfo->code; } if (pTaskInfo->owner != 0) { From 2f92b80cd694c87fdaee04e03cb5bdcc534cdad0 Mon Sep 17 00:00:00 2001 From: sima Date: Mon, 19 Aug 2024 10:10:47 +0800 Subject: [PATCH 129/181] fix:[TD-31511] Fix memory leak when error occurs. --- source/libs/scalar/src/filter.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c index 00487b140d..cc9cc9ed76 100644 --- a/source/libs/scalar/src/filter.c +++ b/source/libs/scalar/src/filter.c @@ -2372,6 +2372,7 @@ int32_t filterMergeGroupUnits(SFilterInfo *info, SFilterGroupCtx **gRes, int32_t } gRes[gResIdx]->colInfo = taosMemoryCalloc(info->fields[FLD_TYPE_COLUMN].num, sizeof(SFilterColInfo)); if (gRes[gResIdx]->colInfo == NULL) { + filterFreeGroupCtx(gRes[gResIdx]); FLT_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY); } colIdxi = 0; @@ -2384,6 +2385,7 @@ int32_t filterMergeGroupUnits(SFilterInfo *info, SFilterGroupCtx **gRes, int32_t if (gRes[gResIdx]->colInfo[cidx].info == NULL) { gRes[gResIdx]->colInfo[cidx].info = (SArray *)taosArrayInit(4, POINTER_BYTES); if (gRes[gResIdx]->colInfo[cidx].info == NULL) { + filterFreeGroupCtx(gRes[gResIdx]); FLT_ERR_JRET(terrno); } colIdx[colIdxi++] = cidx; @@ -2408,7 +2410,11 @@ int32_t filterMergeGroupUnits(SFilterInfo *info, SFilterGroupCtx **gRes, int32_t continue; } - FLT_ERR_JRET(filterMergeUnits(info, gRes[gResIdx], colIdx[l], &empty)); + code = filterMergeUnits(info, gRes[gResIdx], colIdx[l], &empty); + if (TSDB_CODE_SUCCESS != code) { + filterFreeGroupCtx(gRes[gResIdx]); + SCL_ERR_JRET(code); + } if (empty) { break; @@ -2426,10 +2432,9 @@ int32_t filterMergeGroupUnits(SFilterInfo *info, SFilterGroupCtx **gRes, int32_t gRes[gResIdx]->colNum = colIdxi; FILTER_COPY_IDX(&gRes[gResIdx]->colIdx, colIdx, colIdxi); ++gResIdx; + *gResNum = gResIdx; } - *gResNum = gResIdx; - if (gResIdx == 0) { FILTER_SET_FLAG(info->status, FI_STATUS_EMPTY); } From 30ab835e5355953c732fd767b59d515c2387fd6b Mon Sep 17 00:00:00 2001 From: sheyanjie-qq <249478495@qq.com> Date: Mon, 19 Aug 2024 10:12:55 +0800 Subject: [PATCH 130/181] move java sample code to docs/example --- .../en/14-reference/05-connectors/14-java.mdx | 40 +++++++++---------- .../com/taos}/example/AbsConsumerLoop.java | 4 +- .../com/taos}/example/ConsumerLoopImp.java | 2 +- .../com/taos}/example/ConsumerOffsetSeek.java | 10 +---- .../com/taos}/example/WsConsumerLoopImp.java | 7 +++- .../src/test/java/com/taos/test/TestAll.java | 4 +- 6 files changed, 32 insertions(+), 35 deletions(-) rename {examples/JDBC/JDBCDemo/src/main/java/com/taosdata => docs/examples/java/src/main/java/com/taos}/example/AbsConsumerLoop.java 
(98%) rename {examples/JDBC/JDBCDemo/src/main/java/com/taosdata => docs/examples/java/src/main/java/com/taos}/example/ConsumerLoopImp.java (98%) rename {examples/JDBC/JDBCDemo/src/main/java/com/taosdata => docs/examples/java/src/main/java/com/taos}/example/ConsumerOffsetSeek.java (88%) rename {examples/JDBC/JDBCDemo/src/main/java/com/taosdata => docs/examples/java/src/main/java/com/taos}/example/WsConsumerLoopImp.java (93%) diff --git a/docs/en/14-reference/05-connectors/14-java.mdx b/docs/en/14-reference/05-connectors/14-java.mdx index 93b72ca026..8192807c6d 100644 --- a/docs/en/14-reference/05-connectors/14-java.mdx +++ b/docs/en/14-reference/05-connectors/14-java.mdx @@ -69,7 +69,7 @@ REST connection supports all platforms that can run Java. After an error is reported, the error message and error code can be obtained through SQLException. ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcBasicDemo.java:jdbc_exception}} +{{#include docs/examples/java/src/main/java/com/taos/example/JdbcBasicDemo.java:jdbc_exception}} ``` There are four types of error codes that the JDBC client library can report: @@ -150,7 +150,7 @@ TDengine currently supports timestamp, number, character, Boolean type, and the Due to historical reasons, the BINARY type data in TDengine is not truly binary data and is no longer recommended for use. Please use VARBINARY type instead. GEOMETRY type is binary data in little endian byte order, which complies with the WKB specification. For detailed information, please refer to [Data Type](../../taos-sql/data-type/) For WKB specifications, please refer to [Well Known Binary (WKB)](https://libgeos.org/specifications/wkb/) -For Java connector, the jts library can be used to easily create GEOMETRY type objects, serialize them, and write them to TDengine. Here is an example [Geometry example](https://github.com/taosdata/TDengine/blob/3.0/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/GeometryDemo.java) +For Java connector, the jts library can be used to easily create GEOMETRY type objects, serialize them, and write them to TDengine. Here is an example [Geometry example](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/java/src/main/java/com/taos/example/GeometryDemo.java) ## Installation Steps @@ -395,7 +395,7 @@ For example, if you specify the password as `taosdata` in the URL and specify th ### Create database and tables ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcBasicDemo.java:create_db_and_table}} +{{#include docs/examples/java/src/main/java/com/taos/example/JdbcBasicDemo.java:create_db_and_table}} ``` > **Note**: If you do not use `USE power` to specify the database, all subsequent operations on the table need to add the database name as a prefix, such as power.meters. @@ -403,7 +403,7 @@ For example, if you specify the password as `taosdata` in the URL and specify th ### Insert data ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcBasicDemo.java:insert_data}} +{{#include docs/examples/java/src/main/java/com/taos/example/JdbcBasicDemo.java:insert_data}} ``` > NOW is an internal function. The default is the current time of the client's computer. 
@@ -412,7 +412,7 @@ For example, if you specify the password as `taosdata` in the URL and specify th ### Querying data ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcBasicDemo.java:query_data}} +{{#include docs/examples/java/src/main/java/com/taos/example/JdbcBasicDemo.java:query_data}} ``` > The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, you have to start from 1. However, we recommend using the field names to get the values of the fields in the result set. @@ -422,7 +422,7 @@ For example, if you specify the password as `taosdata` in the URL and specify th ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcBasicDemo.java:with_reqid}} +{{#include docs/examples/java/src/main/java/com/taos/example/JdbcBasicDemo.java:with_reqid}} ``` ### Writing data via parameter binding @@ -440,20 +440,20 @@ TDengine has significantly improved the bind APIs to support data writing (INSER ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ParameterBindingBasicDemo.java:para_bind}} +{{#include docs/examples/java/src/main/java/com/taos/example/ParameterBindingBasicDemo.java:para_bind}} ``` -This is the [Detailed Example](https://github.com/taosdata/TDengine/blob/main/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ParameterBindingFullDemo.java) +This is the [Detailed Example](https://github.com/taosdata/TDengine/blob/main/docs/examples/java/src/main/java/com/taos/example/ParameterBindingFullDemo.java) ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WSParameterBindingBasicDemo.java:para_bind}} +{{#include docs/examples/java/src/main/java/com/taos/example/WSParameterBindingBasicDemo.java:para_bind}} ``` -This is the [Detailed Example](https://github.com/taosdata/TDengine/blob/main/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WSParameterBindingFullDemo.java) +This is the [Detailed Example](https://github.com/taosdata/TDengine/blob/main/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingFullDemo.java) @@ -503,14 +503,14 @@ TDengine supports schemaless writing. It is compatible with InfluxDB's Line Prot ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessJniTest.java:schemaless}} +{{#include docs/examples/java/src/main/java/com/taos/example/SchemalessJniTest.java:schemaless}} ``` ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessWsTest.java:schemaless}} +{{#include docs/examples/java/src/main/java/com/taos/example/SchemalessWsTest.java:schemaless}} ``` @@ -531,7 +531,7 @@ The TDengine Java client library supports subscription functionality with the fo #### Create a Topic ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerLoopImp.java:create_topic}} +{{#include docs/examples/java/src/main/java/com/taos/example/ConsumerLoopImp.java:create_topic}} ``` The preceding example uses the SQL statement `SELECT ts, current, voltage, phase, groupid, location FROM meters` and creates a topic named `topic_meters`. 
@@ -540,7 +540,7 @@ The preceding example uses the SQL statement `SELECT ts, current, voltage, phase #### Create a Consumer ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/AbsConsumerLoop.java:create_consumer}} +{{#include docs/examples/java/src/main/java/com/taos/example/AbsConsumerLoop.java:create_consumer}} ``` - bootstrap.servers: `ip:port` where the TDengine server is located, or `ip:port` where the taosAdapter is located if WebSocket connection is used. @@ -561,7 +561,7 @@ For more information, see [Consumer Parameters](../../../develop/tmq/#create-a-c #### Subscribe to consume data ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/AbsConsumerLoop.java:poll_data}} +{{#include docs/examples/java/src/main/java/com/taos/example/AbsConsumerLoop.java:poll_data}} ``` The parameters of the subscribe method are defined as: a list of topics to subscribe, and it supports subscribing to multiple topics at the same time. @@ -588,7 +588,7 @@ void seekToEnd(Collection partitions) throws SQLException; Example usage is as follows. ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerOffsetSeek.java:consumer_seek}} +{{#include docs/examples/java/src/main/java/com/taos/example/ConsumerOffsetSeek.java:consumer_seek}} ``` #### Commit offset @@ -622,14 +622,14 @@ For more information, see [Data Subscription](../../../develop/tmq). In addition to the native connection, the Java client library also supports subscribing via websocket. ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerLoopFull.java:consumer_demo}} +{{#include docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java:consumer_demo}} ``` ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java:consumer_demo}} +{{#include docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java:consumer_demo}} ``` @@ -644,7 +644,7 @@ In addition to the native connection, the Java client library also supports subs Example usage is as follows. ```java -{{#include examples/JDBC/connectionPools/src/main/java/com/taosdata/example/HikariDemo.java:connection_pool}} +{{#include docs/examples/java/src/main/java/com/taos/example/HikariDemo.java:connection_pool}} ``` > getConnection(), you need to call the close() method after you finish using it. It doesn't close the connection. It just puts it back into the connection pool. @@ -655,7 +655,7 @@ Example usage is as follows. Example usage is as follows. ```java -{{#include examples/JDBC/connectionPools/src/main/java/com/taosdata/example/DruidDemo.java:connection_pool}} +{{#include docs/examples/java/src/main/java/com/taos/example/DruidDemo.java:connection_pool}} ``` > For more questions about using druid, please see [Official Instructions](https://github.com/alibaba/druid). 
diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/AbsConsumerLoop.java b/docs/examples/java/src/main/java/com/taos/example/AbsConsumerLoop.java similarity index 98% rename from examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/AbsConsumerLoop.java rename to docs/examples/java/src/main/java/com/taos/example/AbsConsumerLoop.java index 842abb4086..52e294fcf7 100644 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/AbsConsumerLoop.java +++ b/docs/examples/java/src/main/java/com/taos/example/AbsConsumerLoop.java @@ -1,4 +1,4 @@ -package com.taosdata.example; +package com.taos.example; import com.taosdata.jdbc.tmq.ConsumerRecord; import com.taosdata.jdbc.tmq.ConsumerRecords; @@ -33,7 +33,7 @@ public abstract class AbsConsumerLoop { config.setProperty("auto.commit.interval.ms", "1000"); config.setProperty("group.id", "group1"); config.setProperty("client.id", "client1"); - config.setProperty("value.deserializer", "com.taosdata.example.AbsConsumerLoop$ResultDeserializer"); + config.setProperty("value.deserializer", "com.taos.example.AbsConsumerLoop$ResultDeserializer"); config.setProperty("value.deserializer.encoding", "UTF-8"); try { this.consumer = new TaosConsumer<>(config); diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerLoopImp.java b/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopImp.java similarity index 98% rename from examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerLoopImp.java rename to docs/examples/java/src/main/java/com/taos/example/ConsumerLoopImp.java index 84d84f062b..a59bfc282f 100644 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerLoopImp.java +++ b/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopImp.java @@ -1,4 +1,4 @@ -package com.taosdata.example; +package com.taos.example; import com.alibaba.fastjson.JSON; import com.taosdata.jdbc.TSDBDriver; diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerOffsetSeek.java b/docs/examples/java/src/main/java/com/taos/example/ConsumerOffsetSeek.java similarity index 88% rename from examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerOffsetSeek.java rename to docs/examples/java/src/main/java/com/taos/example/ConsumerOffsetSeek.java index 73901aba49..5903cabd3c 100644 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerOffsetSeek.java +++ b/docs/examples/java/src/main/java/com/taos/example/ConsumerOffsetSeek.java @@ -1,20 +1,14 @@ -package com.taosdata.example; +package com.taos.example; -import com.alibaba.fastjson.JSON; -import com.taosdata.jdbc.TSDBDriver; import com.taosdata.jdbc.tmq.ConsumerRecords; import com.taosdata.jdbc.tmq.TaosConsumer; import com.taosdata.jdbc.tmq.TopicPartition; -import java.sql.Connection; -import java.sql.DriverManager; import java.sql.SQLException; -import java.sql.Statement; import java.time.Duration; import java.util.Collections; import java.util.Map; import java.util.Properties; -import java.util.Scanner; public class ConsumerOffsetSeek { @@ -31,7 +25,7 @@ public class ConsumerOffsetSeek { config.setProperty("auto.commit.interval.ms", "1000"); config.setProperty("group.id", "group1"); config.setProperty("client.id", "1"); - config.setProperty("value.deserializer", "com.taosdata.example.AbsConsumerLoop$ResultDeserializer"); + config.setProperty("value.deserializer", "com.taos.example.AbsConsumerLoop$ResultDeserializer"); config.setProperty("value.deserializer.encoding", "UTF-8"); // ANCHOR: consumer_seek 
diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopImp.java b/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopImp.java similarity index 93% rename from examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopImp.java rename to docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopImp.java index bf352f3b06..70e29503f8 100644 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopImp.java +++ b/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopImp.java @@ -1,9 +1,12 @@ -package com.taosdata.example; +package com.taos.example; import com.alibaba.fastjson.JSON; import com.taosdata.jdbc.TSDBDriver; -import java.sql.*; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.sql.Statement; import java.util.Properties; import java.util.Scanner; diff --git a/docs/examples/java/src/test/java/com/taos/test/TestAll.java b/docs/examples/java/src/test/java/com/taos/test/TestAll.java index f24156d8b1..6a45c5fd5b 100644 --- a/docs/examples/java/src/test/java/com/taos/test/TestAll.java +++ b/docs/examples/java/src/test/java/com/taos/test/TestAll.java @@ -40,12 +40,12 @@ public class TestAll { } @Test - public void testJNIConnect() throws SQLException { + public void testJNIConnect() throws Exception { JNIConnectExample.main(args); } @Test - public void testRestConnect() throws SQLException { + public void testRestConnect() throws Exception { RESTConnectExample.main(args); } From 0ec16a723d36df78fada221dee68e60d1c16542a Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao> Date: Mon, 19 Aug 2024 10:15:35 +0800 Subject: [PATCH 131/181] fix issue --- source/util/src/tscalablebf.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/source/util/src/tscalablebf.c b/source/util/src/tscalablebf.c index c48cd38886..1d6ef29987 100644 --- a/source/util/src/tscalablebf.c +++ b/source/util/src/tscalablebf.c @@ -85,6 +85,9 @@ int32_t tScalableBfPutNoCheck(SScalableBf* pSBf, const void* keyBuf, uint32_t le pNormalBf->errorRate * DEFAULT_TIGHTENING_RATIO, &pNormalBf); if (code != TSDB_CODE_SUCCESS) { pSBf->status = SBF_INVALID; + if (code == TSDB_CODE_OUT_OF_BUFFER) { + code = TSDB_CODE_SUCCESS; + } QUERY_CHECK_CODE(code, lino, _error); } } @@ -121,6 +124,9 @@ int32_t tScalableBfPut(SScalableBf* pSBf, const void* keyBuf, uint32_t len, int3 pNormalBf->errorRate * DEFAULT_TIGHTENING_RATIO, &pNormalBf); if (code != TSDB_CODE_SUCCESS) { pSBf->status = SBF_INVALID; + if (code == TSDB_CODE_OUT_OF_BUFFER) { + code = TSDB_CODE_SUCCESS; + } QUERY_CHECK_CODE(code, lino, _end); } } From 97e0aa6901a96a855b13d1b35eb44cbc61ebd44c Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Mon, 19 Aug 2024 10:29:09 +0800 Subject: [PATCH 132/181] fix: memory leak when continuesly create and drop database --- source/libs/sync/src/syncMain.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index fd1d3e371e..5465007b18 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -788,9 +788,11 @@ int32_t syncCheckMember(int64_t rid) { } if (pSyncNode->myNodeInfo.nodeRole == TAOS_SYNC_ROLE_LEARNER) { + syncNodeRelease(pSyncNode); return TSDB_CODE_SYN_WRONG_ROLE; } + syncNodeRelease(pSyncNode); return 0; } @@ -2446,6 +2448,7 @@ static void syncNodeEqPingTimer(void* param, void* tmrId) { (void)taosTmrReset(syncNodeEqPingTimer, pNode->pingTimerMS, (void*)pNode->rid, syncEnv()->pTimerManager, 
&pNode->pPingTimer); } + syncNodeRelease(pNode); } static void syncNodeEqElectTimer(void* param, void* tmrId) { From d0e31f711fb82b1f3274365a12013a51d247f6f2 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 19 Aug 2024 11:01:54 +0800 Subject: [PATCH 133/181] fix(tsdb): return code for tMergeTreeNext --- source/dnode/vnode/src/inc/tsdb.h | 2 +- source/dnode/vnode/src/tsdb/tsdbCache.c | 9 +-- source/dnode/vnode/src/tsdb/tsdbMergeTree.c | 21 +++++-- source/dnode/vnode/src/tsdb/tsdbRead2.c | 64 ++++++++++++--------- 4 files changed, 58 insertions(+), 38 deletions(-) diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h index ab5b07581a..85084a0b81 100644 --- a/source/dnode/vnode/src/inc/tsdb.h +++ b/source/dnode/vnode/src/inc/tsdb.h @@ -898,7 +898,7 @@ typedef struct SSttDataInfoForTable { int32_t tMergeTreeOpen2(SMergeTree *pMTree, SMergeTreeConf *pConf, SSttDataInfoForTable *pTableInfo); void tMergeTreeAddIter(SMergeTree *pMTree, SLDataIter *pIter); -bool tMergeTreeNext(SMergeTree *pMTree); +int32_t tMergeTreeNext(SMergeTree *pMTree, bool* pHasNext); void tMergeTreePinSttBlock(SMergeTree *pMTree); void tMergeTreeUnpinSttBlock(SMergeTree *pMTree); bool tMergeTreeIgnoreEarlierTs(SMergeTree *pMTree); diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index aa92597211..70e6e1ee2a 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -2245,17 +2245,18 @@ static int32_t lastIterClose(SFSLastIter **iter) { } static int32_t lastIterNext(SFSLastIter *iter, TSDBROW **ppRow) { - int32_t code = 0; + bool hasVal = false; + int32_t code = tMergeTreeNext(iter->pMergeTree, &hasVal); + if (code != 0) { + return code; + } - bool hasVal = tMergeTreeNext(iter->pMergeTree); if (!hasVal) { *ppRow = NULL; - TAOS_RETURN(code); } *ppRow = tMergeTreeGetRow(iter->pMergeTree); - TAOS_RETURN(code); } diff --git a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c index 4729b912a7..8bfc066731 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c +++ b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c @@ -1102,13 +1102,21 @@ void tMergeTreeUnpinSttBlock(SMergeTree *pMTree) { tLDataIterUnpinSttBlock(pIter, pMTree->idStr); } -bool tMergeTreeNext(SMergeTree *pMTree) { +int32_t tMergeTreeNext(SMergeTree *pMTree, bool *pHasNext) { + int32_t code = 0; + if (pHasNext == NULL) { + return TSDB_CODE_INVALID_PARA; + } + if (pMTree->pIter) { SLDataIter *pIter = pMTree->pIter; - - bool hasVal = false; - int32_t code = tLDataIterNextRow(pIter, pMTree->idStr, &hasVal); + bool hasVal = false; + code = tLDataIterNextRow(pIter, pMTree->idStr, &hasVal); if (!hasVal || (code != 0)) { + if (code == TSDB_CODE_FILE_CORRUPTED) { + code = 0; // suppress the file corrupt error to enable all queries within this cluster can run without failed. 
+ } + pMTree->pIter = NULL; } @@ -1117,7 +1125,7 @@ bool tMergeTreeNext(SMergeTree *pMTree) { if (pMTree->pIter && pIter) { int32_t c = pMTree->rbt.cmprFn(&pMTree->pIter->node, &pIter->node); if (c > 0) { - (void) tRBTreePut(&pMTree->rbt, (SRBTreeNode *)pMTree->pIter); + (void)tRBTreePut(&pMTree->rbt, (SRBTreeNode *)pMTree->pIter); pMTree->pIter = NULL; } else { ASSERT(c); @@ -1132,7 +1140,8 @@ bool tMergeTreeNext(SMergeTree *pMTree) { } } - return pMTree->pIter != NULL; + *pHasNext = (pMTree->pIter != NULL); + return code; } void tMergeTreeClose(SMergeTree *pMTree) { diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index 9a9c74a3a0..607d96bcbc 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -1759,14 +1759,22 @@ static bool tryCopyDistinctRowFromFileBlock(STsdbReader* pReader, SBlockData* pB return code; } -static bool nextRowFromSttBlocks(SSttBlockReader* pSttBlockReader, STableBlockScanInfo* pScanInfo, int32_t pkSrcSlot, - SVersionRange* pVerRange) { +static int32_t nextRowFromSttBlocks(SSttBlockReader* pSttBlockReader, STableBlockScanInfo* pScanInfo, int32_t pkSrcSlot, + SVersionRange* pVerRange) { + int32_t code = 0; int32_t order = pSttBlockReader->order; int32_t step = ASCENDING_TRAVERSE(order) ? 1 : -1; SRowKey* pNextProc = &pScanInfo->sttKeyInfo.nextProcKey; while (1) { - bool hasVal = tMergeTreeNext(&pSttBlockReader->mergeTree); + bool hasVal = false; + code = tMergeTreeNext(&pSttBlockReader->mergeTree, &hasVal); + if (code) { + tsdbError("failed to iter the next row in stt-file merge tree, code:%s, %s", tstrerror(code), + pSttBlockReader->mergeTree.idStr); + return code; + } + if (!hasVal) { // the next value will be the accessed key in stt pScanInfo->sttKeyInfo.status = STT_FILE_NO_DATA; @@ -1779,7 +1787,6 @@ static bool nextRowFromSttBlocks(SSttBlockReader* pSttBlockReader, STableBlockSc memset(pNextProc->pks[0].pData, 0, pNextProc->pks[0].nData); } } - return false; } TSDBROW* pRow = tMergeTreeGetRow(&pSttBlockReader->mergeTree); @@ -1798,13 +1805,13 @@ static bool nextRowFromSttBlocks(SSttBlockReader* pSttBlockReader, STableBlockSc if (!hasBeenDropped(pScanInfo->delSkyline, &pScanInfo->sttBlockDelIndex, key, ver, order, pVerRange, pSttBlockReader->numOfPks > 0)) { pScanInfo->sttKeyInfo.status = STT_FILE_HAS_DATA; - return true; } } else { pScanInfo->sttKeyInfo.status = STT_FILE_HAS_DATA; - return true; } } + + return code; } static void doPinSttBlock(SSttBlockReader* pSttBlockReader) { tMergeTreePinSttBlock(&pSttBlockReader->mergeTree); } @@ -2380,14 +2387,13 @@ static bool isValidFileBlockRow(SBlockData* pBlockData, int32_t rowIndex, STable return true; } -static bool initSttBlockReader(SSttBlockReader* pSttBlockReader, STableBlockScanInfo* pScanInfo, STsdbReader* pReader) { - bool hasData = true; +static void initSttBlockReader(SSttBlockReader* pSttBlockReader, STableBlockScanInfo* pScanInfo, STsdbReader* pReader) { int32_t order = pReader->info.order; bool asc = ASCENDING_TRAVERSE(order); // the stt block reader has been initialized for this table. 
if (pSttBlockReader->uid == pScanInfo->uid) { - return hasDataInSttBlock(pScanInfo); + return; } if (pSttBlockReader->uid != 0) { @@ -2396,9 +2402,14 @@ static bool initSttBlockReader(SSttBlockReader* pSttBlockReader, STableBlockScan pSttBlockReader->uid = pScanInfo->uid; - // second time init stt block reader + // second or third time init stt block reader if (pScanInfo->cleanSttBlocks && (pReader->info.execMode == READER_EXEC_ROWS)) { - return !pScanInfo->sttBlockReturned; + // only allowed to retrieve clean stt blocks for count once + if (pScanInfo->sttBlockReturned) { + pScanInfo->sttKeyInfo.status = STT_FILE_NO_DATA; + tsdbDebug("uid:%" PRIu64 " set no stt-file data after stt-block retrieved", pScanInfo->uid, pReader->idStr); + } + return; } STimeWindow w = pSttBlockReader->window; @@ -2435,28 +2446,28 @@ static bool initSttBlockReader(SSttBlockReader* pSttBlockReader, STableBlockScan SSttDataInfoForTable info = {.pKeyRangeList = taosArrayInit(4, sizeof(SSttKeyRange))}; if (info.pKeyRangeList == NULL) { pReader->code = terrno; - return false; + return; } int32_t code = tMergeTreeOpen2(&pSttBlockReader->mergeTree, &conf, &info); if (code != TSDB_CODE_SUCCESS) { taosArrayDestroy(info.pKeyRangeList); pReader->code = code; - return false; + return; } code = initMemDataIterator(pScanInfo, pReader); if (code != TSDB_CODE_SUCCESS) { taosArrayDestroy(info.pKeyRangeList); pReader->code = code; - return false; + return; } code = initDelSkylineIterator(pScanInfo, pReader->info.order, &pReader->cost); if (code != TSDB_CODE_SUCCESS) { taosArrayDestroy(info.pKeyRangeList); pReader->code = code; - return false; + return; } if (conf.rspRows) { @@ -2484,27 +2495,26 @@ static bool initSttBlockReader(SSttBlockReader* pSttBlockReader, STableBlockScan SRowKey* p = asc ? 
&pScanInfo->sttRange.skey : &pScanInfo->sttRange.ekey; tRowKeyAssign(&pScanInfo->sttKeyInfo.nextProcKey, p); - - hasData = (pScanInfo->sttKeyInfo.status == STT_FILE_HAS_DATA); } else { // not clean stt blocks INIT_KEYRANGE(&pScanInfo->sttRange); // reset the time window - pScanInfo->sttBlockReturned = false; - hasData = nextRowFromSttBlocks(pSttBlockReader, pScanInfo, pReader->suppInfo.pkSrcSlot, &pReader->info.verRange); + code = nextRowFromSttBlocks(pSttBlockReader, pScanInfo, pReader->suppInfo.pkSrcSlot, &pReader->info.verRange); } } else { pScanInfo->cleanSttBlocks = false; INIT_KEYRANGE(&pScanInfo->sttRange); // reset the time window - pScanInfo->sttBlockReturned = false; - hasData = nextRowFromSttBlocks(pSttBlockReader, pScanInfo, pReader->suppInfo.pkSrcSlot, &pReader->info.verRange); + code = nextRowFromSttBlocks(pSttBlockReader, pScanInfo, pReader->suppInfo.pkSrcSlot, &pReader->info.verRange); } + pScanInfo->sttBlockReturned = false; taosArrayDestroy(info.pKeyRangeList); int64_t el = taosGetTimestampUs() - st; pReader->cost.initSttBlockReader += (el / 1000.0); tsdbDebug("init stt block reader completed, elapsed time:%" PRId64 "us %s", el, pReader->idStr); - return hasData; + if (code != 0) { + pReader->code = code; + } } static bool hasDataInSttBlock(STableBlockScanInfo* pInfo) { return pInfo->sttKeyInfo.status == STT_FILE_HAS_DATA; } @@ -2772,7 +2782,7 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) { } SBlockData* pBlockData = &pReader->status.fileBlockData; - (void) initSttBlockReader(pSttBlockReader, pBlockScanInfo, pReader); + initSttBlockReader(pSttBlockReader, pBlockScanInfo, pReader); if (pReader->code != 0) { code = pReader->code; goto _end; @@ -3190,12 +3200,12 @@ static int32_t doLoadSttBlockSequentially(STsdbReader* pReader) { continue; } - bool hasDataInSttFile = initSttBlockReader(pSttBlockReader, pScanInfo, pReader); + initSttBlockReader(pSttBlockReader, pScanInfo, pReader); if (pReader->code != TSDB_CODE_SUCCESS) { return pReader->code; } - if (!hasDataInSttFile) { + if (!hasDataInSttBlock(pScanInfo)) { bool hasNexTable = moveToNextTable(pUidList, pStatus); if (!hasNexTable) { return TSDB_CODE_SUCCESS; @@ -3287,7 +3297,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) { } if (pScanInfo->sttKeyInfo.status == STT_FILE_READER_UNINIT) { - (void) initSttBlockReader(pSttBlockReader, pScanInfo, pReader); + initSttBlockReader(pSttBlockReader, pScanInfo, pReader); if (pReader->code != 0) { return pReader->code; } @@ -3331,7 +3341,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) { int64_t st = taosGetTimestampUs(); // let's load data from stt files, make sure clear the cleanStt block flag before load the data from stt files - (void) initSttBlockReader(pSttBlockReader, pScanInfo, pReader); + initSttBlockReader(pSttBlockReader, pScanInfo, pReader); if (pReader->code != 0) { return pReader->code; } From f2f0bad021205290ba959c6037c01b1800352cea Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 19 Aug 2024 11:06:43 +0800 Subject: [PATCH 134/181] fix(tsdb): return code for tMergeTreeNext --- source/dnode/vnode/src/tsdb/tsdbRead2.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index 607d96bcbc..639cab9f52 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -1826,9 +1826,14 @@ static bool tryCopyDistinctRowFromSttBlock(TSDBROW* fRow, SSttBlockReader* pSttB // avoid the fetch next 
row replace the referenced stt block in buffer doPinSttBlock(pSttBlockReader); - bool hasVal = nextRowFromSttBlocks(pSttBlockReader, pScanInfo, pReader->suppInfo.pkSrcSlot, &pReader->info.verRange); + code = nextRowFromSttBlocks(pSttBlockReader, pScanInfo, pReader->suppInfo.pkSrcSlot, &pReader->info.verRange); doUnpinSttBlock(pSttBlockReader); - if (hasVal) { + + if (code) { + return code; + } + + if (hasDataInSttBlock(pScanInfo)) { SRowKey* pNext = getCurrentKeyInSttBlock(pSttBlockReader); if (pkCompEx(pSttKey, pNext) != 0) { code = doAppendRowFromFileBlock(pReader->resBlockInfo.pResBlock, pReader, fRow->pBlockData, fRow->iRow); @@ -4097,7 +4102,11 @@ int32_t doMergeRowsInSttBlock(SSttBlockReader* pSttBlockReader, STableBlockScanI SRowKey* pRowKey = &pScanInfo->lastProcKey; int32_t code = TSDB_CODE_SUCCESS; - while (nextRowFromSttBlocks(pSttBlockReader, pScanInfo, pkSrcSlot, pVerRange)) { + while (1) { + code = nextRowFromSttBlocks(pSttBlockReader, pScanInfo, pkSrcSlot, pVerRange); + if (code) { + + } SRowKey* pNextKey = getCurrentKeyInSttBlock(pSttBlockReader); int32_t ret = pkCompEx(pRowKey, pNextKey); From 58fa2453a1ef7cad6902d9bfbee6c4909ed5ebd4 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 19 Aug 2024 11:20:42 +0800 Subject: [PATCH 135/181] fix double free --- source/libs/transport/src/transCli.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 59321045ca..862a74c72b 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -2302,8 +2302,8 @@ static FORCE_INLINE void destroyCmsgAndAhandle(void* param) { pThrd->destroyAhandleFp(pMsg->ctx->ahandle); } - if (pMsg->msg.info.handle !=0) { - (void)transReleaseExHandle(transGetRefMgt(), (int64_t)pMsg->msg.info.handle); + if (pMsg->msg.info.handle != 0) { + (void)transReleaseExHandle(transGetRefMgt(), (int64_t)pMsg->msg.info.handle); (void)transRemoveExHandle(transGetRefMgt(), (int64_t)pMsg->msg.info.handle); } @@ -2957,6 +2957,7 @@ int32_t transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, S STrans* pTransInst = (STrans*)transAcquireExHandle(transGetInstMgt(), (int64_t)shandle); if (pTransInst == NULL) { transFreeMsg(pReq->pCont); + pReq->pCont = NULL; return TSDB_CODE_RPC_MODULE_QUIT; } int32_t code = 0; @@ -3008,6 +3009,7 @@ int32_t transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, S _exception: transFreeMsg(pReq->pCont); + pReq->pCont = NULL; (void)transReleaseExHandle(transGetInstMgt(), (int64_t)shandle); return code; } @@ -3053,6 +3055,7 @@ int32_t transSendRequestWithId(void* shandle, const SEpSet* pEpSet, STransMsg* p _exception: transFreeMsg(pReq->pCont); + pReq->pCont = NULL; (void)transReleaseExHandle(transGetInstMgt(), (int64_t)shandle); return code; } @@ -3061,6 +3064,7 @@ int32_t transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STra STrans* pTransInst = (STrans*)transAcquireExHandle(transGetInstMgt(), (int64_t)shandle); if (pTransInst == NULL) { transFreeMsg(pReq->pCont); + pReq->pCont = NULL; return TSDB_CODE_RPC_MODULE_QUIT; } int32_t code = 0; @@ -3139,6 +3143,7 @@ _RETURN1: (void)transReleaseExHandle(transGetInstMgt(), (int64_t)shandle); taosMemoryFree(pTransRsp); taosMemoryFree(pReq->pCont); + pReq->pCont = NULL; return code; } int32_t transCreateSyncMsg(STransMsg* pTransMsg, int64_t* refId) { @@ -3183,6 +3188,7 @@ int32_t transSendRecvWithTimeout(void* shandle, SEpSet* pEpSet, STransMsg* pReq, STrans* 
pTransInst = (STrans*)transAcquireExHandle(transGetInstMgt(), (int64_t)shandle); if (pTransInst == NULL) { transFreeMsg(pReq->pCont); + pReq->pCont = NULL; return TSDB_CODE_RPC_MODULE_QUIT; } @@ -3263,6 +3269,7 @@ _RETURN: return code; _RETURN2: transFreeMsg(pReq->pCont); + pReq->pCont = NULL; taosMemoryFree(pTransMsg); (void)transReleaseExHandle(transGetInstMgt(), (int64_t)shandle); return code; From bc2f648cf79d5e13c83276a4db0aefb6c7b9e94b Mon Sep 17 00:00:00 2001 From: wangjiaming0909 <604227650@qq.com> Date: Mon, 19 Aug 2024 13:33:34 +0800 Subject: [PATCH 136/181] fix memory leak in tsdbreader --- source/dnode/vnode/src/tsdb/tsdbMergeTree.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c index 4729b912a7..cfb9a2f215 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c +++ b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c @@ -115,16 +115,14 @@ void destroySttBlockReader(SArray *pLDataIterArray, SSttBlockLoadCostInfo *pLoad SArray *pList = taosArrayGetP(pLDataIterArray, i); for (int32_t j = 0; j < taosArrayGetSize(pList); ++j) { SLDataIter *pIter = taosArrayGetP(pList, j); - if (pIter->pBlockLoadInfo == NULL) { - continue; - } - - SSttBlockLoadCostInfo *pCost = &pIter->pBlockLoadInfo->cost; - if (pLoadCost != NULL) { - pLoadCost->loadBlocks += pCost->loadBlocks; - pLoadCost->loadStatisBlocks += pCost->loadStatisBlocks; - pLoadCost->blockElapsedTime += pCost->blockElapsedTime; - pLoadCost->statisElapsedTime += pCost->statisElapsedTime; + if (pIter->pBlockLoadInfo != NULL) { + SSttBlockLoadCostInfo *pCost = &pIter->pBlockLoadInfo->cost; + if (pLoadCost != NULL) { + pLoadCost->loadBlocks += pCost->loadBlocks; + pLoadCost->loadStatisBlocks += pCost->loadStatisBlocks; + pLoadCost->blockElapsedTime += pCost->blockElapsedTime; + pLoadCost->statisElapsedTime += pCost->statisElapsedTime; + } } destroyLDataIter(pIter); From d4dc632d6f43aab9d5e22c5b41aa5ca1c7ab52d5 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 19 Aug 2024 13:39:30 +0800 Subject: [PATCH 137/181] fix(tsdb): fix syntax error. 
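This patch reads as a small follow-up to the tMergeTreeNext rework in patches 133–134, which changed the stt-file iterator from returning a bare bool into returning an int32_t error code with the has-next flag moved to an out-parameter, so that file corruption and other I/O failures propagate to the caller instead of being folded into "no more rows". A minimal sketch of that iterator shape, with hypothetical names (iter_t, iter_next) and a toy data source standing in for the stt reader:

```c
/*
 * Sketch of the error-propagating iterator shape: the return value is an
 * error code, and "is there another row" comes back through an
 * out-parameter. iter_t / iter_next are hypothetical names.
 */
#include <stdio.h>

#define ERR_OK          0
#define ERR_INVALID_ARG 100
#define ERR_IO          200

typedef struct {
  int pos;
  int len;
  int fail_at; /* simulate an I/O error at this position */
  int cur;
} iter_t;

static int iter_next(iter_t *it, int *has_next) {
  if (has_next == NULL) return ERR_INVALID_ARG;
  *has_next = 0;

  if (it->pos >= it->len) return ERR_OK;     /* exhausted, but not an error */
  if (it->pos == it->fail_at) return ERR_IO; /* failure is no longer silent */

  it->cur = it->pos * 10;
  it->pos++;
  *has_next = 1;
  return ERR_OK;
}

int main(void) {
  iter_t it = {.pos = 0, .len = 5, .fail_at = 3};
  int    has_next = 0;
  int    code;

  while ((code = iter_next(&it, &has_next)) == ERR_OK && has_next) {
    printf("row %d\n", it.cur);
  }
  if (code != ERR_OK) {
    printf("iterator failed, code=%d\n", code); /* caller can now tell why it stopped */
  }
  return 0;
}
```

The same reshaping appears again later in the series (patch 140), where ASSERT-based checks in the percentile and histogram helpers are replaced by returned error codes and out-parameters.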
--- source/dnode/vnode/src/tsdb/tsdbRead2.c | 2 +- source/libs/executor/src/scanoperator.c | 6 +----- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index 639cab9f52..5987b673c3 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -2412,7 +2412,7 @@ static void initSttBlockReader(SSttBlockReader* pSttBlockReader, STableBlockScan // only allowed to retrieve clean stt blocks for count once if (pScanInfo->sttBlockReturned) { pScanInfo->sttKeyInfo.status = STT_FILE_NO_DATA; - tsdbDebug("uid:%" PRIu64 " set no stt-file data after stt-block retrieved", pScanInfo->uid, pReader->idStr); + tsdbDebug("uid:%" PRIu64 " set no stt-file data after stt-block retrieved, %s", pScanInfo->uid, pReader->idStr); } return; } diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 1935f2b0b6..5984f75c05 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -465,14 +465,10 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca SSDataBlock* p = NULL; code = pAPI->tsdReader.tsdReaderRetrieveDataBlock(pTableScanInfo->dataReader, &p, NULL); - if (p == NULL || code != TSDB_CODE_SUCCESS) { + if (p == NULL || code != TSDB_CODE_SUCCESS || p != pBlock) { return code; } - if(p != pBlock) { - qError("[loadDataBlock] p != pBlock"); - return TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR; - } doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, pBlock->info.rows); // restore the previous value From 872291686d21fe9ac8345e8560c54b0adb1bf2ab Mon Sep 17 00:00:00 2001 From: Shungang Li Date: Mon, 19 Aug 2024 14:02:10 +0800 Subject: [PATCH 138/181] enh: ttl add more debug info --- source/dnode/vnode/src/meta/metaTtl.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaTtl.c b/source/dnode/vnode/src/meta/metaTtl.c index b5436af2bf..cd1aa7bcad 100644 --- a/source/dnode/vnode/src/meta/metaTtl.c +++ b/source/dnode/vnode/src/meta/metaTtl.c @@ -254,6 +254,16 @@ static int32_t ttlMgrFindExpiredOneEntry(const void *pKey, int keyLen, const voi return c; } +// static int32_t ttlMgrDumpOneEntry(const void *pKey, int keyLen, const void *pVal, int valLen, void *pDumpCtx) { +// STtlIdxKeyV1 *ttlKey = (STtlIdxKeyV1 *)pKey; +// int64_t *ttlDays = (int64_t *)pVal; + +// metaInfo("ttlMgr dump, ttl: %" PRId64 ", ctime: %" PRId64 ", uid: %" PRId64, *ttlDays, ttlKey->deleteTimeMs, +// ttlKey->uid); + +// TAOS_RETURN(TSDB_CODE_SUCCESS); +// } + static int ttlMgrConvert(TTB *pOldTtlIdx, TTB *pNewTtlIdx, void *pMeta) { SMeta *meta = pMeta; @@ -409,8 +419,8 @@ int32_t ttlMgrFlush(STtlManger *pTtlMgr, TXN *pTxn) { STtlCacheEntry *cacheEntry = taosHashGet(pTtlMgr->pTtlCache, pUid, sizeof(*pUid)); if (cacheEntry == NULL) { - metaInfo("%s, ttlMgr flush failed to get ttl cache, might be restoring, uid: %" PRId64 ", type: %d", - pTtlMgr->logPrefix, *pUid, pEntry->type); + metaInfo("%s, ttlMgr flush failed to get ttl cache, uid: %" PRId64 ", type: %d", pTtlMgr->logPrefix, *pUid, + pEntry->type); code = taosHashRemove(pTtlMgr->pDirtyUids, pUid, sizeof(*pUid)); if (TSDB_CODE_SUCCESS != code) { metaError("%s, ttlMgr flush failed to remove dirty uid since %s", pTtlMgr->logPrefix, tstrerror(code)); @@ -454,6 +464,10 @@ int32_t ttlMgrFlush(STtlManger *pTtlMgr, TXN *pTxn) { goto _out; } + metaDebug("isdel:%d", pEntry->type == ENTRY_TYPE_DELETE); + 
metaDebug("ttlkey:%" PRId64 ", uid:%" PRId64, ttlKey.deleteTimeMs, ttlKey.uid); + metaDebug("ttlkeyDirty:%" PRId64 ", uid:%" PRId64, ttlKeyDirty.deleteTimeMs, ttlKeyDirty.uid); + code = taosHashRemove(pTtlMgr->pDirtyUids, pUid, sizeof(*pUid)); if (TSDB_CODE_SUCCESS != code) { metaError("%s, ttlMgr flush failed to remove dirty uid since %s", pTtlMgr->logPrefix, tstrerror(code)); From c6350794fee265cffe8b4a8016199da35df0a285 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 19 Aug 2024 15:53:55 +0800 Subject: [PATCH 139/181] fix(scheduler/exec cb): remove schedulerFreeJob from cb --- source/client/src/clientImpl.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 664de5619f..e12c761fcc 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -1135,8 +1135,6 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) { (void)atomic_add_fetch_64((int64_t*)&pActivity->numOfInsertRows, pResult->numOfRows); } } - - schedulerFreeJob(&pRequest->body.queryJob, 0); } taosMemoryFree(pResult); From 5fb431aff4e172d50c76391a17b4f0dfda91d7dc Mon Sep 17 00:00:00 2001 From: sima Date: Mon, 19 Aug 2024 15:24:05 +0800 Subject: [PATCH 140/181] enh:[TD-31525] Remove ASSERT in libs/function. --- include/util/taoserror.h | 2 + source/libs/function/inc/thistogram.h | 2 +- source/libs/function/inc/tpercentile.h | 2 +- source/libs/function/src/builtinsimpl.c | 4 +- source/libs/function/src/thistogram.c | 89 +++++++++-------- source/libs/function/src/tpercentile.c | 126 ++++++++++++++++-------- source/util/src/terror.c | 2 + 7 files changed, 142 insertions(+), 85 deletions(-) diff --git a/include/util/taoserror.h b/include/util/taoserror.h index 1911c48d26..94b6d29cd9 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -878,6 +878,8 @@ int32_t taosGetErrSize(); #define TSDB_CODE_FUNC_INVALID_VALUE_RANGE TAOS_DEF_ERROR_CODE(0, 0x280C) #define TSDB_CODE_FUNC_SETUP_ERROR TAOS_DEF_ERROR_CODE(0, 0x280D) #define TSDB_CODE_FUNC_INVALID_RES_LENGTH TAOS_DEF_ERROR_CODE(0, 0x280E) +#define TSDB_CODE_FUNC_HISTOGRAM_ERROR TAOS_DEF_ERROR_CODE(0, 0x280F) +#define TSDB_CODE_FUNC_PERCENTILE_ERROR TAOS_DEF_ERROR_CODE(0, 0x2810) //udf diff --git a/source/libs/function/inc/thistogram.h b/source/libs/function/inc/thistogram.h index 5bc6a87c70..08bff7117e 100644 --- a/source/libs/function/inc/thistogram.h +++ b/source/libs/function/inc/thistogram.h @@ -59,7 +59,7 @@ int32_t tHistogramCreate(int32_t numOfEntries, SHistogramInfo** pHisto); SHistogramInfo* tHistogramCreateFrom(void* pBuf, int32_t numOfBins); int32_t tHistogramAdd(SHistogramInfo** pHisto, double val); -int64_t tHistogramSum(SHistogramInfo* pHisto, double v); +int32_t tHistogramSum(SHistogramInfo* pHisto, double v, int64_t *res); int32_t tHistogramUniform(SHistogramInfo* pHisto, double* ratio, int32_t num, double** pVal); int32_t tHistogramMerge(SHistogramInfo* pHisto1, SHistogramInfo* pHisto2, int32_t numOfEntries, diff --git a/source/libs/function/inc/tpercentile.h b/source/libs/function/inc/tpercentile.h index 118571c8aa..1b80c2b1da 100644 --- a/source/libs/function/inc/tpercentile.h +++ b/source/libs/function/inc/tpercentile.h @@ -47,7 +47,7 @@ typedef struct tMemBucketSlot { } tMemBucketSlot; struct tMemBucket; -typedef int32_t (*__perc_hash_func_t)(struct tMemBucket *pBucket, const void *value); +typedef int32_t (*__perc_hash_func_t)(struct tMemBucket *pBucket, const void *value, int32_t *index); typedef struct tMemBucket { int16_t 
numOfSlots; diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 2d664e5d31..08abf41973 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -4018,7 +4018,9 @@ int32_t saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* if (NULL == pColInfo) { return TSDB_CODE_OUT_OF_RANGE; } - ASSERT(pColInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP); + if (pColInfo->info.type != TSDB_DATA_TYPE_TIMESTAMP) { + return TSDB_CODE_FUNC_FUNTION_PARA_TYPE; + } key.groupId = pSrcBlock->info.id.groupId; key.ts = *(int64_t*)colDataGetData(pColInfo, rowIndex); } diff --git a/source/libs/function/src/thistogram.c b/source/libs/function/src/thistogram.c index f57f6aa118..8594b0584c 100644 --- a/source/libs/function/src/thistogram.c +++ b/source/libs/function/src/thistogram.c @@ -14,6 +14,7 @@ */ #include "os.h" +#include "query.h" #include "taosdef.h" #include "thistogram.h" #include "tlosertree.h" @@ -81,9 +82,9 @@ int32_t tHistogramAdd(SHistogramInfo** pHisto, double val) { #if defined(USE_ARRAYLIST) int32_t idx = histoBinarySearch((*pHisto)->elems, (*pHisto)->numOfEntries, val); - if (ASSERTS(idx >= 0 && idx <= (*pHisto)->maxEntries && (*pHisto)->elems != NULL, "tHistogramAdd Error, idx:%d, maxEntries:%d, elems:%p", - idx, (*pHisto)->maxEntries, (*pHisto)->elems)) { - return TSDB_CODE_FAILED; + if (idx < 0 || idx > (*pHisto)->maxEntries || (*pHisto)->elems == NULL) { + qError("tHistogramAdd Error, idx:%d, maxEntries:%d, elems:%p", idx, (*pHisto)->maxEntries, (*pHisto)->elems); + return TSDB_CODE_FUNC_HISTOGRAM_ERROR; } if ((*pHisto)->elems[idx].val == val && idx >= 0) { @@ -95,21 +96,19 @@ int32_t tHistogramAdd(SHistogramInfo** pHisto, double val) { } else { /* insert a new slot */ if ((*pHisto)->numOfElems >= 1 && idx < (*pHisto)->numOfEntries) { if (idx > 0) { - if (ASSERTS((*pHisto)->elems[idx - 1].val <= val, "tHistogramAdd Error, elems[%d].val:%lf, val:%lf", - idx - 1, (*pHisto)->elems[idx - 1].val, val)) { - return TSDB_CODE_FAILED; + if ((*pHisto)->elems[idx - 1].val > val) { + qError("tHistogramAdd Error, elems[%d].val:%lf, val:%lf", idx - 1, (*pHisto)->elems[idx - 1].val, val); + return TSDB_CODE_FUNC_HISTOGRAM_ERROR; } } else { - if (ASSERTS((*pHisto)->elems[idx].val > val, "tHistogramAdd Error, elems[%d].val:%lf, val:%lf", - idx, (*pHisto)->elems[idx].val, val)) { - return TSDB_CODE_FAILED; + if ((*pHisto)->elems[idx].val <= val) { + qError("tHistogramAdd Error, elems[%d].val:%lf, val:%lf", idx, (*pHisto)->elems[idx].val, val); + return TSDB_CODE_FUNC_HISTOGRAM_ERROR; } } - } else if ((*pHisto)->numOfElems > 0) { - if (ASSERTS((*pHisto)->elems[(*pHisto)->numOfEntries].val <= val, "tHistogramAdd Error, elems[%d].val:%lf, val:%lf", - (*pHisto)->numOfEntries, (*pHisto)->elems[idx].val, val)) { - return TSDB_CODE_FAILED; - } + } else if ((*pHisto)->numOfElems > 0 && (*pHisto)->elems[(*pHisto)->numOfEntries].val > val) { + qError("tHistogramAdd Error, elems[%d].val:%lf, val:%lf", (*pHisto)->numOfEntries, (*pHisto)->elems[idx].val, val); + return TSDB_CODE_FUNC_HISTOGRAM_ERROR; } code = histogramCreateBin(*pHisto, idx, val); @@ -225,9 +224,9 @@ int32_t tHistogramAdd(SHistogramInfo** pHisto, double val) { tSkipListNode* pNext = pNode->pForward[0]; SHistBin* pNextEntry = (SHistBin*)pNext->pData; - if (ASSERTS(pNextEntry->val - pEntry->val == pEntry->delta, "tHistogramAdd Error, pNextEntry->val:%lf, pEntry->val:%lf, pEntry->delta:%lf", - pNextEntry->val, pEntry->val, pEntry->delta)) { - return -1; + 
if (pNextEntry->val - pEntry->val != pEntry->delta) { + qError("tHistogramAdd Error, pNextEntry->val:%lf, pEntry->val:%lf, pEntry->delta:%lf", pNextEntry->val, pEntry->val, pEntry->delta); + return TSDB_CODE_FUNC_HISTOGRAM_ERROR; } double newVal = (pEntry->val * pEntry->num + pNextEntry->val * pNextEntry->num) / (pEntry->num + pNextEntry->num); @@ -278,8 +277,9 @@ int32_t tHistogramAdd(SHistogramInfo** pHisto, double val) { } else { SHistBin* pEntry = (SHistBin*)pResNode->pData; - if (ASSERTS(pEntry->val == val, "tHistogramAdd Error, pEntry->val:%lf, val:%lf")) { - return -1; + if (pEntry->val != val) { + qError("tHistogramAdd Error, pEntry->val:%lf, val:%lf", pEntry->val, val); + return TSDB_CODE_FUNC_HISTOGRAM_ERROR; } pEntry->num += 1; } @@ -356,9 +356,9 @@ int32_t histogramCreateBin(SHistogramInfo* pHisto, int32_t index, double val) { (void)memmove(&pHisto->elems[index + 1], &pHisto->elems[index], sizeof(SHistBin) * remain); } - if (ASSERTS(index >= 0 && index <= pHisto->maxEntries, "histogramCreateBin Error, index:%d, maxEntries:%d", - index, pHisto->maxEntries)) { - return TSDB_CODE_FAILED; + if (index < 0 || index > pHisto->maxEntries) { + qError("histogramCreateBin Error, index:%d, maxEntries:%d", index, pHisto->maxEntries); + return TSDB_CODE_FUNC_HISTOGRAM_ERROR; } pHisto->elems[index].num = 1; @@ -373,9 +373,9 @@ int32_t histogramCreateBin(SHistogramInfo* pHisto, int32_t index, double val) { pHisto->elems[pHisto->maxEntries].num = 0; } #endif - if (ASSERTS(pHisto->numOfEntries <= pHisto->maxEntries, "histogramCreateBin Error, numOfEntries:%d, maxEntries:%d", - pHisto->numOfEntries, pHisto->maxEntries)) { - return TSDB_CODE_FAILED; + if (pHisto->numOfEntries > pHisto->maxEntries) { + qError("histogramCreateBin Error, numOfEntries:%d, maxEntries:%d", pHisto->numOfEntries, pHisto->maxEntries); + return TSDB_CODE_FUNC_HISTOGRAM_ERROR; } return TSDB_CODE_SUCCESS; @@ -411,8 +411,9 @@ void tHistogramPrint(SHistogramInfo* pHisto) { * Estimated number of points in the interval (−inf,b]. 
* @param pHisto * @param v + * @param res */ -int64_t tHistogramSum(SHistogramInfo* pHisto, double v) { +int32_t tHistogramSum(SHistogramInfo* pHisto, double v, int64_t *res) { #if defined(USE_ARRAYLIST) int32_t slotIdx = histoBinarySearch(pHisto->elems, pHisto->numOfEntries, v); if (pHisto->elems[slotIdx].val != v) { @@ -420,14 +421,18 @@ int64_t tHistogramSum(SHistogramInfo* pHisto, double v) { if (slotIdx < 0) { slotIdx = 0; - ASSERTS(v <= pHisto->elems[slotIdx].val, "tHistogramSum Error, elems[%d].val:%lf, v:%lf", - slotIdx, pHisto->elems[slotIdx].val, v); + if (v > pHisto->elems[slotIdx].val) { + qError("tHistogramSum Error, elems[%d].val:%lf, v:%lf", slotIdx, pHisto->elems[slotIdx].val, v); + return TSDB_CODE_FUNC_HISTOGRAM_ERROR; + } } else { - ASSERTS(v >= pHisto->elems[slotIdx].val, "tHistogramSum Error, elems[%d].val:%lf, v:%lf", - slotIdx, pHisto->elems[slotIdx].val, v); - if (slotIdx + 1 < pHisto->numOfEntries) { - ASSERTS(v < pHisto->elems[slotIdx + 1].val, "tHistogramSum Error, elems[%d].val:%lf, v:%lf", - slotIdx + 1, pHisto->elems[slotIdx + 1].val, v); + if (v < pHisto->elems[slotIdx].val) { + qError("tHistogramSum Error, elems[%d].val:%lf, v:%lf", slotIdx, pHisto->elems[slotIdx].val, v); + return TSDB_CODE_FUNC_HISTOGRAM_ERROR; + } + if (slotIdx + 1 < pHisto->numOfEntries && v >= pHisto->elems[slotIdx + 1].val) { + qError("tHistogramSum Error, elems[%d].val:%lf, v:%lf", slotIdx + 1, pHisto->elems[slotIdx + 1].val, v); + return TSDB_CODE_FUNC_HISTOGRAM_ERROR; } } } @@ -447,8 +452,9 @@ int64_t tHistogramSum(SHistogramInfo* pHisto, double v) { s1 = s1 + m1 / 2; - return (int64_t)s1; + *res = (int64_t)s1; #endif + return TSDB_CODE_SUCCESS; } int32_t tHistogramUniform(SHistogramInfo* pHisto, double* ratio, int32_t num, double** pVal) { @@ -484,9 +490,11 @@ int32_t tHistogramUniform(SHistogramInfo* pHisto, double* ratio, int32_t num, do j += 1; } - ASSERTS(total <= numOfElem && total + pHisto->elems[j + 1].num > numOfElem, - "tHistogramUniform Error, total:%ld, numOfElem:%ld, elems[%d].num:%ld", - total, (int64_t)numOfElem, j + 1, pHisto->elems[j + 1].num); + if (total > numOfElem || total + pHisto->elems[j + 1].num <= numOfElem) { + qError("tHistogramUniform Error, total:%d, numOfElem:%d, elems[%d].num:%d", + (int32_t)total, (int32_t)numOfElem, j + 1, (int32_t)pHisto->elems[j + 1].num); + return TSDB_CODE_FUNC_HISTOGRAM_ERROR; + } double delta = numOfElem - total; if (fabs(delta) < FLT_EPSILON) { @@ -545,9 +553,10 @@ int32_t tHistogramUniform(SHistogramInfo* pHisto, double* ratio, int32_t num, do j += 1; } - ASSERTS(total <= numOfElem && total + pEntry->num > numOfElem, - "tHistogramUniform Error, total:%d, numOfElem:%d, pEntry->num:%d", - total, numOfElem, pEntry->num); + if (total > numOfElem || total + pEntry->num <= numOfElem) { + qError("tHistogramUniform Error, total:%d, numOfElem:%d, pEntry->num:%d", (int32_t)total, (int32_t)numOfElem, (int32_t)pEntry->num); + return TSDB_CODE_FUNC_HISTOGRAM_ERROR; + } double delta = numOfElem - total; if (fabs(delta) < FLT_EPSILON) { diff --git a/source/libs/function/src/tpercentile.c b/source/libs/function/src/tpercentile.c index 4eefd150f3..ae0459427e 100644 --- a/source/libs/function/src/tpercentile.c +++ b/source/libs/function/src/tpercentile.c @@ -13,6 +13,7 @@ * along with this program. If not, see . 
*/ +#include "query.h" #include "taoserror.h" #include "tcompare.h" #include "tglobal.h" @@ -107,7 +108,10 @@ static void resetPosInfo(SSlotInfo *pInfo) { } int32_t findOnlyResult(tMemBucket *pMemBucket, double *result) { - ASSERT(pMemBucket->total == 1); + if (pMemBucket->total != 1) { + qError("MemBucket:%p, total:%d, but only one element is expected", pMemBucket, pMemBucket->total); + return TSDB_CODE_FUNC_PERCENTILE_ERROR; + } terrno = 0; for (int32_t i = 0; i < pMemBucket->numOfSlots; ++i) { @@ -120,7 +124,10 @@ int32_t findOnlyResult(tMemBucket *pMemBucket, double *result) { SArray **pList = taosHashGet(pMemBucket->groupPagesMap, &groupId, sizeof(groupId)); if (pList != NULL) { SArray *list = *pList; - ASSERT(list->size == 1); + if (list->size != 1) { + qError("list:%p, total list size:%zu, but only one element is expected", list, list->size); + return TSDB_CODE_FUNC_PERCENTILE_ERROR; + } int32_t *pageId = taosArrayGet(list, 0); if (NULL == pageId) { @@ -130,7 +137,11 @@ int32_t findOnlyResult(tMemBucket *pMemBucket, double *result) { if (pPage == NULL) { return terrno; } - ASSERT(pPage->num == 1); + if (pPage->num != 1) { + qError("page:%p, total num:%d, but only one element is expected", pPage, pPage->num); + releaseBufPage(pMemBucket->pBuffer, pPage); + return TSDB_CODE_FUNC_PERCENTILE_ERROR; + } GET_TYPED_DATA(*result, double, pMemBucket->type, pPage->data); return TSDB_CODE_SUCCESS; @@ -141,64 +152,69 @@ int32_t findOnlyResult(tMemBucket *pMemBucket, double *result) { return TSDB_CODE_SUCCESS; } -int32_t tBucketIntHash(tMemBucket *pBucket, const void *value) { +int32_t tBucketIntHash(tMemBucket *pBucket, const void *value, int32_t *index) { int64_t v = 0; GET_TYPED_DATA(v, int64_t, pBucket->type, value); - int32_t index = -1; + *index = -1; if (v > pBucket->range.dMaxVal || v < pBucket->range.dMinVal) { - return index; + return TSDB_CODE_SUCCESS; } // divide the value range into 1024 buckets uint64_t span = pBucket->range.dMaxVal - pBucket->range.dMinVal; if (span < pBucket->numOfSlots) { int64_t delta = v - pBucket->range.dMinVal; - index = (delta % pBucket->numOfSlots); + *index = (delta % pBucket->numOfSlots); } else { double slotSpan = ((double)span) / pBucket->numOfSlots; uint64_t delta = (uint64_t)(v - pBucket->range.dMinVal); - index = delta / slotSpan; - if (v == pBucket->range.dMaxVal || index == pBucket->numOfSlots) { - index -= 1; + *index = delta / slotSpan; + if (v == pBucket->range.dMaxVal || *index == pBucket->numOfSlots) { + *index -= 1; } } - ASSERTS(index >= 0 && index < pBucket->numOfSlots, "tBucketIntHash Error, index:%d, numOfSlots:%d", - index, pBucket->numOfSlots); - return index; + if (*index < 0 || *index >= pBucket->numOfSlots) { + qError("tBucketIntHash Error, index:%d, numOfSlots:%d", *index, pBucket->numOfSlots); + return TSDB_CODE_FUNC_PERCENTILE_ERROR; + } + return TSDB_CODE_SUCCESS; } -int32_t tBucketUintHash(tMemBucket *pBucket, const void *value) { +int32_t tBucketUintHash(tMemBucket *pBucket, const void *value, int32_t *index) { int64_t v = 0; GET_TYPED_DATA(v, uint64_t, pBucket->type, value); - int32_t index = -1; + *index = -1; if (v > pBucket->range.u64MaxVal || v < pBucket->range.u64MinVal) { - return index; + return TSDB_CODE_SUCCESS; } // divide the value range into 1024 buckets uint64_t span = pBucket->range.u64MaxVal - pBucket->range.u64MinVal; if (span < pBucket->numOfSlots) { int64_t delta = v - pBucket->range.u64MinVal; - index = (int32_t)(delta % pBucket->numOfSlots); + *index = (int32_t)(delta % pBucket->numOfSlots); } else { 
double slotSpan = (double)span / pBucket->numOfSlots; - index = (int32_t)((v - pBucket->range.u64MinVal) / slotSpan); + *index = (int32_t)((v - pBucket->range.u64MinVal) / slotSpan); if (v == pBucket->range.u64MaxVal) { - index -= 1; + *index -= 1; } } - ASSERT(index >= 0 && index < pBucket->numOfSlots); - return index; + if (*index < 0 || *index >= pBucket->numOfSlots) { + qError("tBucketUintHash Error, index:%d, numOfSlots:%d", *index, pBucket->numOfSlots); + return TSDB_CODE_FUNC_PERCENTILE_ERROR; + } + return TSDB_CODE_SUCCESS; } -int32_t tBucketDoubleHash(tMemBucket *pBucket, const void *value) { +int32_t tBucketDoubleHash(tMemBucket *pBucket, const void *value, int32_t *index) { double v = 0; if (pBucket->type == TSDB_DATA_TYPE_FLOAT) { v = GET_FLOAT_VAL(value); @@ -206,27 +222,30 @@ int32_t tBucketDoubleHash(tMemBucket *pBucket, const void *value) { v = GET_DOUBLE_VAL(value); } - int32_t index = -1; + *index = -1; if (v > pBucket->range.dMaxVal || v < pBucket->range.dMinVal) { - return index; + return TSDB_CODE_SUCCESS; } // divide a range of [dMinVal, dMaxVal] into 1024 buckets double span = pBucket->range.dMaxVal - pBucket->range.dMinVal; if (span < pBucket->numOfSlots) { int32_t delta = (int32_t)(v - pBucket->range.dMinVal); - index = (delta % pBucket->numOfSlots); + *index = (delta % pBucket->numOfSlots); } else { double slotSpan = span / pBucket->numOfSlots; - index = (int32_t)((v - pBucket->range.dMinVal) / slotSpan); + *index = (int32_t)((v - pBucket->range.dMinVal) / slotSpan); if (v == pBucket->range.dMaxVal) { - index -= 1; + *index -= 1; } } - ASSERT(index >= 0 && index < pBucket->numOfSlots); - return index; + if (*index < 0 || *index >= pBucket->numOfSlots) { + qError("tBucketDoubleHash Error, index:%d, numOfSlots:%d", *index, pBucket->numOfSlots); + return TSDB_CODE_FUNC_PERCENTILE_ERROR; + } + return TSDB_CODE_SUCCESS; } static __perc_hash_func_t getHashFunc(int32_t type) { @@ -333,7 +352,7 @@ void tMemBucketDestroy(tMemBucket *pBucket) { taosMemoryFreeClear(pBucket); } -void tMemBucketUpdateBoundingBox(MinMaxEntry *r, const char *data, int32_t dataType) { +int32_t tMemBucketUpdateBoundingBox(MinMaxEntry *r, const char *data, int32_t dataType) { if (IS_SIGNED_NUMERIC_TYPE(dataType)) { int64_t v = 0; GET_TYPED_DATA(v, int64_t, dataType, data); @@ -368,8 +387,10 @@ void tMemBucketUpdateBoundingBox(MinMaxEntry *r, const char *data, int32_t dataT r->dMaxVal = v; } } else { - ASSERT(0); + qError("tMemBucketUpdateBoundingBox Error, invalid data type:%d", dataType); + return TSDB_CODE_FUNC_PERCENTILE_ERROR; } + return TSDB_CODE_SUCCESS; } /* @@ -378,9 +399,14 @@ void tMemBucketUpdateBoundingBox(MinMaxEntry *r, const char *data, int32_t dataT int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) { int32_t count = 0; int32_t bytes = pBucket->bytes; + int32_t code = TSDB_CODE_SUCCESS; for (int32_t i = 0; i < size; ++i) { char *d = (char *)data + i * bytes; - int32_t index = (pBucket->hashFunc)(pBucket, d); + int32_t index = -1; + code = (pBucket->hashFunc)(pBucket, d, &index); + if (TSDB_CODE_SUCCESS != code) { + return code; + } if (index < 0) { continue; } @@ -388,7 +414,10 @@ int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) { count += 1; tMemBucketSlot *pSlot = &pBucket->pSlots[index]; - tMemBucketUpdateBoundingBox(&pSlot->range, d, pBucket->type); + code = tMemBucketUpdateBoundingBox(&pSlot->range, d, pBucket->type); + if (TSDB_CODE_SUCCESS != code) { + return code; + } // ensure available memory pages to allocate int32_t groupId 
= getGroupId(pBucket->numOfSlots, index, pBucket->times); @@ -396,7 +425,11 @@ int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) { if (pSlot->info.data == NULL || pSlot->info.data->num >= pBucket->elemPerPage) { if (pSlot->info.data != NULL) { - ASSERT(pSlot->info.data->num >= pBucket->elemPerPage && pSlot->info.size > 0); + if (pSlot->info.data->num < pBucket->elemPerPage || pSlot->info.size <= 0) { + qError("tMemBucketPut failed since wrong pSLot info dataNum : %d, size : %d", + pSlot->info.data->num, pSlot->info.size); + return TSDB_CODE_FUNC_PERCENTILE_ERROR; + } // keep the pointer in memory setBufPageDirty(pSlot->info.data, true); @@ -411,7 +444,7 @@ int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) { if (NULL == pPageIdList) { return terrno; } - int32_t code = taosHashPut(pBucket->groupPagesMap, &groupId, sizeof(groupId), &pPageIdList, POINTER_BYTES); + code = taosHashPut(pBucket->groupPagesMap, &groupId, sizeof(groupId), &pPageIdList, POINTER_BYTES); if (TSDB_CODE_SUCCESS != code) { taosArrayDestroy(pPageIdList); return code; @@ -449,21 +482,23 @@ int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) { * j is the last slot of current segment, we need to get the first * slot of the next segment. */ -static MinMaxEntry getMinMaxEntryOfNextSlotWithData(tMemBucket *pMemBucket, int32_t slotIdx) { +static int32_t getMinMaxEntryOfNextSlotWithData(tMemBucket *pMemBucket, int32_t slotIdx, MinMaxEntry *next) { int32_t j = slotIdx + 1; while (j < pMemBucket->numOfSlots && (pMemBucket->pSlots[j].info.size == 0)) { ++j; } - ASSERT(j < pMemBucket->numOfSlots); - return pMemBucket->pSlots[j].range; + if (j >= pMemBucket->numOfSlots) { + qError("getMinMaxEntryOfNextSlotWithData can not get valid slot, start with slotIdx:%d", slotIdx); + return TSDB_CODE_FUNC_PERCENTILE_ERROR; + } + *next = pMemBucket->pSlots[j].range; + return TSDB_CODE_SUCCESS; } static bool isIdenticalData(tMemBucket *pMemBucket, int32_t index); static double getIdenticalDataVal(tMemBucket *pMemBucket, int32_t slotIndex) { - ASSERT(isIdenticalData(pMemBucket, slotIndex)); - tMemBucketSlot *pSlot = &pMemBucket->pSlots[slotIndex]; double finalResult = 0.0; @@ -494,7 +529,11 @@ int32_t getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction * now, we need to find the minimum value of the next slot for interpolating the percentile value * j is the last slot of current segment, we need to get the first slot of the next segment. 
*/ - MinMaxEntry next = getMinMaxEntryOfNextSlotWithData(pMemBucket, i); + MinMaxEntry next; + int32_t code = getMinMaxEntryOfNextSlotWithData(pMemBucket, i, &next); + if (TSDB_CODE_SUCCESS != code) { + return code; + } double maxOfThisSlot = 0; double minOfNextSlot = 0; @@ -509,7 +548,10 @@ int32_t getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction minOfNextSlot = (double)next.dMinVal; } - ASSERT(minOfNextSlot > maxOfThisSlot); + if (minOfNextSlot <= maxOfThisSlot) { + qError("getPercentileImpl get minOfNextSlot : %f less equal than maxOfThisSlot : %f", minOfNextSlot, maxOfThisSlot); + return TSDB_CODE_FUNC_PERCENTILE_ERROR; + } *result = (1 - fraction) * maxOfThisSlot + fraction * minOfNextSlot; return TSDB_CODE_SUCCESS; diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 396abf21a7..f2d15abb80 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -724,6 +724,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_TIME_UNIT_TOO_SMALL, "Function time unit c TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_INVALID_VALUE_RANGE, "Function got invalid value range") TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_SETUP_ERROR, "Function set up failed") TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_INVALID_RES_LENGTH, "Function result exceed max length") +TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_HISTOGRAM_ERROR, "Function failed to calculate histogram") +TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_PERCENTILE_ERROR, "Function failed to calculate percentile") //udf TAOS_DEFINE_ERROR(TSDB_CODE_UDF_STOPPING, "udf is stopping") From 5035b3a624e970b805e7d66facd1e0aa66a484c5 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Mon, 19 Aug 2024 17:13:27 +0800 Subject: [PATCH 141/181] fix: merge join destroy table issue --- source/libs/executor/src/mergejoinoperator.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/libs/executor/src/mergejoinoperator.c b/source/libs/executor/src/mergejoinoperator.c index 946a1d2aa5..52b0da7c92 100644 --- a/source/libs/executor/src/mergejoinoperator.c +++ b/source/libs/executor/src/mergejoinoperator.c @@ -1746,6 +1746,9 @@ void destroyGrpArray(void* ppArray) { } void destroyMergeJoinTableCtx(SMJoinTableCtx* pTable) { + if (NULL == pTable) { + return; + } mJoinDestroyCreatedBlks(pTable->createdBlks); taosArrayDestroy(pTable->createdBlks); tSimpleHashCleanup(pTable->pGrpHash); From 800e7c4e7acd19ec33c2524d1fdde61e8ce64495 Mon Sep 17 00:00:00 2001 From: kailixu Date: Mon, 19 Aug 2024 17:27:15 +0800 Subject: [PATCH 142/181] fix: memory leak of geos --- source/client/src/clientMain.c | 2 + source/dnode/mgmt/node_mgmt/src/dmMgmt.c | 2 + source/util/src/tgeosctx.c | 92 +++++++++++++++++------- source/util/src/tsched.c | 2 +- source/util/src/tworker.c | 4 +- 5 files changed, 75 insertions(+), 27 deletions(-) diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 12702a93f3..a403f9d1c2 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -23,6 +23,7 @@ #include "query.h" #include "scheduler.h" #include "tdatablock.h" +#include "tgeosctx.h" #include "tglobal.h" #include "tmsg.h" #include "tref.h" @@ -94,6 +95,7 @@ void taos_cleanup(void) { tmqMgmtClose(); DestroyRegexCache(); + destroyThreadLocalGeosCtx(); tscInfo("all local resources released"); taosCleanupCfg(); diff --git a/source/dnode/mgmt/node_mgmt/src/dmMgmt.c b/source/dnode/mgmt/node_mgmt/src/dmMgmt.c index fdce9fd4c9..1d62d4bd90 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmMgmt.c +++ b/source/dnode/mgmt/node_mgmt/src/dmMgmt.c @@ -19,6 +19,7 @@ #include "index.h" #include 
"qworker.h" #include "tcompression.h" +#include "tgeosctx.h" #include "tglobal.h" #include "tgrant.h" #include "tstream.h" @@ -121,6 +122,7 @@ void dmCleanupDnode(SDnode *pDnode) { streamMetaCleanup(); indexCleanup(); taosConvDestroy(); + destroyThreadLocalGeosCtx(); // compress destroy tsCompressExit(); diff --git a/source/util/src/tgeosctx.c b/source/util/src/tgeosctx.c index a05734c911..47d5cc992b 100644 --- a/source/util/src/tgeosctx.c +++ b/source/util/src/tgeosctx.c @@ -15,38 +15,82 @@ #include "tgeosctx.h" #include "tdef.h" +#include "tlockfree.h" +#include "tlog.h" -static threadlocal SGeosContext tlGeosCtx = {0}; +typedef struct { + SGeosContext *pool; + int32_t capacity; + int32_t size; + SRWLatch lock; +} SGeosContextPool; -SGeosContext* getThreadLocalGeosCtx() { return &tlGeosCtx; } +static SGeosContextPool sGeosPool = {0}; -void destroyThreadLocalGeosCtx() { - if (tlGeosCtx.WKTReader) { - GEOSWKTReader_destroy_r(tlGeosCtx.handle, tlGeosCtx.WKTReader); - tlGeosCtx.WKTReader = NULL; +static threadlocal SGeosContext *tlGeosCtx = NULL; + +SGeosContext *getThreadLocalGeosCtx() { + if (tlGeosCtx) return tlGeosCtx; + + taosWLockLatch(&sGeosPool.lock); + if (sGeosPool.size >= sGeosPool.capacity) { + sGeosPool.capacity += 64; + void *tmp = taosMemoryRealloc(sGeosPool.pool, sGeosPool.capacity * sizeof(SGeosContext)); + if (!tmp) { + taosWUnLockLatch(&sGeosPool.lock); + terrno = TSDB_CODE_OUT_OF_MEMORY; + return NULL; + } + sGeosPool.pool = tmp; + TAOS_MEMSET(sGeosPool.pool + sGeosPool.size, 0, (sGeosPool.capacity - sGeosPool.size) * sizeof(SGeosContext)); } + tlGeosCtx = sGeosPool.pool + sGeosPool.size; + ++sGeosPool.size; + taosWUnLockLatch(&sGeosPool.lock); - if (tlGeosCtx.WKTWriter) { - GEOSWKTWriter_destroy_r(tlGeosCtx.handle, tlGeosCtx.WKTWriter); - tlGeosCtx.WKTWriter = NULL; - } + return tlGeosCtx; +} - if (tlGeosCtx.WKBReader) { - GEOSWKBReader_destroy_r(tlGeosCtx.handle, tlGeosCtx.WKBReader); - tlGeosCtx.WKBReader = NULL; - } +static void destroyGeosCtx(SGeosContext *pCtx) { + if (pCtx) { + if (pCtx->WKTReader) { + GEOSWKTReader_destroy_r(pCtx->handle, pCtx->WKTReader); + pCtx->WKTReader = NULL; + } - if (tlGeosCtx.WKBWriter) { - GEOSWKBWriter_destroy_r(tlGeosCtx.handle, tlGeosCtx.WKBWriter); - tlGeosCtx.WKBWriter = NULL; - } + if (pCtx->WKTWriter) { + GEOSWKTWriter_destroy_r(pCtx->handle, pCtx->WKTWriter); + pCtx->WKTWriter = NULL; + } - if (tlGeosCtx.WKTRegex) { - destroyRegexes(tlGeosCtx.WKTRegex, tlGeosCtx.WKTMatchData); - } + if (pCtx->WKBReader) { + GEOSWKBReader_destroy_r(pCtx->handle, pCtx->WKBReader); + pCtx->WKBReader = NULL; + } - if (tlGeosCtx.handle) { - GEOS_finish_r(tlGeosCtx.handle); - tlGeosCtx.handle = NULL; + if (pCtx->WKBWriter) { + GEOSWKBWriter_destroy_r(pCtx->handle, pCtx->WKBWriter); + pCtx->WKBWriter = NULL; + } + + if (pCtx->WKTRegex) { + destroyRegexes(pCtx->WKTRegex, pCtx->WKTMatchData); + pCtx->WKTRegex = NULL; + pCtx->WKTMatchData = NULL; + } + + if (pCtx->handle) { + GEOS_finish_r(pCtx->handle); + pCtx->handle = NULL; + } } } + +void destroyThreadLocalGeosCtx() { + uInfo("geos ctx is cleaned up"); + if (!sGeosPool.pool) return; + for (int32_t i = 0; i < sGeosPool.size; ++i) { + destroyGeosCtx(sGeosPool.pool + i); + } + taosMemoryFreeClear(sGeosPool.pool); +} \ No newline at end of file diff --git a/source/util/src/tsched.c b/source/util/src/tsched.c index 6779e8dee5..509dba0890 100644 --- a/source/util/src/tsched.c +++ b/source/util/src/tsched.c @@ -178,7 +178,7 @@ void *taosProcessSchedQueue(void *scheduler) { (*(msg.tfp))(msg.ahandle, 
msg.thandle); } - destroyThreadLocalGeosCtx(); + // destroyThreadLocalGeosCtx(); return NULL; } diff --git a/source/util/src/tworker.c b/source/util/src/tworker.c index b2064d6787..2da1abed78 100644 --- a/source/util/src/tworker.c +++ b/source/util/src/tworker.c @@ -105,7 +105,7 @@ static void *tQWorkerThreadFp(SQueueWorker *worker) { taosUpdateItemSize(qinfo.queue, 1); } - destroyThreadLocalGeosCtx(); + // destroyThreadLocalGeosCtx(); DestoryThreadLocalRegComp(); return NULL; @@ -665,7 +665,7 @@ static void *tQueryAutoQWorkerThreadFp(SQueryAutoQWorker *worker) { } } - destroyThreadLocalGeosCtx(); + // destroyThreadLocalGeosCtx(); DestoryThreadLocalRegComp(); return NULL; From 5ce7bcad1e42a37bd3e2fcf8ffb4252120aad5b0 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 19 Aug 2024 17:30:10 +0800 Subject: [PATCH 143/181] refact: remove the return value of function taosCloseRef --- .gitignore | 1 + include/util/tref.h | 3 +-- source/client/src/clientMain.c | 8 ++------ source/client/src/clientTmq.c | 2 +- source/dnode/vnode/src/sma/smaEnv.c | 4 ++-- source/libs/executor/src/executor.c | 2 +- source/libs/index/src/index.c | 2 +- source/libs/nodes/src/nodesUtilFuncs.c | 2 +- source/libs/qworker/src/qwUtil.c | 2 +- source/libs/scheduler/src/schUtil.c | 2 +- source/libs/stream/src/streamMeta.c | 6 +++--- source/libs/transport/src/transComm.c | 2 +- source/libs/wal/src/walMgmt.c | 2 +- source/util/src/tref.c | 6 ++---- 14 files changed, 19 insertions(+), 25 deletions(-) diff --git a/.gitignore b/.gitignore index f8b42f9176..334947a64c 100644 --- a/.gitignore +++ b/.gitignore @@ -134,3 +134,4 @@ tags .clangd *CMakeCache* *CMakeFiles* +.history/ diff --git a/include/util/tref.h b/include/util/tref.h index c4b2ec8fa7..1520ced14e 100644 --- a/include/util/tref.h +++ b/include/util/tref.h @@ -29,8 +29,7 @@ typedef void (*RefFp)(void *); int32_t taosOpenRef(int32_t max, RefFp fp); // close the reference set, refId is the return value by taosOpenRef -// return 0 if success. On error, -1 is returned, and terrno is set appropriately -int32_t taosCloseRef(int32_t rsetId); +void taosCloseRef(int32_t rsetId); // add ref, p is the pointer to resource or pointer ID // return Reference ID(rid) allocated. 
On error, -1 is returned, and terrno is set appropriately diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 12702a93f3..d007dae7f7 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -74,15 +74,11 @@ void taos_cleanup(void) { int32_t id = clientReqRefPool; clientReqRefPool = -1; - if (TSDB_CODE_SUCCESS != taosCloseRef(id)) { - tscWarn("failed to close clientReqRefPool"); - } + taosCloseRef(id); id = clientConnRefPool; clientConnRefPool = -1; - if (TSDB_CODE_SUCCESS != taosCloseRef(id)) { - tscWarn("failed to close clientReqRefPool"); - } + taosCloseRef(id); nodesDestroyAllocatorSet(); cleanupAppInfo(); diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index 91883869e9..c9f166e565 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -1184,7 +1184,7 @@ void tmqMgmtClose(void) { } if (tmqMgmt.rsetId >= 0) { - (void)taosCloseRef(tmqMgmt.rsetId); + taosCloseRef(tmqMgmt.rsetId); tmqMgmt.rsetId = -1; } } diff --git a/source/dnode/vnode/src/sma/smaEnv.c b/source/dnode/vnode/src/sma/smaEnv.c index a219da33db..d90e869bd4 100644 --- a/source/dnode/vnode/src/sma/smaEnv.c +++ b/source/dnode/vnode/src/sma/smaEnv.c @@ -69,7 +69,7 @@ int32_t smaInit() { if (!smaMgmt.refHash || !smaMgmt.tmrHandle) { code = terrno; - (void)taosCloseRef(smaMgmt.rsetId); + taosCloseRef(smaMgmt.rsetId); if (smaMgmt.refHash) { taosHashCleanup(smaMgmt.refHash); smaMgmt.refHash = NULL; @@ -103,7 +103,7 @@ void smaCleanUp() { } if (old == 1) { - (void)taosCloseRef(smaMgmt.rsetId); + taosCloseRef(smaMgmt.rsetId); taosHashCleanup(smaMgmt.refHash); smaMgmt.refHash = NULL; taosTmrCleanUp(smaMgmt.tmrHandle); diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index b1c9207ab7..eb249575fc 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -31,7 +31,7 @@ int32_t exchangeObjRefPool = -1; static void cleanupRefPool() { int32_t ref = atomic_val_compare_exchange_32(&exchangeObjRefPool, exchangeObjRefPool, 0); - (void)taosCloseRef(ref); + taosCloseRef(ref); } static void initRefPool() { diff --git a/source/libs/index/src/index.c b/source/libs/index/src/index.c index 986693ab00..b881d2cac8 100644 --- a/source/libs/index/src/index.c +++ b/source/libs/index/src/index.c @@ -74,7 +74,7 @@ void indexCleanup() { // refacto later taosCleanUpScheduler(indexQhandle); taosMemoryFreeClear(indexQhandle); - (void)taosCloseRef(indexRefMgt); + taosCloseRef(indexRefMgt); } typedef struct SIdxColInfo { diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index 6b06530b3e..c1afc4afb3 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -238,7 +238,7 @@ void nodesDestroyAllocatorSet() { (void)taosRemoveRef(g_allocatorReqRefPool, refId); pAllocator = taosIterateRef(g_allocatorReqRefPool, refId); } - (void)taosCloseRef(g_allocatorReqRefPool); + taosCloseRef(g_allocatorReqRefPool); } } diff --git a/source/libs/qworker/src/qwUtil.c b/source/libs/qworker/src/qwUtil.c index 441714313c..4b9067a191 100644 --- a/source/libs/qworker/src/qwUtil.c +++ b/source/libs/qworker/src/qwUtil.c @@ -564,7 +564,7 @@ int32_t qwSaveTbVersionInfo(qTaskInfo_t pTaskInfo, SQWTaskCtx *ctx) { void qwCloseRef(void) { taosWLockLatch(&gQwMgmt.lock); if (atomic_load_32(&gQwMgmt.qwNum) <= 0 && gQwMgmt.qwRef >= 0) { - (void)taosCloseRef(gQwMgmt.qwRef); // ignore error + taosCloseRef(gQwMgmt.qwRef); // ignore error 
gQwMgmt.qwRef = -1; } taosWUnLockLatch(&gQwMgmt.lock); diff --git a/source/libs/scheduler/src/schUtil.c b/source/libs/scheduler/src/schUtil.c index 811890dde5..01249dbb98 100644 --- a/source/libs/scheduler/src/schUtil.c +++ b/source/libs/scheduler/src/schUtil.c @@ -273,7 +273,7 @@ void schCloseJobRef(void) { } if (schMgmt.jobRef >= 0) { - (void)taosCloseRef(schMgmt.jobRef); + taosCloseRef(schMgmt.jobRef); schMgmt.jobRef = -1; } } diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index a9976760b6..bc12f4307b 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -65,9 +65,9 @@ static void streamMetaEnvInit() { void streamMetaInit() { (void)taosThreadOnce(&streamMetaModuleInit, streamMetaEnvInit); } void streamMetaCleanup() { - (void)taosCloseRef(streamBackendId); - (void)taosCloseRef(streamBackendCfWrapperId); - (void)taosCloseRef(streamMetaId); + taosCloseRef(streamBackendId); + taosCloseRef(streamBackendCfWrapperId); + taosCloseRef(streamMetaId); metaRefMgtCleanup(); streamTimerCleanUp(); diff --git a/source/libs/transport/src/transComm.c b/source/libs/transport/src/transComm.c index b940c494d8..5d82e157b3 100644 --- a/source/libs/transport/src/transComm.c +++ b/source/libs/transport/src/transComm.c @@ -736,7 +736,7 @@ int32_t transOpenRefMgt(int size, void (*func)(void*)) { } void transCloseRefMgt(int32_t mgt) { // close ref - (void)taosCloseRef(mgt); + taosCloseRef(mgt); } int64_t transAddExHandle(int32_t refMgt, void* p) { // acquire extern handle diff --git a/source/libs/wal/src/walMgmt.c b/source/libs/wal/src/walMgmt.c index 581a63671c..e1d31ce113 100644 --- a/source/libs/wal/src/walMgmt.c +++ b/source/libs/wal/src/walMgmt.c @@ -75,7 +75,7 @@ void walCleanUp() { if (old == 1) { walStopThread(); - TAOS_UNUSED(taosCloseRef(tsWal.refSetId)); + taosCloseRef(tsWal.refSetId); wInfo("wal module is cleaned up"); atomic_store_8(&tsWal.inited, 0); } diff --git a/source/util/src/tref.c b/source/util/src/tref.c index f1d9a24757..4b1b477e06 100644 --- a/source/util/src/tref.c +++ b/source/util/src/tref.c @@ -110,13 +110,13 @@ int32_t taosOpenRef(int32_t max, RefFp fp) { return rsetId; } -int32_t taosCloseRef(int32_t rsetId) { +void taosCloseRef(int32_t rsetId) { SRefSet *pSet; int32_t deleted = 0; if (rsetId < 0 || rsetId >= TSDB_REF_OBJECTS) { uTrace("rsetId:%d is invalid, out of range", rsetId); - return terrno = TSDB_CODE_REF_INVALID_ID; + return; } pSet = tsRefSetList + rsetId; @@ -134,8 +134,6 @@ int32_t taosCloseRef(int32_t rsetId) { (void)taosThreadMutexUnlock(&tsRefMutex); if (deleted) taosDecRsetCount(pSet); - - return 0; } int64_t taosAddRef(int32_t rsetId, void *p) { From c7f9c82950c6197bba0c0afcd1cf71898ca7c3ef Mon Sep 17 00:00:00 2001 From: menshibin Date: Mon, 19 Aug 2024 18:02:50 +0800 Subject: [PATCH 144/181] add kafka config propose --- .../20-third-party/01-collection/11-kafka.md | 37 +++++++++++++------ 1 file changed, 25 insertions(+), 12 deletions(-) diff --git a/docs/zh/20-third-party/01-collection/11-kafka.md b/docs/zh/20-third-party/01-collection/11-kafka.md index 651ef860cb..2e4677ca31 100644 --- a/docs/zh/20-third-party/01-collection/11-kafka.md +++ b/docs/zh/20-third-party/01-collection/11-kafka.md @@ -27,22 +27,35 @@ TDengine Source Connector 用于把数据实时地从 TDengine 读出来发送 ## 安装 Kafka -在任意目录下执行: +- 在任意目录下执行: -```shell -curl -O https://downloads.apache.org/kafka/3.4.0/kafka_2.13-3.4.0.tgz -tar xzf kafka_2.13-3.4.0.tgz -C /opt/ -ln -s /opt/kafka_2.13-3.4.0 /opt/kafka -``` + ```shell + curl -O 
https://downloads.apache.org/kafka/3.4.0/kafka_2.13-3.4.0.tgz + tar xzf kafka_2.13-3.4.0.tgz -C /opt/ + ln -s /opt/kafka_2.13-3.4.0 /opt/kafka + ``` -然后需要把 `$KAFKA_HOME/bin` 目录加入 PATH。 +- 然后需要把 `$KAFKA_HOME/bin` 目录加入 PATH。 -```title=".profile" -export KAFKA_HOME=/opt/kafka -export PATH=$PATH:$KAFKA_HOME/bin -``` + ```title=".profile" + export KAFKA_HOME=/opt/kafka + export PATH=$PATH:$KAFKA_HOME/bin + ``` + 以上脚本可以追加到当前用户的 profile 文件(~/.profile 或 ~/.bash_profile) -以上脚本可以追加到当前用户的 profile 文件(~/.profile 或 ~/.bash_profile) +- 提升 Kafka 吞吐率的建议配置 + + 1. 打开 KAFKA_HOME/config/producer.properties 配置文件。 + 2. 参数说明及建议如下: + + | **参数** | **参数说明** | **设置建议** | + | --------| --------------------------------- | -------------- | + | producer.type | 此参数用于设置消息的发送方式,默认值为 `sync` 表示同步发送,`async` 表示异步发送。采用异步发送能够提升消息发送的吞吐量。 | async | + | request.required.acks | 参数用于配置生产者发送消息后需要等待的确认数量。当设置为1时,表示只要领导者副本成功写入消息就会给生产者发送确认,而无需等待集群中的其他副本写入成功。这种设置可以在一定程度上保证消息的可靠性,同时也能保证一定的吞吐量。因为不需要等待所有副本都写入成功,所以可以减少生产者的等待时间,提高发送消息的效率。|1| + | max.request.size| 该参数决定了生产者在一次请求中可以发送的最大数据量。其默认值为 1048576,也就是 1M。如果设置得太小,可能会导致频繁的网络请求,降低吞吐量。如果设置得太大,可能会导致内存占用过高,或者在网络状况不佳时增加请求失败的概率。建议设置为 100M。|104857600| + |batch.size| 此参数用于设定 batch 的大小,默认值为 16384,即 16KB。在消息发送过程中,发送到 Kafka 缓冲区中的消息会被划分成一个个的 batch。故而减小 batch 大小有助于降低消息延迟,而增大 batch 大小则有利于提升吞吐量,可根据实际的数据量大小进行合理配置。可根据实际情况进行调整,建议设置为 512K。|524288| + | buffer.memory| 此参数用于设置生产者缓冲待发送消息的内存总量。较大的缓冲区可以允许生产者积累更多的消息后批量发送,提高吞吐量,但也会增加延迟和内存使用。可根据机器资源来配置,建议配置为 1G。|1073741824| + ## 安装 TDengine Connector 插件 From e296a2a076e2b443ca5e0a03190a7bad2c02e0f5 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 19 Aug 2024 18:04:29 +0800 Subject: [PATCH 145/181] fix: init refId before setting env start --- source/libs/sync/inc/syncEnv.h | 4 --- source/libs/sync/src/syncEnv.c | 58 ++++++++++++++++++++++++---------- 2 files changed, 41 insertions(+), 21 deletions(-) diff --git a/source/libs/sync/inc/syncEnv.h b/source/libs/sync/inc/syncEnv.h index 4dc5f58cfe..41ad915333 100644 --- a/source/libs/sync/inc/syncEnv.h +++ b/source/libs/sync/inc/syncEnv.h @@ -42,10 +42,6 @@ typedef struct SSyncEnv { // timer manager tmr_h pTimerManager; - - // other resources shared by SyncNodes - // ... 
- } SSyncEnv; SSyncEnv* syncEnv(); diff --git a/source/libs/sync/src/syncEnv.c b/source/libs/sync/src/syncEnv.c index 8d1e2cfebd..6d16a76d98 100644 --- a/source/libs/sync/src/syncEnv.c +++ b/source/libs/sync/src/syncEnv.c @@ -42,23 +42,25 @@ int32_t syncInit() { // start tmr thread gSyncEnv.pTimerManager = taosTmrInit(1000, 50, 10000, "SYNC-ENV"); - atomic_store_8(&gSyncEnv.isStart, 1); gNodeRefId = taosOpenRef(200, (RefFp)syncNodeClose); if (gNodeRefId < 0) { - sError("failed to init node ref"); + sError("failed to init node rset"); syncCleanUp(); return TSDB_CODE_SYN_WRONG_REF; } + sDebug("sync node rset is open, rsetId:%d", gNodeRefId); gHbDataRefId = taosOpenRef(200, (RefFp)syncHbTimerDataFree); if (gHbDataRefId < 0) { - sError("failed to init hb-data ref"); + sError("failed to init hbdata rset"); syncCleanUp(); return TSDB_CODE_SYN_WRONG_REF; } - sDebug("sync rsetId:%d is open", gNodeRefId); + sDebug("sync hbdata rset is open, rsetId:%d", gHbDataRefId); + + atomic_store_8(&gSyncEnv.isStart, 1); return 0; } @@ -68,32 +70,40 @@ void syncCleanUp() { memset(&gSyncEnv, 0, sizeof(SSyncEnv)); if (gNodeRefId != -1) { - sDebug("sync rsetId:%d is closed", gNodeRefId); - (void)taosCloseRef(gNodeRefId); + sDebug("sync node rset is closed, rsetId:%d", gNodeRefId); + taosCloseRef(gNodeRefId); gNodeRefId = -1; } if (gHbDataRefId != -1) { - sDebug("sync rsetId:%d is closed", gHbDataRefId); - (void)taosCloseRef(gHbDataRefId); + sDebug("sync hbdata rset is closed, rsetId:%d", gHbDataRefId); + taosCloseRef(gHbDataRefId); gHbDataRefId = -1; } } int64_t syncNodeAdd(SSyncNode *pNode) { pNode->rid = taosAddRef(gNodeRefId, pNode); - if (pNode->rid < 0) return -1; + if (pNode->rid < 0) { + terrno = TSDB_CODE_SYN_WRONG_REF; + return -1; + } - sDebug("vgId:%d, sync rid:%" PRId64 " is added to rsetId:%d", pNode->vgId, pNode->rid, gNodeRefId); + sDebug("vgId:%d, sync node refId:%" PRId64 " is added to rsetId:%d", pNode->vgId, pNode->rid, gNodeRefId); return pNode->rid; } -void syncNodeRemove(int64_t rid) { (void)taosRemoveRef(gNodeRefId, rid); } +void syncNodeRemove(int64_t rid) { + sDebug("sync node refId:%" PRId64 " is removed from rsetId:%d", rid, gNodeRefId); + if (rid > 0) { + (void)taosRemoveRef(gNodeRefId, rid); + } +} SSyncNode *syncNodeAcquire(int64_t rid) { SSyncNode *pNode = taosAcquireRef(gNodeRefId, rid); if (pNode == NULL) { - sError("failed to acquire node from refId:%" PRId64, rid); + sError("failed to acquire sync node from refId:%" PRId64 ", rsetId:%d", rid, gNodeRefId); terrno = TSDB_CODE_SYN_INTERNAL_ERROR; } @@ -101,28 +111,42 @@ SSyncNode *syncNodeAcquire(int64_t rid) { } void syncNodeRelease(SSyncNode *pNode) { - if (pNode) (void)taosReleaseRef(gNodeRefId, pNode->rid); + if (pNode) { + (void)taosReleaseRef(gNodeRefId, pNode->rid); + } } int64_t syncHbTimerDataAdd(SSyncHbTimerData *pData) { pData->rid = taosAddRef(gHbDataRefId, pData); - if (pData->rid < 0) return TSDB_CODE_SYN_WRONG_REF; + if (pData->rid < 0) { + terrno = TSDB_CODE_SYN_WRONG_REF; + return -1; + } + return pData->rid; } -void syncHbTimerDataRemove(int64_t rid) { (void)taosRemoveRef(gHbDataRefId, rid); } +void syncHbTimerDataRemove(int64_t rid) { + if (rid > 0) { + (void)taosRemoveRef(gHbDataRefId, rid); + } +} SSyncHbTimerData *syncHbTimerDataAcquire(int64_t rid) { SSyncHbTimerData *pData = taosAcquireRef(gHbDataRefId, rid); if (pData == NULL && rid > 0) { - sInfo("failed to acquire hb-timer-data from refId:%" PRId64, rid); + sInfo("failed to acquire hbdata from refId:%" PRId64 ", rsetId:%d", rid, gHbDataRefId); terrno = 
TSDB_CODE_SYN_INTERNAL_ERROR; } return pData; } -void syncHbTimerDataRelease(SSyncHbTimerData *pData) { (void)taosReleaseRef(gHbDataRefId, pData->rid); } +void syncHbTimerDataRelease(SSyncHbTimerData *pData) { + if (pData) { + (void)taosReleaseRef(gHbDataRefId, pData->rid); + } +} #if 0 void syncEnvStartTimer() { From aef00cfb8338e0cebe43f7d77cb55c0407fcd28c Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 19 Aug 2024 18:04:50 +0800 Subject: [PATCH 146/181] refact: remove unused variables --- source/libs/sync/inc/syncEnv.h | 15 +----------- source/libs/sync/src/syncEnv.c | 45 ---------------------------------- 2 files changed, 1 insertion(+), 59 deletions(-) diff --git a/source/libs/sync/inc/syncEnv.h b/source/libs/sync/inc/syncEnv.h index 41ad915333..0376920e8a 100644 --- a/source/libs/sync/inc/syncEnv.h +++ b/source/libs/sync/inc/syncEnv.h @@ -23,25 +23,12 @@ extern "C" { #include "syncInt.h" #define TIMER_MAX_MS 0x7FFFFFFF -#define ENV_TICK_TIMER_MS 1000 #define PING_TIMER_MS 5000 -#define ELECT_TIMER_MS_MIN 2500 -#define HEARTBEAT_TIMER_MS 1000 #define HEARTBEAT_TICK_NUM 20 typedef struct SSyncEnv { uint8_t isStart; - - // tick timer - tmr_h pEnvTickTimer; - int32_t envTickTimerMS; - uint64_t envTickTimerLogicClock; // if use queue, should pass logic clock into queue item - uint64_t envTickTimerLogicClockUser; - TAOS_TMR_CALLBACK FpEnvTickTimer; // Timer Fp - uint64_t envTickTimerCounter; - - // timer manager - tmr_h pTimerManager; + tmr_h pTimerManager; } SSyncEnv; SSyncEnv* syncEnv(); diff --git a/source/libs/sync/src/syncEnv.c b/source/libs/sync/src/syncEnv.c index 6d16a76d98..7474cea83d 100644 --- a/source/libs/sync/src/syncEnv.c +++ b/source/libs/sync/src/syncEnv.c @@ -21,7 +21,6 @@ static SSyncEnv gSyncEnv = {0}; static int32_t gNodeRefId = -1; static int32_t gHbDataRefId = -1; -static void syncEnvTick(void *param, void *tmrId); SSyncEnv *syncEnv() { return &gSyncEnv; } @@ -34,13 +33,6 @@ int32_t syncInit() { taosSeedRand(seed); memset(&gSyncEnv, 0, sizeof(SSyncEnv)); - gSyncEnv.envTickTimerCounter = 0; - gSyncEnv.envTickTimerMS = ENV_TICK_TIMER_MS; - gSyncEnv.FpEnvTickTimer = syncEnvTick; - atomic_store_64(&gSyncEnv.envTickTimerLogicClock, 0); - atomic_store_64(&gSyncEnv.envTickTimerLogicClockUser, 0); - - // start tmr thread gSyncEnv.pTimerManager = taosTmrInit(1000, 50, 10000, "SYNC-ENV"); gNodeRefId = taosOpenRef(200, (RefFp)syncNodeClose); @@ -147,40 +139,3 @@ void syncHbTimerDataRelease(SSyncHbTimerData *pData) { (void)taosReleaseRef(gHbDataRefId, pData->rid); } } - -#if 0 -void syncEnvStartTimer() { - taosTmrReset(gSyncEnv.FpEnvTickTimer, gSyncEnv.envTickTimerMS, &gSyncEnv, gSyncEnv.pTimerManager, - &gSyncEnv.pEnvTickTimer); - atomic_store_64(&gSyncEnv.envTickTimerLogicClock, gSyncEnv.envTickTimerLogicClockUser); -} - -void syncEnvStopTimer() { - int32_t ret = 0; - atomic_add_fetch_64(&gSyncEnv.envTickTimerLogicClockUser, 1); - taosTmrStop(gSyncEnv.pEnvTickTimer); - gSyncEnv.pEnvTickTimer = NULL; - return ret; -} -#endif - -static void syncEnvTick(void *param, void *tmrId) { -#if 0 - SSyncEnv *pSyncEnv = param; - if (atomic_load_64(&gSyncEnv.envTickTimerLogicClockUser) <= atomic_load_64(&gSyncEnv.envTickTimerLogicClock)) { - gSyncEnv.envTickTimerCounter++; - sTrace("syncEnvTick do ... 
envTickTimerLogicClockUser:%" PRIu64 ", envTickTimerLogicClock:%" PRIu64 - ", envTickTimerCounter:%" PRIu64 ", envTickTimerMS:%d, tmrId:%p", - gSyncEnv.envTickTimerLogicClockUser, gSyncEnv.envTickTimerLogicClock, gSyncEnv.envTickTimerCounter, - gSyncEnv.envTickTimerMS, tmrId); - - // do something, tick ... - taosTmrReset(syncEnvTick, gSyncEnv.envTickTimerMS, pSyncEnv, gSyncEnv.pTimerManager, &gSyncEnv.pEnvTickTimer); - } else { - sTrace("syncEnvTick pass ... envTickTimerLogicClockUser:%" PRIu64 ", envTickTimerLogicClock:%" PRIu64 - ", envTickTimerCounter:%" PRIu64 ", envTickTimerMS:%d, tmrId:%p", - gSyncEnv.envTickTimerLogicClockUser, gSyncEnv.envTickTimerLogicClock, gSyncEnv.envTickTimerCounter, - gSyncEnv.envTickTimerMS, tmrId); - } -#endif -} From 12dbcf561201937c464cbc0ef1054138a52e05a3 Mon Sep 17 00:00:00 2001 From: gccgdb1234 Date: Mon, 19 Aug 2024 18:26:34 +0800 Subject: [PATCH 147/181] docs: add privilege control --- .../03-taos-sql/{25-grant.md => 25-user.md} | 0 docs/zh/14-reference/03-taos-sql/26-grant.md | 168 ++++++++++++++++++ .../03-taos-sql/{26-udf.md => 27-udf.md} | 0 .../{27-indexing.md => 28-index.md} | 0 .../{28-recovery.md => 29-recovery.md} | 0 .../{29-changes.md => 30-changes.md} | 0 .../03-taos-sql/{30-join.md => 31-join.md} | 0 .../{31-compress.md => 32-compress.md} | 0 .../03-taos-sql/{32-view.md => 33-view.md} | 0 9 files changed, 168 insertions(+) rename docs/zh/14-reference/03-taos-sql/{25-grant.md => 25-user.md} (100%) create mode 100644 docs/zh/14-reference/03-taos-sql/26-grant.md rename docs/zh/14-reference/03-taos-sql/{26-udf.md => 27-udf.md} (100%) rename docs/zh/14-reference/03-taos-sql/{27-indexing.md => 28-index.md} (100%) rename docs/zh/14-reference/03-taos-sql/{28-recovery.md => 29-recovery.md} (100%) rename docs/zh/14-reference/03-taos-sql/{29-changes.md => 30-changes.md} (100%) rename docs/zh/14-reference/03-taos-sql/{30-join.md => 31-join.md} (100%) rename docs/zh/14-reference/03-taos-sql/{31-compress.md => 32-compress.md} (100%) rename docs/zh/14-reference/03-taos-sql/{32-view.md => 33-view.md} (100%) diff --git a/docs/zh/14-reference/03-taos-sql/25-grant.md b/docs/zh/14-reference/03-taos-sql/25-user.md similarity index 100% rename from docs/zh/14-reference/03-taos-sql/25-grant.md rename to docs/zh/14-reference/03-taos-sql/25-user.md diff --git a/docs/zh/14-reference/03-taos-sql/26-grant.md b/docs/zh/14-reference/03-taos-sql/26-grant.md new file mode 100644 index 0000000000..c3fd1790d0 --- /dev/null +++ b/docs/zh/14-reference/03-taos-sql/26-grant.md @@ -0,0 +1,168 @@ +--- +toc_max_heading_level: 4 +title: 权限管理 +--- + +TDengine 中的权限管理分为[用户管理](../user)、数据库授权管理以及消息订阅授权管理,本节重点说明数据库授权和订阅授权。 + +## 数据库访问授权 + +系统管理员可以根据业务需要对系统中的每个用户针对每个数据库进行特定的授权,以防止业务数据被不恰当的用户读取或修改。对某个用户进行数据库访问授权的语法如下: + +```sql +GRANT privileges ON priv_level TO user_name + +privileges : { + ALL + | priv_type [, priv_type] ... +} + +priv_type : { + READ + | WRITE +} + +priv_level : { + dbname.tbname + | dbname.* + | *.* +} +``` + +对数据库的访问权限包含读和写两种权限,它们可以被分别授予,也可以被同时授予。 + +说明 + +- priv_level 格式中 "." 之前为数据库名称, "." 之后为表名称,意思为表级别的授权控制。如果 "." 之后为 "\*" ,意为 "." 
前所指定的数据库中的所有表 +- "dbname.\*" 意思是名为 "dbname" 的数据库中的所有表 +- "\*.\*" 意思是所有数据库名中的所有表 + +### 数据库权限说明 + +对 root 用户和普通用户的权限的说明如下表 + +| 用户 | 描述 | 权限说明 | +| -------- | ---------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 超级用户 | 只有 root 是超级用户 | DB 外部 所有操作权限,例如user、dnode、udf、qnode等的CRUD DB 权限,包括 创建 删除 更新,例如修改 Option,移动 Vgruop等 读 写 Enable/Disable 用户 | +| 普通用户 | 除 root 以外的其它用户均为普通用户 | 在可读的 DB 中,普通用户可以进行读操作 select describe show subscribe 在可写 DB 的内部,用户可以进行写操作: 创建、删除、修改 超级表 创建、删除、修改 子表 创建、删除、修改 topic 写入数据 被限制系统信息时,不可进行如下操作 show dnode、mnode、vgroups、qnode、snode 修改用户包括自身密码 show db时只能看到自己的db,并且不能看到vgroups、副本、cache等信息 无论是否被限制系统信息,都可以 管理 udf 可以创建 DB 自己创建的 DB 具备所有权限 非自己创建的 DB ,参照读、写列表中的权限 | + +## 消息订阅授权 + +任意用户都可以在自己拥有读权限的数据库上创建 topic。超级用户 root 可以在任意数据库上创建 topic。每个 topic 的订阅权限都可以被独立授权给任何用户,不管该用户是否拥有该数据库的访问权限。删除 topic 只能由 root 用户或者该 topic 的创建者进行。topic 只能由超级用户、topic的创建者或者被显式授予 subscribe 权限的用户订阅。 + +具体的 SQL 语法如下: + +```sql +GRANT SUBSCRIBE ON topic_name TO user_name + +REVOKE SUBSCRIBE ON topic_name FROM user_name +``` + +## 基于标签的授权(表级授权) + +从 TDengine 3.0.5.0 开始,我们支持按标签授权某个超级表中部分特定的子表。具体的 SQL 语法如下。 + +```sql +GRANT privileges ON priv_level [WITH tag_condition] TO user_name + +privileges : { + ALL + | priv_type [, priv_type] ... +} + +priv_type : { + READ + | WRITE +} + +priv_level : { + dbname.tbname + | dbname.* + | *.* +} + +REVOKE privileges ON priv_level [WITH tag_condition] FROM user_name + +privileges : { + ALL + | priv_type [, priv_type] ... +} + +priv_type : { + READ + | WRITE +} + +priv_level : { + dbname.tbname + | dbname.* + | *.* +} +``` + +上面 SQL 的语义为: + +- 用户可以通过 dbname.tbname 来为指定的表(包括超级表和普通表)授予或回收其读写权限,不支持直接对子表授予或回收权限。 +- 用户可以通过 dbname.tbname 和 WITH 子句来为符合条件的所有子表授予或回收其读写权限。使用 WITH 子句时,权限级别必须为超级表。 + +## 表级权限和数据库权限的关系 + +下表列出了在不同的数据库授权和表级授权的组合下产生的实际权限。 + +| | **表无授权** | **表读授权** | **表读授权有标签条件** | **表写授权** | **表写授权有标签条件** | +| ---------------- | ---------------- | ---------------------------------------- | ------------------------------------------------------------ | ---------------------------------------- | ---------------------------------------------------------- | +| **数据库无授权** | 无授权 | 对此表有读权限,对数据库下的其他表无权限 | 对此表符合标签权限的子表有读权限,对数据库下的其他表无权限 | 对此表有写权限,对数据库下的其他表无权限 | 对此表符合标签权限的子表有写权限,对数据库下的其他表无权限 | +| **数据库读授权** | 对所有表有读权限 | 对所有表有读权限 | 对此表符合标签权限的子表有读权限,对数据库下的其他表有读权限 | 对此表有写权限,对所有表有读权限 | 对此表符合标签权限的子表有写权限,所有表有读权限 | +| **数据库写授权** | 对所有表有写权限 | 对此表有读权限,对所有表有写权限 | 对此表符合标签权限的子表有读权限,对所有表有写权限 | 对所有表有写权限 | 对此表符合标签权限的子表有写权限,数据库下的其他表有写权限 | + + +## 查看用户授权 + +使用下面的命令可以显示一个用户所拥有的授权: + +```sql +show user privileges +``` + +## 撤销授权 + +1. 撤销数据库访问的授权 + +```sql +REVOKE privileges ON priv_level FROM user_name + +privileges : { + ALL + | priv_type [, priv_type] ... +} + +priv_type : { + READ + | WRITE +} + +priv_level : { + dbname.tbname + | dbname.* + | *.* +} +``` + +2. 撤销数据订阅的授权 + +```sql +REVOKE privileges ON priv_level FROM user_name + +privileges : { + ALL + | priv_type [, priv_type] ... 
+} + +priv_type : { + SUBSCRIBE +} + +priv_level : { + topic_name +} +``` diff --git a/docs/zh/14-reference/03-taos-sql/26-udf.md b/docs/zh/14-reference/03-taos-sql/27-udf.md similarity index 100% rename from docs/zh/14-reference/03-taos-sql/26-udf.md rename to docs/zh/14-reference/03-taos-sql/27-udf.md diff --git a/docs/zh/14-reference/03-taos-sql/27-indexing.md b/docs/zh/14-reference/03-taos-sql/28-index.md similarity index 100% rename from docs/zh/14-reference/03-taos-sql/27-indexing.md rename to docs/zh/14-reference/03-taos-sql/28-index.md diff --git a/docs/zh/14-reference/03-taos-sql/28-recovery.md b/docs/zh/14-reference/03-taos-sql/29-recovery.md similarity index 100% rename from docs/zh/14-reference/03-taos-sql/28-recovery.md rename to docs/zh/14-reference/03-taos-sql/29-recovery.md diff --git a/docs/zh/14-reference/03-taos-sql/29-changes.md b/docs/zh/14-reference/03-taos-sql/30-changes.md similarity index 100% rename from docs/zh/14-reference/03-taos-sql/29-changes.md rename to docs/zh/14-reference/03-taos-sql/30-changes.md diff --git a/docs/zh/14-reference/03-taos-sql/30-join.md b/docs/zh/14-reference/03-taos-sql/31-join.md similarity index 100% rename from docs/zh/14-reference/03-taos-sql/30-join.md rename to docs/zh/14-reference/03-taos-sql/31-join.md diff --git a/docs/zh/14-reference/03-taos-sql/31-compress.md b/docs/zh/14-reference/03-taos-sql/32-compress.md similarity index 100% rename from docs/zh/14-reference/03-taos-sql/31-compress.md rename to docs/zh/14-reference/03-taos-sql/32-compress.md diff --git a/docs/zh/14-reference/03-taos-sql/32-view.md b/docs/zh/14-reference/03-taos-sql/33-view.md similarity index 100% rename from docs/zh/14-reference/03-taos-sql/32-view.md rename to docs/zh/14-reference/03-taos-sql/33-view.md From 128adaa3b4adfa23e82b1f0b8525c229282a87e6 Mon Sep 17 00:00:00 2001 From: menshibin Date: Mon, 19 Aug 2024 18:28:53 +0800 Subject: [PATCH 148/181] add kafka config propose --- .../20-third-party/01-collection/11-kafka.md | 29 ++++++++++--------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/docs/zh/20-third-party/01-collection/11-kafka.md b/docs/zh/20-third-party/01-collection/11-kafka.md index 2e4677ca31..e9ac68251c 100644 --- a/docs/zh/20-third-party/01-collection/11-kafka.md +++ b/docs/zh/20-third-party/01-collection/11-kafka.md @@ -43,20 +43,6 @@ TDengine Source Connector 用于把数据实时地从 TDengine 读出来发送 ``` 以上脚本可以追加到当前用户的 profile 文件(~/.profile 或 ~/.bash_profile) -- 提升 Kafka 吞吐率的建议配置 - - 1. 打开 KAFKA_HOME/config/producer.properties 配置文件。 - 2. 
参数说明及建议如下: - - | **参数** | **参数说明** | **设置建议** | - | --------| --------------------------------- | -------------- | - | producer.type | 此参数用于设置消息的发送方式,默认值为 `sync` 表示同步发送,`async` 表示异步发送。采用异步发送能够提升消息发送的吞吐量。 | async | - | request.required.acks | 参数用于配置生产者发送消息后需要等待的确认数量。当设置为1时,表示只要领导者副本成功写入消息就会给生产者发送确认,而无需等待集群中的其他副本写入成功。这种设置可以在一定程度上保证消息的可靠性,同时也能保证一定的吞吐量。因为不需要等待所有副本都写入成功,所以可以减少生产者的等待时间,提高发送消息的效率。|1| - | max.request.size| 该参数决定了生产者在一次请求中可以发送的最大数据量。其默认值为 1048576,也就是 1M。如果设置得太小,可能会导致频繁的网络请求,降低吞吐量。如果设置得太大,可能会导致内存占用过高,或者在网络状况不佳时增加请求失败的概率。建议设置为 100M。|104857600| - |batch.size| 此参数用于设定 batch 的大小,默认值为 16384,即 16KB。在消息发送过程中,发送到 Kafka 缓冲区中的消息会被划分成一个个的 batch。故而减小 batch 大小有助于降低消息延迟,而增大 batch 大小则有利于提升吞吐量,可根据实际的数据量大小进行合理配置。可根据实际情况进行调整,建议设置为 512K。|524288| - | buffer.memory| 此参数用于设置生产者缓冲待发送消息的内存总量。较大的缓冲区可以允许生产者积累更多的消息后批量发送,提高吞吐量,但也会增加延迟和内存使用。可根据机器资源来配置,建议配置为 1G。|1073741824| - - ## 安装 TDengine Connector 插件 ### 编译插件 @@ -338,6 +324,21 @@ curl -X DELETE http://localhost:8083/connectors/TDengineSinkConnector curl -X DELETE http://localhost:8083/connectors/TDengineSourceConnector ``` +### 性能调优 + +如果在从 TDengine 同步数据到 Kafka 的过程中发现性能不达预期,可以尝试使用如下参数提升 Kafka 的写入吞吐量。 + +1. 打开 KAFKA_HOME/config/producer.properties 配置文件。 +2. 参数说明及配置建议如下: + | **参数** | **参数说明** | **设置建议** | + | --------| --------------------------------- | -------------- | + | producer.type | 此参数用于设置消息的发送方式,默认值为 `sync` 表示同步发送,`async` 表示异步发送。采用异步发送能够提升消息发送的吞吐量。 | async | + | request.required.acks | 参数用于配置生产者发送消息后需要等待的确认数量。当设置为1时,表示只要领导者副本成功写入消息就会给生产者发送确认,而无需等待集群中的其他副本写入成功。这种设置可以在一定程度上保证消息的可靠性,同时也能保证一定的吞吐量。因为不需要等待所有副本都写入成功,所以可以减少生产者的等待时间,提高发送消息的效率。|1| + | max.request.size| 该参数决定了生产者在一次请求中可以发送的最大数据量。其默认值为 1048576,也就是 1M。如果设置得太小,可能会导致频繁的网络请求,降低吞吐量。如果设置得太大,可能会导致内存占用过高,或者在网络状况不佳时增加请求失败的概率。建议设置为 100M。|104857600| + |batch.size| 此参数用于设定 batch 的大小,默认值为 16384,即 16KB。在消息发送过程中,发送到 Kafka 缓冲区中的消息会被划分成一个个的 batch。故而减小 batch 大小有助于降低消息延迟,而增大 batch 大小则有利于提升吞吐量,可根据实际的数据量大小进行合理配置。可根据实际情况进行调整,建议设置为 512K。|524288| + | buffer.memory| 此参数用于设置生产者缓冲待发送消息的内存总量。较大的缓冲区可以允许生产者积累更多的消息后批量发送,提高吞吐量,但也会增加延迟和内存使用。可根据机器资源来配置,建议配置为 1G。|1073741824| + + ## 配置参考 ### 通用配置 From 28b9611f380062aeea0a6120fd62b12c2d27fbfa Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 19 Aug 2024 18:33:39 +0800 Subject: [PATCH 149/181] fix(tsdb): return if no data. 
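The hunk below makes nextRowFromSttBlocks() return as soon as the stt merge tree reports that no further row is available, rather than falling through to tMergeTreeGetRow() on an exhausted iterator. A minimal sketch of that pattern, with invented names standing in for the real tsdb structures:

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy iterator standing in for the stt merge tree (illustrative only). */
typedef struct {
  int rows[3];
  int pos;
  int num;
} Iter;

static int iterNext(Iter *it, bool *hasNext) {
  *hasNext = (it->pos < it->num);
  if (*hasNext) it->pos++;
  return 0; /* 0 == success */
}

/* Pattern applied by the fix: bail out once the iterator is exhausted
 * (or fails), so the caller never reads a row that does not exist. */
static int nextRow(Iter *it, int **ppRow) {
  *ppRow = NULL;
  bool hasNext = false;
  int  code = iterNext(it, &hasNext);
  if (code != 0 || !hasNext) {
    return code; /* early return: nothing to hand back */
  }
  *ppRow = &it->rows[it->pos - 1];
  return 0;
}

int main(void) {
  Iter it = {{7, 8, 9}, 0, 3};
  int *row = NULL;
  while (nextRow(&it, &row) == 0 && row != NULL) {
    printf("%d\n", *row);
  }
  return 0;
}
```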
--- source/dnode/vnode/src/tsdb/tsdbRead2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index 5987b673c3..355bcca469 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -1383,7 +1383,6 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, SRowKey* pLastPro static FORCE_INLINE STSchema* getTableSchemaImpl(STsdbReader* pReader, uint64_t uid) { ASSERT(pReader->info.pSchema == NULL); - int32_t code = metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->info.suid, uid, -1, &pReader->info.pSchema); if (code != TSDB_CODE_SUCCESS || pReader->info.pSchema == NULL) { terrno = code; @@ -1787,6 +1786,7 @@ static int32_t nextRowFromSttBlocks(SSttBlockReader* pSttBlockReader, STableBloc memset(pNextProc->pks[0].pData, 0, pNextProc->pks[0].nData); } } + return code; } TSDBROW* pRow = tMergeTreeGetRow(&pSttBlockReader->mergeTree); From 0531a4f4bd7a26468232cbedfd1ae4b62c33aa0f Mon Sep 17 00:00:00 2001 From: kailixu Date: Mon, 19 Aug 2024 18:36:39 +0800 Subject: [PATCH 150/181] fix: memory leak of geos --- include/util/tgeosctx.h | 8 ++-- source/client/src/clientMain.c | 2 +- source/dnode/mgmt/node_mgmt/src/dmMgmt.c | 2 +- source/libs/executor/src/sysscanoperator.c | 2 +- source/libs/geometry/src/geosWrapper.c | 45 +++++++++++++++++++++- source/libs/parser/src/parInsertSql.c | 4 +- source/libs/scalar/src/sclvector.c | 8 ++-- source/util/src/tgeosctx.c | 17 +++++--- source/util/src/tsched.c | 2 - source/util/src/tworker.c | 2 - tools/shell/src/shellEngine.c | 5 +-- 11 files changed, 71 insertions(+), 26 deletions(-) diff --git a/include/util/tgeosctx.h b/include/util/tgeosctx.h index 267ba9e049..a4355db29a 100644 --- a/include/util/tgeosctx.h +++ b/include/util/tgeosctx.h @@ -32,14 +32,16 @@ typedef struct SGeosContext { GEOSWKBReader *WKBReader; GEOSWKBWriter *WKBWriter; - pcre2_code *WKTRegex; + pcre2_code *WKTRegex; pcre2_match_data *WKTMatchData; char errMsg[512]; } SGeosContext; -SGeosContext* getThreadLocalGeosCtx(); -void destroyThreadLocalGeosCtx(); +SGeosContext *acquireThreadLocalGeosCtx(); +SGeosContext *getThreadLocalGeosCtx(); +const char *getGeosErrMsg(int32_t code); +void taosGeosDestroy(); #ifdef __cplusplus } diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index a403f9d1c2..ec3147f49a 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -91,11 +91,11 @@ void taos_cleanup(void) { tscDebug("rpc cleanup"); taosConvDestroy(); + taosGeosDestroy(); tmqMgmtClose(); DestroyRegexCache(); - destroyThreadLocalGeosCtx(); tscInfo("all local resources released"); taosCleanupCfg(); diff --git a/source/dnode/mgmt/node_mgmt/src/dmMgmt.c b/source/dnode/mgmt/node_mgmt/src/dmMgmt.c index 1d62d4bd90..ae6efa7af4 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmMgmt.c +++ b/source/dnode/mgmt/node_mgmt/src/dmMgmt.c @@ -122,7 +122,7 @@ void dmCleanupDnode(SDnode *pDnode) { streamMetaCleanup(); indexCleanup(); taosConvDestroy(); - destroyThreadLocalGeosCtx(); + taosGeosDestroy(); // compress destroy tsCompressExit(); diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c index d8a2331980..082d4e7789 100644 --- a/source/libs/executor/src/sysscanoperator.c +++ b/source/libs/executor/src/sysscanoperator.c @@ -979,7 +979,7 @@ static int32_t sysTableGetGeomText(char* iGeom, int32_t nGeom, char** output, in if (TSDB_CODE_SUCCESS != (code = 
initCtxAsText()) || TSDB_CODE_SUCCESS != (code = doAsText(iGeom, nGeom, &outputWKT))) { - qError("geo text for systable failed:%s", getThreadLocalGeosCtx()->errMsg); + qError("geo text for systable failed:%s", getGeosErrMsg(code)); *output = NULL; *nOutput = 0; return code; diff --git a/source/libs/geometry/src/geosWrapper.c b/source/libs/geometry/src/geosWrapper.c index dde34edc91..4f3f7d75c2 100644 --- a/source/libs/geometry/src/geosWrapper.c +++ b/source/libs/geometry/src/geosWrapper.c @@ -23,7 +23,8 @@ typedef char (*_geosPreparedRelationFunc_t)(GEOSContextHandle_t handle, const GE void geosFreeBuffer(void *buffer) { if (buffer) { - GEOSFree_r(getThreadLocalGeosCtx()->handle, buffer); + SGeosContext *pCtx = acquireThreadLocalGeosCtx(); + if (pCtx) GEOSFree_r(pCtx->handle, buffer); } } @@ -36,6 +37,11 @@ int32_t initCtxMakePoint() { int32_t code = TSDB_CODE_FAILED; SGeosContext *geosCtx = getThreadLocalGeosCtx(); + if (!geosCtx) { + code = TSDB_CODE_OUT_OF_MEMORY; + return code; + } + if (geosCtx->handle == NULL) { geosCtx->handle = GEOS_init_r(); if (geosCtx->handle == NULL) { @@ -61,6 +67,11 @@ int32_t doMakePoint(double x, double y, unsigned char **outputGeom, size_t *size int32_t code = TSDB_CODE_FAILED; SGeosContext *geosCtx = getThreadLocalGeosCtx(); + if (!geosCtx) { + code = TSDB_CODE_OUT_OF_MEMORY; + return code; + } + GEOSGeometry *geom = NULL; unsigned char *wkb = NULL; @@ -166,6 +177,11 @@ int32_t initCtxGeomFromText() { int32_t code = TSDB_CODE_FAILED; SGeosContext *geosCtx = getThreadLocalGeosCtx(); + if (!geosCtx) { + code = TSDB_CODE_OUT_OF_MEMORY; + return code; + } + if (geosCtx->handle == NULL) { geosCtx->handle = GEOS_init_r(); if (geosCtx->handle == NULL) { @@ -202,6 +218,11 @@ int32_t doGeomFromText(const char *inputWKT, unsigned char **outputGeom, size_t int32_t code = TSDB_CODE_FAILED; SGeosContext *geosCtx = getThreadLocalGeosCtx(); + if (!geosCtx) { + code = TSDB_CODE_OUT_OF_MEMORY; + return code; + } + GEOSGeometry *geom = NULL; unsigned char *wkb = NULL; @@ -237,6 +258,11 @@ int32_t initCtxAsText() { int32_t code = TSDB_CODE_FAILED; SGeosContext *geosCtx = getThreadLocalGeosCtx(); + if (!geosCtx) { + code = TSDB_CODE_OUT_OF_MEMORY; + return code; + } + if (geosCtx->handle == NULL) { geosCtx->handle = GEOS_init_r(); if (geosCtx->handle == NULL) { @@ -273,6 +299,11 @@ int32_t doAsText(const unsigned char *inputGeom, size_t size, char **outputWKT) int32_t code = TSDB_CODE_FAILED; SGeosContext *geosCtx = getThreadLocalGeosCtx(); + if (!geosCtx) { + code = TSDB_CODE_OUT_OF_MEMORY; + return code; + } + GEOSGeometry *geom = NULL; char *wkt = NULL; @@ -304,6 +335,11 @@ int32_t initCtxRelationFunc() { int32_t code = TSDB_CODE_FAILED; SGeosContext *geosCtx = getThreadLocalGeosCtx(); + if (!geosCtx) { + code = TSDB_CODE_OUT_OF_MEMORY; + return code; + } + if (geosCtx->handle == NULL) { geosCtx->handle = GEOS_init_r(); if (geosCtx->handle == NULL) { @@ -329,6 +365,10 @@ int32_t doGeosRelation(const GEOSGeometry *geom1, const GEOSPreparedGeometry *pr _geosPreparedRelationFunc_t swappedPreparedRelationFn) { SGeosContext *geosCtx = getThreadLocalGeosCtx(); + if (!geosCtx) { + return TSDB_CODE_OUT_OF_MEMORY; + } + if (!preparedGeom1) { if (!swapped) { ASSERT(relationFn); @@ -390,6 +430,9 @@ int32_t doContainsProperly(const GEOSGeometry *geom1, const GEOSPreparedGeometry int32_t readGeometry(const unsigned char *input, GEOSGeometry **outputGeom, const GEOSPreparedGeometry **outputPreparedGeom) { SGeosContext *geosCtx = getThreadLocalGeosCtx(); + if (!geosCtx) { + return 
TSDB_CODE_OUT_OF_MEMORY; + } ASSERT(outputGeom); // it is not allowed if outputGeom is NULL *outputGeom = NULL; diff --git a/source/libs/parser/src/parInsertSql.c b/source/libs/parser/src/parInsertSql.c index cb94cd42f7..aa6116287e 100644 --- a/source/libs/parser/src/parInsertSql.c +++ b/source/libs/parser/src/parInsertSql.c @@ -655,7 +655,7 @@ static int32_t parseTagToken(const char** end, SToken* pToken, SSchema* pSchema, code = parseGeometry(pToken, &output, &size); if (code != TSDB_CODE_SUCCESS) { - code = buildSyntaxErrMsg(pMsgBuf, getThreadLocalGeosCtx()->errMsg, pToken->z); + code = buildSyntaxErrMsg(pMsgBuf, getGeosErrMsg(code), pToken->z); } else if (size + VARSTR_HEADER_SIZE > pSchema->bytes) { // Too long values will raise the invalid sql error message code = generateSyntaxErrMsg(pMsgBuf, TSDB_CODE_PAR_VALUE_TOO_LONG, pSchema->name); @@ -1646,7 +1646,7 @@ static int32_t parseValueTokenImpl(SInsertParseContext* pCxt, const char** pSql, code = parseGeometry(pToken, &output, &size); if (code != TSDB_CODE_SUCCESS) { - code = buildSyntaxErrMsg(&pCxt->msg, getThreadLocalGeosCtx()->errMsg, pToken->z); + code = buildSyntaxErrMsg(&pCxt->msg, getGeosErrMsg(code), pToken->z); } // Too long values will raise the invalid sql error message else if (size + VARSTR_HEADER_SIZE > pSchema->bytes) { diff --git a/source/libs/scalar/src/sclvector.c b/source/libs/scalar/src/sclvector.c index 71773ced57..daf44ec527 100644 --- a/source/libs/scalar/src/sclvector.c +++ b/source/libs/scalar/src/sclvector.c @@ -441,12 +441,12 @@ static FORCE_INLINE int32_t varToGeometry(char *buf, SScalarParam *pOut, int32_t unsigned char *t = NULL; char *output = NULL; - if (initCtxGeomFromText()) { - sclError("failed to init geometry ctx, %s", getThreadLocalGeosCtx()->errMsg); + if ((code = initCtxGeomFromText()) != 0) { + sclError("failed to init geometry ctx, %s", getGeosErrMsg(code)); SCL_ERR_JRET(TSDB_CODE_APP_ERROR); } - if (doGeomFromText(buf, &t, &len)) { - sclInfo("failed to convert text to geometry, %s", getThreadLocalGeosCtx()->errMsg); + if ((code = doGeomFromText(buf, &t, &len))) { + sclInfo("failed to convert text to geometry, %s", getGeosErrMsg(code)); SCL_ERR_JRET(TSDB_CODE_SCALAR_CONVERT_ERROR); } diff --git a/source/util/src/tgeosctx.c b/source/util/src/tgeosctx.c index 47d5cc992b..473b7539fc 100644 --- a/source/util/src/tgeosctx.c +++ b/source/util/src/tgeosctx.c @@ -25,16 +25,19 @@ typedef struct { SRWLatch lock; } SGeosContextPool; -static SGeosContextPool sGeosPool = {0}; - +static SGeosContextPool sGeosPool = {0}; static threadlocal SGeosContext *tlGeosCtx = NULL; +SGeosContext *acquireThreadLocalGeosCtx() { return tlGeosCtx; } + SGeosContext *getThreadLocalGeosCtx() { - if (tlGeosCtx) return tlGeosCtx; + if (tlGeosCtx) { + return tlGeosCtx; + } taosWLockLatch(&sGeosPool.lock); if (sGeosPool.size >= sGeosPool.capacity) { - sGeosPool.capacity += 64; + sGeosPool.capacity += 128; void *tmp = taosMemoryRealloc(sGeosPool.pool, sGeosPool.capacity * sizeof(SGeosContext)); if (!tmp) { taosWUnLockLatch(&sGeosPool.lock); @@ -51,6 +54,8 @@ SGeosContext *getThreadLocalGeosCtx() { return tlGeosCtx; } +const char *getGeosErrMsg(int32_t code) { return tlGeosCtx ? tlGeosCtx->errMsg : (code != 0 ? 
tstrerror(code) : ""); } + static void destroyGeosCtx(SGeosContext *pCtx) { if (pCtx) { if (pCtx->WKTReader) { @@ -86,8 +91,8 @@ static void destroyGeosCtx(SGeosContext *pCtx) { } } -void destroyThreadLocalGeosCtx() { - uInfo("geos ctx is cleaned up"); +void taosGeosDestroy() { + uInfo("geos is cleaned up"); if (!sGeosPool.pool) return; for (int32_t i = 0; i < sGeosPool.size; ++i) { destroyGeosCtx(sGeosPool.pool + i); diff --git a/source/util/src/tsched.c b/source/util/src/tsched.c index 509dba0890..55a927f340 100644 --- a/source/util/src/tsched.c +++ b/source/util/src/tsched.c @@ -178,8 +178,6 @@ void *taosProcessSchedQueue(void *scheduler) { (*(msg.tfp))(msg.ahandle, msg.thandle); } - // destroyThreadLocalGeosCtx(); - return NULL; } diff --git a/source/util/src/tworker.c b/source/util/src/tworker.c index 2da1abed78..ebec134c91 100644 --- a/source/util/src/tworker.c +++ b/source/util/src/tworker.c @@ -105,7 +105,6 @@ static void *tQWorkerThreadFp(SQueueWorker *worker) { taosUpdateItemSize(qinfo.queue, 1); } - // destroyThreadLocalGeosCtx(); DestoryThreadLocalRegComp(); return NULL; @@ -665,7 +664,6 @@ static void *tQueryAutoQWorkerThreadFp(SQueryAutoQWorker *worker) { } } - // destroyThreadLocalGeosCtx(); DestoryThreadLocalRegComp(); return NULL; diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index 0ccbd683dc..2c8330c433 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -611,14 +611,14 @@ void shellPrintGeometry(const unsigned char *val, int32_t length, int32_t width) code = initCtxAsText(); if (code != TSDB_CODE_SUCCESS) { - shellPrintString(getThreadLocalGeosCtx()->errMsg, width); + shellPrintString(getGeosErrMsg(code), width); return; } char *outputWKT = NULL; code = doAsText(val, length, &outputWKT); if (code != TSDB_CODE_SUCCESS) { - shellPrintString(getThreadLocalGeosCtx()->errMsg, width); // should NOT happen + shellPrintString(getGeosErrMsg(code), width); // should NOT happen return; } @@ -1282,7 +1282,6 @@ void *shellThreadLoop(void *arg) { taosResetTerminalMode(); } while (shellRunCommand(command, true) == 0); - destroyThreadLocalGeosCtx(); taosMemoryFreeClear(command); shellWriteHistory(); shellExit(); From e298b5acb320cc379660885e4c6288852d162f04 Mon Sep 17 00:00:00 2001 From: Shungang Li Date: Mon, 19 Aug 2024 18:39:08 +0800 Subject: [PATCH 151/181] fix: s3 support multi proto --- source/common/src/cos.c | 44 +++++++++++++++++++------------------ source/common/src/tglobal.c | 9 ++++---- 2 files changed, 27 insertions(+), 26 deletions(-) diff --git a/source/common/src/cos.c b/source/common/src/cos.c index 8392b0564a..a5a278e82e 100644 --- a/source/common/src/cos.c +++ b/source/common/src/cos.c @@ -12,7 +12,7 @@ extern char tsS3AccessKeySecret[][TSDB_FQDN_LEN]; extern char tsS3BucketName[TSDB_FQDN_LEN]; extern char tsS3AppId[][TSDB_FQDN_LEN]; extern char tsS3Hostname[][TSDB_FQDN_LEN]; -extern int8_t tsS3Https; +extern int8_t tsS3Https[]; static int32_t s3ListBucketByEp(char const *bucketname, int8_t epIndex); static int32_t s3PutObjectFromFileOffsetByEp(const char *file, const char *object_name, int64_t offset, int64_t size, @@ -33,13 +33,13 @@ static int verifyPeerG = 0; static const char *awsRegionG = NULL; static int forceG = 0; static int showResponsePropertiesG = 0; -static S3Protocol protocolG = S3ProtocolHTTPS; +static S3Protocol protocolG[TSDB_MAX_EP_NUM] = {S3ProtocolHTTPS}; // static S3Protocol protocolG = S3ProtocolHTTP; -static S3UriStyle uriStyleG = S3UriStylePath; +static S3UriStyle 
uriStyleG[TSDB_MAX_EP_NUM] = {S3UriStylePath}; static int retriesG = 5; static int timeoutMsG = 0; -extern int8_t tsS3Oss; +extern int8_t tsS3Oss[]; int32_t s3Begin() { S3Status status; @@ -55,9 +55,11 @@ int32_t s3Begin() { TAOS_RETURN(TSDB_CODE_FAILED); } - protocolG = !tsS3Https; - if (tsS3Oss) { - uriStyleG = S3UriStyleVirtualHost; + for (int i = 0; i < tsS3EpNum; i++) { + protocolG[i] = !tsS3Https[i]; + if (tsS3Oss[i]) { + uriStyleG[i] = S3UriStyleVirtualHost; + } } TAOS_RETURN(TSDB_CODE_SUCCESS); @@ -976,8 +978,8 @@ int32_t s3PutObjectFromFile2ByEp(const char *file, const char *object_name, int8 S3BucketContext bucketContext = {tsS3Hostname[epIndex], tsS3BucketName, - protocolG, - uriStyleG, + protocolG[epIndex], + uriStyleG[epIndex], tsS3AccessKeyId[epIndex], tsS3AccessKeySecret[epIndex], 0, @@ -1059,8 +1061,8 @@ static int32_t s3PutObjectFromFileOffsetByEp(const char *file, const char *objec S3BucketContext bucketContext = {tsS3Hostname[epIndex], tsS3BucketName, - protocolG, - uriStyleG, + protocolG[epIndex], + uriStyleG[epIndex], tsS3AccessKeyId[epIndex], tsS3AccessKeySecret[epIndex], 0, @@ -1155,8 +1157,8 @@ static void s3FreeObjectKey(void *pItem) { static SArray *getListByPrefixByEp(const char *prefix, int8_t epIndex) { S3BucketContext bucketContext = {tsS3Hostname[epIndex], tsS3BucketName, - protocolG, - uriStyleG, + protocolG[epIndex], + uriStyleG[epIndex], tsS3AccessKeyId[epIndex], tsS3AccessKeySecret[epIndex], 0, @@ -1223,8 +1225,8 @@ static int32_t s3DeleteObjectsByEp(const char *object_name[], int nobject, int8_ S3BucketContext bucketContext = {tsS3Hostname[epIndex], tsS3BucketName, - protocolG, - uriStyleG, + protocolG[epIndex], + uriStyleG[epIndex], tsS3AccessKeyId[epIndex], tsS3AccessKeySecret[epIndex], 0, @@ -1299,8 +1301,8 @@ static int32_t s3GetObjectBlockByEp(const char *object_name, int64_t offset, int S3BucketContext bucketContext = {tsS3Hostname[epIndex], tsS3BucketName, - protocolG, - uriStyleG, + protocolG[epIndex], + uriStyleG[epIndex], tsS3AccessKeyId[epIndex], tsS3AccessKeySecret[epIndex], 0, @@ -1372,8 +1374,8 @@ static int32_t s3GetObjectToFileByEp(const char *object_name, const char *fileNa S3BucketContext bucketContext = {tsS3Hostname[epIndex], tsS3BucketName, - protocolG, - uriStyleG, + protocolG[epIndex], + uriStyleG[epIndex], tsS3AccessKeyId[epIndex], tsS3AccessKeySecret[epIndex], 0, @@ -1449,8 +1451,8 @@ static long s3SizeByEp(const char *object_name, int8_t epIndex) { S3BucketContext bucketContext = {tsS3Hostname[epIndex], tsS3BucketName, - protocolG, - uriStyleG, + protocolG[epIndex], + uriStyleG[epIndex], tsS3AccessKeyId[epIndex], tsS3AccessKeySecret[epIndex], 0, diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 40ace11d4f..cf0a4725c1 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -303,10 +303,10 @@ char tsS3BucketName[TSDB_FQDN_LEN] = ""; char tsS3AppId[TSDB_MAX_EP_NUM][TSDB_FQDN_LEN] = {""}; int8_t tsS3Enabled = false; int8_t tsS3EnabledCfg = false; -int8_t tsS3Oss = false; +int8_t tsS3Oss[TSDB_MAX_EP_NUM] = {false}; int8_t tsS3StreamEnabled = false; -int8_t tsS3Https = true; +int8_t tsS3Https[TSDB_MAX_EP_NUM] = {true}; char tsS3Hostname[TSDB_MAX_EP_NUM][TSDB_FQDN_LEN] = {""}; int32_t tsS3BlockSize = -1; // number of tsdb pages (4096) @@ -431,11 +431,10 @@ int32_t taosSetS3Cfg(SConfig *pCfg) { tstrncpy(tsS3AppId[i], appid + 1, TSDB_FQDN_LEN); } } + tsS3Https[i] = (strstr(tsS3Endpoint[i], "https://") != NULL); + tsS3Oss[i] = (strstr(tsS3Endpoint[i], "aliyuncs.") != NULL); } - tsS3Https = 
(strstr(tsS3Endpoint[0], "https://") != NULL); - tsS3Oss = (strstr(tsS3Endpoint[0], "aliyuncs.") != NULL); - if (tsS3BucketName[0] != '<') { #if defined(USE_COS) || defined(USE_S3) #ifdef TD_ENTERPRISE From 21c266d1231c476ee76c1778d1c93debbb5260f2 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 19 Aug 2024 18:52:14 +0800 Subject: [PATCH 152/181] fix(vnode/cfg): use default value if loading 0 --- source/dnode/vnode/src/vnd/vnodeCfg.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/vnd/vnodeCfg.c b/source/dnode/vnode/src/vnd/vnodeCfg.c index 1f2cf707f3..e2791d8a00 100644 --- a/source/dnode/vnode/src/vnd/vnodeCfg.c +++ b/source/dnode/vnode/src/vnd/vnodeCfg.c @@ -375,11 +375,11 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) { } tjsonGetNumberValue(pJson, "s3ChunkSize", pCfg->s3ChunkSize, code); - if (code < 0) { + if (code < 0 || pCfg->s3ChunkSize < TSDB_MIN_S3_CHUNK_SIZE) { pCfg->s3ChunkSize = TSDB_DEFAULT_S3_CHUNK_SIZE; } tjsonGetNumberValue(pJson, "s3KeepLocal", pCfg->s3KeepLocal, code); - if (code < 0) { + if (code < 0 || pCfg->s3KeepLocal < TSDB_MIN_S3_KEEP_LOCAL) { pCfg->s3KeepLocal = TSDB_DEFAULT_S3_KEEP_LOCAL; } tjsonGetNumberValue(pJson, "s3Compact", pCfg->s3Compact, code); From 1b59d4a8d7443c41c971ad39bdc5de467d0c6c9b Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 19 Aug 2024 18:54:42 +0800 Subject: [PATCH 153/181] fix(tsdb): pass the error code out. --- source/dnode/vnode/src/tsdb/tsdbRead2.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index 355bcca469..24476e8df1 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -1413,7 +1413,8 @@ static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockI if (pReader->info.pSchema == NULL) { pSchema = getTableSchemaImpl(pReader, uid); if (pSchema == NULL) { - tsdbDebug("%p table uid:%" PRIu64 " has been dropped, no data existed, %s", pReader, uid, pReader->idStr); + code = terrno; + tsdbError("%p table uid:%" PRIu64 " has been dropped, no data existed, %s", pReader, uid, pReader->idStr); return code; } } @@ -1448,7 +1449,7 @@ static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockI pReader->cost.blockLoadTime += elapsedTime; pDumpInfo->allDumped = false; - return TSDB_CODE_SUCCESS; + return code; } /** @@ -2137,7 +2138,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo* if (piRow->type == TSDBROW_ROW_FMT) { piSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(piRow), pReader, pBlockScanInfo->uid); if (piSchema == NULL) { - return code; + return terrno; } } From 2b68e110e51728c0ecd9ea2346eab4641aecf4af Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 19 Aug 2024 19:01:26 +0800 Subject: [PATCH 154/181] fix(tsdb): check return value. 
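The iterator helpers touched below return an error code while also writing through
out-parameters; this change resets those out-parameters before any early return and
makes doMergeRowsInSttBlock return the code instead of falling through. A minimal
sketch of that convention (illustrative only: DemoIter, demoIterNext and the -1 code
are hypothetical stand-ins, not symbols from this repository):

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct { int32_t pos; int32_t len; } DemoIter;  /* hypothetical iterator */

    /* Reset every out-parameter first, then report the error code; on any
     * failure the caller still sees safe defaults in *pVal / *pHasNext. */
    static int32_t demoIterNext(DemoIter *it, int32_t *pVal, bool *pHasNext) {
      *pVal = 0;
      *pHasNext = false;
      if (it == NULL || it->pos >= it->len) {
        return -1;  /* stand-in for a TSDB error code */
      }
      *pVal = it->pos++;
      *pHasNext = (it->pos < it->len);
      return 0;
    }

With this shape a caller can simply propagate the code (if (code) return code;)
without re-checking whether the outputs were ever initialized.
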
--- source/dnode/vnode/src/tsdb/tsdbCache.c | 4 +++- source/dnode/vnode/src/tsdb/tsdbMergeTree.c | 1 + source/dnode/vnode/src/tsdb/tsdbRead2.c | 5 +++-- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index 70e6e1ee2a..fb72784229 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -2245,7 +2245,9 @@ static int32_t lastIterClose(SFSLastIter **iter) { } static int32_t lastIterNext(SFSLastIter *iter, TSDBROW **ppRow) { - bool hasVal = false; + bool hasVal = false; + *ppRow = NULL; + int32_t code = tMergeTreeNext(iter->pMergeTree, &hasVal); if (code != 0) { return code; diff --git a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c index 8bfc066731..160ff2e13c 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c +++ b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c @@ -1108,6 +1108,7 @@ int32_t tMergeTreeNext(SMergeTree *pMTree, bool *pHasNext) { return TSDB_CODE_INVALID_PARA; } + *pHasNext = false; if (pMTree->pIter) { SLDataIter *pIter = pMTree->pIter; bool hasVal = false; diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index 24476e8df1..cc369ba3b0 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -4105,9 +4105,10 @@ int32_t doMergeRowsInSttBlock(SSttBlockReader* pSttBlockReader, STableBlockScanI while (1) { code = nextRowFromSttBlocks(pSttBlockReader, pScanInfo, pkSrcSlot, pVerRange); - if (code) { - + if (code || (!hasDataInSttBlock(pScanInfo))) { + return code; } + SRowKey* pNextKey = getCurrentKeyInSttBlock(pSttBlockReader); int32_t ret = pkCompEx(pRowKey, pNextKey); From a760ede4ae473a52cc7284706362ab44c25cc1f5 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 19 Aug 2024 19:16:40 +0800 Subject: [PATCH 155/181] refactor: update logs. 
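The reworded log line below reports the decoded error code together with the reader
id instead of assuming the table was dropped. A minimal sketch of the pattern
(illustrative only: demoStrError, DEMO_ERR_LOG and demoLoadSchema are hypothetical
stand-ins, not the real tstrerror/tsdbError helpers):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical replacements for tstrerror()/tsdbError() from the repo. */
    static const char *demoStrError(int32_t code) { return (code != 0) ? "demo failure" : "success"; }
    #define DEMO_ERR_LOG(...) (void)fprintf(stderr, __VA_ARGS__)

    /* Log the failing uid plus the decoded code, then pass the same code up
     * to the caller rather than hiding it behind a generic message. */
    static int32_t demoLoadSchema(uint64_t uid, int32_t code) {
      if (code != 0) {
        DEMO_ERR_LOG("table uid:%" PRIu64 " failed to get table schema, code:%s\n", uid, demoStrError(code));
        return code;
      }
      return 0;
    }
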
--- source/dnode/vnode/src/tsdb/tsdbRead2.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index cc369ba3b0..74992c40d3 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -1414,7 +1414,8 @@ static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockI pSchema = getTableSchemaImpl(pReader, uid); if (pSchema == NULL) { code = terrno; - tsdbError("%p table uid:%" PRIu64 " has been dropped, no data existed, %s", pReader, uid, pReader->idStr); + tsdbError("%p table uid:%" PRIu64 " failed to get tableschema, code:%s, %s", pReader, uid, tstrerror(code), + pReader->idStr); return code; } } From 660bfde59370e531dd290c2d1ab61c65235a650f Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 19 Aug 2024 19:16:37 +0800 Subject: [PATCH 156/181] refact: adjust util log --- source/libs/sync/inc/syncUtil.h | 2 -- source/libs/sync/src/syncEnv.c | 10 ++++---- source/libs/sync/src/syncUtil.c | 41 +++++++++++++++++---------------- 3 files changed, 25 insertions(+), 28 deletions(-) diff --git a/source/libs/sync/inc/syncUtil.h b/source/libs/sync/inc/syncUtil.h index a550ae8fbb..555607d40c 100644 --- a/source/libs/sync/inc/syncUtil.h +++ b/source/libs/sync/inc/syncUtil.h @@ -75,8 +75,6 @@ int32_t syncUtilElectRandomMS(int32_t min, int32_t max); int32_t syncUtilQuorum(int32_t replicaNum); const char* syncStr(ESyncState state); void syncUtilMsgHtoN(void* msg); -bool syncUtilUserPreCommit(tmsg_t msgType); -bool syncUtilUserRollback(tmsg_t msgType); void syncUtilGenerateArbToken(int32_t nodeId, int32_t groupId, char* buf); diff --git a/source/libs/sync/src/syncEnv.c b/source/libs/sync/src/syncEnv.c index 7474cea83d..1ebf47403a 100644 --- a/source/libs/sync/src/syncEnv.c +++ b/source/libs/sync/src/syncEnv.c @@ -32,7 +32,7 @@ int32_t syncInit() { uint32_t seed = (uint32_t)(taosGetTimestampNs() & 0x00000000FFFFFFFF); taosSeedRand(seed); - memset(&gSyncEnv, 0, sizeof(SSyncEnv)); + (void)memset(&gSyncEnv, 0, sizeof(SSyncEnv)); gSyncEnv.pTimerManager = taosTmrInit(1000, 50, 10000, "SYNC-ENV"); gNodeRefId = taosOpenRef(200, (RefFp)syncNodeClose); @@ -59,7 +59,7 @@ int32_t syncInit() { void syncCleanUp() { atomic_store_8(&gSyncEnv.isStart, 0); taosTmrCleanUp(gSyncEnv.pTimerManager); - memset(&gSyncEnv, 0, sizeof(SSyncEnv)); + (void)memset(&gSyncEnv, 0, sizeof(SSyncEnv)); if (gNodeRefId != -1) { sDebug("sync node rset is closed, rsetId:%d", gNodeRefId); @@ -77,8 +77,7 @@ void syncCleanUp() { int64_t syncNodeAdd(SSyncNode *pNode) { pNode->rid = taosAddRef(gNodeRefId, pNode); if (pNode->rid < 0) { - terrno = TSDB_CODE_SYN_WRONG_REF; - return -1; + return terrno = TSDB_CODE_SYN_WRONG_REF; } sDebug("vgId:%d, sync node refId:%" PRId64 " is added to rsetId:%d", pNode->vgId, pNode->rid, gNodeRefId); @@ -111,8 +110,7 @@ void syncNodeRelease(SSyncNode *pNode) { int64_t syncHbTimerDataAdd(SSyncHbTimerData *pData) { pData->rid = taosAddRef(gHbDataRefId, pData); if (pData->rid < 0) { - terrno = TSDB_CODE_SYN_WRONG_REF; - return -1; + return terrno = TSDB_CODE_SYN_WRONG_REF; } return pData->rid; diff --git a/source/libs/sync/src/syncUtil.c b/source/libs/sync/src/syncUtil.c index 49737b9045..ca879f70d9 100644 --- a/source/libs/sync/src/syncUtil.c +++ b/source/libs/sync/src/syncUtil.c @@ -23,7 +23,7 @@ #include "syncSnapshot.h" #include "tglobal.h" -void syncCfg2SimpleStr(const SSyncCfg* pCfg, char* buf, int32_t bufLen) { +static void syncCfg2SimpleStr(const 
SSyncCfg* pCfg, char* buf, int32_t bufLen) { int32_t len = snprintf(buf, bufLen, "{num:%d, as:%d, [", pCfg->replicaNum, pCfg->myIndex); for (int32_t i = 0; i < pCfg->replicaNum; ++i) { len += snprintf(buf + len, bufLen - len, "%s:%d", pCfg->nodeInfo[i].nodeFqdn, pCfg->nodeInfo[i].nodePort); @@ -43,14 +43,13 @@ void syncUtilNodeInfo2EpSet(const SNodeInfo* pInfo, SEpSet* pEpSet) { bool syncUtilNodeInfo2RaftId(const SNodeInfo* pInfo, SyncGroupId vgId, SRaftId* raftId) { uint32_t ipv4 = 0xFFFFFFFF; - sDebug( - "vgId:%d, start to resolve sync addr fqdn in %d seconds, " - "dnode:%d cluster:%" PRId64 " fqdn:%s port:%u ", - vgId, tsResolveFQDNRetryTime, pInfo->nodeId, pInfo->clusterId, pInfo->nodeFqdn, pInfo->nodePort); - for (int i = 0; i < tsResolveFQDNRetryTime; i++) { + sDebug("vgId:%d, resolve sync addr from fqdn, dnode:%d cluster:%" PRId64 " fqdn:%s port:%u", vgId, pInfo->nodeId, + pInfo->clusterId, pInfo->nodeFqdn, pInfo->nodePort); + for (int32_t i = 0; i < tsResolveFQDNRetryTime; i++) { int32_t code = taosGetIpv4FromFqdn(pInfo->nodeFqdn, &ipv4); if (code) { - sError("failed to resolve ipv4 addr, fqdn:%s, wait one second", pInfo->nodeFqdn); + sError("vgId:%d, failed to resolve sync addr, dnode:%d fqdn:%s, wait one second", vgId, pInfo->nodeId, + pInfo->nodeFqdn); taosSsleep(1); } else { break; @@ -58,7 +57,7 @@ bool syncUtilNodeInfo2RaftId(const SNodeInfo* pInfo, SyncGroupId vgId, SRaftId* } if (ipv4 == 0xFFFFFFFF || ipv4 == 1) { - sError("failed to resolve ipv4 addr, fqdn:%s", pInfo->nodeFqdn); + sError("vgId:%d, failed to resolve sync addr, fqdn:%s", vgId, pInfo->nodeFqdn); terrno = TSDB_CODE_TSC_INVALID_FQDN; return false; } @@ -68,14 +67,20 @@ bool syncUtilNodeInfo2RaftId(const SNodeInfo* pInfo, SyncGroupId vgId, SRaftId* raftId->addr = SYNC_ADDR(pInfo); raftId->vgId = vgId; - sInfo("vgId:%d, sync addr:%" PRIu64 ", dnode:%d cluster:%" PRId64 " fqdn:%s ip:%s port:%u ipv4:%u", vgId, - raftId->addr, pInfo->nodeId, pInfo->clusterId, pInfo->nodeFqdn, ipbuf, pInfo->nodePort, ipv4); + sInfo("vgId:%d, sync addr:%" PRIu64 " is resolved, dnode:%d cluster:%" PRId64 " fqdn:%s port:%u ip:%s ipv4:%u", vgId, + raftId->addr, pInfo->nodeId, pInfo->clusterId, pInfo->nodeFqdn, pInfo->nodePort, ipbuf, ipv4); return true; } bool syncUtilSameId(const SRaftId* pId1, const SRaftId* pId2) { - if (pId1->addr == pId2->addr && pId1->vgId == pId2->vgId) return true; - if ((CID(pId1) == 0 || CID(pId2) == 0) && (DID(pId1) == DID(pId2)) && pId1->vgId == pId2->vgId) return true; + if (pId1->addr == pId2->addr && pId1->vgId == pId2->vgId) { + return true; + } + + if ((CID(pId1) == 0 || CID(pId2) == 0) && (DID(pId1) == DID(pId2)) && pId1->vgId == pId2->vgId) { + return true; + } + return false; } @@ -98,10 +103,6 @@ void syncUtilMsgHtoN(void* msg) { pHead->vgId = htonl(pHead->vgId); } -bool syncUtilUserPreCommit(tmsg_t msgType) { return msgType != TDMT_SYNC_NOOP && msgType != TDMT_SYNC_LEADER_TRANSFER; } - -bool syncUtilUserRollback(tmsg_t msgType) { return msgType != TDMT_SYNC_NOOP && msgType != TDMT_SYNC_LEADER_TRANSFER; } - void syncUtilGenerateArbToken(int32_t nodeId, int32_t groupId, char* buf) { (void)memset(buf, 0, TSDB_ARB_TOKEN_SIZE); int32_t randVal = taosSafeRand() % 1000; @@ -142,18 +143,18 @@ static void syncLogBufferStates2Str(SSyncNode* pSyncNode, char* buf, int32_t buf if (pBuf == NULL) { return; } - int len = 0; + int32_t len = 0; len += snprintf(buf + len, bufLen - len, "[%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 ")", pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex); } static 
void syncLogReplStates2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen) { - int len = 0; + int32_t len = 0; len += snprintf(buf + len, bufLen - len, "%s", "{"); for (int32_t i = 0; i < pSyncNode->replicaNum; i++) { SSyncLogReplMgr* pMgr = pSyncNode->logReplMgrs[i]; if (pMgr == NULL) break; - len += snprintf(buf + len, bufLen - len, "%d:%d [%" PRId64 " %" PRId64 ", %" PRId64 ")", i, pMgr->restored, + len += snprintf(buf + len, bufLen - len, "%d:%d [%" PRId64 " %" PRId64 ", %" PRId64 "]", i, pMgr->restored, pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex); if (i + 1 < pSyncNode->replicaNum) { len += snprintf(buf + len, bufLen - len, "%s", ", "); @@ -280,7 +281,7 @@ void syncPrintSnapshotSenderLog(const char* flags, ELogLevel level, int32_t dfla " end:%" PRId64 " last-index:%" PRId64 " last-term:%" PRId64 " last-cfg:%" PRId64 ", seq:%d, ack:%d, " " buf:[%" PRId64 " %" PRId64 ", %" PRId64 - "), finish:%d, as:%d, to-dnode:%d}" + "], finish:%d, as:%d, to-dnode:%d}" ", term:%" PRIu64 ", commit-index:%" PRId64 ", firstver:%" PRId64 ", lastver:%" PRId64 ", min-match:%" PRId64 ", snap:{last-index:%" PRId64 ", term:%" PRIu64 "}, standby:%d, batch-sz:%d, replicas:%d, last-cfg:%" PRId64 From 50672cd1a875f81dcb83db05d2b50e6e9fd1ad86 Mon Sep 17 00:00:00 2001 From: sima Date: Mon, 19 Aug 2024 16:52:30 +0800 Subject: [PATCH 157/181] enh:[TD-31529] Remove ASSERT in libs/scalar. --- include/util/taoserror.h | 3 + source/libs/geometry/src/geomFunc.c | 6 +- source/libs/scalar/inc/sclvector.h | 37 ++-- source/libs/scalar/src/filter.c | 172 ++++++++++++++---- source/libs/scalar/src/sclfunc.c | 7 +- source/libs/scalar/src/sclvector.c | 168 ++++++++++------- source/libs/scalar/test/filter/CMakeLists.txt | 2 +- .../libs/scalar/test/filter/filterTests.cpp | 3 +- .../libs/scalar/test/scalar/scalarTests.cpp | 14 +- source/util/src/terror.c | 3 + 10 files changed, 281 insertions(+), 134 deletions(-) diff --git a/include/util/taoserror.h b/include/util/taoserror.h index 1911c48d26..cc31cde61a 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -604,6 +604,9 @@ int32_t taosGetErrSize(); #define TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x0732) #define TSDB_CODE_QRY_INVALID_JOIN_CONDITION TAOS_DEF_ERROR_CODE(0, 0x0733) #define TSDB_CODE_QRY_FILTER_NOT_SUPPORT_TYPE TAOS_DEF_ERROR_CODE(0, 0x0734) +#define TSDB_CODE_QRY_FILTER_WRONG_OPTR_TYPE TAOS_DEF_ERROR_CODE(0, 0x0735) +#define TSDB_CODE_QRY_FILTER_RANGE_ERROR TAOS_DEF_ERROR_CODE(0, 0x0736) +#define TSDB_CODE_QRY_FILTER_INVALID_TYPE TAOS_DEF_ERROR_CODE(0, 0x0737) // grant #define TSDB_CODE_GRANT_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0800) diff --git a/source/libs/geometry/src/geomFunc.c b/source/libs/geometry/src/geomFunc.c index 4426427bf5..1752493dff 100644 --- a/source/libs/geometry/src/geomFunc.c +++ b/source/libs/geometry/src/geomFunc.c @@ -131,12 +131,12 @@ _exit: int32_t executeMakePointFunc(SColumnInfoData *pInputData[], int32_t iLeft, int32_t iRight, SColumnInfoData *pOutputData) { int32_t code = TSDB_CODE_FAILED; + unsigned char *output = NULL; _getDoubleValue_fn_t getDoubleValueFn[2]; - getDoubleValueFn[0] = getVectorDoubleValueFn(pInputData[0]->info.type); - getDoubleValueFn[1] = getVectorDoubleValueFn(pInputData[1]->info.type); + TAOS_CHECK_GOTO(getVectorDoubleValueFn(pInputData[0]->info.type, &getDoubleValueFn[0]), NULL, _exit); + TAOS_CHECK_GOTO(getVectorDoubleValueFn(pInputData[1]->info.type, &getDoubleValueFn[1]), NULL, _exit); - unsigned char *output = NULL; double leftRes = 0; double rightRes = 0; diff --git 
a/source/libs/scalar/inc/sclvector.h b/source/libs/scalar/inc/sclvector.h index c2eb13dc75..fdd3e92501 100644 --- a/source/libs/scalar/inc/sclvector.h +++ b/source/libs/scalar/inc/sclvector.h @@ -78,40 +78,41 @@ static FORCE_INLINE int32_t getVectorDoubleValue_BOOL(void *src, int32_t index, int32_t getVectorDoubleValue_JSON(void *src, int32_t index, double *out); -static FORCE_INLINE _getDoubleValue_fn_t getVectorDoubleValueFn(int32_t srcType) { - _getDoubleValue_fn_t p = NULL; +static FORCE_INLINE int32_t getVectorDoubleValueFn(int32_t srcType, _getDoubleValue_fn_t *p) { + *p = NULL; if (srcType == TSDB_DATA_TYPE_TINYINT) { - p = getVectorDoubleValue_TINYINT; + *p = getVectorDoubleValue_TINYINT; } else if (srcType == TSDB_DATA_TYPE_UTINYINT) { - p = getVectorDoubleValue_UTINYINT; + *p = getVectorDoubleValue_UTINYINT; } else if (srcType == TSDB_DATA_TYPE_SMALLINT) { - p = getVectorDoubleValue_SMALLINT; + *p = getVectorDoubleValue_SMALLINT; } else if (srcType == TSDB_DATA_TYPE_USMALLINT) { - p = getVectorDoubleValue_USMALLINT; + *p = getVectorDoubleValue_USMALLINT; } else if (srcType == TSDB_DATA_TYPE_INT) { - p = getVectorDoubleValue_INT; + *p = getVectorDoubleValue_INT; } else if (srcType == TSDB_DATA_TYPE_UINT) { - p = getVectorDoubleValue_UINT; + *p = getVectorDoubleValue_UINT; } else if (srcType == TSDB_DATA_TYPE_BIGINT) { - p = getVectorDoubleValue_BIGINT; + *p = getVectorDoubleValue_BIGINT; } else if (srcType == TSDB_DATA_TYPE_UBIGINT) { - p = getVectorDoubleValue_UBIGINT; + *p = getVectorDoubleValue_UBIGINT; } else if (srcType == TSDB_DATA_TYPE_FLOAT) { - p = getVectorDoubleValue_FLOAT; + *p = getVectorDoubleValue_FLOAT; } else if (srcType == TSDB_DATA_TYPE_DOUBLE) { - p = getVectorDoubleValue_DOUBLE; + *p = getVectorDoubleValue_DOUBLE; } else if (srcType == TSDB_DATA_TYPE_TIMESTAMP) { - p = getVectorDoubleValue_BIGINT; + *p = getVectorDoubleValue_BIGINT; } else if (srcType == TSDB_DATA_TYPE_JSON) { - p = getVectorDoubleValue_JSON; + *p = getVectorDoubleValue_JSON; } else if (srcType == TSDB_DATA_TYPE_BOOL) { - p = getVectorDoubleValue_BOOL; + *p = getVectorDoubleValue_BOOL; } else if (srcType == TSDB_DATA_TYPE_NULL) { - p = NULL; + *p = NULL; } else { - ASSERT(0); + *p = NULL; + return TSDB_CODE_SCALAR_CONVERT_ERROR; } - return p; + return TSDB_CODE_SUCCESS; } typedef int32_t (*_bufConverteFunc)(char *buf, SScalarParam *pOut, int32_t outType, int32_t *overflow); diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c index cc9cc9ed76..988aeba46f 100644 --- a/source/libs/scalar/src/filter.c +++ b/source/libs/scalar/src/filter.c @@ -92,7 +92,9 @@ rangeCompFunc gRangeCompare[] = {filterRangeCompee, filterRangeCompei, filterRan int8_t filterGetRangeCompFuncFromOptrs(uint8_t optr, uint8_t optr2) { if (optr2) { - ASSERT(optr2 == OP_TYPE_LOWER_THAN || optr2 == OP_TYPE_LOWER_EQUAL); + if (optr2 != OP_TYPE_LOWER_THAN && optr2 != OP_TYPE_LOWER_EQUAL) { + return -1; + } if (optr == OP_TYPE_GREATER_THAN) { if (optr2 == OP_TYPE_LOWER_THAN) { @@ -763,7 +765,10 @@ int32_t filterAddRangeCtx(void *dst, void *src, int32_t optr) { SFilterRangeCtx *dctx = (SFilterRangeCtx *)dst; SFilterRangeCtx *sctx = (SFilterRangeCtx *)src; - ASSERT(optr == LOGIC_COND_TYPE_OR); + if (optr != LOGIC_COND_TYPE_OR) { + fltError("filterAddRangeCtx get invalid optr:%d", optr); + return TSDB_CODE_QRY_FILTER_WRONG_OPTR_TYPE; + } if (sctx->rs == NULL) { return TSDB_CODE_SUCCESS; @@ -1204,7 +1209,10 @@ int32_t filterAddUnitImpl(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, if (u->right.type == 
FLD_TYPE_VALUE) { SFilterField *val = FILTER_UNIT_RIGHT_FIELD(info, u); - ASSERT(FILTER_GET_FLAG(val->flag, FLD_TYPE_VALUE)); + if (!FILTER_GET_FLAG(val->flag, FLD_TYPE_VALUE)) { + fltError("filterAddUnitImpl get invalid flag : %d in val", val->flag); + return TSDB_CODE_APP_ERROR; + } } else { int32_t paramNum = scalarGetOperatorParamNum(optr); if (1 != paramNum) { @@ -1214,7 +1222,10 @@ int32_t filterAddUnitImpl(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, } SFilterField *col = FILTER_UNIT_LEFT_FIELD(info, u); - ASSERT(FILTER_GET_FLAG(col->flag, FLD_TYPE_COLUMN)); + if (!FILTER_GET_FLAG(col->flag, FLD_TYPE_COLUMN)) { + fltError("filterAddUnitImpl get invalid flag : %d in col", col->flag); + return TSDB_CODE_APP_ERROR; + } info->units[info->unitNum].compare.type = FILTER_GET_COL_FIELD_TYPE(col); info->units[info->unitNum].compare.precision = FILTER_GET_COL_FIELD_PRECISION(col); @@ -1398,29 +1409,48 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan if (optr == LOGIC_COND_TYPE_AND) { if (ctx->isnull) { - ASSERT(ctx->notnull == false && ctx->isrange == false); + if (ctx->notnull || ctx->isrange) { + fltError("filterAddGroupUnitFromCtx get invalid ctx : isnull %d, notnull %d, isrange %d", + ctx->isnull, ctx->notnull, ctx->isrange); + FLT_ERR_RET(TSDB_CODE_QRY_FILTER_RANGE_ERROR); + } FLT_ERR_RET(filterAddUnit(dst, OP_TYPE_IS_NULL, &left, NULL, &uidx)); FLT_ERR_RET(filterAddUnitToGroup(g, uidx)); return TSDB_CODE_SUCCESS; } if (ctx->notnull) { - ASSERT(ctx->isnull == false && ctx->isrange == false); + if (ctx->isnull || ctx->isrange) { + fltError("filterAddGroupUnitFromCtx get invalid ctx : isnull %d, notnull %d, isrange %d", + ctx->isnull, ctx->notnull, ctx->isrange); + FLT_ERR_RET(TSDB_CODE_QRY_FILTER_RANGE_ERROR); + } FLT_ERR_RET(filterAddUnit(dst, OP_TYPE_IS_NOT_NULL, &left, NULL, &uidx)); FLT_ERR_RET(filterAddUnitToGroup(g, uidx)); return TSDB_CODE_SUCCESS; } if (!ctx->isrange) { - ASSERT(ctx->isnull || ctx->notnull); + if (!ctx->isnull && !ctx->notnull) { + fltError("filterAddGroupUnitFromCtx get invalid ctx : isnull %d, notnull %d, isrange %d", + ctx->isnull, ctx->notnull, ctx->isrange); + FLT_ERR_RET(TSDB_CODE_QRY_FILTER_RANGE_ERROR); + } return TSDB_CODE_SUCCESS; } - ASSERT(ctx->rs && ctx->rs->next == NULL); + if (!ctx->rs || ctx->rs->next != NULL) { + fltError("filterAddGroupUnitFromCtx get invalid range node with rs:%p", ctx->rs); + FLT_ERR_RET(TSDB_CODE_QRY_FILTER_RANGE_ERROR); + } SFilterRange *ra = &ctx->rs->ra; - ASSERT(!((FILTER_GET_FLAG(ra->sflag, RANGE_FLG_NULL)) && (FILTER_GET_FLAG(ra->eflag, RANGE_FLG_NULL)))); + if (((FILTER_GET_FLAG(ra->sflag, RANGE_FLG_NULL)) && (FILTER_GET_FLAG(ra->eflag, RANGE_FLG_NULL)))) { + fltError("filterAddGroupUnitFromCtx get invalid range with sflag:%d, eflag:%d", + FILTER_GET_FLAG(ra->sflag, RANGE_FLG_NULL), FILTER_GET_FLAG(ra->eflag, RANGE_FLG_NULL)); + FLT_ERR_RET(TSDB_CODE_QRY_FILTER_RANGE_ERROR); + } if ((!FILTER_GET_FLAG(ra->sflag, RANGE_FLG_NULL)) && (!FILTER_GET_FLAG(ra->eflag, RANGE_FLG_NULL))) { __compar_fn_t func = getComparFunc(type, 0); @@ -1489,7 +1519,11 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan SFilterGroup ng = {0}; g = &ng; - ASSERT(ctx->isnull || ctx->notnull || ctx->isrange); + if (!ctx->isnull && !ctx->notnull && !ctx->isrange) { + fltError("filterAddGroupUnitFromCtx get invalid ctx : isnull %d, notnull %d, isrange %d", + ctx->isnull, ctx->notnull, ctx->isrange); + FLT_ERR_RET(TSDB_CODE_APP_ERROR); + } if (ctx->isnull) { 
FLT_ERR_RET(filterAddUnit(dst, OP_TYPE_IS_NULL, &left, NULL, &uidx)); @@ -1500,7 +1534,11 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan } if (ctx->notnull) { - ASSERT(!ctx->isrange); + if (ctx->isrange) { + fltError("filterAddGroupUnitFromCtx get invalid ctx : isnull %d, notnull %d, isrange %d", + ctx->isnull, ctx->notnull, ctx->isrange); + FLT_ERR_RET(TSDB_CODE_QRY_FILTER_RANGE_ERROR); + } (void)memset(g, 0, sizeof(*g)); FLT_ERR_RET(filterAddUnit(dst, OP_TYPE_IS_NOT_NULL, &left, NULL, &uidx)); @@ -1511,7 +1549,11 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan } if (!ctx->isrange) { - ASSERT(ctx->isnull || ctx->notnull); + if (!ctx->isnull && !ctx->notnull) { + fltError("filterAddGroupUnitFromCtx get invalid ctx : isnull %d, notnull %d, isrange %d", + ctx->isnull, ctx->notnull, ctx->isrange); + FLT_ERR_RET(TSDB_CODE_QRY_FILTER_RANGE_ERROR); + } g->unitNum = 0; return TSDB_CODE_SUCCESS; } @@ -1586,7 +1628,10 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan FLT_ERR_RET(filterAddUnitToGroup(g, uidx)); } - ASSERT(g->unitNum > 0); + if (g->unitNum <= 0) { + fltError("filterAddGroupUnitFromCtx get invalid filter group unit num %d", g->unitNum); + FLT_ERR_RET(TSDB_CODE_QRY_FILTER_RANGE_ERROR); + } if (NULL == taosArrayPush(res,g)) { FLT_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); @@ -2066,7 +2111,10 @@ void filterFreeInfo(SFilterInfo *info) { } int32_t filterHandleValueExtInfo(SFilterUnit *unit, char extInfo) { - ASSERT(extInfo > 0 || extInfo < 0); + if (extInfo == 0) { + fltError("filterHandleValueExtInfo get invalid extInfo : %d", extInfo); + return TSDB_CODE_APP_ERROR; + } uint8_t optr = FILTER_UNIT_OPTR(unit); switch (optr) { @@ -2093,13 +2141,20 @@ int32_t fltInitValFieldData(SFilterInfo *info) { for (uint32_t i = 0; i < info->unitNum; ++i) { SFilterUnit *unit = &info->units[i]; if (unit->right.type != FLD_TYPE_VALUE) { - ASSERT(unit->compare.optr == FILTER_DUMMY_EMPTY_OPTR || scalarGetOperatorParamNum(unit->compare.optr) == 1); + if (unit->compare.optr != FILTER_DUMMY_EMPTY_OPTR && scalarGetOperatorParamNum(unit->compare.optr) != 1) { + fltError("filterInitValFieldData get invalid operator param num : %d and invalid compare optr %d", + scalarGetOperatorParamNum(unit->compare.optr), unit->compare.optr); + return TSDB_CODE_APP_ERROR; + } continue; } SFilterField *right = FILTER_UNIT_RIGHT_FIELD(info, unit); - ASSERT(FILTER_GET_FLAG(right->flag, FLD_TYPE_VALUE)); + if (!FILTER_GET_FLAG(right->flag, FLD_TYPE_VALUE)) { + fltError("filterInitValFieldData get invalid field flag : %d", right->flag); + return TSDB_CODE_APP_ERROR; + } uint32_t type = FILTER_UNIT_DATA_TYPE(unit); int8_t precision = FILTER_UNIT_DATA_PRECISION(unit); @@ -2107,7 +2162,10 @@ int32_t fltInitValFieldData(SFilterInfo *info) { SValueNode *var = (SValueNode *)fi->desc; if (var == NULL) { - ASSERT(fi->data != NULL); + if (!fi->data) { + fltError("filterInitValFieldData get invalid field data : NULL"); + return TSDB_CODE_APP_ERROR; + } continue; } @@ -2257,7 +2315,10 @@ int32_t filterAddUnitRange(SFilterInfo *info, SFilterUnit *u, SFilterRangeCtx *c FILTER_SET_FLAG(ra.sflag, RANGE_FLG_NULL); break; case OP_TYPE_NOT_EQUAL: - ASSERT(type == TSDB_DATA_TYPE_BOOL); + if (type != TSDB_DATA_TYPE_BOOL) { + fltError("filterAddUnitRange get invalid type : %d", type); + return TSDB_CODE_QRY_FILTER_INVALID_TYPE; + } if (GET_INT8_VAL(val)) { SIMPLE_COPY_VALUES(&ra.s, &tmp); SIMPLE_COPY_VALUES(&ra.e, &tmp); @@ -2273,7 +2334,7 @@ int32_t 
filterAddUnitRange(SFilterInfo *info, SFilterUnit *u, SFilterRangeCtx *c break; default: fltError("unsupported operator type"); - return TSDB_CODE_APP_ERROR; + return TSDB_CODE_QRY_FILTER_NOT_SUPPORT_TYPE; } FLT_ERR_RET(filterAddRange(ctx, &ra, optr)); @@ -2547,8 +2608,11 @@ int32_t filterMergeTwoGroupsImpl(SFilterInfo *info, SFilterRangeCtx **ctx, int32 FLT_ERR_RET(filterReuseRangeCtx(*ctx, type, 0)); } - ASSERT(gRes2->colInfo[cidx].type == RANGE_TYPE_MR_CTX); - ASSERT(gRes1->colInfo[cidx].type == RANGE_TYPE_MR_CTX); + if (gRes2->colInfo[cidx].type != RANGE_TYPE_MR_CTX || gRes1->colInfo[cidx].type != RANGE_TYPE_MR_CTX) { + fltError("filterMergeTwoGroupsImpl get invalid col type : %d and %d", + gRes2->colInfo[cidx].type, gRes1->colInfo[cidx].type); + return TSDB_CODE_QRY_FILTER_NOT_SUPPORT_TYPE; + } FLT_ERR_RET(filterCopyRangeCtx(*ctx, gRes2->colInfo[cidx].info)); FLT_ERR_RET(filterSourceRangeFromCtx(*ctx, gRes1->colInfo[cidx].info, optr, empty, all)); @@ -2588,7 +2652,10 @@ int32_t filterMergeTwoGroups(SFilterInfo *info, SFilterGroupCtx **gRes1, SFilter continue; } - ASSERT(idx1 == idx2); + if (idx1 != idx2) { + fltError("filterMergeTwoGroups get invalid idx : %d and %d", idx1, idx2); + FLT_ERR_JRET(TSDB_CODE_APP_ERROR); + } ++merNum; @@ -2644,16 +2711,19 @@ int32_t filterMergeTwoGroups(SFilterInfo *info, SFilterGroupCtx **gRes1, SFilter } } - ASSERT(merNum > 0); - - SFilterColInfo *colInfo = NULL; - ASSERT(merNum == equal1 || merNum == equal2); + if (merNum == 0 || (equal1 != merNum && equal2 != merNum)) { + fltError("filterMergeTwoGroups get invalid merge num : %d, equal1 : %d, equal2 : %d", merNum, equal1, equal2); + FLT_ERR_JRET(TSDB_CODE_APP_ERROR); + } filterFreeGroupCtx(*gRes2); *gRes2 = NULL; - ASSERT(colCtxs && taosArrayGetSize(colCtxs) > 0); - + if (!colCtxs || taosArrayGetSize(colCtxs) <= 0) { + fltError("filterMergeTwoGroups get invalid colCtxs with size %zu", taosArrayGetSize(colCtxs)); + FLT_ERR_JRET(TSDB_CODE_APP_ERROR); + } + SFilterColInfo *colInfo = NULL; int32_t ctxSize = (int32_t)taosArrayGetSize(colCtxs); SFilterColCtx *pctx = NULL; @@ -2713,7 +2783,10 @@ int32_t filterMergeGroups(SFilterInfo *info, SFilterGroupCtx **gRes, int32_t *gR if (pColNum > 0) { for (int32_t m = 0; m <= pEnd; ++m) { for (int32_t n = cStart; n <= cEnd; ++n) { - ASSERT(m < n); + if (m >= n) { + fltError("filterMergeGroups get invalid m : %d and n : %d", m, n); + FLT_ERR_JRET(TSDB_CODE_APP_ERROR); + } FLT_ERR_JRET(filterMergeTwoGroups(info, &gRes[m], &gRes[n], &all)); FLT_CHK_JMP(all); @@ -2734,7 +2807,10 @@ int32_t filterMergeGroups(SFilterInfo *info, SFilterGroupCtx **gRes, int32_t *gR for (int32_t m = cStart; m < cEnd; ++m) { for (int32_t n = m + 1; n <= cEnd; ++n) { - ASSERT(m < n); + if (m >= n) { + fltError("filterMergeGroups get invalid m : %d and n : %d", m, n); + FLT_ERR_JRET(TSDB_CODE_APP_ERROR); + } FLT_ERR_JRET(filterMergeTwoGroups(info, &gRes[m], &gRes[n], &all)); FLT_CHK_JMP(all); @@ -2844,7 +2920,10 @@ int32_t filterRewrite(SFilterInfo *info, SFilterGroupCtx **gRes, int32_t gResNum for (uint32_t m = 0; m < res->colNum; ++m) { colInfo = &res->colInfo[res->colIdx[m]]; if (FILTER_NO_MERGE_DATA_TYPE(colInfo->dataType)) { - ASSERT(colInfo->type == RANGE_TYPE_UNIT); + if (colInfo->type != RANGE_TYPE_UNIT) { + fltError("filterRewrite get invalid col type : %d", colInfo->type); + FLT_ERR_JRET(TSDB_CODE_QRY_FILTER_INVALID_TYPE); + } int32_t usize = (int32_t)taosArrayGetSize((SArray *)colInfo->info); for (int32_t n = 0; n < usize; ++n) { @@ -2859,7 +2938,10 @@ int32_t 
filterRewrite(SFilterInfo *info, SFilterGroupCtx **gRes, int32_t gResNum continue; } - ASSERT(colInfo->type == RANGE_TYPE_MR_CTX); + if (colInfo->type != RANGE_TYPE_MR_CTX) { + fltError("filterRewrite get invalid col type : %d", colInfo->type); + FLT_ERR_JRET(TSDB_CODE_QRY_FILTER_INVALID_TYPE); + } FLT_ERR_JRET(filterAddGroupUnitFromCtx(info, &oinfo, colInfo->info, res->colIdx[m], &ng, optr, group)); } @@ -2905,7 +2987,10 @@ int32_t filterGenerateColRange(SFilterInfo *info, SFilterGroupCtx **gRes, int32_ continue; } - ASSERT(idxNum[i] == gResNum); + if (idxNum[i] != gResNum) { + fltError("filterGenerateColRange get invalid idxNum : %d and gResNum : %d", idxNum[i], gResNum); + FLT_ERR_JRET(TSDB_CODE_APP_ERROR); + } if (idxs == NULL) { idxs = taosMemoryCalloc(info->fields[FLD_TYPE_COLUMN].num, sizeof(*idxs)); @@ -2936,7 +3021,10 @@ int32_t filterGenerateColRange(SFilterInfo *info, SFilterGroupCtx **gRes, int32_ continue; } - ASSERT(res->colIdx[n] == idxs[m]); + if (res->colIdx[n] != idxs[m]) { + fltError("filterGenerateColRange get invalid colIdx : %d and idxs : %d", res->colIdx[n], idxs[m]); + SCL_ERR_JRET(TSDB_CODE_APP_ERROR); + } SFilterColInfo *colInfo = &res->colInfo[res->colIdx[n]]; if (info->colRange[m] == NULL) { @@ -2945,7 +3033,10 @@ int32_t filterGenerateColRange(SFilterInfo *info, SFilterGroupCtx **gRes, int32_ info->colRange[m]->colId = FILTER_GET_COL_FIELD_ID(fi); } - ASSERT(colInfo->type == RANGE_TYPE_MR_CTX); + if (colInfo->type != RANGE_TYPE_MR_CTX) { + fltError("filterGenerateColRange get invalid col type : %d", colInfo->type); + FLT_ERR_JRET(TSDB_CODE_QRY_FILTER_INVALID_TYPE); + } bool all = false; FLT_ERR_JRET(filterSourceRangeFromCtx(info->colRange[m], colInfo->info, LOGIC_COND_TYPE_OR, NULL, &all)); @@ -3195,7 +3286,10 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SColumnDataAgg *pDataStatis, int3 unitIdx = pGroupIdx; --info->blkGroupNum; - ASSERT(empty || all); + if (!empty && !all) { + fltError("filterRmUnitByRange get invalid empty and all : %d and %d", empty, all); + FLT_ERR_RET(TSDB_CODE_APP_ERROR); + } if (empty) { FILTER_SET_FLAG(info->blkFlag, FI_STATUS_BLK_EMPTY); @@ -3303,7 +3397,10 @@ int32_t filterExecuteBasedOnStatis(SFilterInfo *info, int32_t numOfRows, SColumn goto _return; } - ASSERT(info->unitNum > 1); + if (info->unitNum <= 1) { + fltError("filterExecuteBasedOnStatis get invalid unit num : %d", info->unitNum); + FLT_ERR_JRET(TSDB_CODE_APP_ERROR); + } *all = filterExecuteBasedOnStatisImpl(info, numOfRows, p, statis, numOfCols); goto _return; @@ -5161,7 +5258,6 @@ int32_t filterExecute(SFilterInfo *info, SSDataBlock *pSrc, SColumnInfoData **p, return TSDB_CODE_SUCCESS; } - ASSERT(false == info->scalarMode); *p = output.columnData; output.numOfRows = pSrc->info.rows; diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index f81205df7a..37daff1d63 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -297,7 +297,8 @@ static int32_t doScalarFunctionUnique(SScalarParam *pInput, int32_t inputNum, SS SColumnInfoData *pInputData = pInput->columnData; SColumnInfoData *pOutputData = pOutput->columnData; - _getDoubleValue_fn_t getValueFn = getVectorDoubleValueFn(type); + _getDoubleValue_fn_t getValueFn; + SCL_ERR_RET(getVectorDoubleValueFn(type, &getValueFn)); double *out = (double *)pOutputData->pData; @@ -328,7 +329,7 @@ static int32_t doScalarFunctionUnique2(SScalarParam *pInput, int32_t inputNum, S for (int32_t i = 0; i < inputNum; ++i) { pInputData[i] = pInput[i].columnData; - getValueFn[i] = 
getVectorDoubleValueFn(GET_PARAM_TYPE(&pInput[i])); + SCL_ERR_RET(getVectorDoubleValueFn(GET_PARAM_TYPE(&pInput[i]), &getValueFn[i])); } double *out = (double *)pOutputData->pData; @@ -2918,7 +2919,7 @@ static int32_t doScalarFunction2(SScalarParam *pInput, int32_t inputNum, SScalar for (int32_t i = 0; i < inputNum; ++i) { pInputData[i] = pInput[i].columnData; - getValueFn[i] = getVectorDoubleValueFn(GET_PARAM_TYPE(&pInput[i])); + SCL_ERR_RET(getVectorDoubleValueFn(GET_PARAM_TYPE(&pInput[i]), &getValueFn[i])); } bool hasNullType = (IS_NULL_TYPE(GET_PARAM_TYPE(&pInput[0])) || IS_NULL_TYPE(GET_PARAM_TYPE(&pInput[1]))); diff --git a/source/libs/scalar/src/sclvector.c b/source/libs/scalar/src/sclvector.c index 71773ced57..5556108a52 100644 --- a/source/libs/scalar/src/sclvector.c +++ b/source/libs/scalar/src/sclvector.c @@ -43,7 +43,7 @@ bool noConvertBeforeCompare(int32_t leftType, int32_t rightType, int32_t optr) { (optr >= OP_TYPE_GREATER_THAN && optr <= OP_TYPE_NOT_EQUAL); } -void convertNumberToNumber(const void *inData, void *outData, int8_t inType, int8_t outType) { +int32_t convertNumberToNumber(const void *inData, void *outData, int8_t inType, int8_t outType) { switch (outType) { case TSDB_DATA_TYPE_BOOL: { GET_TYPED_DATA(*((bool *)outData), bool, inType, inData); @@ -91,9 +91,10 @@ void convertNumberToNumber(const void *inData, void *outData, int8_t inType, int break; } default: { - ASSERT(0); + return TSDB_CODE_SCALAR_CONVERT_ERROR; } } + return TSDB_CODE_SUCCESS; } int32_t convertNcharToDouble(const void *inData, void *outData) { @@ -180,7 +181,10 @@ int32_t getVectorBigintValue_BOOL(void *src, int32_t index, int64_t *res) { } int32_t getVectorBigintValue_JSON(void *src, int32_t index, int64_t *res) { - ASSERT(!colDataIsNull_var(((SColumnInfoData *)src), index)); + if (colDataIsNull_var(((SColumnInfoData *)src), index)) { + sclError("getVectorBigintValue_JSON get json data null with index %d", index); + SCL_ERR_RET(TSDB_CODE_SCALAR_CONVERT_ERROR); + } char *data = colDataGetVarData((SColumnInfoData *)src, index); double out = 0; if (*data == TSDB_DATA_TYPE_NULL) { @@ -192,46 +196,47 @@ int32_t getVectorBigintValue_JSON(void *src, int32_t index, int64_t *res) { *res = 0; SCL_ERR_RET(TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR); } else { - convertNumberToNumber(data + CHAR_BYTES, &out, *data, TSDB_DATA_TYPE_DOUBLE); + SCL_ERR_RET(convertNumberToNumber(data + CHAR_BYTES, &out, *data, TSDB_DATA_TYPE_DOUBLE)); } *res = (int64_t)out; SCL_RET(TSDB_CODE_SUCCESS); } -_getBigintValue_fn_t getVectorBigintValueFn(int32_t srcType) { - _getBigintValue_fn_t p = NULL; +int32_t getVectorBigintValueFn(int32_t srcType, _getBigintValue_fn_t *p) { + *p = NULL; if (srcType == TSDB_DATA_TYPE_TINYINT) { - p = getVectorBigintValue_TINYINT; + *p = getVectorBigintValue_TINYINT; } else if (srcType == TSDB_DATA_TYPE_UTINYINT) { - p = getVectorBigintValue_UTINYINT; + *p = getVectorBigintValue_UTINYINT; } else if (srcType == TSDB_DATA_TYPE_SMALLINT) { - p = getVectorBigintValue_SMALLINT; + *p = getVectorBigintValue_SMALLINT; } else if (srcType == TSDB_DATA_TYPE_USMALLINT) { - p = getVectorBigintValue_USMALLINT; + *p = getVectorBigintValue_USMALLINT; } else if (srcType == TSDB_DATA_TYPE_INT) { - p = getVectorBigintValue_INT; + *p = getVectorBigintValue_INT; } else if (srcType == TSDB_DATA_TYPE_UINT) { - p = getVectorBigintValue_UINT; + *p = getVectorBigintValue_UINT; } else if (srcType == TSDB_DATA_TYPE_BIGINT) { - p = getVectorBigintValue_BIGINT; + *p = getVectorBigintValue_BIGINT; } else if (srcType == 
TSDB_DATA_TYPE_UBIGINT) { - p = getVectorBigintValue_UBIGINT; + *p = getVectorBigintValue_UBIGINT; } else if (srcType == TSDB_DATA_TYPE_FLOAT) { - p = getVectorBigintValue_FLOAT; + *p = getVectorBigintValue_FLOAT; } else if (srcType == TSDB_DATA_TYPE_DOUBLE) { - p = getVectorBigintValue_DOUBLE; + *p = getVectorBigintValue_DOUBLE; } else if (srcType == TSDB_DATA_TYPE_TIMESTAMP) { - p = getVectorBigintValue_BIGINT; + *p = getVectorBigintValue_BIGINT; } else if (srcType == TSDB_DATA_TYPE_BOOL) { - p = getVectorBigintValue_BOOL; + *p = getVectorBigintValue_BOOL; } else if (srcType == TSDB_DATA_TYPE_JSON) { - p = getVectorBigintValue_JSON; + *p = getVectorBigintValue_JSON; } else if (srcType == TSDB_DATA_TYPE_NULL) { - p = NULL; + *p = NULL; } else { - ASSERT(0); + sclError("getVectorBigintValueFn invalid srcType : %d", srcType); + return TSDB_CODE_SCALAR_CONVERT_ERROR; } - return p; + return TSDB_CODE_SUCCESS; } static FORCE_INLINE int32_t varToTimestamp(char *buf, SScalarParam *pOut, int32_t rowIndex, int32_t *overflow) { @@ -467,7 +472,7 @@ static FORCE_INLINE int32_t varToGeometry(char *buf, SScalarParam *pOut, int32_t _return: taosMemoryFree(output); geosFreeBuffer(t); - ASSERT(t == NULL && len == 0); + t = NULL; VarDataLenT dummyHeader = 0; SCL_ERR_RET(colDataSetVal(pOut->columnData, rowIndex, (const char *)&dummyHeader, false)); SCL_RET(code); @@ -525,7 +530,7 @@ int32_t vectorConvertFromVarData(SSclVectorConvCtx *pCtx, int32_t *overflow) { } else if (tTagIsJson(data) || *data == TSDB_DATA_TYPE_NULL) { SCL_ERR_JRET(TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR); } else { - convertNumberToNumber(data + CHAR_BYTES, colDataGetNumData(pCtx->pOut->columnData, i), *data, pCtx->outType); + SCL_ERR_JRET(convertNumberToNumber(data + CHAR_BYTES, colDataGetNumData(pCtx->pOut->columnData, i), *data, pCtx->outType)); continue; } } @@ -582,7 +587,7 @@ int32_t getVectorDoubleValue_JSON(void *src, int32_t index, double *out) { } else if (tTagIsJson(data)) { SCL_ERR_RET(TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR); } else { - convertNumberToNumber(data + CHAR_BYTES, out, *data, TSDB_DATA_TYPE_DOUBLE); + SCL_ERR_RET(convertNumberToNumber(data + CHAR_BYTES, out, *data, TSDB_DATA_TYPE_DOUBLE)); } SCL_RET(TSDB_CODE_SUCCESS); } @@ -673,7 +678,7 @@ int32_t convertJsonValue(__compar_fn_t *fp, int32_t optr, int8_t typeLeft, int8_ *result = false; return TSDB_CODE_SUCCESS; } else if (typeLeft != type) { - convertNumberToNumber(*pLeftData, pLeftOut, typeLeft, type); + SCL_ERR_RET(convertNumberToNumber(*pLeftData, pLeftOut, typeLeft, type)); *pLeftData = pLeftOut; } @@ -683,7 +688,7 @@ int32_t convertJsonValue(__compar_fn_t *fp, int32_t optr, int8_t typeLeft, int8_ *result = false; return TSDB_CODE_SUCCESS; } else if (typeRight != type) { - convertNumberToNumber(*pRightData, pRightOut, typeRight, type); + SCL_ERR_RET(convertNumberToNumber(*pRightData, pRightOut, typeRight, type)); *pRightData = pRightOut; } } else if (type == TSDB_DATA_TYPE_BINARY || @@ -1130,8 +1135,10 @@ enum { // TODO not correct for descending order scan static int32_t vectorMathAddHelper(SColumnInfoData *pLeftCol, SColumnInfoData *pRightCol, SColumnInfoData *pOutputCol, int32_t numOfRows, int32_t step, int32_t i) { - _getDoubleValue_fn_t getVectorDoubleValueFnLeft = getVectorDoubleValueFn(pLeftCol->info.type); - _getDoubleValue_fn_t getVectorDoubleValueFnRight = getVectorDoubleValueFn(pRightCol->info.type); + _getDoubleValue_fn_t getVectorDoubleValueFnLeft; + _getDoubleValue_fn_t getVectorDoubleValueFnRight; + 
SCL_ERR_RET(getVectorDoubleValueFn(pLeftCol->info.type, &getVectorDoubleValueFnLeft)); + SCL_ERR_RET(getVectorDoubleValueFn(pRightCol->info.type, &getVectorDoubleValueFnRight)); double *output = (double *)pOutputCol->pData; @@ -1155,9 +1162,10 @@ static int32_t vectorMathAddHelper(SColumnInfoData *pLeftCol, SColumnInfoData *p static int32_t vectorMathTsAddHelper(SColumnInfoData *pLeftCol, SColumnInfoData *pRightCol, SColumnInfoData *pOutputCol, int32_t numOfRows, int32_t step, int32_t i) { - _getBigintValue_fn_t getVectorBigintValueFnLeft = getVectorBigintValueFn(pLeftCol->info.type); - _getBigintValue_fn_t getVectorBigintValueFnRight = getVectorBigintValueFn(pRightCol->info.type); - + _getBigintValue_fn_t getVectorBigintValueFnLeft; + _getBigintValue_fn_t getVectorBigintValueFnRight; + SCL_ERR_RET(getVectorBigintValueFn(pLeftCol->info.type, &getVectorBigintValueFnLeft)); + SCL_ERR_RET(getVectorBigintValueFn(pRightCol->info.type, &getVectorBigintValueFnRight)); int64_t *output = (int64_t *)pOutputCol->pData; if (IS_HELPER_NULL(pRightCol, 0)) { // Set pLeft->numOfRows NULL value @@ -1230,8 +1238,10 @@ int32_t vectorMathAdd(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam *p (GET_PARAM_TYPE(pRight) == TSDB_DATA_TYPE_TIMESTAMP && GET_PARAM_TYPE(pLeft) == TSDB_DATA_TYPE_BOOL)) { // timestamp plus duration int64_t *output = (int64_t *)pOutputCol->pData; - _getBigintValue_fn_t getVectorBigintValueFnLeft = getVectorBigintValueFn(pLeftCol->info.type); - _getBigintValue_fn_t getVectorBigintValueFnRight = getVectorBigintValueFn(pRightCol->info.type); + _getBigintValue_fn_t getVectorBigintValueFnLeft; + _getBigintValue_fn_t getVectorBigintValueFnRight; + SCL_ERR_JRET(getVectorBigintValueFn(pLeftCol->info.type, &getVectorBigintValueFnLeft)); + SCL_ERR_JRET(getVectorBigintValueFn(pRightCol->info.type, &getVectorBigintValueFnRight)); if (pLeft->numOfRows == 1 && pRight->numOfRows == 1) { if (GET_PARAM_TYPE(pLeft) == TSDB_DATA_TYPE_TIMESTAMP) { @@ -1258,9 +1268,10 @@ int32_t vectorMathAdd(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam *p } } else { double *output = (double *)pOutputCol->pData; - _getDoubleValue_fn_t getVectorDoubleValueFnLeft = getVectorDoubleValueFn(pLeftCol->info.type); - _getDoubleValue_fn_t getVectorDoubleValueFnRight = getVectorDoubleValueFn(pRightCol->info.type); - + _getDoubleValue_fn_t getVectorDoubleValueFnLeft; + _getDoubleValue_fn_t getVectorDoubleValueFnRight; + SCL_ERR_JRET(getVectorDoubleValueFn(pLeftCol->info.type, &getVectorDoubleValueFnLeft)); + SCL_ERR_JRET(getVectorDoubleValueFn(pRightCol->info.type, &getVectorDoubleValueFnRight)); if (pLeft->numOfRows == pRight->numOfRows) { for (; i < pRight->numOfRows && i >= 0; i += step, output += 1) { if (IS_NULL) { @@ -1289,8 +1300,10 @@ _return: // TODO not correct for descending order scan static int32_t vectorMathSubHelper(SColumnInfoData *pLeftCol, SColumnInfoData *pRightCol, SColumnInfoData *pOutputCol, int32_t numOfRows, int32_t step, int32_t factor, int32_t i) { - _getDoubleValue_fn_t getVectorDoubleValueFnLeft = getVectorDoubleValueFn(pLeftCol->info.type); - _getDoubleValue_fn_t getVectorDoubleValueFnRight = getVectorDoubleValueFn(pRightCol->info.type); + _getDoubleValue_fn_t getVectorDoubleValueFnLeft; + _getDoubleValue_fn_t getVectorDoubleValueFnRight; + SCL_ERR_RET(getVectorDoubleValueFn(pLeftCol->info.type, &getVectorDoubleValueFnLeft)); + SCL_ERR_RET(getVectorDoubleValueFn(pRightCol->info.type, &getVectorDoubleValueFnRight)); double *output = (double *)pOutputCol->pData; @@ -1314,8 +1327,10 @@ static 
int32_t vectorMathSubHelper(SColumnInfoData *pLeftCol, SColumnInfoData *p static int32_t vectorMathTsSubHelper(SColumnInfoData *pLeftCol, SColumnInfoData *pRightCol, SColumnInfoData *pOutputCol, int32_t numOfRows, int32_t step, int32_t factor, int32_t i) { - _getBigintValue_fn_t getVectorBigintValueFnLeft = getVectorBigintValueFn(pLeftCol->info.type); - _getBigintValue_fn_t getVectorBigintValueFnRight = getVectorBigintValueFn(pRightCol->info.type); + _getBigintValue_fn_t getVectorBigintValueFnLeft; + _getBigintValue_fn_t getVectorBigintValueFnRight; + SCL_ERR_RET(getVectorBigintValueFn(pLeftCol->info.type, &getVectorBigintValueFnLeft)); + SCL_ERR_RET(getVectorBigintValueFn(pRightCol->info.type, &getVectorBigintValueFnRight)); int64_t *output = (int64_t *)pOutputCol->pData; @@ -1357,8 +1372,10 @@ int32_t vectorMathSub(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam *p (GET_PARAM_TYPE(pRight) == TSDB_DATA_TYPE_TIMESTAMP && GET_PARAM_TYPE(pLeft) == TSDB_DATA_TYPE_BIGINT)) { // timestamp minus duration int64_t *output = (int64_t *)pOutputCol->pData; - _getBigintValue_fn_t getVectorBigintValueFnLeft = getVectorBigintValueFn(pLeftCol->info.type); - _getBigintValue_fn_t getVectorBigintValueFnRight = getVectorBigintValueFn(pRightCol->info.type); + _getBigintValue_fn_t getVectorBigintValueFnLeft; + _getBigintValue_fn_t getVectorBigintValueFnRight; + SCL_ERR_JRET(getVectorBigintValueFn(pLeftCol->info.type, &getVectorBigintValueFnLeft)); + SCL_ERR_JRET(getVectorBigintValueFn(pRightCol->info.type, &getVectorBigintValueFnRight)); if (pLeft->numOfRows == 1 && pRight->numOfRows == 1) { SCL_ERR_JRET(vectorMathTsSubHelper(pLeftCol, pRightCol, pOutputCol, pLeft->numOfRows, step, 1, i)); @@ -1381,8 +1398,10 @@ int32_t vectorMathSub(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam *p } } else { double *output = (double *)pOutputCol->pData; - _getDoubleValue_fn_t getVectorDoubleValueFnLeft = getVectorDoubleValueFn(pLeftCol->info.type); - _getDoubleValue_fn_t getVectorDoubleValueFnRight = getVectorDoubleValueFn(pRightCol->info.type); + _getDoubleValue_fn_t getVectorDoubleValueFnLeft; + _getDoubleValue_fn_t getVectorDoubleValueFnRight; + SCL_ERR_JRET(getVectorDoubleValueFn(pLeftCol->info.type, &getVectorDoubleValueFnLeft)); + SCL_ERR_JRET(getVectorDoubleValueFn(pRightCol->info.type, &getVectorDoubleValueFnRight)); if (pLeft->numOfRows == pRight->numOfRows) { for (; i < pRight->numOfRows && i >= 0; i += step, output += 1) { @@ -1412,8 +1431,10 @@ _return: // TODO not correct for descending order scan static int32_t vectorMathMultiplyHelper(SColumnInfoData *pLeftCol, SColumnInfoData *pRightCol, SColumnInfoData *pOutputCol, int32_t numOfRows, int32_t step, int32_t i) { - _getDoubleValue_fn_t getVectorDoubleValueFnLeft = getVectorDoubleValueFn(pLeftCol->info.type); - _getDoubleValue_fn_t getVectorDoubleValueFnRight = getVectorDoubleValueFn(pRightCol->info.type); + _getDoubleValue_fn_t getVectorDoubleValueFnLeft; + _getDoubleValue_fn_t getVectorDoubleValueFnRight; + SCL_ERR_RET(getVectorDoubleValueFn(pLeftCol->info.type, &getVectorDoubleValueFnLeft)); + SCL_ERR_RET(getVectorDoubleValueFn(pRightCol->info.type, &getVectorDoubleValueFnRight)); double *output = (double *)pOutputCol->pData; @@ -1449,8 +1470,10 @@ int32_t vectorMathMultiply(SScalarParam *pLeft, SScalarParam *pRight, SScalarPar SCL_ERR_JRET(vectorConvertVarToDouble(pLeft, &leftConvert, &pLeftCol)); SCL_ERR_JRET(vectorConvertVarToDouble(pRight, &rightConvert, &pRightCol)); - _getDoubleValue_fn_t getVectorDoubleValueFnLeft = 
getVectorDoubleValueFn(pLeftCol->info.type); - _getDoubleValue_fn_t getVectorDoubleValueFnRight = getVectorDoubleValueFn(pRightCol->info.type); + _getDoubleValue_fn_t getVectorDoubleValueFnLeft; + _getDoubleValue_fn_t getVectorDoubleValueFnRight; + SCL_ERR_JRET(getVectorDoubleValueFn(pLeftCol->info.type, &getVectorDoubleValueFnLeft)); + SCL_ERR_JRET(getVectorDoubleValueFn(pRightCol->info.type, &getVectorDoubleValueFnRight)); double *output = (double *)pOutputCol->pData; if (pLeft->numOfRows == pRight->numOfRows) { @@ -1491,8 +1514,10 @@ int32_t vectorMathDivide(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam SCL_ERR_JRET(vectorConvertVarToDouble(pLeft, &leftConvert, &pLeftCol)); SCL_ERR_JRET(vectorConvertVarToDouble(pRight, &rightConvert, &pRightCol)); - _getDoubleValue_fn_t getVectorDoubleValueFnLeft = getVectorDoubleValueFn(pLeftCol->info.type); - _getDoubleValue_fn_t getVectorDoubleValueFnRight = getVectorDoubleValueFn(pRightCol->info.type); + _getDoubleValue_fn_t getVectorDoubleValueFnLeft; + _getDoubleValue_fn_t getVectorDoubleValueFnRight; + SCL_ERR_JRET(getVectorDoubleValueFn(pLeftCol->info.type, &getVectorDoubleValueFnLeft)); + SCL_ERR_JRET(getVectorDoubleValueFn(pRightCol->info.type, &getVectorDoubleValueFnRight)); double *output = (double *)pOutputCol->pData; if (pLeft->numOfRows == pRight->numOfRows) { @@ -1573,8 +1598,10 @@ int32_t vectorMathRemainder(SScalarParam *pLeft, SScalarParam *pRight, SScalarPa SCL_ERR_JRET(vectorConvertVarToDouble(pLeft, &leftConvert, &pLeftCol)); SCL_ERR_JRET(vectorConvertVarToDouble(pRight, &rightConvert, &pRightCol)); - _getDoubleValue_fn_t getVectorDoubleValueFnLeft = getVectorDoubleValueFn(pLeftCol->info.type); - _getDoubleValue_fn_t getVectorDoubleValueFnRight = getVectorDoubleValueFn(pRightCol->info.type); + _getDoubleValue_fn_t getVectorDoubleValueFnLeft; + _getDoubleValue_fn_t getVectorDoubleValueFnRight; + SCL_ERR_JRET(getVectorDoubleValueFn(pLeftCol->info.type, &getVectorDoubleValueFnLeft)); + SCL_ERR_JRET(getVectorDoubleValueFn(pRightCol->info.type, &getVectorDoubleValueFnRight)); double *output = (double *)pOutputCol->pData; @@ -1661,7 +1688,8 @@ int32_t vectorMathMinus(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam SColumnInfoData *pLeftCol = NULL; SCL_ERR_JRET(vectorConvertVarToDouble(pLeft, &leftConvert, &pLeftCol)); - _getDoubleValue_fn_t getVectorDoubleValueFnLeft = getVectorDoubleValueFn(pLeftCol->info.type); + _getDoubleValue_fn_t getVectorDoubleValueFnLeft; + SCL_ERR_JRET(getVectorDoubleValueFn(pLeftCol->info.type, &getVectorDoubleValueFnLeft)); double *output = (double *)pOutputCol->pData; for (; i < pLeft->numOfRows && i >= 0; i += step, output += 1) { @@ -1692,15 +1720,20 @@ int32_t vectorAssign(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam *pO } } - ASSERT(pRight->numOfQualified == 1 || pRight->numOfQualified == 0); + if (pRight->numOfQualified != 1 && pRight->numOfQualified != 0) { + sclError("vectorAssign: invalid qualified number %d", pRight->numOfQualified); + SCL_ERR_RET(TSDB_CODE_APP_ERROR); + } pOut->numOfQualified = pRight->numOfQualified * pOut->numOfRows; return TSDB_CODE_SUCCESS; } static int32_t vectorBitAndHelper(SColumnInfoData *pLeftCol, SColumnInfoData *pRightCol, SColumnInfoData *pOutputCol, int32_t numOfRows, int32_t step, int32_t i) { - _getBigintValue_fn_t getVectorBigintValueFnLeft = getVectorBigintValueFn(pLeftCol->info.type); - _getBigintValue_fn_t getVectorBigintValueFnRight = getVectorBigintValueFn(pRightCol->info.type); + _getBigintValue_fn_t getVectorBigintValueFnLeft; + 
_getBigintValue_fn_t getVectorBigintValueFnRight; + SCL_ERR_RET(getVectorBigintValueFn(pLeftCol->info.type, &getVectorBigintValueFnLeft)); + SCL_ERR_RET(getVectorBigintValueFn(pRightCol->info.type, &getVectorBigintValueFnRight)); int64_t *output = (int64_t *)pOutputCol->pData; @@ -1736,8 +1769,10 @@ int32_t vectorBitAnd(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam *pO SCL_ERR_JRET(vectorConvertVarToDouble(pLeft, &leftConvert, &pLeftCol)); SCL_ERR_JRET(vectorConvertVarToDouble(pRight, &rightConvert, &pRightCol)); - _getBigintValue_fn_t getVectorBigintValueFnLeft = getVectorBigintValueFn(pLeftCol->info.type); - _getBigintValue_fn_t getVectorBigintValueFnRight = getVectorBigintValueFn(pRightCol->info.type); + _getBigintValue_fn_t getVectorBigintValueFnLeft; + _getBigintValue_fn_t getVectorBigintValueFnRight; + SCL_ERR_JRET(getVectorBigintValueFn(pLeftCol->info.type, &getVectorBigintValueFnLeft)); + SCL_ERR_JRET(getVectorBigintValueFn(pRightCol->info.type, &getVectorBigintValueFnRight)); int64_t *output = (int64_t *)pOutputCol->pData; if (pLeft->numOfRows == pRight->numOfRows) { @@ -1766,8 +1801,10 @@ _return: static int32_t vectorBitOrHelper(SColumnInfoData *pLeftCol, SColumnInfoData *pRightCol, SColumnInfoData *pOutputCol, int32_t numOfRows, int32_t step, int32_t i) { - _getBigintValue_fn_t getVectorBigintValueFnLeft = getVectorBigintValueFn(pLeftCol->info.type); - _getBigintValue_fn_t getVectorBigintValueFnRight = getVectorBigintValueFn(pRightCol->info.type); + _getBigintValue_fn_t getVectorBigintValueFnLeft; + _getBigintValue_fn_t getVectorBigintValueFnRight; + SCL_ERR_RET(getVectorBigintValueFn(pLeftCol->info.type, &getVectorBigintValueFnLeft)); + SCL_ERR_RET(getVectorBigintValueFn(pRightCol->info.type, &getVectorBigintValueFnRight)); int64_t *output = (int64_t *)pOutputCol->pData; @@ -1803,8 +1840,10 @@ int32_t vectorBitOr(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam *pOu SCL_ERR_JRET(vectorConvertVarToDouble(pLeft, &leftConvert, &pLeftCol)); SCL_ERR_JRET(vectorConvertVarToDouble(pRight, &rightConvert, &pRightCol)); - _getBigintValue_fn_t getVectorBigintValueFnLeft = getVectorBigintValueFn(pLeftCol->info.type); - _getBigintValue_fn_t getVectorBigintValueFnRight = getVectorBigintValueFn(pRightCol->info.type); + _getBigintValue_fn_t getVectorBigintValueFnLeft; + _getBigintValue_fn_t getVectorBigintValueFnRight; + SCL_ERR_JRET(getVectorBigintValueFn(pLeftCol->info.type, &getVectorBigintValueFnLeft)); + SCL_ERR_JRET(getVectorBigintValueFn(pRightCol->info.type, &getVectorBigintValueFnRight)); int64_t *output = (int64_t *)pOutputCol->pData; if (pLeft->numOfRows == pRight->numOfRows) { @@ -1892,7 +1931,8 @@ int32_t doVectorCompareImpl(SScalarParam *pLeft, SScalarParam *pRight, SScalarPa &leftOut, &rightOut, &isJsonnull, &freeLeft, &freeRight, &result)); if (isJsonnull) { - ASSERT(0); + sclError("doVectorCompareImpl: invalid json null value"); + SCL_ERR_RET(TSDB_CODE_APP_ERROR); } if (!pLeftData || !pRightData) { diff --git a/source/libs/scalar/test/filter/CMakeLists.txt b/source/libs/scalar/test/filter/CMakeLists.txt index 94af1eb6f0..44a0395286 100644 --- a/source/libs/scalar/test/filter/CMakeLists.txt +++ b/source/libs/scalar/test/filter/CMakeLists.txt @@ -1,7 +1,7 @@ MESSAGE(STATUS "build filter unit test") -IF(NOT TD_DARWIN) +IF(TD_DARWIN) # GoogleTest requires at least C++11 SET(CMAKE_CXX_STANDARD 11) AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) diff --git a/source/libs/scalar/test/filter/filterTests.cpp 
b/source/libs/scalar/test/filter/filterTests.cpp index 33d2c1e2ef..b970bf5297 100644 --- a/source/libs/scalar/test/filter/filterTests.cpp +++ b/source/libs/scalar/test/filter/filterTests.cpp @@ -113,8 +113,7 @@ int32_t flttMakeColumnNode(SNode **pNode, SSDataBlock **block, int32_t dataType, if (NULL == *block) { SSDataBlock *res = NULL; - int32_t code = createDataBlock(&res); - ASSERT(code == 0); + FLT_ERR_RET(createDataBlock(&res)); for (int32_t i = 0; i < 2; ++i) { SColumnInfoData idata = createColumnInfoData(TSDB_DATA_TYPE_NULL, 10, 1 + i); diff --git a/source/libs/scalar/test/scalar/scalarTests.cpp b/source/libs/scalar/test/scalar/scalarTests.cpp index 032b197046..c29f7b8a5b 100644 --- a/source/libs/scalar/test/scalar/scalarTests.cpp +++ b/source/libs/scalar/test/scalar/scalarTests.cpp @@ -115,7 +115,10 @@ int32_t scltAppendReservedSlot(SArray *pBlockList, int16_t *dataBlockId, int16_t res->info.capacity = rows; res->info.rows = rows; SColumnInfoData *p = static_cast(taosArrayGet(res->pDataBlock, 0)); - ASSERT(p->pData != NULL && p->nullbitmap != NULL); + if (p->pData == NULL || p->nullbitmap == NULL) { + sclError("data block is not initialized since pData or nullbitmap is NULL"); + SCL_ERR_RET(TSDB_CODE_APP_ERROR); + } (void)taosArrayPush(pBlockList, &res); *dataBlockId = taosArrayGetSize(pBlockList) - 1; @@ -189,8 +192,7 @@ int32_t scltMakeColumnNode(SNode **pNode, SSDataBlock **block, int32_t dataType, if (NULL == *block) { SSDataBlock *res = NULL; - int32_t code = createDataBlock(&res); - ASSERT(code == 0); + SCL_ERR_RET(createDataBlock(&res)); for (int32_t i = 0; i < 2; ++i) { SColumnInfoData idata = createColumnInfoData(TSDB_DATA_TYPE_INT, 10, i + 1); @@ -1422,8 +1424,10 @@ int32_t makeCalculate(void *json, void *key, int32_t rightType, void *rightData, opType == OP_TYPE_NMATCH) { (void)printf("op:%s,3result:%d,except:%f\n", operatorTypeStr(opType), *((bool *)colDataGetData(column, 0)), exceptValue); - assert(*(bool *)colDataGetData(column, 0) == exceptValue); -// ASSERT_EQ((int) *((bool *)colDataGetData(column, 0)), (int)exceptValue); + if(*(bool *)colDataGetData(column, 0) != exceptValue) { + (void)printf("expect value %d, but got %d\n", *((bool *)colDataGetData(column, 0)), exceptValue); + SCL_ERR_RET(TSDB_CODE_FAILED); + } } taosArrayDestroyEx(blockList, scltFreeDataBlock); diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 396abf21a7..c04e34b443 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -478,6 +478,9 @@ TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INVALID_WINDOW_CONDITION, "The time pseudo colum TAOS_DEFINE_ERROR(TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR, "Executor internal error") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INVALID_JOIN_CONDITION, "Not supported join on condition") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_FILTER_NOT_SUPPORT_TYPE, "Not supported range type") +TAOS_DEFINE_ERROR(TSDB_CODE_QRY_FILTER_WRONG_OPTR_TYPE, "Wrong operator type") +TAOS_DEFINE_ERROR(TSDB_CODE_QRY_FILTER_RANGE_ERROR, "Wrong filter range") +TAOS_DEFINE_ERROR(TSDB_CODE_QRY_FILTER_INVALID_TYPE, "Invalid filter type") // grant TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_EXPIRED, "License expired") From 6055b9172ec4e1192c077b7de1f57438ef444388 Mon Sep 17 00:00:00 2001 From: kailixu Date: Tue, 20 Aug 2024 09:03:57 +0800 Subject: [PATCH 158/181] fix: memory leak of geos --- source/libs/geometry/src/geosWrapper.c | 51 +++++++------------------- source/util/src/tgeosctx.c | 4 +- 2 files changed, 15 insertions(+), 40 deletions(-) diff --git a/source/libs/geometry/src/geosWrapper.c 
b/source/libs/geometry/src/geosWrapper.c index 4f3f7d75c2..7372521276 100644 --- a/source/libs/geometry/src/geosWrapper.c +++ b/source/libs/geometry/src/geosWrapper.c @@ -37,10 +37,7 @@ int32_t initCtxMakePoint() { int32_t code = TSDB_CODE_FAILED; SGeosContext *geosCtx = getThreadLocalGeosCtx(); - if (!geosCtx) { - code = TSDB_CODE_OUT_OF_MEMORY; - return code; - } + if (!geosCtx) return TSDB_CODE_OUT_OF_MEMORY; if (geosCtx->handle == NULL) { geosCtx->handle = GEOS_init_r(); @@ -67,10 +64,7 @@ int32_t doMakePoint(double x, double y, unsigned char **outputGeom, size_t *size int32_t code = TSDB_CODE_FAILED; SGeosContext *geosCtx = getThreadLocalGeosCtx(); - if (!geosCtx) { - code = TSDB_CODE_OUT_OF_MEMORY; - return code; - } + if (!geosCtx) return TSDB_CODE_OUT_OF_MEMORY; GEOSGeometry *geom = NULL; unsigned char *wkb = NULL; @@ -177,10 +171,7 @@ int32_t initCtxGeomFromText() { int32_t code = TSDB_CODE_FAILED; SGeosContext *geosCtx = getThreadLocalGeosCtx(); - if (!geosCtx) { - code = TSDB_CODE_OUT_OF_MEMORY; - return code; - } + if (!geosCtx) return TSDB_CODE_OUT_OF_MEMORY; if (geosCtx->handle == NULL) { geosCtx->handle = GEOS_init_r(); @@ -218,10 +209,7 @@ int32_t doGeomFromText(const char *inputWKT, unsigned char **outputGeom, size_t int32_t code = TSDB_CODE_FAILED; SGeosContext *geosCtx = getThreadLocalGeosCtx(); - if (!geosCtx) { - code = TSDB_CODE_OUT_OF_MEMORY; - return code; - } + if (!geosCtx) return TSDB_CODE_OUT_OF_MEMORY; GEOSGeometry *geom = NULL; unsigned char *wkb = NULL; @@ -258,10 +246,7 @@ int32_t initCtxAsText() { int32_t code = TSDB_CODE_FAILED; SGeosContext *geosCtx = getThreadLocalGeosCtx(); - if (!geosCtx) { - code = TSDB_CODE_OUT_OF_MEMORY; - return code; - } + if (!geosCtx) return TSDB_CODE_OUT_OF_MEMORY; if (geosCtx->handle == NULL) { geosCtx->handle = GEOS_init_r(); @@ -299,10 +284,7 @@ int32_t doAsText(const unsigned char *inputGeom, size_t size, char **outputWKT) int32_t code = TSDB_CODE_FAILED; SGeosContext *geosCtx = getThreadLocalGeosCtx(); - if (!geosCtx) { - code = TSDB_CODE_OUT_OF_MEMORY; - return code; - } + if (!geosCtx) return TSDB_CODE_OUT_OF_MEMORY; GEOSGeometry *geom = NULL; char *wkt = NULL; @@ -335,10 +317,7 @@ int32_t initCtxRelationFunc() { int32_t code = TSDB_CODE_FAILED; SGeosContext *geosCtx = getThreadLocalGeosCtx(); - if (!geosCtx) { - code = TSDB_CODE_OUT_OF_MEMORY; - return code; - } + if (!geosCtx) return TSDB_CODE_OUT_OF_MEMORY; if (geosCtx->handle == NULL) { geosCtx->handle = GEOS_init_r(); @@ -365,9 +344,7 @@ int32_t doGeosRelation(const GEOSGeometry *geom1, const GEOSPreparedGeometry *pr _geosPreparedRelationFunc_t swappedPreparedRelationFn) { SGeosContext *geosCtx = getThreadLocalGeosCtx(); - if (!geosCtx) { - return TSDB_CODE_OUT_OF_MEMORY; - } + if (!geosCtx) return TSDB_CODE_OUT_OF_MEMORY; if (!preparedGeom1) { if (!swapped) { @@ -429,11 +406,6 @@ int32_t doContainsProperly(const GEOSGeometry *geom1, const GEOSPreparedGeometry // need to call destroyGeometry(outputGeom, outputPreparedGeom) later int32_t readGeometry(const unsigned char *input, GEOSGeometry **outputGeom, const GEOSPreparedGeometry **outputPreparedGeom) { - SGeosContext *geosCtx = getThreadLocalGeosCtx(); - if (!geosCtx) { - return TSDB_CODE_OUT_OF_MEMORY; - } - ASSERT(outputGeom); // it is not allowed if outputGeom is NULL *outputGeom = NULL; @@ -445,6 +417,10 @@ int32_t readGeometry(const unsigned char *input, GEOSGeometry **outputGeom, return TSDB_CODE_SUCCESS; } + SGeosContext *geosCtx = getThreadLocalGeosCtx(); + + if (!geosCtx) return TSDB_CODE_OUT_OF_MEMORY; + 
*outputGeom = GEOSWKBReader_read_r(geosCtx->handle, geosCtx->WKBReader, varDataVal(input), varDataLen(input)); if (*outputGeom == NULL) { return TSDB_CODE_FUNC_FUNTION_PARA_VALUE; @@ -461,7 +437,8 @@ int32_t readGeometry(const unsigned char *input, GEOSGeometry **outputGeom, } void destroyGeometry(GEOSGeometry **geom, const GEOSPreparedGeometry **preparedGeom) { - SGeosContext *geosCtx = getThreadLocalGeosCtx(); + SGeosContext *geosCtx = acquireThreadLocalGeosCtx(); + if (!geosCtx) return; if (preparedGeom && *preparedGeom) { GEOSPreparedGeom_destroy_r(geosCtx->handle, *preparedGeom); diff --git a/source/util/src/tgeosctx.c b/source/util/src/tgeosctx.c index 473b7539fc..5d47452fda 100644 --- a/source/util/src/tgeosctx.c +++ b/source/util/src/tgeosctx.c @@ -31,9 +31,7 @@ static threadlocal SGeosContext *tlGeosCtx = NULL; SGeosContext *acquireThreadLocalGeosCtx() { return tlGeosCtx; } SGeosContext *getThreadLocalGeosCtx() { - if (tlGeosCtx) { - return tlGeosCtx; - } + if (tlGeosCtx) return tlGeosCtx; taosWLockLatch(&sGeosPool.lock); if (sGeosPool.size >= sGeosPool.capacity) { From a2217973aeb70a8d7651263c7f11c6dd6e0c7a99 Mon Sep 17 00:00:00 2001 From: kailixu Date: Tue, 20 Aug 2024 09:07:29 +0800 Subject: [PATCH 159/181] fix: memory leak of geos --- source/libs/scalar/src/sclvector.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/scalar/src/sclvector.c b/source/libs/scalar/src/sclvector.c index daf44ec527..e20b0cb6fc 100644 --- a/source/libs/scalar/src/sclvector.c +++ b/source/libs/scalar/src/sclvector.c @@ -445,7 +445,7 @@ static FORCE_INLINE int32_t varToGeometry(char *buf, SScalarParam *pOut, int32_t sclError("failed to init geometry ctx, %s", getGeosErrMsg(code)); SCL_ERR_JRET(TSDB_CODE_APP_ERROR); } - if ((code = doGeomFromText(buf, &t, &len))) { + if ((code = doGeomFromText(buf, &t, &len)) != 0) { sclInfo("failed to convert text to geometry, %s", getGeosErrMsg(code)); SCL_ERR_JRET(TSDB_CODE_SCALAR_CONVERT_ERROR); } From 2f4d0815920a46e45f38fb1e927528e4ff5ca07f Mon Sep 17 00:00:00 2001 From: kailixu Date: Tue, 20 Aug 2024 09:08:22 +0800 Subject: [PATCH 160/181] fix: memory leak of geos --- source/libs/scalar/src/sclvector.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/scalar/src/sclvector.c b/source/libs/scalar/src/sclvector.c index e20b0cb6fc..9bac02e5f9 100644 --- a/source/libs/scalar/src/sclvector.c +++ b/source/libs/scalar/src/sclvector.c @@ -446,7 +446,7 @@ static FORCE_INLINE int32_t varToGeometry(char *buf, SScalarParam *pOut, int32_t SCL_ERR_JRET(TSDB_CODE_APP_ERROR); } if ((code = doGeomFromText(buf, &t, &len)) != 0) { - sclInfo("failed to convert text to geometry, %s", getGeosErrMsg(code)); + sclError("failed to convert text to geometry, %s", getGeosErrMsg(code)); SCL_ERR_JRET(TSDB_CODE_SCALAR_CONVERT_ERROR); } From 051763e71f0e41a44fd38a93ab3ffe10bc2e1a88 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 20 Aug 2024 09:14:50 +0800 Subject: [PATCH 161/181] fix(tsdb): return if get data. 
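Once nextRowFromSttBlocks has marked the current stt key as STT_FILE_HAS_DATA there is nothing further to advance for this call, so the function can return right away instead of falling through the rest of the scan. The snippet below is only a simplified, self-contained sketch of that early-return pattern; the Row type and find_valid_row() are invented for illustration and are not the actual tsdb reader code.

    #include <stdbool.h>
    #include <stddef.h>

    /* Simplified stand-ins for the rows scanned inside an stt block. */
    typedef struct { long ts; bool dropped; } Row;

    /* Return the index of the first usable row, or -1 if none exists.
     * As soon as a usable row is found the scan stops and returns,
     * mirroring the early return added in this change. */
    static int find_valid_row(const Row *rows, size_t n) {
      for (size_t i = 0; i < n; ++i) {
        if (!rows[i].dropped) {
          return (int)i;   /* data found: no need to keep scanning */
        }
      }
      return -1;           /* no usable row in this block */
    }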
--- source/dnode/vnode/src/tsdb/tsdbRead2.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index 74992c40d3..9be2c3b3f6 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -1807,9 +1807,11 @@ static int32_t nextRowFromSttBlocks(SSttBlockReader* pSttBlockReader, STableBloc if (!hasBeenDropped(pScanInfo->delSkyline, &pScanInfo->sttBlockDelIndex, key, ver, order, pVerRange, pSttBlockReader->numOfPks > 0)) { pScanInfo->sttKeyInfo.status = STT_FILE_HAS_DATA; + return code; } } else { pScanInfo->sttKeyInfo.status = STT_FILE_HAS_DATA; + return code; } } From 0e25059039a154f16e67cb15a6514fca8ec023a2 Mon Sep 17 00:00:00 2001 From: Shungang Li Date: Tue, 20 Aug 2024 09:17:50 +0800 Subject: [PATCH 162/181] fix: s3 protocolG/uriStyleG init --- source/common/src/cos.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/source/common/src/cos.c b/source/common/src/cos.c index a5a278e82e..6e9c7dd50d 100644 --- a/source/common/src/cos.c +++ b/source/common/src/cos.c @@ -56,10 +56,8 @@ int32_t s3Begin() { } for (int i = 0; i < tsS3EpNum; i++) { - protocolG[i] = !tsS3Https[i]; - if (tsS3Oss[i]) { - uriStyleG[i] = S3UriStyleVirtualHost; - } + protocolG[i] = tsS3Https[i] ? S3ProtocolHTTPS : S3ProtocolHTTP; + uriStyleG[i] = tsS3Oss[i] ? S3UriStyleVirtualHost : S3UriStylePath; } TAOS_RETURN(TSDB_CODE_SUCCESS); From 1f1a2d9f6c7f464ad31d941817c4f369609eee05 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 20 Aug 2024 09:54:26 +0800 Subject: [PATCH 163/181] fix: handle error code --- include/libs/sync/sync.h | 30 ++++++++++----------- source/libs/sync/inc/syncRaftCfg.h | 1 - source/libs/sync/inc/syncUtil.h | 2 +- source/libs/sync/src/syncMain.c | 26 +++++++++++++----- source/libs/sync/src/syncRaftCfg.c | 42 +++++++++++++++++------------- 5 files changed, 60 insertions(+), 41 deletions(-) diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h index 1fb077e3ca..07d56f9b07 100644 --- a/include/libs/sync/sync.h +++ b/include/libs/sync/sync.h @@ -156,11 +156,11 @@ typedef struct SSnapshotParam { typedef struct SSnapshot { int32_t type; - SSyncTLV* data; + SSyncTLV* data; ESyncFsmState state; - SyncIndex lastApplyIndex; - SyncTerm lastApplyTerm; - SyncIndex lastConfigIndex; + SyncIndex lastApplyIndex; + SyncTerm lastApplyTerm; + SyncIndex lastConfigIndex; } SSnapshot; typedef struct SSnapshotMeta { @@ -263,16 +263,16 @@ typedef struct SSyncState { int64_t startTimeMs; } SSyncState; -int32_t syncInit(); -void syncCleanUp(); -int64_t syncOpen(SSyncInfo* pSyncInfo, int32_t vnodeVersion); -int32_t syncStart(int64_t rid); -void syncStop(int64_t rid); -void syncPreStop(int64_t rid); -void syncPostStop(int64_t rid); -int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak, int64_t* seq); -int32_t syncCheckMember(int64_t rid); -int32_t syncIsCatchUp(int64_t rid); +int32_t syncInit(); +void syncCleanUp(); +int64_t syncOpen(SSyncInfo* pSyncInfo, int32_t vnodeVersion); +int32_t syncStart(int64_t rid); +void syncStop(int64_t rid); +void syncPreStop(int64_t rid); +void syncPostStop(int64_t rid); +int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak, int64_t* seq); +int32_t syncCheckMember(int64_t rid); +int32_t syncIsCatchUp(int64_t rid); ESyncRole syncGetRole(int64_t rid); int64_t syncGetTerm(int64_t rid); int32_t syncProcessMsg(int64_t rid, SRpcMsg* pMsg); @@ -296,7 +296,7 @@ int32_t syncGetAssignedLogSynced(int64_t rid); void 
syncGetRetryEpSet(int64_t rid, SEpSet* pEpSet); const char* syncStr(ESyncState state); -int32_t syncNodeGetConfig(int64_t rid, SSyncCfg *cfg); +int32_t syncNodeGetConfig(int64_t rid, SSyncCfg* cfg); // util int32_t syncSnapInfoDataRealloc(SSnapshot* pSnap, int32_t size); diff --git a/source/libs/sync/inc/syncRaftCfg.h b/source/libs/sync/inc/syncRaftCfg.h index 4f03a60fbc..2c1626c4e8 100644 --- a/source/libs/sync/inc/syncRaftCfg.h +++ b/source/libs/sync/inc/syncRaftCfg.h @@ -24,7 +24,6 @@ extern "C" { int32_t syncWriteCfgFile(SSyncNode *pNode); int32_t syncReadCfgFile(SSyncNode *pNode); -int32_t syncAddCfgIndex(SSyncNode *pNode, SyncIndex cfgIndex); #ifdef __cplusplus } diff --git a/source/libs/sync/inc/syncUtil.h b/source/libs/sync/inc/syncUtil.h index 555607d40c..2b5b818da3 100644 --- a/source/libs/sync/inc/syncUtil.h +++ b/source/libs/sync/inc/syncUtil.h @@ -107,7 +107,7 @@ void syncLogRecvAppendEntries(SSyncNode* pSyncNode, const SyncAppendEntries* pMs void syncLogSendAppendEntries(SSyncNode* pSyncNode, const SyncAppendEntries* pMsg, const char* s); void syncLogRecvRequestVote(SSyncNode* pSyncNode, const SyncRequestVote* pMsg, int32_t voteGranted, const char* s); -void syncLogSendRequestVote(SSyncNode* pNode, const SyncRequestVote* pMsg, const char* s); +void syncLogSendRequestVote(SSyncNode* pSyncNode, const SyncRequestVote* pMsg, const char* s); void syncLogRecvRequestVoteReply(SSyncNode* pSyncNode, const SyncRequestVoteReply* pMsg, const char* s); void syncLogSendRequestVoteReply(SSyncNode* pSyncNode, const SyncRequestVoteReply* pMsg, const char* s); diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 5465007b18..8b8a9e1279 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -50,7 +50,7 @@ static int32_t syncHbTimerStart(SSyncNode* pSyncNode, SSyncTimer* pSyncTimer); static int32_t syncHbTimerStop(SSyncNode* pSyncNode, SSyncTimer* pSyncTimer); static int32_t syncNodeUpdateNewConfigIndex(SSyncNode* ths, SSyncCfg* pNewCfg); static bool syncNodeInConfig(SSyncNode* pSyncNode, const SSyncCfg* config); -static void syncNodeDoConfigChange(SSyncNode* pSyncNode, SSyncCfg* newConfig, SyncIndex lastConfigChangeIndex); +static int32_t syncNodeDoConfigChange(SSyncNode* pSyncNode, SSyncCfg* newConfig, SyncIndex lastConfigChangeIndex); static bool syncNodeIsOptimizedOneReplica(SSyncNode* ths, SRpcMsg* pMsg); static bool syncNodeCanChange(SSyncNode* pSyncNode); @@ -182,7 +182,12 @@ int32_t syncReconfig(int64_t rid, SSyncCfg* pNewCfg) { } TAOS_CHECK_RETURN(syncNodeUpdateNewConfigIndex(pSyncNode, pNewCfg)); - syncNodeDoConfigChange(pSyncNode, pNewCfg, pNewCfg->lastIndex); + + if (syncNodeDoConfigChange(pSyncNode, pNewCfg, pNewCfg->lastIndex) != 0) { + code = TSDB_CODE_SYN_NEW_CONFIG_ERROR; + sError("vgId:%d, failed to reconfig since do change error", pSyncNode->vgId); + TAOS_RETURN(code); + } if (pSyncNode->state == TAOS_SYNC_STATE_LEADER || pSyncNode->state == TAOS_SYNC_STATE_ASSIGNED_LEADER) { // TODO check return value @@ -1015,7 +1020,7 @@ SSyncNode* syncNodeOpen(SSyncInfo* pSyncInfo, int32_t vnodeVersion) { if (!taosDirExist((char*)(pSyncInfo->path))) { if (taosMkDir(pSyncInfo->path) != 0) { terrno = TAOS_SYSTEM_ERROR(errno); - sError("failed to create dir:%s since %s", pSyncInfo->path, terrstr()); + sError("vgId:%d, failed to create dir:%s since %s", pSyncInfo->vgId, pSyncInfo->path, terrstr()); goto _error; } } @@ -1766,11 +1771,11 @@ static bool syncIsConfigChanged(const SSyncCfg* pOldCfg, const SSyncCfg* pNewCfg return false; 
} -void syncNodeDoConfigChange(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncIndex lastConfigChangeIndex) { +int32_t syncNodeDoConfigChange(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncIndex lastConfigChangeIndex) { SSyncCfg oldConfig = pSyncNode->raftCfg.cfg; if (!syncIsConfigChanged(&oldConfig, pNewConfig)) { sInfo("vgId:1, sync not reconfig since not changed"); - return; + return 0; } pSyncNode->raftCfg.cfg = *pNewConfig; @@ -1809,7 +1814,15 @@ void syncNodeDoConfigChange(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncInde } // add last config index - (void)syncAddCfgIndex(pSyncNode, lastConfigChangeIndex); + SRaftCfg* pCfg = &pSyncNode->raftCfg; + if (pCfg->configIndexCount >= MAX_CONFIG_INDEX_COUNT) { + sNError(pSyncNode, "failed to add cfg index:%d since out of range", pCfg->configIndexCount); + terrno = TSDB_CODE_OUT_OF_RANGE; + return -1; + } + + pCfg->configIndexArr[pCfg->configIndexCount] = lastConfigChangeIndex; + pCfg->configIndexCount++; if (IamInNew) { //----------------------------------------- @@ -1924,6 +1937,7 @@ void syncNodeDoConfigChange(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncInde _END: // log end config change sNInfo(pSyncNode, "end do config change, from %d to %d", oldConfig.totalReplicaNum, pNewConfig->totalReplicaNum); + return 0; } // raft state change -------------- diff --git a/source/libs/sync/src/syncRaftCfg.c b/source/libs/sync/src/syncRaftCfg.c index b0e6abffc6..82cc86ed86 100644 --- a/source/libs/sync/src/syncRaftCfg.c +++ b/source/libs/sync/src/syncRaftCfg.c @@ -18,7 +18,7 @@ #include "syncUtil.h" #include "tjson.h" -const char *syncRoleToStr(ESyncRole role) { +static const char *syncRoleToStr(ESyncRole role) { switch (role) { case TAOS_SYNC_ROLE_VOTER: return "true"; @@ -29,15 +29,14 @@ const char *syncRoleToStr(ESyncRole role) { } } -const ESyncRole syncStrToRole(char *str) { +static const ESyncRole syncStrToRole(char *str) { if (strcmp(str, "true") == 0) { return TAOS_SYNC_ROLE_VOTER; - } - if (strcmp(str, "false") == 0) { + } else if (strcmp(str, "false") == 0) { return TAOS_SYNC_ROLE_LEARNER; + } else { + return TAOS_SYNC_ROLE_ERROR; } - - return TAOS_SYNC_ROLE_ERROR; } static int32_t syncEncodeSyncCfg(const void *pObj, SJson *pJson) { @@ -52,10 +51,12 @@ static int32_t syncEncodeSyncCfg(const void *pObj, SJson *pJson) { if (nodeInfo == NULL) { TAOS_CHECK_EXIT(TSDB_CODE_OUT_OF_MEMORY); } + if ((code = tjsonAddItemToObject(pJson, "nodeInfo", nodeInfo)) < 0) { tjsonDelete(nodeInfo); TAOS_CHECK_EXIT(code); } + for (int32_t i = 0; i < pCfg->totalReplicaNum; ++i) { SJson *info = tjsonCreateObject(); if (info == NULL) { @@ -68,20 +69,25 @@ static int32_t syncEncodeSyncCfg(const void *pObj, SJson *pJson) { TAOS_CHECK_GOTO(tjsonAddStringToObject(info, "isReplica", syncRoleToStr(pCfg->nodeInfo[i].nodeRole)), NULL, _err); TAOS_CHECK_GOTO(tjsonAddItemToArray(nodeInfo, info), NULL, _err); continue; + _err: tjsonDelete(info); break; } + _exit: if (code < 0) { sError("failed to encode sync cfg at line %d since %s", lino, tstrerror(code)); } + TAOS_RETURN(code); } static int32_t syncEncodeRaftCfg(const void *pObj, SJson *pJson) { SRaftCfg *pCfg = (SRaftCfg *)pObj; - int32_t code = 0, lino = 0; + int32_t code = 0; + int32_t lino = 0; + TAOS_CHECK_EXIT(tjsonAddObject(pJson, "SSyncCfg", syncEncodeSyncCfg, (void *)&pCfg->cfg)); TAOS_CHECK_EXIT(tjsonAddDoubleToObject(pJson, "isStandBy", pCfg->isStandBy)); TAOS_CHECK_EXIT(tjsonAddDoubleToObject(pJson, "snapshotStrategy", pCfg->snapshotStrategy)); @@ -93,10 +99,12 @@ static int32_t syncEncodeRaftCfg(const 
void *pObj, SJson *pJson) { if (configIndexArr == NULL) { TAOS_CHECK_EXIT(TSDB_CODE_OUT_OF_MEMORY); } + if ((code = tjsonAddItemToObject(pJson, "configIndexArr", configIndexArr)) < 0) { tjsonDelete(configIndexArr); TAOS_CHECK_EXIT(code); } + for (int32_t i = 0; i < pCfg->configIndexCount; ++i) { SJson *configIndex = tjsonCreateObject(); if (configIndex == NULL) { @@ -105,14 +113,17 @@ static int32_t syncEncodeRaftCfg(const void *pObj, SJson *pJson) { TAOS_CHECK_EXIT(tjsonAddIntegerToObject(configIndex, "index", pCfg->configIndexArr[i])); TAOS_CHECK_EXIT(tjsonAddItemToArray(configIndexArr, configIndex)); continue; + _err: tjsonDelete(configIndex); break; } + _exit: if (code < 0) { sError("failed to encode raft cfg at line %d since %s", lino, tstrerror(code)); } + TAOS_RETURN(code); } @@ -124,11 +135,13 @@ int32_t syncWriteCfgFile(SSyncNode *pNode) { const char *realfile = pNode->configPath; SRaftCfg *pCfg = &pNode->raftCfg; char file[PATH_MAX] = {0}; + (void)snprintf(file, sizeof(file), "%s.bak", realfile); if ((pJson = tjsonCreateObject()) == NULL) { TAOS_CHECK_EXIT(TSDB_CODE_OUT_OF_MEMORY); } + TAOS_CHECK_EXIT(tjsonAddObject(pJson, "RaftCfg", syncEncodeRaftCfg, pCfg)); buffer = tjsonToString(pJson); if (buffer == NULL) { @@ -145,6 +158,7 @@ int32_t syncWriteCfgFile(SSyncNode *pNode) { if (taosWriteFile(pFile, buffer, len) <= 0) { TAOS_CHECK_EXIT(TAOS_SYSTEM_ERROR(errno)); } + if (taosFsyncFile(pFile) < 0) { TAOS_CHECK_EXIT(TAOS_SYSTEM_ERROR(errno)); } @@ -165,6 +179,7 @@ _exit: if (code != 0) { sError("vgId:%d, failed to write sync cfg file:%s since %s", pNode->vgId, realfile, tstrerror(code)); } + TAOS_RETURN(code); } @@ -232,6 +247,7 @@ static int32_t syncDecodeRaftCfg(const SJson *pJson, void *pObj) { tjsonGetNumberValue(configIndex, "index", pCfg->configIndexArr[i], code); if (code < 0) return TSDB_CODE_INVALID_JSON_FORMAT; } + return 0; } @@ -292,16 +308,6 @@ _OVER: if (code != 0) { sError("vgId:%d, failed to read sync cfg file:%s since %s", pNode->vgId, file, tstrerror(code)); } + TAOS_RETURN(code); } - -int32_t syncAddCfgIndex(SSyncNode *pNode, SyncIndex cfgIndex) { - SRaftCfg *pCfg = &pNode->raftCfg; - if (pCfg->configIndexCount >= MAX_CONFIG_INDEX_COUNT) { - return TSDB_CODE_OUT_OF_RANGE; - } - - pCfg->configIndexArr[pCfg->configIndexCount] = cfgIndex; - pCfg->configIndexCount++; - return 0; -} \ No newline at end of file From 66878fd040922b8881501d04768d879c9f5316b0 Mon Sep 17 00:00:00 2001 From: kailixu Date: Tue, 20 Aug 2024 10:10:42 +0800 Subject: [PATCH 164/181] fix: memory leak of geos --- source/util/src/tgeosctx.c | 45 ++++++++++++++++++++++++++------------ 1 file changed, 31 insertions(+), 14 deletions(-) diff --git a/source/util/src/tgeosctx.c b/source/util/src/tgeosctx.c index 5d47452fda..42cde5b8c7 100644 --- a/source/util/src/tgeosctx.c +++ b/source/util/src/tgeosctx.c @@ -14,14 +14,16 @@ */ #include "tgeosctx.h" +#include "tarray.h" #include "tdef.h" #include "tlockfree.h" #include "tlog.h" +#define GEOS_POOL_CAPACITY 64 typedef struct { - SGeosContext *pool; - int32_t capacity; - int32_t size; + SArray *poolArray; // totalSize: (GEOS_POOL_CAPACITY * (taosArrayGetSize(poolArray) - 1)) + size + SGeosContext *pool; // current SGeosContext pool + int32_t size; // size of current SGeosContext pool, size <= GEOS_POOL_CAPACITY SRWLatch lock; } SGeosContextPool; @@ -34,16 +36,26 @@ SGeosContext *getThreadLocalGeosCtx() { if (tlGeosCtx) return tlGeosCtx; taosWLockLatch(&sGeosPool.lock); - if (sGeosPool.size >= sGeosPool.capacity) { - sGeosPool.capacity += 128; - void *tmp = 
taosMemoryRealloc(sGeosPool.pool, sGeosPool.capacity * sizeof(SGeosContext)); - if (!tmp) { + if (!sGeosPool.pool || sGeosPool.size >= GEOS_POOL_CAPACITY) { + if (!(sGeosPool.pool = (SGeosContext *)taosMemoryCalloc(GEOS_POOL_CAPACITY, sizeof(SGeosContext)))) { taosWUnLockLatch(&sGeosPool.lock); - terrno = TSDB_CODE_OUT_OF_MEMORY; return NULL; } - sGeosPool.pool = tmp; - TAOS_MEMSET(sGeosPool.pool + sGeosPool.size, 0, (sGeosPool.capacity - sGeosPool.size) * sizeof(SGeosContext)); + if (!sGeosPool.poolArray) { + if (!(sGeosPool.poolArray = taosArrayInit(16, POINTER_BYTES))) { + taosMemoryFree(sGeosPool.pool); + sGeosPool.pool = NULL; + taosWUnLockLatch(&sGeosPool.lock); + return NULL; + } + } + if (!taosArrayPush(sGeosPool.poolArray, &sGeosPool.pool)) { + taosMemoryFree(sGeosPool.pool); + sGeosPool.pool = NULL; + taosWUnLockLatch(&sGeosPool.lock); + return NULL; + } + sGeosPool.size = 0; } tlGeosCtx = sGeosPool.pool + sGeosPool.size; ++sGeosPool.size; @@ -91,9 +103,14 @@ static void destroyGeosCtx(SGeosContext *pCtx) { void taosGeosDestroy() { uInfo("geos is cleaned up"); - if (!sGeosPool.pool) return; - for (int32_t i = 0; i < sGeosPool.size; ++i) { - destroyGeosCtx(sGeosPool.pool + i); + int32_t size = taosArrayGetSize(sGeosPool.poolArray); + for (int32_t i = 0; i < size; ++i) { + SGeosContext *pool = *(SGeosContext **)TARRAY_GET_ELEM(sGeosPool.poolArray, i); + for (int32_t j = 0; j < GEOS_POOL_CAPACITY; ++j) { + destroyGeosCtx(pool + j); + } + taosMemoryFree(pool); } - taosMemoryFreeClear(sGeosPool.pool); + taosArrayDestroy(sGeosPool.poolArray); + sGeosPool.poolArray = NULL; } \ No newline at end of file From 1bff0e89f4f3b7b12b4cca070a23a557c341b44c Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao> Date: Tue, 20 Aug 2024 10:15:52 +0800 Subject: [PATCH 165/181] fix issue --- source/libs/parser/src/parTranslater.c | 58 ++++++++++++++++++++++---- tests/script/tsim/show/showalive.sim | 44 ++++++++++++++++++- 2 files changed, 92 insertions(+), 10 deletions(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index a262acb2ec..05cceb656e 100755 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -12628,6 +12628,27 @@ static int32_t createParOperatorNode(EOperatorType opType, const char* pLeftCol, return TSDB_CODE_SUCCESS; } +static int32_t createIsOperatorNode(EOperatorType opType, const char* pColName, SNode** pOp) { + SOperatorNode* pOper = NULL; + int32_t code = nodesMakeNode(QUERY_NODE_OPERATOR, (SNode**)&pOper); + if (NULL == pOper) { + return code; + } + + pOper->opType = opType; + code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pOper->pLeft); + if (TSDB_CODE_SUCCESS != code) { + nodesDestroyNode((SNode*)pOper); + return code; + } + pOper->pRight = NULL; + + snprintf(((SColumnNode*)pOper->pLeft)->colName, sizeof(((SColumnNode*)pOper->pLeft)->colName), "%s", pColName); + + *pOp = (SNode*)pOper; + return TSDB_CODE_SUCCESS; +} + static const char* getTbNameColName(ENodeType type) { const char* colName; switch (type) { @@ -15035,7 +15056,7 @@ static int32_t rewriteShowAliveStmt(STranslateContext* pCxt, SQuery* pQuery) { // pWhenThenlist and pElse need to free - // case when (v1_status = "leader" or v2_status = "lead er" or v3_status = "leader" or v4_status = "leader") then 1 + // case when (v1_status = "leader" or v2_status = "leader" or v3_status = "leader" or v4_status = "leader") then 1 // else 0 end SNode* pCaseWhen = NULL; code = createParCaseWhenNode(NULL, pWhenThenlist, pElse, NULL, &pCaseWhen); @@ 
-15190,23 +15211,42 @@ static int32_t rewriteShowAliveStmt(STranslateContext* pCxt, SQuery* pQuery) { return code; } - // pSubSelect, pTemp1, pTempVal need to free - - pThen = NULL; - code = nodesMakeValueNodeFromInt32(1, &pThen); + SNode* pCondIsNULL = NULL; + code = createIsOperatorNode(OP_TYPE_IS_NULL, pSumColAlias, &pCondIsNULL); if (TSDB_CODE_SUCCESS != code) { nodesDestroyNode((SNode*)pSubSelect); nodesDestroyNode(pTemp1); nodesDestroyNode(pTempVal); return code; } - // pSubSelect, pTemp1, pThen, pTempVal need to free - pWhenThen = NULL; - code = createParWhenThenNode(pTemp1, pThen, &pWhenThen); + SNode* pCondFull1 = NULL; + code = createLogicCondNode(&pTemp1, &pCondIsNULL, &pCondFull1, LOGIC_COND_TYPE_OR); if (TSDB_CODE_SUCCESS != code) { nodesDestroyNode((SNode*)pSubSelect); nodesDestroyNode(pTemp1); + nodesDestroyNode(pTempVal); + nodesDestroyNode(pCondIsNULL); + return code; + } + + // pSubSelect, pCondFull1, pTempVal need to free + + pThen = NULL; + code = nodesMakeValueNodeFromInt32(1, &pThen); + if (TSDB_CODE_SUCCESS != code) { + nodesDestroyNode((SNode*)pSubSelect); + nodesDestroyNode(pCondFull1); + nodesDestroyNode(pTempVal); + return code; + } + // pSubSelect, pCondFull1, pThen, pTempVal need to free + + pWhenThen = NULL; + code = createParWhenThenNode(pCondFull1, pThen, &pWhenThen); + if (TSDB_CODE_SUCCESS != code) { + nodesDestroyNode((SNode*)pSubSelect); + nodesDestroyNode(pCondFull1); nodesDestroyNode(pThen); nodesDestroyNode(pTempVal); return code; @@ -15289,7 +15329,7 @@ static int32_t rewriteShowAliveStmt(STranslateContext* pCxt, SQuery* pQuery) { } // pSubSelect, pWhenThenlist need to free - // case when leader_col = count_col and count_col > 0 then 1 when leader_col < count_col and count_col > 0 then 2 else + // case when leader_col = count_col and leader_col > 0 then 1 when leader_col < count_col and leader_col > 0 then 2 else // 0 end as status pElse = NULL; code = nodesMakeValueNodeFromInt32(0, &pElse); diff --git a/tests/script/tsim/show/showalive.sim b/tests/script/tsim/show/showalive.sim index 4cad1da01d..72fad47f57 100644 --- a/tests/script/tsim/show/showalive.sim +++ b/tests/script/tsim/show/showalive.sim @@ -21,6 +21,30 @@ sql create dnode $hostname4 port 7500 sleep 1000 +$loop_count = 0 + +loop00: + +sleep 1000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 show cluster alive; +sql show cluster alive; + +print res------------------------ +print $data00 $data01 +print $data10 $data11 + +if $data00 != 1 then + print =====data00=$data00 + goto loop00 +endi + + print =============== create database, stable, table sql create database test vgroups 6; sql use test; @@ -46,6 +70,15 @@ endi print show cluster alive; sql show cluster alive; +print res------------------------ +print $data00 $data01 +print $data10 $data11 + +if $rows != 1 then + print =====rows=$rows + goto loop0 +endi + if $data00 != 1 then print =====data00=$data00 goto loop0 @@ -54,6 +87,15 @@ endi print show test.alive; sql show test.alive; +print res------------------------ +print $data00 $data01 +print $data10 $data11 + +if $rows != 1 then + print =====rows=$rows + goto loop0 +endi + if $data00 != 1 then print =====data00=$data00 goto loop0 @@ -164,4 +206,4 @@ endi system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode2 -s stop -x SIGINT system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT +system sh/exec.sh -n dnode4 -s stop -x SIGINT \ No newline at end of file From 
5e77f6f6ca873e0b1aa308e5721a383fdec1f7d6 Mon Sep 17 00:00:00 2001 From: kailixu Date: Tue, 20 Aug 2024 10:18:14 +0800 Subject: [PATCH 166/181] fix: memory leak of geos --- source/util/src/tgeosctx.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/source/util/src/tgeosctx.c b/source/util/src/tgeosctx.c index 42cde5b8c7..99655ed7f7 100644 --- a/source/util/src/tgeosctx.c +++ b/source/util/src/tgeosctx.c @@ -43,15 +43,13 @@ SGeosContext *getThreadLocalGeosCtx() { } if (!sGeosPool.poolArray) { if (!(sGeosPool.poolArray = taosArrayInit(16, POINTER_BYTES))) { - taosMemoryFree(sGeosPool.pool); - sGeosPool.pool = NULL; + taosMemoryFreeClear(sGeosPool.pool); taosWUnLockLatch(&sGeosPool.lock); return NULL; } } if (!taosArrayPush(sGeosPool.poolArray, &sGeosPool.pool)) { - taosMemoryFree(sGeosPool.pool); - sGeosPool.pool = NULL; + taosMemoryFreeClear(sGeosPool.pool); taosWUnLockLatch(&sGeosPool.lock); return NULL; } From e15bd712740de4df8bf7e5778fbb69935e4a1859 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao> Date: Tue, 20 Aug 2024 10:38:00 +0800 Subject: [PATCH 167/181] fix issue --- source/util/src/tscalablebf.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/util/src/tscalablebf.c b/source/util/src/tscalablebf.c index 1d6ef29987..ffcfdfeaf1 100644 --- a/source/util/src/tscalablebf.c +++ b/source/util/src/tscalablebf.c @@ -87,6 +87,7 @@ int32_t tScalableBfPutNoCheck(SScalableBf* pSBf, const void* keyBuf, uint32_t le pSBf->status = SBF_INVALID; if (code == TSDB_CODE_OUT_OF_BUFFER) { code = TSDB_CODE_SUCCESS; + return code; } QUERY_CHECK_CODE(code, lino, _error); } @@ -126,6 +127,8 @@ int32_t tScalableBfPut(SScalableBf* pSBf, const void* keyBuf, uint32_t len, int3 pSBf->status = SBF_INVALID; if (code == TSDB_CODE_OUT_OF_BUFFER) { code = TSDB_CODE_SUCCESS; + (*winRes) = TSDB_CODE_FAILED; + goto _end; } QUERY_CHECK_CODE(code, lino, _end); } From 820661f192d61f5188bca14539cb33890351a439 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Tue, 20 Aug 2024 10:43:28 +0800 Subject: [PATCH 168/181] enh: remove some asserts --- include/util/tcoding.h | 3 -- source/dnode/vnode/src/tsdb/tsdbSttFileRW.c | 36 ++++++++++++++------- source/dnode/vnode/src/tsdb/tsdbUtil2.c | 2 +- source/dnode/vnode/src/vnd/vnodeAsync.c | 15 ++------- source/dnode/vnode/src/vnd/vnodeCommit.c | 3 +- source/dnode/vnode/src/vnd/vnodeHash.c | 1 - source/dnode/vnode/src/vnd/vnodeOpen.c | 4 ++- 7 files changed, 32 insertions(+), 32 deletions(-) diff --git a/include/util/tcoding.h b/include/util/tcoding.h index 1040adf431..b4f62d349c 100644 --- a/include/util/tcoding.h +++ b/include/util/tcoding.h @@ -213,7 +213,6 @@ static FORCE_INLINE int32_t taosEncodeVariantU16(void **buf, uint16_t value) { if (buf != NULL) ((uint8_t *)(*buf))[i] = (uint8_t)(value | ENCODE_LIMIT); value >>= 7; i++; - ASSERT(i < 3); } if (buf != NULL) { @@ -261,7 +260,6 @@ static FORCE_INLINE int32_t taosEncodeVariantU32(void **buf, uint32_t value) { if (buf != NULL) ((uint8_t *)(*buf))[i] = (value | ENCODE_LIMIT); value >>= 7; i++; - ASSERT(i < 5); } if (buf != NULL) { @@ -309,7 +307,6 @@ static FORCE_INLINE int32_t taosEncodeVariantU64(void **buf, uint64_t value) { if (buf != NULL) ((uint8_t *)(*buf))[i] = (uint8_t)(value | ENCODE_LIMIT); value >>= 7; i++; - ASSERT(i < 10); } if (buf != NULL) { diff --git a/source/dnode/vnode/src/tsdb/tsdbSttFileRW.c b/source/dnode/vnode/src/tsdb/tsdbSttFileRW.c index e3d7f9d45f..0bd00a100c 100644 --- a/source/dnode/vnode/src/tsdb/tsdbSttFileRW.c +++ b/source/dnode/vnode/src/tsdb/tsdbSttFileRW.c @@ 
-61,7 +61,9 @@ int32_t tsdbSttFileReaderOpen(const char *fname, const SSttFileReaderConfig *con // // open each segment reader int64_t offset = config->file->size - sizeof(SSttFooter); - ASSERT(offset >= TSDB_FHDR_SIZE); + if (offset < TSDB_FHDR_SIZE) { + TSDB_CHECK_CODE(code = TSDB_CODE_FILE_CORRUPTED, lino, _exit); + } int32_t encryptAlgoirthm = config->tsdb->pVnode->config.tsdbCfg.encryptAlgorithm; char *encryptKey = config->tsdb->pVnode->config.tsdbCfg.encryptKey; @@ -115,7 +117,9 @@ int32_t tsdbSttFileReaderClose(SSttFileReader **reader) { int32_t tsdbSttFileReadStatisBlk(SSttFileReader *reader, const TStatisBlkArray **statisBlkArray) { if (!reader->ctx->statisBlkLoaded) { if (reader->footer->statisBlkPtr->size > 0) { - ASSERT(reader->footer->statisBlkPtr->size % sizeof(SStatisBlk) == 0); + if (reader->footer->statisBlkPtr->size % sizeof(SStatisBlk) != 0) { + return TSDB_CODE_FILE_CORRUPTED; + } int32_t size = reader->footer->statisBlkPtr->size / sizeof(SStatisBlk); void *data = taosMemoryMalloc(reader->footer->statisBlkPtr->size); @@ -147,7 +151,9 @@ int32_t tsdbSttFileReadStatisBlk(SSttFileReader *reader, const TStatisBlkArray * int32_t tsdbSttFileReadTombBlk(SSttFileReader *reader, const TTombBlkArray **tombBlkArray) { if (!reader->ctx->tombBlkLoaded) { if (reader->footer->tombBlkPtr->size > 0) { - ASSERT(reader->footer->tombBlkPtr->size % sizeof(STombBlk) == 0); + if (reader->footer->tombBlkPtr->size % sizeof(STombBlk) != 0) { + return TSDB_CODE_FILE_CORRUPTED; + } int32_t size = reader->footer->tombBlkPtr->size / sizeof(STombBlk); void *data = taosMemoryMalloc(reader->footer->tombBlkPtr->size); @@ -179,7 +185,9 @@ int32_t tsdbSttFileReadTombBlk(SSttFileReader *reader, const TTombBlkArray **tom int32_t tsdbSttFileReadSttBlk(SSttFileReader *reader, const TSttBlkArray **sttBlkArray) { if (!reader->ctx->sttBlkLoaded) { if (reader->footer->sttBlkPtr->size > 0) { - ASSERT(reader->footer->sttBlkPtr->size % sizeof(SSttBlk) == 0); + if (reader->footer->sttBlkPtr->size % sizeof(SSttBlk) != 0) { + return TSDB_CODE_FILE_CORRUPTED; + } int32_t size = reader->footer->sttBlkPtr->size / sizeof(SSttBlk); void *data = taosMemoryMalloc(reader->footer->sttBlkPtr->size); @@ -256,7 +264,9 @@ int32_t tsdbSttFileReadBlockDataByColumn(SSttFileReader *reader, const SSttBlk * SBufferReader br = BUFFER_READER_INITIALIZER(0, buffer0); TAOS_CHECK_GOTO(tGetDiskDataHdr(&br, &hdr), &lino, _exit); - ASSERT(hdr.delimiter == TSDB_FILE_DLMT); + if (hdr.delimiter != TSDB_FILE_DLMT) { + TSDB_CHECK_CODE(code = TSDB_CODE_FILE_CORRUPTED, lino, _exit); + } // set data container tBlockDataReset(bData); @@ -266,7 +276,9 @@ int32_t tsdbSttFileReadBlockDataByColumn(SSttFileReader *reader, const SSttBlk * // key part TAOS_CHECK_GOTO(tBlockDataDecompressKeyPart(&hdr, &br, bData, assist), &lino, _exit); - ASSERT(br.offset == buffer0->size); + if (br.offset != buffer0->size) { + TSDB_CHECK_CODE(code = TSDB_CODE_FILE_CORRUPTED, lino, _exit); + } bool loadExtra = false; for (int i = 0; i < ncid; i++) { @@ -376,7 +388,10 @@ int32_t tsdbSttFileReadTombBlock(SSttFileReader *reader, const STombBlk *tombBlk br.offset += tombBlk->size[i]; } - ASSERT(br.offset == tombBlk->dp->size); + if (br.offset != tombBlk->dp->size) { + TSDB_CHECK_CODE(code = TSDB_CODE_FILE_CORRUPTED, lino, _exit); + } + _exit: if (code) { tsdbError("vgId:%d %s failed at %s:%d since %s", TD_VID(reader->config->tsdb->pVnode), __func__, __FILE__, lino, @@ -444,7 +459,9 @@ int32_t tsdbSttFileReadStatisBlock(SSttFileReader *reader, const SStatisBlk *sta } } - 
ASSERT(br.offset == buffer0->size); + if (br.offset != buffer0->size) { + TSDB_CHECK_CODE(code = TSDB_CODE_FILE_CORRUPTED, lino, _exit); + } _exit: if (code) { @@ -814,8 +831,6 @@ _exit: } static void tsdbSttFWriterDoClose(SSttFileWriter *writer) { - ASSERT(writer->fd == NULL); - for (int32_t i = 0; i < ARRAY_SIZE(writer->local); ++i) { tBufferDestroy(writer->local + i); } @@ -854,7 +869,6 @@ static int32_t tsdbSttFWriterCloseCommit(SSttFileWriter *writer, TFileOpArray *o tsdbCloseFile(&writer->fd); - ASSERT(writer->file->size > 0); STFileOp op = (STFileOp){ .optype = TSDB_FOP_CREATE, .fid = writer->config->fid, diff --git a/source/dnode/vnode/src/tsdb/tsdbUtil2.c b/source/dnode/vnode/src/tsdb/tsdbUtil2.c index 7ada3085b1..e13e520cbf 100644 --- a/source/dnode/vnode/src/tsdb/tsdbUtil2.c +++ b/source/dnode/vnode/src/tsdb/tsdbUtil2.c @@ -171,7 +171,7 @@ static int32_t tStatisBlockUpdate(STbStatisBlock *block, SRowInfo *row) { TAOS_CHECK_RETURN(tBufferPutAt(&block->counts, (block->numOfRecords - 1) * sizeof(record.count), &record.count, sizeof(record.count))); } else { - ASSERT(0); + return TSDB_CODE_INVALID_PARA; } return 0; diff --git a/source/dnode/vnode/src/vnd/vnodeAsync.c b/source/dnode/vnode/src/vnd/vnodeAsync.c index 2ddd3c9d3e..1208b06337 100644 --- a/source/dnode/vnode/src/vnd/vnodeAsync.c +++ b/source/dnode/vnode/src/vnd/vnodeAsync.c @@ -165,9 +165,7 @@ static int32_t vnodeAsyncTaskDone(SVAsync *async, SVATask *task) { } ret = vHashDrop(async->taskTable, task); - if (ret != 0) { - ASSERT(0); - } + TAOS_UNUSED(ret); async->numTasks--; if (task->numWait == 0) { @@ -403,7 +401,6 @@ static int32_t vnodeAsyncDestroy(SVAsync **async) { } (void)taosThreadJoin((*async)->workers[i].thread, NULL); - ASSERT((*async)->workers[i].state == EVA_WORKER_STATE_STOP); (*async)->workers[i].state = EVA_WORKER_STATE_UINIT; } @@ -413,18 +410,11 @@ static int32_t vnodeAsyncDestroy(SVAsync **async) { channel->prev->next = channel->next; int32_t ret = vHashDrop((*async)->channelTable, channel); - if (ret) { - ASSERT(0); - } + TAOS_UNUSED(ret); (*async)->numChannels--; taosMemoryFree(channel); } - ASSERT((*async)->numLaunchWorkers == 0); - ASSERT((*async)->numIdleWorkers == 0); - ASSERT((*async)->numChannels == 0); - ASSERT((*async)->numTasks == 0); - (void)taosThreadMutexDestroy(&(*async)->mutex); (void)taosThreadCondDestroy(&(*async)->hasTask); @@ -438,7 +428,6 @@ static int32_t vnodeAsyncDestroy(SVAsync **async) { static int32_t vnodeAsyncLaunchWorker(SVAsync *async) { for (int32_t i = 0; i < async->numWorkers; i++) { - ASSERT(async->workers[i].state != EVA_WORKER_STATE_IDLE); if (async->workers[i].state == EVA_WORKER_STATE_ACTIVE) { continue; } else if (async->workers[i].state == EVA_WORKER_STATE_STOP) { diff --git a/source/dnode/vnode/src/vnd/vnodeCommit.c b/source/dnode/vnode/src/vnd/vnodeCommit.c index 8fcbe49f9a..70b40e8d0b 100644 --- a/source/dnode/vnode/src/vnd/vnodeCommit.c +++ b/source/dnode/vnode/src/vnd/vnodeCommit.c @@ -302,7 +302,6 @@ static int32_t vnodePrepareCommit(SVnode *pVnode, SCommitInfo *pInfo) { TSDB_CHECK_CODE(code, lino, _exit); (void)taosThreadMutexLock(&pVnode->mutex); - ASSERT(pVnode->onCommit == NULL); pVnode->onCommit = pVnode->inUse; pVnode->inUse = NULL; (void)taosThreadMutexUnlock(&pVnode->mutex); @@ -339,7 +338,7 @@ static void vnodeReturnBufPool(SVnode *pVnode) { pVnode->recycleTail = pPool; } } else { - ASSERT(0); + vError("vgId:%d, buffer pool %p of id %d nRef:%d", TD_VID(pVnode), pPool, pPool->id, nRef); } (void)taosThreadMutexUnlock(&pVnode->mutex); diff --git 
a/source/dnode/vnode/src/vnd/vnodeHash.c b/source/dnode/vnode/src/vnd/vnodeHash.c index 00fc2dfc00..96ad759a90 100644 --- a/source/dnode/vnode/src/vnd/vnodeHash.c +++ b/source/dnode/vnode/src/vnd/vnodeHash.c @@ -77,7 +77,6 @@ int32_t vHashDestroy(SVHashTable** ht) { } if (*ht) { - ASSERT((*ht)->numEntries == 0); taosMemoryFree((*ht)->buckets); taosMemoryFree(*ht); (*ht) = NULL; diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c index ed008d4f88..989faa3a0f 100644 --- a/source/dnode/vnode/src/vnd/vnodeOpen.c +++ b/source/dnode/vnode/src/vnd/vnodeOpen.c @@ -558,7 +558,9 @@ void vnodeClose(SVnode *pVnode) { // start the sync timer after the queue is ready int32_t vnodeStart(SVnode *pVnode) { - ASSERT(pVnode); + if (pVnode == NULL) { + return TSDB_CODE_INVALID_PARA; + } return vnodeSyncStart(pVnode); } From 7073f204ac4594d7271efc6f8e82594bf2bbc8f9 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 20 Aug 2024 11:00:02 +0800 Subject: [PATCH 169/181] refactor: do some internal refactor. --- source/dnode/vnode/src/tsdb/tsdbMergeTree.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c index 160ff2e13c..2288e8bbce 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c +++ b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c @@ -903,6 +903,7 @@ int32_t tLDataIterNextRow(SLDataIter *pIter, const char *idStr, bool* hasNext) { pIter->rInfo.row = tsdbRowFromBlockData(pBlockData, pIter->iRow); _exit: + tsdbError("failed to exec stt-file nextIter, lino:%d, code:%s, %s", lino, tstrerror(code), idStr); *hasNext = (code == TSDB_CODE_SUCCESS) && (pIter->pSttBlk != NULL) && (pBlockData != NULL); return code; } From bbdd1f655b62698733e99f0f1188d9761c68b70f Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 20 Aug 2024 11:04:44 +0800 Subject: [PATCH 170/181] fix(stream): send kill checkpoint trans to mnode when trying to close vnode. 
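Before this change the sendMsgBeforeClosing flag was only raised when a node switched from leader to follower, so a leader vnode that was simply being closed never asked the mnode to kill the ongoing checkpoint trans. The flag is now set whenever the node has been leader, covering both the demotion case and the plain close case. A minimal stand-alone sketch of the flag update follows; NodeRole, MetaState and updateStageRole() are invented names used only to illustrate the idea, not the real stream-meta structures.

    #include <stdbool.h>

    typedef enum { ROLE_FOLLOWER = 0, ROLE_LEADER = 1 } NodeRole;

    typedef struct {
      NodeRole role;                 /* role before this update                */
      bool     sendMsgBeforeClosing; /* ask the close path to notify the mnode */
    } MetaState;

    /* Any node that has been leader must notify the mnode before its tasks
     * are closed, whether it stays leader (vnode close) or becomes follower. */
    static void updateStageRole(MetaState *meta, bool becomesLeader) {
      if (meta->role == ROLE_LEADER) {
        meta->sendMsgBeforeClosing = true;
      }
      meta->role = becomesLeader ? ROLE_LEADER : ROLE_FOLLOWER;
    }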
--- source/dnode/vnode/src/tq/tq.c | 4 ++-- source/libs/stream/src/streamMeta.c | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index a70a04f23d..5fc550da32 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -1204,11 +1204,11 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) } if (req.mndTrigger) { - qInfo("s-task:%s (vgId:%d) level:%d receive checkpoint-source msg chkpt:%" PRId64 ", transId:%d, ", pTask->id.idStr, + tqInfo("s-task:%s (vgId:%d) level:%d receive checkpoint-source msg chkpt:%" PRId64 ", transId:%d, ", pTask->id.idStr, vgId, pTask->info.taskLevel, req.checkpointId, req.transId); } else { const char* pPrevStatus = streamTaskGetStatusStr(streamTaskGetPrevStatus(pTask)); - qInfo("s-task:%s (vgId:%d) level:%d receive checkpoint-source msg chkpt:%" PRId64 + tqInfo("s-task:%s (vgId:%d) level:%d receive checkpoint-source msg chkpt:%" PRId64 ", transId:%d after transfer-state, prev status:%s", pTask->id.idStr, vgId, pTask->info.taskLevel, req.checkpointId, req.transId, pPrevStatus); } diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index a9976760b6..b6be1d04ca 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -1260,7 +1260,9 @@ void streamMetaUpdateStageRole(SStreamMeta* pMeta, int64_t stage, bool isLeader) pMeta->stage = stage; // mark the sign to send msg before close all tasks - if ((!isLeader) && (pMeta->role == NODE_ROLE_LEADER)) { + // 1. for leader vnode, always send msg before closing + // 2. for follower vnode, if it's is changed from leader, also sending msg before closing. + if (pMeta->role == NODE_ROLE_LEADER) { pMeta->sendMsgBeforeClosing = true; } From 6470e5f6e3b3a7318ca1fce15aed9685496949f9 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 20 Aug 2024 12:24:01 +0800 Subject: [PATCH 171/181] refact: refact function syncUtilNodeInfo2RaftId --- source/dnode/mnode/impl/src/mndSync.c | 2 +- source/libs/sync/src/syncMain.c | 52 +++++++++++---------------- source/libs/sync/src/syncUtil.c | 12 +++---- 3 files changed, 27 insertions(+), 39 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndSync.c b/source/dnode/mnode/impl/src/mndSync.c index cf7769b932..3c5724dde3 100644 --- a/source/dnode/mnode/impl/src/mndSync.c +++ b/source/dnode/mnode/impl/src/mndSync.c @@ -486,7 +486,7 @@ int32_t mndInitSync(SMnode *pMnode) { int32_t code = 0; (void)tsem_init(&pMgmt->syncSem, 0, 0); - pMgmt->sync = syncOpen(&syncInfo, true); + pMgmt->sync = syncOpen(&syncInfo, 1); // always check if (pMgmt->sync <= 0) { if (terrno != 0) code = terrno; mError("failed to open sync since %s", tstrerror(code)); diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 8b8a9e1279..ffd180ee01 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -1113,37 +1113,6 @@ SSyncNode* syncNodeOpen(SSyncInfo* pSyncInfo, int32_t vnodeVersion) { goto _error; } - // init internal - pSyncNode->myNodeInfo = pSyncNode->raftCfg.cfg.nodeInfo[pSyncNode->raftCfg.cfg.myIndex]; - if (!syncUtilNodeInfo2RaftId(&pSyncNode->myNodeInfo, pSyncNode->vgId, &pSyncNode->myRaftId)) { - terrno = TSDB_CODE_SYN_INTERNAL_ERROR; - sError("vgId:%d, failed to determine my raft member id", pSyncNode->vgId); - goto _error; - } - - pSyncNode->arbTerm = -1; - (void)taosThreadMutexInit(&pSyncNode->arbTokenMutex, NULL); - 
syncUtilGenerateArbToken(pSyncNode->myNodeInfo.nodeId, pSyncInfo->vgId, pSyncNode->arbToken); - sInfo("vgId:%d, arb token:%s", pSyncNode->vgId, pSyncNode->arbToken); - - // init peersNum, peers, peersId - pSyncNode->peersNum = pSyncNode->raftCfg.cfg.totalReplicaNum - 1; - int32_t j = 0; - for (int32_t i = 0; i < pSyncNode->raftCfg.cfg.totalReplicaNum; ++i) { - if (i != pSyncNode->raftCfg.cfg.myIndex) { - pSyncNode->peersNodeInfo[j] = pSyncNode->raftCfg.cfg.nodeInfo[i]; - syncUtilNodeInfo2EpSet(&pSyncNode->peersNodeInfo[j], &pSyncNode->peersEpset[j]); - j++; - } - } - for (int32_t i = 0; i < pSyncNode->peersNum; ++i) { - if (!syncUtilNodeInfo2RaftId(&pSyncNode->peersNodeInfo[i], pSyncNode->vgId, &pSyncNode->peersId[i])) { - terrno = TSDB_CODE_SYN_INTERNAL_ERROR; - sError("vgId:%d, failed to determine raft member id, peer:%d", pSyncNode->vgId, i); - goto _error; - } - } - // init replicaNum, replicasId pSyncNode->replicaNum = pSyncNode->raftCfg.cfg.replicaNum; pSyncNode->totalReplicaNum = pSyncNode->raftCfg.cfg.totalReplicaNum; @@ -1155,6 +1124,27 @@ SSyncNode* syncNodeOpen(SSyncInfo* pSyncInfo, int32_t vnodeVersion) { } } + // init internal + pSyncNode->myNodeInfo = pSyncNode->raftCfg.cfg.nodeInfo[pSyncNode->raftCfg.cfg.myIndex]; + pSyncNode->myRaftId = pSyncNode->replicasId[pSyncNode->raftCfg.cfg.myIndex]; + + // init peersNum, peers, peersId + pSyncNode->peersNum = pSyncNode->raftCfg.cfg.totalReplicaNum - 1; + int32_t j = 0; + for (int32_t i = 0; i < pSyncNode->raftCfg.cfg.totalReplicaNum; ++i) { + if (i != pSyncNode->raftCfg.cfg.myIndex) { + pSyncNode->peersNodeInfo[j] = pSyncNode->raftCfg.cfg.nodeInfo[i]; + pSyncNode->peersId[j] = pSyncNode->replicasId[i]; + syncUtilNodeInfo2EpSet(&pSyncNode->peersNodeInfo[j], &pSyncNode->peersEpset[j]); + j++; + } + } + + pSyncNode->arbTerm = -1; + (void)taosThreadMutexInit(&pSyncNode->arbTokenMutex, NULL); + syncUtilGenerateArbToken(pSyncNode->myNodeInfo.nodeId, pSyncInfo->vgId, pSyncNode->arbToken); + sInfo("vgId:%d, generate arb token:%s", pSyncNode->vgId, pSyncNode->arbToken); + // init raft algorithm pSyncNode->pFsm = pSyncInfo->pFsm; pSyncInfo->pFsm = NULL; diff --git a/source/libs/sync/src/syncUtil.c b/source/libs/sync/src/syncUtil.c index ca879f70d9..69abbcdea7 100644 --- a/source/libs/sync/src/syncUtil.c +++ b/source/libs/sync/src/syncUtil.c @@ -43,13 +43,11 @@ void syncUtilNodeInfo2EpSet(const SNodeInfo* pInfo, SEpSet* pEpSet) { bool syncUtilNodeInfo2RaftId(const SNodeInfo* pInfo, SyncGroupId vgId, SRaftId* raftId) { uint32_t ipv4 = 0xFFFFFFFF; - sDebug("vgId:%d, resolve sync addr from fqdn, dnode:%d cluster:%" PRId64 " fqdn:%s port:%u", vgId, pInfo->nodeId, - pInfo->clusterId, pInfo->nodeFqdn, pInfo->nodePort); + sDebug("vgId:%d, resolve sync addr from fqdn, ep:%s:%u", vgId, pInfo->nodeFqdn, pInfo->nodePort); for (int32_t i = 0; i < tsResolveFQDNRetryTime; i++) { int32_t code = taosGetIpv4FromFqdn(pInfo->nodeFqdn, &ipv4); if (code) { - sError("vgId:%d, failed to resolve sync addr, dnode:%d fqdn:%s, wait one second", vgId, pInfo->nodeId, - pInfo->nodeFqdn); + sError("vgId:%d, failed to resolve sync addr, dnode:%d fqdn:%s, retry", vgId, pInfo->nodeId, pInfo->nodeFqdn); taosSsleep(1); } else { break; @@ -57,7 +55,7 @@ bool syncUtilNodeInfo2RaftId(const SNodeInfo* pInfo, SyncGroupId vgId, SRaftId* } if (ipv4 == 0xFFFFFFFF || ipv4 == 1) { - sError("vgId:%d, failed to resolve sync addr, fqdn:%s", vgId, pInfo->nodeFqdn); + sError("vgId:%d, failed to resolve sync addr, dnode:%d fqdn:%s", vgId, pInfo->nodeId, pInfo->nodeFqdn); terrno = 
TSDB_CODE_TSC_INVALID_FQDN; return false; } @@ -67,8 +65,8 @@ bool syncUtilNodeInfo2RaftId(const SNodeInfo* pInfo, SyncGroupId vgId, SRaftId* raftId->addr = SYNC_ADDR(pInfo); raftId->vgId = vgId; - sInfo("vgId:%d, sync addr:%" PRIu64 " is resolved, dnode:%d cluster:%" PRId64 " fqdn:%s port:%u ip:%s ipv4:%u", vgId, - raftId->addr, pInfo->nodeId, pInfo->clusterId, pInfo->nodeFqdn, pInfo->nodePort, ipbuf, ipv4); + sInfo("vgId:%d, sync addr:%" PRIu64 " is resolved, ep:%s:%u ip:%s ipv4:%u dnode:%d cluster:%" PRId64, vgId, + raftId->addr, pInfo->nodeFqdn, pInfo->nodePort, ipbuf, ipv4, pInfo->nodeId, pInfo->clusterId); return true; } From 54dbf92517ccd514a9e0a3c29fc4b52e65d65f4e Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao> Date: Tue, 20 Aug 2024 13:28:53 +0800 Subject: [PATCH 172/181] fix mem leak for max delay --- source/libs/executor/src/streamtimewindowoperator.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/source/libs/executor/src/streamtimewindowoperator.c b/source/libs/executor/src/streamtimewindowoperator.c index 5c12db1ab9..9147ff9b27 100644 --- a/source/libs/executor/src/streamtimewindowoperator.c +++ b/source/libs/executor/src/streamtimewindowoperator.c @@ -439,6 +439,11 @@ void destroyFlusedPos(void* pRes) { } } +void destroyFlusedppPos(void* ppRes) { + void *pRes = *(void **)ppRes; + destroyFlusedPos(pRes); +} + void clearGroupResInfo(SGroupResInfo* pGroupResInfo) { if (pGroupResInfo->freeItem) { int32_t size = taosArrayGetSize(pGroupResInfo->pRows); @@ -1920,6 +1925,7 @@ int32_t createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiN code = initAggSup(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str, pInfo->pState, &pTaskInfo->storageAPI.functionStore); QUERY_CHECK_CODE(code, lino, _error); + tSimpleHashSetFreeFp(pInfo->aggSup.pResultRowHashTable, destroyFlusedppPos); code = initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window); QUERY_CHECK_CODE(code, lino, _error); @@ -5283,6 +5289,7 @@ int32_t createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* code = initAggSup(pSup, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str, pInfo->pState, &pTaskInfo->storageAPI.functionStore); QUERY_CHECK_CODE(code, lino, _error); + tSimpleHashSetFreeFp(pInfo->aggSup.pResultRowHashTable, destroyFlusedppPos); if (pIntervalPhyNode->window.pExprs != NULL) { int32_t numOfScalar = 0; From 7ac14bca97d841839bf299cdc994b512c149a110 Mon Sep 17 00:00:00 2001 From: kailixu Date: Tue, 20 Aug 2024 14:06:35 +0800 Subject: [PATCH 173/181] fix: memory leak of geos --- source/util/src/tgeosctx.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/util/src/tgeosctx.c b/source/util/src/tgeosctx.c index 99655ed7f7..82a360edd1 100644 --- a/source/util/src/tgeosctx.c +++ b/source/util/src/tgeosctx.c @@ -104,7 +104,8 @@ void taosGeosDestroy() { int32_t size = taosArrayGetSize(sGeosPool.poolArray); for (int32_t i = 0; i < size; ++i) { SGeosContext *pool = *(SGeosContext **)TARRAY_GET_ELEM(sGeosPool.poolArray, i); - for (int32_t j = 0; j < GEOS_POOL_CAPACITY; ++j) { + int32_t poolSize = i == size - 1 ? sGeosPool.size : GEOS_POOL_CAPACITY; + for (int32_t j = 0; j < poolSize; ++j) { destroyGeosCtx(pool + j); } taosMemoryFree(pool); From 7759e6aea41fd2cd57f27a1b5095d1e0b98c1c6e Mon Sep 17 00:00:00 2001 From: sima Date: Tue, 20 Aug 2024 14:12:30 +0800 Subject: [PATCH 174/181] fix:[TD-31558] Handle error of tDecodeSSchemaWrapper. 
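The return value of tDecodeSSchemaWrapper was previously ignored, so a failed decode could leave a partially decoded schema wrapper in use, and the column array it may already have allocated was not released on the error path. The fix checks the code, frees the partially built schema and hands the error back to the caller. The snippet below is only a generic sketch of that check-then-cleanup pattern under invented names; decode_schema() and the Schema struct are illustrations, not the real tDecodeSSchemaWrapper API.

    #include <stdlib.h>
    #include <string.h>

    typedef struct { int ncols; int *cols; } Schema;

    /* Stand-in decoder: may allocate out->cols and can fail part-way,
     * returning a non-zero code and leaving a partial allocation behind. */
    static int decode_schema(const unsigned char *buf, int len, Schema *out) {
      if (len < 1) return -1;
      out->ncols = buf[0];
      if (out->ncols > 0) {
        out->cols = calloc((size_t)out->ncols, sizeof(int));
        if (out->cols == NULL) return -1;
      }
      if (len < 1 + out->ncols) return -2;   /* truncated input */
      for (int i = 0; i < out->ncols; ++i) out->cols[i] = (int)buf[1 + i];
      return 0;
    }

    /* Check the decoder's result and release the partial schema on failure
     * instead of silently using (and leaking) a half-decoded one. */
    static int load_schema(const unsigned char *buf, int len, Schema *out) {
      memset(out, 0, sizeof(*out));
      int code = decode_schema(buf, len, out);
      if (code != 0) {
        free(out->cols);
        out->cols = NULL;
        return code;
      }
      return 0;
    }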
--- source/dnode/vnode/src/meta/metaQuery.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index bee4727260..3b10182c90 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -694,9 +694,13 @@ int32_t metaGetTbTSchemaEx(SMeta *pMeta, tb_uid_t suid, tb_uid_t uid, int32_t sv SSchemaWrapper *pSchemaWrapper = &schema; tDecoderInit(&dc, pData, nData); - (void)tDecodeSSchemaWrapper(&dc, pSchemaWrapper); + code = tDecodeSSchemaWrapper(&dc, pSchemaWrapper); tDecoderClear(&dc); tdbFree(pData); + if (TSDB_CODE_SUCCESS != code) { + taosMemoryFree(pSchemaWrapper->pSchema); + goto _exit; + } // convert STSchema *pTSchema = tBuildTSchema(pSchemaWrapper->pSchema, pSchemaWrapper->nCols, pSchemaWrapper->version); From b70ad8d3bb5cd2030051c8254c478754519444ed Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Tue, 20 Aug 2024 14:13:06 +0800 Subject: [PATCH 175/181] fix: return code check issue --- source/libs/executor/src/dynqueryctrloperator.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/executor/src/dynqueryctrloperator.c b/source/libs/executor/src/dynqueryctrloperator.c index 02932cd278..772222f9ed 100644 --- a/source/libs/executor/src/dynqueryctrloperator.c +++ b/source/libs/executor/src/dynqueryctrloperator.c @@ -320,7 +320,7 @@ static int32_t buildMergeJoinOperatorParam(SOperatorParam** ppRes, bool initPara return code; } (*ppRes)->pChildren = taosArrayInit(2, POINTER_BYTES); - if (NULL == *ppRes) { + if (NULL == (*ppRes)->pChildren) { code = terrno; freeOperatorParam(pChild0, OP_GET_PARAM); freeOperatorParam(pChild1, OP_GET_PARAM); From 557dbd883880fac40d815ec6b3f546b6596fdfbd Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Tue, 20 Aug 2024 14:53:28 +0800 Subject: [PATCH 176/181] fix: data sink memory leak --- source/libs/executor/src/dataDispatcher.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/source/libs/executor/src/dataDispatcher.c b/source/libs/executor/src/dataDispatcher.c index 3964422411..f616cb05da 100644 --- a/source/libs/executor/src/dataDispatcher.c +++ b/source/libs/executor/src/dataDispatcher.c @@ -318,6 +318,7 @@ int32_t createDataDispatcher(SDataSinkManager* pManager, const SDataSinkNode* pD dispatcher->sink.fGetCacheSize = getCacheSize; dispatcher->pManager = pManager; + pManager = NULL; dispatcher->pSchema = pDataSink->pInputDataBlockDesc; dispatcher->status = DS_BUF_EMPTY; dispatcher->queryEnd = false; @@ -336,6 +337,9 @@ int32_t createDataDispatcher(SDataSinkManager* pManager, const SDataSinkNode* pD return TSDB_CODE_SUCCESS; _return: + + taosMemoryFree(pManager); + if (dispatcher) { dsDestroyDataSinker(dispatcher); } From b06d16162dbbefa7cbc04493173481bdee977ef8 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Tue, 20 Aug 2024 15:10:58 +0800 Subject: [PATCH 177/181] fix: tudf crash --- include/util/taoserror.h | 1 + source/libs/function/src/tudf.c | 5 +++++ source/util/src/terror.c | 1 + 3 files changed, 7 insertions(+) diff --git a/include/util/taoserror.h b/include/util/taoserror.h index 32e3ba1e43..b772edbf22 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -897,6 +897,7 @@ int32_t taosGetErrSize(); #define TSDB_CODE_UDF_SCRIPT_NOT_SUPPORTED TAOS_DEF_ERROR_CODE(0, 0x2909) #define TSDB_CODE_UDF_FUNC_EXEC_FAILURE TAOS_DEF_ERROR_CODE(0, 0x290A) #define TSDB_CODE_UDF_UV_EXEC_FAILURE TAOS_DEF_ERROR_CODE(0, 0x290B) +#define TSDB_CODE_UDF_INTERNAL_ERROR 
TAOS_DEF_ERROR_CODE(0, 0x2920) // sml #define TSDB_CODE_SML_INVALID_PROTOCOL_TYPE TAOS_DEF_ERROR_CODE(0, 0x3000) diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c index 9a751db801..d5ecf09cee 100644 --- a/source/libs/function/src/tudf.c +++ b/source/libs/function/src/tudf.c @@ -796,9 +796,14 @@ void *decodeUdfResponse(const void *buf, SUdfResponse *rsp) { buf = decodeUdfTeardownResponse(buf, &rsp->teardownRsp); break; default: + rsp->code = TSDB_CODE_UDF_INTERNAL_ERROR; fnError("decode udf response, invalid udf response type %d", rsp->type); break; } + if(buf == NULL) { + rsp->code = terrno; + fnError("decode udf response failed, code:0x%x", rsp->code); + } return (void *)buf; } diff --git a/source/util/src/terror.c b/source/util/src/terror.c index b627a2aa80..b307c4ac4b 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -742,6 +742,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_UDF_INVALID_OUTPUT_TYPE, "udf invalid output TAOS_DEFINE_ERROR(TSDB_CODE_UDF_SCRIPT_NOT_SUPPORTED, "udf program language not supported") TAOS_DEFINE_ERROR(TSDB_CODE_UDF_FUNC_EXEC_FAILURE, "udf function execution failure") TAOS_DEFINE_ERROR(TSDB_CODE_UDF_UV_EXEC_FAILURE, "udf uvlib function execution failure") +TAOS_DEFINE_ERROR(TSDB_CODE_UDF_INTERNAL_ERROR, "udf internal error") //schemaless TAOS_DEFINE_ERROR(TSDB_CODE_SML_INVALID_PROTOCOL_TYPE, "Invalid line protocol type") From 9900643657b94237c35aeab683c6e74861e8f48b Mon Sep 17 00:00:00 2001 From: sima Date: Tue, 20 Aug 2024 15:33:28 +0800 Subject: [PATCH 178/181] enh:[TD-31564] Remove ASSERT in client. --- source/client/src/clientImpl.c | 30 ++++++++++++++++++------------ source/client/src/clientMain.c | 27 ++++++++++++++------------- 2 files changed, 32 insertions(+), 25 deletions(-) diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index e12c761fcc..563d70ab08 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -166,7 +166,10 @@ int32_t taos_connect_internal(const char* ip, const char* user, const char* pass pInst = &p; } else { - ASSERTS((*pInst) && (*pInst)->pAppHbMgr, "*pInst:%p, pAppHgMgr:%p", *pInst, (*pInst) ? (*pInst)->pAppHbMgr : NULL); + if (NULL == *pInst || NULL == (*pInst)->pAppHbMgr) { + tscError("*pInst:%p, pAppHgMgr:%p", *pInst, (*pInst) ? (*pInst)->pAppHbMgr : NULL); + TSC_ERR_JRET(TSDB_CODE_TSC_INTERNAL_ERROR); + } // reset to 0 in case of conn with duplicated user key but its user has ever been dropped. 
atomic_store_8(&(*pInst)->pAppHbMgr->connHbFlag, 0); } @@ -2036,9 +2039,9 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i // | version | total length | total rows | total columns | flag seg| block group id | column schema | each column // length | int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3); - if (ASSERT(numOfCols == cols)) { + if (numOfCols != cols) { tscError("estimateJsonLen error: numOfCols:%d != cols:%d", numOfCols, cols); - return -1; + return TSDB_CODE_TSC_INTERNAL_ERROR; } int32_t len = getVersion1BlockMetaSize(p, numOfCols); @@ -2123,7 +2126,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int int32_t totalLen = 0; int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3); - if (ASSERT(numOfCols == cols)) { + if (numOfCols != cols) { tscError("doConvertJson error: numOfCols:%d != cols:%d", numOfCols, cols); return TSDB_CODE_TSC_INTERNAL_ERROR; } @@ -2148,7 +2151,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int for (int32_t i = 0; i < numOfCols; ++i) { int32_t colLen = (blockVersion == BLOCK_VERSION_1) ? htonl(colLength[i]) : colLength[i]; int32_t colLen1 = (blockVersion == BLOCK_VERSION_1) ? htonl(colLength1[i]) : colLength1[i]; - if (ASSERT(colLen < dataLen)) { + if (colLen >= dataLen) { tscError("doConvertJson error: colLen:%d >= dataLen:%d", colLen, dataLen); return TSDB_CODE_TSC_INTERNAL_ERROR; } @@ -2236,7 +2239,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32_t numOfCols, int32_t numOfRows, bool convertUcs4) { - if (ASSERT(numOfCols > 0 && pFields != NULL && pResultInfo != NULL)) { + if (numOfCols <= 0 || pFields == NULL || pResultInfo == NULL) { tscError("setResultDataPtr paras error"); return TSDB_CODE_TSC_INTERNAL_ERROR; } @@ -2269,7 +2272,7 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32 int32_t cols = *(int32_t*)p; p += sizeof(int32_t); - if (ASSERT(rows == numOfRows && cols == numOfCols)) { + if (rows != numOfRows || cols != numOfCols) { tscError("setResultDataPtr paras error:rows;%d numOfRows:%d cols:%d numOfCols:%d", rows, numOfRows, cols, numOfCols); return TSDB_CODE_TSC_INTERNAL_ERROR; @@ -2288,8 +2291,6 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32 int32_t bytes = *(int32_t*)p; p += sizeof(int32_t); - - /*ASSERT(type == pFields[i].type && bytes == pFields[i].bytes);*/ } int32_t* colLength = (int32_t*)p; @@ -2411,18 +2412,23 @@ int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableR if (pRsp->compressed && compLen < rawLen) { int32_t len = tsDecompressString(pStart, compLen, 1, pResultInfo->decompBuf, rawLen, ONE_STAGE_COMP, NULL, 0); - ASSERT(len == rawLen); if (len < 0) { tscError("tsDecompressString failed"); return terrno ? 
terrno : TSDB_CODE_FAILED; } - + if (len != rawLen) { + tscError("tsDecompressString failed, len:%d != rawLen:%d", len, rawLen); + return TSDB_CODE_TSC_INTERNAL_ERROR; + } pResultInfo->pData = pResultInfo->decompBuf; pResultInfo->payloadLen = rawLen; } else { pResultInfo->pData = pStart; pResultInfo->payloadLen = htonl(pRsp->compLen); - ASSERT(pRsp->compLen == pRsp->payloadLen); + if (pRsp->compLen != pRsp->payloadLen) { + tscError("pRsp->compLen:%d != pRsp->payloadLen:%d", pRsp->compLen, pRsp->payloadLen); + return TSDB_CODE_TSC_INTERNAL_ERROR; + } } } diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index d007dae7f7..de56a4844a 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -477,7 +477,6 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) { } else if (TD_RES_TMQ_META(res) || TD_RES_TMQ_BATCH_META(res)) { return NULL; } else { - // assert to avoid un-initialization error tscError("invalid result passed to taos_fetch_row"); terrno = TSDB_CODE_TSC_INTERNAL_ERROR; return NULL; @@ -557,12 +556,14 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) case TSDB_DATA_TYPE_GEOMETRY: { int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE); if (fields[i].type == TSDB_DATA_TYPE_BINARY || fields[i].type == TSDB_DATA_TYPE_VARBINARY || fields[i].type == TSDB_DATA_TYPE_GEOMETRY) { - if (ASSERT(charLen <= fields[i].bytes && charLen >= 0)) { + if (charLen > fields[i].bytes || charLen < 0) { tscError("taos_print_row error binary. charLen:%d, fields[i].bytes:%d", charLen, fields[i].bytes); + break; } } else { - if (ASSERT(charLen <= fields[i].bytes * TSDB_NCHAR_SIZE && charLen >= 0)) { + if (charLen > fields[i].bytes * TSDB_NCHAR_SIZE || charLen < 0) { tscError("taos_print_row error. 
charLen:%d, fields[i].bytes:%d", charLen, fields[i].bytes); + break; } } @@ -1315,11 +1316,11 @@ void restartAsyncQuery(SRequestObj *pRequest, int32_t code) { } void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) { - if (ASSERT(res != NULL && fp != NULL)) { + if (res == NULL || fp == NULL) { tscError("taos_fetch_rows_a invalid paras"); return; } - if (ASSERT(TD_RES_QUERY(res))) { + if (!TD_RES_QUERY(res)) { tscError("taos_fetch_rows_a res is NULL"); return; } @@ -1334,12 +1335,12 @@ void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) { } void taos_fetch_raw_block_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) { - if (ASSERT(res != NULL && fp != NULL)) { - tscError("taos_fetch_rows_a invalid paras"); + if (res == NULL || fp == NULL) { + tscError("taos_fetch_raw_block_a invalid paras"); return; } - if (ASSERT(TD_RES_QUERY(res))) { - tscError("taos_fetch_rows_a res is NULL"); + if (!TD_RES_QUERY(res)) { + tscError("taos_fetch_raw_block_a res is NULL"); return; } SRequestObj *pRequest = res; @@ -1353,12 +1354,12 @@ void taos_fetch_raw_block_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) { } const void *taos_get_raw_block(TAOS_RES *res) { - if (ASSERT(res != NULL)) { - tscError("taos_fetch_rows_a invalid paras"); + if (res == NULL) { + tscError("taos_get_raw_block invalid paras"); return NULL; } - if (ASSERT(TD_RES_QUERY(res))) { - tscError("taos_fetch_rows_a res is NULL"); + if (!TD_RES_QUERY(res)) { + tscError("taos_get_raw_block res is NULL"); return NULL; } SRequestObj *pRequest = res; From 3cdef9f0bfd8968a42cf17847d3068f9d87618c2 Mon Sep 17 00:00:00 2001 From: sima Date: Tue, 20 Aug 2024 17:51:28 +0800 Subject: [PATCH 179/181] fix:[TD-31567] Fix repeatFunction accessing out-of-bounds array index. --- source/libs/scalar/src/sclfunc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index 37daff1d63..12f0137fc5 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -1768,7 +1768,7 @@ int32_t repeatFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOu continue; } int32_t count = 0; - GET_TYPED_DATA(count, int32_t, GET_PARAM_TYPE(&pInput[1]), colDataGetData(pInput[1].columnData, i)); + GET_TYPED_DATA(count, int32_t, GET_PARAM_TYPE(&pInput[1]), colDataGetData(pInput[1].columnData, 0)); if (count <= 0) { varDataSetLen(output, 0); SCL_ERR_JRET(colDataSetVal(pOutputData, i, outputBuf, false)); From 9f8f7591696558b4ba433e03c71ee33f477d3dff Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Tue, 20 Aug 2024 21:10:14 +0800 Subject: [PATCH 180/181] tetst:update windows test case in full ci --- tests/script/win-test-file | 14 +++-- tests/system-test/win-test-file | 108 +++++++++++++++++++++++++++++++- 2 files changed, 117 insertions(+), 5 deletions(-) diff --git a/tests/script/win-test-file b/tests/script/win-test-file index acc4c74d21..8c96722c9f 100644 --- a/tests/script/win-test-file +++ b/tests/script/win-test-file @@ -1,3 +1,5 @@ +./test.sh -f tsim/query/timeline.sim +./test.sh -f tsim/join/join.sim ./test.sh -f tsim/tmq/basic2Of2ConsOverlap.sim ./test.sh -f tsim/parser/where.sim ./test.sh -f tsim/parser/join_manyblocks.sim @@ -150,6 +152,7 @@ ./test.sh -f tsim/parser/join_multivnode.sim ./test.sh -f tsim/parser/join.sim ./test.sh -f tsim/parser/last_cache.sim +./test.sh -f tsim/parser/last_both.sim ./test.sh -f tsim/parser/last_groupby.sim ./test.sh -f tsim/parser/lastrow.sim ./test.sh -f tsim/parser/lastrow2.sim @@ 
-192,6 +195,7 @@ ./test.sh -f tsim/query/session.sim ./test.sh -f tsim/query/join_interval.sim ./test.sh -f tsim/query/join_pk.sim +./test.sh -f tsim/query/join_order.sim ./test.sh -f tsim/query/count_spread.sim ./test.sh -f tsim/query/unionall_as_table.sim ./test.sh -f tsim/query/multi_order_by.sim @@ -214,6 +218,9 @@ ./test.sh -f tsim/query/bug3398.sim ./test.sh -f tsim/query/explain_tsorder.sim ./test.sh -f tsim/query/apercentile.sim +./test.sh -f tsim/query/query_count0.sim +./test.sh -f tsim/query/query_count_sliding0.sim +./test.sh -f tsim/query/union_precision.sim ./test.sh -f tsim/qnode/basic1.sim ./test.sh -f tsim/snode/basic1.sim ./test.sh -f tsim/mnode/basic1.sim @@ -287,9 +294,6 @@ ./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim ./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim ./test.sh -f tsim/sma/rsmaCreateInsertQueryDelete.sim - -### refactor stream backend, open case after rsma refactored -#./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim ./test.sh -f tsim/sync/vnodesnapshot-rsma-test.sim ./test.sh -f tsim/valgrind/checkError1.sim ./test.sh -f tsim/valgrind/checkError2.sim @@ -326,6 +330,7 @@ ./test.sh -f tsim/compress/commitlog.sim ./test.sh -f tsim/compress/compress2.sim ./test.sh -f tsim/compress/compress.sim +./test.sh -f tsim/compress/compress_col.sim ./test.sh -f tsim/compress/uncompress.sim ./test.sh -f tsim/compute/avg.sim ./test.sh -f tsim/compute/block_dist.sim @@ -401,8 +406,9 @@ ./test.sh -f tsim/tag/tbNameIn.sim ./test.sh -f tmp/monitor.sim ./test.sh -f tsim/tagindex/add_index.sim -./test.sh -f tsim/tagindex/indexOverflow.sim ./test.sh -f tsim/tagindex/sma_and_tag_index.sim +./test.sh -f tsim/tagindex/indexOverflow.sim ./test.sh -f tsim/view/view.sim ./test.sh -f tsim/query/cache_last.sim ./test.sh -f tsim/query/const.sim +./test.sh -f tsim/query/nestedJoinView.sim diff --git a/tests/system-test/win-test-file b/tests/system-test/win-test-file index 69688e7450..41eb28e071 100644 --- a/tests/system-test/win-test-file +++ b/tests/system-test/win-test-file @@ -1,5 +1,13 @@ +python3 ./test.py -f 2-query/pk_error.py +python3 ./test.py -f 2-query/pk_func.py +python3 ./test.py -f 2-query/pk_varchar.py +python3 ./test.py -f 2-query/pk_func_group.py +python3 ./test.py -f 2-query/partition_expr.py +python3 ./test.py -f 2-query/project_group.py python3 ./test.py -f 2-query/tbname_vgroup.py +python3 ./test.py -f 2-query/count_interval.py python3 ./test.py -f 2-query/compact-col.py +python3 ./test.py -f 2-query/tms_memleak.py python3 ./test.py -f 2-query/stbJoin.py python3 ./test.py -f 2-query/stbJoin.py -Q 2 python3 ./test.py -f 2-query/stbJoin.py -Q 3 @@ -8,6 +16,8 @@ python3 ./test.py -f 2-query/hint.py python3 ./test.py -f 2-query/hint.py -Q 2 python3 ./test.py -f 2-query/hint.py -Q 3 python3 ./test.py -f 2-query/hint.py -Q 4 +python3 ./test.py -f 2-query/para_tms.py +python3 ./test.py -f 2-query/para_tms2.py python3 ./test.py -f 2-query/nestedQuery.py python3 ./test.py -f 2-query/nestedQuery_str.py python3 ./test.py -f 2-query/nestedQuery_math.py @@ -52,6 +62,20 @@ python3 ./test.py -f 2-query/last_cache_scan.py python3 ./test.py -f 2-query/last_cache_scan.py -Q 2 python3 ./test.py -f 2-query/last_cache_scan.py -Q 3 python3 ./test.py -f 2-query/last_cache_scan.py -Q 4 +python3 ./test.py -f 2-query/tbname.py +python3 ./test.py -f 2-query/tbname.py -Q 2 +python3 ./test.py -f 2-query/tbname.py -Q 3 +python3 ./test.py -f 2-query/tbname.py -Q 4 +python3 ./test.py -f 2-query/tsma.py +python3 ./test.py -f 2-query/tsma.py -R +python3 ./test.py -f 2-query/tsma.py -Q 2 +python3 
./test.py -f 2-query/tsma.py -Q 3 +python3 ./test.py -f 2-query/tsma.py -Q 4 +python3 ./test.py -f 2-query/tsma2.py +python3 ./test.py -f 2-query/tsma2.py -R +python3 ./test.py -f 2-query/tsma2.py -Q 2 +python3 ./test.py -f 2-query/tsma2.py -Q 3 +python3 ./test.py -f 2-query/tsma2.py -Q 4 python3 ./test.py -f 7-tmq/tmqShow.py python3 ./test.py -f 7-tmq/tmqDropStb.py python3 ./test.py -f 7-tmq/subscribeStb0.py @@ -67,11 +91,14 @@ python3 ./test.py -f 7-tmq/tmqClientConsLog.py python3 ./test.py -f 7-tmq/tmqMaxGroupIds.py python3 ./test.py -f 7-tmq/tmqConsumeDiscontinuousData.py python3 ./test.py -f 7-tmq/tmqOffset.py +python3 ./test.py -f 7-tmq/tmq_primary_key.py python3 ./test.py -f 7-tmq/tmqDropConsumer.py python3 ./test.py -f 1-insert/insert_stb.py python3 ./test.py -f 1-insert/delete_stable.py +python3 ./test.py -f 1-insert/stt_blocks_check.py python3 ./test.py -f 2-query/out_of_order.py -Q 3 python3 ./test.py -f 2-query/out_of_order.py +python3 ./test.py -f 2-query/agg_null.py python3 ./test.py -f 2-query/insert_null_none.py python3 ./test.py -f 2-query/insert_null_none.py -R python3 ./test.py -f 2-query/insert_null_none.py -Q 2 @@ -108,6 +135,18 @@ python3 ./test.py -f 2-query/match.py python3 ./test.py -f 2-query/match.py -Q 2 python3 ./test.py -f 2-query/match.py -Q 3 python3 ./test.py -f 2-query/match.py -Q 4 +python3 ./test.py -f 2-query/td-28068.py +python3 ./test.py -f 2-query/td-28068.py -Q 2 +python3 ./test.py -f 2-query/td-28068.py -Q 3 +python3 ./test.py -f 2-query/td-28068.py -Q 4 +python3 ./test.py -f 2-query/agg_group_AlwaysReturnValue.py +python3 ./test.py -f 2-query/agg_group_AlwaysReturnValue.py -Q 2 +python3 ./test.py -f 2-query/agg_group_AlwaysReturnValue.py -Q 3 +python3 ./test.py -f 2-query/agg_group_AlwaysReturnValue.py -Q 4 +python3 ./test.py -f 2-query/agg_group_NotReturnValue.py +python3 ./test.py -f 2-query/agg_group_NotReturnValue.py -Q 2 +python3 ./test.py -f 2-query/agg_group_NotReturnValue.py -Q 3 +python3 ./test.py -f 2-query/agg_group_NotReturnValue.py -Q 4 python3 ./test.py -f 3-enterprise/restore/restoreDnode.py -N 5 -M 3 -i False python3 ./test.py -f 3-enterprise/restore/restoreVnode.py -N 5 -M 3 -i False python3 ./test.py -f 3-enterprise/restore/restoreMnode.py -N 5 -M 3 -i False @@ -158,7 +197,8 @@ python3 ./test.py -f 7-tmq/tmqDropNtb-snapshot1.py python3 ./test.py -f 7-tmq/stbTagFilter-1ctb.py python3 ./test.py -f 7-tmq/dataFromTsdbNWal.py python3 ./test.py -f 7-tmq/dataFromTsdbNWal-multiCtb.py -# python3 ./test.py -f 7-tmq/tmq_taosx.py +python3 ./test.py -f 7-tmq/tmq_taosx.py +python3 ./test.py -f 7-tmq/tmq_ts4563.py python3 ./test.py -f 7-tmq/tmq_replay.py python3 ./test.py -f 7-tmq/tmqSeekAndCommit.py python3 ./test.py -f 7-tmq/tmq_offset.py @@ -168,15 +208,21 @@ python3 ./test.py -f 7-tmq/stbTagFilter-multiCtb.py python3 ./test.py -f 7-tmq/tmqSubscribeStb-r3.py -N 5 python3 ./test.py -f 7-tmq/tmq3mnodeSwitch.py -N 6 -M 3 -i True python3 ./test.py -f 7-tmq/tmq3mnodeSwitch.py -N 6 -M 3 -n 3 -i True +python3 test.py -f 7-tmq/tmqVnodeTransform-db-removewal.py -N 2 -n 1 +python3 test.py -f 7-tmq/tmqVnodeTransform-stb-removewal.py -N 6 -n 3 python3 test.py -f 7-tmq/tmqVnodeTransform-stb.py -N 2 -n 1 python3 test.py -f 7-tmq/tmqVnodeTransform-stb.py -N 6 -n 3 python3 test.py -f 7-tmq/tmqVnodeSplit-stb-select.py -N 2 -n 1 python3 test.py -f 7-tmq/tmqVnodeSplit-stb-select-duplicatedata.py -N 3 -n 3 python3 test.py -f 7-tmq/tmqVnodeSplit-stb-select-duplicatedata-false.py -N 3 -n 3 python3 test.py -f 7-tmq/tmqVnodeSplit-stb-select.py -N 3 -n 3 +python3 
test.py -f 7-tmq/tmqVnodeSplit-stb-select-false.py -N 3 -n 3 python3 test.py -f 7-tmq/tmqVnodeSplit-stb.py -N 3 -n 3 +python3 test.py -f 7-tmq/tmqVnodeSplit-stb-false.py -N 3 -n 3 python3 test.py -f 7-tmq/tmqVnodeSplit-column.py -N 3 -n 3 +python3 test.py -f 7-tmq/tmqVnodeSplit-column-false.py -N 3 -n 3 python3 test.py -f 7-tmq/tmqVnodeSplit-db.py -N 3 -n 3 +python3 test.py -f 7-tmq/tmqVnodeSplit-db-false.py -N 3 -n 3 python3 test.py -f 7-tmq/tmqVnodeReplicate.py -M 3 -N 3 -n 3 python3 ./test.py -f 99-TDcase/TD-19201.py python3 ./test.py -f 99-TDcase/TD-21561.py @@ -184,6 +230,7 @@ python3 ./test.py -f 99-TDcase/TS-3404.py python3 ./test.py -f 99-TDcase/TS-3581.py python3 ./test.py -f 99-TDcase/TS-3311.py python3 ./test.py -f 99-TDcase/TS-3821.py +python3 ./test.py -f 99-TDcase/TS-5130.py python3 ./test.py -f 0-others/balance_vgroups_r1.py -N 6 python3 ./test.py -f 0-others/taosShell.py python3 ./test.py -f 0-others/taosShellError.py @@ -217,6 +264,10 @@ python3 ./test.py -f 0-others/splitVGroupWal.py -N 3 -n 3 python3 ./test.py -f 0-others/timeRangeWise.py -N 3 python3 ./test.py -f 0-others/delete_check.py python3 ./test.py -f 0-others/test_hot_refresh_configurations.py +python3 ./test.py -f 0-others/empty_identifier.py +python3 ./test.py -f 1-insert/composite_primary_key_create.py +python3 ./test.py -f 1-insert/composite_primary_key_insert.py +python3 ./test.py -f 1-insert/composite_primary_key_delete.py python3 ./test.py -f 1-insert/insert_double.py python3 ./test.py -f 1-insert/alter_database.py python3 ./test.py -f 1-insert/alter_replica.py -N 3 @@ -270,7 +321,10 @@ python3 ./test.py -f 1-insert/test_ts4219.py python3 ./test.py -f 1-insert/ts-4272.py python3 ./test.py -f 1-insert/test_ts4295.py python3 ./test.py -f 1-insert/test_td27388.py +python3 ./test.py -f 1-insert/test_ts4479.py +python3 ./test.py -f 1-insert/test_td29793.py python3 ./test.py -f 1-insert/insert_timestamp.py +python3 ./test.py -f 1-insert/test_td29157.py python3 ./test.py -f 0-others/show.py python3 ./test.py -f 0-others/show_tag_index.py python3 ./test.py -f 0-others/information_schema.py @@ -308,6 +362,11 @@ python3 ./test.py -f 2-query/concat_ws2.py python3 ./test.py -f 2-query/concat_ws2.py -R python3 ./test.py -f 2-query/cos.py python3 ./test.py -f 2-query/cos.py -R +python3 ./test.py -f 2-query/group_partition.py +python3 ./test.py -f 2-query/group_partition.py -R +python3 ./test.py -f 2-query/group_partition.py -Q 2 +python3 ./test.py -f 2-query/group_partition.py -Q 3 +python3 ./test.py -f 2-query/group_partition.py -Q 4 python3 ./test.py -f 2-query/count_partition.py python3 ./test.py -f 2-query/count_partition.py -R python3 ./test.py -f 2-query/count.py @@ -361,6 +420,40 @@ python3 ./test.py -f 2-query/last_row.py python3 ./test.py -f 2-query/last_row.py -R python3 ./test.py -f 2-query/last.py python3 ./test.py -f 2-query/last.py -R +python3 ./test.py -f 2-query/last_and_last_row.py +python3 ./test.py -f 2-query/last_and_last_row.py -R +python3 ./test.py -f 2-query/last_and_last_row.py -Q 2 +python3 ./test.py -f 2-query/last_and_last_row.py -Q 3 +python3 ./test.py -f 2-query/last_and_last_row.py -Q 4 +python3 ./test.py -f 2-query/last+last_row.py +python3 ./test.py -f 2-query/last+last_row.py -Q 2 +python3 ./test.py -f 2-query/last+last_row.py -Q 3 +python3 ./test.py -f 2-query/last+last_row.py -Q 4 +python3 ./test.py -f 2-query/primary_ts_base_1.py +python3 ./test.py -f 2-query/primary_ts_base_1.py -R +python3 ./test.py -f 2-query/primary_ts_base_1.py -Q 2 +python3 ./test.py -f 
2-query/primary_ts_base_1.py -Q 3 +python3 ./test.py -f 2-query/primary_ts_base_1.py -Q 4 +python3 ./test.py -f 2-query/primary_ts_base_2.py +python3 ./test.py -f 2-query/primary_ts_base_2.py -R +python3 ./test.py -f 2-query/primary_ts_base_2.py -Q 2 +python3 ./test.py -f 2-query/primary_ts_base_2.py -Q 3 +python3 ./test.py -f 2-query/primary_ts_base_2.py -Q 4 +python3 ./test.py -f 2-query/primary_ts_base_3.py +python3 ./test.py -f 2-query/primary_ts_base_3.py -R +python3 ./test.py -f 2-query/primary_ts_base_3.py -Q 2 +python3 ./test.py -f 2-query/primary_ts_base_3.py -Q 3 +python3 ./test.py -f 2-query/primary_ts_base_3.py -Q 4 +python3 ./test.py -f 2-query/primary_ts_base_4.py +python3 ./test.py -f 2-query/primary_ts_base_4.py -R +python3 ./test.py -f 2-query/primary_ts_base_4.py -Q 2 +python3 ./test.py -f 2-query/primary_ts_base_4.py -Q 3 +python3 ./test.py -f 2-query/primary_ts_base_4.py -Q 4 +python3 ./test.py -f 2-query/primary_ts_base_5.py +python3 ./test.py -f 2-query/primary_ts_base_5.py -R +python3 ./test.py -f 2-query/primary_ts_base_5.py -Q 2 +python3 ./test.py -f 2-query/primary_ts_base_5.py -Q 3 +python3 ./test.py -f 2-query/primary_ts_base_5.py -Q 4 python3 ./test.py -f 2-query/leastsquares.py python3 ./test.py -f 2-query/leastsquares.py -R python3 ./test.py -f 2-query/length.py @@ -368,6 +461,8 @@ python3 ./test.py -f 2-query/length.py -R python3 ./test.py -f 2-query/limit.py python3 ./test.py -f 2-query/log.py python3 ./test.py -f 2-query/log.py -R +python3 ./test.py -f 2-query/logical_operators.py +python3 ./test.py -f 2-query/logical_operators.py -R python3 ./test.py -f 2-query/lower.py python3 ./test.py -f 2-query/lower.py -R python3 ./test.py -f 2-query/ltrim.py @@ -376,6 +471,8 @@ python3 ./test.py -f 2-query/mavg.py python3 ./test.py -f 2-query/mavg.py -R python3 ./test.py -f 2-query/max_partition.py python3 ./test.py -f 2-query/max_partition.py -R +python3 ./test.py -f 2-query/partition_limit_interval.py +python3 ./test.py -f 2-query/partition_limit_interval.py -R python3 ./test.py -f 2-query/max_min_last_interval.py python3 ./test.py -f 2-query/last_row_interval.py python3 ./test.py -f 2-query/max.py @@ -478,6 +575,8 @@ python3 ./test.py -f 2-query/json_tag.py python3 ./test.py -f 2-query/nestedQueryInterval.py python3 ./test.py -f 2-query/systable_func.py python3 ./test.py -f 2-query/test_ts4382.py +python3 ./test.py -f 2-query/test_ts4403.py +python3 ./test.py -f 2-query/test_td28163.py python3 ./test.py -f 2-query/stablity.py python3 ./test.py -f 2-query/stablity_1.py python3 ./test.py -f 2-query/elapsed.py @@ -486,6 +585,10 @@ python3 ./test.py -f 2-query/function_diff.py python3 ./test.py -f 2-query/tagFilter.py python3 ./test.py -f 2-query/projectionDesc.py python3 ./test.py -f 2-query/ts_3405_3398_3423.py -N 3 -n 3 +python3 ./test.py -f 2-query/ts-4348-td-27939.py +python3 ./test.py -f 2-query/backslash_g.py +python3 ./test.py -f 2-query/test_ts4467.py +python3 ./test.py -f 2-query/geometry.py python3 ./test.py -f 2-query/queryQnode.py python3 ./test.py -f 6-cluster/5dnode1mnode.py python3 ./test.py -f 6-cluster/5dnode2mnode.py -N 5 @@ -612,6 +715,7 @@ python3 ./test.py -f 2-query/irate.py -Q 2 python3 ./test.py -f 2-query/function_null.py -Q 2 python3 ./test.py -f 2-query/count_partition.py -Q 2 python3 ./test.py -f 2-query/max_partition.py -Q 2 +python3 ./test.py -f 2-query/partition_limit_interval.py -Q 2 python3 ./test.py -f 2-query/max_min_last_interval.py -Q 2 python3 ./test.py -f 2-query/last_row_interval.py -Q 2 python3 ./test.py -f 
2-query/last_row.py -Q 2 @@ -707,6 +811,7 @@ python3 ./test.py -f 2-query/irate.py -Q 3 python3 ./test.py -f 2-query/function_null.py -Q 3 python3 ./test.py -f 2-query/count_partition.py -Q 3 python3 ./test.py -f 2-query/max_partition.py -Q 3 +python3 ./test.py -f 2-query/partition_limit_interval.py -Q 3 python3 ./test.py -f 2-query/max_min_last_interval.py -Q 3 python3 ./test.py -f 2-query/last_row_interval.py -Q 3 python3 ./test.py -f 2-query/last_row.py -Q 3 @@ -802,6 +907,7 @@ python3 ./test.py -f 2-query/irate.py -Q 4 python3 ./test.py -f 2-query/function_null.py -Q 4 python3 ./test.py -f 2-query/count_partition.py -Q 4 python3 ./test.py -f 2-query/max_partition.py -Q 4 +python3 ./test.py -f 2-query/partition_limit_interval.py -Q 4 python3 ./test.py -f 2-query/max_min_last_interval.py -Q 4 python3 ./test.py -f 2-query/last_row_interval.py -Q 4 python3 ./test.py -f 2-query/last_row.py -Q 4 From 87e56d3d67f394394759efd066e7925451267d4b Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Wed, 21 Aug 2024 10:23:22 +0800 Subject: [PATCH 181/181] Revert "fix: memory leak of geos" --- include/util/tgeosctx.h | 8 +- source/client/src/clientMain.c | 2 - source/dnode/mgmt/node_mgmt/src/dmMgmt.c | 2 - source/libs/executor/src/sysscanoperator.c | 2 +- source/libs/geometry/src/geosWrapper.c | 28 +----- source/libs/parser/src/parInsertSql.c | 4 +- source/libs/scalar/src/sclvector.c | 8 +- source/util/src/tgeosctx.c | 111 +++++---------------- source/util/src/tsched.c | 2 + source/util/src/tworker.c | 2 + tools/shell/src/shellEngine.c | 5 +- 11 files changed, 45 insertions(+), 129 deletions(-) diff --git a/include/util/tgeosctx.h b/include/util/tgeosctx.h index a4355db29a..267ba9e049 100644 --- a/include/util/tgeosctx.h +++ b/include/util/tgeosctx.h @@ -32,16 +32,14 @@ typedef struct SGeosContext { GEOSWKBReader *WKBReader; GEOSWKBWriter *WKBWriter; - pcre2_code *WKTRegex; + pcre2_code *WKTRegex; pcre2_match_data *WKTMatchData; char errMsg[512]; } SGeosContext; -SGeosContext *acquireThreadLocalGeosCtx(); -SGeosContext *getThreadLocalGeosCtx(); -const char *getGeosErrMsg(int32_t code); -void taosGeosDestroy(); +SGeosContext* getThreadLocalGeosCtx(); +void destroyThreadLocalGeosCtx(); #ifdef __cplusplus } diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index c0eaf27077..d007dae7f7 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -23,7 +23,6 @@ #include "query.h" #include "scheduler.h" #include "tdatablock.h" -#include "tgeosctx.h" #include "tglobal.h" #include "tmsg.h" #include "tref.h" @@ -87,7 +86,6 @@ void taos_cleanup(void) { tscDebug("rpc cleanup"); taosConvDestroy(); - taosGeosDestroy(); tmqMgmtClose(); diff --git a/source/dnode/mgmt/node_mgmt/src/dmMgmt.c b/source/dnode/mgmt/node_mgmt/src/dmMgmt.c index ae6efa7af4..fdce9fd4c9 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmMgmt.c +++ b/source/dnode/mgmt/node_mgmt/src/dmMgmt.c @@ -19,7 +19,6 @@ #include "index.h" #include "qworker.h" #include "tcompression.h" -#include "tgeosctx.h" #include "tglobal.h" #include "tgrant.h" #include "tstream.h" @@ -122,7 +121,6 @@ void dmCleanupDnode(SDnode *pDnode) { streamMetaCleanup(); indexCleanup(); taosConvDestroy(); - taosGeosDestroy(); // compress destroy tsCompressExit(); diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c index 082d4e7789..d8a2331980 100644 --- a/source/libs/executor/src/sysscanoperator.c +++ b/source/libs/executor/src/sysscanoperator.c @@ -979,7 +979,7 @@ static int32_t 
sysTableGetGeomText(char* iGeom, int32_t nGeom, char** output, in if (TSDB_CODE_SUCCESS != (code = initCtxAsText()) || TSDB_CODE_SUCCESS != (code = doAsText(iGeom, nGeom, &outputWKT))) { - qError("geo text for systable failed:%s", getGeosErrMsg(code)); + qError("geo text for systable failed:%s", getThreadLocalGeosCtx()->errMsg); *output = NULL; *nOutput = 0; return code; diff --git a/source/libs/geometry/src/geosWrapper.c b/source/libs/geometry/src/geosWrapper.c index 7372521276..dde34edc91 100644 --- a/source/libs/geometry/src/geosWrapper.c +++ b/source/libs/geometry/src/geosWrapper.c @@ -23,8 +23,7 @@ typedef char (*_geosPreparedRelationFunc_t)(GEOSContextHandle_t handle, const GE void geosFreeBuffer(void *buffer) { if (buffer) { - SGeosContext *pCtx = acquireThreadLocalGeosCtx(); - if (pCtx) GEOSFree_r(pCtx->handle, buffer); + GEOSFree_r(getThreadLocalGeosCtx()->handle, buffer); } } @@ -37,8 +36,6 @@ int32_t initCtxMakePoint() { int32_t code = TSDB_CODE_FAILED; SGeosContext *geosCtx = getThreadLocalGeosCtx(); - if (!geosCtx) return TSDB_CODE_OUT_OF_MEMORY; - if (geosCtx->handle == NULL) { geosCtx->handle = GEOS_init_r(); if (geosCtx->handle == NULL) { @@ -64,8 +61,6 @@ int32_t doMakePoint(double x, double y, unsigned char **outputGeom, size_t *size int32_t code = TSDB_CODE_FAILED; SGeosContext *geosCtx = getThreadLocalGeosCtx(); - if (!geosCtx) return TSDB_CODE_OUT_OF_MEMORY; - GEOSGeometry *geom = NULL; unsigned char *wkb = NULL; @@ -171,8 +166,6 @@ int32_t initCtxGeomFromText() { int32_t code = TSDB_CODE_FAILED; SGeosContext *geosCtx = getThreadLocalGeosCtx(); - if (!geosCtx) return TSDB_CODE_OUT_OF_MEMORY; - if (geosCtx->handle == NULL) { geosCtx->handle = GEOS_init_r(); if (geosCtx->handle == NULL) { @@ -209,8 +202,6 @@ int32_t doGeomFromText(const char *inputWKT, unsigned char **outputGeom, size_t int32_t code = TSDB_CODE_FAILED; SGeosContext *geosCtx = getThreadLocalGeosCtx(); - if (!geosCtx) return TSDB_CODE_OUT_OF_MEMORY; - GEOSGeometry *geom = NULL; unsigned char *wkb = NULL; @@ -246,8 +237,6 @@ int32_t initCtxAsText() { int32_t code = TSDB_CODE_FAILED; SGeosContext *geosCtx = getThreadLocalGeosCtx(); - if (!geosCtx) return TSDB_CODE_OUT_OF_MEMORY; - if (geosCtx->handle == NULL) { geosCtx->handle = GEOS_init_r(); if (geosCtx->handle == NULL) { @@ -284,8 +273,6 @@ int32_t doAsText(const unsigned char *inputGeom, size_t size, char **outputWKT) int32_t code = TSDB_CODE_FAILED; SGeosContext *geosCtx = getThreadLocalGeosCtx(); - if (!geosCtx) return TSDB_CODE_OUT_OF_MEMORY; - GEOSGeometry *geom = NULL; char *wkt = NULL; @@ -317,8 +304,6 @@ int32_t initCtxRelationFunc() { int32_t code = TSDB_CODE_FAILED; SGeosContext *geosCtx = getThreadLocalGeosCtx(); - if (!geosCtx) return TSDB_CODE_OUT_OF_MEMORY; - if (geosCtx->handle == NULL) { geosCtx->handle = GEOS_init_r(); if (geosCtx->handle == NULL) { @@ -344,8 +329,6 @@ int32_t doGeosRelation(const GEOSGeometry *geom1, const GEOSPreparedGeometry *pr _geosPreparedRelationFunc_t swappedPreparedRelationFn) { SGeosContext *geosCtx = getThreadLocalGeosCtx(); - if (!geosCtx) return TSDB_CODE_OUT_OF_MEMORY; - if (!preparedGeom1) { if (!swapped) { ASSERT(relationFn); @@ -406,6 +389,8 @@ int32_t doContainsProperly(const GEOSGeometry *geom1, const GEOSPreparedGeometry // need to call destroyGeometry(outputGeom, outputPreparedGeom) later int32_t readGeometry(const unsigned char *input, GEOSGeometry **outputGeom, const GEOSPreparedGeometry **outputPreparedGeom) { + SGeosContext *geosCtx = getThreadLocalGeosCtx(); + ASSERT(outputGeom); // it is not 
allowed if outputGeom is NULL *outputGeom = NULL; @@ -417,10 +402,6 @@ int32_t readGeometry(const unsigned char *input, GEOSGeometry **outputGeom, return TSDB_CODE_SUCCESS; } - SGeosContext *geosCtx = getThreadLocalGeosCtx(); - - if (!geosCtx) return TSDB_CODE_OUT_OF_MEMORY; - *outputGeom = GEOSWKBReader_read_r(geosCtx->handle, geosCtx->WKBReader, varDataVal(input), varDataLen(input)); if (*outputGeom == NULL) { return TSDB_CODE_FUNC_FUNTION_PARA_VALUE; @@ -437,8 +418,7 @@ int32_t readGeometry(const unsigned char *input, GEOSGeometry **outputGeom, } void destroyGeometry(GEOSGeometry **geom, const GEOSPreparedGeometry **preparedGeom) { - SGeosContext *geosCtx = acquireThreadLocalGeosCtx(); - if (!geosCtx) return; + SGeosContext *geosCtx = getThreadLocalGeosCtx(); if (preparedGeom && *preparedGeom) { GEOSPreparedGeom_destroy_r(geosCtx->handle, *preparedGeom); diff --git a/source/libs/parser/src/parInsertSql.c b/source/libs/parser/src/parInsertSql.c index aa6116287e..cb94cd42f7 100644 --- a/source/libs/parser/src/parInsertSql.c +++ b/source/libs/parser/src/parInsertSql.c @@ -655,7 +655,7 @@ static int32_t parseTagToken(const char** end, SToken* pToken, SSchema* pSchema, code = parseGeometry(pToken, &output, &size); if (code != TSDB_CODE_SUCCESS) { - code = buildSyntaxErrMsg(pMsgBuf, getGeosErrMsg(code), pToken->z); + code = buildSyntaxErrMsg(pMsgBuf, getThreadLocalGeosCtx()->errMsg, pToken->z); } else if (size + VARSTR_HEADER_SIZE > pSchema->bytes) { // Too long values will raise the invalid sql error message code = generateSyntaxErrMsg(pMsgBuf, TSDB_CODE_PAR_VALUE_TOO_LONG, pSchema->name); @@ -1646,7 +1646,7 @@ static int32_t parseValueTokenImpl(SInsertParseContext* pCxt, const char** pSql, code = parseGeometry(pToken, &output, &size); if (code != TSDB_CODE_SUCCESS) { - code = buildSyntaxErrMsg(&pCxt->msg, getGeosErrMsg(code), pToken->z); + code = buildSyntaxErrMsg(&pCxt->msg, getThreadLocalGeosCtx()->errMsg, pToken->z); } // Too long values will raise the invalid sql error message else if (size + VARSTR_HEADER_SIZE > pSchema->bytes) { diff --git a/source/libs/scalar/src/sclvector.c b/source/libs/scalar/src/sclvector.c index bc8a2ae233..5556108a52 100644 --- a/source/libs/scalar/src/sclvector.c +++ b/source/libs/scalar/src/sclvector.c @@ -446,12 +446,12 @@ static FORCE_INLINE int32_t varToGeometry(char *buf, SScalarParam *pOut, int32_t unsigned char *t = NULL; char *output = NULL; - if ((code = initCtxGeomFromText()) != 0) { - sclError("failed to init geometry ctx, %s", getGeosErrMsg(code)); + if (initCtxGeomFromText()) { + sclError("failed to init geometry ctx, %s", getThreadLocalGeosCtx()->errMsg); SCL_ERR_JRET(TSDB_CODE_APP_ERROR); } - if ((code = doGeomFromText(buf, &t, &len)) != 0) { - sclError("failed to convert text to geometry, %s", getGeosErrMsg(code)); + if (doGeomFromText(buf, &t, &len)) { + sclInfo("failed to convert text to geometry, %s", getThreadLocalGeosCtx()->errMsg); SCL_ERR_JRET(TSDB_CODE_SCALAR_CONVERT_ERROR); } diff --git a/source/util/src/tgeosctx.c b/source/util/src/tgeosctx.c index 82a360edd1..a05734c911 100644 --- a/source/util/src/tgeosctx.c +++ b/source/util/src/tgeosctx.c @@ -14,102 +14,39 @@ */ #include "tgeosctx.h" -#include "tarray.h" #include "tdef.h" -#include "tlockfree.h" -#include "tlog.h" -#define GEOS_POOL_CAPACITY 64 -typedef struct { - SArray *poolArray; // totalSize: (GEOS_POOL_CAPACITY * (taosArrayGetSize(poolArray) - 1)) + size - SGeosContext *pool; // current SGeosContext pool - int32_t size; // size of current SGeosContext pool, size <= 
GEOS_POOL_CAPACITY - SRWLatch lock; -} SGeosContextPool; +static threadlocal SGeosContext tlGeosCtx = {0}; -static SGeosContextPool sGeosPool = {0}; -static threadlocal SGeosContext *tlGeosCtx = NULL; +SGeosContext* getThreadLocalGeosCtx() { return &tlGeosCtx; } -SGeosContext *acquireThreadLocalGeosCtx() { return tlGeosCtx; } - -SGeosContext *getThreadLocalGeosCtx() { - if (tlGeosCtx) return tlGeosCtx; - - taosWLockLatch(&sGeosPool.lock); - if (!sGeosPool.pool || sGeosPool.size >= GEOS_POOL_CAPACITY) { - if (!(sGeosPool.pool = (SGeosContext *)taosMemoryCalloc(GEOS_POOL_CAPACITY, sizeof(SGeosContext)))) { - taosWUnLockLatch(&sGeosPool.lock); - return NULL; - } - if (!sGeosPool.poolArray) { - if (!(sGeosPool.poolArray = taosArrayInit(16, POINTER_BYTES))) { - taosMemoryFreeClear(sGeosPool.pool); - taosWUnLockLatch(&sGeosPool.lock); - return NULL; - } - } - if (!taosArrayPush(sGeosPool.poolArray, &sGeosPool.pool)) { - taosMemoryFreeClear(sGeosPool.pool); - taosWUnLockLatch(&sGeosPool.lock); - return NULL; - } - sGeosPool.size = 0; +void destroyThreadLocalGeosCtx() { + if (tlGeosCtx.WKTReader) { + GEOSWKTReader_destroy_r(tlGeosCtx.handle, tlGeosCtx.WKTReader); + tlGeosCtx.WKTReader = NULL; } - tlGeosCtx = sGeosPool.pool + sGeosPool.size; - ++sGeosPool.size; - taosWUnLockLatch(&sGeosPool.lock); - return tlGeosCtx; -} + if (tlGeosCtx.WKTWriter) { + GEOSWKTWriter_destroy_r(tlGeosCtx.handle, tlGeosCtx.WKTWriter); + tlGeosCtx.WKTWriter = NULL; + } -const char *getGeosErrMsg(int32_t code) { return tlGeosCtx ? tlGeosCtx->errMsg : (code != 0 ? tstrerror(code) : ""); } + if (tlGeosCtx.WKBReader) { + GEOSWKBReader_destroy_r(tlGeosCtx.handle, tlGeosCtx.WKBReader); + tlGeosCtx.WKBReader = NULL; + } -static void destroyGeosCtx(SGeosContext *pCtx) { - if (pCtx) { - if (pCtx->WKTReader) { - GEOSWKTReader_destroy_r(pCtx->handle, pCtx->WKTReader); - pCtx->WKTReader = NULL; - } + if (tlGeosCtx.WKBWriter) { + GEOSWKBWriter_destroy_r(tlGeosCtx.handle, tlGeosCtx.WKBWriter); + tlGeosCtx.WKBWriter = NULL; + } - if (pCtx->WKTWriter) { - GEOSWKTWriter_destroy_r(pCtx->handle, pCtx->WKTWriter); - pCtx->WKTWriter = NULL; - } + if (tlGeosCtx.WKTRegex) { + destroyRegexes(tlGeosCtx.WKTRegex, tlGeosCtx.WKTMatchData); + } - if (pCtx->WKBReader) { - GEOSWKBReader_destroy_r(pCtx->handle, pCtx->WKBReader); - pCtx->WKBReader = NULL; - } - - if (pCtx->WKBWriter) { - GEOSWKBWriter_destroy_r(pCtx->handle, pCtx->WKBWriter); - pCtx->WKBWriter = NULL; - } - - if (pCtx->WKTRegex) { - destroyRegexes(pCtx->WKTRegex, pCtx->WKTMatchData); - pCtx->WKTRegex = NULL; - pCtx->WKTMatchData = NULL; - } - - if (pCtx->handle) { - GEOS_finish_r(pCtx->handle); - pCtx->handle = NULL; - } + if (tlGeosCtx.handle) { + GEOS_finish_r(tlGeosCtx.handle); + tlGeosCtx.handle = NULL; } } - -void taosGeosDestroy() { - uInfo("geos is cleaned up"); - int32_t size = taosArrayGetSize(sGeosPool.poolArray); - for (int32_t i = 0; i < size; ++i) { - SGeosContext *pool = *(SGeosContext **)TARRAY_GET_ELEM(sGeosPool.poolArray, i); - int32_t poolSize = i == size - 1 ? 
sGeosPool.size : GEOS_POOL_CAPACITY; - for (int32_t j = 0; j < poolSize; ++j) { - destroyGeosCtx(pool + j); - } - taosMemoryFree(pool); - } - taosArrayDestroy(sGeosPool.poolArray); - sGeosPool.poolArray = NULL; -} \ No newline at end of file diff --git a/source/util/src/tsched.c b/source/util/src/tsched.c index 55a927f340..6779e8dee5 100644 --- a/source/util/src/tsched.c +++ b/source/util/src/tsched.c @@ -178,6 +178,8 @@ void *taosProcessSchedQueue(void *scheduler) { (*(msg.tfp))(msg.ahandle, msg.thandle); } + destroyThreadLocalGeosCtx(); + return NULL; } diff --git a/source/util/src/tworker.c b/source/util/src/tworker.c index ebec134c91..b2064d6787 100644 --- a/source/util/src/tworker.c +++ b/source/util/src/tworker.c @@ -105,6 +105,7 @@ static void *tQWorkerThreadFp(SQueueWorker *worker) { taosUpdateItemSize(qinfo.queue, 1); } + destroyThreadLocalGeosCtx(); DestoryThreadLocalRegComp(); return NULL; @@ -664,6 +665,7 @@ static void *tQueryAutoQWorkerThreadFp(SQueryAutoQWorker *worker) { } } + destroyThreadLocalGeosCtx(); DestoryThreadLocalRegComp(); return NULL; diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index 2c8330c433..0ccbd683dc 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -611,14 +611,14 @@ void shellPrintGeometry(const unsigned char *val, int32_t length, int32_t width) code = initCtxAsText(); if (code != TSDB_CODE_SUCCESS) { - shellPrintString(getGeosErrMsg(code), width); + shellPrintString(getThreadLocalGeosCtx()->errMsg, width); return; } char *outputWKT = NULL; code = doAsText(val, length, &outputWKT); if (code != TSDB_CODE_SUCCESS) { - shellPrintString(getGeosErrMsg(code), width); // should NOT happen + shellPrintString(getThreadLocalGeosCtx()->errMsg, width); // should NOT happen return; } @@ -1282,6 +1282,7 @@ void *shellThreadLoop(void *arg) { taosResetTerminalMode(); } while (shellRunCommand(command, true) == 0); + destroyThreadLocalGeosCtx(); taosMemoryFreeClear(command); shellWriteHistory(); shellExit();
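
Note on the revert in PATCH 181/181: it goes back to keeping one SGeosContext per thread in thread-local storage, so every worker loop has to free its own context before the thread exits; that is what the restored destroyThreadLocalGeosCtx() calls at the end of taosProcessSchedQueue, tQWorkerThreadFp, tQueryAutoQWorkerThreadFp and shellThreadLoop do. A minimal sketch of this lazily-created, owner-thread-cleanup pattern follows; it is illustrative only, and every name in it other than the __thread idiom is hypothetical rather than a TDengine API.

/*
 * Sketch (not TDengine code) of the thread-local resource pattern restored by
 * the revert: a per-thread context created on first use and destroyed on the
 * owning thread before it exits. All names below are hypothetical.
 */
#include <stdlib.h>

typedef struct {
  void *handle;  /* stands in for the GEOS handle, readers and writers */
} LocalCtx;

static __thread LocalCtx tlCtx = {0};  /* one instance per thread */

static LocalCtx *getLocalCtx(void) {
  if (tlCtx.handle == NULL) {
    tlCtx.handle = malloc(64);  /* lazy, first-use initialization */
  }
  return &tlCtx;
}

static void destroyLocalCtx(void) {
  /* must run on the owning thread; skipping it leaks one context per thread */
  free(tlCtx.handle);
  tlCtx.handle = NULL;
}

static void *workerThreadFp(void *arg) {
  (void)arg;
  LocalCtx *ctx = getLocalCtx();
  (void)ctx;           /* ... process queue items using ctx ... */
  destroyLocalCtx();   /* counterpart of destroyThreadLocalGeosCtx() in the patch */
  return NULL;
}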