From 6c732a14a38b9323d825ca65398662f71e4731ca Mon Sep 17 00:00:00 2001 From: Yihao Deng Date: Fri, 19 Jan 2024 06:20:16 +0000 Subject: [PATCH 01/32] refactor retry --- source/dnode/mnode/impl/src/mndMnode.c | 76 ++--- source/libs/sync/src/syncMain.c | 452 ++++++++++++------------- 2 files changed, 260 insertions(+), 268 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndMnode.c b/source/dnode/mnode/impl/src/mndMnode.c index 5a09072577..9592be5263 100644 --- a/source/dnode/mnode/impl/src/mndMnode.c +++ b/source/dnode/mnode/impl/src/mndMnode.c @@ -15,6 +15,7 @@ #define _DEFAULT_SOURCE #include "mndMnode.h" +#include "audit.h" #include "mndCluster.h" #include "mndDnode.h" #include "mndPrivilege.h" @@ -22,7 +23,6 @@ #include "mndSync.h" #include "mndTrans.h" #include "tmisce.h" -#include "audit.h" #define MNODE_VER_NUMBER 2 #define MNODE_RESERVE_SIZE 64 @@ -168,7 +168,7 @@ static SSdbRow *mndMnodeActionDecode(SSdbRaw *pRaw) { SDB_GET_INT32(pRaw, dataPos, &pObj->id, _OVER) SDB_GET_INT64(pRaw, dataPos, &pObj->createdTime, _OVER) SDB_GET_INT64(pRaw, dataPos, &pObj->updateTime, _OVER) - if(sver >=2){ + if (sver >= 2) { SDB_GET_INT32(pRaw, dataPos, &pObj->role, _OVER) SDB_GET_INT64(pRaw, dataPos, &pObj->lastIndex, _OVER) } @@ -241,6 +241,7 @@ void mndGetMnodeEpSet(SMnode *pMnode, SEpSet *pEpSet) { } void *pIter = NULL; + pEpSet->inUse = 0; while (1) { SMnodeObj *pObj = NULL; pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pObj); @@ -250,7 +251,7 @@ void mndGetMnodeEpSet(SMnode *pMnode, SEpSet *pEpSet) { if (mndIsLeader(pMnode)) { pEpSet->inUse = pEpSet->numOfEps; } else { - pEpSet->inUse = (pEpSet->numOfEps + 1) % totalMnodes; + // pEpSet->inUse = (pEpSet->numOfEps + 1) % totalMnodes; } } if (pObj->pDnode != NULL) { @@ -320,8 +321,8 @@ static int32_t mndBuildCreateMnodeRedoAction(STrans *pTrans, SDCreateMnodeReq *p return 0; } -static int32_t mndBuildAlterMnodeTypeRedoAction(STrans *pTrans, - SDAlterMnodeTypeReq *pAlterMnodeTypeReq, SEpSet *pAlterMnodeTypeEpSet) { +static int32_t mndBuildAlterMnodeTypeRedoAction(STrans *pTrans, SDAlterMnodeTypeReq *pAlterMnodeTypeReq, + SEpSet *pAlterMnodeTypeEpSet) { int32_t contLen = tSerializeSDCreateMnodeReq(NULL, 0, pAlterMnodeTypeReq); void *pReq = taosMemoryMalloc(contLen); tSerializeSDCreateMnodeReq(pReq, contLen, pAlterMnodeTypeReq); @@ -396,13 +397,12 @@ static int32_t mndSetCreateMnodeRedoActions(SMnode *pMnode, STrans *pTrans, SDno pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pMObj); if (pIter == NULL) break; - if(pMObj->role == TAOS_SYNC_ROLE_VOTER){ + if (pMObj->role == TAOS_SYNC_ROLE_VOTER) { createReq.replicas[numOfReplicas].id = pMObj->id; createReq.replicas[numOfReplicas].port = pMObj->pDnode->port; memcpy(createReq.replicas[numOfReplicas].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN); numOfReplicas++; - } - else{ + } else { createReq.learnerReplicas[numOfLearnerReplicas].id = pMObj->id; createReq.learnerReplicas[numOfLearnerReplicas].port = pMObj->pDnode->port; memcpy(createReq.learnerReplicas[numOfLearnerReplicas].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN); @@ -441,18 +441,17 @@ int32_t mndSetRestoreCreateMnodeRedoActions(SMnode *pMnode, STrans *pTrans, SDno pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pMObj); if (pIter == NULL) break; - if(pMObj->id == pDnode->id) { + if (pMObj->id == pDnode->id) { sdbRelease(pSdb, pMObj); continue; } - if(pMObj->role == TAOS_SYNC_ROLE_VOTER){ + if (pMObj->role == TAOS_SYNC_ROLE_VOTER) { createReq.replicas[createReq.replica].id = pMObj->id; createReq.replicas[createReq.replica].port = pMObj->pDnode->port; memcpy(createReq.replicas[createReq.replica].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN); createReq.replica++; - } - else{ + } else { createReq.learnerReplicas[createReq.learnerReplica].id = pMObj->id; createReq.learnerReplicas[createReq.learnerReplica].port = pMObj->pDnode->port; memcpy(createReq.learnerReplicas[createReq.learnerReplica].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN); @@ -480,23 +479,22 @@ int32_t mndSetRestoreCreateMnodeRedoActions(SMnode *pMnode, STrans *pTrans, SDno } static int32_t mndSetAlterMnodeTypeRedoActions(SMnode *pMnode, STrans *pTrans, SDnodeObj *pDnode, SMnodeObj *pObj) { - SSdb *pSdb = pMnode->pSdb; - void *pIter = NULL; - SDAlterMnodeTypeReq alterReq = {0}; - SEpSet createEpset = {0}; + SSdb *pSdb = pMnode->pSdb; + void *pIter = NULL; + SDAlterMnodeTypeReq alterReq = {0}; + SEpSet createEpset = {0}; while (1) { SMnodeObj *pMObj = NULL; pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pMObj); if (pIter == NULL) break; - if(pMObj->role == TAOS_SYNC_ROLE_VOTER){ + if (pMObj->role == TAOS_SYNC_ROLE_VOTER) { alterReq.replicas[alterReq.replica].id = pMObj->id; alterReq.replicas[alterReq.replica].port = pMObj->pDnode->port; memcpy(alterReq.replicas[alterReq.replica].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN); alterReq.replica++; - } - else{ + } else { alterReq.learnerReplicas[alterReq.learnerReplica].id = pMObj->id; alterReq.learnerReplicas[alterReq.learnerReplica].port = pMObj->pDnode->port; memcpy(alterReq.learnerReplicas[alterReq.learnerReplica].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN); @@ -524,28 +522,27 @@ static int32_t mndSetAlterMnodeTypeRedoActions(SMnode *pMnode, STrans *pTrans, S } int32_t mndSetRestoreAlterMnodeTypeRedoActions(SMnode *pMnode, STrans *pTrans, SDnodeObj *pDnode, SMnodeObj *pObj) { - SSdb *pSdb = pMnode->pSdb; - void *pIter = NULL; - SDAlterMnodeTypeReq alterReq = {0}; - SEpSet createEpset = {0}; + SSdb *pSdb = pMnode->pSdb; + void *pIter = NULL; + SDAlterMnodeTypeReq alterReq = {0}; + SEpSet createEpset = {0}; while (1) { SMnodeObj *pMObj = NULL; pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pMObj); if (pIter == NULL) break; - if(pMObj->id == pDnode->id) { + if (pMObj->id == pDnode->id) { sdbRelease(pSdb, pMObj); continue; } - if(pMObj->role == TAOS_SYNC_ROLE_VOTER){ + if (pMObj->role == TAOS_SYNC_ROLE_VOTER) { alterReq.replicas[alterReq.replica].id = pMObj->id; alterReq.replicas[alterReq.replica].port = pMObj->pDnode->port; memcpy(alterReq.replicas[alterReq.replica].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN); alterReq.replica++; - } - else{ + } else { alterReq.learnerReplicas[alterReq.learnerReplica].id = pMObj->id; alterReq.learnerReplicas[alterReq.learnerReplica].port = pMObj->pDnode->port; memcpy(alterReq.learnerReplicas[alterReq.learnerReplica].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN); @@ -959,8 +956,11 @@ static void mndReloadSyncConfig(SMnode *pMnode) { void *pIter = NULL; int32_t updatingMnodes = 0; int32_t readyMnodes = 0; - SSyncCfg cfg = {.myIndex = -1, .lastIndex = 0,}; - SyncIndex maxIndex = 0; + SSyncCfg cfg = { + .myIndex = -1, + .lastIndex = 0, + }; + SyncIndex maxIndex = 0; while (1) { pIter = sdbFetchAll(pSdb, SDB_MNODE, pIter, (void **)&pObj, &objStatus, false); @@ -986,17 +986,17 @@ static void mndReloadSyncConfig(SMnode *pMnode) { if (pObj->pDnode->id == pMnode->selfDnodeId) { cfg.myIndex = cfg.totalReplicaNum; } - if(pNode->nodeRole == TAOS_SYNC_ROLE_VOTER){ + if (pNode->nodeRole == TAOS_SYNC_ROLE_VOTER) { cfg.replicaNum++; } cfg.totalReplicaNum++; - if(pObj->lastIndex > cfg.lastIndex){ + if (pObj->lastIndex > cfg.lastIndex) { cfg.lastIndex = pObj->lastIndex; } } if (objStatus == SDB_STATUS_DROPPING) { - if(pObj->lastIndex > cfg.lastIndex){ + if (pObj->lastIndex > cfg.lastIndex) { cfg.lastIndex = pObj->lastIndex; } } @@ -1006,10 +1006,10 @@ static void mndReloadSyncConfig(SMnode *pMnode) { sdbReleaseLock(pSdb, pObj, false); } - //if (readyMnodes <= 0 || updatingMnodes <= 0) { - // mInfo("vgId:1, mnode sync not reconfig since readyMnodes:%d updatingMnodes:%d", readyMnodes, updatingMnodes); - // return; - //} + // if (readyMnodes <= 0 || updatingMnodes <= 0) { + // mInfo("vgId:1, mnode sync not reconfig since readyMnodes:%d updatingMnodes:%d", readyMnodes, updatingMnodes); + // return; + // } if (cfg.myIndex == -1) { #if 1 @@ -1023,8 +1023,8 @@ static void mndReloadSyncConfig(SMnode *pMnode) { } if (pMnode->syncMgmt.sync > 0) { - mInfo("vgId:1, mnode sync reconfig, totalReplica:%d replica:%d myIndex:%d", - cfg.totalReplicaNum, cfg.replicaNum, cfg.myIndex); + mInfo("vgId:1, mnode sync reconfig, totalReplica:%d replica:%d myIndex:%d", cfg.totalReplicaNum, cfg.replicaNum, + cfg.myIndex); for (int32_t i = 0; i < cfg.totalReplicaNum; ++i) { SNodeInfo *pNode = &cfg.nodeInfo[i]; diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index ff6401cba8..89a41806cd 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -106,7 +106,7 @@ _err: return -1; } -int32_t syncNodeGetConfig(int64_t rid, SSyncCfg *cfg){ +int32_t syncNodeGetConfig(int64_t rid, SSyncCfg* cfg) { SSyncNode* pSyncNode = syncNodeAcquire(rid); if (pSyncNode == NULL) { @@ -546,7 +546,7 @@ SSyncState syncGetState(int64_t rid) { state.progress = -1; } sDebug("vgId:%d, learner progress state, commitIndex:%" PRId64 " totalIndex:%" PRId64 ", " - "progress:%lf, progress:%d", + "progress:%lf, progress:%d", pSyncNode->vgId, pSyncNode->pLogBuf->commitIndex, pSyncNode->pLogBuf->totalIndex, progress, state.progress); */ @@ -589,6 +589,7 @@ void syncGetRetryEpSet(int64_t rid, SEpSet* pEpSet) { } if (pEpSet->numOfEps > 0) { pEpSet->inUse = (pSyncNode->raftCfg.cfg.myIndex + 1) % pEpSet->numOfEps; + pEpSet->inUse = 0; } sInfo("vgId:%d, sync get retry epset numOfEps:%d inUse:%d", pSyncNode->vgId, pEpSet->numOfEps, pEpSet->inUse); @@ -614,7 +615,7 @@ int32_t syncCheckMember(int64_t rid) { return -1; } - if(pSyncNode->myNodeInfo.nodeRole == TAOS_SYNC_ROLE_LEARNER){ + if (pSyncNode->myNodeInfo.nodeRole == TAOS_SYNC_ROLE_LEARNER) { return -1; } @@ -682,24 +683,24 @@ int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak, int64_ } // optimized one replica - if (syncNodeIsOptimizedOneReplica(pSyncNode, pMsg)) { + if (syncNodeIsOptimizedOneReplica(pSyncNode, pMsg)) { SyncIndex retIndex; int32_t code = syncNodeOnClientRequest(pSyncNode, pMsg, &retIndex); if (code >= 0) { pMsg->info.conn.applyIndex = retIndex; pMsg->info.conn.applyTerm = raftStoreGetTerm(pSyncNode); - //after raft member change, need to handle 1->2 switching point - //at this point, need to switch entry handling thread - if(pSyncNode->replicaNum == 1){ + // after raft member change, need to handle 1->2 switching point + // at this point, need to switch entry handling thread + if (pSyncNode->replicaNum == 1) { sTrace("vgId:%d, propose optimized msg, index:%" PRId64 " type:%s", pSyncNode->vgId, retIndex, - TMSG_INFO(pMsg->msgType)); + TMSG_INFO(pMsg->msgType)); return 1; - } - else{ - sTrace("vgId:%d, propose optimized msg, return to normal, index:%" PRId64 " type:%s, " - "handle:%p", pSyncNode->vgId, retIndex, - TMSG_INFO(pMsg->msgType), pMsg->info.handle); + } else { + sTrace("vgId:%d, propose optimized msg, return to normal, index:%" PRId64 + " type:%s, " + "handle:%p", + pSyncNode->vgId, retIndex, TMSG_INFO(pMsg->msgType), pMsg->info.handle); return 0; } } else { @@ -844,7 +845,7 @@ SSyncNode* syncNodeOpen(SSyncInfo* pSyncInfo, int32_t vnodeVersion) { goto _error; } - if(vnodeVersion > pSyncNode->raftCfg.cfg.changeVersion){ + if (vnodeVersion > pSyncNode->raftCfg.cfg.changeVersion) { if (pSyncInfo->syncCfg.totalReplicaNum > 0 && syncIsConfigChanged(&pSyncNode->raftCfg.cfg, &pSyncInfo->syncCfg)) { sInfo("vgId:%d, use sync config from input options and write to cfg file", pSyncNode->vgId); pSyncNode->raftCfg.cfg = pSyncInfo->syncCfg; @@ -856,15 +857,13 @@ SSyncNode* syncNodeOpen(SSyncInfo* pSyncInfo, int32_t vnodeVersion) { sInfo("vgId:%d, use sync config from sync cfg file", pSyncNode->vgId); pSyncInfo->syncCfg = pSyncNode->raftCfg.cfg; } - } - else{ - sInfo("vgId:%d, skip save sync cfg file since request ver:%d <= file ver:%d", - pSyncNode->vgId, vnodeVersion, pSyncInfo->syncCfg.changeVersion); + } else { + sInfo("vgId:%d, skip save sync cfg file since request ver:%d <= file ver:%d", pSyncNode->vgId, vnodeVersion, + pSyncInfo->syncCfg.changeVersion); } } - - // init by SSyncInfo + // init by SSyncInfo pSyncNode->vgId = pSyncInfo->vgId; SSyncCfg* pCfg = &pSyncNode->raftCfg.cfg; bool updated = false; @@ -879,7 +878,7 @@ SSyncNode* syncNodeOpen(SSyncInfo* pSyncInfo, int32_t vnodeVersion) { pNode->nodeId, pNode->clusterId); } - if(vnodeVersion > pSyncInfo->syncCfg.changeVersion){ + if (vnodeVersion > pSyncInfo->syncCfg.changeVersion) { if (updated) { sInfo("vgId:%d, save config info since dnode info changed", pSyncNode->vgId); if (syncWriteCfgFile(pSyncNode) != 0) { @@ -888,7 +887,7 @@ SSyncNode* syncNodeOpen(SSyncInfo* pSyncInfo, int32_t vnodeVersion) { } } } - + pSyncNode->pWal = pSyncInfo->pWal; pSyncNode->msgcb = pSyncInfo->msgcb; pSyncNode->syncSendMSg = pSyncInfo->syncSendMSg; @@ -2335,47 +2334,49 @@ int32_t syncCacheEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry, LRUHand return code; } -void syncBuildConfigFromReq(SAlterVnodeReplicaReq *pReq, SSyncCfg *cfg){//TODO SAlterVnodeReplicaReq name is proper? +void syncBuildConfigFromReq(SAlterVnodeReplicaReq* pReq, SSyncCfg* cfg) { // TODO SAlterVnodeReplicaReq name is proper? cfg->replicaNum = 0; cfg->totalReplicaNum = 0; for (int i = 0; i < pReq->replica; ++i) { - SNodeInfo *pNode = &cfg->nodeInfo[i]; + SNodeInfo* pNode = &cfg->nodeInfo[i]; pNode->nodeId = pReq->replicas[i].id; pNode->nodePort = pReq->replicas[i].port; tstrncpy(pNode->nodeFqdn, pReq->replicas[i].fqdn, sizeof(pNode->nodeFqdn)); pNode->nodeRole = TAOS_SYNC_ROLE_VOTER; (void)tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort); - sInfo("vgId:%d, replica:%d ep:%s:%u dnode:%d nodeRole:%d", pReq->vgId, i, pNode->nodeFqdn, pNode->nodePort, pNode->nodeId, pNode->nodeRole); + sInfo("vgId:%d, replica:%d ep:%s:%u dnode:%d nodeRole:%d", pReq->vgId, i, pNode->nodeFqdn, pNode->nodePort, + pNode->nodeId, pNode->nodeRole); cfg->replicaNum++; } - if(pReq->selfIndex != -1){ + if (pReq->selfIndex != -1) { cfg->myIndex = pReq->selfIndex; } for (int i = cfg->replicaNum; i < pReq->replica + pReq->learnerReplica; ++i) { - SNodeInfo *pNode = &cfg->nodeInfo[i]; + SNodeInfo* pNode = &cfg->nodeInfo[i]; pNode->nodeId = pReq->learnerReplicas[cfg->totalReplicaNum].id; pNode->nodePort = pReq->learnerReplicas[cfg->totalReplicaNum].port; pNode->nodeRole = TAOS_SYNC_ROLE_LEARNER; tstrncpy(pNode->nodeFqdn, pReq->learnerReplicas[cfg->totalReplicaNum].fqdn, sizeof(pNode->nodeFqdn)); (void)tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort); - sInfo("vgId:%d, replica:%d ep:%s:%u dnode:%d nodeRole:%d", pReq->vgId, i, pNode->nodeFqdn, pNode->nodePort, pNode->nodeId, pNode->nodeRole); + sInfo("vgId:%d, replica:%d ep:%s:%u dnode:%d nodeRole:%d", pReq->vgId, i, pNode->nodeFqdn, pNode->nodePort, + pNode->nodeId, pNode->nodeRole); cfg->totalReplicaNum++; } cfg->totalReplicaNum += pReq->replica; - if(pReq->learnerSelfIndex != -1){ + if (pReq->learnerSelfIndex != -1) { cfg->myIndex = pReq->replica + pReq->learnerSelfIndex; } cfg->changeVersion = pReq->changeVersion; } -int32_t syncNodeCheckChangeConfig(SSyncNode* ths, SSyncRaftEntry* pEntry){ - if(pEntry->originalRpcType != TDMT_SYNC_CONFIG_CHANGE){ +int32_t syncNodeCheckChangeConfig(SSyncNode* ths, SSyncRaftEntry* pEntry) { + if (pEntry->originalRpcType != TDMT_SYNC_CONFIG_CHANGE) { return -1; } - SMsgHead *head = (SMsgHead *)pEntry->data; - void *pReq = POINTER_SHIFT(head, sizeof(SMsgHead)); + SMsgHead* head = (SMsgHead*)pEntry->data; + void* pReq = POINTER_SHIFT(head, sizeof(SMsgHead)); SAlterVnodeTypeReq req = {0}; if (tDeserializeSAlterVnodeReplicaReq(pReq, head->contLen, &req) != 0) { @@ -2386,17 +2387,17 @@ int32_t syncNodeCheckChangeConfig(SSyncNode* ths, SSyncRaftEntry* pEntry){ SSyncCfg cfg = {0}; syncBuildConfigFromReq(&req, &cfg); - if(cfg.totalReplicaNum >= 1 && ths->state == TAOS_SYNC_STATE_LEADER){ + if (cfg.totalReplicaNum >= 1 && ths->state == TAOS_SYNC_STATE_LEADER) { bool incfg = false; - for(int32_t j = 0; j < cfg.totalReplicaNum; ++j){ - if(strcmp(ths->myNodeInfo.nodeFqdn, cfg.nodeInfo[j].nodeFqdn) == 0 - && ths->myNodeInfo.nodePort == cfg.nodeInfo[j].nodePort){ + for (int32_t j = 0; j < cfg.totalReplicaNum; ++j) { + if (strcmp(ths->myNodeInfo.nodeFqdn, cfg.nodeInfo[j].nodeFqdn) == 0 && + ths->myNodeInfo.nodePort == cfg.nodeInfo[j].nodePort) { incfg = true; break; } } - if(!incfg){ + if (!incfg) { SyncTerm currentTerm = raftStoreGetTerm(ths); syncNodeStepDown(ths, currentTerm); return 1; @@ -2405,26 +2406,25 @@ int32_t syncNodeCheckChangeConfig(SSyncNode* ths, SSyncRaftEntry* pEntry){ return 0; } -void syncNodeLogConfigInfo(SSyncNode* ths, SSyncCfg *cfg, char *str){ - sInfo("vgId:%d, %s. SyncNode, replicaNum:%d, peersNum:%d, lastConfigIndex:%" PRId64 ", changeVersion:%d, " - "restoreFinish:%d", - ths->vgId, str, - ths->replicaNum, ths->peersNum, ths->raftCfg.lastConfigIndex, ths->raftCfg.cfg.changeVersion, +void syncNodeLogConfigInfo(SSyncNode* ths, SSyncCfg* cfg, char* str) { + sInfo("vgId:%d, %s. SyncNode, replicaNum:%d, peersNum:%d, lastConfigIndex:%" PRId64 + ", changeVersion:%d, " + "restoreFinish:%d", + ths->vgId, str, ths->replicaNum, ths->peersNum, ths->raftCfg.lastConfigIndex, ths->raftCfg.cfg.changeVersion, ths->restoreFinish); - sInfo("vgId:%d, %s, myNodeInfo, clusterId:%" PRId64 ", nodeId:%d, Fqdn:%s, port:%d, role:%d", - ths->vgId, str, ths->myNodeInfo.clusterId, ths->myNodeInfo.nodeId, ths->myNodeInfo.nodeFqdn, - ths->myNodeInfo.nodePort, ths->myNodeInfo.nodeRole); + sInfo("vgId:%d, %s, myNodeInfo, clusterId:%" PRId64 ", nodeId:%d, Fqdn:%s, port:%d, role:%d", ths->vgId, str, + ths->myNodeInfo.clusterId, ths->myNodeInfo.nodeId, ths->myNodeInfo.nodeFqdn, ths->myNodeInfo.nodePort, + ths->myNodeInfo.nodeRole); - for (int32_t i = 0; i < ths->peersNum; ++i){ - sInfo("vgId:%d, %s, peersNodeInfo%d, clusterId:%" PRId64 ", nodeId:%d, Fqdn:%s, port:%d, role:%d", - ths->vgId, str, i, ths->peersNodeInfo[i].clusterId, - ths->peersNodeInfo[i].nodeId, ths->peersNodeInfo[i].nodeFqdn, - ths->peersNodeInfo[i].nodePort, ths->peersNodeInfo[i].nodeRole); + for (int32_t i = 0; i < ths->peersNum; ++i) { + sInfo("vgId:%d, %s, peersNodeInfo%d, clusterId:%" PRId64 ", nodeId:%d, Fqdn:%s, port:%d, role:%d", ths->vgId, str, + i, ths->peersNodeInfo[i].clusterId, ths->peersNodeInfo[i].nodeId, ths->peersNodeInfo[i].nodeFqdn, + ths->peersNodeInfo[i].nodePort, ths->peersNodeInfo[i].nodeRole); } - for (int32_t i = 0; i < ths->peersNum; ++i){ - char buf[256]; + for (int32_t i = 0; i < ths->peersNum; ++i) { + char buf[256]; int32_t len = 256; int32_t n = 0; n += snprintf(buf + n, len - n, "%s", "{"); @@ -2434,37 +2434,33 @@ void syncNodeLogConfigInfo(SSyncNode* ths, SSyncCfg *cfg, char *str){ } n += snprintf(buf + n, len - n, "%s", "}"); - sInfo("vgId:%d, %s, peersEpset%d, %s, inUse:%d", - ths->vgId, str, i, buf, ths->peersEpset->inUse); + sInfo("vgId:%d, %s, peersEpset%d, %s, inUse:%d", ths->vgId, str, i, buf, ths->peersEpset->inUse); } - for (int32_t i = 0; i < ths->peersNum; ++i){ - sInfo("vgId:%d, %s, peersId%d, addr:%"PRId64, - ths->vgId, str, i, ths->peersId[i].addr); + for (int32_t i = 0; i < ths->peersNum; ++i) { + sInfo("vgId:%d, %s, peersId%d, addr:%" PRId64, ths->vgId, str, i, ths->peersId[i].addr); } - for (int32_t i = 0; i < ths->raftCfg.cfg.totalReplicaNum; ++i){ - sInfo("vgId:%d, %s, nodeInfo%d, clusterId:%" PRId64 ", nodeId:%d, Fqdn:%s, port:%d, role:%d", - ths->vgId, str, i, ths->raftCfg.cfg.nodeInfo[i].clusterId, - ths->raftCfg.cfg.nodeInfo[i].nodeId, ths->raftCfg.cfg.nodeInfo[i].nodeFqdn, - ths->raftCfg.cfg.nodeInfo[i].nodePort, ths->raftCfg.cfg.nodeInfo[i].nodeRole); + for (int32_t i = 0; i < ths->raftCfg.cfg.totalReplicaNum; ++i) { + sInfo("vgId:%d, %s, nodeInfo%d, clusterId:%" PRId64 ", nodeId:%d, Fqdn:%s, port:%d, role:%d", ths->vgId, str, i, + ths->raftCfg.cfg.nodeInfo[i].clusterId, ths->raftCfg.cfg.nodeInfo[i].nodeId, + ths->raftCfg.cfg.nodeInfo[i].nodeFqdn, ths->raftCfg.cfg.nodeInfo[i].nodePort, + ths->raftCfg.cfg.nodeInfo[i].nodeRole); } - for (int32_t i = 0; i < ths->raftCfg.cfg.totalReplicaNum; ++i){ - sInfo("vgId:%d, %s, replicasId%d, addr:%" PRId64, - ths->vgId, str, i, ths->replicasId[i].addr); + for (int32_t i = 0; i < ths->raftCfg.cfg.totalReplicaNum; ++i) { + sInfo("vgId:%d, %s, replicasId%d, addr:%" PRId64, ths->vgId, str, i, ths->replicasId[i].addr); } - } -int32_t syncNodeRebuildPeerAndCfg(SSyncNode* ths, SSyncCfg *cfg){ +int32_t syncNodeRebuildPeerAndCfg(SSyncNode* ths, SSyncCfg* cfg) { int32_t i = 0; - //change peersNodeInfo + // change peersNodeInfo i = 0; - for(int32_t j = 0; j < cfg->totalReplicaNum; ++j){ - if(!(strcmp(ths->myNodeInfo.nodeFqdn, cfg->nodeInfo[j].nodeFqdn) == 0 - && ths->myNodeInfo.nodePort == cfg->nodeInfo[j].nodePort)){ + for (int32_t j = 0; j < cfg->totalReplicaNum; ++j) { + if (!(strcmp(ths->myNodeInfo.nodeFqdn, cfg->nodeInfo[j].nodeFqdn) == 0 && + ths->myNodeInfo.nodePort == cfg->nodeInfo[j].nodePort)) { ths->peersNodeInfo[i].nodeRole = cfg->nodeInfo[j].nodeRole; ths->peersNodeInfo[i].clusterId = cfg->nodeInfo[j].clusterId; tstrncpy(ths->peersNodeInfo[i].nodeFqdn, cfg->nodeInfo[j].nodeFqdn, TSDB_FQDN_LEN); @@ -2483,11 +2479,11 @@ int32_t syncNodeRebuildPeerAndCfg(SSyncNode* ths, SSyncCfg *cfg){ } ths->peersNum = i; - //change cfg nodeInfo + // change cfg nodeInfo ths->raftCfg.cfg.replicaNum = 0; i = 0; - for(int32_t j = 0; j < cfg->totalReplicaNum; ++j) { - if(cfg->nodeInfo[j].nodeRole == TAOS_SYNC_ROLE_VOTER){ + for (int32_t j = 0; j < cfg->totalReplicaNum; ++j) { + if (cfg->nodeInfo[j].nodeRole == TAOS_SYNC_ROLE_VOTER) { ths->raftCfg.cfg.replicaNum++; } ths->raftCfg.cfg.nodeInfo[i].nodeRole = cfg->nodeInfo[j].nodeRole; @@ -2495,9 +2491,9 @@ int32_t syncNodeRebuildPeerAndCfg(SSyncNode* ths, SSyncCfg *cfg){ tstrncpy(ths->raftCfg.cfg.nodeInfo[i].nodeFqdn, cfg->nodeInfo[j].nodeFqdn, TSDB_FQDN_LEN); ths->raftCfg.cfg.nodeInfo[i].nodeId = cfg->nodeInfo[j].nodeId; ths->raftCfg.cfg.nodeInfo[i].nodePort = cfg->nodeInfo[j].nodePort; - if((strcmp(ths->myNodeInfo.nodeFqdn, cfg->nodeInfo[j].nodeFqdn) == 0 - && ths->myNodeInfo.nodePort == cfg->nodeInfo[j].nodePort)){ - ths->raftCfg.cfg.myIndex = i; + if ((strcmp(ths->myNodeInfo.nodeFqdn, cfg->nodeInfo[j].nodeFqdn) == 0 && + ths->myNodeInfo.nodePort == cfg->nodeInfo[j].nodePort)) { + ths->raftCfg.cfg.myIndex = i; } i++; } @@ -2506,26 +2502,26 @@ int32_t syncNodeRebuildPeerAndCfg(SSyncNode* ths, SSyncCfg *cfg){ return 0; } -void syncNodeChangePeerAndCfgToVoter(SSyncNode* ths, SSyncCfg *cfg){ - //change peersNodeInfo +void syncNodeChangePeerAndCfgToVoter(SSyncNode* ths, SSyncCfg* cfg) { + // change peersNodeInfo for (int32_t i = 0; i < ths->peersNum; ++i) { - for(int32_t j = 0; j < cfg->totalReplicaNum; ++j){ - if(strcmp(ths->peersNodeInfo[i].nodeFqdn, cfg->nodeInfo[j].nodeFqdn) == 0 - && ths->peersNodeInfo[i].nodePort == cfg->nodeInfo[j].nodePort){ - if(cfg->nodeInfo[j].nodeRole == TAOS_SYNC_ROLE_VOTER){ + for (int32_t j = 0; j < cfg->totalReplicaNum; ++j) { + if (strcmp(ths->peersNodeInfo[i].nodeFqdn, cfg->nodeInfo[j].nodeFqdn) == 0 && + ths->peersNodeInfo[i].nodePort == cfg->nodeInfo[j].nodePort) { + if (cfg->nodeInfo[j].nodeRole == TAOS_SYNC_ROLE_VOTER) { ths->peersNodeInfo[i].nodeRole = TAOS_SYNC_ROLE_VOTER; } } } } - //change cfg nodeInfo + // change cfg nodeInfo ths->raftCfg.cfg.replicaNum = 0; for (int32_t i = 0; i < ths->raftCfg.cfg.totalReplicaNum; ++i) { - for(int32_t j = 0; j < cfg->totalReplicaNum; ++j){ - if(strcmp(ths->raftCfg.cfg.nodeInfo[i].nodeFqdn, cfg->nodeInfo[j].nodeFqdn) == 0 - && ths->raftCfg.cfg.nodeInfo[i].nodePort == cfg->nodeInfo[j].nodePort){ - if(cfg->nodeInfo[j].nodeRole == TAOS_SYNC_ROLE_VOTER){ + for (int32_t j = 0; j < cfg->totalReplicaNum; ++j) { + if (strcmp(ths->raftCfg.cfg.nodeInfo[i].nodeFqdn, cfg->nodeInfo[j].nodeFqdn) == 0 && + ths->raftCfg.cfg.nodeInfo[i].nodePort == cfg->nodeInfo[j].nodePort) { + if (cfg->nodeInfo[j].nodeRole == TAOS_SYNC_ROLE_VOTER) { ths->raftCfg.cfg.nodeInfo[i].nodeRole = TAOS_SYNC_ROLE_VOTER; ths->raftCfg.cfg.replicaNum++; } @@ -2534,8 +2530,8 @@ void syncNodeChangePeerAndCfgToVoter(SSyncNode* ths, SSyncCfg *cfg){ } } -int32_t syncNodeRebuildAndCopyIfExist(SSyncNode* ths, int32_t oldtotalReplicaNum){ - //1.rebuild replicasId, remove deleted one +int32_t syncNodeRebuildAndCopyIfExist(SSyncNode* ths, int32_t oldtotalReplicaNum) { + // 1.rebuild replicasId, remove deleted one SRaftId oldReplicasId[TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA]; memcpy(oldReplicasId, ths->replicasId, sizeof(oldReplicasId)); @@ -2545,9 +2541,8 @@ int32_t syncNodeRebuildAndCopyIfExist(SSyncNode* ths, int32_t oldtotalReplicaNum syncUtilNodeInfo2RaftId(&ths->raftCfg.cfg.nodeInfo[i], ths->vgId, &ths->replicasId[i]); } - - //2.rebuild MatchIndex, remove deleted one - SSyncIndexMgr *oldIndex = ths->pMatchIndex; + // 2.rebuild MatchIndex, remove deleted one + SSyncIndexMgr* oldIndex = ths->pMatchIndex; ths->pMatchIndex = syncIndexMgrCreate(ths); @@ -2555,9 +2550,8 @@ int32_t syncNodeRebuildAndCopyIfExist(SSyncNode* ths, int32_t oldtotalReplicaNum syncIndexMgrDestroy(oldIndex); - - //3.rebuild NextIndex, remove deleted one - SSyncIndexMgr *oldNextIndex = ths->pNextIndex; + // 3.rebuild NextIndex, remove deleted one + SSyncIndexMgr* oldNextIndex = ths->pNextIndex; ths->pNextIndex = syncIndexMgrCreate(ths); @@ -2565,17 +2559,15 @@ int32_t syncNodeRebuildAndCopyIfExist(SSyncNode* ths, int32_t oldtotalReplicaNum syncIndexMgrDestroy(oldNextIndex); - - //4.rebuild pVotesGranted, pVotesRespond, no need to keep old vote state, only rebuild + // 4.rebuild pVotesGranted, pVotesRespond, no need to keep old vote state, only rebuild voteGrantedUpdate(ths->pVotesGranted, ths); votesRespondUpdate(ths->pVotesRespond, ths); - - //5.rebuild logReplMgr - for(int i = 0; i < oldtotalReplicaNum; ++i){ - sDebug("vgId:%d, old logReplMgrs i:%d, peerId:%d, restoreed:%d, [%" PRId64 " %" PRId64 ", %" PRId64 ")", ths->vgId, i, - ths->logReplMgrs[i]->peerId, ths->logReplMgrs[i]->restored, ths->logReplMgrs[i]->startIndex, - ths->logReplMgrs[i]->matchIndex, ths->logReplMgrs[i]->endIndex); + // 5.rebuild logReplMgr + for (int i = 0; i < oldtotalReplicaNum; ++i) { + sDebug("vgId:%d, old logReplMgrs i:%d, peerId:%d, restoreed:%d, [%" PRId64 " %" PRId64 ", %" PRId64 ")", ths->vgId, + i, ths->logReplMgrs[i]->peerId, ths->logReplMgrs[i]->restored, ths->logReplMgrs[i]->startIndex, + ths->logReplMgrs[i]->matchIndex, ths->logReplMgrs[i]->endIndex); } SSyncLogReplMgr* oldLogReplMgrs = NULL; @@ -2584,32 +2576,32 @@ int32_t syncNodeRebuildAndCopyIfExist(SSyncNode* ths, int32_t oldtotalReplicaNum if (NULL == oldLogReplMgrs) return -1; memset(oldLogReplMgrs, 0, length); - for(int i = 0; i < oldtotalReplicaNum; i++){ + for (int i = 0; i < oldtotalReplicaNum; i++) { oldLogReplMgrs[i] = *(ths->logReplMgrs[i]); } syncNodeLogReplDestroy(ths); syncNodeLogReplInit(ths); - for(int i = 0; i < ths->totalReplicaNum; ++i){ - for(int j = 0; j < oldtotalReplicaNum; j++){ + for (int i = 0; i < ths->totalReplicaNum; ++i) { + for (int j = 0; j < oldtotalReplicaNum; j++) { if (syncUtilSameId(&ths->replicasId[i], &oldReplicasId[j])) { *(ths->logReplMgrs[i]) = oldLogReplMgrs[j]; ths->logReplMgrs[i]->peerId = i; } - } - } - - for(int i = 0; i < ths->totalReplicaNum; ++i){ - sDebug("vgId:%d, new logReplMgrs i:%d, peerId:%d, restoreed:%d, [%" PRId64 " %" PRId64 ", %" PRId64 ")" , ths->vgId, i, - ths->logReplMgrs[i]->peerId, ths->logReplMgrs[i]->restored, ths->logReplMgrs[i]->startIndex, - ths->logReplMgrs[i]->matchIndex, ths->logReplMgrs[i]->endIndex); + } } - //6.rebuild sender - for(int i = 0; i < oldtotalReplicaNum; ++i){ - sDebug("vgId:%d, old sender i:%d, replicaIndex:%d, lastSendTime:%" PRId64, - ths->vgId, i, ths->senders[i]->replicaIndex, ths->senders[i]->lastSendTime) + for (int i = 0; i < ths->totalReplicaNum; ++i) { + sDebug("vgId:%d, new logReplMgrs i:%d, peerId:%d, restoreed:%d, [%" PRId64 " %" PRId64 ", %" PRId64 ")", ths->vgId, + i, ths->logReplMgrs[i]->peerId, ths->logReplMgrs[i]->restored, ths->logReplMgrs[i]->startIndex, + ths->logReplMgrs[i]->matchIndex, ths->logReplMgrs[i]->endIndex); + } + + // 6.rebuild sender + for (int i = 0; i < oldtotalReplicaNum; ++i) { + sDebug("vgId:%d, old sender i:%d, replicaIndex:%d, lastSendTime:%" PRId64, ths->vgId, i, + ths->senders[i]->replicaIndex, ths->senders[i]->lastSendTime) } for (int32_t i = 0; i < TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA; ++i) { @@ -2633,13 +2625,12 @@ int32_t syncNodeRebuildAndCopyIfExist(SSyncNode* ths, int32_t oldtotalReplicaNum sSDebug(pSender, "snapshot sender create while open sync node, data:%p", pSender); } - for(int i = 0; i < ths->totalReplicaNum; i++){ - sDebug("vgId:%d, new sender i:%d, replicaIndex:%d, lastSendTime:%" PRId64, - ths->vgId, i, ths->senders[i]->replicaIndex, ths->senders[i]->lastSendTime) + for (int i = 0; i < ths->totalReplicaNum; i++) { + sDebug("vgId:%d, new sender i:%d, replicaIndex:%d, lastSendTime:%" PRId64, ths->vgId, i, + ths->senders[i]->replicaIndex, ths->senders[i]->lastSendTime) } - - //7.rebuild synctimer + // 7.rebuild synctimer syncNodeStopHeartbeatTimer(ths); for (int32_t i = 0; i < TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA; ++i) { @@ -2648,16 +2639,15 @@ int32_t syncNodeRebuildAndCopyIfExist(SSyncNode* ths, int32_t oldtotalReplicaNum syncNodeStartHeartbeatTimer(ths); - - //8.rebuild peerStates + // 8.rebuild peerStates SPeerState oldState[TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA] = {0}; - for(int i = 0; i < TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA; i++){ + for (int i = 0; i < TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA; i++) { oldState[i] = ths->peerStates[i]; } - for(int i = 0; i < ths->totalReplicaNum; i++){ - for(int j = 0; j < oldtotalReplicaNum; j++){ - if (syncUtilSameId(&ths->replicasId[i], &oldReplicasId[j])){ + for (int i = 0; i < ths->totalReplicaNum; i++) { + for (int j = 0; j < oldtotalReplicaNum; j++) { + if (syncUtilSameId(&ths->replicasId[i], &oldReplicasId[j])) { ths->peerStates[i] = oldState[j]; } } @@ -2668,32 +2658,32 @@ int32_t syncNodeRebuildAndCopyIfExist(SSyncNode* ths, int32_t oldtotalReplicaNum return 0; } -void syncNodeChangeToVoter(SSyncNode* ths){ - //replicasId, only need to change replicaNum when 1->3 +void syncNodeChangeToVoter(SSyncNode* ths) { + // replicasId, only need to change replicaNum when 1->3 ths->replicaNum = ths->raftCfg.cfg.replicaNum; sDebug("vgId:%d, totalReplicaNum:%d", ths->vgId, ths->totalReplicaNum); - for (int32_t i = 0; i < ths->totalReplicaNum; ++i){ + for (int32_t i = 0; i < ths->totalReplicaNum; ++i) { sDebug("vgId:%d, i:%d, replicaId.addr:%" PRIx64, ths->vgId, i, ths->replicasId[i].addr); } - //pMatchIndex, pNextIndex, only need to change replicaNum when 1->3 + // pMatchIndex, pNextIndex, only need to change replicaNum when 1->3 ths->pMatchIndex->replicaNum = ths->raftCfg.cfg.replicaNum; ths->pNextIndex->replicaNum = ths->raftCfg.cfg.replicaNum; sDebug("vgId:%d, pMatchIndex->totalReplicaNum:%d", ths->vgId, ths->pMatchIndex->totalReplicaNum); - for (int32_t i = 0; i < ths->pMatchIndex->totalReplicaNum; ++i){ + for (int32_t i = 0; i < ths->pMatchIndex->totalReplicaNum; ++i) { sDebug("vgId:%d, i:%d, match.index:%" PRId64, ths->vgId, i, ths->pMatchIndex->index[i]); } - //pVotesGranted, pVotesRespond + // pVotesGranted, pVotesRespond voteGrantedUpdate(ths->pVotesGranted, ths); votesRespondUpdate(ths->pVotesRespond, ths); - //logRepMgrs - //no need to change logRepMgrs when 1->3 + // logRepMgrs + // no need to change logRepMgrs when 1->3 } -void syncNodeResetPeerAndCfg(SSyncNode* ths){ +void syncNodeResetPeerAndCfg(SSyncNode* ths) { SNodeInfo node = {0}; for (int32_t i = 0; i < ths->peersNum; ++i) { memcpy(&ths->peersNodeInfo[i], &node, sizeof(SNodeInfo)); @@ -2704,13 +2694,13 @@ void syncNodeResetPeerAndCfg(SSyncNode* ths){ } } -int32_t syncNodeChangeConfig(SSyncNode* ths, SSyncRaftEntry* pEntry, char* str){ - if(pEntry->originalRpcType != TDMT_SYNC_CONFIG_CHANGE){ +int32_t syncNodeChangeConfig(SSyncNode* ths, SSyncRaftEntry* pEntry, char* str) { + if (pEntry->originalRpcType != TDMT_SYNC_CONFIG_CHANGE) { return -1; } - SMsgHead *head = (SMsgHead *)pEntry->data; - void *pReq = POINTER_SHIFT(head, sizeof(SMsgHead)); + SMsgHead* head = (SMsgHead*)pEntry->data; + void* pReq = POINTER_SHIFT(head, sizeof(SMsgHead)); SAlterVnodeTypeReq req = {0}; if (tDeserializeSAlterVnodeReplicaReq(pReq, head->contLen, &req) != 0) { @@ -2719,141 +2709,143 @@ int32_t syncNodeChangeConfig(SSyncNode* ths, SSyncRaftEntry* pEntry, char* str){ } SSyncCfg cfg = {0}; - syncBuildConfigFromReq(&req, &cfg); + syncBuildConfigFromReq(&req, &cfg); - if(cfg.changeVersion <= ths->raftCfg.cfg.changeVersion){ - sInfo("vgId:%d, skip conf change entry since lower version. " - "this entry, index:%" PRId64 ", term:%" PRId64 ", totalReplicaNum:%d, changeVersion:%d; " - "current node, replicaNum:%d, peersNum:%d, lastConfigIndex:%" PRId64", changeVersion:%d", - ths->vgId, - pEntry->index, pEntry->term, cfg.totalReplicaNum, cfg.changeVersion, - ths->replicaNum, ths->peersNum, ths->raftCfg.lastConfigIndex, ths->raftCfg.cfg.changeVersion); + if (cfg.changeVersion <= ths->raftCfg.cfg.changeVersion) { + sInfo( + "vgId:%d, skip conf change entry since lower version. " + "this entry, index:%" PRId64 ", term:%" PRId64 + ", totalReplicaNum:%d, changeVersion:%d; " + "current node, replicaNum:%d, peersNum:%d, lastConfigIndex:%" PRId64 ", changeVersion:%d", + ths->vgId, pEntry->index, pEntry->term, cfg.totalReplicaNum, cfg.changeVersion, ths->replicaNum, ths->peersNum, + ths->raftCfg.lastConfigIndex, ths->raftCfg.cfg.changeVersion); return 0; } - if(strcmp(str, "Commit") == 0){ - sInfo("vgId:%d, change config from %s. " - "this, i:%" PRId64 ", trNum:%d, vers:%d; " - "node, rNum:%d, pNum:%d, trNum:%d, " - "buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 "), " - "cond:(next i:%" PRId64 ", t:%" PRId64 " ==%s)", - ths->vgId, str, pEntry->index - 1, cfg.totalReplicaNum, cfg.changeVersion, - ths->replicaNum, ths->peersNum, ths->totalReplicaNum, - ths->pLogBuf->startIndex, ths->pLogBuf->commitIndex, ths->pLogBuf->matchIndex, ths->pLogBuf->endIndex, - pEntry->index, pEntry->term, TMSG_INFO(pEntry->originalRpcType)); - } - else{ - sInfo("vgId:%d, change config from %s. " - "this, i:%" PRId64 ", t:%" PRId64 ", trNum:%d, vers:%d; " - "node, rNum:%d, pNum:%d, trNum:%d, " - "buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 "), " - "cond:(pre i:%" PRId64 "==ci:%" PRId64 ", bci:%" PRId64 ")", - ths->vgId, str, pEntry->index, pEntry->term, cfg.totalReplicaNum, cfg.changeVersion, - ths->replicaNum, ths->peersNum, ths->totalReplicaNum, - ths->pLogBuf->startIndex, ths->pLogBuf->commitIndex, ths->pLogBuf->matchIndex, ths->pLogBuf->endIndex, - pEntry->index -1, ths->commitIndex, ths->pLogBuf->commitIndex); + if (strcmp(str, "Commit") == 0) { + sInfo( + "vgId:%d, change config from %s. " + "this, i:%" PRId64 + ", trNum:%d, vers:%d; " + "node, rNum:%d, pNum:%d, trNum:%d, " + "buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 + "), " + "cond:(next i:%" PRId64 ", t:%" PRId64 " ==%s)", + ths->vgId, str, pEntry->index - 1, cfg.totalReplicaNum, cfg.changeVersion, ths->replicaNum, ths->peersNum, + ths->totalReplicaNum, ths->pLogBuf->startIndex, ths->pLogBuf->commitIndex, ths->pLogBuf->matchIndex, + ths->pLogBuf->endIndex, pEntry->index, pEntry->term, TMSG_INFO(pEntry->originalRpcType)); + } else { + sInfo( + "vgId:%d, change config from %s. " + "this, i:%" PRId64 ", t:%" PRId64 + ", trNum:%d, vers:%d; " + "node, rNum:%d, pNum:%d, trNum:%d, " + "buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 + "), " + "cond:(pre i:%" PRId64 "==ci:%" PRId64 ", bci:%" PRId64 ")", + ths->vgId, str, pEntry->index, pEntry->term, cfg.totalReplicaNum, cfg.changeVersion, ths->replicaNum, + ths->peersNum, ths->totalReplicaNum, ths->pLogBuf->startIndex, ths->pLogBuf->commitIndex, + ths->pLogBuf->matchIndex, ths->pLogBuf->endIndex, pEntry->index - 1, ths->commitIndex, + ths->pLogBuf->commitIndex); } syncNodeLogConfigInfo(ths, &cfg, "before config change"); - + int32_t oldTotalReplicaNum = ths->totalReplicaNum; - if(cfg.totalReplicaNum == 1 || cfg.totalReplicaNum == 2){//remove replica - + if (cfg.totalReplicaNum == 1 || cfg.totalReplicaNum == 2) { // remove replica + bool incfg = false; - for(int32_t j = 0; j < cfg.totalReplicaNum; ++j){ - if(strcmp(ths->myNodeInfo.nodeFqdn, cfg.nodeInfo[j].nodeFqdn) == 0 - && ths->myNodeInfo.nodePort == cfg.nodeInfo[j].nodePort){ + for (int32_t j = 0; j < cfg.totalReplicaNum; ++j) { + if (strcmp(ths->myNodeInfo.nodeFqdn, cfg.nodeInfo[j].nodeFqdn) == 0 && + ths->myNodeInfo.nodePort == cfg.nodeInfo[j].nodePort) { incfg = true; break; } } - if(incfg){//remove other + if (incfg) { // remove other syncNodeResetPeerAndCfg(ths); - //no need to change myNodeInfo + // no need to change myNodeInfo - if(syncNodeRebuildPeerAndCfg(ths, &cfg) != 0){ - return -1; - }; - - if(syncNodeRebuildAndCopyIfExist(ths, oldTotalReplicaNum) != 0){ + if (syncNodeRebuildPeerAndCfg(ths, &cfg) != 0) { return -1; }; - } - else{//remove myself - //no need to do anything actually, to change the following to reduce distruptive server chance + + if (syncNodeRebuildAndCopyIfExist(ths, oldTotalReplicaNum) != 0) { + return -1; + }; + } else { // remove myself + // no need to do anything actually, to change the following to reduce distruptive server chance syncNodeResetPeerAndCfg(ths); - //change myNodeInfo + // change myNodeInfo ths->myNodeInfo.nodeRole = TAOS_SYNC_ROLE_LEARNER; - //change peer and cfg + // change peer and cfg ths->peersNum = 0; memcpy(&ths->raftCfg.cfg.nodeInfo[0], &ths->myNodeInfo, sizeof(SNodeInfo)); ths->raftCfg.cfg.replicaNum = 0; ths->raftCfg.cfg.totalReplicaNum = 1; - //change other - if(syncNodeRebuildAndCopyIfExist(ths, oldTotalReplicaNum) != 0){ + // change other + if (syncNodeRebuildAndCopyIfExist(ths, oldTotalReplicaNum) != 0) { return -1; } - //change state + // change state ths->state = TAOS_SYNC_STATE_LEARNER; } - ths->restoreFinish = false; - } - else{//add replica, or change replica type - if(ths->totalReplicaNum == 3){ //change replica type - sInfo("vgId:%d, begin change replica type", ths->vgId); + ths->restoreFinish = false; + } else { // add replica, or change replica type + if (ths->totalReplicaNum == 3) { // change replica type + sInfo("vgId:%d, begin change replica type", ths->vgId); - //change myNodeInfo - for(int32_t j = 0; j < cfg.totalReplicaNum; ++j){ - if(strcmp(ths->myNodeInfo.nodeFqdn, cfg.nodeInfo[j].nodeFqdn) == 0 - && ths->myNodeInfo.nodePort == cfg.nodeInfo[j].nodePort){ - if(cfg.nodeInfo[j].nodeRole == TAOS_SYNC_ROLE_VOTER){ + // change myNodeInfo + for (int32_t j = 0; j < cfg.totalReplicaNum; ++j) { + if (strcmp(ths->myNodeInfo.nodeFqdn, cfg.nodeInfo[j].nodeFqdn) == 0 && + ths->myNodeInfo.nodePort == cfg.nodeInfo[j].nodePort) { + if (cfg.nodeInfo[j].nodeRole == TAOS_SYNC_ROLE_VOTER) { ths->myNodeInfo.nodeRole = TAOS_SYNC_ROLE_VOTER; } } } - - //change peer and cfg + + // change peer and cfg syncNodeChangePeerAndCfgToVoter(ths, &cfg); - //change other + // change other syncNodeChangeToVoter(ths); - //change state - if(ths->state ==TAOS_SYNC_STATE_LEARNER){ - if(ths->myNodeInfo.nodeRole == TAOS_SYNC_ROLE_VOTER ){ + // change state + if (ths->state == TAOS_SYNC_STATE_LEARNER) { + if (ths->myNodeInfo.nodeRole == TAOS_SYNC_ROLE_VOTER) { ths->state = TAOS_SYNC_STATE_FOLLOWER; } } - ths->restoreFinish = false; - } - else{//add replica + ths->restoreFinish = false; + } else { // add replica sInfo("vgId:%d, begin add replica", ths->vgId); - //no need to change myNodeInfo + // no need to change myNodeInfo - //change peer and cfg - if(syncNodeRebuildPeerAndCfg(ths, &cfg) != 0){ + // change peer and cfg + if (syncNodeRebuildPeerAndCfg(ths, &cfg) != 0) { return -1; }; - //change other - if(syncNodeRebuildAndCopyIfExist(ths, oldTotalReplicaNum) != 0){ + // change other + if (syncNodeRebuildAndCopyIfExist(ths, oldTotalReplicaNum) != 0) { return -1; }; - //no need to change state + // no need to change state - if(ths->myNodeInfo.nodeRole == TAOS_SYNC_ROLE_LEARNER){ + if (ths->myNodeInfo.nodeRole == TAOS_SYNC_ROLE_LEARNER) { ths->restoreFinish = false; } } @@ -2867,7 +2859,7 @@ int32_t syncNodeChangeConfig(SSyncNode* ths, SSyncRaftEntry* pEntry, char* str){ syncNodeLogConfigInfo(ths, &cfg, "after config change"); - if(syncWriteCfgFile(ths) != 0){ + if (syncWriteCfgFile(ths) != 0) { sError("vgId:%d, failed to create sync cfg file", ths->vgId); return -1; }; @@ -2896,7 +2888,7 @@ int32_t syncNodeAppend(SSyncNode* ths, SSyncRaftEntry* pEntry) { code = 0; _out:; // proceed match index, with replicating on needed - SyncIndex matchIndex = syncLogBufferProceed(ths->pLogBuf, ths, NULL, "Append"); + SyncIndex matchIndex = syncLogBufferProceed(ths->pLogBuf, ths, NULL, "Append"); sTrace("vgId:%d, append raft entry. index:%" PRId64 ", term:%" PRId64 " pBuf: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 ")", @@ -2927,7 +2919,7 @@ bool syncNodeHeartbeatReplyTimeout(SSyncNode* pSyncNode) { int32_t toCount = 0; int64_t tsNow = taosGetTimestampMs(); for (int32_t i = 0; i < pSyncNode->peersNum; ++i) { - if(pSyncNode->peersNodeInfo[i].nodeRole == TAOS_SYNC_ROLE_LEARNER){ + if (pSyncNode->peersNodeInfo[i].nodeRole == TAOS_SYNC_ROLE_LEARNER) { continue; } int64_t recvTime = syncIndexMgrGetRecvTime(pSyncNode->pMatchIndex, &(pSyncNode->peersId[i])); @@ -3191,9 +3183,9 @@ int32_t syncNodeOnClientRequest(SSyncNode* ths, SRpcMsg* pMsg, SyncIndex* pRetIn pEntry = syncEntryBuildFromRpcMsg(pMsg, term, index); } - //1->2, config change is add in write thread, and will continue in sync thread - //need save message for it - if(pMsg->msgType == TDMT_SYNC_CONFIG_CHANGE){ + // 1->2, config change is add in write thread, and will continue in sync thread + // need save message for it + if (pMsg->msgType == TDMT_SYNC_CONFIG_CHANGE) { SRespStub stub = {.createTime = taosGetTimestampMs(), .rpcMsg = *pMsg}; uint64_t seqNum = syncRespMgrAdd(ths->pSyncRespMgr, &stub); pEntry->seqNum = seqNum; @@ -3209,21 +3201,21 @@ int32_t syncNodeOnClientRequest(SSyncNode* ths, SRpcMsg* pMsg, SyncIndex* pRetIn (*pRetIndex) = index; } - if(pEntry->originalRpcType == TDMT_SYNC_CONFIG_CHANGE){ + if (pEntry->originalRpcType == TDMT_SYNC_CONFIG_CHANGE) { int32_t code = syncNodeCheckChangeConfig(ths, pEntry); - if(code < 0){ + if (code < 0) { sError("vgId:%d, failed to check change config since %s.", ths->vgId, terrstr()); syncEntryDestroy(pEntry); pEntry = NULL; return -1; } - - if(code > 0){ + + if (code > 0) { SRpcMsg rsp = {.code = pMsg->code, .info = pMsg->info}; (void)syncRespMgrGetAndDel(ths->pSyncRespMgr, pEntry->seqNum, &rsp.info); if (rsp.info.handle != NULL) { tmsgSendRsp(&rsp); - } + } syncEntryDestroy(pEntry); pEntry = NULL; return -1; From c56ddc1ba944ac4b9b7cce4a5352804434ee8f18 Mon Sep 17 00:00:00 2001 From: Yihao Deng Date: Fri, 19 Jan 2024 07:52:32 +0000 Subject: [PATCH 02/32] refactor retry --- source/dnode/mnode/impl/src/mndMnode.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndMnode.c b/source/dnode/mnode/impl/src/mndMnode.c index 9592be5263..1068a2c5b3 100644 --- a/source/dnode/mnode/impl/src/mndMnode.c +++ b/source/dnode/mnode/impl/src/mndMnode.c @@ -241,7 +241,7 @@ void mndGetMnodeEpSet(SMnode *pMnode, SEpSet *pEpSet) { } void *pIter = NULL; - pEpSet->inUse = 0; + // pEpSet->inUse = 0; while (1) { SMnodeObj *pObj = NULL; pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pObj); @@ -251,7 +251,8 @@ void mndGetMnodeEpSet(SMnode *pMnode, SEpSet *pEpSet) { if (mndIsLeader(pMnode)) { pEpSet->inUse = pEpSet->numOfEps; } else { - // pEpSet->inUse = (pEpSet->numOfEps + 1) % totalMnodes; + pEpSet->inUse = (pEpSet->numOfEps + 1) % totalMnodes; + pEpSet->inUse = 0; } } if (pObj->pDnode != NULL) { From 95dec503400c00c729cd10962ca1a94662b15f5c Mon Sep 17 00:00:00 2001 From: danielclow <106956386+danielclow@users.noreply.github.com> Date: Fri, 19 Jan 2024 16:49:46 +0800 Subject: [PATCH 03/32] docs: update docs for docusaurus 3.0 --- .../03-insert-data/50-opentsdb-json.mdx | 2 +- docs/en/07-develop/04-query-data/index.mdx | 4 +- docs/en/07-develop/09-udf.md | 4 +- docs/en/08-client-libraries/03-cpp.mdx | 2 +- docs/en/08-client-libraries/06-rust.mdx | 2 +- docs/en/08-client-libraries/07-python.mdx | 2 +- docs/en/08-client-libraries/80-php.mdx | 2 +- docs/en/12-taos-sql/01-data-type.md | 8 +-- docs/en/12-taos-sql/02-database.md | 2 +- docs/en/12-taos-sql/10-function.md | 6 +- docs/en/12-taos-sql/16-operators.md | 6 +- docs/en/12-taos-sql/29-changes.md | 2 +- docs/en/13-operation/17-diagnose.md | 4 +- .../14-reference/02-rest-api/02-rest-api.mdx | 2 +- docs/en/14-reference/04-taosadapter.md | 4 +- docs/en/14-reference/05-taosbenchmark.md | 60 +++++++++---------- docs/en/14-reference/12-config/index.md | 2 +- docs/en/14-reference/_collectd.mdx | 4 +- docs/en/14-reference/_icinga2.mdx | 2 +- docs/en/14-reference/_prometheus.mdx | 4 +- docs/en/14-reference/_statsd.mdx | 2 +- docs/en/14-reference/_telegraf.mdx | 2 +- docs/en/20-third-party/01-grafana.mdx | 8 +-- docs/en/20-third-party/11-kafka.md | 8 +-- 24 files changed, 72 insertions(+), 72 deletions(-) diff --git a/docs/en/07-develop/03-insert-data/50-opentsdb-json.mdx b/docs/en/07-develop/03-insert-data/50-opentsdb-json.mdx index a40b5f264d..fc54421daf 100644 --- a/docs/en/07-develop/03-insert-data/50-opentsdb-json.mdx +++ b/docs/en/07-develop/03-insert-data/50-opentsdb-json.mdx @@ -101,7 +101,7 @@ Query OK, 2 row(s) in set (0.004076s) ## Query Examples -If you want query the data of "tags": {"location": "California.LosAngeles", "groupid": 1}, here is the query SQL: +If you want query the data of "tags": {"location": "California.LosAngeles", "groupid": 1}, here is the query SQL: ```sql SELECT * FROM `meters_current` WHERE location = "California.LosAngeles" AND groupid = 3; diff --git a/docs/en/07-develop/04-query-data/index.mdx b/docs/en/07-develop/04-query-data/index.mdx index e44161d397..8e21fd325c 100644 --- a/docs/en/07-develop/04-query-data/index.mdx +++ b/docs/en/07-develop/04-query-data/index.mdx @@ -22,7 +22,7 @@ import CAsync from "./_c_async.mdx"; SQL is used by TDengine as its query language. Application programs can send SQL statements to TDengine through REST API or client libraries. TDengine's CLI `taos` can also be used to execute ad hoc SQL queries. Here is the list of major query functionalities supported by TDengine: - Query on single column or multiple columns -- Filter on tags or data columns: >, <, =, <\>, like +- Filter on tags or data columns: >, <, =, <>, like - Grouping of results: `Group By` - Sorting of results: `Order By` - Limit the number of results: `Limit/Offset` - Windowed aggregate queries for time windows (interval), session windows (session), and state windows (state_window) - Arithmetic on columns of numeric types or aggregate results @@ -159,7 +159,7 @@ In the section describing [Insert](../insert-data/sql-writing), a database named :::note 1. With either REST connection or native connection, the above sample code works well. -2. Please note that `use db` can't be used in case of REST connection because it's stateless. You can specify the database name by either the REST endpoint's parameter or . in the SQL command. +2. Please note that `use db` can't be used in case of REST connection because it's stateless. You can specify the database name by either the REST endpoint's parameter or <db_name>.<table_name> in the SQL command. ::: diff --git a/docs/en/07-develop/09-udf.md b/docs/en/07-develop/09-udf.md index 9471efc761..f99e98929d 100644 --- a/docs/en/07-develop/09-udf.md +++ b/docs/en/07-develop/09-udf.md @@ -104,7 +104,7 @@ Replace `aggfn` with the name of your function. ### UDF Interface Definition in C -There are strict naming conventions for interface functions. The names of the start, finish, init, and destroy interfaces must be _start, _finish, _init, and _destroy, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function. +There are strict naming conventions for interface functions. The names of the start, finish, init, and destroy interfaces must be <udf-name>_start, <udf-name>_finish, <udf-name>_init, and <udf-name>_destroy, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function. Interface functions return a value that indicates whether the operation was successful. If an operation fails, the interface function returns an error code. Otherwise, it returns TSDB_CODE_SUCCESS. The error codes are defined in `taoserror.h` and in the common API error codes in `taos.h`. For example, TSDB_CODE_UDF_INVALID_INPUT indicates invalid input. TSDB_CODE_OUT_OF_MEMORY indicates insufficient memory. @@ -194,7 +194,7 @@ typedef struct SUdfInterBuf { ``` The data structure is described as follows: -- The SUdfDataBlock block includes the number of rows (numOfRows) and the number of columns (numCols). udfCols[i] (0 <= i <= numCols-1) indicates that each column is of type SUdfColumn. +- The SUdfDataBlock block includes the number of rows (numOfRows) and the number of columns (numCols). udfCols[i] (0 <= i <= numCols-1) indicates that each column is of type SUdfColumn. - SUdfColumn includes the definition of the data type of the column (colMeta) and the data in the column (colData). - The member definitions of SUdfColumnMeta are the same as the data type definitions in `taos.h`. - The data in SUdfColumnData can become longer. varLenCol indicates variable-length data, and fixLenCol indicates fixed-length data. diff --git a/docs/en/08-client-libraries/03-cpp.mdx b/docs/en/08-client-libraries/03-cpp.mdx index 80014ef3bf..59c5af9c03 100644 --- a/docs/en/08-client-libraries/03-cpp.mdx +++ b/docs/en/08-client-libraries/03-cpp.mdx @@ -186,7 +186,7 @@ The base API is used to do things like create database connections and provide a - The variables database and len are applied by the user outside and allocated space. The current database name and length will be assigned to database and len. - As long as the db name is not assigned to the database normally (including truncation), an error will be returned with the return value of -1, and then the user can use taos_errstr(NULL) to get error message. - - If database==NULL or len<=0, returns an error, the space required to store the db (including the last '\0') in the variable required + - If database==NULL or len<=0, returns an error, the space required to store the db (including the last '\0') in the variable required - If len is less than the space required to store the db (including the last '\0'), an error is returned. The truncated data assigned in the database ends with '\0'. - If len is greater than or equal to the space required to store the db (including the last '\0'), return normal 0, and assign the db name ending with '\0' in the database. diff --git a/docs/en/08-client-libraries/06-rust.mdx b/docs/en/08-client-libraries/06-rust.mdx index 8fa5c946aa..ff4c1bf92b 100644 --- a/docs/en/08-client-libraries/06-rust.mdx +++ b/docs/en/08-client-libraries/06-rust.mdx @@ -69,7 +69,7 @@ TDengine currently supports timestamp, number, character, Boolean type, and the | SMALLINT | i16 | | TINYINT | i8 | | BOOL | bool | -| BINARY | Vec | +| BINARY | Vec<u8> | | NCHAR | String | | JSON | serde_json::Value | diff --git a/docs/en/08-client-libraries/07-python.mdx b/docs/en/08-client-libraries/07-python.mdx index 4a06c42c12..aacfd0fe53 100644 --- a/docs/en/08-client-libraries/07-python.mdx +++ b/docs/en/08-client-libraries/07-python.mdx @@ -315,7 +315,7 @@ The `connect()` function returns a `taos.TaosConnection` instance. In client-sid All arguments to the `connect()` function are optional keyword arguments. The following are the connection parameters specified. -- `url`: The URL of taosAdapter REST service. The default is . +- `url`: The URL of taosAdapter REST service. The default is `http://localhost:6041`. - `user`: TDengine user name. The default is `root`. - `password`: TDengine user password. The default is `taosdata`. - `timeout`: HTTP request timeout. Enter a value in seconds. The default is `socket._GLOBAL_DEFAULT_TIMEOUT`. Usually, no configuration is needed. diff --git a/docs/en/08-client-libraries/80-php.mdx b/docs/en/08-client-libraries/80-php.mdx index ccaa2f8d55..a83391c19c 100644 --- a/docs/en/08-client-libraries/80-php.mdx +++ b/docs/en/08-client-libraries/80-php.mdx @@ -8,7 +8,7 @@ description: This document describes the TDengine PHP client library. PHP client library relies on TDengine client driver. -Project Repository: +Project Repository: [https://github.com/Yurunsoft/php-tdengine](https://github.com/Yurunsoft/php-tdengine) After TDengine client or server is installed, `taos.h` is located at: diff --git a/docs/en/12-taos-sql/01-data-type.md b/docs/en/12-taos-sql/01-data-type.md index 020eb27cfe..065daf2ecd 100644 --- a/docs/en/12-taos-sql/01-data-type.md +++ b/docs/en/12-taos-sql/01-data-type.md @@ -68,14 +68,14 @@ TDengine supports a variety of constants: | # | **Syntax** | **Type** | **Description** | | --- | :-----------------------------------------------: | --------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| 1 | [{+ \| -}]123 | BIGINT | Integer literals are of type BIGINT. Data that exceeds the length of the BIGINT type is truncated. | +| 1 | [+ \| -]123 | BIGINT | Integer literals are of type BIGINT. Data that exceeds the length of the BIGINT type is truncated. | | 2 | 123.45 | DOUBLE | Floating-point literals are of type DOUBLE. Numeric values will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used. | | 3 | 1.2E3 | DOUBLE | Literals in scientific notation are of type DOUBLE. | | 4 | 'abc' | BINARY | Content enclosed in single quotation marks is of type BINARY. The size of a BINARY is the size of the string in bytes. A literal single quote inside the string must be escaped with a backslash `\'`. | | 5 | 'abc' | BINARY | Content enclosed in double quotation marks is of type BINARY. The size of a BINARY is the size of the string in bytes. A literal double quote inside the string must be escaped with a backslash `\"`. | -| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | The TIMESTAMP keyword indicates that the following string literal is interpreted as a timestamp. The string must be in YYYY-MM-DD HH:mm:ss.MS format. The precision is inherited from the database configuration. | -| 7 | {TRUE \| FALSE} | BOOL | Boolean literals are of type BOOL. | -| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | The preceding characters indicate null literals. These can be used with any data type. | +| 6 | TIMESTAMP ['literal' \| "literal"] | TIMESTAMP | The TIMESTAMP keyword indicates that the following string literal is interpreted as a timestamp. The string must be in YYYY-MM-DD HH:mm:ss.MS format. The precision is inherited from the database configuration. | +| 7 | [TRUE \| FALSE] | BOOL | Boolean literals are of type BOOL. | +| 8 | ['' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL ] | -- | The preceding characters indicate null literals. These can be used with any data type. | :::note Numeric values will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number. diff --git a/docs/en/12-taos-sql/02-database.md b/docs/en/12-taos-sql/02-database.md index ccf340b511..f49a9c6881 100644 --- a/docs/en/12-taos-sql/02-database.md +++ b/docs/en/12-taos-sql/02-database.md @@ -56,7 +56,7 @@ database_option: { - WAL_FSYNC_PERIOD: specifies the interval (in milliseconds) at which data is written from the WAL to disk. This parameter takes effect only when the WAL parameter is set to 2. The default value is 3000. Enter a value between 0 and 180000. The value 0 indicates that incoming data is immediately written to disk. - MAXROWS: specifies the maximum number of rows recorded in a block. The default value is 4096. - MINROWS: specifies the minimum number of rows recorded in a block. The default value is 100. -- KEEP: specifies the time for which data is retained. Enter a value between 1 and 365000. The default value is 3650. The value of the KEEP parameter must be greater than or equal to three times of the value of the DURATION parameter. TDengine automatically deletes data that is older than the value of the KEEP parameter. You can use m (minutes), h (hours), and d (days) as the unit, for example KEEP 100h or KEEP 10d. If you do not include a unit, d is used by default. TDengine Enterprise supports [Tiered Storage](https://docs.tdengine.com/tdinternal/arch/#tiered-storage) function, thus multiple KEEP values (comma separated and up to 3 values supported, and meet keep 0 <= keep 1 <= keep 2, e.g. KEEP 100h,100d,3650d) are supported; TDengine OSS does not support Tiered Storage function (although multiple keep values are configured, they do not take effect, only the maximum keep value is used as KEEP). +- KEEP: specifies the time for which data is retained. Enter a value between 1 and 365000. The default value is 3650. The value of the KEEP parameter must be greater than or equal to three times of the value of the DURATION parameter. TDengine automatically deletes data that is older than the value of the KEEP parameter. You can use m (minutes), h (hours), and d (days) as the unit, for example KEEP 100h or KEEP 10d. If you do not include a unit, d is used by default. TDengine Enterprise supports [Tiered Storage](https://docs.tdengine.com/tdinternal/arch/#tiered-storage) function, thus multiple KEEP values (comma separated and up to 3 values supported, and meet keep 0 <= keep 1 <= keep 2, e.g. KEEP 100h,100d,3650d) are supported; TDengine OSS does not support Tiered Storage function (although multiple keep values are configured, they do not take effect, only the maximum keep value is used as KEEP). - PAGES: specifies the number of pages in the metadata storage engine cache on each vnode. Enter a value greater than or equal to 64. The default value is 256. The space occupied by metadata storage on each vnode is equal to the product of the values of the PAGESIZE and PAGES parameters. The space occupied by default is 1 MB. - PAGESIZE: specifies the size (in KB) of each page in the metadata storage engine cache on each vnode. The default value is 4. Enter a value between 1 and 16384. - PRECISION: specifies the precision at which a database records timestamps. Enter ms for milliseconds, us for microseconds, or ns for nanoseconds. The default value is ms. diff --git a/docs/en/12-taos-sql/10-function.md b/docs/en/12-taos-sql/10-function.md index 851ef86b67..fbdae3445b 100644 --- a/docs/en/12-taos-sql/10-function.md +++ b/docs/en/12-taos-sql/10-function.md @@ -877,11 +877,11 @@ HISTOGRAM(expr, bin_type, bin_description, normalized) - "user_input": "[1, 3, 5, 7]": User specified bin values. - - "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}" + - "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}" "start" - bin starting point. "width" - bin offset. "count" - number of bins generated. "infinity" - whether to add (-inf, inf) as start/end point in generated set of bins. The above "linear_bin" descriptor generates a set of bins: [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf]. - - "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}" + - "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}" "start" - bin starting point. "factor" - exponential factor of bin offset. "count" - number of bins generated. "infinity" - whether to add (-inf, inf) as start/end point in generated range of bins. The above "linear_bin" descriptor generates a set of bins: [-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf]. - normalized: setting to 1/0 to turn on/off result normalization. Valid values are 0 or 1. @@ -977,7 +977,7 @@ ignore_null_values: { - `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter. - The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. If no `where` condition is specified then all original data is the input. - `INTERP` must be used along with `RANGE`, `EVERY`, `FILL` keywords. -- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range. timestamp2 is the ending point of the output time range. +- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range. timestamp2 is the ending point of the output time range. - The number of rows in the result set of `INTERP` is determined by the parameter `EVERY(time_unit)`. Starting from timestamp1, one interpolation is performed for every time interval specified `time_unit` parameter. The parameter `time_unit` must be an integer, with no quotes, with a time unit of: a(millisecond)), s(second), m(minute), h(hour), d(day), or w(week). For example, `EVERY(500a)` will interpolate every 500 milliseconds. - Interpolation is performed based on `FILL` parameter. For more information about FILL clause, see [FILL Clause](../distinguished/#fill-clause). - When only one timestamp value is specified in `RANGE` clause, `INTERP` is used to generate interpolation at this point in time. In this case, `EVERY` clause can be omitted. For example, SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00') FILL(linear). diff --git a/docs/en/12-taos-sql/16-operators.md b/docs/en/12-taos-sql/16-operators.md index ce8ab8a03c..26c937b351 100644 --- a/docs/en/12-taos-sql/16-operators.md +++ b/docs/en/12-taos-sql/16-operators.md @@ -35,9 +35,9 @@ TDengine supports the `UNION` and `UNION ALL` operations. UNION ALL collects all | # | **Operator** | **Supported Data Types** | **Description** | | --- | :---------------: | -------------------------------------------------------------------- | -------------------- | | 1 | = | All types except BLOB, MEDIUMBLOB, and JSON | Equal to | -| 2 | <\>, != | All types except BLOB, MEDIUMBLOB, and JSON; the primary key (timestamp) is also not supported | Not equal to | -| 3 | \>, < | All types except BLOB, MEDIUMBLOB, and JSON | Greater than and less than | -| 4 | \>=, <= | All types except BLOB, MEDIUMBLOB, and JSON | Greater than or equal to and less than or equal to | +| 2 | <>, != | All types except BLOB, MEDIUMBLOB, and JSON; the primary key (timestamp) is also not supported | Not equal to | +| 3 | >, < | All types except BLOB, MEDIUMBLOB, and JSON | Greater than and less than | +| 4 | >=, <= | All types except BLOB, MEDIUMBLOB, and JSON | Greater than or equal to and less than or equal to | | 5 | IS [NOT] NULL | All types | Indicates whether the value is null | | 6 | [NOT] BETWEEN AND | All types except BLOB, MEDIUMBLOB, JSON and GEOMETRY | Closed interval comparison | | 7 | IN | All types except BLOB, MEDIUMBLOB, and JSON; the primary key (timestamp) is also not supported | Equal to any value in the list | diff --git a/docs/en/12-taos-sql/29-changes.md b/docs/en/12-taos-sql/29-changes.md index bbb52db4d9..a269e675d1 100644 --- a/docs/en/12-taos-sql/29-changes.md +++ b/docs/en/12-taos-sql/29-changes.md @@ -71,7 +71,7 @@ The following data types can be used in the schema for standard tables. | 44 | SHOW STREAMS | Modified | This statement previously showed continuous queries. The continuous query feature has been replaced with the stream processing feature. This statement now shows streams that have been created. | 45 | SHOW SUBSCRIPTIONS | Added | Shows all subscriptions in the current database. | 46 | SHOW TABLES | Modified | Only shows table names. -| 47 | SHOW TABLE DISTRIBUTED | Added | Shows how table data is distributed. This replaces the `SELECT _block_dist() FROM { tb_name | stb_name }` command. +| 47 | SHOW TABLE DISTRIBUTED | Added | Shows how table data is distributed. This replaces the `SELECT _block_dist() FROM { tb_name | stb_name }` command. | 48 | SHOW TOPICS | Added | Shows all subscribed topics in the current database. | 49 | SHOW TRANSACTIONS | Added | Shows all running transactions in the system. | 50 | SHOW DNODE VARIABLES | Added | Shows the configuration of the specified dnode. diff --git a/docs/en/13-operation/17-diagnose.md b/docs/en/13-operation/17-diagnose.md index 33a0a8c28c..6cf8b1da1d 100644 --- a/docs/en/13-operation/17-diagnose.md +++ b/docs/en/13-operation/17-diagnose.md @@ -15,7 +15,7 @@ Diagnostic steps: 2. On the server side, execute command `taos -n server -P -l ` to monitor the port range starting from the port specified by `-P` parameter with the role of "server". 3. On the client side, execute command `taos -n client -h -P -l ` to send a testing package to the specified server and port. --l : The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000. +-l <pktlen>: The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000. Please note that the package length must be same in the above 2 commands executed on server side and client side respectively. Output of the server side for the example is below: @@ -63,7 +63,7 @@ Once this parameter is set to 135 or 143, the log file grows very quickly especi ## Client Log -An independent log file, named as "taoslog+" is generated for each client program, i.e. a client process. The parameter `debugFlag` is used to control the log level. The default value is 131. For debugging and tracing, it needs to be set to either 135 or 143 respectively. +An independent log file, named as "taoslog+<seq num>" is generated for each client program, i.e. a client process. The parameter `debugFlag` is used to control the log level. The default value is 131. For debugging and tracing, it needs to be set to either 135 or 143 respectively. The default value of `debugFlag` is also 131 and only logs at level of INFO/ERROR/WARNING are recorded. As stated above, for debugging and tracing, it needs to be changed to 135 or 143 respectively, so that logs at DEBUG or TRACE level can be recorded. diff --git a/docs/en/14-reference/02-rest-api/02-rest-api.mdx b/docs/en/14-reference/02-rest-api/02-rest-api.mdx index 76dc3b6b58..405b154d1d 100644 --- a/docs/en/14-reference/02-rest-api/02-rest-api.mdx +++ b/docs/en/14-reference/02-rest-api/02-rest-api.mdx @@ -81,7 +81,7 @@ Parameter Description: :::note -URL Encoding. Make sure that parameters are properly encoded. For example, when specifying a timezone you must properly encode special characters. ?tz=Etc/GMT+10 will not work because the <+> plus symbol is recognized as a space in the url. It's best practice to encode all special characters in a parameter. Instead use ?tz=Etc%2FGMT%2B10 for the parameter. +URL Encoding. Make sure that parameters are properly encoded. For example, when specifying a timezone you must properly encode special characters. ?tz=Etc/GMT+10 will not work because the + plus symbol is recognized as a space in the url. It's best practice to encode all special characters in a parameter. Instead use ?tz=Etc%2FGMT%2B10 for the parameter. ::: diff --git a/docs/en/14-reference/04-taosadapter.md b/docs/en/14-reference/04-taosadapter.md index a9330d21c7..c21a2d3a3f 100644 --- a/docs/en/14-reference/04-taosadapter.md +++ b/docs/en/14-reference/04-taosadapter.md @@ -166,8 +166,8 @@ See [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/bl - Compatible with InfluxDB v1 write interface [https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/](https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/) - Compatible with OpenTSDB JSON and telnet format writes - - - - + - [http://opentsdb.net/docs/build/html/api_http/put.html](http://opentsdb.net/docs/build/html/api_http/put.html) + - [http://opentsdb.net/docs/build/html/api_telnet/put.html](http://opentsdb.net/docs/build/html/api_telnet/put.html) - Seamless connection to collectd collectd is a system statistics collection daemon, please visit [https://collectd.org/](https://collectd.org/) for more information. - Seamless connection with StatsD diff --git a/docs/en/14-reference/05-taosbenchmark.md b/docs/en/14-reference/05-taosbenchmark.md index 4744e143fc..2f953b1f8c 100644 --- a/docs/en/14-reference/05-taosbenchmark.md +++ b/docs/en/14-reference/05-taosbenchmark.md @@ -94,67 +94,67 @@ taosBenchmark -f ## Command-line argument in detail -- **-f/--file ** : +- **-f/--file <json file>** : specify the configuration file to use. This file includes All parameters. Users should not use this parameter with other parameters on the command-line. There is no default value. -- **-c/--config-dir ** : +- **-c/--config-dir <dir>** : specify the directory where the TDengine cluster configuration file. The default path is `/etc/taos`. -- **-h/--host ** : +- **-h/--host <host>** : Specify the FQDN of the TDengine server to connect to. The default value is localhost. -- **-P/--port ** : +- **-P/--port <port>** : The port number of the TDengine server to connect to, the default value is 6030. -- **-I/--interface ** : +- **-I/--interface <insertMode>** : Insert mode. Options are taosc, rest, stmt, sml, sml-rest, corresponding to normal write, restful interface writing, parameter binding interface writing, schemaless interface writing, RESTful schemaless interface writing (provided by taosAdapter). The default value is taosc. -- **-u/--user ** : +- **-u/--user <user>** : User name to connect to the TDengine server. Default is root. - **-U/--supplement-insert ** : Supplementally insert data without create database and table, optional, default is off. -- **-p/--password ** : +- **-p/--password <passwd>** : The default password to connect to the TDengine server is `taosdata`. -- **-o/--output ** : +- **-o/--output <file>** : specify the path of the result output file, the default value is `. /output.txt`. -- **-T/--thread ** : +- **-T/--thread <threadNum>** : The number of threads to insert data. Default is 8. -- **-B/--interlace-rows ** : +- **-B/--interlace-rows <rowNum>** : Enables interleaved insertion mode and specifies the number of rows of data to be inserted into each child table. Interleaved insertion mode means inserting the number of rows specified by this parameter into each sub-table and repeating the process until all sub-tables have been inserted. The default value is 0, i.e., data is inserted into one sub-table before the next sub-table is inserted. -- **-i/--insert-interval ** : +- **-i/--insert-interval <timeInterval>** : Specify the insert interval in `ms` for interleaved insert mode. The default value is 0. It only works if `-B/--interlace-rows` is greater than 0. After inserting interlaced rows for each child table, the data insertion thread will wait for the interval specified by this value before proceeding to the next round of writes. -- **-r/--rec-per-req ** : +- **-r/--rec-per-req <rowNum>** : Writing the number of rows of records per request to TDengine, the default value is 30000. -- **-t/--tables ** : +- **-t/--tables <tableNum>** : Specify the number of sub-tables. The default is 10000. -- **-S/--timestampstep ** : +- **-S/--timestampstep <stepLength>** : Timestamp step for inserting data in each child table in ms, default is 1. -- **-n/--records ** : +- **-n/--records <recordNum>** : The default value of the number of records inserted in each sub-table is 10000. -- **-d/--database ** : +- **-d/--database <dbName>** : The name of the database used, the default value is `test`. -- **-b/--data-type ** : +- **-b/--data-type <colType>** : specify the type of the data columns of the super table. It defaults to three columns of type FLOAT, INT, and FLOAT if not used. -- **-l/--columns ** : +- **-l/--columns <colNum>** : specify the number of columns in the super table. If both this parameter and `-b/--data-type` is set, the final result number of columns is the greater of the two. If the number specified by this parameter is greater than the number of columns specified by `-b/--data-type`, the unspecified column type defaults to INT, for example: `-l 5 -b float,double`, then the final column is `FLOAT,DOUBLE,INT,INT,INT`. If the number of columns specified is less than or equal to the number of columns specified by `-b/--data-type`, then the result is the column and type specified by `-b/--data-type`, e.g.: `-l 3 -b float,double,float,bigint`. The last column is `FLOAT,DOUBLE, FLOAT,BIGINT`. -- **-L/--partial-col-num ** : +- **-L/--partial-col-num <colNum> ** : Specify first numbers of columns has data. Rest of columns' data are NULL. Default is all columns have data. -- **-A/--tag-type ** : +- **-A/--tag-type <tagType>** : The tag column type of the super table. nchar and binary types can both set the length, for example: ``` @@ -168,10 +168,10 @@ Note: In some shells, such as bash, "()" needs to be escaped, so the above comma taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\) ``` -- **-w/--binwidth **: +- **-w/--binwidth <length>**: specify the default length for nchar and binary types. The default value is 64. -- **-m/--table-prefix ** : +- **-m/--table-prefix <tablePrefix>** : The prefix of the sub-table name, the default value is "d". - **-E/--escape-character** : @@ -192,25 +192,25 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\) - **-y/--answer-yes** : Switch parameter that requires the user to confirm at the prompt to continue. The default value is false. -- **-O/--disorder ** : +- **-O/--disorder <Percentage>** : Specify the percentage probability of disordered data, with a value range of [0,50]. The default is 0, i.e., there is no disordered data. -- **-R/--disorder-range ** : +- **-R/--disorder-range <timeRange>** : Specify the timestamp range for the disordered data. It leads the resulting disorder timestamp as the ordered timestamp minus a random value in this range. Valid only if the percentage of disordered data specified by `-O/--disorder` is greater than 0. -- **-F/--prepared_rand ** : +- **-F/--prepared_rand <Num>** : Specify the number of unique values in the generated random data. A value of 1 means that all data are equal. The default value is 10000. -- **-a/--replica ** : +- **-a/--replica <replicaNum>** : Specify the number of replicas when creating the database. The default value is 1. -- **-k/--keep-trying ** : +- **-k/--keep-trying <NUMBER>** : Keep trying if failed to insert, default is no. Available with v3.0.9+. -- **-z/--trying-interval ** : +- **-z/--trying-interval <NUMBER&;gt;** : Specify interval between keep trying insert. Valid value is a positive number. Only valid when keep trying be enabled. Available with v3.0.9+. -- **-v/--vgroups ** : +- **-v/--vgroups <NUMBER>** : Specify vgroups number for creating a database, only valid with daemon version 3.0+ - **-V/--version** : @@ -226,7 +226,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\) The parameters listed in this section apply to all function modes. - **filetype** : The function to be tested, with optional values `insert`, `query` and `subscribe`. These correspond to the insert, query, and subscribe functions, respectively. Users can specify only one of these in each configuration file. -**cfgdir**: specify the TDengine client configuration file's directory. The default path is /etc/taos. +**cfgdir**: specify the TDengine client configuration file's directory. The default path is `/etc/taos`. - **host**: Specify the FQDN of the TDengine server to connect. The default value is `localhost`. diff --git a/docs/en/14-reference/12-config/index.md b/docs/en/14-reference/12-config/index.md index c1abfd3e39..af88978603 100755 --- a/docs/en/14-reference/12-config/index.md +++ b/docs/en/14-reference/12-config/index.md @@ -289,7 +289,7 @@ A specific type "nchar" is provided in TDengine to store non-ASCII characters su The characters input on the client side are encoded using the default system encoding, which is UTF-8 on Linux/macOS, or GB18030 or GBK on some systems in Chinese, POSIX in docker, CP936 on Windows in Chinese. The encoding of the operating system in use must be set correctly so that the characters in nchar type can be converted to UCS4-LE. -The locale definition standard on Linux/macOS is: \_., for example, in "zh_CN.UTF-8", "zh" means Chinese, "CN" means China mainland, "UTF-8" means charset. The charset indicates how to display the characters. On Linux/macOS, the charset can be set by locale in the system. On Windows system another configuration parameter `charset` must be used to configure charset because the locale used on Windows is not POSIX standard. Of course, `charset` can also be used on Linux/macOS to specify the charset. +The locale definition standard on Linux/macOS is: <Language>\_<Region>.<charset>, for example, in "zh_CN.UTF-8", "zh" means Chinese, "CN" means China mainland, "UTF-8" means charset. The charset indicates how to display the characters. On Linux/macOS, the charset can be set by locale in the system. On Windows system another configuration parameter `charset` must be used to configure charset because the locale used on Windows is not POSIX standard. Of course, `charset` can also be used on Linux/macOS to specify the charset. ::: diff --git a/docs/en/14-reference/_collectd.mdx b/docs/en/14-reference/_collectd.mdx index ce88328098..9dd2f08b1c 100644 --- a/docs/en/14-reference/_collectd.mdx +++ b/docs/en/14-reference/_collectd.mdx @@ -36,7 +36,7 @@ LoadPlugin network ``` -where fills in the server's domain name or IP address running taosAdapter. fills in the port that taosAdapter uses to receive collectd data (default is 6045). +where <taosAdapter's host> fills in the server's domain name or IP address running taosAdapter. <port for collectd direct> fills in the port that taosAdapter uses to receive collectd data (default is 6045). An example is as follows. @@ -62,7 +62,7 @@ LoadPlugin write_tsdb ``` -Where is the domain name or IP address of the server running taosAdapter. Fill in the data that taosAdapter uses to receive the collectd write_tsdb plugin (default is 6047). +Where <taosAdapter's host> is the domain name or IP address of the server running taosAdapter. <port for collectd write_tsdb plugin> Fill in the data that taosAdapter uses to receive the collectd write_tsdb plugin (default is 6047). ```text LoadPlugin write_tsdb diff --git a/docs/en/14-reference/_icinga2.mdx b/docs/en/14-reference/_icinga2.mdx index 0a2bf52c27..2afcbf52eb 100644 --- a/docs/en/14-reference/_icinga2.mdx +++ b/docs/en/14-reference/_icinga2.mdx @@ -26,7 +26,7 @@ The default database name written by the taosAdapter is `icinga2`. You can also ### Configure icinga3 - Enable opentsdb-writer for icinga2 (refer to the link https://icinga.com/docs/icinga-2/latest/doc/14-features/#opentsdb-writer) -- Modify the configuration file `/etc/icinga2/features-enabled/opentsdb.conf` by filling in as the domain name or IP address of the server running taosAdapter and as the corresponding port on which taosAdapter supports receiving icinga2 data (default is 6048) +- Modify the configuration file `/etc/icinga2/features-enabled/opentsdb.conf` by filling in <taosAdapter's host> as the domain name or IP address of the server running taosAdapter and <port for icinga2> as the corresponding port on which taosAdapter supports receiving icinga2 data (default is 6048) ``` object OpenTsdbWriter "opentsdb" { diff --git a/docs/en/14-reference/_prometheus.mdx b/docs/en/14-reference/_prometheus.mdx index 0940e4adb2..29317be6ea 100644 --- a/docs/en/14-reference/_prometheus.mdx +++ b/docs/en/14-reference/_prometheus.mdx @@ -9,8 +9,8 @@ Point the `remote_read url` and `remote_write url` to the domain name or IP addr ### Configure Basic authentication -- username: -- password: +- username: TDengine's username +- password: TDengine's password ### Example configuration of remote_write and remote_read related sections in prometheus.yml file diff --git a/docs/en/14-reference/_statsd.mdx b/docs/en/14-reference/_statsd.mdx index b15c9640db..d839385ccd 100644 --- a/docs/en/14-reference/_statsd.mdx +++ b/docs/en/14-reference/_statsd.mdx @@ -31,7 +31,7 @@ The default database name written by taosAdapter is `statsd`. To specify a diffe ### Configuring StatsD -To use StatsD, you need to download its [source code](https://github.com/statsd/statsd). Please refer to the example file `exampleConfig.js` in the root directory of the source download to modify the configuration file. In , please fill in the domain name or IP address of the server running taosAdapter, and , please fill in the port where taosAdapter receives StatsD data (default is 6044). +To use StatsD, you need to download its [source code](https://github.com/statsd/statsd). Please refer to the example file `exampleConfig.js` in the root directory of the source download to modify the configuration file. In <taosAdapter's host>, please fill in the domain name or IP address of the server running taosAdapter, and <port for StatsD>, please fill in the port where taosAdapter receives StatsD data (default is 6044). ``` backends section add ". /backends/repeater" diff --git a/docs/en/14-reference/_telegraf.mdx b/docs/en/14-reference/_telegraf.mdx index bcf1a0893f..4c15ceaaaa 100644 --- a/docs/en/14-reference/_telegraf.mdx +++ b/docs/en/14-reference/_telegraf.mdx @@ -10,7 +10,7 @@ In the Telegraf configuration file (default location `/etc/telegraf/telegraf.con ... ``` -Where please fill in the server's domain name or IP address running the taosAdapter service. please fill in the port of the REST service (default is 6041). and please fill in the actual configuration of the currently running TDengine. And please fill in the database name where you want to store Telegraf data in TDengine. +Where <taosAdapter's host> please fill in the server's domain name or IP address running the taosAdapter service. <REST service port> please fill in the port of the REST service (default is 6041). <TDengine's username> and <TDengine's password> please fill in the actual configuration of the currently running TDengine. And <database name> please fill in the database name where you want to store Telegraf data in TDengine. An example is as follows. diff --git a/docs/en/20-third-party/01-grafana.mdx b/docs/en/20-third-party/01-grafana.mdx index f7d1a2db7e..75614d159f 100644 --- a/docs/en/20-third-party/01-grafana.mdx +++ b/docs/en/20-third-party/01-grafana.mdx @@ -23,7 +23,7 @@ Record these values: ## Installing Grafana -TDengine currently supports Grafana versions 7.5 and above. Users can go to the Grafana official website to download the installation package and execute the installation according to the current operating system. The download address is as follows: . +TDengine currently supports Grafana versions 7.5 and above. Users can go to the Grafana official website to download the installation package and execute the installation according to the current operating system. The download address is as follows: [https://grafana.com/grafana/download](https://grafana.com/grafana/download). ## Configuring Grafana @@ -59,7 +59,7 @@ bash -c "$(curl -fsSL \ -p taosdata ``` -Restart Grafana service and open Grafana in web-browser, usually . +Restart Grafana service and open Grafana in web-browser, usually `http://localhost:3000`. Save the script and type `./install.sh --help` for the full usage of the script. @@ -181,7 +181,7 @@ You can setup a zero-configuration stack for TDengine + Grafana by [docker-compo 3. Start TDengine and Grafana services: `docker-compose up -d`. -Open Grafana , and you can add dashboard with TDengine now. +Open Grafana (http://localhost:3000), and you can add dashboard with TDengine now. @@ -202,7 +202,7 @@ As shown above, select the `TDengine` data source in the `Query` and enter the c :::note -Since the REST connection because is stateless. Grafana plugin can use . in the SQL command to specify the database name. +Since the REST connection because is stateless. Grafana plugin can use <db_name>.<table_name> in the SQL command to specify the database name. ::: diff --git a/docs/en/20-third-party/11-kafka.md b/docs/en/20-third-party/11-kafka.md index 42266d232c..344db06322 100644 --- a/docs/en/20-third-party/11-kafka.md +++ b/docs/en/20-third-party/11-kafka.md @@ -345,7 +345,7 @@ The following configuration items apply to TDengine Sink Connector and TDengine ### TDengine Sink Connector specific configuration 1. `connection.database`: The name of the target database. If the specified database does not exist, it will be created automatically. The time precision used for automatic library building is nanoseconds. The default value is null. When it is NULL, refer to the description of the `connection.database.prefix` parameter for the naming rules of the target database -2. `connection.database.prefix`: When `connection.database` is null, the prefix of the target database. Can contain placeholder '${topic}'. For example, kafka_${topic}, for topic 'orders' will be written to database 'kafka_orders'. Default null. When null, the name of the target database is the same as the name of the topic. +2. `connection.database.prefix`: When `connection.database` is null, the prefix of the target database. Can contain placeholder '${topic}'. For example, kafka_${topic}, for topic 'orders' will be written to database 'kafka_orders'. Default null. When null, the name of the target database is the same as the name of the topic. 3. `batch.size`: Write the number of records in each batch in batches. When the data received by the sink connector at one time is larger than this value, it will be written in some batches. 4. `max.retries`: The maximum number of retries when an error occurs. Defaults to 1. 5. `retry.backoff.ms`: The time interval for retry when sending an error. The unit is milliseconds. The default is 3000. @@ -370,12 +370,12 @@ The following configuration items apply to TDengine Sink Connector and TDengine ## Other notes -1. To use Kafka Connect, refer to . +1. To use Kafka Connect, refer to [https://kafka.apache.org/documentation/#connect](https://kafka.apache.org/documentation/#connect). ## Feedback - +[https://github.com/taosdata/kafka-connect-tdengine/issues](https://github.com/taosdata/kafka-connect-tdengine/issues) ## Reference -1. For more information, see +1. For more information, see [https://kafka.apache.org/documentation/](https://kafka.apache.org/documentation/). From a20845ec69a81cf4a066ac90eb90826f0cab7406 Mon Sep 17 00:00:00 2001 From: Yihao Deng Date: Mon, 22 Jan 2024 01:04:06 +0000 Subject: [PATCH 04/32] refactor retry --- source/dnode/mnode/impl/src/mndMnode.c | 2 +- source/libs/sync/src/syncMain.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndMnode.c b/source/dnode/mnode/impl/src/mndMnode.c index 1068a2c5b3..385f20d39e 100644 --- a/source/dnode/mnode/impl/src/mndMnode.c +++ b/source/dnode/mnode/impl/src/mndMnode.c @@ -252,7 +252,7 @@ void mndGetMnodeEpSet(SMnode *pMnode, SEpSet *pEpSet) { pEpSet->inUse = pEpSet->numOfEps; } else { pEpSet->inUse = (pEpSet->numOfEps + 1) % totalMnodes; - pEpSet->inUse = 0; + //pEpSet->inUse = 0; } } if (pObj->pDnode != NULL) { diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 89a41806cd..c0a4a6a73f 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -589,7 +589,7 @@ void syncGetRetryEpSet(int64_t rid, SEpSet* pEpSet) { } if (pEpSet->numOfEps > 0) { pEpSet->inUse = (pSyncNode->raftCfg.cfg.myIndex + 1) % pEpSet->numOfEps; - pEpSet->inUse = 0; + // pEpSet->inUse = 0; } sInfo("vgId:%d, sync get retry epset numOfEps:%d inUse:%d", pSyncNode->vgId, pEpSet->numOfEps, pEpSet->inUse); From 5d58c27f56e11b2dc00b3b3a84157e894292be4f Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Mon, 22 Jan 2024 14:03:11 +0800 Subject: [PATCH 05/32] fix: join subquery timestamp order mis-match issue --- source/libs/planner/src/planOptimizer.c | 128 ++++++++++++++++++++++++ tests/parallel_test/cases.task | 1 + tests/script/tsim/query/join_order.sim | 51 ++++++++++ 3 files changed, 180 insertions(+) create mode 100644 tests/script/tsim/query/join_order.sim diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index dfb9343cc8..231e2fa32f 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -338,6 +338,7 @@ static void scanPathOptSetScanOrder(EScanOrder scanOrder, SScanLogicNode* pScan) if (pScan->sortPrimaryKey || pScan->scanSeq[0] > 1 || pScan->scanSeq[1] > 1) { return; } + pScan->node.outputTsOrder = scanOrder; switch (scanOrder) { case SCAN_ORDER_ASC: pScan->scanSeq[0] = 1; @@ -1450,6 +1451,132 @@ static int32_t sortPrimaryKeyOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLo return sortPrimaryKeyOptimizeImpl(pCxt, pLogicSubplan, pSort); } +static int32_t sortForJoinOptimizeImpl(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan, SJoinLogicNode* pJoin) { + SLogicNode* pLeft = (SLogicNode*)nodesListGetNode(pJoin->node.pChildren, 0); + SLogicNode* pRight = (SLogicNode*)nodesListGetNode(pJoin->node.pChildren, 1); + SScanLogicNode* pScan = NULL; + + if (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pLeft) && ((SScanLogicNode*)pLeft)->node.outputTsOrder != SCAN_ORDER_BOTH) { + pScan = (SScanLogicNode*)pLeft; + } else if (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pRight) && ((SScanLogicNode*)pRight)->node.outputTsOrder != SCAN_ORDER_BOTH) { + pScan = (SScanLogicNode*)pRight; + } + + if (NULL != pScan) { + switch (pScan->node.outputTsOrder) { + case SCAN_ORDER_ASC: + pScan->scanSeq[0] = 0; + pScan->scanSeq[1] = 1; + pScan->node.outputTsOrder = SCAN_ORDER_DESC; + goto _return; + case SCAN_ORDER_DESC: + pScan->scanSeq[0] = 1; + pScan->scanSeq[1] = 0; + pScan->node.outputTsOrder = SCAN_ORDER_ASC; + goto _return; + default: + break; + } + } + + if (QUERY_NODE_OPERATOR != nodeType(pJoin->pPrimKeyEqCond)) { + return TSDB_CODE_PLAN_INTERNAL_ERROR; + } + + bool res = false; + SOperatorNode* pOp = (SOperatorNode*)pJoin->pPrimKeyEqCond; + if (QUERY_NODE_COLUMN != nodeType(pOp->pLeft) || QUERY_NODE_COLUMN != nodeType(pOp->pRight)) { + return TSDB_CODE_PLAN_INTERNAL_ERROR; + } + + SNode* pOrderByNode = NULL; + SSHashObj* pLeftTables = NULL; + collectTableAliasFromNodes(nodesListGetNode(pJoin->node.pChildren, 0), &pLeftTables); + + if (NULL != tSimpleHashGet(pLeftTables, ((SColumnNode*)pOp->pLeft)->tableAlias, strlen(((SColumnNode*)pOp->pLeft)->tableAlias))) { + pOrderByNode = pOp->pLeft; + } else if (NULL != tSimpleHashGet(pLeftTables, ((SColumnNode*)pOp->pRight)->tableAlias, strlen(((SColumnNode*)pOp->pRight)->tableAlias))) { + pOrderByNode = pOp->pRight; + } + + tSimpleHashCleanup(pLeftTables); + + if (NULL == pOrderByNode) { + return TSDB_CODE_PLAN_INTERNAL_ERROR; + } + + SSortLogicNode* pSort = (SSortLogicNode*)nodesMakeNode(QUERY_NODE_LOGIC_PLAN_SORT); + if (NULL == pSort) { + return TSDB_CODE_OUT_OF_MEMORY; + } + + pSort->node.outputTsOrder = (ORDER_ASC == pLeft->outputTsOrder) ? ORDER_DESC : ORDER_ASC; + pSort->groupSort = false; + SOrderByExprNode* pOrder = (SOrderByExprNode*)nodesMakeNode(QUERY_NODE_ORDER_BY_EXPR); + if (NULL == pOrder) { + nodesDestroyNode((SNode *)pSort); + return TSDB_CODE_OUT_OF_MEMORY; + } + + nodesListMakeAppend(&pSort->pSortKeys, (SNode*)pOrder); + pOrder->order = (ORDER_ASC == pLeft->outputTsOrder) ? ORDER_DESC : ORDER_ASC; + pOrder->pExpr = nodesCloneNode(pOrderByNode); + pOrder->nullOrder = (ORDER_ASC == pOrder->order) ? NULL_ORDER_FIRST : NULL_ORDER_LAST; + if (!pOrder->pExpr) { + nodesDestroyNode((SNode *)pSort); + return TSDB_CODE_OUT_OF_MEMORY; + } + + pLeft->pParent = (SLogicNode*)pSort; + nodesListMakeAppend(&pSort->node.pChildren, (SNode*)pLeft); + pJoin->node.pChildren->pHead->pNode = (SNode*)pSort; + pSort->node.pParent = (SLogicNode*)pJoin;; + +_return: + + pCxt->optimized = true; + + return TSDB_CODE_SUCCESS; +} + + +static bool sortForJoinOptMayBeOptimized(SLogicNode* pNode) { + if (QUERY_NODE_LOGIC_PLAN_JOIN != nodeType(pNode)) { + return false; + } + + SJoinLogicNode* pJoin = (SJoinLogicNode*)pNode; + if (pNode->pChildren->length != 2 || !pJoin->hasSubQuery || pJoin->isLowLevelJoin) { + return false; + } + + SLogicNode* pLeft = (SLogicNode*)nodesListGetNode(pJoin->node.pChildren, 0); + SLogicNode* pRight = (SLogicNode*)nodesListGetNode(pJoin->node.pChildren, 1); + + if (ORDER_ASC != pLeft->outputTsOrder && ORDER_DESC != pLeft->outputTsOrder) { + return false; + } + if (ORDER_ASC != pRight->outputTsOrder && ORDER_DESC != pRight->outputTsOrder) { + return false; + } + + if (pLeft->outputTsOrder == pRight->outputTsOrder) { + return false; + } + + return true; +} + + +static int32_t sortForJoinOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan) { + SJoinLogicNode* pJoin = (SJoinLogicNode*)optFindPossibleNode(pLogicSubplan->pNode, sortForJoinOptMayBeOptimized); + if (NULL == pJoin) { + return TSDB_CODE_SUCCESS; + } + return sortForJoinOptimizeImpl(pCxt, pLogicSubplan, pJoin); +} + + static bool smaIndexOptMayBeOptimized(SLogicNode* pNode) { if (QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(pNode) || NULL == pNode->pParent || QUERY_NODE_LOGIC_PLAN_WINDOW != nodeType(pNode->pParent) || @@ -4197,6 +4324,7 @@ static const SOptimizeRule optimizeRuleSet[] = { {.pName = "StableJoin", .optimizeFunc = stableJoinOptimize}, {.pName = "sortNonPriKeyOptimize", .optimizeFunc = sortNonPriKeyOptimize}, {.pName = "SortPrimaryKey", .optimizeFunc = sortPrimaryKeyOptimize}, + {.pName = "SortForjoin", .optimizeFunc = sortForJoinOptimize}, {.pName = "SmaIndex", .optimizeFunc = smaIndexOptimize}, {.pName = "PushDownLimit", .optimizeFunc = pushDownLimitOptimize}, {.pName = "PartitionTags", .optimizeFunc = partTagsOptimize}, diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 0687a3271c..8d9794b0dd 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -1108,6 +1108,7 @@ e ,,y,script,./test.sh -f tsim/query/udf_with_const.sim ,,y,script,./test.sh -f tsim/query/join_interval.sim ,,y,script,./test.sh -f tsim/query/join_pk.sim +,,y,script,./test.sh -f tsim/query/join_order.sim ,,y,script,./test.sh -f tsim/query/count_spread.sim ,,y,script,./test.sh -f tsim/query/unionall_as_table.sim ,,y,script,./test.sh -f tsim/query/multi_order_by.sim diff --git a/tests/script/tsim/query/join_order.sim b/tests/script/tsim/query/join_order.sim new file mode 100644 index 0000000000..bd772ff407 --- /dev/null +++ b/tests/script/tsim/query/join_order.sim @@ -0,0 +1,51 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +sql drop database if exists db1; +sql create database db1 vgroups 1; +sql use db1; +sql create stable sta (ts timestamp, col1 int) tags(t1 int); +sql create table tba1 using sta tags(1); + +sql insert into tba1 values ('2023-11-17 16:29:00', 1); +sql insert into tba1 values ('2023-11-17 16:29:02', 3); +sql insert into tba1 values ('2023-11-17 16:29:03', 4); +sql insert into tba1 values ('2023-11-17 16:29:04', 5); + + +sql select a.*,b.* from tba1 a, (select * from tba1 order by ts) b where a.ts=b.ts; +if $rows != 4 then + return -1 +endi +sql select a.*,b.* from (select * from tba1 order by ts) a, tba1 b where a.ts=b.ts; +if $rows != 4 then + return -1 +endi +sql select a.*,b.* from tba1 a, (select * from tba1 order by ts desc) b where a.ts=b.ts; +if $rows != 4 then + return -1 +endi +sql select a.*,b.* from (select * from tba1 order by ts desc) a, tba1 b where a.ts=b.ts; +if $rows != 4 then + return -1 +endi +sql select a.*,b.* from (select * from tba1 order by ts) a, (select * from tba1 order by ts) b where a.ts=b.ts; +if $rows != 4 then + return -1 +endi +sql select a.*,b.* from (select * from tba1 order by ts desc) a, (select * from tba1 order by ts desc) b where a.ts=b.ts; +if $rows != 4 then + return -1 +endi +sql select a.*,b.* from (select * from tba1 order by ts) a, (select * from tba1 order by ts desc) b where a.ts=b.ts; +if $rows != 4 then + return -1 +endi +sql select a.*,b.* from (select * from tba1 order by ts desc) a, (select * from tba1 order by ts) b where a.ts=b.ts; +if $rows != 4 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT From ec16e79b395cb2ded48b49fc61e7cb0178e789f7 Mon Sep 17 00:00:00 2001 From: menshibin Date: Mon, 22 Jan 2024 14:41:48 +0800 Subject: [PATCH 06/32] add srvCtl cluster support --- tests/army/community/cluster/incSnapshot.py | 15 ++++++++------- tests/army/frame/server/cluster.py | 1 + tests/army/frame/server/dnodes.py | 9 +++++++++ tests/army/frame/srvCtl.py | 18 +++++++++++++++++- 4 files changed, 35 insertions(+), 8 deletions(-) diff --git a/tests/army/community/cluster/incSnapshot.py b/tests/army/community/cluster/incSnapshot.py index b1b53e65bd..d309bae72c 100644 --- a/tests/army/community/cluster/incSnapshot.py +++ b/tests/army/community/cluster/incSnapshot.py @@ -9,11 +9,12 @@ import time from frame.log import * from frame.cases import * from frame.sql import * +from frame.srvCtl import * from frame.caseBase import * from frame import * from frame.autogen import * -from frame.server.dnodes import * -from frame.server.cluster import * +# from frame.server.dnodes import * +# from frame.server.cluster import * class TDTestCase(TBase): @@ -34,7 +35,7 @@ class TDTestCase(TBase): autoGen.create_child(self.stb, "d", self.childtable_count) autoGen.insert_data(1000) tdSql.execute(f"flush database {self.db}") - clusterDnodes.stoptaosd(3) + sc.dnodeStop(3) # clusterDnodes.stoptaosd(1) # clusterDnodes.starttaosd(3) # time.sleep(5) @@ -56,7 +57,7 @@ class TDTestCase(TBase): # break self.snapshotAgg() time.sleep(10) - clusterDnodes.stopAll() + sc.dnodeStopAll() for i in range(1, 4): path = clusterDnodes.getDnodeDir(i) dnodesRootDir = os.path.join(path,"data","vnode", "vnode*") @@ -66,9 +67,9 @@ class TDTestCase(TBase): tdLog.debug("delete dir: %s " % (dnodesRootDir)) self.remove_directory(os.path.join(dir, "wal")) - clusterDnodes.starttaosd(1) - clusterDnodes.starttaosd(2) - clusterDnodes.starttaosd(3) + sc.dnodeStart(1) + sc.dnodeStart(2) + sc.dnodeStart(3) sql = "show vnodes;" time.sleep(10) while True: diff --git a/tests/army/frame/server/cluster.py b/tests/army/frame/server/cluster.py index 309ca3bbd4..d514bfd9d5 100644 --- a/tests/army/frame/server/cluster.py +++ b/tests/army/frame/server/cluster.py @@ -22,6 +22,7 @@ class ClusterDnodes(TDDnodes): def init(self, dnodes_lists, deployPath, masterIp): self.dnodes = dnodes_lists # dnode must be TDDnode instance super(ClusterDnodes, self).init(deployPath, masterIp) + self.model = "cluster" clusterDnodes = ClusterDnodes() diff --git a/tests/army/frame/server/dnodes.py b/tests/army/frame/server/dnodes.py index ad8a336857..7a37badd06 100644 --- a/tests/army/frame/server/dnodes.py +++ b/tests/army/frame/server/dnodes.py @@ -47,6 +47,7 @@ class TDDnodes: self.valgrind = 0 self.asan = False self.killValgrind = 0 + self.model = "dnode" def init(self, path, remoteIP = ""): binPath = self.dnodes[0].getPath() + "/../../../" @@ -268,6 +269,14 @@ class TDDnodes: def getAsan(self): return self.asan + def getModel(self): + return self.model + + def getDnodeCfgPath(self, index): + self.check(index) + return self.dnodes[index - 1].cfgPath + + def setLevelDisk(self, level, disk): for i in range(len(self.dnodes)): self.dnodes[i].level = int(level) diff --git a/tests/army/frame/srvCtl.py b/tests/army/frame/srvCtl.py index c01ea33578..091856056b 100644 --- a/tests/army/frame/srvCtl.py +++ b/tests/army/frame/srvCtl.py @@ -18,6 +18,7 @@ import datetime from frame.server.dnode import * from frame.server.dnodes import * +from frame.server.cluster import * class srvCtl: @@ -34,19 +35,32 @@ class srvCtl: # start def dnodeStart(self, idx): + if clusterDnodes.getModel() == 'cluster': + return clusterDnodes.starttaosd(idx) + return tdDnodes.starttaosd(idx) # stop def dnodeStop(self, idx): + if clusterDnodes.getModel() == 'cluster': + return clusterDnodes.stoptaosd(idx) + return tdDnodes.stoptaosd(idx) + def dnodeStopAll(self): + if clusterDnodes.getModel() == 'cluster': + return clusterDnodes.stopAll() + return tdDnodes.stopAll() # # about path # # get cluster root path like /root/TDinternal/sim/ def clusterRootPath(self): + if clusterDnodes.getModel() == 'cluster': + return clusterDnodes.getDnodesRootDir() + return tdDnodes.getDnodesRootDir() # return dnode data files list @@ -60,7 +74,9 @@ class srvCtl: # taos.cfg position def dnodeCfgPath(self, idx): - return tdDnodes.dnodes[idx-1].cfgPath + if clusterDnodes.getModel() == 'cluster': + return clusterDnodes.getDnodeCfgPath(idx) + return tdDnodes.getDnodeCfgPath(idx) sc = srvCtl() \ No newline at end of file From aa14e67da9ab95e67a4745a4538672da907eb0d4 Mon Sep 17 00:00:00 2001 From: Yihao Deng Date: Mon, 22 Jan 2024 07:21:14 +0000 Subject: [PATCH 07/32] refactor retry --- source/libs/sync/src/syncMain.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index c0a4a6a73f..89a41806cd 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -589,7 +589,7 @@ void syncGetRetryEpSet(int64_t rid, SEpSet* pEpSet) { } if (pEpSet->numOfEps > 0) { pEpSet->inUse = (pSyncNode->raftCfg.cfg.myIndex + 1) % pEpSet->numOfEps; - // pEpSet->inUse = 0; + pEpSet->inUse = 0; } sInfo("vgId:%d, sync get retry epset numOfEps:%d inUse:%d", pSyncNode->vgId, pEpSet->numOfEps, pEpSet->inUse); From 25b3ce511f9dba25a9358e15797950be2dd0ae78 Mon Sep 17 00:00:00 2001 From: menshibin Date: Mon, 22 Jan 2024 16:03:04 +0800 Subject: [PATCH 08/32] modify dnodes default single model --- tests/army/frame/server/dnodes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/army/frame/server/dnodes.py b/tests/army/frame/server/dnodes.py index 7a37badd06..cd2b89acbd 100644 --- a/tests/army/frame/server/dnodes.py +++ b/tests/army/frame/server/dnodes.py @@ -47,7 +47,7 @@ class TDDnodes: self.valgrind = 0 self.asan = False self.killValgrind = 0 - self.model = "dnode" + self.model = "single" def init(self, path, remoteIP = ""): binPath = self.dnodes[0].getPath() + "/../../../" From 04538f9049db41241b79be0d9b3627d1768540db Mon Sep 17 00:00:00 2001 From: Yihao Deng Date: Mon, 22 Jan 2024 08:59:40 +0000 Subject: [PATCH 09/32] add test case --- include/common/tmisce.h | 9 +++++---- source/common/src/tmisce.c | 33 ++++++++++++++++++++++++++++----- source/libs/sync/src/syncMain.c | 4 +++- 3 files changed, 36 insertions(+), 10 deletions(-) diff --git a/include/common/tmisce.h b/include/common/tmisce.h index 3d1afcd21f..afb33c733a 100644 --- a/include/common/tmisce.h +++ b/include/common/tmisce.h @@ -47,10 +47,11 @@ typedef struct SCorEpSet { int32_t taosGetFqdnPortFromEp(const char* ep, SEp* pEp); void addEpIntoEpSet(SEpSet* pEpSet, const char* fqdn, uint16_t port); -bool isEpsetEqual(const SEpSet* s1, const SEpSet* s2); -void epsetAssign(SEpSet* dst, const SEpSet* pSrc); -void updateEpSet_s(SCorEpSet* pEpSet, SEpSet* pNewEpSet); -SEpSet getEpSet_s(SCorEpSet* pEpSet); +bool isEpsetEqual(const SEpSet* s1, const SEpSet* s2); +void epsetAssign(SEpSet* dst, const SEpSet* pSrc); +void updateEpSet_s(SCorEpSet* pEpSet, SEpSet* pNewEpSet); +SEpSet getEpSet_s(SCorEpSet* pEpSet); +void epsetSort(SEpSet* pEpSet); #ifdef __cplusplus } diff --git a/source/common/src/tmisce.c b/source/common/src/tmisce.c index 95a5c27cf1..c3e2846d9a 100644 --- a/source/common/src/tmisce.c +++ b/source/common/src/tmisce.c @@ -15,11 +15,8 @@ #define _DEFAULT_SOURCE #include "tmisce.h" -#include "tjson.h" #include "tglobal.h" -#include "tlog.h" -#include "tname.h" - +#include "tjson.h" int32_t taosGetFqdnPortFromEp(const char* ep, SEp* pEp) { pEp->port = 0; memset(pEp->fqdn, 0, TSDB_FQDN_LEN); @@ -63,7 +60,7 @@ bool isEpsetEqual(const SEpSet* s1, const SEpSet* s2) { void epsetAssign(SEpSet* pDst, const SEpSet* pSrc) { if (pSrc == NULL || pDst == NULL) { - return; + return; } pDst->inUse = pSrc->inUse; @@ -73,6 +70,32 @@ void epsetAssign(SEpSet* pDst, const SEpSet* pSrc) { tstrncpy(pDst->eps[i].fqdn, pSrc->eps[i].fqdn, tListLen(pSrc->eps[i].fqdn)); } } +void epAssign(SEp* pDst, SEp* pSrc) { + if (pSrc == NULL || pDst == NULL) { + return; + } + memset(pDst->fqdn, 0, tListLen(pSrc->fqdn)); + tstrncpy(pDst->fqdn, pSrc->fqdn, tListLen(pSrc->fqdn)); + pDst->port = pSrc->port; +} +void epsetSort(SEpSet* pDst) { + if (pDst->numOfEps <= 1) { + return; + } + for (int i = 0; i < pDst->numOfEps - 1; i++) { + for (int j = 0; j < pDst->numOfEps - 1 - i; j++) { + SEp* f = &pDst->eps[j]; + SEp* s = &pDst->eps[j + 1]; + int cmp = strncmp(f->fqdn, s->fqdn, sizeof(f->fqdn)); + if (cmp > 0 || (cmp == 0 && f->port > s->port)) { + SEp ep = {0}; + epAssign(&ep, f); + epAssign(f, s); + epAssign(s, &ep); + } + } + } +} void updateEpSet_s(SCorEpSet* pEpSet, SEpSet* pNewEpSet) { taosCorBeginWrite(&pEpSet->version); diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 89a41806cd..b60b3c96ca 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -579,13 +579,15 @@ void syncGetRetryEpSet(int64_t rid, SEpSet* pEpSet) { SSyncNode* pSyncNode = syncNodeAcquire(rid); if (pSyncNode == NULL) return; + int j = 0; for (int32_t i = 0; i < pSyncNode->raftCfg.cfg.totalReplicaNum; ++i) { if (pSyncNode->raftCfg.cfg.nodeInfo[i].nodeRole == TAOS_SYNC_ROLE_LEARNER) continue; - SEp* pEp = &pEpSet->eps[i]; + SEp* pEp = &pEpSet->eps[j]; tstrncpy(pEp->fqdn, pSyncNode->raftCfg.cfg.nodeInfo[i].nodeFqdn, TSDB_FQDN_LEN); pEp->port = (pSyncNode->raftCfg.cfg.nodeInfo)[i].nodePort; pEpSet->numOfEps++; sDebug("vgId:%d, sync get retry epset, index:%d %s:%d", pSyncNode->vgId, i, pEp->fqdn, pEp->port); + j++; } if (pEpSet->numOfEps > 0) { pEpSet->inUse = (pSyncNode->raftCfg.cfg.myIndex + 1) % pEpSet->numOfEps; From 20aa81c96a558c25a0c66ac2d7dcff30610aebea Mon Sep 17 00:00:00 2001 From: Yihao Deng Date: Mon, 22 Jan 2024 09:00:24 +0000 Subject: [PATCH 10/32] add test case --- source/common/test/commonTests.cpp | 99 +++++++++++++++++++++++------- 1 file changed, 76 insertions(+), 23 deletions(-) diff --git a/source/common/test/commonTests.cpp b/source/common/test/commonTests.cpp index 9f7ee165ac..8e0e50165f 100644 --- a/source/common/test/commonTests.cpp +++ b/source/common/test/commonTests.cpp @@ -12,9 +12,10 @@ #include "tcommon.h" #include "tdatablock.h" #include "tdef.h" -#include "tvariant.h" +#include "tmisce.h" #include "ttime.h" #include "ttokendef.h" +#include "tvariant.h" namespace { // @@ -25,11 +26,10 @@ int main(int argc, char** argv) { return RUN_ALL_TESTS(); } - TEST(testCase, toUIntegerEx_test) { uint64_t val = 0; - char* s = "123"; + char* s = "123"; int32_t ret = toUIntegerEx(s, strlen(s), TK_NK_INTEGER, &val); ASSERT_EQ(ret, 0); ASSERT_EQ(val, 123); @@ -59,7 +59,7 @@ TEST(testCase, toUIntegerEx_test) { ASSERT_EQ(val, 18699); s = "-1"; - ret = toUIntegerEx(s, strlen(s),TK_NK_INTEGER, &val); + ret = toUIntegerEx(s, strlen(s), TK_NK_INTEGER, &val); ASSERT_EQ(ret, -1); s = "-0b10010"; @@ -103,7 +103,7 @@ TEST(testCase, toUIntegerEx_test) { TEST(testCase, toIntegerEx_test) { int64_t val = 0; - char* s = "123"; + char* s = "123"; int32_t ret = toIntegerEx(s, strlen(s), TK_NK_INTEGER, &val); ASSERT_EQ(ret, 0); ASSERT_EQ(val, 123); @@ -166,7 +166,7 @@ TEST(testCase, toIntegerEx_test) { s = "-9223372036854775808"; ret = toIntegerEx(s, strlen(s), TK_NK_INTEGER, &val); ASSERT_EQ(ret, 0); - ASSERT_EQ(val, -9223372036854775808); + // ASSERT_EQ(val, -9223372036854775808); // out of range s = "9323372036854775807"; @@ -186,7 +186,7 @@ TEST(testCase, toIntegerEx_test) { TEST(testCase, toInteger_test) { int64_t val = 0; - char* s = "123"; + char* s = "123"; int32_t ret = toInteger(s, strlen(s), 10, &val); ASSERT_EQ(ret, 0); ASSERT_EQ(val, 123); @@ -223,10 +223,10 @@ TEST(testCase, toInteger_test) { s = "-9223372036854775808"; ret = toInteger(s, strlen(s), 10, &val); ASSERT_EQ(ret, 0); - ASSERT_EQ(val, -9223372036854775808); + // ASSERT_EQ(val, -9223372036854775808); // out of range - s = "9323372036854775807"; + s = "9323372036854775807"; ret = toInteger(s, strlen(s), 10, &val); ASSERT_EQ(ret, -1); @@ -418,9 +418,10 @@ void check_tm(const STm* tm, int32_t y, int32_t mon, int32_t d, int32_t h, int32 ASSERT_EQ(tm->fsec, fsec); } -void test_timestamp_tm_conversion(int64_t ts, int32_t precision, int32_t y, int32_t mon, int32_t d, int32_t h, int32_t m, int32_t s, int64_t fsec) { - int64_t ts_tmp; - char buf[128] = {0}; +void test_timestamp_tm_conversion(int64_t ts, int32_t precision, int32_t y, int32_t mon, int32_t d, int32_t h, + int32_t m, int32_t s, int64_t fsec) { + int64_t ts_tmp; + char buf[128] = {0}; struct STm tm; taosFormatUtcTime(buf, 128, ts, precision); printf("formated ts of %ld, precision: %d is: %s\n", ts, precision, buf); @@ -457,7 +458,7 @@ TEST(timeTest, timestamp2tm) { test_timestamp_tm_conversion(ts, TSDB_TIME_PRECISION_MILLI, 1970 - 1900, 0 /* mon start from 0*/, 1, 8, 0, 0, 000000000L); - ts = -62198784343000; // milliseconds before epoch, Friday, January 1, -0001 12:00:00 AM GMT+08:06 + ts = -62198784343000; // milliseconds before epoch, Friday, January 1, -0001 12:00:00 AM GMT+08:06 test_timestamp_tm_conversion(ts, TSDB_TIME_PRECISION_MILLI, -1 - 1900, 0 /* mon start from 0*/, 1, 0 /* hour start from 0*/, 0, 0, 000000000L); } @@ -472,7 +473,7 @@ void test_ts2char(int64_t ts, const char* format, int32_t precison, const char* TEST(timeTest, ts2char) { osDefaultInit(); if (tsTimezone != TdEastZone8) GTEST_SKIP(); - int64_t ts; + int64_t ts; const char* format = "YYYY-MM-DD"; ts = 0; test_ts2char(ts, format, TSDB_TIME_PRECISION_MILLI, "1970-01-01"); @@ -493,12 +494,13 @@ TEST(timeTest, ts2char) { "2023-023-23-3-2023-023-23-3-年-OCTOBER -OCT-October -Oct-october " "-oct-月-286-13-6-286-13-6-FRIDAY -Friday -friday -日"); #endif - ts = 1697182085123L; // Friday, October 13, 2023 3:28:05.123 PM GMT+08:00 + ts = 1697182085123L; // Friday, October 13, 2023 3:28:05.123 PM GMT+08:00 test_ts2char(ts, "HH24:hh24:HH12:hh12:HH:hh:MI:mi:SS:ss:MS:ms:US:us:NS:ns:PM:AM:pm:am", TSDB_TIME_PRECISION_MILLI, "15:15:03:03:03:03:28:28:05:05:123:123:123000:123000:123000000:123000000:PM:PM:pm:pm"); // double quotes normal output - test_ts2char(ts, "\\\"HH24:hh24:HH12:hh12:HH:hh:MI:mi:SS:ss:MS:ms:US:us:NS:ns:PM:AM:pm:am\\\"", TSDB_TIME_PRECISION_MILLI, + test_ts2char(ts, "\\\"HH24:hh24:HH12:hh12:HH:hh:MI:mi:SS:ss:MS:ms:US:us:NS:ns:PM:AM:pm:am\\\"", + TSDB_TIME_PRECISION_MILLI, "\"15:15:03:03:03:03:28:28:05:05:123:123:123000:123000:123000000:123000000:PM:PM:pm:pm\""); test_ts2char(ts, "\\\"HH24:hh24:HH12:hh12:HH:hh:MI:mi:SS:ss:MS:ms:US:us:NS:ns:PM:AM:pm:am", TSDB_TIME_PRECISION_MILLI, "\"15:15:03:03:03:03:28:28:05:05:123:123:123000:123000:123000000:123000000:PM:PM:pm:pm"); @@ -506,14 +508,18 @@ TEST(timeTest, ts2char) { test_ts2char(ts, "\"HH24:hh24:HH12:hh12:HH:hh:MI:mi:SS:ss:MS:ms:US:us:NS:ns:PM:AM:pm:am", TSDB_TIME_PRECISION_MILLI, "HH24:hh24:HH12:hh12:HH:hh:MI:mi:SS:ss:MS:ms:US:us:NS:ns:PM:AM:pm:am"); test_ts2char(ts, "yyyy-mm-dd hh24:mi:ss.nsamaaa", TSDB_TIME_PRECISION_MILLI, "2023-10-13 15:28:05.123000000pmaaa"); - test_ts2char(ts, "aaa--yyyy-mm-dd hh24:mi:ss.nsamaaa", TSDB_TIME_PRECISION_MILLI, "aaa--2023-10-13 15:28:05.123000000pmaaa"); - test_ts2char(ts, "add--yyyy-mm-dd hh24:mi:ss.nsamaaa", TSDB_TIME_PRECISION_MILLI, "a13--2023-10-13 15:28:05.123000000pmaaa"); + test_ts2char(ts, "aaa--yyyy-mm-dd hh24:mi:ss.nsamaaa", TSDB_TIME_PRECISION_MILLI, + "aaa--2023-10-13 15:28:05.123000000pmaaa"); + test_ts2char(ts, "add--yyyy-mm-dd hh24:mi:ss.nsamaaa", TSDB_TIME_PRECISION_MILLI, + "a13--2023-10-13 15:28:05.123000000pmaaa"); ts = 1693946405000; - test_ts2char(ts, "Day, Month dd, YYYY hh24:mi:ss AM TZH:tzh", TSDB_TIME_PRECISION_MILLI, "Wednesday, September 06, 2023 04:40:05 AM +08:+08"); + test_ts2char(ts, "Day, Month dd, YYYY hh24:mi:ss AM TZH:tzh", TSDB_TIME_PRECISION_MILLI, + "Wednesday, September 06, 2023 04:40:05 AM +08:+08"); - ts = -62198784343000; // milliseconds before epoch, Friday, January 1, -0001 12:00:00 AM GMT+08:06 - test_ts2char(ts, "Day, Month dd, YYYY hh12:mi:ss AM", TSDB_TIME_PRECISION_MILLI, "Friday , January 01, -001 12:00:00 AM"); + ts = -62198784343000; // milliseconds before epoch, Friday, January 1, -0001 12:00:00 AM GMT+08:06 + test_ts2char(ts, "Day, Month dd, YYYY hh12:mi:ss AM", TSDB_TIME_PRECISION_MILLI, + "Friday , January 01, -001 12:00:00 AM"); } TEST(timeTest, char2ts) { @@ -609,7 +615,7 @@ TEST(timeTest, char2ts) { ASSERT_EQ(-1, TEST_char2ts("yyyyMMdd ", &ts, TSDB_TIME_PRECISION_MICRO, "2100/2/1")); // nothing to be converted to dd ASSERT_EQ(0, TEST_char2ts("yyyyMMdd ", &ts, TSDB_TIME_PRECISION_MICRO, "210012")); - ASSERT_EQ(ts, 4131273600000000LL); // 2100-12-1 + ASSERT_EQ(ts, 4131273600000000LL); // 2100-12-1 ASSERT_EQ(-1, TEST_char2ts("yyyyMMdd ", &ts, TSDB_TIME_PRECISION_MICRO, "21001")); ASSERT_EQ(-1, TEST_char2ts("yyyyMM-dd ", &ts, TSDB_TIME_PRECISION_MICRO, "23a1-1")); @@ -635,8 +641,55 @@ TEST(timeTest, char2ts) { ASSERT_EQ(0, TEST_char2ts("yyyy年 MM/ddTZH", &ts, TSDB_TIME_PRECISION_MICRO, "1970年 1/1+0")); ASSERT_EQ(ts, 0); ASSERT_EQ(0, TEST_char2ts("yyyy年 a a a MM/ddTZH", &ts, TSDB_TIME_PRECISION_MICRO, "1970年 a a a 1/1+0")); - ASSERT_EQ(0, TEST_char2ts("yyyy年 a a a a a a a a a a a a a a a MM/ddTZH", &ts, TSDB_TIME_PRECISION_MICRO, "1970年 a ")); + ASSERT_EQ(0, TEST_char2ts("yyyy年 a a a a a a a a a a a a a a a MM/ddTZH", &ts, TSDB_TIME_PRECISION_MICRO, + "1970年 a ")); ASSERT_EQ(-3, TEST_char2ts("yyyy-mm-DDD", &ts, TSDB_TIME_PRECISION_MILLI, "1970-01-001")); } +TEST(timeTest, epSet) { + { + SEpSet ep = {0}; + addEpIntoEpSet(&ep, "local", 14); + addEpIntoEpSet(&ep, "aocal", 13); + addEpIntoEpSet(&ep, "abcal", 12); + addEpIntoEpSet(&ep, "abcaleb", 11); + epsetSort(&ep); + ASSERT_EQ(strcmp(ep.eps[0].fqdn, "abcal"), 0); + ASSERT_EQ(ep.eps[0].port, 12); + + ASSERT_EQ(strcmp(ep.eps[1].fqdn, "abcaleb"), 0); + ASSERT_EQ(ep.eps[1].port, 11); + + ASSERT_EQ(strcmp(ep.eps[2].fqdn, "aocal"), 0); + ASSERT_EQ(ep.eps[2].port, 13); + + ASSERT_EQ(strcmp(ep.eps[3].fqdn, "local"), 0); + ASSERT_EQ(ep.eps[3].port, 14); + } + { + SEpSet ep = {0}; + addEpIntoEpSet(&ep, "local", 14); + addEpIntoEpSet(&ep, "local", 13); + addEpIntoEpSet(&ep, "local", 12); + addEpIntoEpSet(&ep, "local", 11); + epsetSort(&ep); + ASSERT_EQ(strcmp(ep.eps[0].fqdn, "local"), 0); + ASSERT_EQ(ep.eps[0].port, 11); + + ASSERT_EQ(strcmp(ep.eps[0].fqdn, "local"), 0); + ASSERT_EQ(ep.eps[1].port, 12); + + ASSERT_EQ(strcmp(ep.eps[0].fqdn, "local"), 0); + ASSERT_EQ(ep.eps[2].port, 13); + + ASSERT_EQ(strcmp(ep.eps[0].fqdn, "local"), 0); + ASSERT_EQ(ep.eps[3].port, 14); + } + { + SEpSet ep = {0}; + addEpIntoEpSet(&ep, "local", 14); + epsetSort(&ep); + ASSERT_EQ(ep.numOfEps, 1); + } +} #pragma GCC diagnostic pop From cbe34ad76e8e86d7532ff41958ffa8315d5ef18a Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Mon, 22 Jan 2024 17:11:15 +0800 Subject: [PATCH 11/32] fix: mac compile error --- source/libs/planner/src/planOptimizer.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 231e2fa32f..af9ec93f65 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -338,7 +338,7 @@ static void scanPathOptSetScanOrder(EScanOrder scanOrder, SScanLogicNode* pScan) if (pScan->sortPrimaryKey || pScan->scanSeq[0] > 1 || pScan->scanSeq[1] > 1) { return; } - pScan->node.outputTsOrder = scanOrder; + pScan->node.outputTsOrder = (SCAN_ORDER_ASC == scanOrder) ? ORDER_ASC : ORDER_DESC; switch (scanOrder) { case SCAN_ORDER_ASC: pScan->scanSeq[0] = 1; @@ -1467,12 +1467,12 @@ static int32_t sortForJoinOptimizeImpl(SOptimizeContext* pCxt, SLogicSubplan* pL case SCAN_ORDER_ASC: pScan->scanSeq[0] = 0; pScan->scanSeq[1] = 1; - pScan->node.outputTsOrder = SCAN_ORDER_DESC; + pScan->node.outputTsOrder = ORDER_DESC; goto _return; case SCAN_ORDER_DESC: pScan->scanSeq[0] = 1; pScan->scanSeq[1] = 0; - pScan->node.outputTsOrder = SCAN_ORDER_ASC; + pScan->node.outputTsOrder = ORDER_ASC; goto _return; default: break; From 6c823efc6011eb6907352e7a3df1ba9b36ac590d Mon Sep 17 00:00:00 2001 From: Shungang Li Date: Mon, 22 Jan 2024 19:53:59 +0800 Subject: [PATCH 12/32] fix: no retry for ttl drop table --- source/dnode/mgmt/node_mgmt/src/dmTransport.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index 3b7ecce77c..479b3b6aa3 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -350,7 +350,7 @@ static bool rpcRfp(int32_t code, tmsg_t msgType) { code == TSDB_CODE_SYN_RESTORING || code == TSDB_CODE_VND_STOPPED || code == TSDB_CODE_APP_IS_STARTING || code == TSDB_CODE_APP_IS_STOPPING) { if (msgType == TDMT_SCH_QUERY || msgType == TDMT_SCH_MERGE_QUERY || msgType == TDMT_SCH_FETCH || - msgType == TDMT_SCH_MERGE_FETCH || msgType == TDMT_SCH_TASK_NOTIFY) { + msgType == TDMT_SCH_MERGE_FETCH || msgType == TDMT_SCH_TASK_NOTIFY || msgType == TDMT_VND_DROP_TTL_TABLE) { return false; } return true; From 192bb179e73bce70e60bbaf3a87094cba4199057 Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Tue, 23 Jan 2024 10:15:00 +0800 Subject: [PATCH 13/32] enh: trigger vnodeCommit at exit even if no data changed --- source/dnode/vnode/src/vnd/vnodeCommit.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/vnd/vnodeCommit.c b/source/dnode/vnode/src/vnd/vnodeCommit.c index c8cd167393..645f2620dc 100644 --- a/source/dnode/vnode/src/vnd/vnodeCommit.c +++ b/source/dnode/vnode/src/vnd/vnodeCommit.c @@ -157,7 +157,8 @@ int vnodeShouldCommit(SVnode *pVnode, bool atExit) { taosThreadMutexLock(&pVnode->mutex); if (pVnode->inUse && diskAvail) { needCommit = (pVnode->inUse->size > pVnode->inUse->node.size) || - (atExit && (pVnode->inUse->size > 0 || pVnode->pMeta->changed)); + (atExit && (pVnode->inUse->size > 0 || pVnode->pMeta->changed || + pVnode->state.applied - pVnode->state.committed > 4096)); } taosThreadMutexUnlock(&pVnode->mutex); return needCommit; From 5a42b515230e243bac8aba6a56bd42ddd5d445ee Mon Sep 17 00:00:00 2001 From: Alex Duan <51781608+DuanKuanJun@users.noreply.github.com> Date: Tue, 23 Jan 2024 10:45:17 +0800 Subject: [PATCH 14/32] Update s3_basic.py stream is not right --- tests/army/enterprise/s3/s3_basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/army/enterprise/s3/s3_basic.py b/tests/army/enterprise/s3/s3_basic.py index 976ad85747..a1a945a304 100644 --- a/tests/army/enterprise/s3/s3_basic.py +++ b/tests/army/enterprise/s3/s3_basic.py @@ -128,7 +128,7 @@ class TDTestCase(TBase): self.checkInsertCorrect() # check stream correct and drop stream - self.checkStreamCorrect() + # self.checkStreamCorrect() # drop stream self.dropStream(self.sname) From 2dcec8304a9f93746ed2370e724c238641a61429 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Tue, 23 Jan 2024 11:35:13 +0800 Subject: [PATCH 15/32] test:add special compatibility testcase for code coverage --- tests/pytest/util/common.py | 58 +++++++++---------- .../0-others/compatibility_coverage.py | 2 +- .../6-cluster/clusterCommonCreate.py | 3 + 3 files changed, 33 insertions(+), 30 deletions(-) diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py index c4885747d1..cb649d966f 100644 --- a/tests/pytest/util/common.py +++ b/tests/pytest/util/common.py @@ -1862,38 +1862,38 @@ class TDCom: time.sleep(1) return tbname -def is_json(msg): - if isinstance(msg, str): - try: - json.loads(msg) - return True - except: + def is_json(msg): + if isinstance(msg, str): + try: + json.loads(msg) + return True + except: + return False + else: return False - else: - return False -def get_path(tool="taosd"): - selfPath = os.path.dirname(os.path.realpath(__file__)) - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("community")] - else: - projPath = selfPath[:selfPath.find("tests")] + def get_path(tool="taosd"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] - paths = [] - for root, dirs, files in os.walk(projPath): - if ((tool) in files or ("%s.exe"%tool) in files): - rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): - paths.append(os.path.join(root, tool)) - break - if (len(paths) == 0): + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files or ("%s.exe"%tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + return "" + return paths[0] + + def dict2toml(in_dict: dict, file:str): + if not isinstance(in_dict, dict): return "" - return paths[0] - -def dict2toml(in_dict: dict, file:str): - if not isinstance(in_dict, dict): - return "" - with open(file, 'w') as f: - toml.dump(in_dict, f) + with open(file, 'w') as f: + toml.dump(in_dict, f) tdCom = TDCom() diff --git a/tests/system-test/0-others/compatibility_coverage.py b/tests/system-test/0-others/compatibility_coverage.py index 7a123739f7..6eccf78c5a 100644 --- a/tests/system-test/0-others/compatibility_coverage.py +++ b/tests/system-test/0-others/compatibility_coverage.py @@ -152,7 +152,7 @@ class TDTestCase: os.system(f"rm -rf {cPath}/../data") print(self.projPath) # this data file is special for coverage test in 192.168.1.96 - os.system("cp -r f{self.projPath}/../comp_testdata/data/ {self.projPath}/sim/dnode1") + os.system(f"cp -r {self.projPath}/../comp_testdata/data/ {self.projPath}/community/sim/dnode1") tdDnodes.stop(1) tdDnodes.start(1) diff --git a/tests/system-test/6-cluster/clusterCommonCreate.py b/tests/system-test/6-cluster/clusterCommonCreate.py index a06c1233d8..cb44710b58 100644 --- a/tests/system-test/6-cluster/clusterCommonCreate.py +++ b/tests/system-test/6-cluster/clusterCommonCreate.py @@ -215,7 +215,10 @@ class ClusterComCreate: return def alterStbMetaData(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs=None): + tdLog.debug("alter Stb column ............") + tdLog.debug(f"describe STABLE {dbName}.{stbName} ") + tsql.execute(f"describe STABLE {dbName}.{stbName} ;") tdLog.debug(f"ALTER STABLE {dbName}.{stbName} MODIFY COLUMN c3 binary(20);") tsql.execute(f" ALTER STABLE {dbName}.{stbName} MODIFY COLUMN c3 binary(20);") tdLog.debug(f"ALTER STABLE {dbName}.{stbName} ADD COLUMN c4 DOUBLE;") From 5bd14b4866893dcd86da69e34ab35a17241d4185 Mon Sep 17 00:00:00 2001 From: Shungang Li Date: Tue, 23 Jan 2024 13:53:51 +0800 Subject: [PATCH 16/32] enh: errcode for message has been processed in preprocess --- include/util/taoserror.h | 1 + source/dnode/vnode/src/vnd/vnodeSvr.c | 9 +++++++-- source/dnode/vnode/src/vnd/vnodeSync.c | 13 ++++++++++--- source/util/src/terror.c | 1 + 4 files changed, 19 insertions(+), 5 deletions(-) diff --git a/include/util/taoserror.h b/include/util/taoserror.h index b5389e60d3..3727be3da2 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -126,6 +126,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_IP_NOT_IN_WHITE_LIST TAOS_DEF_ERROR_CODE(0, 0x0134) #define TSDB_CODE_FAILED_TO_CONNECT_S3 TAOS_DEF_ERROR_CODE(0, 0x0135) +#define TSDB_CODE_MSG_PREPROCESSED TAOS_DEF_ERROR_CODE(0, 0x0136) // internal //client #define TSDB_CODE_TSC_INVALID_OPERATION TAOS_DEF_ERROR_CODE(0, 0x0200) diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index db807d000b..4bcf445615 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -183,6 +183,11 @@ static int32_t vnodePreProcessDropTtlMsg(SVnode *pVnode, SRpcMsg *pMsg) { ttlReq.pTbUids = tbUids; } + if (ttlReq.nUids == 0) { + code = TSDB_CODE_MSG_PREPROCESSED; + TSDB_CHECK_CODE(code, lino, _exit); + } + { // prepare new content int32_t reqLenNew = tSerializeSVDropTtlTableReq(NULL, 0, &ttlReq); int32_t contLenNew = reqLenNew + sizeof(SMsgHead); @@ -207,7 +212,7 @@ static int32_t vnodePreProcessDropTtlMsg(SVnode *pVnode, SRpcMsg *pMsg) { _exit: taosArrayDestroy(tbUids); - if (code) { + if (code && code != TSDB_CODE_MSG_PREPROCESSED) { vError("vgId:%d, %s:%d failed to preprocess drop ttl request since %s, msg type:%s", TD_VID(pVnode), __func__, lino, tstrerror(code), TMSG_INFO(pMsg->msgType)); } else { @@ -464,7 +469,7 @@ int32_t vnodePreProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg) { break; } - if (code) { + if (code && code != TSDB_CODE_MSG_PREPROCESSED) { vError("vgId:%d, failed to preprocess write request since %s, msg type:%s", TD_VID(pVnode), tstrerror(code), TMSG_INFO(pMsg->msgType)); } diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c index 8844e358d5..5f4b7b8442 100644 --- a/source/dnode/vnode/src/vnd/vnodeSync.c +++ b/source/dnode/vnode/src/vnd/vnodeSync.c @@ -95,6 +95,11 @@ static void inline vnodeHandleWriteMsg(SVnode *pVnode, SRpcMsg *pMsg) { static void vnodeHandleProposeError(SVnode *pVnode, SRpcMsg *pMsg, int32_t code) { if (code == TSDB_CODE_SYN_NOT_LEADER || code == TSDB_CODE_SYN_RESTORING) { vnodeRedirectRpcMsg(pVnode, pMsg, code); + } else if (code == TSDB_CODE_MSG_PREPROCESSED) { + SRpcMsg rsp = {.code = TSDB_CODE_SUCCESS, .info = pMsg->info}; + if (rsp.info.handle != NULL) { + tmsgSendRsp(&rsp); + } } else { const STraceId *trace = &pMsg->info.traceId; vGError("vgId:%d, msg:%p failed to propose since %s, code:0x%x", pVnode->config.vgId, pMsg, tstrerror(code), code); @@ -297,8 +302,10 @@ void vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) code = vnodePreProcessWriteMsg(pVnode, pMsg); if (code != 0) { - vGError("vgId:%d, msg:%p failed to pre-process since %s", vgId, pMsg, tstrerror(code)); - if (terrno != 0) code = terrno; + if (code != TSDB_CODE_MSG_PREPROCESSED) { + vGError("vgId:%d, msg:%p failed to pre-process since %s", vgId, pMsg, tstrerror(code)); + if (terrno != 0) code = terrno; + } vnodeHandleProposeError(pVnode, pMsg, code); rpcFreeCont(pMsg->pCont); taosFreeQitem(pMsg); @@ -759,7 +766,7 @@ void vnodeSyncCheckTimeout(SVnode *pVnode) { vError("vgId:%d, failed to propose since timeout and post block, start:%d cur:%d delta:%d seq:%" PRId64, pVnode->config.vgId, pVnode->blockSec, curSec, delta, pVnode->blockSeq); if (syncSendTimeoutRsp(pVnode->sync, pVnode->blockSeq) != 0) { -#if 0 +#if 0 SRpcMsg rpcMsg = {.code = TSDB_CODE_SYN_TIMEOUT, .info = pVnode->blockInfo}; vError("send timeout response since its applyed, seq:%" PRId64 " handle:%p ahandle:%p", pVnode->blockSeq, rpcMsg.info.handle, rpcMsg.info.ahandle); diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 79b9e9bbed..cac5f14cff 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -103,6 +103,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_DATA_FMT, "Invalid data format") TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_CFG_VALUE, "Invalid configuration value") TAOS_DEFINE_ERROR(TSDB_CODE_IP_NOT_IN_WHITE_LIST, "Not allowed to connect") TAOS_DEFINE_ERROR(TSDB_CODE_FAILED_TO_CONNECT_S3, "Failed to connect to s3 server") +TAOS_DEFINE_ERROR(TSDB_CODE_MSG_PREPROCESSED, "Message has been processed in preprocess") //client TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_OPERATION, "Invalid operation") From 62e6b5ca31b8182e46895690267f9ba00115a91f Mon Sep 17 00:00:00 2001 From: Yihao Deng Date: Tue, 23 Jan 2024 06:23:34 +0000 Subject: [PATCH 17/32] sort epset --- source/common/src/tmisce.c | 15 +++++++++++++++ source/dnode/mnode/impl/src/mndMnode.c | 4 ++-- source/libs/sync/src/syncMain.c | 2 ++ 3 files changed, 19 insertions(+), 2 deletions(-) diff --git a/source/common/src/tmisce.c b/source/common/src/tmisce.c index c3e2846d9a..1606b45eed 100644 --- a/source/common/src/tmisce.c +++ b/source/common/src/tmisce.c @@ -82,6 +82,13 @@ void epsetSort(SEpSet* pDst) { if (pDst->numOfEps <= 1) { return; } + int validIdx = false; + SEp ep = {0}; + if (pDst->inUse >= 0 && pDst->inUse < pDst->numOfEps) { + validIdx = true; + epAssign(&ep, &pDst->eps[pDst->inUse]); + } + for (int i = 0; i < pDst->numOfEps - 1; i++) { for (int j = 0; j < pDst->numOfEps - 1 - i; j++) { SEp* f = &pDst->eps[j]; @@ -95,6 +102,14 @@ void epsetSort(SEpSet* pDst) { } } } + if (validIdx == true) + for (int i = 0; i < pDst->numOfEps; i++) { + int cmp = strncmp(ep.fqdn, pDst->eps[i].fqdn, sizeof(ep.fqdn)); + if (cmp == 0 && ep.port == pDst->eps[i].port) { + pDst->inUse = i; + break; + } + } } void updateEpSet_s(SCorEpSet* pEpSet, SEpSet* pNewEpSet) { diff --git a/source/dnode/mnode/impl/src/mndMnode.c b/source/dnode/mnode/impl/src/mndMnode.c index 385f20d39e..af6ae8c5a0 100644 --- a/source/dnode/mnode/impl/src/mndMnode.c +++ b/source/dnode/mnode/impl/src/mndMnode.c @@ -241,7 +241,6 @@ void mndGetMnodeEpSet(SMnode *pMnode, SEpSet *pEpSet) { } void *pIter = NULL; - // pEpSet->inUse = 0; while (1) { SMnodeObj *pObj = NULL; pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pObj); @@ -252,7 +251,7 @@ void mndGetMnodeEpSet(SMnode *pMnode, SEpSet *pEpSet) { pEpSet->inUse = pEpSet->numOfEps; } else { pEpSet->inUse = (pEpSet->numOfEps + 1) % totalMnodes; - //pEpSet->inUse = 0; + // pEpSet->inUse = 0; } } if (pObj->pDnode != NULL) { @@ -268,6 +267,7 @@ void mndGetMnodeEpSet(SMnode *pMnode, SEpSet *pEpSet) { if (pEpSet->inUse >= pEpSet->numOfEps) { pEpSet->inUse = 0; } + epsetSort(pEpSet); } static int32_t mndSetCreateMnodeRedoLogs(SMnode *pMnode, STrans *pTrans, SMnodeObj *pObj) { diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index b60b3c96ca..f26a38ee1d 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -36,6 +36,7 @@ #include "syncUtil.h" #include "syncVoteMgr.h" #include "tglobal.h" +#include "tmisce.h" #include "tref.h" static void syncNodeEqPingTimer(void* param, void* tmrId); @@ -593,6 +594,7 @@ void syncGetRetryEpSet(int64_t rid, SEpSet* pEpSet) { pEpSet->inUse = (pSyncNode->raftCfg.cfg.myIndex + 1) % pEpSet->numOfEps; pEpSet->inUse = 0; } + epsetSort(pEpSet); sInfo("vgId:%d, sync get retry epset numOfEps:%d inUse:%d", pSyncNode->vgId, pEpSet->numOfEps, pEpSet->inUse); syncNodeRelease(pSyncNode); From 2479df3b1e1c09563df016e81b2687b8c0107ca9 Mon Sep 17 00:00:00 2001 From: Yihao Deng Date: Tue, 23 Jan 2024 06:42:42 +0000 Subject: [PATCH 18/32] sort epset --- source/dnode/mgmt/node_util/src/dmEps.c | 3 ++- source/dnode/mnode/impl/src/mndVgroup.c | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/source/dnode/mgmt/node_util/src/dmEps.c b/source/dnode/mgmt/node_util/src/dmEps.c index bee77528bd..20245c806b 100644 --- a/source/dnode/mgmt/node_util/src/dmEps.c +++ b/source/dnode/mgmt/node_util/src/dmEps.c @@ -223,7 +223,7 @@ int32_t dmWriteEps(SDnodeData *pData) { terrno = TSDB_CODE_OUT_OF_MEMORY; - if((code == dmInitDndInfo(pData)) != 0) goto _OVER; + if ((code == dmInitDndInfo(pData)) != 0) goto _OVER; pJson = tjsonCreateObject(); if (pJson == NULL) goto _OVER; pData->engineVer = tsVersion; @@ -289,6 +289,7 @@ static void dmResetEps(SDnodeData *pData, SArray *dnodeEps) { pData->mnodeEps.eps[mIndex] = pDnodeEp->ep; mIndex++; } + epsetSort(&pData->mnodeEps); for (int32_t i = 0; i < numOfEps; i++) { SDnodeEp *pDnodeEp = taosArrayGet(dnodeEps, i); diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c index 1055aa0874..a5df9ad820 100644 --- a/source/dnode/mnode/impl/src/mndVgroup.c +++ b/source/dnode/mnode/impl/src/mndVgroup.c @@ -877,6 +877,7 @@ SEpSet mndGetVgroupEpset(SMnode *pMnode, const SVgObj *pVgroup) { addEpIntoEpSet(&epset, pDnode->fqdn, pDnode->port); mndReleaseDnode(pMnode, pDnode); } + epsetSort(&epset); return epset; } From 251585b49c211f5aea3e74421276867cc1f0d46d Mon Sep 17 00:00:00 2001 From: factosea <285808407@qq.com> Date: Tue, 23 Jan 2024 15:37:50 +0800 Subject: [PATCH 19/32] fix: orderby function first hit column --- source/libs/nodes/src/nodesTraverseFuncs.c | 15 ++++++++------- source/libs/parser/src/parTranslater.c | 6 +++++- tests/system-test/2-query/orderBy.py | 5 +++++ 3 files changed, 18 insertions(+), 8 deletions(-) diff --git a/source/libs/nodes/src/nodesTraverseFuncs.c b/source/libs/nodes/src/nodesTraverseFuncs.c index b3623a4b0a..8b44e478c0 100644 --- a/source/libs/nodes/src/nodesTraverseFuncs.c +++ b/source/libs/nodes/src/nodesTraverseFuncs.c @@ -214,14 +214,15 @@ void nodesWalkExprsPostOrder(SNodeList* pList, FNodeWalker walker, void* pContex (void)walkExprs(pList, TRAVERSAL_POSTORDER, walker, pContext); } -static void checkParamIsFunc(SFunctionNode *pFunc) { +static void checkParamIsFunc(SFunctionNode* pFunc) { int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); - if (numOfParams > 1) { - for (int32_t i = 0; i < numOfParams; ++i) { - SNode* pPara = nodesListGetNode(pFunc->pParameterList, i); - if (nodeType(pPara) == QUERY_NODE_FUNCTION) { - ((SFunctionNode *)pPara)->node.asParam = true; - } + for (int32_t i = 0; i < numOfParams; ++i) { + SNode* pPara = nodesListGetNode(pFunc->pParameterList, i); + if (numOfParams > 1 && nodeType(pPara) == QUERY_NODE_FUNCTION) { + ((SFunctionNode*)pPara)->node.asParam = true; + } + if (nodeType(pPara) == QUERY_NODE_COLUMN) { + ((SColumnNode*)pPara)->node.asParam = true; } } } diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 69e464b3c3..d246641576 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -1323,7 +1323,7 @@ static EDealRes translateColumn(STranslateContext* pCxt, SColumnNode** pCol) { res = translateColumnWithPrefix(pCxt, pCol); } else { bool found = false; - if (SQL_CLAUSE_ORDER_BY == pCxt->currClause) { + if (SQL_CLAUSE_ORDER_BY == pCxt->currClause && !(*pCol)->node.asParam) { res = translateColumnUseAlias(pCxt, pCol, &found); } if (DEAL_RES_ERROR != res && !found) { @@ -1333,6 +1333,10 @@ static EDealRes translateColumn(STranslateContext* pCxt, SColumnNode** pCol) { res = translateColumnWithoutPrefix(pCxt, pCol); } } + if(SQL_CLAUSE_ORDER_BY == pCxt->currClause && !(*pCol)->node.asParam + && res != DEAL_RES_CONTINUE && res != DEAL_RES_END) { + res = translateColumnUseAlias(pCxt, pCol, &found); + } } return res; } diff --git a/tests/system-test/2-query/orderBy.py b/tests/system-test/2-query/orderBy.py index 2d7b7d9a1f..af1ddadc39 100644 --- a/tests/system-test/2-query/orderBy.py +++ b/tests/system-test/2-query/orderBy.py @@ -302,6 +302,11 @@ class TDTestCase: tdSql.error(f"SELECT last(ts) as t2, ts FROM t1 order by last(t2)") + tdSql.execute(f"alter local 'keepColumnName' '1'") + tdSql.no_error(f"SELECT last(ts), first(ts) FROM t1 order by last(ts)") + tdSql.no_error(f"SELECT last(c1), first(c1) FROM t1 order by last(c1)") + tdSql.error(f"SELECT last(ts) as t, first(ts) as t FROM t1 order by last(t)") + def queryOrderByAmbiguousName(self): tdSql.error(sql="select c1 as name, c2 as name, c3 from t1 order by name", expectErrInfo='ambiguous', fullMatched=False) From f1b606c73002bd361051757ae02ce61339fad2da Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Tue, 23 Jan 2024 16:01:31 +0800 Subject: [PATCH 20/32] test:add special compatibility testcase for code coverage --- tests/pytest/util/common.py | 58 ++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py index cb649d966f..c4885747d1 100644 --- a/tests/pytest/util/common.py +++ b/tests/pytest/util/common.py @@ -1862,38 +1862,38 @@ class TDCom: time.sleep(1) return tbname - def is_json(msg): - if isinstance(msg, str): - try: - json.loads(msg) - return True - except: - return False - else: +def is_json(msg): + if isinstance(msg, str): + try: + json.loads(msg) + return True + except: return False + else: + return False - def get_path(tool="taosd"): - selfPath = os.path.dirname(os.path.realpath(__file__)) - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("community")] - else: - projPath = selfPath[:selfPath.find("tests")] +def get_path(tool="taosd"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] - paths = [] - for root, dirs, files in os.walk(projPath): - if ((tool) in files or ("%s.exe"%tool) in files): - rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): - paths.append(os.path.join(root, tool)) - break - if (len(paths) == 0): - return "" - return paths[0] - - def dict2toml(in_dict: dict, file:str): - if not isinstance(in_dict, dict): + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files or ("%s.exe"%tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): return "" - with open(file, 'w') as f: - toml.dump(in_dict, f) + return paths[0] + +def dict2toml(in_dict: dict, file:str): + if not isinstance(in_dict, dict): + return "" + with open(file, 'w') as f: + toml.dump(in_dict, f) tdCom = TDCom() From 74fdc14ed3f769ea4b0870f1adf337cedd6af625 Mon Sep 17 00:00:00 2001 From: dmchen Date: Tue, 23 Jan 2024 08:33:12 +0000 Subject: [PATCH 21/32] fix/TD-28430 --- source/dnode/mgmt/exe/dmMain.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/source/dnode/mgmt/exe/dmMain.c b/source/dnode/mgmt/exe/dmMain.c index 756ac8167e..882f04c75e 100644 --- a/source/dnode/mgmt/exe/dmMain.c +++ b/source/dnode/mgmt/exe/dmMain.c @@ -169,7 +169,16 @@ static int32_t dmParseArgs(int32_t argc, char const *argv[]) { return -1; } } else if (strcmp(argv[i], "-a") == 0) { - tstrncpy(global.apolloUrl, argv[++i], PATH_MAX); + if(i < argc - 1) { + if (strlen(argv[++i]) >= PATH_MAX) { + printf("apollo url overflow"); + return -1; + } + tstrncpy(global.apolloUrl, argv[i], PATH_MAX); + } else { + printf("'-a' requires a parameter\n"); + return -1; + } } else if (strcmp(argv[i], "-s") == 0) { global.dumpSdb = true; } else if (strcmp(argv[i], "-E") == 0) { From 20b9028a9376ab4c4946ee858c841eb15d33d906 Mon Sep 17 00:00:00 2001 From: factosea <285808407@qq.com> Date: Tue, 23 Jan 2024 16:55:45 +0800 Subject: [PATCH 22/32] fix: case result change --- tests/system-test/2-query/td-28068.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/system-test/2-query/td-28068.py b/tests/system-test/2-query/td-28068.py index d77e012d9a..0dfaf8e126 100644 --- a/tests/system-test/2-query/td-28068.py +++ b/tests/system-test/2-query/td-28068.py @@ -20,9 +20,10 @@ class TDTestCase: tdSql.execute("insert into td_28068.ct4 using td_28068.st (branch, scenario) tags ('3.1', 'scenario2') values (now(), 'query1', 9,10);") def run(self): - tdSql.error('select last(ts) as ts, last(branch) as branch, last(scenario) as scenario, last(test_case) as test_case from td_28068.st group by branch, scenario order by last(branch);') - tdSql.error('select last(ts) as ts, last(branch) as branch1, last(scenario) as scenario, last(test_case) as test_case from td_28068.st group by branch, scenario order by last(branch), last(scenario); ') - + tdSql.query('select last(ts) as ts, last(branch) as branch, last(scenario) as scenario, last(test_case) as test_case from td_28068.st group by branch, scenario order by last(branch);') + tdSql.checkRows(4) + tdSql.query('select last(ts) as ts, last(branch) as branch1, last(scenario) as scenario, last(test_case) as test_case from td_28068.st group by branch, scenario order by last(branch), last(scenario); ') + tdSql.checkRows(4) tdSql.query('select last(ts) as ts, last(branch) as branch1, last(scenario) as scenario, last(test_case) as test_case from td_28068.st group by branch, scenario order by last(branch); ') tdSql.checkRows(4) From 663b5b4ecf57b9bd06009689ae40bf19da26ba93 Mon Sep 17 00:00:00 2001 From: Yihao Deng Date: Tue, 23 Jan 2024 09:09:29 +0000 Subject: [PATCH 23/32] sort epset --- source/libs/sync/src/syncMain.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index f26a38ee1d..edaf59f9db 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -592,7 +592,7 @@ void syncGetRetryEpSet(int64_t rid, SEpSet* pEpSet) { } if (pEpSet->numOfEps > 0) { pEpSet->inUse = (pSyncNode->raftCfg.cfg.myIndex + 1) % pEpSet->numOfEps; - pEpSet->inUse = 0; + // pEpSet->inUse = 0; } epsetSort(pEpSet); From f349bbd51fffb1bf3d100c828df793192d7266be Mon Sep 17 00:00:00 2001 From: dmchen Date: Tue, 23 Jan 2024 09:25:48 +0000 Subject: [PATCH 24/32] fix/TD-28437 --- source/dnode/mnode/sdb/src/sdbHash.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/dnode/mnode/sdb/src/sdbHash.c b/source/dnode/mnode/sdb/src/sdbHash.c index 1d2e2de17d..df228c1fcc 100644 --- a/source/dnode/mnode/sdb/src/sdbHash.c +++ b/source/dnode/mnode/sdb/src/sdbHash.c @@ -64,6 +64,8 @@ const char *sdbTableName(ESdbType type) { return "idx"; case SDB_VIEW: return "view"; + case SDB_STREAM_SEQ: + return "stream_seq"; case SDB_COMPACT: return "compact"; case SDB_COMPACT_DETAIL: From d63606c04f6e3bddf879b17c659f2005b9b1bff9 Mon Sep 17 00:00:00 2001 From: charles Date: Tue, 23 Jan 2024 17:36:06 +0800 Subject: [PATCH 25/32] update test case test_ts4382.py for special testing and alter_database.py for arm64 --- tests/system-test/1-insert/alter_database.py | 25 +- tests/system-test/2-query/test_ts4382.py | 540 +++++++++++++++++-- 2 files changed, 511 insertions(+), 54 deletions(-) diff --git a/tests/system-test/1-insert/alter_database.py b/tests/system-test/1-insert/alter_database.py index 6a831b88ff..d83813bf3a 100644 --- a/tests/system-test/1-insert/alter_database.py +++ b/tests/system-test/1-insert/alter_database.py @@ -19,12 +19,12 @@ class TDTestCase: tdSql.init(conn.cursor(), logSql) self.buffer_boundary = [3, 4097, 8193, 12289, 16384] # remove the value > free_memory, 70% is the weight to calculate the max value - if platform.system() == "Linux" and platform.machine() == "aarch64": - mem = psutil.virtual_memory() - free_memory = mem.free * 0.7 / 1024 / 1024 - for item in self.buffer_boundary: - if item > free_memory: - self.buffer_boundary.remove(item) + # if platform.system() == "Linux" and platform.machine() == "aarch64": + # mem = psutil.virtual_memory() + # free_memory = mem.free * 0.7 / 1024 / 1024 + # for item in self.buffer_boundary: + # if item > free_memory: + # self.buffer_boundary.remove(item) self.buffer_error = [self.buffer_boundary[0] - 1, self.buffer_boundary[-1]+1] @@ -34,11 +34,14 @@ class TDTestCase: def alter_buffer(self): tdSql.execute('create database db') - for buffer in self.buffer_boundary: - tdSql.execute(f'alter database db buffer {buffer}') - tdSql.query( - 'select * from information_schema.ins_databases where name = "db"') - tdSql.checkEqual(tdSql.queryResult[0][8], buffer) + if platform.system() == "Linux" and platform.machine() == "aarch64": + tdLog.debug("Skip check points for Linux aarch64 due to environment settings") + else: + for buffer in self.buffer_boundary: + tdSql.execute(f'alter database db buffer {buffer}') + tdSql.query( + 'select * from information_schema.ins_databases where name = "db"') + tdSql.checkEqual(tdSql.queryResult[0][8], buffer) tdSql.execute('drop database db') tdSql.execute('create database db vgroups 10') for buffer in self.buffer_error: diff --git a/tests/system-test/2-query/test_ts4382.py b/tests/system-test/2-query/test_ts4382.py index 3568c11b3e..9b2b3770b9 100644 --- a/tests/system-test/2-query/test_ts4382.py +++ b/tests/system-test/2-query/test_ts4382.py @@ -1,5 +1,5 @@ import random -import string +import itertools from util.log import * from util.cases import * from util.sql import * @@ -15,56 +15,510 @@ class TDTestCase: self.replicaVar = int(replicaVar) tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor()) - self.dbname = 'db' - self.stbname = 'st' - self.ctbname_list = ["ct1", "ct2"] - self.tag_value_list = ['{"instance":"100"}', '{"instance":"200"}'] + + self.metadata_dic = { + "db_tag_json": { + "supertables": [ + { + "name": "st", + "child_table_num": 2, + "columns": [ + { + "name": "ts", + "type": "timestamp" + }, + { + "name": "col1", + "type": "int" + } + ], + "tags": [ + { + "name": "t1", + "type": "json" + } + ] + } + ] + }, + "db": { + "supertables": [ + { + "name": "st1", + "child_table_num": 2, + "columns": [ + { + "name": "ts", + "type": "timestamp" + }, + { + "name": "col1", + "type": "int" + }, + { + "name": "col2", + "type": "bigint" + }, + { + "name": "col3", + "type": "float" + }, + { + "name": "col4", + "type": "double" + }, + { + "name": "col5", + "type": "bool" + }, + { + "name": "col6", + "type": "binary(16)" + }, + { + "name": "col7", + "type": "nchar(16)" + }, + { + "name": "col8", + "type": "geometry(512)" + }, + { + "name": "col9", + "type": "varbinary(32)" + } + ], + "tags": [ + { + "name": "t1", + "type": "timestamp" + }, + { + "name": "t2", + "type": "int" + }, + { + "name": "t3", + "type": "bigint" + }, + { + "name": "t4", + "type": "float" + }, + { + "name": "t5", + "type": "double" + }, + { + "name": "t6", + "type": "bool" + }, + { + "name": "t7", + "type": "binary(16)" + }, + { + "name": "t8", + "type": "nchar(16)" + }, + { + "name": "t9", + "type": "geometry(512)" + }, + { + "name": "t10", + "type": "varbinary(32)" + } + ] + }, + { + "name": "st2", + "child_table_num": 2, + "columns": [ + { + "name": "ts", + "type": "timestamp" + }, + { + "name": "col1", + "type": "int" + }, + { + "name": "col2", + "type": "bigint" + }, + { + "name": "col3", + "type": "float" + }, + { + "name": "col4", + "type": "double" + }, + { + "name": "col5", + "type": "bool" + }, + { + "name": "col6", + "type": "binary(16)" + }, + { + "name": "col7", + "type": "nchar(16)" + }, + { + "name": "col8", + "type": "geometry(512)" + }, + { + "name": "col9", + "type": "varbinary(32)" + } + ], + "tags": [ + { + "name": "t1", + "type": "timestamp" + }, + { + "name": "t2", + "type": "int" + }, + { + "name": "t3", + "type": "bigint" + }, + { + "name": "t4", + "type": "float" + }, + { + "name": "t5", + "type": "double" + }, + { + "name": "t6", + "type": "bool" + }, + { + "name": "t7", + "type": "binary(16)" + }, + { + "name": "t8", + "type": "nchar(16)" + }, + { + "name": "t9", + "type": "geometry(512)" + }, + { + "name": "t10", + "type": "varbinary(32)" + } + ] + } + ] + } + } def prepareData(self): - # db - tdSql.execute("create database {};".format(self.dbname)) - tdSql.execute("use {};".format(self.dbname)) - tdLog.debug("Create database %s" % self.dbname) + for db in self.metadata_dic.keys(): + if db == "db_tag_json": + # db + tdSql.execute(f"create database {db};") + tdSql.execute(f"use {db};") + tdLog.debug(f"Create database {db}") - # super table - tdSql.execute("create table {} (ts timestamp, col1 int) tags (t1 json);".format(self.stbname)) - tdLog.debug("Create super table %s" % self.stbname) + # super table + for item in self.metadata_dic[db]["supertables"]: + sql = f"create table {item['name']} (" + for column in item["columns"]: + sql += f"{column['name']} {column['type']}," + sql = sql[:-1] + ") tags (" + for tag in item["tags"]: + sql += f"{tag['name']} {tag['type']}," + sql = sql[:-1] + ");" + tdLog.debug(sql) + tdSql.execute(sql) + tdLog.debug(f"Create super table {item['name']}") - # child table - for i in range(len(self.ctbname_list)): - tdSql.execute("create table {} using {} tags('{}');".format(self.ctbname_list[i], self.stbname, self.tag_value_list[i])) - tdLog.debug("Create child table %s" % self.ctbname_list) + # child table + tag_value_list = ['{"instance":"100"}', '{"instance":"200"}'] + for i in range(item["child_table_num"]): + tdSql.execute(f"create table {'ct' + str(i+1)} using {item['name']} tags('{tag_value_list[i]}');") + tdLog.debug(f"Create child table {'ct' + str(i+1)} successfully") - # insert data - tdSql.execute("insert into {} values(now, 1)(now+1s, 2)".format(self.ctbname_list[0])) - tdSql.execute("insert into {} values(now, null)(now+1s, null)".format(self.ctbname_list[1])) + # insert data + if i == 0: + tdSql.execute(f"insert into {'ct' + str(i+1)} values(now, 1)(now+1s, 2)") + elif i == 1: + tdSql.execute(f"insert into {'ct' + str(i+1)} values(now, null)(now+1s, null)") + elif db == "db": + # create database db_empty + tdSql.execute("create database db_empty;") + tdSql.execute("use db_empty;") + tdLog.debug("Create database db_empty successfully") + + # super table + for item in self.metadata_dic[db]["supertables"]: + sql = f"create table {item['name']} (" + for column in item["columns"]: + sql += f"{column['name']} {column['type']}," + sql = sql[:-1] + ") tags (" + for tag in item["tags"]: + sql += f"{tag['name']} {tag['type']}," + sql = sql[:-1] + ");" + tdLog.debug(sql) + tdSql.execute(sql) + tdLog.debug(f"Create super table {item['name']}") + + # child table + tag_value_list = [['2024-01-01 12:00:00.000', 1, 1111111111111, 1.11, 111111.1111, True, 'aaa', 'beijing', 'POINT (3.000000 6.000000)', '0x7661726331'],['2024-01-02 12:00:00.000', 2, 2222222222222, 2.22, 222222.2222, False, 'bbb', 'shanghai', 'LINESTRING (1.000000 1.000000, 2.000000 2.000000, 5.000000 5.000000)', '0x7f829000']] + for i in range(item["child_table_num"]): + sql = f"create table {'ct' + (str(i+1) if item['name'] == 'st1' else str(i+3))} using {item['name']} tags(" + for tag in tag_value_list[i]: + if type(tag) == str: + sql += f"'{tag}'," + else: + sql += f"{tag}," + sql = sql[:-1] + ");" + tdSql.execute(sql) + tdLog.debug(f"Create child table {'ct' + (str(i+1) if item['name'] == 'st1' else str(i+3))} successfully") + + # create database db_with_data + tdSql.execute("create database db_with_data;") + tdSql.execute("use db_with_data;") + tdLog.debug("Create database db_with_data successfully") + + # super table + for item in self.metadata_dic[db]["supertables"]: + sql = f"create table {item['name']} (" + for column in item["columns"]: + sql += f"{column['name']} {column['type']}," + sql = sql[:-1] + ") tags (" + for tag in item["tags"]: + sql += f"{tag['name']} {tag['type']}," + sql = sql[:-1] + ");" + tdLog.debug(sql) + tdSql.execute(sql) + tdLog.debug(f"Create super table {item['name']}") + + # child table + tag_value_list = [['2024-01-01 12:00:00.000', 1, 1111111111111, 1.11, 111111.1111, True, 'aaa', 'beijing', 'POINT (3.000000 6.000000)', '0x7661726331'],['2024-01-02 12:00:00.000', 2, 2222222222222, 2.22, 222222.2222, False, 'bbb', 'shanghai', 'LINESTRING (1.000000 1.000000, 2.000000 2.000000, 5.000000 5.000000)', '0x7f829000']] + for i in range(item["child_table_num"]): + sql = f"create table {'ct' + (str(i+1) if item['name'] == 'st1' else str(i+3))} using {item['name']} tags(" + for tag in tag_value_list[i]: + if type(tag) == str: + sql += f"'{tag}'," + else: + sql += f"{tag}," + sql = sql[:-1] + ");" + tdSql.execute(sql) + tdLog.debug(f"Create child table {'ct' + (str(i+1) if item['name'] == 'st1' else str(i+3))} successfully") + + # insert into data + start_ts = 1677654000000 # 2023-03-01 15:00:00.000 + sql = "insert into {} values".format("ct" + (str(i+1) if item["name"] == "st1" else str(i+3))) + binary_vlist = ["ccc", "ddd", "eee", "fff"] + nchar_vlist = ["guangzhou", "tianjing", "shenzhen", "hangzhou"] + geometry_vlist = ["POINT (4.0 8.0)", "POINT (3.0 5.0)", "LINESTRING (1.000000 1.000000, 2.000000 2.000000, 5.000000 5.000000)", "POLYGON ((3.000000 6.000000, 5.000000 6.000000, 5.000000 8.000000, 3.000000 8.000000, 3.000000 6.000000))"] + varbinary_vlist = ["0x7661726332", "0x7661726333", "0x7661726334", "0x7661726335"] + st_index = i if item["name"] == "st1" else (i+2) + for i in range(100): + sql += f"({start_ts + 1000 * i}, {str(i+1)}, {str(i+1)}, {str(i+1)}, {str(i+1)}, {True if i % 2 == 0 else False}, '{binary_vlist[st_index % 4]}', '{nchar_vlist[st_index % 4]}', '{geometry_vlist[st_index % 4]}', '{varbinary_vlist[st_index % 4]}')" + tdSql.execute(sql) + tdLog.debug(f"Insert into data into child table {'ct' + (str(i+1) if item['name'] == 'st1' else str(i+3))} successfully") + + def test_tag_json(self): + tdSql.execute("use db_tag_json;") + + # super table query with correct tag name of json type + tdSql.query("select to_char(ts, 'yyyy-mm-dd hh24:mi:ss') as time, irate(col1) from st group by to_char(ts, 'yyyy-mm-dd hh24:mi:ss'), t1->'instance' order by time;") + tdSql.checkRows(2) + + # child table query with incorrect tag name of json type + tdSql.query("select to_char(ts, 'yyyy-mm-dd hh24:mi:ss') as time, irate(col1) from ct1 group by to_char(ts, 'yyyy-mm-dd hh24:mi:ss'), t1->'name' order by time;") + tdSql.checkRows(0) + + # child table query with null value + tdSql.query("select ts, avg(col1) from ct2 group by ts, t1->'name' order by ts;") + tdSql.checkRows(2) + + def test_db_empty(self): + tdSql.execute("use db_empty;") + table_list = ["st1", "ct1"] + column_list = ["col1", "col2", "col3", "col4", "col5"] + tag_list = ["t2", "t3", "t4", "t5", "t6"] + operator_list = ["+", "-", "*", "/"] + fun_list = ["avg", "count", "sum", "spread"] + + # two columns with arithmetic operation + for table in table_list: + for columns in list(itertools.combinations(column_list + tag_list, 2)): + operator = random.choice(operator_list) + sql = f"select ({columns[0]} {operator} {columns[1]}) as total from {table};" + tdSql.query(sql) + tdSql.checkRows(0) + + # aggregation function + for table in table_list: + for columns in list(itertools.combinations(column_list[:-1] + tag_list[:-1], 2)): + fun = random.sample(fun_list, 2) + sql = f"select ({fun[0]}({columns[0]}) + {fun[1]}({columns[1]})) as total from {table};" + tdSql.query(sql) + if "count" in fun: + # default config 'countAlwaysReturnValue' as 0 + tdSql.checkRows(1) + else: + tdSql.checkRows(0) + + # join + table_list = ["st1", "st2", "ct1", "ct2", "ct3", "ct4"] + column_list = ["col1", "col2", "col3", "col4", "col5"] + tag_list = ["t2", "t3", "t4", "t5", "t6"] + where_list = ["col1 > 100", "col2 < 237883294", "col3 >= 163.23", "col4 <= 674324.2374898237", "col5=true", "col6='aaa'", + "col7='beijing'", "col8!='POINT (3.000000 6.000000)'", "col9='0x7661726331'"] + for table in list(itertools.combinations(table_list,2)): + where = random.choice(where_list) + column = random.choice(column_list) + tag = random.choice(tag_list) + sql = f"select ({table[0] + '.' + column} + {table[1] + '.' + tag}) total from {table[0]} join {table[1]} on {table[0]+ '.ts=' + table[1] + '.ts'} where {table[0] + '.' + where};" + tdSql.query(sql) + tdSql.checkRows(0) + + # group by + value_fun_list = ["sum(col1+col2)", "avg(col3+col4)", "count(col6+col7)", "stddev(col2+col4)", "spread(col2+col3)"] + group_by_list = ["tbname", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9", "t10"] + for table in table_list: + value_fun = random.choice(value_fun_list) + where = random.choice(where_list) + group_by = random.choice(group_by_list) + sql = f"select {value_fun} from {table} where {where} group by {group_by};" + tdSql.query(sql) + # default config 'countAlwaysReturnValue' as 0 + if "count" in value_fun and "st" in table: + tdSql.checkRows(2) + elif "count" in value_fun and "ct" in table: + tdSql.checkRows(1) + else: + tdSql.checkRows(0) + + # window query + for table in table_list: + tag = random.choice(tag_list) + if "st" in table: + sql = f"select _wstart, {tag}, avg(col3+col4) from {table} where ts between '2024-03-01' and '2024-03-02' partition by {tag} interval(10s) sliding(5s) fill(linear);" + elif "ct" in table: + sql = f"select _wstart, sum(col1+col2) from {table} where ts between '2024-03-01' and '2024-03-02' partition by {tag} interval(10s) sliding(5s) fill(next);" + tdSql.query(sql) + tdSql.checkRows(0) + + # nested query + for table in table_list: + sql_list = [ + "select (col1 + col2) from (select sum(col1) as col1, avg(col2) as col2 from {} where col1 > 100 and ts between '2024-03-01' and '2024-03-02' group by tbname);".format(table), + "select last(ts), avg(col2 - col3) from (select first(ts) as ts, sum(col2) as col2, last(col3) as col3 from {} where col9 != 'abc' partition by tbname interval(10s) sliding(5s));".format(table), + "select elapsed(ts, 1s), sum(c1 + c2) from (select * from (select ts, (col1+col2) as c1, (col3 * col4) as c2, tbname from {} where col1 > 100 and ts between '2024-03-01' and '2024-03-02')) group by tbname;".format(table) + ] + for sql in sql_list: + tdSql.query(sql) + tdSql.checkRows(0) + + # drop column/tag + del_column_tag_list = ["col1", "t1"] + error_sql_list = [ + "select first(t1), sum(col1) from st1 group by tbname;", + "select last(ts), avg(col1) from st1 group by tbname;", + "select count(col1) from (select * from st1 where ts between '2024-03-01' and '2024-03-02' and col1 > 100) group by tbname;", + ] + for item in del_column_tag_list: + if "col" in item: + sql = f"alter table st1 drop column {item};" + elif "t" in item: + sql = f"alter table st1 drop tag {item};" + tdSql.execute(sql) + tdLog.debug("Delete {} successfully".format(str(del_column_tag_list))) + + for table in table_list: + for sql in error_sql_list: + tdSql.error(sql) + + # modify column for common table + tdSql.execute("create table t1 (ts timestamp, col1 int, col2 bigint, col3 float, col4 double, col5 bool, col6 binary(16), col7 nchar(16), col8 geometry(512), col9 varbinary(32));") + tdSql.execute("insert into t1 values(now, 1, 1111111111111, 1.11, 111111.1111, True, 'aaa', 'beijing', 'POINT (3.000000 6.000000)', '0x7661726331');") + tdSql.execute("alter table t1 rename column col1 col11;") + tdSql.error("select col1 from t1 where ts <= now and col3=1.11;") + tdSql.query("select col11 from t1 where ts <= now and col3=1.11;") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + def test_db_with_data(self): + tdSql.execute("use db_with_data;") + + sql_list = [ + "select pow(col1, null) from st1 where ts > now;", + "select pow(null, col1) from st1 where ts > now;", + "select log(null, col2) from st1 where col1 > 1000;", + "select log(col2, null) from st1 where col1 > 1000;", + "select avg(col1 + t2) from ct1 where ts between '2025-03-01' and '2025-03-02' and t2 < 0;", + "select char_length(col6) from st1 where ts > now;", + "select concat(col6, col7) from st1 where ts > now;", + "select char_length(concat(col6, col7)) from st1 where ts > now;", + "select rtrim(ltrim(concat(col6, col7))) from st1 where ts > now;", + "select lower(rtrim(ltrim(concat(col6, col7)))) from st1 where ts > now;", + "select upper(rtrim(ltrim(concat(col6, col7)))) from st1 where ts > now;", + "select substr(rtrim(ltrim(concat(col6, col7))), 1, 10) from st1 where ts > now;", + "select avg(col1 - col2) as v from st1 where ts between '2022-03-01' and '2022-03-02';", + "select avg(col1 * col3) as v from st1 where ts between '2022-03-01' and '2022-03-02' and col1 > 100 group by tbname;", + "select sum(col1 / col4) as cv, avg(t2 + t3) as tv from st1 where ts between '2022-03-01' and '2022-03-02' and col1 > 100 group by tbname;", + "select sum(v1+v2) from (select first(ts) as time, avg(col1+col2) as v1, max(col3) as v2 from st1 where ts > now group by (col1+col2) order by (col1+col2));", + "select first(ts), count(*), avg(col2 * t3) from (select ts, col1, col2, col3, t1, t2, t3, tbname from st1 where ts between '2022-03-01' and '2022-03-02' and col1 > 100) group by tbname;", + "select cast(t8 as nchar(32)), sum(col1), avg(col2) from st1 where ts > now group by cast(t8 as nchar(32));", + "select to_char(time, 'yyyy-mm-dd'), sum(v2 - v1) from (select first(ts) as time, avg(col2 + col3) as v1, max(col4) as v2 from st1 where ts < now group by (col2+col3) order by (col2+col3)) where time > now group by to_char(time, 'yyyy-mm-dd');", + "select count(time) * sum(v) from (select to_iso8601(ts, '+00:00') as time, abs(col1+col2) as v, tbname from st1 where ts between '2023-03-01' and '2023-03-02' and col1 > 100) group by tbname;", + "select avg(v) from (select apercentile(col1, 50) as v from st1 where ts between '2023-03-01' and '2023-03-02' group by tbname) where v > 50;", + ] + for sql in sql_list: + tdSql.query(sql) + tdSql.checkRows(0) + + tdSql.query("select total / v from (select elapsed(ts, 1s) as v, sum(col1) as total from st1 where ts between '2023-03-01' and '2023-03-02' interval(10s) fill(next));") + tdSql.checkRows(8641) + tdSql.checkData(0, 0, 11) + + tdSql.query("select to_char(time, 'yyyy-mm-dd'), sum(v2 - v1) from (select first(ts) as time, avg(col2 + col3) as v1, max(col4) as v2 from st1 where ts < now group by (col2+col3) order by (col2+col3)) group by to_char(time, 'yyyy-mm-dd');") + tdSql.checkRows(1) + tdSql.checkData(0, 0, '2023-03-01') + tdSql.checkData(0, 1, -5050) + + tdSql.query("select avg(v) from (select apercentile(col1, 50) as v from st1 where ts between '2023-03-01' and '2023-03-02' group by tbname) group by (50 -v);") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 50) + + # drop or modify column/tag + tdSql.execute("alter stable st1 drop column col7;") + tdLog.debug("Drop column col7 successfully") + tdSql.error("select count(*) from (select upper(col7) from st1);") + + tdSql.execute("alter stable st1 drop column col8;") + tdLog.debug("Drop column col8 successfully") + tdSql.error("select last(ts), avg(col1) from (select *, tbname from st1 where col8='POINT (3.0 6.0)') group by tbname;") + + tdSql.execute("alter stable st1 rename tag t8 t88;") + tdLog.debug("Rename tag t8 to t88 successfully") + tdSql.error("select count(*) from st1 t1, (select * from st1 where t8 is not null order by ts limit 10) t2 where t1.ts=t2.ts;") + + tdSql.execute("alter stable st1 rename tag t9 t99;") + tdLog.debug("Rename tag t9 to t99 successfully") + tdSql.error("select count(*) from st1 t1, (select * from st1 where t9='POINT (4.0 8.0)' limit 5) t2 where t1.ts=t2.ts;") def run(self): self.prepareData() - sql_list = [ - # super table query with correct tag name of json type - { - "sql": "select to_char(ts, 'yyyy-mm-dd hh24:mi:ss') as time, irate(col1) from st group by to_char(ts, 'yyyy-mm-dd hh24:mi:ss'), t1->'instance' order by time;", - "result_check": "0.0" - }, - # child table query with incorrect tag name of json type - { - "sql": "select to_char(ts, 'yyyy-mm-dd hh24:mi:ss') as time, irate(col1) from ct1 group by to_char(ts, 'yyyy-mm-dd hh24:mi:ss'), t1->'name' order by time;", - "result_check": "None" - }, - # child table query with null value - { - "sql": "select ts, avg(col1) from ct2 group by ts, t1->'name' order by ts;", - "result_check": "None" - } - ] - for sql_dic in sql_list: - tdSql.query(sql_dic["sql"]) - tdLog.debug("execute sql: %s" % sql_dic["sql"]) - for item in [row[1] for row in tdSql.queryResult]: - if sql_dic["result_check"] in str(item): - tdLog.debug("Check query result of '{}' successfully".format(sql_dic["sql"])) - break + self.test_tag_json() + self.test_db_empty() + self.test_db_with_data() def stop(self): tdSql.close() From e65c80d495f5fbd6442256cd76dd8079c7f28268 Mon Sep 17 00:00:00 2001 From: dmchen Date: Tue, 23 Jan 2024 09:43:23 +0000 Subject: [PATCH 26/32] fix/TD-28431 --- source/dnode/mgmt/exe/dmMain.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/source/dnode/mgmt/exe/dmMain.c b/source/dnode/mgmt/exe/dmMain.c index 882f04c75e..1508d88def 100644 --- a/source/dnode/mgmt/exe/dmMain.c +++ b/source/dnode/mgmt/exe/dmMain.c @@ -182,7 +182,16 @@ static int32_t dmParseArgs(int32_t argc, char const *argv[]) { } else if (strcmp(argv[i], "-s") == 0) { global.dumpSdb = true; } else if (strcmp(argv[i], "-E") == 0) { - tstrncpy(global.envFile, argv[++i], PATH_MAX); + if(i < argc - 1) { + if (strlen(argv[++i]) >= PATH_MAX) { + printf("env file path overflow"); + return -1; + } + tstrncpy(global.envFile, argv[i], PATH_MAX); + } else { + printf("'-E' requires a parameter\n"); + return -1; + } } else if (strcmp(argv[i], "-k") == 0) { global.generateGrant = true; } else if (strcmp(argv[i], "-C") == 0) { From 408212949f535cb90cf41dbafd82b8ec3c4553e0 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Tue, 23 Jan 2024 18:21:10 +0800 Subject: [PATCH 27/32] test:add special compatibility testcase for code coverage --- tests/system-test/6-cluster/clusterCommonCreate.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/system-test/6-cluster/clusterCommonCreate.py b/tests/system-test/6-cluster/clusterCommonCreate.py index cb44710b58..cb8a9bc9e2 100644 --- a/tests/system-test/6-cluster/clusterCommonCreate.py +++ b/tests/system-test/6-cluster/clusterCommonCreate.py @@ -217,8 +217,8 @@ class ClusterComCreate: def alterStbMetaData(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs=None): tdLog.debug("alter Stb column ............") - tdLog.debug(f"describe STABLE {dbName}.{stbName} ") - tsql.execute(f"describe STABLE {dbName}.{stbName} ;") + tdLog.debug(f"describe {dbName}.{stbName} ") + tsql.execute(f"describe {dbName}.{stbName} ;") tdLog.debug(f"ALTER STABLE {dbName}.{stbName} MODIFY COLUMN c3 binary(20);") tsql.execute(f" ALTER STABLE {dbName}.{stbName} MODIFY COLUMN c3 binary(20);") tdLog.debug(f"ALTER STABLE {dbName}.{stbName} ADD COLUMN c4 DOUBLE;") From 262bb4cf127fb71e1bebd4502f4721f97f3efe34 Mon Sep 17 00:00:00 2001 From: dmchen Date: Wed, 24 Jan 2024 02:30:03 +0000 Subject: [PATCH 28/32] fix/TD-28437 --- source/dnode/mnode/impl/src/mndDump.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/dnode/mnode/impl/src/mndDump.c b/source/dnode/mnode/impl/src/mndDump.c index c68b11d184..00e72fb329 100644 --- a/source/dnode/mnode/impl/src/mndDump.c +++ b/source/dnode/mnode/impl/src/mndDump.c @@ -545,6 +545,7 @@ void dumpHeader(SSdb *pSdb, SJson *json) { SJson *maxIdsJson = tjsonCreateObject(); tjsonAddItemToObject(json, "maxIds", maxIdsJson); for (int32_t i = 0; i < SDB_MAX; ++i) { + if(i == 5) continue; int64_t maxId = 0; if (i < SDB_MAX) { maxId = pSdb->maxId[i]; From 22225d31c361e8a3cc43ced4502c61d4bedda98d Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 24 Jan 2024 10:39:22 +0800 Subject: [PATCH 29/32] fix:[TD-28025]return 0 if create table failed if stable not exist --- source/client/src/clientRawBlockWrite.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index db8de44f1c..739224be38 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -966,6 +966,12 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) { // pCreateReq->ctb.suid = processSuid(pCreateReq->ctb.suid, pRequest->pDb); toName(pTscObj->acctId, pRequest->pDb, pCreateReq->ctb.stbName, &sName); code = catalogGetTableMeta(pCatalog, &conn, &sName, &pTableMeta); + if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) { + code = TSDB_CODE_SUCCESS; + taosMemoryFreeClear(pTableMeta); + continue; + } + if (code != TSDB_CODE_SUCCESS) { goto end; } From 6e09164c3ae88f2104ab16f244e3e0a0bbdd0dec Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 24 Jan 2024 11:38:17 +0800 Subject: [PATCH 30/32] fix:[TD-28025]return 0 if create table failed if stable not exist --- source/client/src/clientRawBlockWrite.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index 739224be38..b0739b463f 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -955,7 +955,6 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) { if (code != TSDB_CODE_SUCCESS) { goto end; } - taosArrayPush(pRequest->tableList, &pName); pCreateReq->flags |= TD_CREATE_IF_NOT_EXISTS; // change tag cid to new cid @@ -989,6 +988,7 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) { } taosMemoryFreeClear(pTableMeta); } + taosArrayPush(pRequest->tableList, &pName); SVgroupCreateTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId)); if (pTableBatch == NULL) { From 7d3aa6974050d3fd8e1ea2aaf8e1e36c528b5946 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 24 Jan 2024 14:02:07 +0800 Subject: [PATCH 31/32] fix:[TD-28025]return 0 if create table failed if stable not exist --- source/client/src/clientRawBlockWrite.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index b0739b463f..1ea3eaf219 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -1005,6 +1005,9 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) { } } + if (taosHashGetSize(pVgroupHashmap) == 0) { + goto end; + } SArray* pBufArray = serializeVgroupsCreateTableBatch(pVgroupHashmap); if (NULL == pBufArray) { code = TSDB_CODE_OUT_OF_MEMORY; From dc6ee3e1a0519c3db4926d8f5ec13a8020da2bcb Mon Sep 17 00:00:00 2001 From: factosea <285808407@qq.com> Date: Wed, 24 Jan 2024 19:34:54 +0800 Subject: [PATCH 32/32] fix: daylight --- source/os/src/osTimezone.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/source/os/src/osTimezone.c b/source/os/src/osTimezone.c index 4280490c68..72f7dda41c 100644 --- a/source/os/src/osTimezone.c +++ b/source/os/src/osTimezone.c @@ -740,6 +740,8 @@ char *tz_win[554][2] = {{"Asia/Shanghai", "China Standard Time"}, #include #endif +static int isdst_now = 0; + void taosSetSystemTimezone(const char *inTimezoneStr, char *outTimezoneStr, int8_t *outDaylight, enum TdTimezone *tsTimezone) { if (inTimezoneStr == NULL || inTimezoneStr[0] == 0) return; @@ -805,19 +807,19 @@ void taosSetSystemTimezone(const char *inTimezoneStr, char *outTimezoneStr, int8 tzset(); int32_t tz = (int32_t)((-timezone * MILLISECOND_PER_SECOND) / MILLISECOND_PER_HOUR); *tsTimezone = tz; - tz += daylight; + tz += isdst_now; - sprintf(outTimezoneStr, "%s (%s, %s%02d00)", buf, tzname[daylight], tz >= 0 ? "+" : "-", abs(tz)); - *outDaylight = daylight; + sprintf(outTimezoneStr, "%s (%s, %s%02d00)", buf, tzname[isdst_now], tz >= 0 ? "+" : "-", abs(tz)); + *outDaylight = isdst_now; #else setenv("TZ", buf, 1); tzset(); int32_t tz = (int32_t)((-timezone * MILLISECOND_PER_SECOND) / MILLISECOND_PER_HOUR); *tsTimezone = tz; - tz += daylight; - sprintf(outTimezoneStr, "%s (%s, %s%02d00)", buf, tzname[daylight], tz >= 0 ? "+" : "-", abs(tz)); - *outDaylight = daylight; + tz += isdst_now; + sprintf(outTimezoneStr, "%s (%s, %s%02d00)", buf, tzname[isdst_now], tz >= 0 ? "+" : "-", abs(tz)); + *outDaylight = isdst_now; #endif @@ -895,6 +897,7 @@ void taosGetSystemTimezone(char *outTimezoneStr, enum TdTimezone *tsTimezone) { struct tm tm1; taosLocalTime(&tx1, &tm1, NULL); daylight = tm1.tm_isdst; + isdst_now = tm1.tm_isdst; /* * format example: @@ -1009,6 +1012,7 @@ void taosGetSystemTimezone(char *outTimezoneStr, enum TdTimezone *tsTimezone) { time_t tx1 = taosGetTimestampSec(); struct tm tm1; taosLocalTime(&tx1, &tm1, NULL); + isdst_now = tm1.tm_isdst; /* * format example: