Merge remote-tracking branch 'origin/3.0' into fix/tsim

Shengliang Guan 2022-07-05 18:44:43 +08:00
commit 32bea46f3c
34 changed files with 2390 additions and 558 deletions

View File

@ -118,6 +118,7 @@ execute_process(COMMAND "${CMAKE_COMMAND}" --build .
# ================================================================================================ # ================================================================================================
# googletest # googletest
if(${BUILD_TEST}) if(${BUILD_TEST})
set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
add_subdirectory(googletest EXCLUDE_FROM_ALL) add_subdirectory(googletest EXCLUDE_FROM_ALL)
target_include_directories( target_include_directories(
gtest gtest
@ -259,7 +260,7 @@ if(${BUILD_MSVCREGEX})
SET_TARGET_PROPERTIES(msvcregex PROPERTIES OUTPUT_NAME msvcregex) SET_TARGET_PROPERTIES(msvcregex PROPERTIES OUTPUT_NAME msvcregex)
endif(${BUILD_MSVCREGEX}) endif(${BUILD_MSVCREGEX})
-# msvcregex
+# wcwidth
if(${BUILD_WCWIDTH}) if(${BUILD_WCWIDTH})
add_library(wcwidth STATIC "") add_library(wcwidth STATIC "")
target_sources(wcwidth target_sources(wcwidth

View File

@ -55,7 +55,8 @@ extern int32_t tMsgDict[];
#define TMSG_SEG_CODE(TYPE) (((TYPE)&0xff00) >> 8) #define TMSG_SEG_CODE(TYPE) (((TYPE)&0xff00) >> 8)
#define TMSG_SEG_SEQ(TYPE) ((TYPE)&0xff) #define TMSG_SEG_SEQ(TYPE) ((TYPE)&0xff)
-#define TMSG_INFO(TYPE) tMsgInfo[tMsgDict[TMSG_SEG_CODE(TYPE)] + TMSG_SEG_SEQ(TYPE)]
+#define TMSG_INFO(TYPE) \
+  (((TYPE) >= 0 && (TYPE) < TDMT_MAX) ? tMsgInfo[tMsgDict[TMSG_SEG_CODE(TYPE)] + TMSG_SEG_SEQ(TYPE)] : 0)
#define TMSG_INDEX(TYPE) (tMsgDict[TMSG_SEG_CODE(TYPE)] + TMSG_SEG_SEQ(TYPE)) #define TMSG_INDEX(TYPE) (tMsgDict[TMSG_SEG_CODE(TYPE)] + TMSG_SEG_SEQ(TYPE))
typedef uint16_t tmsg_t; typedef uint16_t tmsg_t;
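For reference, a minimal standalone illustration of the guarded-lookup shape the new TMSG_INFO uses; the table, bound, and caller below are hypothetical stand-ins, not the real tMsgInfo/tMsgDict/TDMT_MAX:

#include <stdio.h>

// Hypothetical message-name table and its bound (stand-ins for tMsgInfo / TDMT_MAX).
static const char *msgName[] = {"connect", "query", "fetch"};
enum { MSG_MAX = 3 };

// Same shape as the new TMSG_INFO: an out-of-range type yields 0 (a null pointer)
// instead of indexing past the end of the table.
#define MSG_INFO(TYPE) (((TYPE) >= 0 && (TYPE) < MSG_MAX) ? msgName[(TYPE)] : 0)

int main(void) {
  printf("%s\n", MSG_INFO(1));                       // "query"
  printf("%s\n", MSG_INFO(7) ? MSG_INFO(7) : "??");  // falls back instead of reading garbage
  return 0;
}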

View File

@ -82,6 +82,7 @@ typedef struct SScanLogicNode {
typedef struct SJoinLogicNode { typedef struct SJoinLogicNode {
SLogicNode node; SLogicNode node;
EJoinType joinType; EJoinType joinType;
SNode* pMergeCondition;
SNode* pOnConditions; SNode* pOnConditions;
bool isSingleTableJoin; bool isSingleTableJoin;
} SJoinLogicNode; } SJoinLogicNode;
@ -329,6 +330,7 @@ typedef struct SInterpFuncPhysiNode {
typedef struct SJoinPhysiNode { typedef struct SJoinPhysiNode {
SPhysiNode node; SPhysiNode node;
EJoinType joinType; EJoinType joinType;
SNode* pMergeCondition;
SNode* pOnConditions; SNode* pOnConditions;
SNodeList* pTargets; SNodeList* pTargets;
} SJoinPhysiNode; } SJoinPhysiNode;

View File

@ -376,6 +376,7 @@ void nodesRewriteSelectStmt(SSelectStmt* pSelect, ESqlClause clause, FNodeRewrit
typedef enum ECollectColType { COLLECT_COL_TYPE_COL = 1, COLLECT_COL_TYPE_TAG, COLLECT_COL_TYPE_ALL } ECollectColType; typedef enum ECollectColType { COLLECT_COL_TYPE_COL = 1, COLLECT_COL_TYPE_TAG, COLLECT_COL_TYPE_ALL } ECollectColType;
int32_t nodesCollectColumns(SSelectStmt* pSelect, ESqlClause clause, const char* pTableAlias, ECollectColType type, int32_t nodesCollectColumns(SSelectStmt* pSelect, ESqlClause clause, const char* pTableAlias, ECollectColType type,
SNodeList** pCols); SNodeList** pCols);
int32_t nodesCollectColumnsFromNode(SNode* node, const char* pTableAlias, ECollectColType type, SNodeList** pCols);
typedef bool (*FFuncClassifier)(int32_t funcId); typedef bool (*FFuncClassifier)(int32_t funcId);
int32_t nodesCollectFuncs(SSelectStmt* pSelect, ESqlClause clause, FFuncClassifier classifier, SNodeList** pFuncs); int32_t nodesCollectFuncs(SSelectStmt* pSelect, ESqlClause clause, FFuncClassifier classifier, SNodeList** pFuncs);

View File

@ -27,7 +27,7 @@ int vnodeGetTableMeta(SVnode *pVnode, SRpcMsg *pMsg) {
SMetaReader mer1 = {0}; SMetaReader mer1 = {0};
SMetaReader mer2 = {0}; SMetaReader mer2 = {0};
char tableFName[TSDB_TABLE_FNAME_LEN]; char tableFName[TSDB_TABLE_FNAME_LEN];
-  SRpcMsg   rpcMsg;
+  SRpcMsg   rpcMsg = {0};
int32_t code = 0; int32_t code = 0;
int32_t rspLen = 0; int32_t rspLen = 0;
void * pRsp = NULL; void * pRsp = NULL;
@ -111,6 +111,7 @@ _exit:
rpcMsg.pCont = pRsp; rpcMsg.pCont = pRsp;
rpcMsg.contLen = rspLen; rpcMsg.contLen = rspLen;
rpcMsg.code = code; rpcMsg.code = code;
rpcMsg.msgType = pMsg->msgType;
if (code) { if (code) {
qError("get table %s meta failed cause of %s", infoReq.tbName, tstrerror(code)); qError("get table %s meta failed cause of %s", infoReq.tbName, tstrerror(code));
@ -130,7 +131,7 @@ int vnodeGetTableCfg(SVnode *pVnode, SRpcMsg *pMsg) {
SMetaReader mer1 = {0}; SMetaReader mer1 = {0};
SMetaReader mer2 = {0}; SMetaReader mer2 = {0};
char tableFName[TSDB_TABLE_FNAME_LEN]; char tableFName[TSDB_TABLE_FNAME_LEN];
-  SRpcMsg   rpcMsg;
+  SRpcMsg   rpcMsg = {0};
int32_t code = 0; int32_t code = 0;
int32_t rspLen = 0; int32_t rspLen = 0;
void * pRsp = NULL; void * pRsp = NULL;
@ -220,6 +221,7 @@ _exit:
rpcMsg.pCont = pRsp; rpcMsg.pCont = pRsp;
rpcMsg.contLen = rspLen; rpcMsg.contLen = rspLen;
rpcMsg.code = code; rpcMsg.code = code;
rpcMsg.msgType = pMsg->msgType;
if (code) { if (code) {
qError("get table %s cfg failed cause of %s", cfgReq.tbName, tstrerror(code)); qError("get table %s cfg failed cause of %s", cfgReq.tbName, tstrerror(code));

View File

@ -680,7 +680,7 @@ typedef struct SJoinOperatorInfo {
SSDataBlock *pRight; SSDataBlock *pRight;
int32_t rightPos; int32_t rightPos;
SColumnInfo rightCol; SColumnInfo rightCol;
-  SNode       *pOnCondition;
+  SNode       *pCondAfterMerge;
} SJoinOperatorInfo; } SJoinOperatorInfo;
#define OPTR_IS_OPENED(_optr) (((_optr)->status & OP_OPENED) == OP_OPENED) #define OPTR_IS_OPENED(_optr) (((_optr)->status & OP_OPENED) == OP_OPENED)

View File

@ -53,13 +53,28 @@ SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t
pOperator->info = pInfo; pOperator->info = pInfo;
pOperator->pTaskInfo = pTaskInfo; pOperator->pTaskInfo = pTaskInfo;
-  SNode* pOnCondition = pJoinNode->pOnConditions;
-  if (nodeType(pOnCondition) == QUERY_NODE_OPERATOR) {
-    SOperatorNode* pNode = (SOperatorNode*)pOnCondition;
+  SNode* pMergeCondition = pJoinNode->pMergeCondition;
+  if (nodeType(pMergeCondition) == QUERY_NODE_OPERATOR) {
+    SOperatorNode* pNode = (SOperatorNode*)pMergeCondition;
    setJoinColumnInfo(&pInfo->leftCol, (SColumnNode*)pNode->pLeft);
    setJoinColumnInfo(&pInfo->rightCol, (SColumnNode*)pNode->pRight);
-  } else if (nodeType(pOnCondition) == QUERY_NODE_LOGIC_CONDITION) {
-    extractTimeCondition(pInfo, (SLogicConditionNode*)pOnCondition);
+  } else {
+    ASSERT(false);
+  }
+  if (pJoinNode->pOnConditions != NULL && pJoinNode->node.pConditions != NULL) {
+    pInfo->pCondAfterMerge = nodesMakeNode(QUERY_NODE_LOGIC_CONDITION);
+    SLogicConditionNode* pLogicCond = (SLogicConditionNode*)(pInfo->pCondAfterMerge);
+    pLogicCond->pParameterList = nodesMakeList();
+    nodesListMakeAppend(&pLogicCond->pParameterList, nodesCloneNode(pJoinNode->pOnConditions));
+    nodesListMakeAppend(&pLogicCond->pParameterList, nodesCloneNode(pJoinNode->node.pConditions));
+    pLogicCond->condType = LOGIC_COND_TYPE_AND;
+  } else if (pJoinNode->pOnConditions != NULL) {
+    pInfo->pCondAfterMerge = nodesCloneNode(pJoinNode->pOnConditions);
+  } else if (pJoinNode->node.pConditions != NULL) {
+    pInfo->pCondAfterMerge = nodesCloneNode(pJoinNode->node.pConditions);
+  } else {
+    pInfo->pCondAfterMerge = NULL;
+  }
  }
pOperator->fpSet = pOperator->fpSet =
@ -88,15 +103,12 @@ void setJoinColumnInfo(SColumnInfo* pColumn, const SColumnNode* pColumnNode) {
void destroyMergeJoinOperator(void* param, int32_t numOfOutput) { void destroyMergeJoinOperator(void* param, int32_t numOfOutput) {
SJoinOperatorInfo* pJoinOperator = (SJoinOperatorInfo*)param; SJoinOperatorInfo* pJoinOperator = (SJoinOperatorInfo*)param;
nodesDestroyNode(pJoinOperator->pCondAfterMerge);
} }
-SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator) {
+static void doMergeJoinImpl(struct SOperatorInfo* pOperator, SSDataBlock* pRes) {
  SJoinOperatorInfo* pJoinInfo = pOperator->info;
-  SSDataBlock*       pRes = pJoinInfo->pRes;
-  blockDataCleanup(pRes);
-  blockDataEnsureCapacity(pRes, 4096);
int32_t nrows = 0; int32_t nrows = 0;
while (1) { while (1) {
@ -181,7 +193,28 @@ SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator) {
break; break;
} }
} }
}
SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator) {
SJoinOperatorInfo* pJoinInfo = pOperator->info;
SSDataBlock* pRes = pJoinInfo->pRes;
blockDataCleanup(pRes);
blockDataEnsureCapacity(pRes, 4096);
while (true) {
int32_t numOfRowsBefore = pRes->info.rows;
doMergeJoinImpl(pOperator, pRes);
int32_t numOfNewRows = pRes->info.rows - numOfRowsBefore;
if (numOfNewRows == 0) {
break;
}
if (pJoinInfo->pCondAfterMerge != NULL) {
doFilter(pJoinInfo->pCondAfterMerge, pRes);
}
if (pRes->info.rows >= pOperator->resultInfo.threshold) {
break;
}
}
return (pRes->info.rows > 0) ? pRes : NULL; return (pRes->info.rows > 0) ? pRes : NULL;
} }
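For reference, a compressed sketch of the control flow this refactor sets up: an outer loop keeps pulling batches from the inner merge step, applies the combined post-merge condition, and stops once the result block reaches the operator's threshold. All types and helpers below are simplified stand-ins, not the executor's real API:

#include <stdio.h>

// Hypothetical stand-ins for the executor's SSDataBlock / doFilter / threshold plumbing.
typedef struct { int rows; } Block;

static int batches = 3;                      // pretend upstream yields 3 batches of 100 rows
static void mergeStep(Block *res) {          // stand-in for doMergeJoinImpl
  if (batches-- > 0) res->rows += 100;
}
static void applyFilter(Block *res) {        // stand-in for doFilter(pCondAfterMerge, pRes)
  res->rows -= res->rows / 10;               // drop some rows as a stand-in for predicate evaluation
}

static Block *mergeJoin(Block *res, int haveCond, int threshold) {
  res->rows = 0;
  while (1) {
    int before = res->rows;
    mergeStep(res);                          // produce the next batch of joined rows
    if (res->rows - before == 0) break;      // upstream exhausted
    if (haveCond) applyFilter(res);          // post-merge ON/WHERE filtering
    if (res->rows >= threshold) break;       // block is full enough to hand upward
  }
  return res->rows > 0 ? res : NULL;
}

int main(void) {
  Block b;
  Block *out = mergeJoin(&b, 1, 256);
  printf("rows out: %d\n", out ? out->rows : 0);
  return 0;
}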

View File

@ -1565,6 +1565,10 @@ void constructUdfService(void *argsThread) {
//TODO return value of uv_run //TODO return value of uv_run
uv_run(&udfc->uvLoop, UV_RUN_DEFAULT); uv_run(&udfc->uvLoop, UV_RUN_DEFAULT);
uv_loop_close(&udfc->uvLoop); uv_loop_close(&udfc->uvLoop);
uv_walk(&udfc->uvLoop, udfUdfdCloseWalkCb, NULL);
uv_run(&udfc->uvLoop, UV_RUN_DEFAULT);
uv_loop_close(&udfc->uvLoop);
} }
int32_t udfcOpen() { int32_t udfcOpen() {
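The added shutdown lines follow the usual libuv teardown sequence: walk the remaining handles, close them, run the loop once more so the close callbacks fire, then close the loop. A standalone sketch of that sequence (the timer handle is illustrative, not one of udfc's handles):

#include <uv.h>

// Close every handle still attached to the loop.
static void closeWalkCb(uv_handle_t *handle, void *arg) {
  (void)arg;
  if (!uv_is_closing(handle)) uv_close(handle, NULL);
}

int main(void) {
  uv_loop_t loop;
  uv_loop_init(&loop);

  uv_timer_t timer;                  // illustrative handle kept on the loop
  uv_timer_init(&loop, &timer);

  uv_run(&loop, UV_RUN_DEFAULT);     // normal run; returns when nothing is active

  uv_walk(&loop, closeWalkCb, NULL); // request close on whatever is left
  uv_run(&loop, UV_RUN_DEFAULT);     // let the close callbacks execute
  return uv_loop_close(&loop);       // now returns 0 instead of UV_EBUSY
}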

View File

@ -368,6 +368,7 @@ static int32_t logicScanCopy(const SScanLogicNode* pSrc, SScanLogicNode* pDst) {
static int32_t logicJoinCopy(const SJoinLogicNode* pSrc, SJoinLogicNode* pDst) { static int32_t logicJoinCopy(const SJoinLogicNode* pSrc, SJoinLogicNode* pDst) {
COPY_BASE_OBJECT_FIELD(node, logicNodeCopy); COPY_BASE_OBJECT_FIELD(node, logicNodeCopy);
COPY_SCALAR_FIELD(joinType); COPY_SCALAR_FIELD(joinType);
CLONE_NODE_FIELD(pMergeCondition);
CLONE_NODE_FIELD(pOnConditions); CLONE_NODE_FIELD(pOnConditions);
COPY_SCALAR_FIELD(isSingleTableJoin); COPY_SCALAR_FIELD(isSingleTableJoin);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;

View File

@ -1254,6 +1254,7 @@ static int32_t jsonToLogicPlan(const SJson* pJson, void* pObj) {
static const char* jkJoinLogicPlanJoinType = "JoinType"; static const char* jkJoinLogicPlanJoinType = "JoinType";
static const char* jkJoinLogicPlanOnConditions = "OnConditions"; static const char* jkJoinLogicPlanOnConditions = "OnConditions";
static const char* jkJoinLogicPlanMergeCondition = "MergeConditions";
static int32_t logicJoinNodeToJson(const void* pObj, SJson* pJson) { static int32_t logicJoinNodeToJson(const void* pObj, SJson* pJson) {
const SJoinLogicNode* pNode = (const SJoinLogicNode*)pObj; const SJoinLogicNode* pNode = (const SJoinLogicNode*)pObj;
@ -1262,6 +1263,9 @@ static int32_t logicJoinNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkJoinLogicPlanJoinType, pNode->joinType); code = tjsonAddIntegerToObject(pJson, jkJoinLogicPlanJoinType, pNode->joinType);
} }
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkJoinLogicPlanMergeCondition, nodeToJson, pNode->pMergeCondition);
}
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkJoinLogicPlanOnConditions, nodeToJson, pNode->pOnConditions); code = tjsonAddObject(pJson, jkJoinLogicPlanOnConditions, nodeToJson, pNode->pOnConditions);
} }
@ -1617,6 +1621,7 @@ static int32_t jsonToPhysiProjectNode(const SJson* pJson, void* pObj) {
} }
static const char* jkJoinPhysiPlanJoinType = "JoinType"; static const char* jkJoinPhysiPlanJoinType = "JoinType";
static const char* jkJoinPhysiPlanMergeCondition = "MergeCondition";
static const char* jkJoinPhysiPlanOnConditions = "OnConditions"; static const char* jkJoinPhysiPlanOnConditions = "OnConditions";
static const char* jkJoinPhysiPlanTargets = "Targets"; static const char* jkJoinPhysiPlanTargets = "Targets";
@ -1627,6 +1632,9 @@ static int32_t physiJoinNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkJoinPhysiPlanJoinType, pNode->joinType); code = tjsonAddIntegerToObject(pJson, jkJoinPhysiPlanJoinType, pNode->joinType);
} }
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkJoinPhysiPlanMergeCondition, nodeToJson, pNode->pMergeCondition);
}
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkJoinPhysiPlanOnConditions, nodeToJson, pNode->pOnConditions); code = tjsonAddObject(pJson, jkJoinPhysiPlanOnConditions, nodeToJson, pNode->pOnConditions);
} }
@ -1648,6 +1656,9 @@ static int32_t jsonToPhysiJoinNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkJoinPhysiPlanOnConditions, &pNode->pOnConditions); code = jsonToNodeObject(pJson, jkJoinPhysiPlanOnConditions, &pNode->pOnConditions);
} }
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkJoinPhysiPlanMergeCondition, &pNode->pMergeCondition);
}
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeList(pJson, jkJoinPhysiPlanTargets, &pNode->pTargets); code = jsonToNodeList(pJson, jkJoinPhysiPlanTargets, &pNode->pTargets);
} }

View File

@ -470,6 +470,9 @@ static EDealRes dispatchPhysiPlan(SNode* pNode, ETraversalOrder order, FNodeWalk
case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN: { case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN: {
SJoinPhysiNode* pJoin = (SJoinPhysiNode*)pNode; SJoinPhysiNode* pJoin = (SJoinPhysiNode*)pNode;
res = walkPhysiNode((SPhysiNode*)pNode, order, walker, pContext); res = walkPhysiNode((SPhysiNode*)pNode, order, walker, pContext);
if (DEAL_RES_ERROR != res && DEAL_RES_END != res) {
res = walkPhysiPlan(pJoin->pMergeCondition, order, walker, pContext);
}
if (DEAL_RES_ERROR != res && DEAL_RES_END != res) { if (DEAL_RES_ERROR != res && DEAL_RES_END != res) {
res = walkPhysiPlan(pJoin->pOnConditions, order, walker, pContext); res = walkPhysiPlan(pJoin->pOnConditions, order, walker, pContext);
} }

View File

@ -718,6 +718,7 @@ void nodesDestroyNode(SNode* pNode) {
case QUERY_NODE_LOGIC_PLAN_JOIN: { case QUERY_NODE_LOGIC_PLAN_JOIN: {
SJoinLogicNode* pLogicNode = (SJoinLogicNode*)pNode; SJoinLogicNode* pLogicNode = (SJoinLogicNode*)pNode;
destroyLogicNode((SLogicNode*)pLogicNode); destroyLogicNode((SLogicNode*)pLogicNode);
nodesDestroyNode(pLogicNode->pMergeCondition);
nodesDestroyNode(pLogicNode->pOnConditions); nodesDestroyNode(pLogicNode->pOnConditions);
break; break;
} }
@ -828,6 +829,7 @@ void nodesDestroyNode(SNode* pNode) {
case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN: { case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN: {
SJoinPhysiNode* pPhyNode = (SJoinPhysiNode*)pNode; SJoinPhysiNode* pPhyNode = (SJoinPhysiNode*)pNode;
destroyPhysiNode((SPhysiNode*)pPhyNode); destroyPhysiNode((SPhysiNode*)pPhyNode);
nodesDestroyNode(pPhyNode->pMergeCondition);
nodesDestroyNode(pPhyNode->pOnConditions); nodesDestroyNode(pPhyNode->pOnConditions);
nodesDestroyList(pPhyNode->pTargets); nodesDestroyList(pPhyNode->pTargets);
break; break;
@ -1493,6 +1495,38 @@ int32_t nodesCollectColumns(SSelectStmt* pSelect, ESqlClause clause, const char*
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
int32_t nodesCollectColumnsFromNode(SNode* node, const char* pTableAlias, ECollectColType type, SNodeList** pCols) {
if (NULL == pCols) {
return TSDB_CODE_FAILED;
}
SCollectColumnsCxt cxt = {
.errCode = TSDB_CODE_SUCCESS,
.pTableAlias = pTableAlias,
.collectType = type,
.pCols = (NULL == *pCols ? nodesMakeList() : *pCols),
.pColHash = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK)};
if (NULL == cxt.pCols || NULL == cxt.pColHash) {
return TSDB_CODE_OUT_OF_MEMORY;
}
*pCols = NULL;
nodesWalkExpr(node, collectColumns, &cxt);
taosHashCleanup(cxt.pColHash);
if (TSDB_CODE_SUCCESS != cxt.errCode) {
nodesDestroyList(cxt.pCols);
return cxt.errCode;
}
if (LIST_LENGTH(cxt.pCols) > 0) {
*pCols = cxt.pCols;
} else {
nodesDestroyList(cxt.pCols);
}
return TSDB_CODE_SUCCESS;
}
typedef struct SCollectFuncsCxt { typedef struct SCollectFuncsCxt {
int32_t errCode; int32_t errCode;
FFuncClassifier classifier; FFuncClassifier classifier;

View File

@ -480,12 +480,18 @@ static int32_t pushDownCondOptPushCondToProject(SOptimizeContext* pCxt, SProject
return pushDownCondOptAppendCond(&pProject->node.pConditions, pCond); return pushDownCondOptAppendCond(&pProject->node.pConditions, pCond);
} }
static int32_t pushDownCondOptPushCondToJoin(SOptimizeContext* pCxt, SJoinLogicNode * pJoin, SNode** pCond) {
return pushDownCondOptAppendCond(&pJoin->node.pConditions, pCond);
}
static int32_t pushDownCondOptPushCondToChild(SOptimizeContext* pCxt, SLogicNode* pChild, SNode** pCond) { static int32_t pushDownCondOptPushCondToChild(SOptimizeContext* pCxt, SLogicNode* pChild, SNode** pCond) {
switch (nodeType(pChild)) { switch (nodeType(pChild)) {
case QUERY_NODE_LOGIC_PLAN_SCAN: case QUERY_NODE_LOGIC_PLAN_SCAN:
return pushDownCondOptPushCondToScan(pCxt, (SScanLogicNode*)pChild, pCond); return pushDownCondOptPushCondToScan(pCxt, (SScanLogicNode*)pChild, pCond);
case QUERY_NODE_LOGIC_PLAN_PROJECT: case QUERY_NODE_LOGIC_PLAN_PROJECT:
return pushDownCondOptPushCondToProject(pCxt, (SProjectLogicNode*)pChild, pCond); return pushDownCondOptPushCondToProject(pCxt, (SProjectLogicNode*)pChild, pCond);
case QUERY_NODE_LOGIC_PLAN_JOIN:
return pushDownCondOptPushCondToJoin(pCxt, (SJoinLogicNode*)pChild, pCond);
default: default:
break; break;
} }
@ -554,13 +560,83 @@ static int32_t pushDownCondOptCheckJoinOnCond(SOptimizeContext* pCxt, SJoinLogic
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
static int32_t pushDownCondOptPartJoinOnCondLogicCond(SJoinLogicNode* pJoin, SNode** ppMergeCond, SNode** ppOnCond) {
SLogicConditionNode* pLogicCond = (SLogicConditionNode*)(pJoin->pOnConditions);
int32_t code = TSDB_CODE_SUCCESS;
SNodeList* pOnConds = NULL;
SNode* pCond = NULL;
FOREACH(pCond, pLogicCond->pParameterList) {
if (pushDownCondOptIsPriKeyEqualCond(pJoin, pCond)) {
*ppMergeCond = nodesCloneNode(pCond);
} else {
code = nodesListMakeAppend(&pOnConds, nodesCloneNode(pCond));
}
}
SNode* pTempOnCond = NULL;
if (TSDB_CODE_SUCCESS == code) {
code = nodesMergeConds(&pTempOnCond, &pOnConds);
}
if (TSDB_CODE_SUCCESS == code && NULL != *ppMergeCond) {
*ppOnCond = pTempOnCond;
nodesDestroyNode(pJoin->pOnConditions);
pJoin->pOnConditions = NULL;
return TSDB_CODE_SUCCESS;
} else {
nodesDestroyList(pOnConds);
nodesDestroyNode(pTempOnCond);
return TSDB_CODE_PLAN_INTERNAL_ERROR;
}
}
static int32_t pushDownCondOptPartJoinOnCond(SJoinLogicNode* pJoin, SNode** ppMergeCond, SNode** ppOnCond) {
if (QUERY_NODE_LOGIC_CONDITION == nodeType(pJoin->pOnConditions) &&
LOGIC_COND_TYPE_AND == ((SLogicConditionNode*)(pJoin->pOnConditions))->condType) {
return pushDownCondOptPartJoinOnCondLogicCond(pJoin, ppMergeCond, ppOnCond);
}
if (pushDownCondOptIsPriKeyEqualCond(pJoin, pJoin->pOnConditions)) {
*ppMergeCond = nodesCloneNode(pJoin->pOnConditions);
*ppOnCond = NULL;
nodesDestroyNode(pJoin->pOnConditions);
pJoin->pOnConditions = NULL;
return TSDB_CODE_SUCCESS;
} else {
return TSDB_CODE_PLAN_INTERNAL_ERROR;
}
}
static int32_t pushDownCondOptJoinExtractMergeCond(SOptimizeContext* pCxt, SJoinLogicNode* pJoin) {
int32_t code = pushDownCondOptCheckJoinOnCond(pCxt, pJoin);
SNode* pJoinMergeCond = NULL;
SNode* pJoinOnCond = NULL;
if (TSDB_CODE_SUCCESS == code) {
code = pushDownCondOptPartJoinOnCond(pJoin, &pJoinMergeCond, &pJoinOnCond);
}
if (TSDB_CODE_SUCCESS == code) {
pJoin->pMergeCondition = pJoinMergeCond;
pJoin->pOnConditions = pJoinOnCond;
} else {
nodesDestroyNode(pJoinMergeCond);
nodesDestroyNode(pJoinOnCond);
}
return code;
}
static int32_t pushDownCondOptDealJoin(SOptimizeContext* pCxt, SJoinLogicNode* pJoin) { static int32_t pushDownCondOptDealJoin(SOptimizeContext* pCxt, SJoinLogicNode* pJoin) {
if (OPTIMIZE_FLAG_TEST_MASK(pJoin->node.optimizedFlag, OPTIMIZE_FLAG_PUSH_DOWN_CONDE)) { if (OPTIMIZE_FLAG_TEST_MASK(pJoin->node.optimizedFlag, OPTIMIZE_FLAG_PUSH_DOWN_CONDE)) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
if (NULL == pJoin->node.pConditions) { if (NULL == pJoin->node.pConditions) {
-    return pushDownCondOptCheckJoinOnCond(pCxt, pJoin);
+    int32_t code = pushDownCondOptJoinExtractMergeCond(pCxt, pJoin);
+    if (TSDB_CODE_SUCCESS == code) {
+      OPTIMIZE_FLAG_SET_MASK(pJoin->node.optimizedFlag, OPTIMIZE_FLAG_PUSH_DOWN_CONDE);
+      pCxt->optimized = true;
+    }
+    return code;
} }
SNode* pOnCond = NULL; SNode* pOnCond = NULL;
@ -579,10 +655,13 @@ static int32_t pushDownCondOptDealJoin(SOptimizeContext* pCxt, SJoinLogicNode* p
pushDownCondOptPushCondToChild(pCxt, (SLogicNode*)nodesListGetNode(pJoin->node.pChildren, 1), &pRightChildCond); pushDownCondOptPushCondToChild(pCxt, (SLogicNode*)nodesListGetNode(pJoin->node.pChildren, 1), &pRightChildCond);
} }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = pushDownCondOptJoinExtractMergeCond(pCxt, pJoin);
+  }
  if (TSDB_CODE_SUCCESS == code) {
    OPTIMIZE_FLAG_SET_MASK(pJoin->node.optimizedFlag, OPTIMIZE_FLAG_PUSH_DOWN_CONDE);
    pCxt->optimized = true;
-    code = pushDownCondOptCheckJoinOnCond(pCxt, pJoin);
} else { } else {
nodesDestroyNode(pOnCond); nodesDestroyNode(pOnCond);
nodesDestroyNode(pLeftChildCond); nodesDestroyNode(pLeftChildCond);
@ -720,7 +799,8 @@ static int32_t pushDownCondOptDealAgg(SOptimizeContext* pCxt, SAggLogicNode* pAg
// TODO: remove it after full implementation of pushing down to child // TODO: remove it after full implementation of pushing down to child
if (1 != LIST_LENGTH(pAgg->node.pChildren) || if (1 != LIST_LENGTH(pAgg->node.pChildren) ||
QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(nodesListGetNode(pAgg->node.pChildren, 0)) && QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(nodesListGetNode(pAgg->node.pChildren, 0)) &&
-      QUERY_NODE_LOGIC_PLAN_PROJECT != nodeType(nodesListGetNode(pAgg->node.pChildren, 0))) {
+      QUERY_NODE_LOGIC_PLAN_PROJECT != nodeType(nodesListGetNode(pAgg->node.pChildren, 0)) &&
+      QUERY_NODE_LOGIC_PLAN_JOIN != nodeType(nodesListGetNode(pAgg->node.pChildren, 0))) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
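For reference, the new pushDownCondOptPartJoinOnCond* helpers split a conjunctive ON clause into the single primary-key equality used for the ordered merge and the residual conjuncts evaluated afterwards. A self-contained sketch of that partition step, with the condition representation and predicate reduced to simple placeholders:

#include <stdio.h>
#include <string.h>

// Simplified stand-in for a flattened AND list of ON-clause conjuncts.
#define MAX_CONDS 8
typedef struct {
  const char *conds[MAX_CONDS];
  int         n;
} CondList;

// Placeholder for pushDownCondOptIsPriKeyEqualCond(): here "ts-eq" marks the
// primary-key (timestamp) equality condition.
static int isPrimaryKeyEqual(const char *cond) { return strcmp(cond, "ts-eq") == 0; }

// Pull out the merge condition; everything else stays as the residual ON filter.
static int partition(const CondList *on, const char **mergeCond, CondList *rest) {
  *mergeCond = NULL;
  rest->n = 0;
  for (int i = 0; i < on->n; ++i) {
    if (isPrimaryKeyEqual(on->conds[i])) {
      *mergeCond = on->conds[i];
    } else {
      rest->conds[rest->n++] = on->conds[i];
    }
  }
  return *mergeCond != NULL ? 0 : -1;  // no primary-key equality: planner error
}

int main(void) {
  CondList    on = {{"t1.a = t2.b", "ts-eq", "t1.c > 5"}, 3};
  const char *mergeCond = NULL;
  CondList    rest;
  if (partition(&on, &mergeCond, &rest) == 0) {
    printf("merge on: %s, %d residual conjunct(s)\n", mergeCond, rest.n);
  }
  return 0;
}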

View File

@ -612,10 +612,8 @@ static int32_t createJoinPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
pJoin->joinType = pJoinLogicNode->joinType; pJoin->joinType = pJoinLogicNode->joinType;
-  if (NULL != pJoinLogicNode->pOnConditions) {
-    code = setNodeSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->pOnConditions,
-                         &pJoin->pOnConditions);
-  }
+  setNodeSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->pMergeCondition,
+                &pJoin->pMergeCondition);
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = setListSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->node.pTargets, code = setListSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->node.pTargets,
&pJoin->pTargets); &pJoin->pTargets);
@ -623,6 +621,21 @@ static int32_t createJoinPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = addDataBlockSlots(pCxt, pJoin->pTargets, pJoin->node.pOutputDataBlockDesc); code = addDataBlockSlots(pCxt, pJoin->pTargets, pJoin->node.pOutputDataBlockDesc);
} }
SNodeList* condCols = nodesMakeList();
if (TSDB_CODE_SUCCESS == code && NULL != pJoinLogicNode->pOnConditions) {
code = nodesCollectColumnsFromNode(pJoinLogicNode->pOnConditions, NULL, COLLECT_COL_TYPE_ALL, &condCols);
}
if (TSDB_CODE_SUCCESS == code) {
code = addDataBlockSlots(pCxt, condCols, pJoin->node.pOutputDataBlockDesc);
nodesDestroyList(condCols);
}
if (TSDB_CODE_SUCCESS == code && NULL != pJoinLogicNode->pOnConditions) {
code = setNodeSlotId(pCxt, ((SPhysiNode*)pJoin)->pOutputDataBlockDesc->dataBlockId, -1, pJoinLogicNode->pOnConditions,
&pJoin->pOnConditions);
}
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = setConditionsSlotId(pCxt, (const SLogicNode*)pJoinLogicNode, (SPhysiNode*)pJoin); code = setConditionsSlotId(pCxt, (const SLogicNode*)pJoinLogicNode, (SPhysiNode*)pJoin);
} }

View File

@ -83,7 +83,7 @@ typedef struct SSyncSnapshotReceiver {
SSyncSnapshotReceiver *snapshotReceiverCreate(SSyncNode *pSyncNode, SRaftId fromId); SSyncSnapshotReceiver *snapshotReceiverCreate(SSyncNode *pSyncNode, SRaftId fromId);
void snapshotReceiverDestroy(SSyncSnapshotReceiver *pReceiver); void snapshotReceiverDestroy(SSyncSnapshotReceiver *pReceiver);
-int32_t snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncTerm privateTerm, SyncSnapshotSend *pBeginMsg);
+int32_t snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pBeginMsg);
int32_t snapshotReceiverStop(SSyncSnapshotReceiver *pReceiver); int32_t snapshotReceiverStop(SSyncSnapshotReceiver *pReceiver);
bool snapshotReceiverIsStart(SSyncSnapshotReceiver *pReceiver); bool snapshotReceiverIsStart(SSyncSnapshotReceiver *pReceiver);

View File

@ -158,13 +158,13 @@ int32_t syncSetStandby(int64_t rid) {
} }
if (pSyncNode->state != TAOS_SYNC_STATE_FOLLOWER) { if (pSyncNode->state != TAOS_SYNC_STATE_FOLLOWER) {
taosReleaseRef(tsNodeRefId, pSyncNode->rid);
if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) { if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) {
terrno = TSDB_CODE_SYN_IS_LEADER; terrno = TSDB_CODE_SYN_IS_LEADER;
} else { } else {
terrno = TSDB_CODE_SYN_STANDBY_NOT_READY; terrno = TSDB_CODE_SYN_STANDBY_NOT_READY;
} }
sError("failed to set standby since it is not follower, state:%s rid:%" PRId64, syncStr(pSyncNode->state), rid); sError("failed to set standby since it is not follower, state:%s rid:%" PRId64, syncStr(pSyncNode->state), rid);
taosReleaseRef(tsNodeRefId, pSyncNode->rid);
return -1; return -1;
} }
@ -620,6 +620,7 @@ int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak) {
SSyncNode* pSyncNode = taosAcquireRef(tsNodeRefId, rid); SSyncNode* pSyncNode = taosAcquireRef(tsNodeRefId, rid);
if (pSyncNode == NULL) { if (pSyncNode == NULL) {
taosReleaseRef(tsNodeRefId, rid);
terrno = TSDB_CODE_SYN_INTERNAL_ERROR; terrno = TSDB_CODE_SYN_INTERNAL_ERROR;
return -1; return -1;
} }

View File

@ -14,6 +14,7 @@
*/ */
#include "syncRespMgr.h" #include "syncRespMgr.h"
#include "syncRaftEntry.h"
#include "syncRaftStore.h" #include "syncRaftStore.h"
SSyncRespMgr *syncRespMgrCreate(void *data, int64_t ttl) { SSyncRespMgr *syncRespMgrCreate(void *data, int64_t ttl) {
@ -116,4 +117,59 @@ void syncRespClean(SSyncRespMgr *pObj) {
taosThreadMutexUnlock(&(pObj->mutex)); taosThreadMutexUnlock(&(pObj->mutex));
} }
-void syncRespCleanByTTL(SSyncRespMgr *pObj, int64_t ttl) {}
+void syncRespCleanByTTL(SSyncRespMgr *pObj, int64_t ttl) {
SRespStub *pStub = (SRespStub *)taosHashIterate(pObj->pRespHash, NULL);
int cnt = 0;
SSyncNode *pSyncNode = pObj->data;
SArray *delIndexArray = taosArrayInit(0, sizeof(SyncIndex));
ASSERT(delIndexArray != NULL);
while (pStub) {
size_t len;
void *key = taosHashGetKey(pStub, &len);
SyncIndex *pIndex = (SyncIndex *)key;
int64_t nowMS = taosGetTimestampMs();
if (nowMS - pStub->createTime > ttl) {
taosArrayPush(delIndexArray, pIndex);
cnt++;
SSyncRaftEntry *pEntry = NULL;
int32_t code = 0;
if (pSyncNode->pLogStore != NULL) {
code = pSyncNode->pLogStore->syncLogGetEntry(pSyncNode->pLogStore, *pIndex, &pEntry);
if (code == 0 && pEntry != NULL) {
SFsmCbMeta cbMeta = {0};
cbMeta.index = pEntry->index;
cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(pSyncNode, cbMeta.index);
cbMeta.isWeak = pEntry->isWeak;
cbMeta.code = TSDB_CODE_SYN_TIMEOUT;
cbMeta.state = pSyncNode->state;
cbMeta.seqNum = pEntry->seqNum;
cbMeta.term = pEntry->term;
cbMeta.currentTerm = pSyncNode->pRaftStore->currentTerm;
cbMeta.flag = 0;
SRpcMsg rpcMsg = pStub->rpcMsg;
rpcMsg.pCont = rpcMallocCont(pEntry->dataLen);
memcpy(rpcMsg.pCont, pEntry->data, pEntry->dataLen);
pSyncNode->pFsm->FpCommitCb(pSyncNode->pFsm, &rpcMsg, cbMeta);
syncEntryDestory(pEntry);
}
}
}
pStub = (SRespStub *)taosHashIterate(pObj->pRespHash, pStub);
}
int32_t arraySize = taosArrayGetSize(delIndexArray);
sDebug("vgId:%d, resp clean by ttl, cnt:%d, array-size:%d", pSyncNode->vgId, cnt, arraySize);
for (int32_t i = 0; i < arraySize; ++i) {
SyncIndex *pIndex = taosArrayGet(delIndexArray, i);
taosHashRemove(pObj->pRespHash, pIndex, sizeof(SyncIndex));
}
taosArrayDestroy(delIndexArray);
}
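The new syncRespCleanByTTL follows a collect-then-delete shape: while iterating the pending-response hash it only records the expired keys (and notifies the FSM with a timeout), then removes them in a second pass so the iteration itself is never invalidated. A minimal sketch of that shape, using plain arrays instead of taosHash/SArray:

#include <stdio.h>

// Simplified pending-response table: key (log index) plus creation time.
typedef struct { long index; long createTimeMs; int used; } Stub;

#define N 8

// Remove every entry older than ttlMs; nowMs is passed in for determinism.
static int cleanByTtl(Stub *stubs, long nowMs, long ttlMs) {
  long expired[N];
  int  nExpired = 0;

  // pass 1: iterate and only record which keys expired
  for (int i = 0; i < N; ++i) {
    if (stubs[i].used && nowMs - stubs[i].createTimeMs > ttlMs) {
      expired[nExpired++] = stubs[i].index;
      // this is the point where the real code replays the entry to the FSM
      // with TSDB_CODE_SYN_TIMEOUT before forgetting about it
    }
  }

  // pass 2: delete outside the iteration
  for (int k = 0; k < nExpired; ++k) {
    for (int i = 0; i < N; ++i) {
      if (stubs[i].used && stubs[i].index == expired[k]) stubs[i].used = 0;
    }
  }
  return nExpired;
}

int main(void) {
  Stub stubs[N] = {{1, 0, 1}, {2, 900, 1}, {3, 4000, 1}};
  printf("expired: %d\n", cleanByTtl(stubs, 5000, 2000));  // entries 1 and 2 time out
  return 0;
}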

View File

@ -22,9 +22,11 @@
#include "wal.h" #include "wal.h"
//---------------------------------- //----------------------------------
-static void snapshotReceiverDoStart(SSyncSnapshotReceiver *pReceiver, SyncTerm privateTerm,
-                                    SyncSnapshotSend *pBeginMsg);
+static void snapshotSenderUpdateProgress(SSyncSnapshotSender *pSender, SyncSnapshotRsp *pMsg);
+static void snapshotReceiverDoStart(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pBeginMsg);
+static void snapshotReceiverForceStop(SSyncSnapshotReceiver *pReceiver);
static void snapshotReceiverGotData(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pMsg); static void snapshotReceiverGotData(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pMsg);
static int32_t snapshotReceiverFinish(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pMsg);
//---------------------------------- //----------------------------------
SSyncSnapshotSender *snapshotSenderCreate(SSyncNode *pSyncNode, int32_t replicaIndex) { SSyncSnapshotSender *snapshotSenderCreate(SSyncNode *pSyncNode, int32_t replicaIndex) {
@ -68,7 +70,9 @@ void snapshotSenderDestroy(SSyncSnapshotSender *pSender) {
// close reader // close reader
if (pSender->pReader != NULL) { if (pSender->pReader != NULL) {
int32_t ret = pSender->pSyncNode->pFsm->FpSnapshotStopRead(pSender->pSyncNode->pFsm, pSender->pReader); int32_t ret = pSender->pSyncNode->pFsm->FpSnapshotStopRead(pSender->pSyncNode->pFsm, pSender->pReader);
-    ASSERT(ret == 0);
+    if (ret != 0) {
+      syncNodeErrorLog(pSender->pSyncNode, "stop reader error");
+    }
pSender->pReader = NULL; pSender->pReader = NULL;
} }
@ -79,7 +83,12 @@ void snapshotSenderDestroy(SSyncSnapshotSender *pSender) {
bool snapshotSenderIsStart(SSyncSnapshotSender *pSender) { return pSender->start; } bool snapshotSenderIsStart(SSyncSnapshotSender *pSender) { return pSender->start; }
-// begin send snapshot by snapshot, pReader
+// begin send snapshot by param, snapshot, pReader
//
// action:
// 1. assert reader not start
// 2. update state
// 3. send first snapshot block
int32_t snapshotSenderStart(SSyncSnapshotSender *pSender, SSnapshotParam snapshotParam, SSnapshot snapshot, int32_t snapshotSenderStart(SSyncSnapshotSender *pSender, SSnapshotParam snapshotParam, SSnapshot snapshot,
void *pReader) { void *pReader) {
ASSERT(!snapshotSenderIsStart(pSender)); ASSERT(!snapshotSenderIsStart(pSender));
@ -98,7 +107,7 @@ int32_t snapshotSenderStart(SSyncSnapshotSender *pSender, SSnapshotParam snapsho
// update term // update term
pSender->term = pSender->pSyncNode->pRaftStore->currentTerm; pSender->term = pSender->pSyncNode->pRaftStore->currentTerm;
-  ++(pSender->privateTerm);
+  ++(pSender->privateTerm);  // increase private term
// update state // update state
pSender->finish = false; pSender->finish = false;
@ -114,9 +123,7 @@ int32_t snapshotSenderStart(SSyncSnapshotSender *pSender, SSnapshotParam snapsho
code = pSender->pSyncNode->pLogStore->syncLogGetEntry(pSender->pSyncNode->pLogStore, code = pSender->pSyncNode->pLogStore->syncLogGetEntry(pSender->pSyncNode->pLogStore,
pSender->snapshot.lastConfigIndex, &pEntry); pSender->snapshot.lastConfigIndex, &pEntry);
-    if (code == 0) {
-      ASSERT(pEntry != NULL);
+    if (code == 0 && pEntry != NULL) {
SRpcMsg rpcMsg; SRpcMsg rpcMsg;
syncEntry2OriginalRpc(pEntry, &rpcMsg); syncEntry2OriginalRpc(pEntry, &rpcMsg);
@ -207,6 +214,8 @@ int32_t snapshotSenderStop(SSyncSnapshotSender *pSender, bool finish) {
pSender->start = false; pSender->start = false;
pSender->finish = finish; pSender->finish = finish;
// do not update term, maybe print
// event log // event log
do { do {
char *eventLog = snapshotSender2SimpleStr(pSender, "snapshot sender stop"); char *eventLog = snapshotSender2SimpleStr(pSender, "snapshot sender stop");
@ -243,6 +252,7 @@ int32_t snapshotSend(SSyncSnapshotSender *pSender) {
pMsg->srcId = pSender->pSyncNode->myRaftId; pMsg->srcId = pSender->pSyncNode->myRaftId;
pMsg->destId = (pSender->pSyncNode->replicasId)[pSender->replicaIndex]; pMsg->destId = (pSender->pSyncNode->replicasId)[pSender->replicaIndex];
pMsg->term = pSender->pSyncNode->pRaftStore->currentTerm; pMsg->term = pSender->pSyncNode->pRaftStore->currentTerm;
pMsg->beginIndex = pSender->snapshotParam.start;
pMsg->lastIndex = pSender->snapshot.lastApplyIndex; pMsg->lastIndex = pSender->snapshot.lastApplyIndex;
pMsg->lastTerm = pSender->snapshot.lastApplyTerm; pMsg->lastTerm = pSender->snapshot.lastApplyTerm;
pMsg->lastConfigIndex = pSender->snapshot.lastConfigIndex; pMsg->lastConfigIndex = pSender->snapshot.lastConfigIndex;
@ -281,11 +291,13 @@ int32_t snapshotReSend(SSyncSnapshotSender *pSender) {
pMsg->srcId = pSender->pSyncNode->myRaftId; pMsg->srcId = pSender->pSyncNode->myRaftId;
pMsg->destId = (pSender->pSyncNode->replicasId)[pSender->replicaIndex]; pMsg->destId = (pSender->pSyncNode->replicasId)[pSender->replicaIndex];
pMsg->term = pSender->pSyncNode->pRaftStore->currentTerm; pMsg->term = pSender->pSyncNode->pRaftStore->currentTerm;
pMsg->beginIndex = pSender->snapshotParam.start;
pMsg->lastIndex = pSender->snapshot.lastApplyIndex; pMsg->lastIndex = pSender->snapshot.lastApplyIndex;
pMsg->lastTerm = pSender->snapshot.lastApplyTerm; pMsg->lastTerm = pSender->snapshot.lastApplyTerm;
pMsg->lastConfigIndex = pSender->snapshot.lastConfigIndex; pMsg->lastConfigIndex = pSender->snapshot.lastConfigIndex;
pMsg->lastConfig = pSender->lastConfig; pMsg->lastConfig = pSender->lastConfig;
pMsg->seq = pSender->seq; pMsg->seq = pSender->seq;
pMsg->privateTerm = pSender->privateTerm;
memcpy(pMsg->data, pSender->pCurrentBlock, pSender->blockLen); memcpy(pMsg->data, pSender->pCurrentBlock, pSender->blockLen);
// send msg // send msg
@ -305,6 +317,12 @@ int32_t snapshotReSend(SSyncSnapshotSender *pSender) {
return 0; return 0;
} }
static void snapshotSenderUpdateProgress(SSyncSnapshotSender *pSender, SyncSnapshotRsp *pMsg) {
ASSERT(pMsg->ack == pSender->seq);
pSender->ack = pMsg->ack;
++(pSender->seq);
}
cJSON *snapshotSender2Json(SSyncSnapshotSender *pSender) { cJSON *snapshotSender2Json(SSyncSnapshotSender *pSender) {
char u64buf[128]; char u64buf[128];
cJSON *pRoot = cJSON_CreateObject(); cJSON *pRoot = cJSON_CreateObject();
@ -371,10 +389,11 @@ char *snapshotSender2SimpleStr(SSyncSnapshotSender *pSender, char *event) {
syncUtilU642Addr(destId.addr, host, sizeof(host), &port); syncUtilU642Addr(destId.addr, host, sizeof(host), &port);
snprintf(s, len, snprintf(s, len,
"%s {%p laindex:%ld laterm:%lu lcindex:%ld seq:%d ack:%d finish:%d pterm:%lu replica-index:%d %s:%d}", event, "%s {%p s-param:%ld e-param:%ld laindex:%ld laterm:%lu lcindex:%ld seq:%d ack:%d finish:%d pterm:%lu "
pSender, pSender->snapshot.lastApplyIndex, pSender->snapshot.lastApplyTerm, "replica-index:%d %s:%d}",
pSender->snapshot.lastConfigIndex, pSender->seq, pSender->ack, pSender->finish, pSender->privateTerm, event, pSender, pSender->snapshotParam.start, pSender->snapshotParam.end, pSender->snapshot.lastApplyIndex,
pSender->replicaIndex, host, port); pSender->snapshot.lastApplyTerm, pSender->snapshot.lastConfigIndex, pSender->seq, pSender->ack,
pSender->finish, pSender->privateTerm, pSender->replicaIndex, host, port);
return s; return s;
} }
@ -429,11 +448,10 @@ bool snapshotReceiverIsStart(SSyncSnapshotReceiver *pReceiver) { return pReceive
// static do start by privateTerm, pBeginMsg // static do start by privateTerm, pBeginMsg
// receive first snapshot data // receive first snapshot data
// write first block data // write first block data
-static void snapshotReceiverDoStart(SSyncSnapshotReceiver *pReceiver, SyncTerm privateTerm,
-                                    SyncSnapshotSend *pBeginMsg) {
+static void snapshotReceiverDoStart(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pBeginMsg) {
// update state // update state
pReceiver->term = pReceiver->pSyncNode->pRaftStore->currentTerm; pReceiver->term = pReceiver->pSyncNode->pRaftStore->currentTerm;
-  pReceiver->privateTerm = privateTerm;
+  pReceiver->privateTerm = pBeginMsg->privateTerm;
pReceiver->ack = SYNC_SNAPSHOT_SEQ_BEGIN; pReceiver->ack = SYNC_SNAPSHOT_SEQ_BEGIN;
pReceiver->fromId = pBeginMsg->srcId; pReceiver->fromId = pBeginMsg->srcId;
pReceiver->start = true; pReceiver->start = true;
@ -445,7 +463,7 @@ static void snapshotReceiverDoStart(SSyncSnapshotReceiver *pReceiver, SyncTerm p
pReceiver->snapshotParam.start = pBeginMsg->beginIndex; pReceiver->snapshotParam.start = pBeginMsg->beginIndex;
pReceiver->snapshotParam.end = pBeginMsg->lastIndex; pReceiver->snapshotParam.end = pBeginMsg->lastIndex;
-  // write data
+  // start writer
ASSERT(pReceiver->pWriter == NULL); ASSERT(pReceiver->pWriter == NULL);
int32_t ret = pReceiver->pSyncNode->pFsm->FpSnapshotStartWrite(pReceiver->pSyncNode->pFsm, int32_t ret = pReceiver->pSyncNode->pFsm->FpSnapshotStartWrite(pReceiver->pSyncNode->pFsm,
&(pReceiver->snapshotParam), &(pReceiver->pWriter)); &(pReceiver->snapshotParam), &(pReceiver->pWriter));
@ -481,10 +499,10 @@ static void snapshotReceiverForceStop(SSyncSnapshotReceiver *pReceiver) {
// if receiver receive msg from seq = SYNC_SNAPSHOT_SEQ_BEGIN, start receiver // if receiver receive msg from seq = SYNC_SNAPSHOT_SEQ_BEGIN, start receiver
// if already start, force close, start again // if already start, force close, start again
-int32_t snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncTerm privateTerm, SyncSnapshotSend *pBeginMsg) {
+int32_t snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pBeginMsg) {
if (!snapshotReceiverIsStart(pReceiver)) { if (!snapshotReceiverIsStart(pReceiver)) {
// first start // first start
-    snapshotReceiverDoStart(pReceiver, privateTerm, pBeginMsg);
+    snapshotReceiverDoStart(pReceiver, pBeginMsg);
} else { } else {
// already start // already start
@ -494,12 +512,14 @@ int32_t snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncTerm private
snapshotReceiverForceStop(pReceiver); snapshotReceiverForceStop(pReceiver);
// start again // start again
-    snapshotReceiverDoStart(pReceiver, privateTerm, pBeginMsg);
+    snapshotReceiverDoStart(pReceiver, pBeginMsg);
} }
return 0; return 0;
} }
// just set start = false
// FpSnapshotStopWrite should not be called, assert writer == NULL
int32_t snapshotReceiverStop(SSyncSnapshotReceiver *pReceiver) { int32_t snapshotReceiverStop(SSyncSnapshotReceiver *pReceiver) {
if (pReceiver->pWriter != NULL) { if (pReceiver->pWriter != NULL) {
int32_t ret = int32_t ret =
@ -522,6 +542,7 @@ int32_t snapshotReceiverStop(SSyncSnapshotReceiver *pReceiver) {
return 0; return 0;
} }
// when recv last snapshot block, apply data into snapshot
static int32_t snapshotReceiverFinish(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pMsg) { static int32_t snapshotReceiverFinish(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pMsg) {
ASSERT(pMsg->seq == SYNC_SNAPSHOT_SEQ_END); ASSERT(pMsg->seq == SYNC_SNAPSHOT_SEQ_END);
@ -550,7 +571,7 @@ static int32_t snapshotReceiverFinish(SSyncSnapshotReceiver *pReceiver, SyncSnap
pReceiver->pSyncNode->commitIndex = pReceiver->snapshot.lastApplyIndex; pReceiver->pSyncNode->commitIndex = pReceiver->snapshot.lastApplyIndex;
} }
-  // stop writer
+  // stop writer, apply data
code = pReceiver->pSyncNode->pFsm->FpSnapshotStopWrite(pReceiver->pSyncNode->pFsm, pReceiver->pWriter, true); code = pReceiver->pSyncNode->pFsm->FpSnapshotStopWrite(pReceiver->pSyncNode->pFsm, pReceiver->pWriter, true);
if (code != 0) { if (code != 0) {
syncNodeErrorLog(pReceiver->pSyncNode, "snapshot stop writer true error"); syncNodeErrorLog(pReceiver->pSyncNode, "snapshot stop writer true error");
@ -579,15 +600,20 @@ static int32_t snapshotReceiverFinish(SSyncSnapshotReceiver *pReceiver, SyncSnap
return 0; return 0;
} }
// apply data block
// update progress
static void snapshotReceiverGotData(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pMsg) { static void snapshotReceiverGotData(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pMsg) {
ASSERT(pMsg->seq == pReceiver->ack + 1); ASSERT(pMsg->seq == pReceiver->ack + 1);
if (pReceiver->pWriter != NULL) { if (pReceiver->pWriter != NULL) {
if (pMsg->dataLen > 0) { if (pMsg->dataLen > 0) {
// apply data block
int32_t code = pReceiver->pSyncNode->pFsm->FpSnapshotDoWrite(pReceiver->pSyncNode->pFsm, pReceiver->pWriter, int32_t code = pReceiver->pSyncNode->pFsm->FpSnapshotDoWrite(pReceiver->pSyncNode->pFsm, pReceiver->pWriter,
pMsg->data, pMsg->dataLen); pMsg->data, pMsg->dataLen);
ASSERT(code == 0); ASSERT(code == 0);
} }
// update progress
pReceiver->ack = pMsg->seq; pReceiver->ack = pMsg->seq;
// event log // event log
@ -665,14 +691,23 @@ char *snapshotReceiver2SimpleStr(SSyncSnapshotReceiver *pReceiver, char *event)
uint16_t port; uint16_t port;
syncUtilU642Addr(fromId.addr, host, sizeof(host), &port); syncUtilU642Addr(fromId.addr, host, sizeof(host), &port);
-  snprintf(s, len, "%s {%p start:%d ack:%d term:%lu pterm:%lu from:%s:%d laindex:%ld laterm:%lu lcindex:%ld}", event,
-           pReceiver, pReceiver->start, pReceiver->ack, pReceiver->term, pReceiver->privateTerm, host, port,
-           pReceiver->snapshot.lastApplyIndex, pReceiver->snapshot.lastApplyTerm, pReceiver->snapshot.lastConfigIndex);
+  snprintf(s, len,
+           "%s {%p start:%d ack:%d term:%lu pterm:%lu from:%s:%d s-param:%ld e-param:%ld laindex:%ld laterm:%lu "
+           "lcindex:%ld}",
+           event, pReceiver, pReceiver->start, pReceiver->ack, pReceiver->term, pReceiver->privateTerm, host, port,
+           pReceiver->snapshotParam.start, pReceiver->snapshotParam.end, pReceiver->snapshot.lastApplyIndex,
+           pReceiver->snapshot.lastApplyTerm, pReceiver->snapshot.lastConfigIndex);
return s; return s;
} }
-// receiver do something
+// receiver on message
//
// condition 1, recv SYNC_SNAPSHOT_SEQ_BEGIN, start receiver, update privateTerm
// condition 2, recv SYNC_SNAPSHOT_SEQ_END, finish receiver(apply snapshot data, update commit index, maybe reconfig)
// condition 3, recv SYNC_SNAPSHOT_SEQ_FORCE_CLOSE, force close
// condition 4, got data, update ack
//
int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) { int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) {
// get receiver // get receiver
SSyncSnapshotReceiver *pReceiver = pSyncNode->pNewNodeReceiver; SSyncSnapshotReceiver *pReceiver = pSyncNode->pNewNodeReceiver;
@ -683,11 +718,13 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) {
if (pSyncNode->state == TAOS_SYNC_STATE_FOLLOWER) { if (pSyncNode->state == TAOS_SYNC_STATE_FOLLOWER) {
if (pMsg->term == pSyncNode->pRaftStore->currentTerm) { if (pMsg->term == pSyncNode->pRaftStore->currentTerm) {
if (pMsg->seq == SYNC_SNAPSHOT_SEQ_BEGIN) { if (pMsg->seq == SYNC_SNAPSHOT_SEQ_BEGIN) {
// condition 1
// begin, no data // begin, no data
-        snapshotReceiverStart(pReceiver, pMsg->privateTerm, pMsg);
+        snapshotReceiverStart(pReceiver, pMsg);
needRsp = true; needRsp = true;
} else if (pMsg->seq == SYNC_SNAPSHOT_SEQ_END) { } else if (pMsg->seq == SYNC_SNAPSHOT_SEQ_END) {
// condition 2
// end, finish FSM // end, finish FSM
code = snapshotReceiverFinish(pReceiver, pMsg); code = snapshotReceiverFinish(pReceiver, pMsg);
if (code == 0) { if (code == 0) {
@ -697,7 +734,6 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) {
// maybe update lastconfig // maybe update lastconfig
if (pMsg->lastConfigIndex >= SYNC_INDEX_BEGIN) { if (pMsg->lastConfigIndex >= SYNC_INDEX_BEGIN) {
// int32_t oldReplicaNum = pSyncNode->replicaNum;
SSyncCfg oldSyncCfg = pSyncNode->pRaftCfg->cfg; SSyncCfg oldSyncCfg = pSyncNode->pRaftCfg->cfg;
// update new config myIndex // update new config myIndex
@ -709,11 +745,13 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) {
} }
} else if (pMsg->seq == SYNC_SNAPSHOT_SEQ_FORCE_CLOSE) { } else if (pMsg->seq == SYNC_SNAPSHOT_SEQ_FORCE_CLOSE) {
// condition 3
// force close // force close
snapshotReceiverForceStop(pReceiver); snapshotReceiverForceStop(pReceiver);
needRsp = false; needRsp = false;
} else if (pMsg->seq > SYNC_SNAPSHOT_SEQ_BEGIN && pMsg->seq < SYNC_SNAPSHOT_SEQ_END) { } else if (pMsg->seq > SYNC_SNAPSHOT_SEQ_BEGIN && pMsg->seq < SYNC_SNAPSHOT_SEQ_END) {
// condition 4
// transfering // transfering
if (pMsg->seq == pReceiver->ack + 1) { if (pMsg->seq == pReceiver->ack + 1) {
snapshotReceiverGotData(pReceiver, pMsg); snapshotReceiverGotData(pReceiver, pMsg);
@ -752,6 +790,7 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) {
syncNodeSendMsgById(&(pRspMsg->destId), pSyncNode, &rpcMsg); syncNodeSendMsgById(&(pRspMsg->destId), pSyncNode, &rpcMsg);
syncSnapshotRspDestroy(pRspMsg); syncSnapshotRspDestroy(pRspMsg);
} }
} else { } else {
// error log // error log
do { do {
@ -759,6 +798,8 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) {
syncNodeErrorLog(pSyncNode, eventLog); syncNodeErrorLog(pSyncNode, eventLog);
taosMemoryFree(eventLog); taosMemoryFree(eventLog);
} while (0); } while (0);
return -1;
} }
} else { } else {
// error log // error log
@ -767,19 +808,19 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) {
syncNodeErrorLog(pSyncNode, eventLog); syncNodeErrorLog(pSyncNode, eventLog);
taosMemoryFree(eventLog); taosMemoryFree(eventLog);
} while (0); } while (0);
return -1;
} }
return 0; return 0;
} }
-static void snapshotSenderUpdateProgress(SSyncSnapshotSender *pSender, SyncSnapshotRsp *pMsg) {
-  ASSERT(pMsg->ack == pSender->seq);
-  pSender->ack = pMsg->ack;
-  ++(pSender->seq);
-}
-// sender receives ack, set seq = ack + 1, send msg from seq
-// if ack == SYNC_SNAPSHOT_SEQ_END, stop sender
+// sender on message
+//
+// condition 1 sender receives SYNC_SNAPSHOT_SEQ_END, close sender
+// condition 2 sender receives ack, set seq = ack + 1, send msg from seq
+// condition 3 sender receives error msg, just print error log
+//
int32_t syncNodeOnSnapshotRspCb(SSyncNode *pSyncNode, SyncSnapshotRsp *pMsg) { int32_t syncNodeOnSnapshotRspCb(SSyncNode *pSyncNode, SyncSnapshotRsp *pMsg) {
// if already drop replica, do not process // if already drop replica, do not process
if (!syncNodeInRaftGroup(pSyncNode, &(pMsg->srcId)) && pSyncNode->state == TAOS_SYNC_STATE_LEADER) { if (!syncNodeInRaftGroup(pSyncNode, &(pMsg->srcId)) && pSyncNode->state == TAOS_SYNC_STATE_LEADER) {
@ -794,12 +835,14 @@ int32_t syncNodeOnSnapshotRspCb(SSyncNode *pSyncNode, SyncSnapshotRsp *pMsg) {
// state, term, seq/ack // state, term, seq/ack
if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) { if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) {
if (pMsg->term == pSyncNode->pRaftStore->currentTerm) { if (pMsg->term == pSyncNode->pRaftStore->currentTerm) {
-      // receiver ack is finish, close sender
+      // condition 1
// receive ack is finish, close sender
if (pMsg->ack == SYNC_SNAPSHOT_SEQ_END) { if (pMsg->ack == SYNC_SNAPSHOT_SEQ_END) {
snapshotSenderStop(pSender, true); snapshotSenderStop(pSender, true);
return 0; return 0;
} }
// condition 2
// send next msg // send next msg
if (pMsg->ack == pSender->seq) { if (pMsg->ack == pSender->seq) {
// update sender ack // update sender ack
@ -807,6 +850,7 @@ int32_t syncNodeOnSnapshotRspCb(SSyncNode *pSyncNode, SyncSnapshotRsp *pMsg) {
snapshotSend(pSender); snapshotSend(pSender);
} else if (pMsg->ack == pSender->seq - 1) { } else if (pMsg->ack == pSender->seq - 1) {
// maybe resend
snapshotReSend(pSender); snapshotReSend(pSender);
} else { } else {
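Taken together, snapshotSenderUpdateProgress and syncNodeOnSnapshotRspCb implement a simple stop-and-wait rule: an ack equal to the current seq advances the window and triggers the next block, an ack one behind triggers a resend, and SYNC_SNAPSHOT_SEQ_END stops the sender. A tiny sketch of that rule with a hypothetical sender struct:

#include <stdio.h>

typedef struct { int seq; int ack; } Sender;

// Returns what the sender should do for an incoming ack:
// 1 = send next block, 2 = resend current block, 0 = finished, -1 = unexpected ack.
static int onAck(Sender *s, int ack, int seqEnd) {
  if (ack == seqEnd) return 0;      // receiver applied the whole snapshot
  if (ack == s->seq) {              // progress: advance the window
    s->ack = ack;
    s->seq = ack + 1;
    return 1;
  }
  if (ack == s->seq - 1) return 2;  // stale/duplicate reply: resend the current seq
  return -1;
}

int main(void) {
  Sender s = {.seq = 3, .ack = 2};
  printf("%d\n", onAck(&s, 3, 100));    // 1: advance and send seq 4
  printf("%d\n", onAck(&s, 3, 100));    // 2: stale ack, resend seq 4
  printf("%d\n", onAck(&s, 100, 100));  // 0: receiver finished
  return 0;
}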

View File

@ -8,7 +8,13 @@ void print(SHashObj *pNextIndex) {
printf("----------------\n"); printf("----------------\n");
uint64_t *p = (uint64_t *)taosHashIterate(pNextIndex, NULL); uint64_t *p = (uint64_t *)taosHashIterate(pNextIndex, NULL);
while (p) { while (p) {
printf("%lu \n", *p);
size_t len;
void* key = taosHashGetKey(p, &len);
SRaftId *pRaftId = (SRaftId*)key;
printf("key:<%lu, %d>, value:%lu \n", pRaftId->addr, pRaftId->vgId, *p);
p = (uint64_t *)taosHashIterate(pNextIndex, p); p = (uint64_t *)taosHashIterate(pNextIndex, p);
} }
} }

View File

@ -73,9 +73,15 @@ void syncRespMgrGetAndDelTest(uint64_t i) {
} }
} }
SSyncNode *createSyncNode() {
SSyncNode *pSyncNode = (SSyncNode*)taosMemoryMalloc(sizeof(SSyncNode));
memset(pSyncNode, 0, sizeof(SSyncNode));
return pSyncNode;
}
void test1() { void test1() {
printf("------- test1 ---------\n"); printf("------- test1 ---------\n");
-  pMgr = syncRespMgrCreate(NULL, 0);
+  pMgr = syncRespMgrCreate(createSyncNode(), 0);
assert(pMgr != NULL); assert(pMgr != NULL);
syncRespMgrInsert(10); syncRespMgrInsert(10);
@ -100,7 +106,7 @@ void test1() {
void test2() { void test2() {
printf("------- test2 ---------\n"); printf("------- test2 ---------\n");
-  pMgr = syncRespMgrCreate(NULL, 0);
+  pMgr = syncRespMgrCreate(createSyncNode(), 0);
assert(pMgr != NULL); assert(pMgr != NULL);
syncRespMgrInsert(10); syncRespMgrInsert(10);
@ -117,7 +123,7 @@ void test2() {
void test3() { void test3() {
printf("------- test3 ---------\n"); printf("------- test3 ---------\n");
-  pMgr = syncRespMgrCreate(NULL, 0);
+  pMgr = syncRespMgrCreate(createSyncNode(), 0);
assert(pMgr != NULL); assert(pMgr != NULL);
syncRespMgrInsert(10); syncRespMgrInsert(10);
@ -132,13 +138,34 @@ void test3() {
syncRespMgrDestroy(pMgr); syncRespMgrDestroy(pMgr);
} }
void test4() {
printf("------- test4 ---------\n");
pMgr = syncRespMgrCreate(createSyncNode(), 2);
assert(pMgr != NULL);
syncRespMgrInsert(5);
syncRespMgrPrint();
taosMsleep(3000);
syncRespMgrInsert(3);
syncRespMgrPrint();
printf("====== after clean ttl \n");
syncRespClean(pMgr);
syncRespMgrPrint();
syncRespMgrDestroy(pMgr);
}
int main() { int main() {
tsAsyncLog = 0; tsAsyncLog = 0;
-  sDebugFlag = DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE;
+  sDebugFlag = DEBUG_DEBUG + DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE;
logTest(); logTest();
test1(); test1();
test2(); test2();
test3(); test3();
test4();
return 0; return 0;
} }

View File

@ -321,7 +321,7 @@ static int tdbBtreeOpenImpl(SBTree *pBt) {
{ {
// 1. TODO: Search the main DB to check if the DB exists // 1. TODO: Search the main DB to check if the DB exists
-    ret = tdbPagerOpenDB(pBt->pPager, &pgno, true);
+    ret = tdbPagerOpenDB(pBt->pPager, &pgno, true, pBt);
ASSERT(ret == 0); ASSERT(ret == 0);
} }
@ -721,7 +721,8 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx
int szNewCell; int szNewCell;
SPgno pgno; SPgno pgno;
pgno = TDB_PAGE_PGNO(pNews[iNew]); pgno = TDB_PAGE_PGNO(pNews[iNew]);
-        tdbBtreeEncodeCell(pParent, cd.pKey, cd.kLen, (void *)&pgno, sizeof(SPgno), pNewCell, &szNewCell, pTxn, pBt);
+        tdbBtreeEncodeCell(pParent, cd.pKey, cd.kLen, (void *)&pgno, sizeof(SPgno), pNewCell, &szNewCell, pTxn,
+                           pBt);
tdbPageInsertCell(pParent, sIdx++, pNewCell, szNewCell, 0); tdbPageInsertCell(pParent, sIdx++, pNewCell, szNewCell, 0);
tdbOsFree(pNewCell); tdbOsFree(pNewCell);
} }
@ -1142,7 +1143,8 @@ static int tdbBtreeEncodeCell(SPage *pPage, const void *pKey, int kLen, const vo
return 0; return 0;
} }
-static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader, SCellDecoder *pDecoder, TXN *pTxn, SBTree *pBt) {
+static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader, SCellDecoder *pDecoder, TXN *pTxn,
+                                 SBTree *pBt) {
int ret = 0; int ret = 0;
int nPayload; int nPayload;
int maxLocal = pPage->maxLocal; int maxLocal = pPage->maxLocal;

View File

@ -98,7 +98,7 @@ int tdbPagerClose(SPager *pPager) {
return 0; return 0;
} }
-int tdbPagerOpenDB(SPager *pPager, SPgno *ppgno, bool toCreate) {
+int tdbPagerOpenDB(SPager *pPager, SPgno *ppgno, bool toCreate, SBTree *pBt) {
SPgno pgno; SPgno pgno;
SPage *pPage; SPage *pPage;
int ret; int ret;
@ -114,19 +114,35 @@ int tdbPagerOpenDB(SPager *pPager, SPgno *ppgno, bool toCreate) {
// pgno = 0; // pgno = 0;
} }
-  // if (pgno == 0 && toCreate) {
+  if (pgno == 0 && toCreate) {
// allocate a new child page
TXN txn;
tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, 0);
pPager->inTran = 1;
SBtreeInitPageArg zArg;
zArg.flags = 0x1 | 0x2; // root leaf node;
zArg.pBt = pBt;
ret = tdbPagerFetchPage(pPager, &pgno, &pPage, tdbBtreeInitPage, &zArg, &txn);
if (ret < 0) {
return -1;
}
// ret = tdbPagerAllocPage(pPager, &pPage, &pgno); // ret = tdbPagerAllocPage(pPager, &pPage, &pgno);
// if (ret < 0) { // if (ret < 0) {
// return -1; // return -1;
//} //}
-  // // TODO: Need to zero the page
-  // ret = tdbPagerWrite(pPager, pPage);
-  // if (ret < 0) {
-  //   return -1;
-  // }
-  // }
+    // TODO: Need to zero the page
+    ret = tdbPagerWrite(pPager, pPage);
+    if (ret < 0) {
+      return -1;
+    }
+    tdbTxnClose(&txn);
+  }
*ppgno = pgno; *ppgno = pgno;
return 0; return 0;

View File

@ -184,7 +184,7 @@ int tdbBtcUpsert(SBTC *pBtc, const void *pKey, int kLen, const void *pData, int
int tdbPagerOpen(SPCache *pCache, const char *fileName, SPager **ppPager); int tdbPagerOpen(SPCache *pCache, const char *fileName, SPager **ppPager);
int tdbPagerClose(SPager *pPager); int tdbPagerClose(SPager *pPager);
-int tdbPagerOpenDB(SPager *pPager, SPgno *ppgno, bool toCreate);
+int tdbPagerOpenDB(SPager *pPager, SPgno *ppgno, bool toCreate, SBTree *pBt);
int tdbPagerWrite(SPager *pPager, SPage *pPage); int tdbPagerWrite(SPager *pPager, SPage *pPage);
int tdbPagerBegin(SPager *pPager, TXN *pTxn); int tdbPagerBegin(SPager *pPager, TXN *pTxn);
int tdbPagerCommit(SPager *pPager, TXN *pTxn); int tdbPagerCommit(SPager *pPager, TXN *pTxn);
@ -318,15 +318,14 @@ static inline int tdbTryLockPage(tdb_spinlock_t *pLock) {
#define TDB_PAGE_USABLE_SIZE(pPage) ((u8 *)(pPage)->pPageFtr - (pPage)->pCellIdx) #define TDB_PAGE_USABLE_SIZE(pPage) ((u8 *)(pPage)->pPageFtr - (pPage)->pCellIdx)
#define TDB_PAGE_FREE_SIZE(pPage) (*(pPage)->pPageMethods->getFreeBytes)(pPage) #define TDB_PAGE_FREE_SIZE(pPage) (*(pPage)->pPageMethods->getFreeBytes)(pPage)
#define TDB_PAGE_PGNO(pPage) ((pPage)->pgid.pgno) #define TDB_PAGE_PGNO(pPage) ((pPage)->pgid.pgno)
-#define TDB_BYTES_CELL_TAKEN(pPage, pCell) ((*(pPage)->xCellSize)(pPage, pCell, 0, NULL, NULL) + (pPage)->pPageMethods->szOffset)
+#define TDB_BYTES_CELL_TAKEN(pPage, pCell) \
+  ((*(pPage)->xCellSize)(pPage, pCell, 0, NULL, NULL) + (pPage)->pPageMethods->szOffset)
#define TDB_PAGE_OFFSET_SIZE(pPage) ((pPage)->pPageMethods->szOffset) #define TDB_PAGE_OFFSET_SIZE(pPage) ((pPage)->pPageMethods->szOffset)
int tdbPageCreate(int pageSize, SPage **ppPage, void *(*xMalloc)(void *, size_t), void *arg); int tdbPageCreate(int pageSize, SPage **ppPage, void *(*xMalloc)(void *, size_t), void *arg);
int tdbPageDestroy(SPage *pPage, void (*xFree)(void *arg, void *ptr), void *arg); int tdbPageDestroy(SPage *pPage, void (*xFree)(void *arg, void *ptr), void *arg);
-void tdbPageZero(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell *, int,
-                                                            TXN *, SBTree *pBt));
-void tdbPageInit(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell *, int,
-                                                            TXN *, SBTree *pBt));
+void tdbPageZero(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell *, int, TXN *, SBTree *pBt));
+void tdbPageInit(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell *, int, TXN *, SBTree *pBt));
int tdbPageInsertCell(SPage *pPage, int idx, SCell *pCell, int szCell, u8 asOvfl); int tdbPageInsertCell(SPage *pPage, int idx, SCell *pCell, int szCell, u8 asOvfl);
int tdbPageDropCell(SPage *pPage, int idx, TXN *pTxn, SBTree *pBt); int tdbPageDropCell(SPage *pPage, int idx, TXN *pTxn, SBTree *pBt);
int tdbPageUpdateCell(SPage *pPage, int idx, SCell *pCell, int szCell, TXN *pTxn, SBTree *pBt); int tdbPageUpdateCell(SPage *pPage, int idx, SCell *pCell, int szCell, TXN *pTxn, SBTree *pBt);

View File

@ -402,10 +402,11 @@ static void uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) {
} else { } else {
pHead->msgType = pMsg->msgType; pHead->msgType = pMsg->msgType;
// set up resp msg type // set up resp msg type
-      if (pHead->msgType == 0 && transMsgLenFromCont(pMsg->contLen) == sizeof(STransMsgHead))
+      if (pHead->msgType == 0 && transMsgLenFromCont(pMsg->contLen) == sizeof(STransMsgHead)) {
pHead->msgType = pConn->inType + 1; pHead->msgType = pConn->inType + 1;
} }
} }
}
pHead->release = smsg->type == Release ? 1 : 0; pHead->release = smsg->type == Release ? 1 : 0;
pHead->code = htonl(pMsg->code); pHead->code = htonl(pMsg->code);

View File

@ -687,8 +687,8 @@ class TDTestCase:
# to_json() # to_json()
tdSql.query("select to_json('{\"abc\":123}') from jsons1_1") tdSql.query("select to_json('{\"abc\":123}') from jsons1_1")
tdSql.checkRows(2) tdSql.checkRows(2)
-        # tdSql.checkData(0, 0, '{"abc":123}')
-        # tdSql.checkData(1, 0, '{"abc":123}')
+        tdSql.checkData(0, 0, '{"abc":123}')
+        tdSql.checkData(1, 0, '{"abc":123}')
tdSql.query("select to_json('null') from jsons1_1") tdSql.query("select to_json('null') from jsons1_1")
tdSql.checkRows(2) tdSql.checkRows(2)
tdSql.checkData(0, 0, 'null') tdSql.checkData(0, 0, 'null')

View File

@ -3,7 +3,8 @@ from util.log import *
from util.cases import * from util.cases import *
from util.sql import * from util.sql import *
import numpy as np import numpy as np
import time
from datetime import datetime
class TDTestCase: class TDTestCase:
def init(self, conn, logSql): def init(self, conn, logSql):
@ -13,205 +14,209 @@ class TDTestCase:
self.rowNum = 10 self.rowNum = 10
self.ts = 1537146000000 # 2018-9-17 09:00:00.000 self.ts = 1537146000000 # 2018-9-17 09:00:00.000
self.ts_str = [
'2020-1-1',
'2020-2-1 00:00:01',
'2020-3-1 00:00:00.001',
'2020-4-1 00:00:00.001002',
'2020-5-1 00:00:00.001002001'
]
self.db_param_precision = ['ms','us','ns']
self.time_unit = ['1w','1d','1h','1m','1s','1a','1u']
self.error_unit = ['1b','2w','2d','2h','2m','2s','2a','2u','1c','#1']
self.ntbname = 'ntb'
self.stbname = 'stb'
self.ctbname = 'ctb'
def get_ms_timestamp(self,ts_str):
_ts_str = ts_str
if " " in ts_str:
p = ts_str.split(" ")[1]
if len(p) > 15 :
_ts_str = ts_str[:-3]
if ':' in _ts_str and '.' in _ts_str:
timestamp = datetime.strptime(_ts_str, "%Y-%m-%d %H:%M:%S.%f")
date_time = int(int(time.mktime(timestamp.timetuple()))*1000 + timestamp.microsecond/1000)
elif ':' in _ts_str and '.' not in _ts_str:
timestamp = datetime.strptime(_ts_str, "%Y-%m-%d %H:%M:%S")
date_time = int(int(time.mktime(timestamp.timetuple()))*1000 + timestamp.microsecond/1000)
else:
timestamp = datetime.strptime(_ts_str, "%Y-%m-%d")
date_time = int(int(time.mktime(timestamp.timetuple()))*1000 + timestamp.microsecond/1000)
return date_time
def get_us_timestamp(self,ts_str):
_ts = self.get_ms_timestamp(ts_str) * 1000
if " " in ts_str:
p = ts_str.split(" ")[1]
if len(p) > 12:
us_ts = p[12:15]
_ts += int(us_ts)
return _ts
def get_ns_timestamp(self,ts_str):
_ts = self.get_us_timestamp(ts_str) *1000
if " " in ts_str:
p = ts_str.split(" ")[1]
if len(p) > 15:
us_ts = p[15:]
_ts += int(us_ts)
return _ts
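# Editorial sketch (not part of the original commit): the three helpers above parse the
# sample strings relative to the local timezone via time.mktime and then append any
# sub-millisecond digits by hand, e.g. (assumed round-trips):
#   get_ms_timestamp('2020-3-1 00:00:00.001')        -> epoch ms ending in ...001
#   get_us_timestamp('2020-4-1 00:00:00.001002')      -> epoch us ending in ...001002
#   get_ns_timestamp('2020-5-1 00:00:00.001002001')   -> epoch ns ending in ...001002001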
def time_transform(self,ts_str,precision):
date_time = []
if precision == 'ms':
for i in ts_str:
date_time.append(self.get_ms_timestamp(i))
elif precision == 'us':
for i in ts_str:
date_time.append(self.get_us_timestamp(i))
elif precision == 'ns':
for i in ts_str:
date_time.append(self.get_us_timestamp(i))
return date_time
def check_ms_timestamp(self,unit,date_time):
if unit.lower() == '1a':
for i in range(len(self.ts_str)):
ts_result = self.get_ms_timestamp(str(tdSql.queryResult[i][0]))
tdSql.checkEqual(ts_result,int(date_time[i]))
elif unit.lower() == '1s':
for i in range(len(self.ts_str)):
ts_result = self.get_ms_timestamp(str(tdSql.queryResult[i][0]))
tdSql.checkEqual(ts_result,int(date_time[i]/1000)*1000)
elif unit.lower() == '1m':
for i in range(len(self.ts_str)):
ts_result = self.get_ms_timestamp(str(tdSql.queryResult[i][0]))
tdSql.checkEqual(ts_result,int(date_time[i]/1000/60)*60*1000)
elif unit.lower() == '1h':
for i in range(len(self.ts_str)):
ts_result = self.get_ms_timestamp(str(tdSql.queryResult[i][0]))
tdSql.checkEqual(ts_result,int(date_time[i]/1000/60/60)*60*60*1000 )
elif unit.lower() == '1d':
for i in range(len(self.ts_str)):
ts_result = self.get_ms_timestamp(str(tdSql.queryResult[i][0]))
tdSql.checkEqual(ts_result,int(date_time[i]/1000/60/60/24)*24*60*60*1000)
elif unit.lower() == '1w':
for i in range(len(self.ts_str)):
ts_result = self.get_ms_timestamp(str(tdSql.queryResult[i][0]))
tdSql.checkEqual(ts_result,int(date_time[i]/1000/60/60/24/7)*7*24*60*60*1000)
def check_us_timestamp(self,unit,date_time):
if unit.lower() == '1u':
for i in range(len(self.ts_str)):
ts_result = self.get_us_timestamp(str(tdSql.queryResult[i][0]))
tdSql.checkEqual(ts_result,int(date_time[i]))
elif unit.lower() == '1a':
for i in range(len(self.ts_str)):
ts_result = self.get_us_timestamp(str(tdSql.queryResult[i][0]))
tdSql.checkEqual(ts_result,int(date_time[i]/1000)*1000)
elif unit.lower() == '1s':
for i in range(len(self.ts_str)):
ts_result = self.get_us_timestamp(str(tdSql.queryResult[i][0]))
tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000)*1000*1000)
elif unit.lower() == '1m':
for i in range(len(self.ts_str)):
ts_result = self.get_us_timestamp(str(tdSql.queryResult[i][0]))
tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000/60)*60*1000*1000)
elif unit.lower() == '1h':
for i in range(len(self.ts_str)):
ts_result = self.get_us_timestamp(str(tdSql.queryResult[i][0]))
tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000/60/60)*60*60*1000*1000 )
elif unit.lower() == '1d':
for i in range(len(self.ts_str)):
ts_result = self.get_us_timestamp(str(tdSql.queryResult[i][0]))
tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000/60/60/24)*24*60*60*1000*1000 )
elif unit.lower() == '1w':
for i in range(len(self.ts_str)):
ts_result = self.get_us_timestamp(str(tdSql.queryResult[i][0]))
tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000/60/60/24/7)*7*24*60*60*1000*1000)
def check_ns_timestamp(self,unit,date_time):
if unit.lower() == '1u':
for i in range(len(self.ts_str)):
tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000)*1000)
elif unit.lower() == '1a':
for i in range(len(self.ts_str)):
tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000)*1000*1000)
elif unit.lower() == '1s':
for i in range(len(self.ts_str)):
tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000)*1000*1000*1000)
elif unit.lower() == '1m':
for i in range(len(self.ts_str)):
tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/60)*60*1000*1000*1000)
elif unit.lower() == '1h':
for i in range(len(self.ts_str)):
tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/60/60)*60*60*1000*1000*1000 )
elif unit.lower() == '1d':
for i in range(len(self.ts_str)):
tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/60/60/24)*24*60*60*1000*1000*1000 )
elif unit.lower() == '1w':
for i in range(len(self.ts_str)):
tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/60/60/24/7)*7*24*60*60*1000*1000*1000)
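# Editorial sketch (not part of the original commit): every branch above verifies the same
# flooring rule, i.e. TIMETRUNCATE is expected to round the raw epoch value down to a unit
# boundary. A hypothetical helper expressing that rule:
#   def floor_to_unit(raw, unit_len):   # unit_len expressed in the same precision as raw
#       return (raw // unit_len) * unit_len
# e.g. floor_to_unit(date_time[i], 60 * 1000) reproduces the '1m' case in ms precision.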
def data_check(self,date_time,precision,tb_type):
for unit in self.time_unit:
if unit.lower() == '1u' and precision.lower() == 'ms':
if tb_type.lower() == 'ntb':
tdSql.error(f'select timetruncate(ts,{unit}) from {self.ntbname}')
elif tb_type.lower() == 'ctb':
tdSql.error(f'select timetruncate(ts,{unit}) from {self.ctbname}')
elif tb_type.lower() == 'stb':
tdSql.error(f'select timetruncate(ts,{unit}) from {self.stbname}')
elif precision.lower() == 'ms':
if tb_type.lower() == 'ntb':
tdSql.query(f'select timetruncate(ts,{unit}) from {self.ntbname}')
elif tb_type.lower() == 'ctb':
tdSql.query(f'select timetruncate(ts,{unit}) from {self.ctbname}')
elif tb_type.lower() == 'stb':
tdSql.query(f'select timetruncate(ts,{unit}) from {self.stbname}')
tdSql.checkRows(len(self.ts_str))
self.check_ms_timestamp(unit,date_time)
elif precision.lower() == 'us':
if tb_type.lower() == 'ntb':
tdSql.query(f'select timetruncate(ts,{unit}) from {self.ntbname}')
elif tb_type.lower() == 'ctb':
tdSql.query(f'select timetruncate(ts,{unit}) from {self.ctbname}')
elif tb_type.lower() == 'stb':
tdSql.query(f'select timetruncate(ts,{unit}) from {self.stbname}')
tdSql.checkRows(len(self.ts_str))
self.check_us_timestamp(unit,date_time)
elif precision.lower() == 'ns':
if tb_type.lower() == 'ntb':
tdSql.query(f'select timetruncate(ts,{unit}) from {self.ntbname}')
elif tb_type.lower() == 'ctb':
tdSql.query(f'select timetruncate(ts,{unit}) from {self.ctbname}')
elif tb_type.lower() == 'stb':
tdSql.query(f'select timetruncate(ts,{unit}) from {self.stbname}')
tdSql.checkRows(len(self.ts_str))
self.check_ns_timestamp(unit,date_time)
for unit in self.error_unit:
if tb_type.lower() == 'ntb':
tdSql.error(f'select timetruncate(ts,{unit}) from {self.ntbname}')
elif tb_type.lower() == 'ctb':
tdSql.error(f'select timetruncate(ts,{unit}) from {self.ctbname}')
elif tb_type.lower() == 'stb':
tdSql.error(f'select timetruncate(ts,{unit}) from {self.stbname}')
def function_check_ntb(self):
for precision in self.db_param_precision:
tdSql.execute('drop database if exists db')
tdSql.execute(f'create database db precision "{precision}"')
tdSql.execute('use db')
tdSql.execute(f'create table {self.ntbname} (ts timestamp,c0 int)')
for ts in self.ts_str:
tdSql.execute(f'insert into {self.ntbname} values("{ts}",1)')
date_time = self.time_transform(self.ts_str,precision)
self.data_check(date_time,precision,'ntb')
def function_check_stb(self):
for precision in self.db_param_precision:
tdSql.execute('drop database if exists db')
tdSql.execute(f'create database db precision "{precision}"')
tdSql.execute('use db')
tdSql.execute(f'create table {self.stbname} (ts timestamp,c0 int) tags(t0 int)')
tdSql.execute(f'create table {self.ctbname} using {self.stbname} tags(1)')
for ts in self.ts_str:
tdSql.execute(f'insert into {self.ctbname} values("{ts}",1)')
date_time = self.time_transform(self.ts_str,precision)
self.data_check(date_time,precision,'ctb')
self.data_check(date_time,precision,'stb')
def run(self): def run(self):
tdSql.prepare() self.function_check_ntb()
self.function_check_stb()
intData = []
floatData = []
tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
tdSql.execute("create table stb_1 using stb tags('beijing')")
tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''')
for i in range(self.rowNum):
tdSql.execute("insert into ntb values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
intData.append(i + 1)
floatData.append(i + 0.1)
for i in range(self.rowNum):
tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
intData.append(i + 1)
floatData.append(i + 0.1)
tdSql.query("select timetruncate(1,1d) from ntb")
tdSql.checkRows(10)
tdSql.error("select timetruncate(1,1u) from ntb")
#tdSql.checkRows(10)
tdSql.query("select timetruncate(1,1a) from ntb")
tdSql.checkRows(10)
tdSql.query("select timetruncate(1,1m) from ntb")
tdSql.checkRows(10)
tdSql.query("select timetruncate(1,1h) from ntb")
tdSql.checkRows(10)
tdSql.query("select timetruncate(ts,1d) from ntb")
tdSql.checkRows(10)
tdSql.checkData(0,0,"2018-09-17 08:00:00.000")
tdSql.query("select timetruncate(ts,1h) from ntb")
tdSql.checkRows(10)
tdSql.checkData(0,0,"2018-09-17 09:00:00.000")
tdSql.query("select timetruncate(ts,1m) from ntb")
tdSql.checkRows(10)
tdSql.checkData(0,0,"2018-09-17 09:00:00.000")
tdSql.query("select timetruncate(ts,1s) from ntb")
tdSql.checkRows(10)
tdSql.checkData(0,0,"2018-09-17 09:00:00.000")
tdSql.query("select timetruncate(ts,1a) from ntb")
tdSql.checkRows(10)
tdSql.checkData(0,0,"2018-09-17 09:00:00.000")
tdSql.checkData(1,0,"2018-09-17 09:00:00.001")
tdSql.checkData(2,0,"2018-09-17 09:00:00.002")
tdSql.checkData(3,0,"2018-09-17 09:00:00.003")
tdSql.checkData(4,0,"2018-09-17 09:00:00.004")
tdSql.checkData(5,0,"2018-09-17 09:00:00.005")
tdSql.checkData(6,0,"2018-09-17 09:00:00.006")
tdSql.checkData(7,0,"2018-09-17 09:00:00.007")
tdSql.checkData(8,0,"2018-09-17 09:00:00.008")
tdSql.checkData(9,0,"2018-09-17 09:00:00.009")
# tdSql.query("select timetruncate(ts,1u) from ntb")
# tdSql.checkRows(10)
# tdSql.checkData(0,0,"2018-09-17 09:00:00.000000")
# tdSql.checkData(1,0,"2018-09-17 09:00:00.001000")
# tdSql.checkData(2,0,"2018-09-17 09:00:00.002000")
# tdSql.checkData(3,0,"2018-09-17 09:00:00.003000")
# tdSql.checkData(4,0,"2018-09-17 09:00:00.004000")
# tdSql.checkData(5,0,"2018-09-17 09:00:00.005000")
# tdSql.checkData(6,0,"2018-09-17 09:00:00.006000")
# tdSql.checkData(7,0,"2018-09-17 09:00:00.007000")
# tdSql.checkData(8,0,"2018-09-17 09:00:00.008000")
# tdSql.checkData(9,0,"2018-09-17 09:00:00.009000")
# tdSql.query("select timetruncate(ts,1b) from ntb")
# tdSql.checkRows(10)
# tdSql.checkData(0,0,"2018-09-17 09:00:00.000000000")
# tdSql.checkData(1,0,"2018-09-17 09:00:00.001000000")
# tdSql.checkData(2,0,"2018-09-17 09:00:00.002000000")
# tdSql.checkData(3,0,"2018-09-17 09:00:00.003000000")
# tdSql.checkData(4,0,"2018-09-17 09:00:00.004000000")
# tdSql.checkData(5,0,"2018-09-17 09:00:00.005000000")
# tdSql.checkData(6,0,"2018-09-17 09:00:00.006000000")
# tdSql.checkData(7,0,"2018-09-17 09:00:00.007000000")
# tdSql.checkData(8,0,"2018-09-17 09:00:00.008000000")
# tdSql.checkData(9,0,"2018-09-17 09:00:00.009000000")
tdSql.query("select timetruncate(1,1d) from stb")
tdSql.checkRows(10)
tdSql.error("select timetruncate(1,1u) from stb")
#tdSql.checkRows(10)
tdSql.query("select timetruncate(1,1a) from stb")
tdSql.checkRows(10)
tdSql.query("select timetruncate(1,1m) from stb")
tdSql.checkRows(10)
tdSql.query("select timetruncate(1,1h) from stb")
tdSql.checkRows(10)
tdSql.query("select timetruncate(ts,1d) from stb")
tdSql.checkRows(10)
tdSql.checkData(0,0,"2018-09-17 08:00:00.000")
tdSql.query("select timetruncate(ts,1h) from stb")
tdSql.checkRows(10)
tdSql.checkData(0,0,"2018-09-17 09:00:00.000")
tdSql.query("select timetruncate(ts,1m) from stb")
tdSql.checkRows(10)
tdSql.checkData(0,0,"2018-09-17 09:00:00.000")
tdSql.query("select timetruncate(ts,1s) from stb")
tdSql.checkRows(10)
tdSql.checkData(0,0,"2018-09-17 09:00:00.000")
tdSql.query("select timetruncate(ts,1a) from stb")
tdSql.checkRows(10)
tdSql.checkData(0,0,"2018-09-17 09:00:00.000")
tdSql.checkData(1,0,"2018-09-17 09:00:00.001")
tdSql.checkData(2,0,"2018-09-17 09:00:00.002")
tdSql.checkData(3,0,"2018-09-17 09:00:00.003")
tdSql.checkData(4,0,"2018-09-17 09:00:00.004")
tdSql.checkData(5,0,"2018-09-17 09:00:00.005")
tdSql.checkData(6,0,"2018-09-17 09:00:00.006")
tdSql.checkData(7,0,"2018-09-17 09:00:00.007")
tdSql.checkData(8,0,"2018-09-17 09:00:00.008")
tdSql.checkData(9,0,"2018-09-17 09:00:00.009")
# tdSql.query("select timetruncate(ts,1u) from stb")
# tdSql.checkRows(10)
# tdSql.checkData(0,0,"2018-09-17 09:00:00.000000")
# tdSql.checkData(1,0,"2018-09-17 09:00:00.001000")
# tdSql.checkData(2,0,"2018-09-17 09:00:00.002000")
# tdSql.checkData(3,0,"2018-09-17 09:00:00.003000")
# tdSql.checkData(4,0,"2018-09-17 09:00:00.004000")
# tdSql.checkData(5,0,"2018-09-17 09:00:00.005000")
# tdSql.checkData(6,0,"2018-09-17 09:00:00.006000")
# tdSql.checkData(7,0,"2018-09-17 09:00:00.007000")
# tdSql.checkData(8,0,"2018-09-17 09:00:00.008000")
# tdSql.checkData(9,0,"2018-09-17 09:00:00.009000")
# tdSql.query("select timetruncate(ts,1b) from stb")
# tdSql.checkRows(10)
# tdSql.checkData(0,0,"2018-09-17 09:00:00.000000000")
# tdSql.checkData(1,0,"2018-09-17 09:00:00.001000000")
# tdSql.checkData(2,0,"2018-09-17 09:00:00.002000000")
# tdSql.checkData(3,0,"2018-09-17 09:00:00.003000000")
# tdSql.checkData(4,0,"2018-09-17 09:00:00.004000000")
# tdSql.checkData(5,0,"2018-09-17 09:00:00.005000000")
# tdSql.checkData(6,0,"2018-09-17 09:00:00.006000000")
# tdSql.checkData(7,0,"2018-09-17 09:00:00.007000000")
# tdSql.checkData(8,0,"2018-09-17 09:00:00.008000000")
# tdSql.checkData(9,0,"2018-09-17 09:00:00.009000000")
tdSql.query("select timetruncate(1,1d) from stb_1")
tdSql.checkRows(10)
tdSql.error("select timetruncate(1,1u) from stb_1")
#tdSql.checkRows(10)
tdSql.query("select timetruncate(1,1a) from stb_1")
tdSql.checkRows(10)
tdSql.query("select timetruncate(1,1m) from stb_1")
tdSql.checkRows(10)
tdSql.query("select timetruncate(1,1h) from stb_1")
tdSql.checkRows(10)
tdSql.query("select timetruncate(ts,1d) from stb_1")
tdSql.checkRows(10)
tdSql.checkData(0,0,"2018-09-17 08:00:00.000")
tdSql.query("select timetruncate(ts,1h) from stb_1")
tdSql.checkRows(10)
tdSql.checkData(0,0,"2018-09-17 09:00:00.000")
tdSql.query("select timetruncate(ts,1m) from stb_1")
tdSql.checkRows(10)
tdSql.checkData(0,0,"2018-09-17 09:00:00.000")
tdSql.query("select timetruncate(ts,1s) from stb_1")
tdSql.checkRows(10)
tdSql.checkData(0,0,"2018-09-17 09:00:00.000")
tdSql.query("select timetruncate(ts,1a) from stb_1")
tdSql.checkRows(10)
tdSql.checkData(0,0,"2018-09-17 09:00:00.000")
tdSql.checkData(1,0,"2018-09-17 09:00:00.001")
tdSql.checkData(2,0,"2018-09-17 09:00:00.002")
tdSql.checkData(3,0,"2018-09-17 09:00:00.003")
tdSql.checkData(4,0,"2018-09-17 09:00:00.004")
tdSql.checkData(5,0,"2018-09-17 09:00:00.005")
tdSql.checkData(6,0,"2018-09-17 09:00:00.006")
tdSql.checkData(7,0,"2018-09-17 09:00:00.007")
tdSql.checkData(8,0,"2018-09-17 09:00:00.008")
tdSql.checkData(9,0,"2018-09-17 09:00:00.009")
# tdSql.query("select timetruncate(ts,1u) from stb_1")
# tdSql.checkRows(10)
# tdSql.checkData(0,0,"2018-09-17 09:00:00.000000")
# tdSql.checkData(1,0,"2018-09-17 09:00:00.001000")
# tdSql.checkData(2,0,"2018-09-17 09:00:00.002000")
# tdSql.checkData(3,0,"2018-09-17 09:00:00.003000")
# tdSql.checkData(4,0,"2018-09-17 09:00:00.004000")
# tdSql.checkData(5,0,"2018-09-17 09:00:00.005000")
# tdSql.checkData(6,0,"2018-09-17 09:00:00.006000")
# tdSql.checkData(7,0,"2018-09-17 09:00:00.007000")
# tdSql.checkData(8,0,"2018-09-17 09:00:00.008000")
# tdSql.checkData(9,0,"2018-09-17 09:00:00.009000")
# tdSql.query("select timetruncate(ts,1b) from stb_1")
# tdSql.checkRows(10)
# tdSql.checkData(0,0,"2018-09-17 09:00:00.000000000")
# tdSql.checkData(1,0,"2018-09-17 09:00:00.001000000")
# tdSql.checkData(2,0,"2018-09-17 09:00:00.002000000")
# tdSql.checkData(3,0,"2018-09-17 09:00:00.003000000")
# tdSql.checkData(4,0,"2018-09-17 09:00:00.004000000")
# tdSql.checkData(5,0,"2018-09-17 09:00:00.005000000")
# tdSql.checkData(6,0,"2018-09-17 09:00:00.006000000")
# tdSql.checkData(7,0,"2018-09-17 09:00:00.007000000")
# tdSql.checkData(8,0,"2018-09-17 09:00:00.008000000")
# tdSql.checkData(9,0,"2018-09-17 09:00:00.009000000")
def stop(self): def stop(self):
tdSql.close() tdSql.close()
tdLog.success("%s successfully executed" % __file__) tdLog.success("%s successfully executed" % __file__)

View File

@ -0,0 +1,264 @@
import taos
import sys
import time
import socket
import os
import threading
import math
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.vgroups = 1
self.ctbNum = 1
self.rowsPerTbl = 100000
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 100000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("restart taosd to ensure that the data falls into the disk")
tdDnodes.stop(1)
tdDnodes.start(1)
return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9379)
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("%d tmq consume rows error!"%consumerId)
tmqCom.checkFileContent(consumerId, queryString)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9379)
# queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 1
expectrowcnt = math.ceil(totalRowsInserted/3)
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
firstConsumeRows = resultList[0]
if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 2
expectrowcnt = math.ceil(totalRowsInserted/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
secondConsumeRows = resultList[0]
if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 3
expectrowcnt = math.ceil(totalRowsInserted/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
thirdConsumeRows = resultList[0]
if not (totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
# total consume
actConsumeTotalRows = firstConsumeRows + secondConsumeRows + thirdConsumeRows
if not (totalRowsInserted == actConsumeTotalRows):
tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 2 end ...... ")
def run(self):
tdSql.prepare()
self.prepareTestEnv()
self.tmqCase1()
self.tmqCase2()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,245 @@
import taos
import sys
import time
import socket
import os
import threading
import math
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.vgroups = 4
self.ctbNum = 3000
self.rowsPerTbl = 150
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 200,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("restart taosd to ensure that the data falls into the disk")
tdDnodes.stop(1)
# tdDnodes.start(1)
tdDnodes.starttaosd(1)
return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+math.ceil(self.rowsPerTbl/5))
# queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("%d tmq consume rows error!"%consumerId)
# tmqCom.checkFileContent(consumerId, queryString)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 20,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 10,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+math.ceil(self.rowsPerTbl/5))
# queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
tdLog.info("select result rows: %d"%totalRowsInserted)
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 1
expectrowcnt = math.ceil(totalRowsInserted/3)
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
firstConsumeRows = resultList[0]
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 2
expectrowcnt = math.ceil(totalRowsInserted*2/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = firstConsumeRows + resultList[0]
if not (expectrowcnt >= resultList[0] and totalRowsInserted == actConsumeTotalRows):
tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0]))
tdLog.info("and sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 2 end ...... ")
def run(self):
tdSql.prepare()
self.prepareTestEnv()
self.tmqCase1()
self.tmqCase2()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,242 @@
import taos
import sys
import time
import socket
import os
import threading
import math
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.vgroups = 4
self.ctbNum = 3000
self.rowsPerTbl = 70
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("restart taosd to ensure that the data falls into the disk")
tdDnodes.stop(1)
# tdDnodes.start(1)
tdDnodes.starttaosd(1)
return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("%d tmq consume rows error!"%consumerId)
# tmqCom.checkFileContent(consumerId, queryString)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 1
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] / 3)
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
firstConsumeRows = resultList[0]
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 2
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = firstConsumeRows + resultList[0]
if not (expectrowcnt >= resultList[0] and totalRowsInserted == actConsumeTotalRows):
tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0]))
tdLog.info("and sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 2 end ...... ")
def run(self):
tdSql.prepare()
self.prepareTestEnv()
self.tmqCase1()
self.tmqCase2()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,243 @@
import taos
import sys
import time
import socket
import os
import threading
import math
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.vgroups = 1
self.ctbNum = 1
self.rowsPerTbl = 1000000
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 1000,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("restart taosd to ensure that the data falls into the disk")
tdDnodes.stop(1)
tdDnodes.start(1)
return
def tmqCase3(self):
tdLog.printNoPrefix("======== test case 3: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 15,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
# queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9379)
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 3
expectrowcnt = math.ceil(totalRowsInserted/3)
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
consumerId = 4
expectrowcnt = math.ceil(totalRowsInserted * 2/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 2
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = resultList[0] + resultList[1]
if not (totalRowsInserted == actConsumeTotalRows):
tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 3 end ...... ")
def tmqCase4(self):
tdLog.printNoPrefix("======== test case 4: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 25,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9379)
# queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 5
expectrowcnt = math.ceil(totalRowsInserted)
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:500, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait commit notify")
tmqCom.getStartCommitNotifyFromTmqsim()
tdLog.info("pkill consume processor")
tdCom.killProcessor("tmq_sim")
# time.sleep(10)
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 6
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = resultList[0]
if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted):
tdLog.info("act consume rows: %d"%(actConsumeTotalRows))
tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 4 end ...... ")
def run(self):
tdSql.prepare()
self.prepareTestEnv()
self.tmqCase3()
self.tmqCase4()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,247 @@
import taos
import sys
import time
import socket
import os
import threading
import math
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.vgroups = 4
self.ctbNum = 4000
self.rowsPerTbl = 150
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 200,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("restart taosd to ensure that the data falls into the disk")
tdDnodes.stop(1)
tdDnodes.start(1)
return
def tmqCase3(self):
tdLog.printNoPrefix("======== test case 3: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 10,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+math.ceil(self.rowsPerTbl/5))
# queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
tdLog.info("select result rows: %d"%totalRowsInserted)
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 3
expectrowcnt = math.ceil(totalRowsInserted/3)
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
consumerId = 4
expectrowcnt = math.ceil(totalRowsInserted*2/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
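# the two consumers registered above share one consumer group and split the topic; together they must consume exactly the query result rows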
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 2
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = resultList[0] + resultList[1]
if not (totalRowsInserted == actConsumeTotalRows):
tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 3 end ...... ")
def tmqCase4(self):
tdLog.printNoPrefix("======== test case 4: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 10,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+math.ceil(self.rowsPerTbl/5))
# queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
tdLog.info("select result rows: %d"%totalRowsInserted)
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 5
expectrowcnt = math.ceil(totalRowsInserted)
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:300, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait commit notify")
tmqCom.getStartCommitNotifyFromTmqsim()
# tdLog.info("wait start consume notify")
# tmqCom.getStartConsumeNotifyFromTmqsim()
tdLog.info("pkill consume processor")
tdCom.killProcessor("tmq_sim")
# time.sleep(10)
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 6
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = resultList[0]
if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted):
tdLog.info("act consume rows: %d"%(actConsumeTotalRows))
tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 4 end ...... ")
def run(self):
tdSql.prepare()
self.prepareTestEnv()
self.tmqCase3()
self.tmqCase4()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,241 @@
import taos
import sys
import time
import socket
import os
import threading
import math
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *
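# consume-from-tsdb test without a filter: the topic is a plain 'select *' over the stable; 4 vgroups, 3000 child tables, 70 rows per table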
class TDTestCase:
def __init__(self):
self.vgroups = 4
self.ctbNum = 3000
self.rowsPerTbl = 70
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("restart taosd to ensure that the data falls into the disk")
tdDnodes.stop(1)
tdDnodes.start(1)
return
def tmqCase3(self):
tdLog.printNoPrefix("======== test case 3: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 10,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 3
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] / 3)
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
consumerId = 4
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 2
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = resultList[0] + resultList[1]
if not (totalRowsInserted == actConsumeTotalRows):
tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 3 end ...... ")
def tmqCase4(self):
tdLog.printNoPrefix("======== test case 4: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 10,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 5
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"])
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait commit notify")
tmqCom.getStartCommitNotifyFromTmqsim()
tdLog.info("pkill consume processor")
tdCom.killProcessor("tmq_sim")
# time.sleep(10)
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 6
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = resultList[0]
if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted):
tdLog.info("act consume rows: %d"%(actConsumeTotalRows))
tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 4 end ...... ")
def run(self):
tdSql.prepare()
self.prepareTestEnv()
self.tmqCase3()
self.tmqCase4()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -165,3 +165,10 @@ python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg.py python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb-1ctb.py python3 ./test.py -f 7-tmq/tmqConsFromTsdb-1ctb.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb.py python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py